diff --git a/info/labs/lab02/build.sbt b/info/labs/lab02/build.sbt
new file mode 100644
index 0000000000000000000000000000000000000000..48a2d6bbe5aea6212fafc355584140e5f758b651
--- /dev/null
+++ b/info/labs/lab02/build.sbt
@@ -0,0 +1,16 @@
+version := "1.7"
+organization := "ch.epfl.lara"
+scalaVersion := "3.5.2"
+assembly / test := {}
+name := "amyc"
+
+Compile / scalaSource := baseDirectory.value / "src"
+scalacOptions ++= Seq("-feature")
+
+Test / scalaSource := baseDirectory.value / "test" / "scala"
+Test / parallelExecution := false
+libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % "test"
+libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.4" % "test"
+testOptions += Tests.Argument(TestFrameworks.JUnit, "-v")
+
+
diff --git a/info/labs/lab02/examples/Arithmetic.amy b/info/labs/lab02/examples/Arithmetic.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bc47b3dd27d257e4e4b2d70d654aa7cd894d5082
--- /dev/null
+++ b/info/labs/lab02/examples/Arithmetic.amy
@@ -0,0 +1,34 @@
+object Arithmetic
+  def pow(b: Int(32), e: Int(32)): Int(32) = {
+    if (e == 0) { 1 }
+    else {
+      if (e % 2 == 0) {
+        val rec: Int(32) = pow(b, e/2);
+        rec * rec
+      } else {
+        b * pow(b, e - 1)
+      }
+    }
+  }
+
+  def gcd(a: Int(32), b: Int(32)): Int(32) = {
+    if (a == 0 || b == 0) {
+      a + b
+    } else {
+      if (a < b) {
+        gcd(a, b % a)
+      } else {
+        gcd(a % b, b)
+      }
+    }
+  }
+
+  Std.printInt(pow(0, 10));
+  Std.printInt(pow(1, 5));
+  Std.printInt(pow(2, 10));
+  Std.printInt(pow(3, 3));
+  Std.printInt(gcd(0, 10));
+  Std.printInt(gcd(17, 99)); // 1
+  Std.printInt(gcd(16, 46)); // 2
+  Std.printInt(gcd(222, 888)) // 222
+end Arithmetic
diff --git a/info/labs/lab02/examples/Factorial.amy b/info/labs/lab02/examples/Factorial.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d31b150a6f62dd31e7b3063a7def606d26c602e9
--- /dev/null
+++ b/info/labs/lab02/examples/Factorial.amy
@@ -0,0 +1,12 @@
+object Factorial
+  def fact(i: Int(32)): Int(32) = {
+    if (i < 2) { 1 }
+    else { 
+      val rec: Int(32) = fact(i-1);
+      i * rec
+    }
+  }
+
+  Std.printString("5! = "  ++ Std.intToString(fact(5)));
+  Std.printString("10! = " ++ Std.intToString(fact(10)))
+end Factorial
diff --git a/info/labs/lab02/examples/Hanoi.amy b/info/labs/lab02/examples/Hanoi.amy
new file mode 100644
index 0000000000000000000000000000000000000000..911f7739f78487d4db3ffe7df283be04146e980a
--- /dev/null
+++ b/info/labs/lab02/examples/Hanoi.amy
@@ -0,0 +1,16 @@
+object Hanoi
+	
+  def solve(n : Int(32)) : Int(32) = {
+    if (n < 1) { 
+      error("can't solve Hanoi for less than 1 plate")
+    } else {
+      if (n == 1) {
+        1
+      } else {
+        2 * solve(n - 1) + 1
+      }
+    }
+  }
+
+  Std.printString("Hanoi for 4 plates: " ++ Std.intToString(solve(4)))
+end Hanoi
\ No newline at end of file
diff --git a/info/labs/lab02/examples/Hello.amy b/info/labs/lab02/examples/Hello.amy
new file mode 100644
index 0000000000000000000000000000000000000000..5cc4b6ea8c91294dfa6d00bc7a63be9881c1d6dd
--- /dev/null
+++ b/info/labs/lab02/examples/Hello.amy
@@ -0,0 +1,3 @@
+object Hello
+  Std.printString("Hello " ++ "world!")
+end Hello
diff --git a/info/labs/lab02/examples/HelloInt.amy b/info/labs/lab02/examples/HelloInt.amy
new file mode 100644
index 0000000000000000000000000000000000000000..79cab1aa6f078a8de78b9bb576f2ab559a2451e2
--- /dev/null
+++ b/info/labs/lab02/examples/HelloInt.amy
@@ -0,0 +1,7 @@
+object HelloInt
+  Std.printString("What is your name?");
+  val name: String = Std.readString();
+  Std.printString("Hello " ++ name ++ "! And how old are you?");
+  val age: Int(32) = Std.readInt();
+  Std.printString(Std.intToString(age) ++ " years old then.")
+end HelloInt
diff --git a/info/labs/lab02/examples/Printing.amy b/info/labs/lab02/examples/Printing.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bc3bac0381c6752e9729c7354a5f0eca2d27ae69
--- /dev/null
+++ b/info/labs/lab02/examples/Printing.amy
@@ -0,0 +1,12 @@
+object Printing
+  Std.printInt(0); Std.printInt(-222); Std.printInt(42);
+  Std.printBoolean(true); Std.printBoolean(false);
+  Std.printString(Std.digitToString(0));
+  Std.printString(Std.digitToString(5));
+  Std.printString(Std.digitToString(9));
+  Std.printString(Std.intToString(0));
+  Std.printString(Std.intToString(-111));
+  Std.printString(Std.intToString(22));
+  Std.printString("Hello " ++ "world!");
+  Std.printString("" ++ "")
+end Printing
diff --git a/info/labs/lab02/examples/TestLists.amy b/info/labs/lab02/examples/TestLists.amy
new file mode 100644
index 0000000000000000000000000000000000000000..a01698881a0b68e1cba5a92d59e01676339e0096
--- /dev/null
+++ b/info/labs/lab02/examples/TestLists.amy
@@ -0,0 +1,6 @@
+object TestLists 
+  val l: L.List = L.Cons(5, L.Cons(-5, L.Cons(-1, L.Cons(0, L.Cons(10, L.Nil())))));
+  Std.printString(L.toString(L.concat(L.Cons(1, L.Cons(2, L.Nil())), L.Cons(3, L.Nil()))));
+  Std.printInt(L.sum(l));
+  Std.printString(L.toString(L.mergeSort(l)))
+end TestLists
diff --git a/info/labs/lab02/lab02-README.md b/info/labs/lab02/lab02-README.md
new file mode 100644
index 0000000000000000000000000000000000000000..7cbc6fbc31a33893a09448d6e27091f50941b1a1
--- /dev/null
+++ b/info/labs/lab02/lab02-README.md
@@ -0,0 +1,104 @@
+# Lab 02: Lexer
+
+This assignment covers the first stage of the Amy compiler.
+
+## Code Scaffold
+
+In this lab you will start your own compiler from scratch, meaning that you will no longer rely on the compiler frontend that was previously provided to you as a jar file. You will build the lexical analysis phase (the `lexer`).
+
+Since you are now starting to work on your own compiler, we provide a fresh scaffold. We suggest keeping the interpreter alongside; we will tell you at which point you can add it back into this project.
+
+For now, you should work in this new project, which will grow into your full compiler. Subsequent labs will be delivered as files to add to this project.
+
+The structure of your project `src` directory should be as follows:
+
+```text
+lib 
+ └── scallion-assembly-0.6.1.jar    
+
+library
+ ├── ...
+ └── ...
+
+examples
+ ├── ...
+ └── ...
+
+amyc
+ ├── Main.scala                         
+ │
+ ├── parsing                             
+ │    ├── Lexer.scala
+ │    └── Tokens.scala
+ │
+ └── utils                               
+      ├── AmycFatalError.scala
+      ├── Context.scala
+      ├── Document.scala
+      ├── Env.scala
+      ├── Pipeline.scala
+      ├── Position.scala
+      ├── Reporter.scala
+      └── UniqueCounter.scala
+
+test
+├── scala
+│    └── amyc
+│         └── test
+│               ├── CompilerTest.scala
+│               ├── LexerTests.scala
+│               ├── TestSuite.scala
+│               └── TestUtils.scala
+└── resources
+      └── lexer
+           └── ...
+
+```
+
+This lab will focus on the following two files:
+
+* `src/amyc/parsing/Tokens.scala`: list of tokens and token kinds.
+* `src/amyc/parsing/Lexer.scala`: skeleton for the `Lexer` phase.
+
+Below you will find the instructions for this assignment, in which you will get to know and implement a lexer for the Amy language.
+
+## A Lexer for Amy
+
+The role of a lexer is to read the input text as a string and convert it to a list of tokens. Tokens are the smallest useful units in a source file: a name referring to a variable, a bracket, a keyword, etc. The lexer groups those units together (e.g. it returns the keyword `else` as a single unit, as opposed to the individual characters `e`, `l`, `s`, `e`) and abstracts away all useless information (whitespace, comments).
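+
+For instance, the (hypothetical, illustrative) one-line input `val x: Int(32) = 3 + 4` would be turned into a token stream along the following lines, using the token classes defined in `Tokens.scala` and the same output format as the reference files under `test/resources/lexer/outputs`:
+
+```text
+KeywordToken(val)(1:1)
+IdentifierToken(x)(1:5)
+DelimiterToken(:)(1:6)
+PrimTypeToken(Int)(1:8)
+DelimiterToken(()(1:11)
+IntLitToken(32)(1:12)
+DelimiterToken())(1:14)
+DelimiterToken(=)(1:16)
+IntLitToken(3)(1:18)
+OperatorToken(+)(1:20)
+IntLitToken(4)(1:22)
+EOFToken()(2:1)
+```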
+
+## Code structure
+
+You can find the `lexer` in the `Lexer.scala` file. It is based on Scallion and Silex, a pair of Scala libraries that simplify the implementation of parsing pipelines. Silex allows you to transform an input character stream (such as the contents of an Amy source file) into a sequence of Tokens. We are going to take a closer look at Scallion in the next lab, where our goal will be to build Amy's parser. You can find more information on Scallion [here](https://github.com/epfl-lara/scallion) and on Silex [here](https://github.com/epfl-lara/silex), but we have also included a short reference of Silex's API in `Lexer.scala`.
+
+The `Lexer` has the following components:
+
+* The public method is `run`. It just calls `lexer.spawn(source)` for every input file and concatenates the results.
+* `lexer` is the Silex-based definition of tokenization rules. Each rule corresponds to a regular expression matching a prefix of the remaining program input. Silex will compose all of these rules into one finite state machine and apply the maximum-munch or longest match rule you've seen in class.
+* Whenever a rule is found to match a (maximal) prefix of the remaining input, Scallion will call the transformation function provided using the `|>` operator in the rule. This function is given the matched input characters (`cs`) along with positional information (`range`) and should then produce an instance of `Token`. You can find its definition in `Tokens.scala`, which includes a list of all the different kinds of tokens that your Amy compiler should process. For instance, `KeywordToken("if")` represents an occurrence of the reserved word `if` in a program.
+For more details on how to write new rules, read the short introduction to Silex's API at the top of `Lexer.scala` or consider the examples on the Scallion website. You can also refer to [Silex's Scaladoc page](https://epfl-lara.github.io/silex/).
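+
+For instance, a rule for boolean literals could look roughly like the following sketch. It is only an illustration that mirrors the keyword rule already present in `Lexer.scala`; the actual rules are for you to write:
+
+```scala
+// Sketch only: match "true" or "false" and build a BoolLitToken
+// carrying the corresponding Boolean value, positioned at the start
+// of the matched range.
+word("true") | word("false")
+  |> { (cs, range) => BoolLitToken(cs.mkString == "true").setPos(range._1) },
+```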
+
+Your task is to complete the rules in `Lexer.scala` and implement the filtering of irrelevant tokens.
+
+## Notes
+
+Here are some details you should pay attention to:
+
+* Make sure you recognize keywords as their own token kind. `if`, for instance, should be lexed as a token `KeywordToken("if")`, not as an identifier with the content `if`.
+* Make sure you correctly register the position of all tokens. Note the `range` parameter of the transformer functions. Once you have created a token, use `setPos(range._1)` to associate it with its position in the program source.
+* In general, it is good to output as many errors as possible (this will be helpful to whoever uses your compiler, including yourself). Your lexer should therefore not give up after the first error, but rather skip the erroneous token, emit an error message, and then continue lexing. Scallion takes care of this for you for the most part. However, there are certain inputs that you might explicitly want to map to `ErrorToken`, such as unclosed multi-line comments.
+* The `Lexer` does not immediately read and return all tokens; it returns an `Iterator[Token]` that will be used by future phases to read tokens on demand.
+* Comments and whitespace should not produce tokens. (The most convenient way of doing this in Scallion is to first produce dedicated tokens and then filter them out later; see the related TODO in `Lexer.scala` and the sketch after this list.)
+* Returned tokens should be fresh instances of the appropriate `Token` subclass. Value tokens (tokens that carry a value, such as identifiers) need to be constructed with the appropriate value.
+* Make sure to correctly implement the Amy lexing rules for literals and identifiers.
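+
+As a minimal sketch of the filtering mentioned above (assuming `Tokens._` is imported, as it is in `Lexer.scala`), the predicate passed to `filter` in `AmyLexer.run` could be written along these lines:
+
+```scala
+// Sketch only: keep every token except the dedicated comment and
+// whitespace tokens, which are dropped from the resulting iterator.
+def isRelevant(token: Token): Boolean = token match {
+  case CommentToken(_) | SpaceToken() => false
+  case _                              => true
+}
+
+// e.g. lexer.spawn(source).filter(isRelevant)
+```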
+
+## Example Output
+
+For reference, you can look at the resources in the `test` folder to see example outputs. You can also inspect the tokens your own lexer produces by running the compiler with the `--printTokens` option (see `Main.scala`).
+
+## Deliverables
+
+Deadline: **14.03.2025 23:59:59**.
+
+As with the previous lab, you should submit your work on the corresponding Moodle assignment. You should submit the following file:
+
+* `Lexer.scala`: your implementation of the lexer.
diff --git a/info/labs/lab02/lib/scallion-assembly-0.6.1.jar b/info/labs/lab02/lib/scallion-assembly-0.6.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..074c47c01983b0096ab078cac3fceafc97814db4
Binary files /dev/null and b/info/labs/lab02/lib/scallion-assembly-0.6.1.jar differ
diff --git a/info/labs/lab02/library/List.amy b/info/labs/lab02/library/List.amy
new file mode 100644
index 0000000000000000000000000000000000000000..60fc3bc110a4df310733fd87d42ee24e4f99ee98
--- /dev/null
+++ b/info/labs/lab02/library/List.amy
@@ -0,0 +1,144 @@
+object L 
+  abstract class List
+  case class Nil() extends List
+  case class Cons(h: Int(32), t: List) extends List
+ 
+  def isEmpty(l : List): Boolean = { l match {
+    case Nil() => true
+    case _ => false 
+  }}
+
+  def length(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(_, t) => 1 + length(t)
+  }}
+
+  def head(l: List): Int(32) = {
+    l match {
+      case Cons(h, _) => h
+      case Nil() => error("head(Nil)")
+    }
+  }
+
+  def headOption(l: List): O.Option = {
+    l match {
+      case Cons(h, _) => O.Some(h)
+      case Nil() => O.None()
+    }
+  }
+
+  def reverse(l: List): List = {
+    reverseAcc(l, Nil())
+  }
+
+  def reverseAcc(l: List, acc: List): List = {
+    l match {
+      case Nil() => acc
+      case Cons(h, t) => reverseAcc(t, Cons(h, acc))
+    }
+  }
+
+  def indexOf(l: List, i: Int(32)): Int(32) = {
+    l match {
+      case Nil() => -1
+      case Cons(h, t) =>
+        if (h == i) { 0 }
+        else {
+          val rec: Int(32) = indexOf(t, i);
+          if (0 <= rec) { rec + 1 }
+          else { -1 }
+        }
+    }
+  }
+
+  def range(from: Int(32), to: Int(32)): List = {
+    if (to < from) { Nil() }
+    else {
+      Cons(from, range(from + 1, to))
+    }
+  }
+
+  def sum(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(h, t) => h + sum(t)
+  }}
+
+  def concat(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h, t) => Cons(h, concat(t, l2))
+    }
+  }
+
+  def contains(l: List, elem: Int(32)): Boolean = { l match {
+    case Nil() =>
+      false
+    case Cons(h, t) =>
+      h == elem || contains(t, elem)
+  }}
+
+  abstract class LPair
+  case class LP(l1: List, l2: List) extends LPair
+
+  def merge(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h1, t1) =>
+        l2 match {
+          case Nil() => l1
+          case Cons(h2, t2) =>
+            if (h1 <= h2) {
+              Cons(h1, merge(t1, l2))
+            } else {
+              Cons(h2, merge(l1, t2))
+            }
+        }
+    }
+  }
+
+  def split(l: List): LPair = {
+    l match {
+      case Cons(h1, Cons(h2, t)) =>
+        val rec: LPair = split(t);
+        rec match {
+          case LP(rec1, rec2) =>
+            LP(Cons(h1, rec1), Cons(h2, rec2))
+        }
+      case _ =>
+        LP(l, Nil())
+    }
+  }
+  def mergeSort(l: List): List = {
+    l match {
+      case Nil() => l
+      case Cons(h, Nil()) => l
+      case xs =>
+        split(xs) match {
+          case LP(l1, l2) =>
+            merge(mergeSort(l1), mergeSort(l2))
+        }
+    }
+  }
+  
+  def toString(l: List): String = { l match {
+    case Nil() => "List()"
+    case more => "List(" ++ toString1(more) ++ ")"
+  }}
+
+  def toString1(l : List): String = { l match {
+    case Cons(h, Nil()) => Std.intToString(h)
+    case Cons(h, t) => Std.intToString(h) ++ ", " ++ toString1(t)
+  }}
+
+  def take(l: List, n: Int(32)): List = {
+    if (n <= 0) { Nil() }
+    else { 
+      l match {
+        case Nil() => Nil()
+        case Cons(h, t) =>
+          Cons(h, take(t, n-1))
+      }
+    }
+  }
+    
+end L
diff --git a/info/labs/lab02/library/Option.amy b/info/labs/lab02/library/Option.amy
new file mode 100644
index 0000000000000000000000000000000000000000..dabec722fbf00083b815768e62e6bbf9f7096b23
--- /dev/null
+++ b/info/labs/lab02/library/Option.amy
@@ -0,0 +1,40 @@
+object O 
+  abstract class Option
+  case class None() extends Option
+  case class Some(v: Int(32)) extends Option
+
+  def isdefined(o: Option): Boolean = {
+    o match {
+      case None() => false
+      case _ => true
+    }
+  }
+
+  def get(o: Option): Int(32) = {
+    o match {
+      case Some(i) => i
+      case None() => error("get(None)")
+    }
+  }
+
+  def getOrElse(o: Option, i: Int(32)): Int(32) = {
+    o match {
+      case None() => i
+      case Some(oo) => oo
+    }
+  }
+
+  def orElse(o1: Option, o2: Option): Option = {
+    o1 match {
+      case Some(_) => o1
+      case None() => o2
+    }
+  }
+
+  def toList(o: Option): L.List = {
+    o match {
+      case Some(i) => L.Cons(i, L.Nil())
+      case None() => L.Nil()
+    }
+  }
+end O
diff --git a/info/labs/lab02/library/Std.amy b/info/labs/lab02/library/Std.amy
new file mode 100644
index 0000000000000000000000000000000000000000..511bb6eb1f7584652516f2fc3daf84da5cc8d987
--- /dev/null
+++ b/info/labs/lab02/library/Std.amy
@@ -0,0 +1,40 @@
+/** This module contains basic functionality for Amy,
+  * including stub implementations for some built-in functions
+  * (implemented in WASM or JavaScript)
+  */
+object Std 
+  def printInt(i: Int(32)): Unit = {
+    error("") // Stub implementation
+  }
+  def printString(s: String): Unit = {
+    error("") // Stub implementation
+  }
+  def printBoolean(b: Boolean): Unit = {
+    printString(booleanToString(b))
+  }
+
+  def readString(): String = {
+    error("") // Stub implementation
+  }
+
+  def readInt(): Int(32) = {
+    error("") // Stub implementation
+  }
+
+  def intToString(i: Int(32)): String = {
+    if (i < 0) {
+      "-" ++ intToString(-i)
+    } else {
+      val rem: Int(32) = i % 10;
+      val div: Int(32) = i / 10;
+      if (div == 0) { digitToString(rem) }
+      else { intToString(div) ++ digitToString(rem) }
+    }
+  }
+  def digitToString(i: Int(32)): String = {
+    error("") // Stub implementation
+  }
+  def booleanToString(b: Boolean): String = {
+    if (b) { "true" } else { "false" }
+  }
+end Std
diff --git a/info/labs/lab02/project/build.properties b/info/labs/lab02/project/build.properties
new file mode 100644
index 0000000000000000000000000000000000000000..73df629ac1a71e9f7a1c2a1b576bfa037a6142bd
--- /dev/null
+++ b/info/labs/lab02/project/build.properties
@@ -0,0 +1 @@
+sbt.version=1.10.7
diff --git a/info/labs/lab02/project/plugins.sbt b/info/labs/lab02/project/plugins.sbt
new file mode 100644
index 0000000000000000000000000000000000000000..04934558068c370e38064654370e09e029476366
--- /dev/null
+++ b/info/labs/lab02/project/plugins.sbt
@@ -0,0 +1,3 @@
+addSbtPlugin("com.lightbend.sbt" % "sbt-proguard" % "0.3.0")
+
+addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "1.2.0")
\ No newline at end of file
diff --git a/info/labs/lab02/src/amyc/Main.scala b/info/labs/lab02/src/amyc/Main.scala
new file mode 100644
index 0000000000000000000000000000000000000000..01037cebc7d1891bd323a7b2796b5e08a3ff55e2
--- /dev/null
+++ b/info/labs/lab02/src/amyc/Main.scala
@@ -0,0 +1,51 @@
+package amyc
+
+import utils._
+
+import java.io.File
+
+import amyc.parsing._
+
+object Main {
+  private def parseArgs(args: Array[String]): Context = {
+    var ctx = Context(new Reporter, Nil)
+    args foreach {
+      case "--interpret"   => ctx = ctx.copy(interpret = true)
+      case "--help"        => ctx = ctx.copy(help = true)
+      case "--type-check"  => ctx = ctx.copy(typeCheck = true)
+      case "--printTokens" => ctx = ctx.copy(printTokens = true)
+      case file             => ctx = ctx.copy(files = ctx.files :+ file)
+    }
+    ctx
+  }
+
+  def main(args: Array[String]): Unit = {
+    val ctx = parseArgs(args)
+    val pipeline = AmyLexer.andThen(DisplayTokens)
+
+    if ctx.help then
+      println("Usage: amyc [ --interpret | --type-check | --printTokens ] file1.amy file2.amy ...")
+      sys.exit(0)
+    val files = ctx.files.map(new File(_))
+
+    try {
+      if (files.isEmpty) {
+        ctx.reporter.fatal("No input files")
+      }
+      files.find(!_.exists()).foreach { f =>
+        ctx.reporter.fatal(s"File not found: ${f.getName}")
+      }
+      if ctx.interpret || ctx.typeCheck then
+        ctx.reporter.fatal("Unsupported actions for now")
+      else if ctx.printTokens then
+        pipeline.run(ctx)(files)
+        ctx.reporter.terminateIfErrors()
+      else
+        ctx.reporter.fatal("No action specified")
+      ctx.reporter.terminateIfErrors()
+    } catch {
+      case AmycFatalError(_) =>
+        sys.exit(1)
+    }
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab02/src/amyc/parsing/Lexer.scala b/info/labs/lab02/src/amyc/parsing/Lexer.scala
new file mode 100644
index 0000000000000000000000000000000000000000..60865dd0fed431ef6804b5aa023e0bc49ce8b786
--- /dev/null
+++ b/info/labs/lab02/src/amyc/parsing/Lexer.scala
@@ -0,0 +1,138 @@
+package amyc
+package parsing
+
+import amyc.utils._
+import java.io.File
+
+import silex._
+
+import amyc.utils.Position
+
+// The lexer for Amy.
+object AmyLexer extends Pipeline[List[File], Iterator[Token]]
+                with Lexers {
+
+  /** Tiny Silex reference:
+    * ==============================
+    * Silex's lexer essentially allows you to define a list of regular expressions
+    * in their order of priority. To tokenize a given input stream of characters, each
+    * individual regular expression is applied in turn. If a given expression matches, it
+    * is used to produce a token of maximal length. Whenever a regular expression does not
+    * match, the expression of next-highest priority is tried.
+    * The result is a stream of tokens.
+    *
+    * Regular expressions `r` can be built using the following operators:
+    *   - `word("abc")`  matches the sequence "abc" exactly
+    *   - `r1 | r2`      matches either expression `r1` or expression `r2`
+    *   - `r1 ~ r2`      matches `r1` followed by `r2`
+    *   - `oneOf("xy")`  matches either "x" or "y"
+    *                    (i.e., it is a shorthand of `word` and `|` for single characters)
+    *   - `elem(c)`      matches character `c`
+    *   - `elem(f)`      matches any character for which the boolean predicate `f` holds 
+    *   - `opt(r)`       matches `r` or nothing at all
+    *   - `many(r)`      matches any number of repetitions of `r` (including none at all)
+    *   - `many1(r)`     matches any non-zero number of repetitions of `r`
+    *  
+    * To define the token that should be output for a given expression, one can use
+    * the `|>` combinator with an expression on the left-hand side and a function
+    * producing the token on the right. The function is given the sequence of matched
+    * characters and the source-position range as arguments.
+    * 
+    * For instance,
+    *
+    *   `elem(_.isDigit) ~ word("kg") |> {
+    *     (cs, range) => WeightLiteralToken(cs.mkString).setPos(range._1) }`
+    *
+    * will match a single digit followed by the characters "kg" and turn them into a
+    * "WeightLiteralToken" whose value will be the full string matched (e.g. "1kg").
+    */
+
+
+  // Type of characters consumed.
+  type Character = Char
+
+  // Type of positions.
+  type Position = SourcePosition
+
+  // Type of tokens produced.
+  type Token = parsing.Token
+
+  import Tokens._
+
+  val lexer = Lexer(
+    // Keywords,
+    word("abstract") | word("case") | word("class") |
+    word("def") | word("else") | word("extends") |
+    word("if") | word("match") | word("object") |
+    word("val") | word("error") | word("_") | word("end")
+      |> { (cs, range) => KeywordToken(cs.mkString).setPos(range._1) },
+
+    // Primitive type names,
+    // TODO
+    
+
+    // Boolean literals,
+    // TODO
+
+    // Operators,
+    // NOTE: You can use `oneOf("abc")` as a shortcut for `word("a") | word("b") | word("c")`
+    // TODO
+    
+    // Identifiers,
+    // TODO
+    
+    // Integer literal,
+    // NOTE: Make sure to handle invalid (e.g. overflowing) integer values safely by
+    //       emitting an ErrorToken instead.
+    // TODO
+    
+    // String literal,
+    // TODO
+    
+    // Delimiters,
+    // TODO
+
+
+    // Whitespace,
+    // TODO
+
+    // Single line comment,
+    word("//") ~ many(elem(_ != '\n'))
+      |> { cs => CommentToken(cs.mkString("")) },
+
+    // Multiline comments,
+    // NOTE: Amy does not support nested multi-line comments (e.g. `/* foo /* bar */ */`).
+    //       Make sure that unclosed multi-line comments result in an ErrorToken.
+    // TODO
+  ) onError {
+    // We also emit ErrorTokens for Silex-handled errors.
+    (cs, range) => ErrorToken(cs.mkString).setPos(range._1)
+  } onEnd {
+    // Once all the input has been consumed, we emit one EOFToken.
+    pos => EOFToken().setPos(pos)
+  }
+
+  override def run(ctx: amyc.utils.Context)(files: List[File]): Iterator[Token] = {
+    var it = Seq[Token]().iterator
+
+    for (file <- files) {
+      val source = Source.fromFile(file.toString, SourcePositioner(file))
+      it ++= lexer.spawn(source).filter {
+        token =>
+          // TODO: Remove all whitespace and comment tokens
+          ???
+      }.map {
+        case token@ErrorToken(error) => ctx.reporter.fatal("Unknown token at " + token.position + ": " + error)
+        case token => token
+      }
+    }
+    it
+  }
+}
+
+/** Extracts all tokens from input and displays them */
+object DisplayTokens extends Pipeline[Iterator[Token], Unit] {
+  override def run(ctx: Context)(tokens: Iterator[Token]): Unit = {
+    tokens.foreach(println(_))
+  }
+}
diff --git a/info/labs/lab02/src/amyc/parsing/Tokens.scala b/info/labs/lab02/src/amyc/parsing/Tokens.scala
new file mode 100644
index 0000000000000000000000000000000000000000..e002e1f71795aa2368726281e07e7373fcbfbe8b
--- /dev/null
+++ b/info/labs/lab02/src/amyc/parsing/Tokens.scala
@@ -0,0 +1,58 @@
+package amyc
+package parsing
+
+import amyc.utils.Positioned
+
+sealed trait Token extends Positioned with Product {
+  override def toString = {
+    productPrefix + productIterator.mkString("(", ",", ")") + "(" + position.withoutFile + ")"
+  }
+}
+
+object Tokens {
+  final case class KeywordToken(value: String) extends Token    // e.g. keyword "if"
+  final case class IdentifierToken(name: String) extends Token  // e.g. variable name "x" 
+  final case class PrimTypeToken(value: String) extends Token   // e.g. primitive type "Int"
+  final case class IntLitToken(value: Int) extends Token        // e.g. integer literal "123"
+  final case class StringLitToken(value: String) extends Token
+  final case class BoolLitToken(value: Boolean) extends Token
+  final case class DelimiterToken(value: String) extends Token  // .,:;(){}[]= and =>
+  final case class OperatorToken(name: String) extends Token    // e.g. "+"
+  final case class CommentToken(text: String) extends Token     // e.g. "// this is a comment"
+  final case class SpaceToken() extends Token                   // e.g. "\n  "
+  final case class ErrorToken(content: String) extends Token
+  final case class EOFToken() extends Token                     // special token at the end of file
+}
+
+sealed abstract class TokenKind(representation: String) {
+  override def toString: String = representation
+}
+
+object TokenKinds {
+  final case class KeywordKind(value: String) extends TokenKind(value)
+  case object IdentifierKind extends TokenKind("<Identifier>")
+  case object PrimTypeKind extends TokenKind("<Primitive Type>")
+  case object LiteralKind extends TokenKind("<Literal>")
+  final case class DelimiterKind(value: String) extends TokenKind(value)
+  final case class OperatorKind(value: String) extends TokenKind(value)
+  case object EOFKind extends TokenKind("<EOF>")
+  case object NoKind extends TokenKind("<???>")
+}
+
+object TokenKind {
+  import Tokens._
+  import TokenKinds._
+
+  def of(token: Token): TokenKind = token match {
+    case KeywordToken(value) => KeywordKind(value)
+    case IdentifierToken(_) => IdentifierKind
+    case PrimTypeToken(_) => PrimTypeKind
+    case BoolLitToken(_) => LiteralKind
+    case IntLitToken(_) => LiteralKind
+    case StringLitToken(_) => LiteralKind
+    case DelimiterToken(value) => DelimiterKind(value)
+    case OperatorToken(value) => OperatorKind(value)
+    case EOFToken() => EOFKind
+    case _ => NoKind
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab02/src/amyc/utils/AmycFatalError.scala b/info/labs/lab02/src/amyc/utils/AmycFatalError.scala
new file mode 100644
index 0000000000000000000000000000000000000000..36f2839f7daaffec9de0f7811a41a59b5d3ee4f3
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/AmycFatalError.scala
@@ -0,0 +1,3 @@
+package amyc.utils
+
+case class AmycFatalError(msg: String) extends Exception(msg)
diff --git a/info/labs/lab02/src/amyc/utils/Context.scala b/info/labs/lab02/src/amyc/utils/Context.scala
new file mode 100644
index 0000000000000000000000000000000000000000..6781a090d51907556f640107ae657200233f10c5
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Context.scala
@@ -0,0 +1,13 @@
+package amyc.utils
+
+// Contains a reporter and configuration for the compiler
+case class Context(
+  reporter: Reporter,
+  files: List[String],
+  printTokens: Boolean = false,
+  printTrees: Boolean = false,
+  printNames: Boolean = false,
+  interpret: Boolean = false,
+  typeCheck: Boolean = false,
+  help: Boolean = false
+)
diff --git a/info/labs/lab02/src/amyc/utils/Document.scala b/info/labs/lab02/src/amyc/utils/Document.scala
new file mode 100644
index 0000000000000000000000000000000000000000..93aed6eb941b866f36a018a54443c5f2984babb6
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Document.scala
@@ -0,0 +1,49 @@
+package amyc.utils
+
+// A structured document to be printed with nice indentation
+abstract class Document {
+
+  def <:>(other: Document) = Lined(List(this, other))
+
+  def print: String = {
+    val sb = new StringBuffer()
+
+    def rec(d: Document)(implicit ind: Int, first: Boolean): Unit = d match {
+      case Raw(s) =>
+        if (first && s.nonEmpty) sb.append(("  " * ind))
+        sb.append(s)
+      case Indented(doc) =>
+        rec(doc)(ind + 1, first)
+      case Unindented(doc) =>
+        assume(ind > 0)
+        rec(doc)(ind - 1, first)
+      case Lined(Nil, _) => // skip
+      case Lined(docs, sep) =>
+        rec(docs.head)
+        docs.tail foreach { doc =>
+          rec(sep)(ind, false)
+          rec(doc)(ind, false)
+        }
+      case Stacked(Nil, _) => // skip
+      case Stacked(docs, emptyLines) =>
+        rec(docs.head)
+        docs.tail foreach { doc =>
+          sb.append("\n")
+          if (emptyLines) sb.append("\n")
+          rec(doc)(ind, true)
+        }
+    }
+
+    rec(this)(0, true)
+    sb.toString
+  }
+}
+case class Indented(content: Document) extends Document
+case class Unindented(content: Document) extends Document
+case class Stacked(docs: List[Document], emptyLines: Boolean = false) extends Document
+case class Lined(docs: List[Document], separator: Document = Raw("")) extends Document
+case class Raw(s: String) extends Document
+
+object Stacked {
+  def apply(docs: Document*): Stacked = Stacked(docs.toList)
+}
\ No newline at end of file
diff --git a/info/labs/lab02/src/amyc/utils/Env.scala b/info/labs/lab02/src/amyc/utils/Env.scala
new file mode 100644
index 0000000000000000000000000000000000000000..b8040eea32106540b6cdbee6dcdcfaf947c38efb
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Env.scala
@@ -0,0 +1,19 @@
+package amyc.utils
+
+object Env {
+  trait OS
+  object Linux extends OS
+  object Windows extends OS
+  object Mac extends OS
+
+  lazy val os = {
+    // If all fails returns Linux
+    val optOsName = Option(System.getProperty("os.name"))
+    optOsName.map(_.toLowerCase()).map { osName =>
+      if (osName.contains("linux")) then Linux
+      else if (osName.contains("win")) then Windows
+      else if (osName.contains("mac")) then Mac
+      else Linux
+    } getOrElse Linux
+  }
+}
diff --git a/info/labs/lab02/src/amyc/utils/Pipeline.scala b/info/labs/lab02/src/amyc/utils/Pipeline.scala
new file mode 100644
index 0000000000000000000000000000000000000000..eb85f877c2a8ab1aec1e4b3915660c1d5b0c1b87
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Pipeline.scala
@@ -0,0 +1,21 @@
+package amyc.utils
+
+// A sequence of operations to be run by the compiler,
+// with interruption at every stage if there is an error
+abstract class Pipeline[-F, +T] {
+  self =>
+
+  def andThen[G](thenn: Pipeline[T, G]): Pipeline[F, G] = new Pipeline[F,G] {
+    def run(ctx : Context)(v : F) : G = {
+      val first = self.run(ctx)(v)
+      ctx.reporter.terminateIfErrors()
+      thenn.run(ctx)(first)
+    }
+  }
+
+  def run(ctx: Context)(v: F): T
+}
+
+case class Noop[T]() extends Pipeline[T, T] {
+  def run(ctx: Context)(v: T) = v
+}
diff --git a/info/labs/lab02/src/amyc/utils/Position.scala b/info/labs/lab02/src/amyc/utils/Position.scala
new file mode 100644
index 0000000000000000000000000000000000000000..a938eecfc113966d8d25d8cffd56693f4ee360bd
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Position.scala
@@ -0,0 +1,81 @@
+package amyc.utils
+
+import java.io.File
+
+import silex._
+
+object Position {
+  /** Number of bits used to encode the line number */
+  private final val LINE_BITS   = 20
+  /** Number of bits used to encode the column number */
+  private final val COLUMN_BITS = 31 - LINE_BITS // no negatives => 31
+  /** Mask to decode the line number */
+  private final val LINE_MASK   = (1 << LINE_BITS) - 1
+  /** Mask to decode the column number */
+  private final val COLUMN_MASK = (1 << COLUMN_BITS) - 1
+
+  private def lineOf(pos: Int): Int = (pos >> COLUMN_BITS) & LINE_MASK
+  private def columnOf(pos: Int): Int = pos & COLUMN_MASK
+
+  def fromFile(f: File, i: Int) = {
+    SourcePosition(f, lineOf(i), columnOf(i))
+  }
+}
+
+abstract class Position {
+  val file: File
+  val line: Int
+  val col: Int
+
+  def isDefined: Boolean
+  def withoutFile: String
+}
+
+case class SourcePosition(file: File, line: Int, col: Int) extends Position {
+  override def toString: String = s"${file.getPath}:$line:$col"
+  def withoutFile = s"$line:$col"
+  val isDefined = true
+}
+
+case object NoPosition extends Position {
+  val file = null
+  val line = 0
+  val col = 0
+
+  override def toString: String = "?:?"
+  def withoutFile = toString
+  val isDefined = false
+}
+
+// A trait for entities which have a position in a file
+trait Positioned {
+
+  protected var pos_ : Position = NoPosition
+
+  def hasPosition = pos_ != NoPosition
+
+  def position = pos_
+
+  def setPos(pos: Position): this.type = {
+    pos_ = pos
+    this
+  }
+
+  def setPos(other: Positioned): this.type = {
+    setPos(other.position)
+  }
+
+}
+
+case class SourcePositioner(file: File) extends Positioner[Char, SourcePosition] {
+  override val start: SourcePosition = SourcePosition(file, 1, 1)
+
+  override def increment(position: SourcePosition, character: Char): SourcePosition =
+    if (character == '\n') {
+      position.copy(line = position.line + 1, col = 1)
+    }
+    else {
+      position.copy(col = position.col + 1)
+    }
+}
+
diff --git a/info/labs/lab02/src/amyc/utils/Reporter.scala b/info/labs/lab02/src/amyc/utils/Reporter.scala
new file mode 100644
index 0000000000000000000000000000000000000000..48ed7d28bbd44899b02cc6dc636723d121f87d67
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/Reporter.scala
@@ -0,0 +1,87 @@
+package amyc.utils
+
+import java.io.File
+import scala.io.Source
+
+// Reports errors and warnings during compilation
+class Reporter {
+
+  /** Issues some information from the compiler */
+  def info(msg: Any, pos: Position = NoPosition): Unit = {
+    report("[ Info  ]", msg, pos)
+  }
+
+  /** Issues a warning from the compiler */
+  def warning(msg: Any, pos: Position = NoPosition): Unit = {
+    report("[Warning]", msg, pos)
+  }
+
+  private var hasErrors = false
+
+  /** Issues a recoverable error message */
+  def error(msg: Any, pos: Position = NoPosition): Unit = {
+    hasErrors = true
+    report("[ Error ]", msg, pos)
+  }
+
+  /** Used for an unrecoverable error: Issues a message, then exits the compiler */
+  def fatal(msg: Any, pos: Position = NoPosition): Nothing = {
+    report("[ Fatal ]", msg, pos)
+    // Despite printing the message, we store it in the error for testing
+    val errMsg = s"$pos: $msg"
+    throw AmycFatalError(errMsg)
+  }
+
+  // Versions for Positioned
+  def info(msg: Any, pos: Positioned): Unit = info(msg, pos.position)
+  def warning(msg: Any, pos: Positioned): Unit = warning(msg, pos.position)
+  def error(msg: Any, pos: Positioned): Unit = error(msg, pos.position)
+  def fatal(msg: Any, pos: Positioned): Nothing = fatal(msg, pos.position)
+
+
+  /** Terminates the compiler if any errors have been detected. */
+  def terminateIfErrors() = {
+    if (hasErrors) {
+      fatal("There were errors.")
+    }
+  }
+
+  private def err(msg: String): Unit = {
+    Console.err.println(msg)
+  }
+
+  private def report(prefix: String, msg: Any, pos: Position): Unit = {
+    if (pos.isDefined) {
+      err(s"$prefix $pos: $msg")
+
+      val lines = getLines(pos.file)
+
+      if (pos.line > 0 && pos.line-1 < lines.size) {
+        err(s"$prefix ${lines(pos.line-1)}")
+        err(prefix + " " + " "*(pos.col - 1)+"^")
+      } else {
+        err(s"$prefix <line unavailable in source file>")
+      }
+    } else {
+      err(s"$prefix $msg")
+    }
+  }
+
+  private var filesToLines = Map[File, IndexedSeq[String]]()
+
+  private def getLines(f: File): IndexedSeq[String] = {
+    filesToLines.get(f) match {
+      case Some(lines) =>
+        lines
+
+      case None =>
+        val source = Source.fromFile(f).withPositioning(true)
+        val lines = source.getLines().toIndexedSeq
+        source.close()
+
+        filesToLines += f -> lines
+
+        lines
+    }
+  }
+}
diff --git a/info/labs/lab02/src/amyc/utils/UniqueCounter.scala b/info/labs/lab02/src/amyc/utils/UniqueCounter.scala
new file mode 100644
index 0000000000000000000000000000000000000000..a3a9cc666b9e20d2aca9d6c942a9ece3d75fb8e7
--- /dev/null
+++ b/info/labs/lab02/src/amyc/utils/UniqueCounter.scala
@@ -0,0 +1,14 @@
+package amyc.utils
+
+import scala.collection.mutable
+
+// Generates unique counters for each element of a type K
+class UniqueCounter[K] {
+  private val elemIds = mutable.Map[K, Int]().withDefaultValue(-1)
+
+  def next(key: K): Int = synchronized {
+    elemIds(key) += 1
+    elemIds(key)
+  }
+
+}
diff --git a/info/labs/lab02/test/resources/lexer/failing/Invalid.grading.amy b/info/labs/lab02/test/resources/lexer/failing/Invalid.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d7d516487e6811c8d6468e5f106ed26514eee95c
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/Invalid.grading.amy
@@ -0,0 +1 @@
+^
diff --git a/info/labs/lab02/test/resources/lexer/failing/SingleAmp.grading.amy b/info/labs/lab02/test/resources/lexer/failing/SingleAmp.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..4bd7e7a4b714afc3586d662aabe18c81067db135
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/SingleAmp.grading.amy
@@ -0,0 +1 @@
+&
diff --git a/info/labs/lab02/test/resources/lexer/failing/SingleBar.grading.amy b/info/labs/lab02/test/resources/lexer/failing/SingleBar.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..948cf947f86682cc5c1a28187aa1fc360525a7e9
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/SingleBar.grading.amy
@@ -0,0 +1 @@
+|
diff --git a/info/labs/lab02/test/resources/lexer/failing/TooBigInt.grading.amy b/info/labs/lab02/test/resources/lexer/failing/TooBigInt.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..18ac27b89c4d5a7fee665889c4071bf186fd941a
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/TooBigInt.grading.amy
@@ -0,0 +1 @@
+999999999999999999999999999
diff --git a/info/labs/lab02/test/resources/lexer/failing/UnclosedComment.grading.amy b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..aa0e17243104a39e24a8a82013af1e8b8f227d61
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment.grading.amy
@@ -0,0 +1 @@
+/* * /
diff --git a/info/labs/lab02/test/resources/lexer/failing/UnclosedComment2.grading.amy b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..63ea916ef5f30255aef0c0b0b63f37b0e1d99fb2
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment2.grading.amy
@@ -0,0 +1 @@
+/*/
diff --git a/info/labs/lab02/test/resources/lexer/failing/UnclosedComment3.grading.amy b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..f1206f0677cc3d61e393121668c78ab50d827856
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/UnclosedComment3.grading.amy
@@ -0,0 +1 @@
+/***
\ No newline at end of file
diff --git a/info/labs/lab02/test/resources/lexer/failing/UnclosedString1.grading.amy b/info/labs/lab02/test/resources/lexer/failing/UnclosedString1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1cc80efa6fc7641f9cc21ee1b177fbba1a89e2c3
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/UnclosedString1.grading.amy
@@ -0,0 +1 @@
+"
diff --git a/info/labs/lab02/test/resources/lexer/failing/UnclosedString2.grading.amy b/info/labs/lab02/test/resources/lexer/failing/UnclosedString2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..cb276c1017e5e7221c716575d2f9817890e3ee7f
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/failing/UnclosedString2.grading.amy
@@ -0,0 +1,2 @@
+"
+"
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Combinations.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Combinations.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4864eff8574166b43fd4990d605463a001bbfee9
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Combinations.grading.txt
@@ -0,0 +1,19 @@
+KeywordToken(object)(1:1)
+OperatorToken(<=)(1:7)
+IdentifierToken(id_1)(1:9)
+OperatorToken(++)(1:13)
+OperatorToken(+)(1:15)
+KeywordToken(_)(1:16)
+IntLitToken(1)(1:17)
+DelimiterToken({)(1:18)
+IdentifierToken(id)(1:19)
+DelimiterToken(})(1:21)
+DelimiterToken(()(1:22)
+DelimiterToken())(1:23)
+KeywordToken(class)(1:24)
+OperatorToken(<=)(1:29)
+DelimiterToken(=>)(1:31)
+OperatorToken(==)(1:33)
+OperatorToken(<)(1:35)
+OperatorToken(%)(1:36)
+EOFToken()(2:1)
\ No newline at end of file
diff --git a/info/labs/lab02/test/resources/lexer/outputs/CommentClosedTwice.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/CommentClosedTwice.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..45edd1f23caa974b25f05d0134eeeb7cbb8997f4
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/CommentClosedTwice.grading.txt
@@ -0,0 +1,3 @@
+OperatorToken(*)(4:3)
+OperatorToken(/)(4:4)
+EOFToken()(4:5)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Comments.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Comments.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e31001be1927d630f8d9aef9ac05012930637a2
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Comments.grading.txt
@@ -0,0 +1,8 @@
+IntLitToken(1)(1:1)
+IntLitToken(2)(3:1)
+IntLitToken(3)(4:1)
+IntLitToken(4)(5:1)
+IntLitToken(5)(10:1)
+IntLitToken(6)(11:12)
+IntLitToken(7)(12:1)
+EOFToken()(14:1)
\ No newline at end of file
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Delimiters.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Delimiters.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0282fae7374357f7494b08822b350f1e92d8ddcd
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Delimiters.grading.txt
@@ -0,0 +1,11 @@
+DelimiterToken({)(1:1)
+DelimiterToken(})(1:3)
+DelimiterToken(()(1:5)
+DelimiterToken())(1:7)
+DelimiterToken(,)(1:9)
+DelimiterToken(:)(1:11)
+DelimiterToken(.)(1:13)
+DelimiterToken(=)(1:15)
+DelimiterToken(=>)(1:17)
+KeywordToken(_)(1:20)
+EOFToken()(2:1)
\ No newline at end of file
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Identifiers.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Identifiers.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a14fd9f41e5277fff6aebed89469b95932e32991
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Identifiers.grading.txt
@@ -0,0 +1,21 @@
+IdentifierToken(app)(1:1)
+IdentifierToken(boolean)(1:5)
+IdentifierToken(caSe)(2:1)
+IdentifierToken(Class)(2:6)
+IdentifierToken(df)(2:12)
+IdentifierToken(elze)(2:15)
+IdentifierToken(Error)(2:20)
+IdentifierToken(Extends)(2:26)
+IdentifierToken(False)(2:34)
+IdentifierToken(iff)(2:40)
+IdentifierToken(int)(2:44)
+IdentifierToken(module)(2:48)
+IdentifierToken(string)(2:55)
+IdentifierToken(True)(3:1)
+IdentifierToken(unit)(3:6)
+IdentifierToken(vals)(3:11)
+IdentifierToken(this_is_id)(5:1)
+IdentifierToken(this_IS_id2)(5:12)
+IdentifierToken(st1ll1s_1d)(5:24)
+IdentifierToken(St1ll1s_1d)(7:1)
+EOFToken()(8:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/IntLiterals.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/IntLiterals.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..289b7358b473f12827ce309abc4a2558867adfcc
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/IntLiterals.grading.txt
@@ -0,0 +1,6 @@
+IntLitToken(123)(1:1)
+IntLitToken(12345)(1:5)
+IntLitToken(6789)(2:1)
+OperatorToken(+)(2:5)
+IntLitToken(12345)(2:6)
+EOFToken()(3:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Keywords.txt b/info/labs/lab02/test/resources/lexer/outputs/Keywords.txt
new file mode 100644
index 0000000000000000000000000000000000000000..521b69bfd954641d3686c8259570d0082ce43f6e
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Keywords.txt
@@ -0,0 +1,19 @@
+KeywordToken(abstract)(1:1)
+PrimTypeToken(Boolean)(1:10)
+KeywordToken(case)(2:1)
+KeywordToken(class)(2:6)
+KeywordToken(def)(2:12)
+KeywordToken(else)(2:16)
+KeywordToken(error)(2:21)
+KeywordToken(extends)(2:27)
+BoolLitToken(false)(2:35)
+KeywordToken(if)(2:41)
+PrimTypeToken(Int)(2:44)
+KeywordToken(match)(2:48)
+KeywordToken(object)(2:54)
+PrimTypeToken(String)(2:61)
+BoolLitToken(true)(3:1)
+PrimTypeToken(Unit)(3:6)
+KeywordToken(val)(3:11)
+KeywordToken(end)(3:15)
+EOFToken()(4:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Operators.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Operators.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e901ec948a2e92a69c75aa399a739a1644e1353f
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Operators.grading.txt
@@ -0,0 +1,14 @@
+DelimiterToken(;)(1:1)
+OperatorToken(+)(1:3)
+OperatorToken(-)(1:5)
+OperatorToken(*)(1:7)
+OperatorToken(/)(1:9)
+OperatorToken(%)(1:11)
+OperatorToken(<)(1:13)
+OperatorToken(<=)(1:15)
+OperatorToken(&&)(1:18)
+OperatorToken(||)(1:21)
+OperatorToken(==)(1:24)
+OperatorToken(++)(1:27)
+OperatorToken(!)(1:30)
+EOFToken()(2:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/StringLiterals.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/StringLiterals.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..12abcd1fee64aee67356b96298403500cdb3d49a
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/StringLiterals.grading.txt
@@ -0,0 +1,5 @@
+StringLitToken(This is a string)(1:1)
+StringLitToken(Another with ^^ | # invalid chars)(2:1)
+StringLitToken(No escape \n characters \t)(3:1)
+StringLitToken( \\ No comments /* either )(4:1)
+EOFToken()(5:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/TwoFiles.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/TwoFiles.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..60b6c51dd363a885eca9434f60d375f458d0d856
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/TwoFiles.grading.txt
@@ -0,0 +1,34 @@
+KeywordToken(abstract)(1:1)
+PrimTypeToken(Boolean)(1:10)
+KeywordToken(case)(2:1)
+KeywordToken(class)(2:6)
+KeywordToken(def)(2:12)
+KeywordToken(else)(2:16)
+KeywordToken(error)(2:21)
+KeywordToken(extends)(2:27)
+BoolLitToken(false)(2:35)
+KeywordToken(if)(2:41)
+PrimTypeToken(Int)(2:44)
+KeywordToken(match)(2:48)
+KeywordToken(object)(2:54)
+PrimTypeToken(String)(2:61)
+BoolLitToken(true)(3:1)
+PrimTypeToken(Unit)(3:6)
+KeywordToken(val)(3:11)
+KeywordToken(end)(3:15)
+EOFToken()(4:1)
+
+DelimiterToken(;)(1:1)
+OperatorToken(+)(1:3)
+OperatorToken(-)(1:5)
+OperatorToken(*)(1:7)
+OperatorToken(/)(1:9)
+OperatorToken(%)(1:11)
+OperatorToken(<)(1:13)
+OperatorToken(<=)(1:15)
+OperatorToken(&&)(1:18)
+OperatorToken(||)(1:21)
+OperatorToken(==)(1:24)
+OperatorToken(++)(1:27)
+OperatorToken(!)(1:30)
+EOFToken()(2:1)
diff --git a/info/labs/lab02/test/resources/lexer/outputs/Whitespace.grading.txt b/info/labs/lab02/test/resources/lexer/outputs/Whitespace.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..95609c96158df2795b3c03de0fd2ebbf320acad2
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/outputs/Whitespace.grading.txt
@@ -0,0 +1,3 @@
+IntLitToken(1)(1:2)
+IntLitToken(2)(2:5)
+EOFToken()(3:1)
diff --git a/info/labs/lab02/test/resources/lexer/passing/Combinations.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Combinations.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..11625fe536bdc1cbd33ad154928cbde42941dd77
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Combinations.grading.amy
@@ -0,0 +1 @@
+object<=id_1+++_1{id}()class<==>==<%
diff --git a/info/labs/lab02/test/resources/lexer/passing/CommentClosedTwice.grading.amy b/info/labs/lab02/test/resources/lexer/passing/CommentClosedTwice.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1ac7d3965a7ffcd7526299aeb0132a4aecdcff0e
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/CommentClosedTwice.grading.amy
@@ -0,0 +1,4 @@
+/* This comment is closed twice,
+which should be an error:
+
+*/*/
\ No newline at end of file
diff --git a/info/labs/lab02/test/resources/lexer/passing/Comments.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Comments.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..a9d2c9f61e5605d2887146a9d8f8ef2084f1e7cf
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Comments.grading.amy
@@ -0,0 +1,13 @@
+1
+// This is a comment
+2 /* This is also a comment */
+3 /* Still * /* comment */
+4 /* Multiline
+/*
+*
+Comment
+*/
+5 /***/
+/* abc **/ 6 /* def */
+7
+//
diff --git a/info/labs/lab02/test/resources/lexer/passing/Delimiters.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Delimiters.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..57ce3b29997aea1d250af1a19357669a1f51a53d
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Delimiters.grading.amy
@@ -0,0 +1 @@
+{ } ( ) , : . = => _
diff --git a/info/labs/lab02/test/resources/lexer/passing/Identifiers.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Identifiers.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d14272d79e91d9336c36ed0842d34d3db2ecd577
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Identifiers.grading.amy
@@ -0,0 +1,7 @@
+app boolean
+caSe Class df elze Error Extends False iff int module string
+True unit vals
+
+this_is_id this_IS_id2 st1ll1s_1d
+
+St1ll1s_1d
diff --git a/info/labs/lab02/test/resources/lexer/passing/IntLiterals.grading.amy b/info/labs/lab02/test/resources/lexer/passing/IntLiterals.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..6f20e8887546b8e265bb3d41b3df3c1414dc0971
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/IntLiterals.grading.amy
@@ -0,0 +1,2 @@
+123 012345
+6789+12345
diff --git a/info/labs/lab02/test/resources/lexer/passing/Keywords.amy b/info/labs/lab02/test/resources/lexer/passing/Keywords.amy
new file mode 100644
index 0000000000000000000000000000000000000000..06c4ef0bc5e7f8b639a4ae62e0bc5e73c60d0558
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Keywords.amy
@@ -0,0 +1,3 @@
+abstract Boolean
+case class def else error extends false if Int match object String
+true Unit val end
diff --git a/info/labs/lab02/test/resources/lexer/passing/Operators.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Operators.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bd293d768a9e6b997ee76b96b7c90958b134f4ae
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Operators.grading.amy
@@ -0,0 +1 @@
+; + - * / % < <= && || == ++ !
diff --git a/info/labs/lab02/test/resources/lexer/passing/StringLiterals.grading.amy b/info/labs/lab02/test/resources/lexer/passing/StringLiterals.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1309e0479078156624422a623efb9537a7b8861f
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/StringLiterals.grading.amy
@@ -0,0 +1,4 @@
+"This is a string"
+"Another with ^^ | # invalid chars"
+"No escape \n characters \t"
+" \\ No comments /* either "
diff --git a/info/labs/lab02/test/resources/lexer/passing/Whitespace.grading.amy b/info/labs/lab02/test/resources/lexer/passing/Whitespace.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..ff92277b28103502905b09b145b8ed31634c5de1
--- /dev/null
+++ b/info/labs/lab02/test/resources/lexer/passing/Whitespace.grading.amy
@@ -0,0 +1,2 @@
+	1 // Tab indented
+    2 // Space indented
diff --git a/info/labs/lab02/test/scala/amyc/test/CompilerTest.scala b/info/labs/lab02/test/scala/amyc/test/CompilerTest.scala
new file mode 100644
index 0000000000000000000000000000000000000000..1024cfb7ef93b16acb940b9c150b461b19c537ab
--- /dev/null
+++ b/info/labs/lab02/test/scala/amyc/test/CompilerTest.scala
@@ -0,0 +1,92 @@
+package amyc.test
+
+import amyc.utils._
+import java.io.File
+
+import org.junit.Assert.fail
+
+abstract class CompilerTest extends TestUtils {
+  private def runPipeline(pipeline: Pipeline[List[File], Unit], fileNames: List[String]) = {
+    val ctx = Context(new Reporter, fileNames)
+    val files = ctx.files.map(new File(_))
+    pipeline.run(ctx)(files)
+    ctx.reporter.terminateIfErrors()
+  }
+
+  private def runPipelineRedirected(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String
+  ): String = {
+    testWithRedirectedIO(runPipeline(pipeline, compiledFiles), input)
+  }
+
+  private def assertEqual(output: String, expected: String) = {
+    val rejectLine = (s: String) =>
+      s.isEmpty ||
+        s.startsWith("[ Info  ]") ||
+        s.startsWith("[Warning]") ||
+        s.startsWith("[ Error ]") ||
+        s.startsWith("[ Fatal ]")
+    def filtered(s: String) = s.linesIterator.filterNot(rejectLine).mkString("\n")
+    val filteredOutput = filtered(output)
+    val filteredExpected = filtered(expected)
+    if (filteredOutput != filteredExpected) {
+      val sb = new StringBuffer()
+      sb.append("\nOutput is different:\n")
+      sb.append("\nOutput: \n")
+      sb.append(filteredOutput)
+      sb.append("\n\nExpected output: \n")
+      sb.append(filteredExpected)
+      sb.append("\n")
+      fail(sb.toString)
+    }
+  }
+
+  protected def compareOutputs(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    expectedFile: String,
+    input: String = ""
+  ) = {
+    try {
+      val output = runPipelineRedirected(pipeline, compiledFiles, input)
+      val expected = scala.io.Source.fromFile(new File(expectedFile)).mkString
+      assertEqual(output, expected)
+    } catch {
+      // We only want to catch AmycFatalError gracefully; the rest can propagate
+      case AmycFatalError(msg) =>
+        fail(s"\n  $msg\n")
+    }
+  }
+
+  protected def demandPass(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String = ""
+  ) = {
+    try {
+      runPipelineRedirected(pipeline, compiledFiles, input)
+    } catch {
+      case AmycFatalError(msg) =>
+        fail(s"\n  $msg\n")
+    }
+  }
+
+  protected def demandFailure(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String = ""
+  ) = {
+    try {
+      runPipelineRedirected(pipeline, compiledFiles, input)
+      fail("Test should fail but it passed!")
+    } catch {
+      case AmycFatalError(_) =>
+      // Ok, this is what we wanted. Other exceptions should propagate though
+    }
+
+  }
+
+
+}
diff --git a/info/labs/lab02/test/scala/amyc/test/LexerTests.scala b/info/labs/lab02/test/scala/amyc/test/LexerTests.scala
new file mode 100644
index 0000000000000000000000000000000000000000..b4fbceff44045d3807180d286f2d7d8aea0d4178
--- /dev/null
+++ b/info/labs/lab02/test/scala/amyc/test/LexerTests.scala
@@ -0,0 +1,53 @@
+package amyc.test
+
+import amyc.parsing._
+import org.junit.Test
+
+class LexerTests extends TestSuite {
+  val pipeline = AmyLexer.andThen(DisplayTokens)
+
+  val baseDir = "lexer"
+
+  val outputExt = "txt"
+
+  @Test def testKeywords = shouldOutput("Keywords")
+
+  @Test def testIdentifiers = shouldOutput("Identifiers")
+
+  @Test def testOperators = shouldOutput("Operators")
+
+  @Test def testDelimiters = shouldOutput("Delimiters")
+
+  @Test def testCombinations = shouldOutput("Combinations")
+
+  @Test def testComments = shouldOutput("Comments")
+
+  @Test def testIntLiterals = shouldOutput("IntLiterals")
+
+  @Test def testStringLiterals = shouldOutput("StringLiterals")
+
+  @Test def testTwoFiles = shouldOutput(List("Keywords", "Operators"), "TwoFiles")
+
+  @Test def testSingleAmp = shouldFail("SingleAmp")
+
+  @Test def testSingleBar = shouldFail("SingleBar")
+
+  @Test def testUnclosedComment = shouldFail("UnclosedComment")
+
+  @Test def testUnclosedComment2 = shouldFail("UnclosedComment2")
+
+  @Test def testUnclosedComment3 = shouldFail("UnclosedComment3")
+
+  @Test def testCommentClosedTwice = shouldOutput("CommentClosedTwice")
+
+  @Test def testUnclosedString1 = shouldFail("UnclosedString1")
+
+  @Test def testUnclosedString2 = shouldFail("UnclosedString2")
+
+  @Test def testInvalid = shouldFail("Invalid")
+
+  @Test def testTooBigInt = shouldFail("TooBigInt")
+
+  @Test def testWhitespace = shouldOutput("Whitespace")
+
+}
diff --git a/info/labs/lab02/test/scala/amyc/test/TestSuite.scala b/info/labs/lab02/test/scala/amyc/test/TestSuite.scala
new file mode 100644
index 0000000000000000000000000000000000000000..3ce9ebf63be767da33d155a3b191db745a8b0109
--- /dev/null
+++ b/info/labs/lab02/test/scala/amyc/test/TestSuite.scala
@@ -0,0 +1,78 @@
+package amyc.test
+
+import amyc.utils.Pipeline
+import java.io.File
+import java.nio.file.Files
+import java.nio.file.Path
+import java.nio.file.StandardCopyOption
+
+abstract class TestSuite extends CompilerTest {
+  val pipeline: Pipeline[List[File], Unit]
+
+  val baseDir: String
+  lazy val effectiveBaseDir: String =
+    // getClass.getResource(s"/$baseDir").getPath
+    s"test/resources/$baseDir"
+
+  val passing = "passing"
+  val failing = "failing"
+  val outputs = "outputs"
+
+  val tmpDir = Files.createTempDirectory("amyc");
+
+  val outputExt: String
+
+  def getResourcePath(relativePath: String, otherPath: Option[String] = None): String =
+    val firstPath = Path.of(effectiveBaseDir, relativePath)
+
+    val (stream, path) = 
+      if Files.exists(firstPath) then
+        (Files.newInputStream(firstPath), relativePath)
+      else
+        otherPath match
+          case Some(p) =>
+            val secondPath = Path.of(effectiveBaseDir, p)
+            (Files.newInputStream(secondPath), p)
+          case None =>
+            assert(false, s"can not read $effectiveBaseDir/$relativePath")
+            (null, "")
+
+    val targetPath = tmpDir.resolve(path)
+    Files.createDirectories(targetPath.getParent())
+    Files.copy(stream, targetPath, StandardCopyOption.REPLACE_EXISTING)
+    targetPath.toAbsolutePath().toString()
+
+  def shouldOutput(inputFiles: List[String], outputFile: String, input: String = ""): Unit = {
+    compareOutputs(
+      pipeline,
+      inputFiles map (f => getResourcePath(s"$passing/$f.amy", Some(s"$passing/$f.grading.amy"))),
+      getResourcePath(s"$outputs/$outputFile.$outputExt", Some(s"$outputs/$outputFile.grading.$outputExt")),
+      input
+    )
+  }
+
+  def shouldOutput(inputFile: String): Unit = {
+    shouldOutput(List(inputFile), inputFile)
+  }
+
+  def shouldFail(inputFiles: List[String], input: String = ""): Unit = {
+    demandFailure(
+      pipeline,
+      inputFiles map (f => getResourcePath(s"$failing/$f.amy", Some(s"$failing/$f.grading.amy"))),
+      input
+    )
+  }
+
+  def shouldFail(inputFile: String): Unit = {
+    shouldFail(List(inputFile))
+  }
+
+  def shouldPass(inputFiles: List[String], input: String = ""): Unit = {
+    demandPass(pipeline, inputFiles map (f => getResourcePath(s"$passing/$f.amy", Some(s"$passing/$f.grading.amy"))), input)
+  }
+
+  def shouldPass(inputFile: String): Unit = {
+    shouldPass(List(inputFile))
+  }
+
+}
diff --git a/info/labs/lab02/test/scala/amyc/test/TestUtils.scala b/info/labs/lab02/test/scala/amyc/test/TestUtils.scala
new file mode 100644
index 0000000000000000000000000000000000000000..6fe74a037e1b04c7fb8ec0a3dffdc920a3db5f42
--- /dev/null
+++ b/info/labs/lab02/test/scala/amyc/test/TestUtils.scala
@@ -0,0 +1,24 @@
+package amyc.test
+
+import java.io._
+
+/** Some utilities for running tests */
+trait TestUtils {
+  /** Runs a test with its standard input redirected from the given String,
+    * and its standard output and error captured and returned as a String.
+    */
+  def testWithRedirectedIO[T](test: => T, input: String): String = {
+    import scala.Console._
+    val inputS  = new StringReader(input)
+    val outputS = new ByteArrayOutputStream()
+    withOut(outputS) {
+      withErr(outputS) {
+        withIn(inputS) {
+          test
+        }
+      }
+    }
+    outputS.toString()
+  }
+}