diff --git a/info/labs/lab03/build.sbt b/info/labs/lab03/build.sbt
new file mode 100644
index 0000000000000000000000000000000000000000..269712f6f2834fefc5b86079f23066d2d93690df
--- /dev/null
+++ b/info/labs/lab03/build.sbt
@@ -0,0 +1,21 @@
+version := "1.7"
+organization := "ch.epfl.lara"
+scalaVersion := "3.5.2"
+assembly / test := {}
+name := "amyc"
+
+Compile / scalaSource := baseDirectory.value / "src"
+scalacOptions ++= Seq("-feature")
+
+Test / scalaSource := baseDirectory.value / "test" / "scala"
+Test / parallelExecution := false
+libraryDependencies += "com.novocode" % "junit-interface" % "0.11" % "test"
+libraryDependencies += "org.apache.commons" % "commons-lang3" % "3.4" % "test"
+testOptions += Tests.Argument(TestFrameworks.JUnit, "-v")
+
+
+assembly / assemblyMergeStrategy := {
+  case _ => MergeStrategy.first
+}
diff --git a/info/labs/lab03/examples/Arithmetic.amy b/info/labs/lab03/examples/Arithmetic.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bc47b3dd27d257e4e4b2d70d654aa7cd894d5082
--- /dev/null
+++ b/info/labs/lab03/examples/Arithmetic.amy
@@ -0,0 +1,34 @@
+object Arithmetic
+  def pow(b: Int(32), e: Int(32)): Int(32) = {
+    if (e == 0) { 1 }
+    else {
+      if (e % 2 == 0) {
+        val rec: Int(32) = pow(b, e/2);
+        rec * rec
+      } else {
+        b * pow(b, e - 1)
+      }
+    }
+  }
+
+  def gcd(a: Int(32), b: Int(32)): Int(32) = {
+    if (a == 0 || b == 0) {
+      a + b
+    } else {
+      if (a < b) {
+        gcd(a, b % a)
+      } else {
+        gcd(a % b, b)
+      }
+    }
+  }
+
+  Std.printInt(pow(0, 10));
+  Std.printInt(pow(1, 5));
+  Std.printInt(pow(2, 10));
+  Std.printInt(pow(3, 3));
+  Std.printInt(gcd(0, 10));
+  Std.printInt(gcd(17, 99)); // 1
+  Std.printInt(gcd(16, 46)); // 2
+  Std.printInt(gcd(222, 888)) // 222
+end Arithmetic
diff --git a/info/labs/lab03/examples/Factorial.amy b/info/labs/lab03/examples/Factorial.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d31b150a6f62dd31e7b3063a7def606d26c602e9
--- /dev/null
+++ b/info/labs/lab03/examples/Factorial.amy
@@ -0,0 +1,12 @@
+object Factorial
+  def fact(i: Int(32)): Int(32) = {
+    if (i < 2) { 1 }
+    else { 
+      val rec: Int(32) = fact(i-1);
+      i * rec
+    }
+  }
+
+  Std.printString("5! = "  ++ Std.intToString(fact(5)));
+  Std.printString("10! = " ++ Std.intToString(fact(10)))
+end Factorial
diff --git a/info/labs/lab03/examples/Hanoi.amy b/info/labs/lab03/examples/Hanoi.amy
new file mode 100644
index 0000000000000000000000000000000000000000..911f7739f78487d4db3ffe7df283be04146e980a
--- /dev/null
+++ b/info/labs/lab03/examples/Hanoi.amy
@@ -0,0 +1,16 @@
+object Hanoi
+	
+  def solve(n : Int(32)) : Int(32) = {
+    if (n < 1) { 
+      error("can't solve Hanoi for less than 1 plate")
+    } else {
+      if (n == 1) {
+        1
+      } else {
+        2 * solve(n - 1) + 1
+      }
+    }
+  }
+
+  Std.printString("Hanoi for 4 plates: " ++ Std.intToString(solve(4)))
+end Hanoi
\ No newline at end of file
diff --git a/info/labs/lab03/examples/Hello.amy b/info/labs/lab03/examples/Hello.amy
new file mode 100644
index 0000000000000000000000000000000000000000..5cc4b6ea8c91294dfa6d00bc7a63be9881c1d6dd
--- /dev/null
+++ b/info/labs/lab03/examples/Hello.amy
@@ -0,0 +1,3 @@
+object Hello
+  Std.printString("Hello " ++ "world!")
+end Hello
diff --git a/info/labs/lab03/examples/HelloInt.amy b/info/labs/lab03/examples/HelloInt.amy
new file mode 100644
index 0000000000000000000000000000000000000000..79cab1aa6f078a8de78b9bb576f2ab559a2451e2
--- /dev/null
+++ b/info/labs/lab03/examples/HelloInt.amy
@@ -0,0 +1,7 @@
+object HelloInt
+  Std.printString("What is your name?");
+  val name: String = Std.readString();
+  Std.printString("Hello " ++ name ++ "! And how old are you?");
+  val age: Int(32) = Std.readInt();
+  Std.printString(Std.intToString(age) ++ " years old then.")
+end HelloInt
diff --git a/info/labs/lab03/examples/Printing.amy b/info/labs/lab03/examples/Printing.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bc3bac0381c6752e9729c7354a5f0eca2d27ae69
--- /dev/null
+++ b/info/labs/lab03/examples/Printing.amy
@@ -0,0 +1,12 @@
+object Printing
+  Std.printInt(0); Std.printInt(-222); Std.printInt(42);
+  Std.printBoolean(true); Std.printBoolean(false);
+  Std.printString(Std.digitToString(0));
+  Std.printString(Std.digitToString(5));
+  Std.printString(Std.digitToString(9));
+  Std.printString(Std.intToString(0));
+  Std.printString(Std.intToString(-111));
+  Std.printString(Std.intToString(22));
+  Std.printString("Hello " ++ "world!");
+  Std.printString("" ++ "")
+end Printing
diff --git a/info/labs/lab03/examples/TestLists.amy b/info/labs/lab03/examples/TestLists.amy
new file mode 100644
index 0000000000000000000000000000000000000000..a01698881a0b68e1cba5a92d59e01676339e0096
--- /dev/null
+++ b/info/labs/lab03/examples/TestLists.amy
@@ -0,0 +1,6 @@
+object TestLists 
+  val l: L.List = L.Cons(5, L.Cons(-5, L.Cons(-1, L.Cons(0, L.Cons(10, L.Nil())))));
+  Std.printString(L.toString(L.concat(L.Cons(1, L.Cons(2, L.Nil())), L.Cons(3, L.Nil()))));
+  Std.printInt(L.sum(l));
+  Std.printString(L.toString(L.mergeSort(l)))
+end TestLists
diff --git a/info/labs/lab03/lab03-README.md b/info/labs/lab03/lab03-README.md
new file mode 100644
index 0000000000000000000000000000000000000000..210ec71435b3dacad9186e53d39183310438dc98
--- /dev/null
+++ b/info/labs/lab03/lab03-README.md
@@ -0,0 +1,201 @@
+# Lab 03: Parser
+
+## Preamble
+
+Please make sure to refer to the [Amy specification](../amy-specification/AmySpec.md) as the language might be updated.
+
+## Introduction
+
+Starting from this week you will work on the second stage of the Amy
+compiler, the parser. The task of the parser is to take a sequence of
+tokens produced by the lexer and transform it into an Abstract Syntax
+Tree (AST).
+
+For this purpose you will write a grammar for Amy programs in a Domain
+Specific Language (DSL) that can be embedded in Scala. Similarly to what
+you have seen in the Lexer lab, each grammar rule will also be
+associated with a transformation function that maps the parse result to
+an AST. The overall grammar will then be used to automatically parse
+sequences of tokens into Amy ASTs, while abstracting away extraneous
+syntactical details, such as commas and parentheses.
+
+As you have seen (and will see) in the lectures, there are various
+algorithms to parse syntax trees corresponding to context-free grammars.
+Any context-free grammar (after some normalization) can be parsed using
+the CYK algorithm. However, this algorithm is rather slow: its
+complexity is in O(n^3 * g), where n is the size of the program and g
+the size of the grammar. On the other hand, a parser for a more
+restricted LL(1) grammar runs in time linear in the input size. Thus,
+the goal of this lab will be to develop an LL(1) version of the Amy grammar.
+
+### The Parser Combinator DSL
+
+In the previous lab you already started working with **Silex**, which
+was the library we used to tokenize program inputs based on a
+prioritized list of regular expressions. In this lab we will start using
+its companion library, **Scallion**: Once an input string has been
+tokenized, Scallion allows us to parse the token stream using the rules
+of an LL(1) grammar and translate it into a target data structure, such as an
+AST.
+
+To familiarize yourself with the parsing functionality of Scallion,
+please make sure you read the [Introduction to (Scallion) Parser
+Combinators](material/scallion.md). In it, you will learn how to describe grammars
+in Scallion's parser combinator DSL and how to ensure that your grammar
+is LL(1) (which Scallion requires in order to function correctly).
+
+Once you understand parser combinators, you can get to work on your own
+implementation of an Amy parser in `Parser.scala`. Note that in this lab
+you will essentially operate on two data structures: Your parser will
+consume a sequence of `Token`s (defined in `Tokens.scala`) and produce
+an AST (as defined by `NominalTreeModule` in `TreeModule.scala`). To
+accomplish this, you will have to define appropriate parsing rules and
+translation functions for Scallion.
+
+In `Parser.scala` you will already find a number of parsing rules given
+to you, including the starting non-terminal `program`. Others, such as
+`expr`, are stubs (marked by `???`) that you will have to complete
+yourself. Make sure to take advantage of Scallion's various helpers,
+such as the `operators` method, which simplifies defining operators of
+different precedence and associativity.
+
+### An LL(1) grammar for Amy
+
+As usual, the [Amy specification](../amy-specification/AmySpec.md) will guide you when it comes to deciding what exactly should be accepted by your parser.
+Carefully read the *Syntax* section.
+
+Note that the EBNF grammar in the specification merely represents an
+over-approximation of Amy's true grammar -- it is too imprecise to be
+useful for parsing. Firstly, this grammar is ambiguous. That
+is, it allows multiple ways to parse an expression. For example, `x + y * z`
+could be parsed as either `(x + y) * z` or as `x + (y * z)`. In other
+words, the grammar doesn't enforce either operator precedence or
+associativity correctly. Additionally, the restrictions mentioned
+throughout the *Syntax* section of the specification are not followed.
+
+Your task is thus to come up with appropriate rules that encode Amy's
+true grammar. Furthermore, this grammar should be LL(1) for reasons of
+efficiency. Scallion will read your grammar, check whether it is indeed LL(1), and, if so, parse input programs. If Scallion determines that the
+grammar is not LL(1), it will report an error. You can also instruct
+Scallion to generate some counter-examples for you (see the `checkLL1`
+function).
+
+### Translating to ASTs
+
+Scallion will parse a sequence of tokens according to the grammar you
+provide; however, without additional help, it does not know how to build
+Amy ASTs. For instance, a (nonsensical) grammar that only accepts
+sequences of identifier tokens, e.g.
+
+    many(elem(IdentifierKind)): Syntax[Seq[Token]]
+
+will be useful in deciding whether the input matches the expected form,
+but will simply return the tokens unchanged when parsing succeeds.
+
+Scallion does allow you to map parse results from one type to another,
+however. For instance, in the above example we might want to provide a
+function `f(idTokens: Seq[Token]): Seq[Variable]` that transforms the
+identifier tokens into (Amy-AST) variables of those names.
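+
+A minimal sketch of what such a translation could look like (assuming an
+`IdentifierToken(name)` token class and the `Variable` node from
+`NominalTreeModule`; exact names may differ in your skeleton):
+
+    val variables: Syntax[Seq[Variable]] =
+      many(elem(IdentifierKind)).map { idTokens =>
+        idTokens.map { case id @ IdentifierToken(name) => Variable(name).setPos(id) }
+      }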
+
+For more information on how to use Scallion's `Syntax#map` method
+please refer to the [Scallion introduction](material/scallion.md).
+
+## Notes
+
+### Understanding the AST: Nominal vs. Symbolic Trees
+
+If you check the TreeModule file containing the ASTs, you will notice it
+is structured in an unusual way: there is a `TreeModule` trait extended
+by `NominalTreeModule` and `SymbolicTreeModule`. The reason for this
+design is that we need two very similar ASTs, but with different types
+representing names in each case. Just after parsing (this assignment),
+all names are just *Strings* and qualified names are essentially pairs of
+*Strings*. We call ASTs that only use such String-based names `Nominal`
+-- the variant we will be using in this lab. Later, during name
+analysis, these names will be resolved to unique identifiers, e.g. two
+variables that refer to different definitions will be distinct, even if
+they have the same name. For now you can just look at the TreeModule and
+substitute the types that are not defined there (`Name` and
+`QualifiedName`) with their definitions inside `NominalTreeModule`.
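+
+Concretely, the nominal variants look roughly like this (a sketch; see
+`TreeModule.scala` for the exact declarations):
+
+    // In NominalTreeModule, names are plain strings right after parsing:
+    type Name = String
+    case class QualifiedName(module: Option[String], name: String)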
+
+### Positions
+
+As you will notice in the code we provide, all generated ASTs have their
+position set. The position of each node of the AST is defined as its
+starting position, i.e., the position of its first character in the text file. It is important that you set the positions in all the
+trees that you create for better error reporting later. Although our
+testing infrastructure cannot directly check for the presence of positions,
+we will check for them manually.
+
+### Pretty Printing
+
+Along with the stubs, we provide a printer for Amy ASTs. It will print
+parentheses around all expressions so you can clearly see how your
+parser interprets precedence and associativity. You can use it to test
+your parser, and it will also be used during our testing to compare the
+output of your parser with that of the reference parser.
+
+## Skeleton
+
+As usual, you can find the skeleton in the git repository. This lab
+builds on your previous work, so -- given your implementation of the
+lexer -- you will only unpack two files from the skeleton.
+
+The structure of your project `src` directory should be as follows:
+
+    lib 
+     └── scallion-assembly-0.6.1.jar    
+
+    library
+     ├── ...
+     └── ...
+
+    examples
+     ├── ...
+     └── ...
+    src
+     ├── amyc
+     │    ├── Main.scala                           (updated)
+     │    │
+     │    ├── ast                                  (new)
+     │    │    ├── Identifier.scala
+     │    │    ├── Printer.scala
+     │    │    └── TreeModule.scala
+     │    │
+     │    ├── parsing
+     │    │    ├── Parser.scala                    (new)
+     │    │    ├── Lexer.scala
+     │    │    └── Tokens.scala
+     │    │
+     │    └── utils
+     │        ├── AmycFatalError.scala
+     │        ├── Context.scala
+     │        ├── Document.scala
+     │        ├── Pipeline.scala
+     │        ├── Position.scala
+     │        ├── Reporter.scala
+     │        └── UniqueCounter.scala
+     │
+     └── test
+         ├── scala
+         │    └── amyc
+         │         └── test
+         │              ├── CompilerTest.scala
+         │              ├── LexerTests.scala
+         │              ├── ParserTests.scala      (new)
+         │              ├── TestSuite.scala
+         │              └── TestUtils.scala
+         └── resources
+                 ├── lexer
+                 │    └── ...
+                 └── parser                        (new)
+                     └── ...
+
+## Deliverables
+
+Deadline: **28.03.2025 23:59:59**
+
+You should submit your files to Moodle in the corresponding assignment.
+You should submit the following files:
+
+- `Parser.scala`: Your implementation of the Amy parser.
diff --git a/info/labs/lab03/lib/scallion-assembly-0.6.1.jar b/info/labs/lab03/lib/scallion-assembly-0.6.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..074c47c01983b0096ab078cac3fceafc97814db4
Binary files /dev/null and b/info/labs/lab03/lib/scallion-assembly-0.6.1.jar differ
diff --git a/info/labs/lab03/library/List.amy b/info/labs/lab03/library/List.amy
new file mode 100644
index 0000000000000000000000000000000000000000..60fc3bc110a4df310733fd87d42ee24e4f99ee98
--- /dev/null
+++ b/info/labs/lab03/library/List.amy
@@ -0,0 +1,144 @@
+object L 
+  abstract class List
+  case class Nil() extends List
+  case class Cons(h: Int(32), t: List) extends List
+ 
+  def isEmpty(l : List): Boolean = { l match {
+    case Nil() => true
+    case _ => false 
+  }}
+
+  def length(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(_, t) => 1 + length(t)
+  }}
+
+  def head(l: List): Int(32) = {
+    l match {
+      case Cons(h, _) => h
+      case Nil() => error("head(Nil)")
+    }
+  }
+
+  def headOption(l: List): O.Option = {
+    l match {
+      case Cons(h, _) => O.Some(h)
+      case Nil() => O.None()
+    }
+  }
+
+  def reverse(l: List): List = {
+    reverseAcc(l, Nil())
+  }
+
+  def reverseAcc(l: List, acc: List): List = {
+    l match {
+      case Nil() => acc
+      case Cons(h, t) => reverseAcc(t, Cons(h, acc))
+    }
+  }
+
+  def indexOf(l: List, i: Int(32)): Int(32) = {
+    l match {
+      case Nil() => -1
+      case Cons(h, t) =>
+        if (h == i) { 0 }
+        else {
+          val rec: Int(32) = indexOf(t, i);
+          if (0 <= rec) { rec + 1 }
+          else { -1 }
+        }
+    }
+  }
+
+  def range(from: Int(32), to: Int(32)): List = {
+    if (to < from) { Nil() }
+    else {
+      Cons(from, range(from + 1, to))
+    }
+  }
+
+  def sum(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(h, t) => h + sum(t)
+  }}
+
+  def concat(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h, t) => Cons(h, concat(t, l2))
+    }
+  }
+
+  def contains(l: List, elem: Int(32)): Boolean = { l match {
+    case Nil() =>
+      false
+    case Cons(h, t) =>
+      h == elem || contains(t, elem)
+  }}
+
+  abstract class LPair
+  case class LP(l1: List, l2: List) extends LPair
+
+  def merge(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h1, t1) =>
+        l2 match {
+          case Nil() => l1
+          case Cons(h2, t2) =>
+            if (h1 <= h2) {
+              Cons(h1, merge(t1, l2))
+            } else {
+              Cons(h2, merge(l1, t2))
+            }
+        }
+    }
+  }
+
+  def split(l: List): LPair = {
+    l match {
+      case Cons(h1, Cons(h2, t)) =>
+        val rec: LPair = split(t);
+        rec match {
+          case LP(rec1, rec2) =>
+            LP(Cons(h1, rec1), Cons(h2, rec2))
+        }
+      case _ =>
+        LP(l, Nil())
+    }
+  }
+  def mergeSort(l: List): List = {
+    l match {
+      case Nil() => l
+      case Cons(h, Nil()) => l
+      case xs =>
+        split(xs) match {
+          case LP(l1, l2) =>
+            merge(mergeSort(l1), mergeSort(l2))
+        }
+    }
+  }
+  
+  def toString(l: List): String = { l match {
+    case Nil() => "List()"
+    case more => "List(" ++ toString1(more) ++ ")"
+  }}
+
+  def toString1(l : List): String = { l match {
+    case Cons(h, Nil()) => Std.intToString(h)
+    case Cons(h, t) => Std.intToString(h) ++ ", " ++ toString1(t)
+  }}
+
+  def take(l: List, n: Int(32)): List = {
+    if (n <= 0) { Nil() }
+    else { 
+      l match {
+        case Nil() => Nil()
+        case Cons(h, t) =>
+          Cons(h, take(t, n-1))
+      }
+    }
+  }
+    
+end L
diff --git a/info/labs/lab03/library/Option.amy b/info/labs/lab03/library/Option.amy
new file mode 100644
index 0000000000000000000000000000000000000000..dabec722fbf00083b815768e62e6bbf9f7096b23
--- /dev/null
+++ b/info/labs/lab03/library/Option.amy
@@ -0,0 +1,40 @@
+object O 
+  abstract class Option
+  case class None() extends Option
+  case class Some(v: Int(32)) extends Option
+
+  def isdefined(o: Option): Boolean = {
+    o match {
+      case None() => false
+      case _ => true
+    }
+  }
+
+  def get(o: Option): Int(32) = {
+    o match {
+      case Some(i) => i
+      case None() => error("get(None)")
+    }
+  }
+
+  def getOrElse(o: Option, i: Int(32)): Int(32) = {
+    o match {
+      case None() => i
+      case Some(oo) => oo
+    }
+  }
+
+  def orElse(o1: Option, o2: Option): Option = {
+    o1 match {
+      case Some(_) => o1
+      case None() => o2
+    }
+  }
+
+  def toList(o: Option): L.List = {
+    o match {
+      case Some(i) => L.Cons(i, L.Nil())
+      case None() => L.Nil()
+    }
+  }
+end O
diff --git a/info/labs/lab03/library/Std.amy b/info/labs/lab03/library/Std.amy
new file mode 100644
index 0000000000000000000000000000000000000000..511bb6eb1f7584652516f2fc3daf84da5cc8d987
--- /dev/null
+++ b/info/labs/lab03/library/Std.amy
@@ -0,0 +1,40 @@
+/** This module contains basic functionality for Amy,
+  * including stub implementations for some built-in functions
+  * (implemented in WASM or JavaScript)
+  */
+object Std 
+  def printInt(i: Int(32)): Unit = {
+    error("") // Stub implementation
+  }
+  def printString(s: String): Unit = {
+    error("") // Stub implementation
+  }
+  def printBoolean(b: Boolean): Unit = {
+    printString(booleanToString(b))
+  }
+
+  def readString(): String = {
+    error("") // Stub implementation
+  }
+
+  def readInt(): Int(32) = {
+    error("") // Stub implementation
+  }
+
+  def intToString(i: Int(32)): String = {
+    if (i < 0) {
+      "-" ++ intToString(-i)
+    } else {
+      val rem: Int(32) = i % 10;
+      val div: Int(32) = i / 10;
+      if (div == 0) { digitToString(rem) }
+      else { intToString(div) ++ digitToString(rem) }
+    }
+  }
+  def digitToString(i: Int(32)): String = {
+    error("") // Stub implementation
+  }
+  def booleanToString(b: Boolean): String = {
+    if (b) { "true" } else { "false" }
+  }
+end Std
diff --git a/info/labs/lab03/material/scallion-playground/build.sbt b/info/labs/lab03/material/scallion-playground/build.sbt
new file mode 100644
index 0000000000000000000000000000000000000000..d179bc7b68a39de902dd337ba780b5982313d549
--- /dev/null
+++ b/info/labs/lab03/material/scallion-playground/build.sbt
@@ -0,0 +1,6 @@
+scalaVersion     := "3.5.2"
+version          := "1.0.0"
+organization     := "ch.epfl.lara"
+organizationName := "LARA"
+name             := "calculator"
+libraryDependencies ++= Seq("org.scalatest" %% "scalatest" % "3.2.10" % "test")
\ No newline at end of file
diff --git a/info/labs/lab03/material/scallion-playground/lib/scallion-assembly-0.6.1.jar b/info/labs/lab03/material/scallion-playground/lib/scallion-assembly-0.6.1.jar
new file mode 100644
index 0000000000000000000000000000000000000000..074c47c01983b0096ab078cac3fceafc97814db4
Binary files /dev/null and b/info/labs/lab03/material/scallion-playground/lib/scallion-assembly-0.6.1.jar differ
diff --git a/info/labs/lab03/material/scallion-playground/project/build.properties b/info/labs/lab03/material/scallion-playground/project/build.properties
new file mode 100755
index 0000000000000000000000000000000000000000..10fd9eee04ac574059d24caf1482e72ebf2d40b3
--- /dev/null
+++ b/info/labs/lab03/material/scallion-playground/project/build.properties
@@ -0,0 +1 @@
+sbt.version=1.5.5
diff --git a/info/labs/lab03/material/scallion-playground/src/main/scala/calculator/Calculator.scala b/info/labs/lab03/material/scallion-playground/src/main/scala/calculator/Calculator.scala
new file mode 100644
index 0000000000000000000000000000000000000000..d1164f6b10da34080432a3e8091940e17ec9e9d6
--- /dev/null
+++ b/info/labs/lab03/material/scallion-playground/src/main/scala/calculator/Calculator.scala
@@ -0,0 +1,183 @@
+/* Copyright 2020 EPFL, Lausanne
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package calculator
+
+import silex._
+import scallion._
+
+sealed trait Token
+case class NumberToken(value: Int) extends Token
+case class OperatorToken(operator: Char) extends Token
+case class ParenthesisToken(isOpen: Boolean) extends Token
+case object SpaceToken extends Token
+case class UnknownToken(content: String) extends Token
+
+object CalcLexer extends Lexers with CharLexers {
+  type Position = Unit
+  type Token = calculator.Token
+
+  val lexer = Lexer(
+    // Operators
+    oneOf("-+/*!")
+      |> { cs => OperatorToken(cs.head) },
+
+    // Parentheses
+    elem('(') |> ParenthesisToken(true),
+    elem(')') |> ParenthesisToken(false),
+
+    // Spaces
+    many1(whiteSpace) |> SpaceToken,
+
+    // Numbers
+    {
+      elem('0') |
+      nonZero ~ many(digit)
+    }
+      |> { cs => NumberToken(cs.mkString.toInt) }
+  ) onError {
+    (cs, _) => UnknownToken(cs.mkString)
+  }
+
+
+  def apply(it: String): Iterator[Token] = {
+    val source = Source.fromString(it, NoPositioner)
+
+    val tokens = lexer(source)
+
+    tokens.filter((token: Token) => token != SpaceToken)
+  }
+}
+
+sealed abstract class TokenKind(text: String) {
+  override def toString = text
+}
+case object NumberClass extends TokenKind("<number>")
+case class OperatorClass(op: Char) extends TokenKind(op.toString)
+case class ParenthesisClass(isOpen: Boolean) extends TokenKind(if (isOpen) "(" else ")")
+case object OtherClass extends TokenKind("?")
+
+sealed abstract class Expr
+case class LitExpr(value: Int) extends Expr
+case class BinaryExpr(op: Char, left: Expr, right: Expr) extends Expr
+case class UnaryExpr(op: Char, inner: Expr) extends Expr
+
+object CalcParser extends Parsers {
+  type Token = calculator.Token
+  type Kind = calculator.TokenKind
+
+  import Implicits._
+
+  override def getKind(token: Token): TokenKind = token match {
+    case NumberToken(_) => NumberClass
+    case OperatorToken(c) => OperatorClass(c)
+    case ParenthesisToken(o) => ParenthesisClass(o)
+    case _ => OtherClass
+  }
+
+  val number: Syntax[Expr] = accept(NumberClass) {
+    case NumberToken(n) => LitExpr(n)
+  }
+
+  def binOp(char: Char): Syntax[Char] = accept(OperatorClass(char)) {
+    case _ => char
+  }
+
+  val plus = binOp('+')
+  val minus = binOp('-')
+  val times = binOp('*')
+  val div = binOp('/')
+
+  val fac: Syntax[Char] = accept(OperatorClass('!')) {
+    case _ => '!'
+  }
+
+  def parens(isOpen: Boolean) = elem(ParenthesisClass(isOpen))
+  val open = parens(true)
+  val close = parens(false)
+
+  lazy val expr: Syntax[Expr] = recursive {
+    (term ~ moreTerms).map {
+      case first ~ opNexts => opNexts.foldLeft(first) {
+        case (acc, op ~ next) => BinaryExpr(op, acc, next)
+      }
+    }
+  }
+
+  lazy val term: Syntax[Expr] = (factor ~ moreFactors).map {
+    case first ~ opNexts => opNexts.foldLeft(first) {
+      case (acc, op ~ next) => BinaryExpr(op, acc, next)
+    }
+  }
+
+  lazy val moreTerms: Syntax[Seq[Char ~ Expr]] = recursive {
+    epsilon(Seq.empty[Char ~ Expr]) |
+    ((plus | minus) ~ term ~ moreTerms).map {
+      case op ~ t ~ ots => (op ~ t) +: ots
+    }
+  }
+
+  lazy val factor: Syntax[Expr] = (basic ~ fac.opt).map {
+    case e ~ None => e
+    case e ~ Some(op) => UnaryExpr(op, e)
+  }
+
+  lazy val moreFactors: Syntax[Seq[Char ~ Expr]] = recursive {
+    epsilon(Seq.empty[Char ~ Expr]) |
+    ((times | div) ~ factor ~ moreFactors).map {
+      case op ~ t ~ ots => (op ~ t) +: ots
+    }
+  }
+
+  lazy val basic: Syntax[Expr] = number | open.skip ~ expr ~ close.skip
+
+
+  // Or, using operators...
+  //
+  // lazy val expr: Syntax[Expr] = recursive {
+  //   operators(factor)(
+  //     times | div is LeftAssociative,
+  //     plus | minus is LeftAssociative
+  //   ) {
+  //     case (l, op, r) => BinaryExpr(op, l, r)
+  //   }
+  // }
+  //
+  // Then, you can get rid of term, moreTerms, and moreFactors.
+
+  def apply(tokens: Iterator[Token]): Option[Expr] = Parser(expr)(tokens).getValue
+}
+
+object Main {
+  def main(args: Array[String]): Unit = {
+    if (!CalcParser.expr.isLL1) {
+      CalcParser.debug(CalcParser.expr, false)
+      return
+    }
+
+    println("Welcome to the awesome calculator expression parser.")
+    while (true) {
+      print("Enter an expression: ")
+      val line = scala.io.StdIn.readLine()
+      if (line.isEmpty) {
+        return
+      }
+      CalcParser(CalcLexer(line)) match {
+        case None => println("Could not parse your line...")
+        case Some(parsed) => println("Syntax tree: " + parsed)
+      }
+    }
+  }
+}
diff --git a/info/labs/lab03/material/scallion-playground/src/test/scala/calculator/Tests.scala b/info/labs/lab03/material/scallion-playground/src/test/scala/calculator/Tests.scala
new file mode 100644
index 0000000000000000000000000000000000000000..72e905cbcab9f1fcdc30cfe90a3b9755c62e7eee
--- /dev/null
+++ b/info/labs/lab03/material/scallion-playground/src/test/scala/calculator/Tests.scala
@@ -0,0 +1,48 @@
+/* Copyright 2019 EPFL, Lausanne
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package calculator
+
+import org.scalatest._
+import flatspec._
+
+class Tests extends AnyFlatSpec with Inside {
+
+  "Parser" should "be LL(1)" in {
+    assert(CalcParser.expr.isLL1)
+  }
+
+  it should "be able to parse some strings" in {
+    val result = CalcParser(CalcLexer("1 + 3 * (5! / 7) + 42"))
+
+    assert(result.nonEmpty)
+
+    val parsed = result.get
+
+    inside(parsed) {
+      case BinaryExpr('+', BinaryExpr('+', one, mult), fortytwo) => {
+        assert(one == LitExpr(1))
+        assert(fortytwo == LitExpr(42))
+        inside(mult) {
+          case BinaryExpr('*', three, BinaryExpr('/', UnaryExpr('!', five), seven)) => {
+            assert(three == LitExpr(3))
+            assert(five == LitExpr(5))
+            assert(seven == LitExpr(7))
+          }
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab03/material/scallion.md b/info/labs/lab03/material/scallion.md
new file mode 100644
index 0000000000000000000000000000000000000000..02b9aae73eba52d8a594f1ab4e28fac7c747254b
--- /dev/null
+++ b/info/labs/lab03/material/scallion.md
@@ -0,0 +1,405 @@
+**For a brief overview of Scallion and its purpose, you can watch [this
+video](https://mediaspace.epfl.ch/media/0_lypn7l0x).** What follows below is
+a slightly more detailed description, and an example project you can use
+to familiarize yourself with Scallion.
+
+## Introduction to Parser Combinators
+
+The next part of the compiler you will be working on is the parser. The
+goal of the parser is to convert the sequence of tokens generated by the
+lexer into an Amy *abstract syntax tree* (AST).
+
+There are many approaches to writing parsers, such as:
+
+-   Writing the parser by hand directly in the compiler's language using
+    mutually recursive functions, or
+-   Writing the parser in a *domain specific language* (DSL) and using a
+    parser generator (such as Bison) to produce the parser.
+
+Another approach, which we will be using, is *parser combinators*. The
+idea behind the approach is very simple:
+
+-   Have a set of simple primitive parsers, and
+-   Have ways to combine them together into more and more complex
+    parsers. Hence the name *parser combinators*.
+
+Usually, those primitive parsers and combinators are provided as a
+library directly in the language used by the compiler. In our case, we
+will be working with **Scallion**, a Scala parser combinator library
+developed by *LARA*.
+
+Parser combinators have many advantages -- the main one being that they
+are easy to write, read, and maintain.
+
+## Scallion Parser Combinators
+
+### Documentation
+
+In this document, we will introduce parser combinators in Scallion and
+showcase how to use them. This document is not intended to be a complete
+reference to Scallion. Fortunately, the library comes with a
+[comprehensive
+API](https://epfl-lara.github.io/scallion) which
+fulfills that role. Feel free to refer to it while working on your
+project!
+
+### Playground Project
+
+We have set up [an example project](scallion-playground) that
+implements a lexer and parser for a simple expression language using
+Scallion. Feel free to experiment and play with it. The project
+showcases the API of Scallion and some of the more advanced combinators.
+
+### Setup
+
+In Scallion, parsers are defined within a trait called `Syntaxes`. This
+trait takes as parameters two types:
+
+-   The type of tokens,
+-   The type of *token kinds*. Token kinds represent groups of tokens.
+    They abstract away all the details found in the actual tokens, such
+    as positions or identifier names. Each token has a
+    unique kind.
+
+In our case, the tokens will be of type `Token` that we introduced and
+used in the previous project. The token kinds will be `TokenKind`, which
+we have already defined for you.
+
+    object Parser extends Pipeline[Iterator[Token], Program]
+                     with Parsers {
+
+      type Token = myproject.Token
+      type Kind = myproject.TokenKind
+
+      // Indicates the kind of the various tokens.
+      override def getKind(token: Token): TokenKind = TokenKind.of(token)
+      
+      // Your parser implementation goes here.
+    }
+
+The `Parsers` trait (mixed into the `Parser` object above) comes from
+Scallion and provides all functions and types you will use to define
+your grammar and AST translation.
+
+### Writing Parsers
+
+When writing a parser using parser combinators, one defines many smaller
+parsers and combines them together into more and more complex parsers.
+The top-level, most complex of those parsers then defines the entire
+syntax for the language. In our case, that top-level parser will be
+called `program`.
+
+All those parsers are objects of the type `Syntax[A]`. The type
+parameter `A` indicates the type of values produced by the parser. For
+instance, a parser of type `Syntax[Int]` produces `Int`s and a parser of
+type `Syntax[Expr]` produces `Expr`s. Our top-level parser has the
+following signature:
+
+    lazy val program: Syntax[Program] = ...
+
+Contrary to the types of tokens and token kinds, which are fixed, the
+type of values produced is a type parameter of the various `Syntax`s.
+This allows your different parsers to produce different types of values.
+
+The various parsers are stored as `val` members of the `Parser` object.
+In the case of mutually dependent parsers, we use `lazy val` instead.
+
+    lazy val definition: Syntax[ClassOrFunDef] =
+      functionDefinition | abstractClassDefinition | caseClassDefinition
+     
+    lazy val functionDefinition: Syntax[ClassOrFunDef] = ...
+
+    lazy val abstractClassDefinition: Syntax[ClassOrFunDef] = ...
+
+    lazy val caseClassDefinition: Syntax[ClassOrFunDef] = ...
+
+### Running Parsers
+
+Parsers of type `Syntax[A]` can be converted to objects of type
+`Parser[A]`, which have an `apply` method which takes as parameter an
+iterator of tokens and returns a value of type `ParseResult[A]`, which
+can be one of three things:
+
+-   A `Parsed(value, rest)`, which indicates that the parser was
+    successful and produced the value `value`. The entirety of the input
+    iterator was consumed by the parser.
+-   An `UnexpectedToken(token, rest)`, which indicates that the parser
+    encountered an unexpected token `token`. The input iterator was
+    consumed up to the erroneous token.
+-   An `UnexpectedEnd(rest)`, which indicates that the end of the
+    iterator was reached and the parser could not finish at this point.
+    The input iterator was completely consumed.
+
+In each case, the additional value `rest` is itself some sort of a
+`Parser[A]`. That parser represents the parser after the successful
+parse or at the point of error. This parser could be used to provide
+useful error messages or even to resume parsing.
+
+    override def run(ctx: Context)(tokens: Iterator[Token]): Program = {
+      import ctx.reporter._
+
+      val parser = Parser(program)
+
+      parser(tokens) match {
+        case Parsed(result, rest) => result
+        case UnexpectedEnd(rest) => fatal("Unexpected end of input.")
+        case UnexpectedToken(token, rest) => fatal("Unexpected token: " + token)
+      }
+    }
+
+### Parsers and Grammars
+
+As you will see, parsers built using parser combinators will look a lot
+like grammars. However, unlike grammars, parsers not only describe the
+syntax of your language, but also directly specify how to turn this
+syntax into a value. Also, as we will see, parser combinators have a
+richer vocabulary than your usual *BNF* grammars.
+
+Interestingly, a lot of concepts that you have seen for grammars, such as
+`FIRST` sets and nullability, can be straightforwardly transposed to
+parsers.
+
+#### FIRST set
+
+In Scallion, parsers offer a `first` method which returns the set of
+token kinds that are accepted as a first token.
+
+    definition.first === Set(def, abstract, case)
+
+#### Nullability
+
+Parsers have a `nullable` method which checks for nullability of a
+parser. The method returns `Some(value)` if the parser would produce
+`value` given an empty input token sequence, and `None` if the parser
+would not accept the empty sequence.
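+
+For instance (a sketch, using the `epsilon` and `elem` parsers introduced
+below):
+
+    epsilon(42).nullable === Some(42)
+    elem(EOFKind).nullable === None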
+
+### Basic Parsers
+
+We can now finally have a look at the toolbox we have at our disposal
+to build parsers, starting from the basic parsers. Each parser that you
+will write, however complex, is a combination of these basic parsers.
+The basic parsers play the same role as terminal symbols do in grammars.
+
+#### Elem
+
+The first of the basic parsers is `elem(kind)`. The function `elem`
+takes as argument the kind of tokens to be accepted by the parser. The
+value produced by the parser is the token that was matched. For
+instance, here is how to match against the *end-of-file* token.
+
+    val eof: Syntax[Token] = elem(EOFKind)
+
+#### Accept
+
+The function `accept` is a variant of `elem` which directly applies a
+transformation to the matched token when it is produced.
+
+    val identifier: Syntax[String] = accept(IdentifierKind) {
+      case IdentifierToken(name) => name
+    }
+
+#### Epsilon
+
+The parser `epsilon(value)` is a parser that produces the `value`
+without consuming any input. It corresponds to the *𝛆* found in
+grammars.
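+
+For instance, a parser that produces an empty list of expressions without
+consuming any tokens could be written as (a sketch):
+
+    val noExprs: Syntax[Seq[Expr]] = epsilon(Seq.empty[Expr])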
+
+### Parser Combinators
+
+In this section, we will see how to combine parsers together to create
+more complex parsers.
+
+#### Disjunction
+
+The first combinator we have is disjunction, which we write, for parsers
+`p1` and `p2`, simply `p1 | p2`. When both `p1` and `p2` are of type
+`Syntax[A]`, the disjunction `p1 | p2` is also of type `Syntax[A]`. The
+disjunction operator is associative and commutative.
+
+Disjunction works just as you think it does. If either of the parsers
+`p1` or `p2` would accept the sequence of tokens, then the disjunction
+also accepts the tokens. The value produced is the one produced by
+either `p1` or `p2`.
+
+Note that `p1` and `p2` must have disjoint `first` sets. This
+restriction ensures that no ambiguities can arise and that parsing can
+be done efficiently.[^1] We will see later how to automatically detect
+when this is not the case and how to fix the issue.
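+
+As a hypothetical example, a syntax for Amy literals could be written as a
+disjunction of smaller syntaxes (assuming `intLiteral`, `booleanLiteral` and
+`unitLiteral` are defined elsewhere):
+
+    lazy val literal: Syntax[Literal[_]] =
+      intLiteral | booleanLiteral | unitLiteral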
+
+#### Sequencing
+
+The second combinator we have is sequencing. We write, for parsers `p1`
+and `p2`, the sequence of `p1` and `p2` as `p1 ~ p2`. When `p1` is of
+type `A` and `p2` of type `B`, their sequence is of type `A ~ B`, which
+is simply a pair of an `A` and a `B`.
+
+If the parser `p1` accepts the prefix of a sequence of tokens and `p2`
+accepts the postfix, the parser `p1 ~ p2` accepts the entire sequence
+and produces the pair of values produced by `p1` and `p2`.
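+
+For instance, reusing the `identifier` syntax defined earlier (a sketch):
+
+    // The produced value has type String ~ String, a pair of both names.
+    val twoIds: Syntax[String ~ String] = identifier ~ identifier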
+
+Note that the `first` set of `p2` should be disjoint from the `first`
+set of all sub-parsers in `p1` that are *nullable* and in trailing
+position (available via the `followLast` method). This restriction
+ensures that the combinator does not introduce ambiguities.
+
+#### Transforming Values
+
+The method `map` makes it possible to apply a transformation to the
+values produced by a parser. Using `map` does not influence the sequence
+of tokens accepted or rejected by the parser; it merely modifies the
+value produced. Generally, you will use `map` on a sequence of parsers,
+as in:
+
+    lazy val abstractClassDefinition: Syntax[ClassOrFunDef] =
+      (kw("abstract") ~ kw("class") ~ identifier).map {
+        case kw ~ _ ~ id => AbstractClassDef(id).setPos(kw)
+      }
+
+The above parser accepts abstract class definitions in Amy syntax. It
+does so by accepting the sequence of keywords `abstract` and `class`,
+followed by any identifier. The method `map` is used to convert the
+produced values into an `AbstractClassDef`. The position of the keyword
+`abstract` is used as the position of the definition.
+
+#### Recursive Parsers
+
+It is highly likely that some of your parsers will need to invoke
+themselves recursively. In this case, you should indicate that
+the parser is recursive using the `recursive` combinator:
+
+    lazy val expr: Syntax[Expr] = recursive {
+      ...
+    }
+
+If you were to omit it, a `StackOverflow` exception would be triggered
+during the initialisation of your `Parser` object.
+
+The `recursive` combinator in itself does not change the behaviour of
+the underlying parser. It is there to *tie the knot*[^2].
+
+In practice, it is only required in very few places. In order to avoid
+`StackOverflow` exceptions during initialisation, you should make sure
+that all recursive parsers (stored in `lazy val`s) cannot
+reenter themselves without going through a `recursive` combinator
+somewhere along the way.
+
+#### Other Combinators
+
+So far, many of the combinators that we have seen, such as disjunction
+and sequencing, directly correspond to constructs found in `BNF`
+grammars. Some of the combinators that we will see now are more
+expressive and implement useful patterns.
+
+##### Optional parsers using opt
+
+The combinator `opt` makes a parser optional. The value produced by the
+parser is wrapped in `Some` if the parser accepts the input sequence,
+and the result is `None` otherwise.
+
+    opt(p) === p.map(Some(_)) | epsilon(None)
+
+##### Repetitions using many and many1
+
+The combinator `many` returns a parser that accepts any number of
+repetitions of its argument parser, including 0. The variant `many1`
+forces the parser to match at least once.
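+
+For instance, reusing the `definition` syntax from earlier, the sequence of
+definitions inside a module could be described as (a sketch):
+
+    lazy val definitions: Syntax[Seq[ClassOrFunDef]] = many(definition)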
+
+##### Repetitions with separators repsep and rep1sep
+
+The combinator `repsep` returns a parser that accepts any number of
+repetitions of its argument parser, separated by another parser,
+including 0. The variant `rep1sep` forces the parser to match at least
+once.
+
+The separator parser is restricted to the type `Syntax[Unit]` to ensure
+that important values do not get ignored. You may use `unit()` on a
+parser to turn its value into `Unit` if you explicitly want to ignore the
+values a parser produces.
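+
+For instance, a comma-separated (possibly empty) parameter list could look
+like this (a sketch; assumes `parameter: Syntax[ParamDef]` and a separator
+`comma: Syntax[Unit]`):
+
+    lazy val parameters: Syntax[Seq[ParamDef]] = repsep(parameter, comma)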
+
+##### Binary operators with operators
+
+Scallion also contains combinators to easily build parsers for infix
+binary operators, with different associativities and priority levels.
+This combinator is defined in an additional trait called `Operators`,
+which you should mix into `Parsers` if you want to use the combinator.
+By default, it should already be mixed-in.
+
+    val times: Syntax[String] =
+      accept(OperatorKind("*")) {
+        case _ => "*"
+      }
+
+    ...
+
+    lazy val operation: Syntax[Expr] =
+      operators(number)(
+        // Defines the different operators, by decreasing priority.
+        times | div   is LeftAssociative,
+        plus  | minus is LeftAssociative,
+        ...
+      ) {
+        // Defines how to apply the various operators.
+        case (lhs, "*", rhs) => Times(lhs, rhs).setPos(lhs)
+        ...
+      }
+
+Documentation for `operators` is [available on this
+page](https://epfl-lara.github.io/scallion/scallion/Operators.html).
+
+##### Upcasting
+
+In Scallion, the type `Syntax[A]` is invariant in `A`, meaning that,
+even when `A` is a (strict) subtype of some type `B`, we *won't* have
+that `Syntax[A]` is a subtype of `Syntax[B]`. To upcast a `Syntax[A]` to
+a syntax `Syntax[B]` (when `A` is a subtype of `B`), you should use the
+`.up[B]` method.
+
+For instance, you may need to upcast a syntax of type
+`Syntax[Literal[_]]` to a `Syntax[Expr]` in your assignment. To do so,
+simply use `.up[Expr]`.
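+
+For example (a sketch, assuming a `literal: Syntax[Literal[_]]` is already
+defined):
+
+    lazy val literalExpr: Syntax[Expr] = literal.up[Expr]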
+
+### LL(1) Checking
+
+In Scallion, non-LL(1) parsers can be written, but the result of
+applying such a parser is not specified. In practice, we therefore
+restrict ourselves only to LL(1) parsers. The reason behind this is that
+LL(1) parsers are unambiguous and can be run in time linear in the input
+size.
+
+Writing LL(1) parsers is non-trivial. However, some of the higher-level
+combinators of Scallion already alleviate part of this pain. In
+addition, LL(1) violations can be detected before the parser is run.
+Syntaxes have an `isLL1` method which returns `true` if the parser is
+LL(1) and `false` otherwise, without needing to see any input tokens.
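+
+For instance (a sketch):
+
+    if (!program.isLL1) {
+      // inspect the conflicts (described below) before running the parser
+    }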
+
+#### Conflict Witnesses
+
+In case your parser is not LL(1), the method `conflicts` of the parser
+will return the set of all `LL1Conflict`s. The various conflicts are:
+
+-   `NullableConflict`, which indicates that two branches of a
+    disjunction are nullable.
+-   `FirstConflict`, which indicates that the `first` set of two
+    branches of a disjunction are not disjoint.
+-   `FollowConflict`, which indicates that the `first` set of a nullable
+    parser is not disjoint from the `first` set of a parser that
+    directly follows it.
+
+The `LL1Conflict` objects contain fields which can help you pinpoint
+the exact location of conflicts in your parser and hopefully help you
+fix those.
+
+The helper method `debug` prints a summary of the LL(1) conflicts of a
+parser. We added code in the handout skeleton so that, by default, a
+report is printed in case of conflicts when you initialise your
+parser.
+
+[^1]: Scallion is not the only parser combinator library to exist, far
+    from it! Many of those libraries do not have this restriction. Those
+    libraries generally need to backtrack to try the different
+    alternatives when a branch fails.
+
+[^2]: See [a good explanation of what tying the knot means in the
+    context of lazy
+    languages.](https://stackoverflow.com/questions/357956/explanation-of-tying-the-knot)
diff --git a/info/labs/lab03/project/build.properties b/info/labs/lab03/project/build.properties
new file mode 100644
index 0000000000000000000000000000000000000000..73df629ac1a71e9f7a1c2a1b576bfa037a6142bd
--- /dev/null
+++ b/info/labs/lab03/project/build.properties
@@ -0,0 +1 @@
+sbt.version=1.10.7
diff --git a/info/labs/lab03/project/plugins.sbt b/info/labs/lab03/project/plugins.sbt
new file mode 100644
index 0000000000000000000000000000000000000000..04934558068c370e38064654370e09e029476366
--- /dev/null
+++ b/info/labs/lab03/project/plugins.sbt
@@ -0,0 +1,3 @@
+addSbtPlugin("com.lightbend.sbt" % "sbt-proguard" % "0.3.0")
+
+addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "1.2.0")
\ No newline at end of file
diff --git a/info/labs/lab03/src/amyc/Main.scala b/info/labs/lab03/src/amyc/Main.scala
new file mode 100644
index 0000000000000000000000000000000000000000..52f485984fc3fa2e71b7a329e3f39184374b5f3a
--- /dev/null
+++ b/info/labs/lab03/src/amyc/Main.scala
@@ -0,0 +1,77 @@
+package amyc
+
+import ast._
+import utils._
+import parsing._
+
+import java.io.File
+
+object Main extends MainHelpers {
+  private def parseArgs(args: Array[String]): Context = {
+    var ctx = Context(new Reporter, Nil)
+    args foreach {
+      case "--printTokens" => ctx = ctx.copy(printTokens = true)
+      case "--printTrees"  => ctx = ctx.copy(printTrees = true)
+      case "--interpret"   => ctx = ctx.copy(interpret = true)
+      case "--help"        => ctx = ctx.copy(help = true)
+      case file            => ctx = ctx.copy(files = ctx.files :+ file)
+    }
+    ctx
+  }
+
+  def main(args: Array[String]): Unit = {
+    val ctx = parseArgs(args)
+    if (ctx.help) {
+      val helpMsg = {
+        """Welcome to the Amy reference compiler, v.1.5
+          |
+          |Options:
+          |  --printTokens    Print lexer tokens (with positions) after lexing and exit
+          |  --printTrees     Print trees after parsing and exit
+          |  --interpret      Interpret the program instead of compiling
+          |  --help           Print this message
+        """.stripMargin
+      }
+      println(helpMsg)
+      sys.exit(0)
+    }
+    val pipeline = 
+      AmyLexer.andThen(
+        if (ctx.printTokens) DisplayTokens
+        else Parser.andThen(
+          treePrinterN("Trees after parsing")))
+
+    val files = ctx.files.map(new File(_))
+
+    try {
+      if (files.isEmpty) {
+        ctx.reporter.fatal("No input files")
+      }
+      if (ctx.interpret) {
+        ctx.reporter.fatal("Unsupported actions for now")
+      }
+      files.find(!_.exists()).foreach { f =>
+        ctx.reporter.fatal(s"File not found: ${f.getName}")
+      }
+      pipeline.run(ctx)(files)
+      ctx.reporter.terminateIfErrors()
+    } catch {
+      case AmycFatalError(_) =>
+        sys.exit(1)
+    }
+  }
+}
+
+trait MainHelpers {
+  import SymbolicTreeModule.{Program => SP}
+  import NominalTreeModule.{Program => NP}
+
+  def treePrinterN(title: String): Pipeline[NP, Unit] = {
+    new Pipeline[NP, Unit] {
+      def run(ctx: Context)(v: NP) = {
+        println(title)
+        println(NominalPrinter(v))
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab03/src/amyc/ast/Identifier.scala b/info/labs/lab03/src/amyc/ast/Identifier.scala
new file mode 100644
index 0000000000000000000000000000000000000000..bdf15b3ae504c2484342900f8b9f6222dd295fc2
--- /dev/null
+++ b/info/labs/lab03/src/amyc/ast/Identifier.scala
@@ -0,0 +1,19 @@
+package amyc.ast
+
+object Identifier {
+  private val counter = new amyc.utils.UniqueCounter[String]
+
+  def fresh(name: String): Identifier = new Identifier(name)
+}
+
+// Denotes a unique identifier in an Amy program
+// Notice that we rely on reference equality to compare Identifiers.
+// The numeric id will be generated lazily,
+// so the Identifiers are numbered in order when we print the program.
+final class Identifier private(val name: String) {
+  private lazy val id = Identifier.counter.next(name)
+
+  def fullName = s"${name}_$id"
+
+  override def toString: String = name
+}
diff --git a/info/labs/lab03/src/amyc/ast/Printer.scala b/info/labs/lab03/src/amyc/ast/Printer.scala
new file mode 100644
index 0000000000000000000000000000000000000000..a6c5b020827edcbbef0c2a6e91430b02da16db66
--- /dev/null
+++ b/info/labs/lab03/src/amyc/ast/Printer.scala
@@ -0,0 +1,201 @@
+package amyc.ast
+
+import scala.language.implicitConversions
+import amyc.utils._
+
+// A printer for Amy trees
+trait Printer {
+
+  val treeModule: TreeModule
+  import treeModule._
+
+  implicit def printName(name: Name)(implicit printUniqueIds: Boolean): Document
+  implicit def printQName(name: QualifiedName)(implicit printUniqueIds: Boolean): Document
+
+  protected implicit def stringToDoc(s: String): Raw = Raw(s)
+
+  def apply(t: Tree)(implicit printUniqueIDs: Boolean = false): String = {
+
+    def binOp(e1: Expr, op: String, e2: Expr) = "(" <:> rec(e1) <:> " " + op + " " <:> rec(e2) <:> ")"
+
+    def rec(t: Tree, parens: Boolean = true): Document = t match {
+      /* Definitions */
+      case Program(modules) =>
+        Stacked(modules map (rec(_)), emptyLines = true)
+
+      case ModuleDef(name, defs, optExpr) =>
+        Stacked(
+          "object " <:> name,
+          "",
+          Indented(Stacked(defs ++ optExpr.toList map (rec(_, false)), emptyLines = true)),
+          "end " <:> name,
+          ""
+        )
+
+      case AbstractClassDef(name) =>
+        "abstract class " <:> printName(name)
+
+      case CaseClassDef(name, fields, parent) =>
+        def printField(f: TypeTree) = "v: " <:> rec(f)
+        "case class " <:> name <:> "(" <:> Lined(fields map printField, ", ") <:> ") extends " <:> parent
+
+      case FunDef(name, params, retType, body) =>
+        Stacked(
+          "def " <:> name <:> "(" <:> Lined(params map (rec(_)), ", ") <:> "): " <:> rec(retType) <:> " = {",
+          Indented(rec(body, false)),
+          "}"
+        )
+
+      case ParamDef(name, tpe) =>
+        name <:> ": " <:> rec(tpe)
+
+      /* Expressions */
+      case Variable(name) =>
+        name
+      case IntLiteral(value) =>
+        value.toString
+      case BooleanLiteral(value) =>
+        value.toString
+      case StringLiteral(value) =>
+        "\"" + value + '"'
+      case UnitLiteral() =>
+        "()"
+      case Plus(lhs, rhs) =>
+        binOp(lhs, "+", rhs)
+      case Minus(lhs, rhs) =>
+        binOp(lhs, "-", rhs)
+      case Times(lhs, rhs) =>
+        binOp(lhs, "*", rhs)
+      case Div(lhs, rhs) =>
+        binOp(lhs, "/", rhs)
+      case Mod(lhs, rhs) =>
+        binOp(lhs, "%", rhs)
+      case LessThan(lhs, rhs) =>
+        binOp(lhs, "<", rhs)
+      case LessEquals(lhs, rhs) =>
+        binOp(lhs, "<=", rhs)
+      case And(lhs, rhs) =>
+        binOp(lhs, "&&", rhs)
+      case Or(lhs, rhs) =>
+        binOp(lhs, "||", rhs)
+      case Equals(lhs, rhs) =>
+        binOp(lhs, "==", rhs)
+      case Concat(lhs, rhs) =>
+        binOp(lhs, "++", rhs)
+      case Not(e) =>
+        "!(" <:> rec(e) <:> ")"
+      case Neg(e) =>
+        "-(" <:> rec(e) <:> ")"
+      case Call(name, args) =>
+        name <:> "(" <:> Lined(args map (rec(_)), ", ") <:> ")"
+      case Sequence(lhs, rhs) =>
+        val main = Stacked(
+          rec(lhs, false) <:> ";",
+          rec(rhs, false),
+        )
+        if (parens) {
+          Stacked(
+            "(",
+            Indented(main),
+            ")"
+          )
+        } else {
+          main
+        }
+      case Let(df, value, body) =>
+        val main = Stacked(
+          "val " <:> rec(df) <:> " =",
+          Indented(rec(value)) <:> ";",
+          rec(body, false) // For demonstration purposes, the scope of df is indented
+        )
+        if (parens) {
+          Stacked(
+            "(",
+            Indented(main),
+            ")"
+          )
+        } else {
+          main
+        }
+      case Ite(cond, thenn, elze) =>
+        Stacked(
+          "(if(" <:> rec(cond) <:> ") {",
+          Indented(rec(thenn)),
+          "} else {",
+          Indented(rec(elze)),
+          "})"
+        )
+      case Match(scrut, cases) =>
+        Stacked(
+          rec(scrut) <:> " match {",
+          Indented(Stacked(cases map (rec(_)))),
+          "}"
+        )
+      case Error(msg) =>
+        "error(" <:> rec(msg) <:> ")"
+
+      /* cases and patterns */
+      case MatchCase(pat, expr) =>
+        Stacked(
+          "case " <:> rec(pat) <:> " =>",
+          Indented(rec(expr))
+        )
+      case WildcardPattern() =>
+        "_"
+      case IdPattern(name) =>
+        name
+      case LiteralPattern(lit) =>
+        rec(lit)
+      case CaseClassPattern(name, args) =>
+        name <:> "(" <:> Lined(args map (rec(_)), ", ") <:> ")"
+
+      /* Types */
+      case TypeTree(tp) =>
+        tp match {
+          case IntType => "Int(32)"
+          case BooleanType => "Boolean"
+          case StringType => "String"
+          case UnitType => "Unit"
+          case ClassType(name) => name
+        }
+
+    }
+
+    rec(t).print
+  }
+}
+
+object NominalPrinter extends Printer {
+  val treeModule: NominalTreeModule.type = NominalTreeModule
+  import NominalTreeModule._
+
+  implicit def printName(name: Name)(implicit printUniqueIds: Boolean): Document = Raw(name)
+
+  implicit def printQName(name: QualifiedName)(implicit printUniqueIds: Boolean): Document = {
+    Raw(name match {
+      case QualifiedName(Some(module), name) =>
+        s"$module.$name"
+      case QualifiedName(None, name) =>
+        name
+    })
+  }
+}
+
+object SymbolicPrinter extends SymbolicPrinter
+trait SymbolicPrinter extends Printer {
+  val treeModule: SymbolicTreeModule.type = SymbolicTreeModule
+  import SymbolicTreeModule._
+
+  implicit def printName(name: Name)(implicit printUniqueIds: Boolean): Document = {
+    if (printUniqueIds) {
+      name.fullName
+    } else {
+      name.name
+    }
+  }
+
+  @inline implicit def printQName(name: QualifiedName)(implicit printUniqueIds: Boolean): Document = {
+    printName(name)
+  }
+}
+
diff --git a/info/labs/lab03/src/amyc/ast/TreeModule.scala b/info/labs/lab03/src/amyc/ast/TreeModule.scala
new file mode 100644
index 0000000000000000000000000000000000000000..b92bf4b647cfffd1ad87241299355f373816cbfe
--- /dev/null
+++ b/info/labs/lab03/src/amyc/ast/TreeModule.scala
@@ -0,0 +1,142 @@
+package amyc.ast
+
+import amyc.utils.Positioned
+
+/* A polymorphic module containing definitions of Amy trees.
+ *
+ * This trait represents either nominal trees (where names have not been resolved)
+ * or symbolic trees (where names/qualified names have been resolved to unique identifiers).
+ * This is done by having two type fields within the module,
+ * which will be instantiated differently by the two different modules.
+ *
+ */
+
+trait TreeModule { self =>
+  /* Represents the type of names in this tree module.
+   * (It will be either a plain string, or a unique symbol)
+   */
+  type Name
+
+  // Represents a name, possibly qualified by a module
+  type QualifiedName
+
+  // A printer that knows how to print trees in this module.
+  // The modules will instantiate it as appropriate
+  val printer: Printer { val treeModule: self.type }
+
+  // Common ancestor for all trees
+  trait Tree extends Positioned {
+    override def toString: String = printer(this)
+  }
+
+  // Expressions
+  trait Expr extends Tree
+
+  // Variables
+  case class Variable(name: Name) extends Expr
+
+  // Literals
+  trait Literal[+T] extends Expr { val value: T }
+  case class IntLiteral(value: Int) extends Literal[Int]
+  case class BooleanLiteral(value: Boolean) extends Literal[Boolean]
+  case class StringLiteral(value: String) extends Literal[String]
+  case class UnitLiteral() extends Literal[Unit] { val value: Unit = () }
+
+  // Binary operators
+  case class Plus(lhs: Expr, rhs: Expr) extends Expr
+  case class Minus(lhs: Expr, rhs: Expr) extends Expr
+  case class Times(lhs: Expr, rhs: Expr) extends Expr
+  case class Div(lhs: Expr, rhs: Expr) extends Expr
+  case class Mod(lhs: Expr, rhs: Expr) extends Expr
+  case class LessThan(lhs: Expr, rhs: Expr) extends Expr
+  case class LessEquals(lhs: Expr, rhs: Expr) extends Expr
+  case class And(lhs: Expr, rhs: Expr) extends Expr
+  case class Or(lhs: Expr, rhs: Expr) extends Expr
+  case class Equals(lhs: Expr, rhs: Expr) extends Expr
+  case class Concat(lhs: Expr, rhs: Expr) extends Expr
+
+  // Unary operators
+  case class Not(e: Expr) extends Expr
+  case class Neg(e: Expr) extends Expr
+
+  // Function/constructor call
+  case class Call(qname: QualifiedName, args: List[Expr]) extends Expr
+  // The ; operator
+  case class Sequence(e1: Expr, e2: Expr) extends Expr
+  // Local variable definition
+  case class Let(df: ParamDef, value: Expr, body: Expr) extends Expr
+  // If-then-else
+  case class Ite(cond: Expr, thenn: Expr, elze: Expr) extends Expr
+  // Pattern matching
+  case class Match(scrut: Expr, cases: List[MatchCase]) extends Expr {
+    require(cases.nonEmpty)
+  }
+  // Represents a computational error; prints its message, then exits
+  case class Error(msg: Expr) extends Expr
+
+  // Cases and patterns for Match expressions
+  case class MatchCase(pat: Pattern, expr: Expr) extends Tree
+
+  abstract class Pattern extends Tree
+  case class WildcardPattern() extends Pattern // _
+  case class IdPattern(name: Name) extends Pattern // x
+  case class LiteralPattern[+T](lit: Literal[T]) extends Pattern // 42, true
+  case class CaseClassPattern(constr: QualifiedName, args: List[Pattern]) extends Pattern // C(arg1, arg2)
+
+  // Definitions
+  trait Definition extends Tree { val name: Name }
+  case class ModuleDef(name: Name, defs: List[ClassOrFunDef], optExpr: Option[Expr]) extends Definition
+  trait ClassOrFunDef extends Definition
+  case class FunDef(name: Name, params: List[ParamDef], retType: TypeTree, body: Expr) extends ClassOrFunDef {
+    def paramNames = params.map(_.name)
+  }
+  case class AbstractClassDef(name: Name) extends ClassOrFunDef
+  case class CaseClassDef(name: Name, fields: List[TypeTree], parent: Name) extends ClassOrFunDef
+  case class ParamDef(name: Name, tt: TypeTree) extends Definition
+
+  // Types
+  trait Type
+  case object IntType extends Type {
+    override def toString: String = "Int"
+  }
+  case object BooleanType extends Type {
+    override def toString: String = "Boolean"
+  }
+  case object StringType extends Type {
+    override def toString: String = "String"
+  }
+  case object UnitType extends Type {
+    override def toString: String = "Unit"
+  }
+  case class ClassType(qname: QualifiedName) extends Type {
+    override def toString: String = printer.printQName(qname)(false).print
+  }
+
+  // A wrapper for types that is also a Tree (i.e. has a position)
+  case class TypeTree(tpe: Type) extends Tree
+
+  // Everything is wrapped in a program
+  case class Program(modules: List[ModuleDef]) extends Tree
+}
+
+/* A module containing trees where the names have not been resolved.
+ * Instantiates Name to String and QualifiedName to a pair of Strings
+ * representing (module, name) (where module is optional)
+ */
+object NominalTreeModule extends TreeModule {
+  type Name = String
+  case class QualifiedName(module: Option[String], name: String) {
+    override def toString: String = printer.printQName(this)(false).print
+  }
+  val printer = NominalPrinter
+}
+
+/* A module containing trees where the names have been resolved to unique identifiers.
+ * Both Name and QualifiedName are instantiated to Identifier.
+ */
+object SymbolicTreeModule extends TreeModule {
+  type Name = Identifier
+  type QualifiedName = Identifier
+  val printer = SymbolicPrinter
+}
+
diff --git a/info/labs/lab03/src/amyc/parsing/Lexer.scala b/info/labs/lab03/src/amyc/parsing/Lexer.scala
new file mode 100644
index 0000000000000000000000000000000000000000..60865dd0fed431ef6804b5aa023e0bc49ce8b786
--- /dev/null
+++ b/info/labs/lab03/src/amyc/parsing/Lexer.scala
@@ -0,0 +1,138 @@
+package amyc
+package parsing
+
+import amyc.utils._
+import java.io.File
+
+import silex._
+
+import amyc.utils.Position
+
+// The lexer for Amy.
+object AmyLexer extends Pipeline[List[File], Iterator[Token]]
+                with Lexers {
+
+  /** Tiny Silex reference:
+    * ==============================
+    * Silex's lexer essentially allows you to define a list of regular expressions
+    * in their order of priority. To tokenize a given input stream of characters, each
+    * individual regular expression is applied in turn. If a given expression matches, it
+    * is used to produce a token of maximal length. Whenever a regular expression does not
+    * match, the expression of next-highest priority is tried.
+    * The result is a stream of tokens.
+    *
+    * Regular expressions `r` can be built using the following operators:
+    *   - `word("abc")`  matches the sequence "abc" exactly
+    *   - `r1 | r2`      matches either expression `r1` or expression `r2`
+    *   - `r1 ~ r2`      matches `r1` followed by `r2`
+    *   - `oneOf("xy")`  matches either "x" or "y"
+    *                    (i.e., it is a shorthand of `word` and `|` for single characters)
+    *   - `elem(c)`      matches character `c`
+    *   - `elem(f)`      matches any character for which the boolean predicate `f` holds 
+    *   - `opt(r)`       matches `r` or nothing at all
+    *   - `many(r)`      matches any number of repetitions of `r` (including none at all)
+    *   - `many1(r)`     matches any non-zero number of repetitions of `r`
+    *  
+    * To define the token that should be output for a given expression, one can use
+    * the `|>` combinator with an expression on the left-hand side and a function
+    * producing the token on the right. The function is given the sequence of matched
+    * characters and the source-position range as arguments.
+    * 
+    * For instance,
+    *
+    *   `elem(_.isDigit) ~ word("kg") |> {
+    *     (cs, range) => WeightLiteralToken(cs.mkString).setPos(range._1) }`
+    *
+    * will match a single digit followed by the characters "kg" and turn them into a
+    * "WeightLiteralToken" whose value will be the full string matched (e.g. "1kg").
+    */
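+
+  /** An illustrative sketch only (not one of the Amy rules below): a rule for a
+    * hypothetical hexadecimal literal such as "0x1f" could look like
+    *
+    *   `word("0x") ~ many1(oneOf("0123456789abcdef"))
+    *      |> { (cs, range) => IntLitToken(Integer.parseInt(cs.mkString.drop(2), 16)).setPos(range._1) }`
+    */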
+
+
+  // Type of characters consumed.
+  type Character = Char
+
+  // Type of positions.
+  type Position = SourcePosition
+
+  // Type of tokens produced.
+  type Token = parsing.Token
+
+  import Tokens._
+
+  val lexer = Lexer(
+    // Keywords,
+    word("abstract") | word("case") | word("class") |
+    word("def") | word("else") | word("extends") |
+    word("if") | word("match") | word("object") |
+    word("val") | word("error") | word("_") | word("end")
+      |> { (cs, range) => KeywordToken(cs.mkString).setPos(range._1) },
+
+    // Primitive type names,
+    // TODO
+    
+
+    // Boolean literals,
+    // TODO
+
+    // Operators,
+    // NOTE: You can use `oneOf("abc")` as a shortcut for `word("a") | word("b") | word("c")`
+    // TODO
+    
+    // Identifiers,
+    // TODO
+    
+    // Integer literal,
+    // NOTE: Make sure to handle invalid (e.g. overflowing) integer values safely by
+    //       emitting an ErrorToken instead.
+    // TODO
+    
+    // String literal,
+    // TODO
+    
+    // Delimiters,
+    // TODO
+
+
+    // Whitespace,
+    // TODO
+
+    // Single line comment,
+    word("//") ~ many(elem(_ != '\n'))
+      |> { cs => CommentToken(cs.mkString("")) },
+
+    // Multiline comments,
+    // NOTE: Amy does not support nested multi-line comments (e.g. `/* foo /* bar */ */`).
+    //       Make sure that unclosed multi-line comments result in an ErrorToken.
+    // TODO
+  ) onError {
+    // We also emit ErrorTokens for Silex-handled errors.
+    (cs, range) => ErrorToken(cs.mkString).setPos(range._1)
+  } onEnd {
+    // Once all the input has been consumed, we emit one EOFToken.
+    pos => EOFToken().setPos(pos)
+  }
+
+  override def run(ctx: amyc.utils.Context)(files: List[File]): Iterator[Token] = {
+    var it = Seq[Token]().iterator
+
+    for (file <- files) {
+      val source = Source.fromFile(file.toString, SourcePositioner(file))
+      it ++= lexer.spawn(source).filter {
+        token =>
+          // TODO: Remove all whitespace and comment tokens
+          ???
+      }.map {
+        case token@ErrorToken(error) => ctx.reporter.fatal("Unknown token at " + token.position + ": " + error)
+        case token => token
+      }
+    }
+    it
+  }
+}
+
+/** Extracts all tokens from input and displays them */
+object DisplayTokens extends Pipeline[Iterator[Token], Unit] {
+  override def run(ctx: Context)(tokens: Iterator[Token]): Unit = {
+    tokens.foreach(println(_))
+  }
+}
diff --git a/info/labs/lab03/src/amyc/parsing/Parser.scala b/info/labs/lab03/src/amyc/parsing/Parser.scala
new file mode 100644
index 0000000000000000000000000000000000000000..fdf814bd35fefbc4daeaaa06dcdf18a34335af93
--- /dev/null
+++ b/info/labs/lab03/src/amyc/parsing/Parser.scala
@@ -0,0 +1,156 @@
+package amyc
+package parsing
+
+import scala.language.implicitConversions
+
+import amyc.ast.NominalTreeModule._
+import amyc.utils._
+import Tokens._
+import TokenKinds._
+
+import scallion._
+
+// The parser for Amy
+object Parser extends Pipeline[Iterator[Token], Program]
+                 with Parsers {
+
+  type Token = amyc.parsing.Token
+  type Kind = amyc.parsing.TokenKind
+
+  import Implicits._
+
+  override def getKind(token: Token): TokenKind = TokenKind.of(token)
+
+  val eof: Syntax[Token] = elem(EOFKind)
+  def op(string: String): Syntax[Token] = elem(OperatorKind(string))
+  def kw(string: String): Syntax[Token] = elem(KeywordKind(string))
+
+  implicit def delimiter(string: String): Syntax[Token] = elem(DelimiterKind(string))
+
+  // An entire program (the starting rule for any Amy file).
+  lazy val program: Syntax[Program] = many1(many1(module) ~<~ eof).map(ms => Program(ms.flatten.toList).setPos(ms.head.head))
+
+  // A module (i.e., a collection of definitions and an initializer expression)
+  lazy val module: Syntax[ModuleDef] = (kw("object") ~ identifier ~ many(definition) ~ opt(expr) ~ kw("end") ~ identifier).map {
+    case obj ~ id ~ defs ~ body ~ _ ~ id1 => 
+      if id == id1 then 
+        ModuleDef(id, defs.toList, body).setPos(obj)
+      else 
+        throw new AmycFatalError("Begin and end module names do not match: " + id + " and " + id1)
+  }
+
+  // An identifier.
+  val identifier: Syntax[String] = accept(IdentifierKind) {
+    case IdentifierToken(name) => name
+  }
+
+  // An identifier along with its position.
+  val identifierPos: Syntax[(String, Position)] = accept(IdentifierKind) {
+    case id@IdentifierToken(name) => (name, id.position)
+  }
+
+  // A definition within a module.
+  lazy val definition: Syntax[ClassOrFunDef] = 
+   ???
+  
+
+  // A list of parameter definitions.
+  lazy val parameters: Syntax[List[ParamDef]] = repsep(parameter, ",").map(_.toList)
+
+  // A parameter definition, i.e., an identifier along with the expected type.
+  lazy val parameter: Syntax[ParamDef] = 
+   ???
+
+  // A type expression.
+  lazy val typeTree: Syntax[TypeTree] = primitiveType | identifierType
+
+  // A built-in type (such as `Int`).
+  val primitiveType: Syntax[TypeTree] = (accept(PrimTypeKind) {
+    case tk@PrimTypeToken(name) => TypeTree(name match {
+      case "Unit" => UnitType
+      case "Boolean" => BooleanType
+      case "Int" => IntType
+      case "String" => StringType
+      case _ => throw new java.lang.Error("Unexpected primitive type name: " + name)
+    }).setPos(tk)
+  } ~ opt("(" ~ literal ~ ")")).map { 
+    case (prim@TypeTree(IntType)) ~ Some(_ ~ IntLiteral(32) ~ _) => prim
+    case TypeTree(IntType) ~ Some(_ ~ IntLiteral(width) ~ _) => 
+      throw new AmycFatalError("Int type can only be used with a width of 32 bits, found : " + width)
+    case TypeTree(IntType) ~ Some(_ ~ lit ~ _) =>
+      throw new AmycFatalError("Int type should have an integer width (only 32 bits is supported)")
+    case TypeTree(IntType) ~ None => 
+      throw new AmycFatalError("Int type should have a specific width (only 32 bits is supported)")
+    case prim ~ Some(_) => 
+      throw new AmycFatalError("Only Int type can have a specific width")
+    case prim ~ None => prim
+  }
+
+  // A user-defined type (such as `List`).
+  lazy val identifierType: Syntax[TypeTree] = 
+    ???
+
+  // An expression.
+  // HINT: You can use `operators` to take care of associativity and precedence
+  lazy val expr: Syntax[Expr] = recursive { 
+    ???
+  }
+
+
+  // A literal expression.
+  lazy val literal: Syntax[Literal[?]] = 
+    ???
+
+  // A pattern as part of a match case.
+  lazy val pattern: Syntax[Pattern] = recursive { 
+    ???
+  }
+
+
+  lazy val literalPattern: Syntax[Pattern] = 
+    ???
+
+  lazy val wildPattern: Syntax[Pattern] = 
+    ???
+
+
+
+  // HINT: It is useful to have a restricted set of expressions that don't include any more operators on the outer level.
+  lazy val simpleExpr: Syntax[Expr] = 
+    literal.up[Expr] | variableOrCall | ???
+
+  lazy val variableOrCall: Syntax[Expr] = ???
+
+
+  // TODO: Other definitions.
+  //       Feel free to decompose the rules in whatever way is convenient.
+
+
+  // Ensures the grammar is in LL(1)
+  lazy val checkLL1: Boolean = {
+    if (program.isLL1) {
+      true
+    } else {
+      // Set `showTrails` to true to make Scallion generate some counterexamples for you.
+      // Depending on your grammar, this may be very slow.
+      val showTrails = false
+      debug(program, showTrails)
+      false
+    }
+  }
+
+  override def run(ctx: Context)(tokens: Iterator[Token]): Program = {
+    import ctx.reporter._
+    if (!checkLL1) {
+      ctx.reporter.fatal("Program grammar is not LL1!")
+    }
+
+    val parser = Parser(program)
+
+    parser(tokens) match {
+      case Parsed(result, rest) => result
+      case UnexpectedEnd(rest) => fatal("Unexpected end of input.")
+      case UnexpectedToken(token, rest) => fatal("Unexpected token: " + token + ", possible kinds: " + rest.first.map(_.toString).mkString(", "))
+    }
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab03/src/amyc/parsing/Tokens.scala b/info/labs/lab03/src/amyc/parsing/Tokens.scala
new file mode 100644
index 0000000000000000000000000000000000000000..e002e1f71795aa2368726281e07e7373fcbfbe8b
--- /dev/null
+++ b/info/labs/lab03/src/amyc/parsing/Tokens.scala
@@ -0,0 +1,58 @@
+package amyc
+package parsing
+
+import amyc.utils.Positioned
+
+sealed trait Token extends Positioned with Product {
+  override def toString = {
+    productPrefix + productIterator.mkString("(", ",", ")") + "(" + position.withoutFile + ")"
+  }
+}
+
+object Tokens {
+  final case class KeywordToken(value: String) extends Token    // e.g. keyword "if"
+  final case class IdentifierToken(name: String) extends Token  // e.g. variable name "x" 
+  final case class PrimTypeToken(value: String) extends Token   // e.g. primitive type "Int"
+  final case class IntLitToken(value: Int) extends Token        // e.g. integer literal "123"
+  final case class StringLitToken(value: String) extends Token
+  final case class BoolLitToken(value: Boolean) extends Token
+  final case class DelimiterToken(value: String) extends Token  // .,:;(){}[]= and =>
+  final case class OperatorToken(name: String) extends Token    // e.g. "+"
+  final case class CommentToken(text: String) extends Token     // e.g. "// this is a comment"
+  final case class SpaceToken() extends Token                   // e.g. "\n  "
+  final case class ErrorToken(content: String) extends Token
+  final case class EOFToken() extends Token                     // special token at the end of file
+}
+
+sealed abstract class TokenKind(representation: String) {
+  override def toString: String = representation
+}
+
+object TokenKinds {
+  final case class KeywordKind(value: String) extends TokenKind(value)
+  case object IdentifierKind extends TokenKind("<Identifier>")
+  case object PrimTypeKind extends TokenKind("<Primitive Type>")
+  case object LiteralKind extends TokenKind("<Literal>")
+  final case class DelimiterKind(value: String) extends TokenKind(value)
+  final case class OperatorKind(value: String) extends TokenKind(value)
+  case object EOFKind extends TokenKind("<EOF>")
+  case object NoKind extends TokenKind("<???>")
+}
+
+object TokenKind {
+  import Tokens._
+  import TokenKinds._
+
+  def of(token: Token): TokenKind = token match {
+    case KeywordToken(value) => KeywordKind(value)
+    case IdentifierToken(_) => IdentifierKind
+    case PrimTypeToken(_) => PrimTypeKind
+    case BoolLitToken(_) => LiteralKind
+    case IntLitToken(_) => LiteralKind
+    case StringLitToken(_) => LiteralKind
+    case DelimiterToken(value) => DelimiterKind(value)
+    case OperatorToken(value) => OperatorKind(value)
+    case EOFToken() => EOFKind
+    case _ => NoKind
+  }
+}
\ No newline at end of file
diff --git a/info/labs/lab03/src/amyc/utils/AmycFatalError.scala b/info/labs/lab03/src/amyc/utils/AmycFatalError.scala
new file mode 100644
index 0000000000000000000000000000000000000000..36f2839f7daaffec9de0f7811a41a59b5d3ee4f3
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/AmycFatalError.scala
@@ -0,0 +1,3 @@
+package amyc.utils
+
+case class AmycFatalError(msg: String) extends Exception(msg)
diff --git a/info/labs/lab03/src/amyc/utils/Context.scala b/info/labs/lab03/src/amyc/utils/Context.scala
new file mode 100644
index 0000000000000000000000000000000000000000..6781a090d51907556f640107ae657200233f10c5
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Context.scala
@@ -0,0 +1,13 @@
+package amyc.utils
+
+// Contains a reporter and configuration for the compiler
+case class Context(
+  reporter: Reporter,
+  files: List[String],
+  printTokens: Boolean = false,
+  printTrees: Boolean = false,
+  printNames: Boolean = false,
+  interpret: Boolean = false,
+  typeCheck: Boolean = false,
+  help: Boolean = false
+)
diff --git a/info/labs/lab03/src/amyc/utils/Document.scala b/info/labs/lab03/src/amyc/utils/Document.scala
new file mode 100644
index 0000000000000000000000000000000000000000..93aed6eb941b866f36a018a54443c5f2984babb6
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Document.scala
@@ -0,0 +1,49 @@
+package amyc.utils
+
+// A structured document to be printed with nice indentation
+abstract class Document {
+
+  def <:>(other: Document) = Lined(List(this, other))
+
+  def print: String = {
+    val sb = new StringBuffer()
+
+    def rec(d: Document)(implicit ind: Int, first: Boolean): Unit = d match {
+      case Raw(s) =>
+        if (first && s.nonEmpty) sb.append(("  " * ind))
+        sb.append(s)
+      case Indented(doc) =>
+        rec(doc)(ind + 1, first)
+      case Unindented(doc) =>
+        assume(ind > 0)
+        rec(doc)(ind - 1, first)
+      case Lined(Nil, _) => // skip
+      case Lined(docs, sep) =>
+        rec(docs.head)
+        docs.tail foreach { doc =>
+          rec(sep)(ind, false)
+          rec(doc)(ind, false)
+        }
+      case Stacked(Nil, _) => // skip
+      case Stacked(docs, emptyLines) =>
+        rec(docs.head)
+        docs.tail foreach { doc =>
+          sb.append("\n")
+          if (emptyLines) sb.append("\n")
+          rec(doc)(ind, true)
+        }
+    }
+
+    rec(this)(0, true)
+    sb.toString
+  }
+}
+case class Indented(content: Document) extends Document
+case class Unindented(content: Document) extends Document
+case class Stacked(docs: List[Document], emptyLines: Boolean = false) extends Document
+case class Lined(docs: List[Document], separator: Document = Raw("")) extends Document
+case class Raw(s: String) extends Document
+
+object Stacked {
+  def apply(docs: Document*): Stacked = Stacked(docs.toList)
+}
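+
+// A usage sketch (illustrative only): the document
+//   Stacked(
+//     Raw("def f() = {"),
+//     Indented(Raw("42")),
+//     Raw("}")
+//   ).print
+// yields the three lines "def f() = {", "  42" and "}".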
\ No newline at end of file
diff --git a/info/labs/lab03/src/amyc/utils/Env.scala b/info/labs/lab03/src/amyc/utils/Env.scala
new file mode 100644
index 0000000000000000000000000000000000000000..b8040eea32106540b6cdbee6dcdcfaf947c38efb
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Env.scala
@@ -0,0 +1,19 @@
+package amyc.utils
+
+object Env {
+  trait OS
+  object Linux extends OS
+  object Windows extends OS
+  object Mac extends OS
+
+  lazy val os = {
+    // If all else fails, default to Linux
+    val optOsName = Option(System.getProperty("os.name"))
+    optOsName.map(_.toLowerCase()).map { osName =>
+      if (osName.contains("linux")) then Linux
+      else if (osName.contains("win")) then Windows
+      else if (osName.contains("mac")) then Mac
+      else Linux
+    } getOrElse Linux
+  }
+}
diff --git a/info/labs/lab03/src/amyc/utils/Pipeline.scala b/info/labs/lab03/src/amyc/utils/Pipeline.scala
new file mode 100644
index 0000000000000000000000000000000000000000..eb85f877c2a8ab1aec1e4b3915660c1d5b0c1b87
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Pipeline.scala
@@ -0,0 +1,21 @@
+package amyc.utils
+
+// A sequence of operations to be run by the compiler,
+// with interruption at every stage if there is an error
+abstract class Pipeline[-F, +T] {
+  self =>
+
+  def andThen[G](thenn: Pipeline[T, G]): Pipeline[F, G] = new Pipeline[F,G] {
+    def run(ctx : Context)(v : F) : G = {
+      val first = self.run(ctx)(v)
+      ctx.reporter.terminateIfErrors()
+      thenn.run(ctx)(first)
+    }
+  }
+
+  def run(ctx: Context)(v: F): T
+}
+
+case class Noop[T]() extends Pipeline[T, T] {
+  def run(ctx: Context)(v: T) = v
+}
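+
+// Usage sketch: stages compose left to right, with errors checked between stages.
+// For example, `AmyLexer andThen DisplayTokens` is a Pipeline[List[File], Unit]
+// that lexes the given files and then prints the resulting tokens.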
diff --git a/info/labs/lab03/src/amyc/utils/Position.scala b/info/labs/lab03/src/amyc/utils/Position.scala
new file mode 100644
index 0000000000000000000000000000000000000000..a938eecfc113966d8d25d8cffd56693f4ee360bd
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Position.scala
@@ -0,0 +1,81 @@
+package amyc.utils
+
+import java.io.File
+
+import silex._
+
+object Position {
+  /** Number of bits used to encode the line number */
+  private final val LINE_BITS   = 20
+  /** Number of bits used to encode the column number */
+  private final val COLUMN_BITS = 31 - LINE_BITS // only 31 bits, so packed positions stay non-negative
+  /** Mask to decode the line number */
+  private final val LINE_MASK   = (1 << LINE_BITS) - 1
+  /** Mask to decode the column number */
+  private final val COLUMN_MASK = (1 << COLUMN_BITS) - 1
+
+  private def lineOf(pos: Int): Int = (pos >> COLUMN_BITS) & LINE_MASK
+  private def columnOf(pos: Int): Int = pos & COLUMN_MASK
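+
+  // Worked example: assuming a position is packed as (line << COLUMN_BITS) | column,
+  // line 3, column 5 packs to (3 << 11) | 5 == 6149, and indeed
+  // lineOf(6149) == 3 and columnOf(6149) == 5.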
+
+  def fromFile(f: File, i: Int) = {
+    SourcePosition(f, lineOf(i), columnOf(i))
+  }
+}
+
+abstract class Position {
+  val file: File
+  val line: Int
+  val col: Int
+
+  def isDefined: Boolean
+  def withoutFile: String
+}
+
+case class SourcePosition(file: File, line: Int, col: Int) extends Position {
+  override def toString: String = s"${file.getPath}:$line:$col"
+  def withoutFile = s"$line:$col"
+  val isDefined = true
+}
+
+case object NoPosition extends Position {
+  val file = null
+  val line = 0
+  val col = 0
+
+  override def toString: String = "?:?"
+  def withoutFile = toString
+  val isDefined = false
+}
+
+// A trait for entities which have a position in a file
+trait Positioned {
+
+  protected var pos_ : Position = NoPosition
+
+  def hasPosition = pos_ != NoPosition
+
+  def position = pos_
+
+  def setPos(pos: Position): this.type = {
+    pos_ = pos
+    this
+  }
+
+  def setPos(other: Positioned): this.type = {
+    setPos(other.position)
+  }
+
+}
+
+case class SourcePositioner(file: File) extends Positioner[Char, SourcePosition] {
+  override val start: SourcePosition = SourcePosition(file, 1, 1)
+
+  override def increment(position: SourcePosition, character: Char): SourcePosition =
+    if (character == '\n') {
+      position.copy(line = position.line + 1, col = 1)
+    }
+    else {
+      position.copy(col = position.col + 1)
+    }
+}
+
diff --git a/info/labs/lab03/src/amyc/utils/Reporter.scala b/info/labs/lab03/src/amyc/utils/Reporter.scala
new file mode 100644
index 0000000000000000000000000000000000000000..48ed7d28bbd44899b02cc6dc636723d121f87d67
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/Reporter.scala
@@ -0,0 +1,87 @@
+package amyc.utils
+
+import java.io.File
+import scala.io.Source
+
+// Reports errors and warnings during compilation
+class Reporter {
+
+  /** Issues some information from the compiler */
+  def info(msg: Any, pos: Position = NoPosition): Unit = {
+    report("[ Info  ]", msg, pos)
+  }
+
+  /** Issues a warning from the compiler */
+  def warning(msg: Any, pos: Position = NoPosition): Unit = {
+    report("[Warning]", msg, pos)
+  }
+
+  private var hasErrors = false
+
+  /** Issues a recoverable error message */
+  def error(msg: Any, pos: Position = NoPosition): Unit = {
+    hasErrors = true
+    report("[ Error ]", msg, pos)
+  }
+
+  /** Used for an unrecoverable error: Issues a message, then exits the compiler */
+  def fatal(msg: Any, pos: Position = NoPosition): Nothing = {
+    report("[ Fatal ]", msg, pos)
+    // Besides printing the message, we also store it in the exception so tests can inspect it
+    val errMsg = s"$pos: $msg"
+    throw AmycFatalError(errMsg)
+  }
+
+  // Versions for Positioned
+  def info(msg: Any, pos: Positioned): Unit = info(msg, pos.position)
+  def warning(msg: Any, pos: Positioned): Unit = warning(msg, pos.position)
+  def error(msg: Any, pos: Positioned): Unit = error(msg, pos.position)
+  def fatal(msg: Any, pos: Positioned): Nothing = fatal(msg, pos.position)
+
+
+  /** Terminates the compiler if any errors have been detected. */
+  def terminateIfErrors() = {
+    if (hasErrors) {
+      fatal("There were errors.")
+    }
+  }
+
+  private def err(msg: String): Unit = {
+    Console.err.println(msg)
+  }
+
+  private def report(prefix: String, msg: Any, pos: Position): Unit = {
+    if (pos.isDefined) {
+      err(s"$prefix $pos: $msg")
+
+      val lines = getLines(pos.file)
+
+      if (pos.line > 0 && pos.line-1 < lines.size) {
+        err(s"$prefix ${lines(pos.line-1)}")
+        err(prefix + " " + " "*(pos.col - 1)+"^")
+      } else {
+        err(s"$prefix <line unavailable in source file>")
+      }
+    } else {
+      err(s"$prefix $msg")
+    }
+  }
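+
+  // For instance (hypothetical input), an error at Foo.amy:1:9 on the source line "val x = ?"
+  // is reported roughly as:
+  //   [ Error ] Foo.amy:1:9: Unknown token
+  //   [ Error ] val x = ?
+  //   [ Error ]         ^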
+
+  private var filesToLines = Map[File, IndexedSeq[String]]()
+
+  private def getLines(f: File): IndexedSeq[String] = {
+    filesToLines.get(f) match {
+      case Some(lines) =>
+        lines
+
+      case None =>
+        val source = Source.fromFile(f).withPositioning(true)
+        val lines = source.getLines().toIndexedSeq
+        source.close()
+
+        filesToLines += f -> lines
+
+        lines
+    }
+  }
+}
diff --git a/info/labs/lab03/src/amyc/utils/UniqueCounter.scala b/info/labs/lab03/src/amyc/utils/UniqueCounter.scala
new file mode 100644
index 0000000000000000000000000000000000000000..a3a9cc666b9e20d2aca9d6c942a9ece3d75fb8e7
--- /dev/null
+++ b/info/labs/lab03/src/amyc/utils/UniqueCounter.scala
@@ -0,0 +1,14 @@
+package amyc.utils
+
+import scala.collection.mutable
+
+// Generates consecutive unique integers, counted separately for each key of type K
+class UniqueCounter[K] {
+  private val elemIds = mutable.Map[K, Int]().withDefaultValue(-1)
+
+  def next(key: K): Int = synchronized {
+    elemIds(key) += 1
+    elemIds(key)
+  }
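+
+  // Behaviour sketch: on a fresh counter, next("x") returns 0, then 1, then 2, ...,
+  // while next("y") independently starts from 0 again.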
+
+}
diff --git a/info/labs/lab03/test/resources/lexer/failing/Invalid.grading.amy b/info/labs/lab03/test/resources/lexer/failing/Invalid.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d7d516487e6811c8d6468e5f106ed26514eee95c
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/Invalid.grading.amy
@@ -0,0 +1 @@
+^
diff --git a/info/labs/lab03/test/resources/lexer/failing/SingleAmp.grading.amy b/info/labs/lab03/test/resources/lexer/failing/SingleAmp.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..4bd7e7a4b714afc3586d662aabe18c81067db135
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/SingleAmp.grading.amy
@@ -0,0 +1 @@
+&
diff --git a/info/labs/lab03/test/resources/lexer/failing/SingleBar.grading.amy b/info/labs/lab03/test/resources/lexer/failing/SingleBar.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..948cf947f86682cc5c1a28187aa1fc360525a7e9
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/SingleBar.grading.amy
@@ -0,0 +1 @@
+|
diff --git a/info/labs/lab03/test/resources/lexer/failing/TooBigInt.grading.amy b/info/labs/lab03/test/resources/lexer/failing/TooBigInt.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..18ac27b89c4d5a7fee665889c4071bf186fd941a
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/TooBigInt.grading.amy
@@ -0,0 +1 @@
+999999999999999999999999999
diff --git a/info/labs/lab03/test/resources/lexer/failing/UnclosedComment.grading.amy b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..aa0e17243104a39e24a8a82013af1e8b8f227d61
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment.grading.amy
@@ -0,0 +1 @@
+/* * /
diff --git a/info/labs/lab03/test/resources/lexer/failing/UnclosedComment2.grading.amy b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..63ea916ef5f30255aef0c0b0b63f37b0e1d99fb2
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment2.grading.amy
@@ -0,0 +1 @@
+/*/
diff --git a/info/labs/lab03/test/resources/lexer/failing/UnclosedComment3.grading.amy b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..f1206f0677cc3d61e393121668c78ab50d827856
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/UnclosedComment3.grading.amy
@@ -0,0 +1 @@
+/***
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/lexer/failing/UnclosedString1.grading.amy b/info/labs/lab03/test/resources/lexer/failing/UnclosedString1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1cc80efa6fc7641f9cc21ee1b177fbba1a89e2c3
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/UnclosedString1.grading.amy
@@ -0,0 +1 @@
+"
diff --git a/info/labs/lab03/test/resources/lexer/failing/UnclosedString2.grading.amy b/info/labs/lab03/test/resources/lexer/failing/UnclosedString2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..cb276c1017e5e7221c716575d2f9817890e3ee7f
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/failing/UnclosedString2.grading.amy
@@ -0,0 +1,2 @@
+"
+"
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Combinations.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Combinations.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4864eff8574166b43fd4990d605463a001bbfee9
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Combinations.grading.txt
@@ -0,0 +1,19 @@
+KeywordToken(object)(1:1)
+OperatorToken(<=)(1:7)
+IdentifierToken(id_1)(1:9)
+OperatorToken(++)(1:13)
+OperatorToken(+)(1:15)
+KeywordToken(_)(1:16)
+IntLitToken(1)(1:17)
+DelimiterToken({)(1:18)
+IdentifierToken(id)(1:19)
+DelimiterToken(})(1:21)
+DelimiterToken(()(1:22)
+DelimiterToken())(1:23)
+KeywordToken(class)(1:24)
+OperatorToken(<=)(1:29)
+DelimiterToken(=>)(1:31)
+OperatorToken(==)(1:33)
+OperatorToken(<)(1:35)
+OperatorToken(%)(1:36)
+EOFToken()(2:1)
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/lexer/outputs/CommentClosedTwice.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/CommentClosedTwice.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..45edd1f23caa974b25f05d0134eeeb7cbb8997f4
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/CommentClosedTwice.grading.txt
@@ -0,0 +1,3 @@
+OperatorToken(*)(4:3)
+OperatorToken(/)(4:4)
+EOFToken()(4:5)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Comments.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Comments.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e31001be1927d630f8d9aef9ac05012930637a2
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Comments.grading.txt
@@ -0,0 +1,8 @@
+IntLitToken(1)(1:1)
+IntLitToken(2)(3:1)
+IntLitToken(3)(4:1)
+IntLitToken(4)(5:1)
+IntLitToken(5)(10:1)
+IntLitToken(6)(11:12)
+IntLitToken(7)(12:1)
+EOFToken()(14:1)
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Delimiters.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Delimiters.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..0282fae7374357f7494b08822b350f1e92d8ddcd
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Delimiters.grading.txt
@@ -0,0 +1,11 @@
+DelimiterToken({)(1:1)
+DelimiterToken(})(1:3)
+DelimiterToken(()(1:5)
+DelimiterToken())(1:7)
+DelimiterToken(,)(1:9)
+DelimiterToken(:)(1:11)
+DelimiterToken(.)(1:13)
+DelimiterToken(=)(1:15)
+DelimiterToken(=>)(1:17)
+KeywordToken(_)(1:20)
+EOFToken()(2:1)
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Identifiers.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Identifiers.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a14fd9f41e5277fff6aebed89469b95932e32991
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Identifiers.grading.txt
@@ -0,0 +1,21 @@
+IdentifierToken(app)(1:1)
+IdentifierToken(boolean)(1:5)
+IdentifierToken(caSe)(2:1)
+IdentifierToken(Class)(2:6)
+IdentifierToken(df)(2:12)
+IdentifierToken(elze)(2:15)
+IdentifierToken(Error)(2:20)
+IdentifierToken(Extends)(2:26)
+IdentifierToken(False)(2:34)
+IdentifierToken(iff)(2:40)
+IdentifierToken(int)(2:44)
+IdentifierToken(module)(2:48)
+IdentifierToken(string)(2:55)
+IdentifierToken(True)(3:1)
+IdentifierToken(unit)(3:6)
+IdentifierToken(vals)(3:11)
+IdentifierToken(this_is_id)(5:1)
+IdentifierToken(this_IS_id2)(5:12)
+IdentifierToken(st1ll1s_1d)(5:24)
+IdentifierToken(St1ll1s_1d)(7:1)
+EOFToken()(8:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/IntLiterals.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/IntLiterals.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..289b7358b473f12827ce309abc4a2558867adfcc
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/IntLiterals.grading.txt
@@ -0,0 +1,6 @@
+IntLitToken(123)(1:1)
+IntLitToken(12345)(1:5)
+IntLitToken(6789)(2:1)
+OperatorToken(+)(2:5)
+IntLitToken(12345)(2:6)
+EOFToken()(3:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Keywords.txt b/info/labs/lab03/test/resources/lexer/outputs/Keywords.txt
new file mode 100644
index 0000000000000000000000000000000000000000..521b69bfd954641d3686c8259570d0082ce43f6e
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Keywords.txt
@@ -0,0 +1,19 @@
+KeywordToken(abstract)(1:1)
+PrimTypeToken(Boolean)(1:10)
+KeywordToken(case)(2:1)
+KeywordToken(class)(2:6)
+KeywordToken(def)(2:12)
+KeywordToken(else)(2:16)
+KeywordToken(error)(2:21)
+KeywordToken(extends)(2:27)
+BoolLitToken(false)(2:35)
+KeywordToken(if)(2:41)
+PrimTypeToken(Int)(2:44)
+KeywordToken(match)(2:48)
+KeywordToken(object)(2:54)
+PrimTypeToken(String)(2:61)
+BoolLitToken(true)(3:1)
+PrimTypeToken(Unit)(3:6)
+KeywordToken(val)(3:11)
+KeywordToken(end)(3:15)
+EOFToken()(4:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Operators.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Operators.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e901ec948a2e92a69c75aa399a739a1644e1353f
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Operators.grading.txt
@@ -0,0 +1,14 @@
+DelimiterToken(;)(1:1)
+OperatorToken(+)(1:3)
+OperatorToken(-)(1:5)
+OperatorToken(*)(1:7)
+OperatorToken(/)(1:9)
+OperatorToken(%)(1:11)
+OperatorToken(<)(1:13)
+OperatorToken(<=)(1:15)
+OperatorToken(&&)(1:18)
+OperatorToken(||)(1:21)
+OperatorToken(==)(1:24)
+OperatorToken(++)(1:27)
+OperatorToken(!)(1:30)
+EOFToken()(2:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/StringLiterals.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/StringLiterals.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..12abcd1fee64aee67356b96298403500cdb3d49a
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/StringLiterals.grading.txt
@@ -0,0 +1,5 @@
+StringLitToken(This is a string)(1:1)
+StringLitToken(Another with ^^ | # invalid chars)(2:1)
+StringLitToken(No escape \n characters \t)(3:1)
+StringLitToken( \\ No comments /* either )(4:1)
+EOFToken()(5:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/TwoFiles.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/TwoFiles.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..60b6c51dd363a885eca9434f60d375f458d0d856
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/TwoFiles.grading.txt
@@ -0,0 +1,34 @@
+KeywordToken(abstract)(1:1)
+PrimTypeToken(Boolean)(1:10)
+KeywordToken(case)(2:1)
+KeywordToken(class)(2:6)
+KeywordToken(def)(2:12)
+KeywordToken(else)(2:16)
+KeywordToken(error)(2:21)
+KeywordToken(extends)(2:27)
+BoolLitToken(false)(2:35)
+KeywordToken(if)(2:41)
+PrimTypeToken(Int)(2:44)
+KeywordToken(match)(2:48)
+KeywordToken(object)(2:54)
+PrimTypeToken(String)(2:61)
+BoolLitToken(true)(3:1)
+PrimTypeToken(Unit)(3:6)
+KeywordToken(val)(3:11)
+KeywordToken(end)(3:15)
+EOFToken()(4:1)
+
+DelimiterToken(;)(1:1)
+OperatorToken(+)(1:3)
+OperatorToken(-)(1:5)
+OperatorToken(*)(1:7)
+OperatorToken(/)(1:9)
+OperatorToken(%)(1:11)
+OperatorToken(<)(1:13)
+OperatorToken(<=)(1:15)
+OperatorToken(&&)(1:18)
+OperatorToken(||)(1:21)
+OperatorToken(==)(1:24)
+OperatorToken(++)(1:27)
+OperatorToken(!)(1:30)
+EOFToken()(2:1)
diff --git a/info/labs/lab03/test/resources/lexer/outputs/Whitespace.grading.txt b/info/labs/lab03/test/resources/lexer/outputs/Whitespace.grading.txt
new file mode 100644
index 0000000000000000000000000000000000000000..95609c96158df2795b3c03de0fd2ebbf320acad2
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/outputs/Whitespace.grading.txt
@@ -0,0 +1,3 @@
+IntLitToken(1)(1:2)
+IntLitToken(2)(2:5)
+EOFToken()(3:1)
diff --git a/info/labs/lab03/test/resources/lexer/passing/Combinations.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Combinations.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..11625fe536bdc1cbd33ad154928cbde42941dd77
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Combinations.grading.amy
@@ -0,0 +1 @@
+object<=id_1+++_1{id}()class<==>==<%
diff --git a/info/labs/lab03/test/resources/lexer/passing/CommentClosedTwice.grading.amy b/info/labs/lab03/test/resources/lexer/passing/CommentClosedTwice.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1ac7d3965a7ffcd7526299aeb0132a4aecdcff0e
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/CommentClosedTwice.grading.amy
@@ -0,0 +1,4 @@
+/* This comment is closed twice,
+which should be an error:
+
+*/*/
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/lexer/passing/Comments.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Comments.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..a9d2c9f61e5605d2887146a9d8f8ef2084f1e7cf
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Comments.grading.amy
@@ -0,0 +1,13 @@
+1
+// This is a comment
+2 /* This is also a comment */
+3 /* Still * /* comment */
+4 /* Multiline
+/*
+*
+Comment
+*/
+5 /***/
+/* abc **/ 6 /* def */
+7
+//
diff --git a/info/labs/lab03/test/resources/lexer/passing/Delimiters.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Delimiters.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..57ce3b29997aea1d250af1a19357669a1f51a53d
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Delimiters.grading.amy
@@ -0,0 +1 @@
+{ } ( ) , : . = => _
diff --git a/info/labs/lab03/test/resources/lexer/passing/Identifiers.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Identifiers.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d14272d79e91d9336c36ed0842d34d3db2ecd577
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Identifiers.grading.amy
@@ -0,0 +1,7 @@
+app boolean
+caSe Class df elze Error Extends False iff int module string
+True unit vals
+
+this_is_id this_IS_id2 st1ll1s_1d
+
+St1ll1s_1d
diff --git a/info/labs/lab03/test/resources/lexer/passing/IntLiterals.grading.amy b/info/labs/lab03/test/resources/lexer/passing/IntLiterals.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..6f20e8887546b8e265bb3d41b3df3c1414dc0971
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/IntLiterals.grading.amy
@@ -0,0 +1,2 @@
+123 012345
+6789+12345
diff --git a/info/labs/lab03/test/resources/lexer/passing/Keywords.amy b/info/labs/lab03/test/resources/lexer/passing/Keywords.amy
new file mode 100644
index 0000000000000000000000000000000000000000..06c4ef0bc5e7f8b639a4ae62e0bc5e73c60d0558
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Keywords.amy
@@ -0,0 +1,3 @@
+abstract Boolean
+case class def else error extends false if Int match object String
+true Unit val end
diff --git a/info/labs/lab03/test/resources/lexer/passing/Operators.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Operators.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bd293d768a9e6b997ee76b96b7c90958b134f4ae
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Operators.grading.amy
@@ -0,0 +1 @@
+; + - * / % < <= && || == ++ !
diff --git a/info/labs/lab03/test/resources/lexer/passing/StringLiterals.grading.amy b/info/labs/lab03/test/resources/lexer/passing/StringLiterals.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1309e0479078156624422a623efb9537a7b8861f
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/StringLiterals.grading.amy
@@ -0,0 +1,4 @@
+"This is a string"
+"Another with ^^ | # invalid chars"
+"No escape \n characters \t"
+" \\ No comments /* either "
diff --git a/info/labs/lab03/test/resources/lexer/passing/Whitespace.grading.amy b/info/labs/lab03/test/resources/lexer/passing/Whitespace.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..ff92277b28103502905b09b145b8ed31634c5de1
--- /dev/null
+++ b/info/labs/lab03/test/resources/lexer/passing/Whitespace.grading.amy
@@ -0,0 +1,2 @@
+	1 // Tab indented
+    2 // Space indented
diff --git a/info/labs/lab03/test/resources/parser/failing/ArgsError1.grading.amy b/info/labs/lab03/test/resources/parser/failing/ArgsError1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..9cc31c3e5e6b9c255d6ad8b0e41374be8d84f0d4
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ArgsError1.grading.amy
@@ -0,0 +1,3 @@
+object Args
+  foo(bar, )
+end Args
diff --git a/info/labs/lab03/test/resources/parser/failing/ArgsError2.grading.amy b/info/labs/lab03/test/resources/parser/failing/ArgsError2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..f55dacb513e4795b5a6f77a39434d1de75e3a35c
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ArgsError2.grading.amy
@@ -0,0 +1,3 @@
+object Args
+  foo(bar baz)
+end Args
diff --git a/info/labs/lab03/test/resources/parser/failing/ClassDefError1.grading.amy b/info/labs/lab03/test/resources/parser/failing/ClassDefError1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..384cf065e9d83b1cca573401807a0163da725af6
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ClassDefError1.grading.amy
@@ -0,0 +1,3 @@
+object ClassDef
+  case class Foo()
+end ClassDef
diff --git a/info/labs/lab03/test/resources/parser/failing/ClassDefError2.grading.amy b/info/labs/lab03/test/resources/parser/failing/ClassDefError2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..c8ed50cc0b9919493404890104e55a01b5b56056
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ClassDefError2.grading.amy
@@ -0,0 +1,3 @@
+object ClassDef
+  case class Foo extends Bar
+end ClassDef
diff --git a/info/labs/lab03/test/resources/parser/failing/ClassDefError3.grading.amy b/info/labs/lab03/test/resources/parser/failing/ClassDefError3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1ce13b18582af0f6098bdb727be4d71dd7c0d505
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ClassDefError3.grading.amy
@@ -0,0 +1,3 @@
+object ClassDef
+  case class Foo(x, y: Int(32)) extends Bar
+end ClassDef
diff --git a/info/labs/lab03/test/resources/parser/failing/ClassDefError4.grading.amy b/info/labs/lab03/test/resources/parser/failing/ClassDefError4.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..fdb8f69de4f2345c59955a93646783c6e7840be9
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ClassDefError4.grading.amy
@@ -0,0 +1,3 @@
+object ClassDef
+  case class Foo(Int, y: Int(32)) extends Bar
+end ClassDef
diff --git a/info/labs/lab03/test/resources/parser/failing/CommentClosedTwice.grading.amy b/info/labs/lab03/test/resources/parser/failing/CommentClosedTwice.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bf829d8e3976334fae6dba46ccaa9e4db600acd5
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/CommentClosedTwice.grading.amy
@@ -0,0 +1,4 @@
+/* This comment is closed twice,
+which should not be interpreted as a single closed comment
+
+*/*/
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/EmptyFile.grading.amy b/info/labs/lab03/test/resources/parser/failing/EmptyFile.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError1.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..63be12c7c21f355b768a737f2a271fb04335d0ea
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError1.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo: Int(32) = { 42 }
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError2.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..33deba800d79a2b3f9165872f525bedf4519ca97
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError2.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo(i: Int(32)) = { 42 }
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError3.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..418f5d1c8d7269a2b0ea77cb4f9f986ab0b6d746
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError3.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo(i): Int(32) = { 42 }
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError4.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError4.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..4fd5282541980ca54694b2c0ae53114efd8e5682
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError4.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo(i: Int(32)): Int(32) = 42
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError5.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError5.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d4576ce5228db83e3370b072e518cdfd9d769c28
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError5.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo(i: Int(32),): Int(32) = { 42 }
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/FunDefError6.grading.amy b/info/labs/lab03/test/resources/parser/failing/FunDefError6.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..66435004b814d965674a0c8b2d58c9dc6a8543f8
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/FunDefError6.grading.amy
@@ -0,0 +1,3 @@
+object FunDef
+  def foo(i: Int(32) j: Int(32)): Int = { 42 }
+end FunDef
diff --git a/info/labs/lab03/test/resources/parser/failing/IfPrecedence.grading.amy b/info/labs/lab03/test/resources/parser/failing/IfPrecedence.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..5d7a832cc8c3ed863dff667e3c907c9777a63359
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/IfPrecedence.grading.amy
@@ -0,0 +1,3 @@
+object IfPrecedence
+  if (true) { 1 } else { 0 } + 42
+end IfPrecedence
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/IntError1.grading.amy b/info/labs/lab03/test/resources/parser/failing/IntError1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..0436f447de756154537d9b0b9687c4c83e07bf21
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/IntError1.grading.amy
@@ -0,0 +1,3 @@
+object IntError 
+  def foo(): Int = { 42 }
+end IntError
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/IntError2.grading.amy b/info/labs/lab03/test/resources/parser/failing/IntError2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..1f55e05be721dc81b487788ccb65ba878e949996
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/IntError2.grading.amy
@@ -0,0 +1,3 @@
+object IntError 
+  def foo(): Int(64) = { 42 }
+end IntError
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/IntError3.grading.amy b/info/labs/lab03/test/resources/parser/failing/IntError3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..11456ccddc2d672bcd80248623e3c9797a3de7f8
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/IntError3.grading.amy
@@ -0,0 +1,3 @@
+object IntError 
+  def foo(): Int(true) = { 42 }
+end IntError
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/MatchAsOperand.grading.amy b/info/labs/lab03/test/resources/parser/failing/MatchAsOperand.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..19bde86783d487b5a6f420964814050c90dab683
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/MatchAsOperand.grading.amy
@@ -0,0 +1,5 @@
+object Match
+  x match {
+    case y => z
+  } + 1
+end Match
diff --git a/info/labs/lab03/test/resources/parser/failing/MissingOperand.grading.amy b/info/labs/lab03/test/resources/parser/failing/MissingOperand.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..7918a1d810c7733f246ba968681801741a081d86
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/MissingOperand.grading.amy
@@ -0,0 +1,3 @@
+object Operand
+  true || (x && )
+end Operand
diff --git a/info/labs/lab03/test/resources/parser/failing/TypeWidth.grading.amy b/info/labs/lab03/test/resources/parser/failing/TypeWidth.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..693d3e8b840b98b6470450371ac90d6661102dab
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/TypeWidth.grading.amy
@@ -0,0 +1,3 @@
+object TypeWidth
+  def foo(): Boolean(32) = { true }
+end TypeWidth
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/UnaryWithinUnary.grading.amy b/info/labs/lab03/test/resources/parser/failing/UnaryWithinUnary.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..fcaaf5972a3bc8413cae00723863af4cae7e189a
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/UnaryWithinUnary.grading.amy
@@ -0,0 +1,3 @@
+object Unary
+  --x
+end Unary
diff --git a/info/labs/lab03/test/resources/parser/failing/UnmatchedModule.grading.amy b/info/labs/lab03/test/resources/parser/failing/UnmatchedModule.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..b034c0f254815ea288766ccbb8d4fbfb8fc7a8aa
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/UnmatchedModule.grading.amy
@@ -0,0 +1 @@
+object UnmatchedModule
diff --git a/info/labs/lab03/test/resources/parser/failing/UnmatchedModuleName.grading.amy b/info/labs/lab03/test/resources/parser/failing/UnmatchedModuleName.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..da894a910f91726d0d7cd38ff249b6116a477ed0
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/UnmatchedModuleName.grading.amy
@@ -0,0 +1,2 @@
+object UnmatchedModule
+end Module
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/failing/UnmatchedParen.grading.amy b/info/labs/lab03/test/resources/parser/failing/UnmatchedParen.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bc5edf7db3309b571bb807ade60127a53be56565
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/UnmatchedParen.grading.amy
@@ -0,0 +1,3 @@
+object UnmatchedParen
+  1 + ( 2 - 3
+end UnmatchedParen
diff --git a/info/labs/lab03/test/resources/parser/failing/ValAsOperand.grading.amy b/info/labs/lab03/test/resources/parser/failing/ValAsOperand.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..2b6d8a14033aeda248c20250cbe4eaf2cac4d334
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ValAsOperand.grading.amy
@@ -0,0 +1,3 @@
+object Val
+  1 + val x: Int(32) = 42; x
+end Val
diff --git a/info/labs/lab03/test/resources/parser/failing/ValError.grading.amy b/info/labs/lab03/test/resources/parser/failing/ValError.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..146aac0f5c5a5f2fbe70a3cf923200a48bbbe1eb
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ValError.grading.amy
@@ -0,0 +1,3 @@
+object Val
+  val x = 42; x
+end Val
diff --git a/info/labs/lab03/test/resources/parser/failing/ValInVal.grading.amy b/info/labs/lab03/test/resources/parser/failing/ValInVal.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..409ff35470e6e9da0b720188382acce79517ff11
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/ValInVal.grading.amy
@@ -0,0 +1,3 @@
+object Val
+  val x: Int(32) = val y: Int = 10; 5; 42
+end Val
diff --git a/info/labs/lab03/test/resources/parser/failing/WrongQName1.grading.amy b/info/labs/lab03/test/resources/parser/failing/WrongQName1.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..e4e4981a5893c3bae663b62e22e0e47f2dd8b51e
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/WrongQName1.grading.amy
@@ -0,0 +1,2 @@
+object Foo.bar
+end Foo.bar
diff --git a/info/labs/lab03/test/resources/parser/failing/WrongQName2.grading.amy b/info/labs/lab03/test/resources/parser/failing/WrongQName2.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..2e12b6c7035ce676ac697a5896018e3fa99fc318
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/WrongQName2.grading.amy
@@ -0,0 +1,3 @@
+object WrongQName
+  abstract class Foo.bar
+end WrongQName
diff --git a/info/labs/lab03/test/resources/parser/failing/WrongQName3.grading.amy b/info/labs/lab03/test/resources/parser/failing/WrongQName3.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..937765382ce5730a9a60497f43d81ea8f4eca3cc
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/WrongQName3.grading.amy
@@ -0,0 +1,3 @@
+object WrongQName
+  case class Foo() extends Baz.bar
+end WrongQName
diff --git a/info/labs/lab03/test/resources/parser/failing/WrongQName4.grading.amy b/info/labs/lab03/test/resources/parser/failing/WrongQName4.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..d8193aba21605391e4606654f3d7d50c5a9324ac
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/failing/WrongQName4.grading.amy
@@ -0,0 +1,3 @@
+object WrongQName
+  42 + foo.bar + 1
+end WrongQName
diff --git a/info/labs/lab03/test/resources/parser/outputs/Assoc.grading.amy b/info/labs/lab03/test/resources/parser/outputs/Assoc.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..dbb040d052b967d5ed2d21924f29437ebc44de04
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Assoc.grading.amy
@@ -0,0 +1,8 @@
+object Assoc
+  ((1 + 2) - 3);
+  ((x / y) * z);
+  -(42);
+  ((1 < 2) <= 3);
+  (true || false);
+  ((1 < 2) && (2 < 3))
+end Assoc
diff --git a/info/labs/lab03/test/resources/parser/outputs/AssocSemicolon.grading.amy b/info/labs/lab03/test/resources/parser/outputs/AssocSemicolon.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..9bc56b557c6498850936785efbf49f04650bc5e6
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/AssocSemicolon.grading.amy
@@ -0,0 +1,10 @@
+object AssocSemicolon
+  1;
+  2;
+  val x: Int(32) =
+    z;
+  val y: Int(32) =
+    w;
+  u
+end AssocSemicolon
+
diff --git a/info/labs/lab03/test/resources/parser/outputs/ChainedMatch.grading.amy b/info/labs/lab03/test/resources/parser/outputs/ChainedMatch.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..eb76149f847b38b3743cd6b438daa2527321656b
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/ChainedMatch.grading.amy
@@ -0,0 +1,10 @@
+object ChainedMatch
+
+  0 match {
+    case x =>
+      x
+  } match {
+    case 42 =>
+      "Yeah"
+  }
+end ChainedMatch
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/outputs/ClassDefs.grading.amy b/info/labs/lab03/test/resources/parser/outputs/ClassDefs.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bf40046c57045d7ec7fb2e21ba317058a3c18e98
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/ClassDefs.grading.amy
@@ -0,0 +1,7 @@
+object CaseClassDefs
+  abstract class Foo
+  case class Bar() extends Foo
+  case class Bar(v: Int(32)) extends Foo
+  abstract class Foo
+  case class Bar(v: Int(32), v: A) extends Foo
+end CaseClassDefs
diff --git a/info/labs/lab03/test/resources/parser/outputs/Empty.amy b/info/labs/lab03/test/resources/parser/outputs/Empty.amy
new file mode 100644
index 0000000000000000000000000000000000000000..b8246700158e5b24a7415a233d6c5028b0850733
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Empty.amy
@@ -0,0 +1,2 @@
+object Empty
+end Empty
diff --git a/info/labs/lab03/test/resources/parser/outputs/ErrorToken1.amy b/info/labs/lab03/test/resources/parser/outputs/ErrorToken1.amy
new file mode 100644
index 0000000000000000000000000000000000000000..e865cef3934f0ce95ff66f0275b1c4844c56fe69
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/ErrorToken1.amy
@@ -0,0 +1,3 @@
+object ErrorToken1
+  error("")
+end ErrorToken1
diff --git a/info/labs/lab03/test/resources/parser/outputs/ErrorToken2.amy b/info/labs/lab03/test/resources/parser/outputs/ErrorToken2.amy
new file mode 100644
index 0000000000000000000000000000000000000000..96c73e53a98e5832141667b1b2d215cc6eb69719
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/ErrorToken2.amy
@@ -0,0 +1,3 @@
+object ErrorToken2
+  (false || error(""))
+end ErrorToken2
diff --git a/info/labs/lab03/test/resources/parser/outputs/FunCalls.grading.amy b/info/labs/lab03/test/resources/parser/outputs/FunCalls.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..2c3845f1567922151095af9c3c6308e7b1a287a0
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/FunCalls.grading.amy
@@ -0,0 +1,30 @@
+object FunCalls
+  def foo(i: Int(32)): Int(32) = {
+    i
+  }
+  def bar(i: Int(32), j: Int(32)): Int(32) = {
+    (i + j)
+  }
+  foo(1);
+  val foz: Int(32) =
+    4;
+  foo(foz);
+  foo((
+    val f: Int(32) =
+      42;
+    f
+  ));
+  bar(1, 2);
+  val baz: Int(32) =
+    4;
+  foo(foz, baz);
+  foo((
+    val f: Int(32) =
+      42;
+    f
+  ), (
+    val b: Int(32) =
+      1;
+    b
+  ))
+end FunCalls
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/outputs/FunDefs.grading.amy b/info/labs/lab03/test/resources/parser/outputs/FunDefs.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..85b2d76ecb053a5a120cc397bfd93a06ec12aec2
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/FunDefs.grading.amy
@@ -0,0 +1,11 @@
+object FunDefs
+  def foo(): Int(32) = {
+    ()
+  }
+  def foo(i: Int(32)): Int(32) = {
+    42
+  }
+  def foo(i: I, j: Boolean, s: String, u: Unit, x: X): Type = {
+    42
+  }
+end FunDefs
diff --git a/info/labs/lab03/test/resources/parser/outputs/IfCondition.grading.amy b/info/labs/lab03/test/resources/parser/outputs/IfCondition.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..491ed6cda8ed1510626fb24d1416baf52112a7d4
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/IfCondition.grading.amy
@@ -0,0 +1,11 @@
+object IfCondition
+  (if((
+    val x: Boolean =
+      true;
+    x
+  )) {
+    1
+  } else {
+    2
+  })
+end IfCondition
diff --git a/info/labs/lab03/test/resources/parser/outputs/List.grading.amy b/info/labs/lab03/test/resources/parser/outputs/List.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..0bc8352d7e2fc1ec380cef3c09a51e61ef745cd5
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/List.grading.amy
@@ -0,0 +1,176 @@
+object L
+  abstract class List
+  case class Nil() extends List
+  case class Cons(v: Int(32), v: List) extends List
+  def isEmpty(l: List): Boolean = {
+    l match {
+      case Nil() =>
+        true
+      case _ =>
+        false
+    }
+  }
+  def length(l: List): Int(32) = {
+    l match {
+      case Nil() =>
+        0
+      case Cons(_, t) =>
+        (1 + length(t))
+    }
+  }
+  def head(l: List): Int(32) = {
+    l match {
+      case Cons(h, _) =>
+        h
+      case Nil() =>
+        error("head(Nil)")
+    }
+  }
+  def headOption(l: List): O.Option = {
+    l match {
+      case Cons(h, _) =>
+        O.Some(h)
+      case Nil() =>
+        O.None()
+    }
+  }
+  def reverse(l: List): List = {
+    reverseAcc(l, Nil())
+  }
+  def reverseAcc(l: List, acc: List): List = {
+    l match {
+      case Nil() =>
+        acc
+      case Cons(h, t) =>
+        reverseAcc(t, Cons(h, acc))
+    }
+  }
+  def indexOf(l: List, i: Int(32)): Int(32) = {
+    l match {
+      case Nil() =>
+        -(1)
+      case Cons(h, t) =>
+        (if((h == i)) {
+          0
+        } else {
+          (
+            val rec: Int(32) =
+              indexOf(t, i);
+            (if((0 <= rec)) {
+              (rec + 1)
+            } else {
+              -(1)
+            })
+          )
+        })
+    }
+  }
+  def range(from: Int(32), to: Int(32)): List = {
+    (if((to < from)) {
+      Nil()
+    } else {
+      Cons(from, range((from + 1), to))
+    })
+  }
+  def sum(l: List): Int(32) = {
+    l match {
+      case Nil() =>
+        0
+      case Cons(h, t) =>
+        (h + sum(t))
+    }
+  }
+  def concat(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() =>
+        l2
+      case Cons(h, t) =>
+        Cons(h, concat(t, l2))
+    }
+  }
+  def contains(l: List, elem: Int(32)): Boolean = {
+    l match {
+      case Nil() =>
+        false
+      case Cons(h, t) =>
+        ((h == elem) || contains(t, elem))
+    }
+  }
+  abstract class LPair
+  case class LP(v: List, v: List) extends LPair
+  def merge(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() =>
+        l2
+      case Cons(h1, t1) =>
+        l2 match {
+          case Nil() =>
+            l1
+          case Cons(h2, t2) =>
+            (if((h1 <= h2)) {
+              Cons(h1, merge(t1, l2))
+            } else {
+              Cons(h2, merge(l1, t2))
+            })
+        }
+    }
+  }
+  def split(l: List): LPair = {
+    l match {
+      case Cons(h1, Cons(h2, t)) =>
+        (
+          val rec: LPair =
+            split(t);
+          rec match {
+            case LP(rec1, rec2) =>
+              LP(Cons(h1, rec1), Cons(h2, rec2))
+          }
+        )
+      case _ =>
+        LP(l, Nil())
+    }
+  }
+  def mergeSort(l: List): List = {
+    l match {
+      case Nil() =>
+        l
+      case Cons(h, Nil()) =>
+        l
+      case l =>
+        split(l) match {
+          case LP(l1, l2) =>
+            merge(mergeSort(l1), mergeSort(l2))
+        }
+    }
+  }
+  def toString(l: List): String = {
+    l match {
+      case Nil() =>
+        "List()"
+      case more =>
+        (("List(" ++ toString1(more)) ++ ")")
+    }
+  }
+  def toString1(l: List): String = {
+    l match {
+      case Cons(h, Nil()) =>
+        Std.intToString(h)
+      case Cons(h, t) =>
+        ((Std.intToString(h) ++ ", ") ++ toString1(t))
+    }
+  }
+  def take(l: List, n: Int(32)): List = {
+    (if((n <= 0)) {
+      Nil()
+    } else {
+      l match {
+        case Nil() =>
+          Nil()
+        case Cons(h, t) =>
+          Cons(h, take(t, (n - 1)))
+      }
+    })
+  }
+end L
+
+
diff --git a/info/labs/lab03/test/resources/parser/outputs/Literals.amy b/info/labs/lab03/test/resources/parser/outputs/Literals.amy
new file mode 100644
index 0000000000000000000000000000000000000000..6a6997cba8590ab3a9613ff4cdd95ebaae993a90
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Literals.amy
@@ -0,0 +1,7 @@
+object Literals
+  1;
+  ();
+  ();
+  "Hello";
+  true
+end Literals
diff --git a/info/labs/lab03/test/resources/parser/outputs/MatchScrutinee.grading.amy b/info/labs/lab03/test/resources/parser/outputs/MatchScrutinee.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..8e563e84277717615780a4ec05f561465972f0e0
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/MatchScrutinee.grading.amy
@@ -0,0 +1,12 @@
+object IfMatch
+  (if(true) {
+    1
+  } else {
+    2
+  }) match {
+    case 1 =>
+      true
+    case 2 =>
+      false
+  }
+end IfMatch
diff --git a/info/labs/lab03/test/resources/parser/outputs/NestedMatch.grading.amy b/info/labs/lab03/test/resources/parser/outputs/NestedMatch.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..66cc61a52f31da59edc7253c8b88bf11d66161cc
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/NestedMatch.grading.amy
@@ -0,0 +1,9 @@
+object NestedMatch
+  foo match {
+    case bar =>
+      baz match {
+        case 42 =>
+          ()
+      }
+  }
+end NestedMatch
diff --git a/info/labs/lab03/test/resources/parser/outputs/Parens.grading.amy b/info/labs/lab03/test/resources/parser/outputs/Parens.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..270cc571867c720be4089a6ac0962b2b9711a6b6
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Parens.grading.amy
@@ -0,0 +1,17 @@
+object Parens
+  -(!((1 + ((2 * 3) / (
+    val x: Int(32) =
+      (
+        42;
+        x
+      );
+    (((x + z match {
+      case foo =>
+        bar
+    }) - 3) == (
+      1;
+      2
+    ))
+  )))))
+end Parens
+
diff --git a/info/labs/lab03/test/resources/parser/outputs/Patterns.grading.amy b/info/labs/lab03/test/resources/parser/outputs/Patterns.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..77863cc37c5ab7015c1fe96e4f02ead88f536cee
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Patterns.grading.amy
@@ -0,0 +1,30 @@
+object Patterns
+  x match {
+    case 1 =>
+      0
+    case () =>
+      0
+    case "" =>
+      0
+    case "Hello" =>
+      0
+    case true =>
+      0
+    case false =>
+      0
+    case x =>
+      0
+    case Variable =>
+      0
+    case _ =>
+      0
+    case C() =>
+      0
+    case C(1) =>
+      0
+    case C(C(1)) =>
+      0
+    case C(C(_, "", C(1, "Hello", ()), ()), D(D(_, D(3))), ()) =>
+      0
+  }
+end Patterns
diff --git a/info/labs/lab03/test/resources/parser/outputs/Precedence.grading.amy b/info/labs/lab03/test/resources/parser/outputs/Precedence.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..9d59185dde75c281ccdd1c46c4fbed3edfa4728c
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/Precedence.grading.amy
@@ -0,0 +1,14 @@
+object Precedence
+  val v: Int(32) =
+    (((1 * 2) < x) || (y + 3));
+  u match {
+    case foo =>
+      bar
+  };
+  (w == -((if(x) {
+    y
+  } else {
+    z
+  })))
+end Precedence
+
diff --git a/info/labs/lab03/test/resources/parser/outputs/QualifiedNames.grading.amy b/info/labs/lab03/test/resources/parser/outputs/QualifiedNames.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..ec7536967ce73c8026ce3888d501461e53841c6b
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/outputs/QualifiedNames.grading.amy
@@ -0,0 +1,11 @@
+object QualifiedNames
+  val x: Faz.boo =
+    Foo.bar(x match {
+      case Foo.baz(foo) =>
+        1
+      case Foo.baz(Foo.bar(), foo) =>
+        2
+    });
+  42
+end QualifiedNames
+
diff --git a/info/labs/lab03/test/resources/parser/passing/Assoc.grading.amy b/info/labs/lab03/test/resources/parser/passing/Assoc.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..b8c9855dcb3ead355444e1be0119cf2955152831
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Assoc.grading.amy
@@ -0,0 +1,8 @@
+object Assoc
+  1 + 2 - 3;
+  x / y * z;
+  -42;
+  1 < 2 <= 3;
+  true || false;
+  1 < 2 && 2 < 3
+end Assoc
diff --git a/info/labs/lab03/test/resources/parser/passing/AssocSemicolon.grading.amy b/info/labs/lab03/test/resources/parser/passing/AssocSemicolon.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..e6366005e03e7b9a7b7109467a2945f321197409
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/AssocSemicolon.grading.amy
@@ -0,0 +1,3 @@
+object AssocSemicolon
+  1; 2; val x: Int(32) = z; val y: Int(32) = w; u
+end AssocSemicolon
diff --git a/info/labs/lab03/test/resources/parser/passing/ChainedMatch.grading.amy b/info/labs/lab03/test/resources/parser/passing/ChainedMatch.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..4d41695e4ed76eab308275e2e4f205a12500761d
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/ChainedMatch.grading.amy
@@ -0,0 +1,7 @@
+object ChainedMatch
+  0 match {
+    case x => x 
+  } match {
+    case 42 => "Yeah"
+  }
+end ChainedMatch
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/passing/ClassDefs.grading.amy b/info/labs/lab03/test/resources/parser/passing/ClassDefs.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..c5d54b0cd980857dfb13b019e067eb35d012f840
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/ClassDefs.grading.amy
@@ -0,0 +1,7 @@
+object CaseClassDefs
+  abstract class Foo
+  case class Bar() extends Foo
+  case class Bar(x: Int(32)) extends Foo
+  abstract class Foo
+  case class Bar(x: Int(32), y: A) extends Foo
+end CaseClassDefs
diff --git a/info/labs/lab03/test/resources/parser/passing/Empty.amy b/info/labs/lab03/test/resources/parser/passing/Empty.amy
new file mode 100644
index 0000000000000000000000000000000000000000..b8246700158e5b24a7415a233d6c5028b0850733
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Empty.amy
@@ -0,0 +1,2 @@
+object Empty
+end Empty
diff --git a/info/labs/lab03/test/resources/parser/passing/ErrorToken1.amy b/info/labs/lab03/test/resources/parser/passing/ErrorToken1.amy
new file mode 100644
index 0000000000000000000000000000000000000000..e865cef3934f0ce95ff66f0275b1c4844c56fe69
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/ErrorToken1.amy
@@ -0,0 +1,3 @@
+object ErrorToken1
+  error("")
+end ErrorToken1
diff --git a/info/labs/lab03/test/resources/parser/passing/ErrorToken2.amy b/info/labs/lab03/test/resources/parser/passing/ErrorToken2.amy
new file mode 100644
index 0000000000000000000000000000000000000000..f876856a7554d97e3e37ab314ee629ef00b4516e
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/ErrorToken2.amy
@@ -0,0 +1,3 @@
+object ErrorToken2
+  false || error("")
+end ErrorToken2
diff --git a/info/labs/lab03/test/resources/parser/passing/FunCalls.grading.amy b/info/labs/lab03/test/resources/parser/passing/FunCalls.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..40a220df9c7e13c546b255e94f402c4a1fab5622
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/FunCalls.grading.amy
@@ -0,0 +1,19 @@
+object FunCalls
+  def foo(i: Int(32)): Int(32) = {
+    i
+  }
+
+  def bar(i: Int(32), j: Int(32)): Int(32) = {
+    i + j
+  }
+
+  foo(1);
+  val foz: Int(32) = 4;
+  foo(foz);
+  foo(val f: Int(32) = 42; f);
+
+  bar(1, 2);
+  val baz: Int(32) = 4;
+  foo(foz, baz);
+  foo(val f: Int(32) = 42; f, val b: Int(32) = 1; b)
+end FunCalls
\ No newline at end of file
diff --git a/info/labs/lab03/test/resources/parser/passing/FunDefs.grading.amy b/info/labs/lab03/test/resources/parser/passing/FunDefs.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..c8d440f91db911140aadc5ac2a7f3774d07814d5
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/FunDefs.grading.amy
@@ -0,0 +1,5 @@
+object FunDefs
+  def foo(): Int(32) = {()}
+  def foo(i: Int(32)): Int(32) = { 42 }
+  def foo(i: I, j: Boolean, s: String, u: Unit, x: X): Type = { 42 }
+end FunDefs
diff --git a/info/labs/lab03/test/resources/parser/passing/IfCondition.grading.amy b/info/labs/lab03/test/resources/parser/passing/IfCondition.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..bb5ae58f4fec76d7685ad2b97934019bd998e580
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/IfCondition.grading.amy
@@ -0,0 +1,3 @@
+object IfCondition
+  if (val x: Boolean = true; x) { 1 } else { 2 }
+end IfCondition
diff --git a/info/labs/lab03/test/resources/parser/passing/List.grading.amy b/info/labs/lab03/test/resources/parser/passing/List.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..8a52a8a7fe9ff1a959fe7cbe192b8992e54a831f
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/List.grading.amy
@@ -0,0 +1,143 @@
+object L
+  abstract class List
+  case class Nil() extends List
+  case class Cons(h: Int(32), t: List) extends List
+ 
+  def isEmpty(l : List): Boolean = { l match {
+    case Nil() => true
+    case _ => false 
+  }}
+
+  def length(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(_, t) => 1 + length(t)
+  }}
+
+  def head(l: List): Int(32) = {
+    l match {
+      case Cons(h, _) => h
+      case Nil() => error("head(Nil)")
+    }
+  }
+
+  def headOption(l: List): O.Option = {
+    l match {
+      case Cons(h, _) => O.Some(h)
+      case Nil() => O.None()
+    }
+  }
+
+  def reverse(l: List): List = {
+    reverseAcc(l, Nil())
+  }
+
+  def reverseAcc(l: List, acc: List): List = {
+    l match {
+      case Nil() => acc
+      case Cons(h, t) => reverseAcc(t, Cons(h, acc))
+    }
+  }
+
+  def indexOf(l: List, i: Int(32)): Int(32) = {
+    l match {
+      case Nil() => -1
+      case Cons(h, t) =>
+        if (h == i) { 0 }
+        else {
+          val rec: Int(32) = indexOf(t, i);
+          if (0 <= rec) { rec + 1 }
+          else { -1 }
+        }
+    }
+  }
+
+  def range(from: Int(32), to: Int(32)): List = {
+    if (to < from) { Nil() }
+    else {
+      Cons(from, range(from + 1, to))
+    }
+  }
+
+  def sum(l: List): Int(32) = { l match {
+    case Nil() => 0
+    case Cons(h, t) => h + sum(t)
+  }}
+
+  def concat(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h, t) => Cons(h, concat(t, l2))
+    }
+  }
+
+  def contains(l: List, elem: Int(32)): Boolean = { l match {
+    case Nil() =>
+      false
+    case Cons(h, t) =>
+      h == elem || contains(t, elem)
+  }}
+
+  abstract class LPair
+  case class LP(l1: List, l2: List) extends LPair
+
+  def merge(l1: List, l2: List): List = {
+    l1 match {
+      case Nil() => l2
+      case Cons(h1, t1) =>
+        l2 match {
+          case Nil() => l1
+          case Cons(h2, t2) =>
+            if (h1 <= h2) {
+              Cons(h1, merge(t1, l2))
+            } else {
+              Cons(h2, merge(l1, t2))
+            }
+        }
+    }
+  }
+
+  def split(l: List): LPair = {
+    l match {
+      case Cons(h1, Cons(h2, t)) =>
+        val rec: LPair = split(t);
+        rec match {
+          case LP(rec1, rec2) =>
+            LP(Cons(h1, rec1), Cons(h2, rec2))
+        }
+      case _ =>
+        LP(l, Nil())
+    }
+  }
+  def mergeSort(l: List): List = {
+    l match {
+      case Nil() => l
+      case Cons(h, Nil()) => l
+      case l =>
+        split(l) match {
+          case LP(l1, l2) =>
+            merge(mergeSort(l1), mergeSort(l2))
+        }
+    }
+  }
+  
+  def toString(l: List): String = { l match {
+    case Nil() => "List()"
+    case more => "List(" ++ toString1(more) ++ ")"
+  }}
+
+  def toString1(l : List): String = { l match {
+    case Cons(h, Nil()) => Std.intToString(h)
+    case Cons(h, t) => Std.intToString(h) ++ ", " ++ toString1(t)
+  }}
+
+  def take(l: List, n: Int(32)): List = {
+    if (n <= 0) { Nil() }
+    else { 
+      l match {
+        case Nil() => Nil()
+        case Cons(h, t) =>
+          Cons(h, take(t, n-1))
+      }
+    }
+  }
+end L 
diff --git a/info/labs/lab03/test/resources/parser/passing/Literals.amy b/info/labs/lab03/test/resources/parser/passing/Literals.amy
new file mode 100644
index 0000000000000000000000000000000000000000..5c35726afaa7cb8db8bbff334f363996e5abdda3
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Literals.amy
@@ -0,0 +1,3 @@
+object Literals
+  1; (); ( ); "Hello"; true
+end Literals
diff --git a/info/labs/lab03/test/resources/parser/passing/MatchScrutinee.grading.amy b/info/labs/lab03/test/resources/parser/passing/MatchScrutinee.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..aa946a6aa10c91b5041afd7783bfbe1b4c2d8c12
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/MatchScrutinee.grading.amy
@@ -0,0 +1,6 @@
+object IfMatch
+  if (true) { 1 } else { 2 } match {
+    case 1 => true
+    case 2 => false
+  }
+end IfMatch
diff --git a/info/labs/lab03/test/resources/parser/passing/NestedMatch.grading.amy b/info/labs/lab03/test/resources/parser/passing/NestedMatch.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..fb12eb239142aeaa31c9af0540d4b283a8f814d1
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/NestedMatch.grading.amy
@@ -0,0 +1,8 @@
+object NestedMatch
+  foo match {
+    case bar => baz match {
+      case 42 => ()
+    }
+  }
+  
+end NestedMatch
diff --git a/info/labs/lab03/test/resources/parser/passing/Parens.grading.amy b/info/labs/lab03/test/resources/parser/passing/Parens.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..8355d402193584fae0c23cecfc14e1050008081b
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Parens.grading.amy
@@ -0,0 +1,3 @@
+object Parens
+  -(!(1 + (2 * 3) / (val x: Int(32) = (42; x) ;( (x + (z match { case foo => bar }) - 3)) == (1; 2))))
+end Parens
diff --git a/info/labs/lab03/test/resources/parser/passing/Patterns.grading.amy b/info/labs/lab03/test/resources/parser/passing/Patterns.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..6d2ee19f46634efdc4038ab195cc9cb56c605480
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Patterns.grading.amy
@@ -0,0 +1,17 @@
+object Patterns
+  x match {
+    case 1 => 0
+    case () => 0
+    case "" => 0
+    case "Hello" => 0
+    case true => 0
+    case false => 0
+    case x => 0
+    case Variable => 0
+    case _ => 0
+    case C() => 0
+    case C(1) => 0
+    case C(C(1)) => 0
+    case C(C(_, "", C(1, "Hello", ()), ()), D(D(_, D(3))), ()) => 0
+  }
+end Patterns
diff --git a/info/labs/lab03/test/resources/parser/passing/Precedence.grading.amy b/info/labs/lab03/test/resources/parser/passing/Precedence.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..61a81521c76e99148d8456ebadb38ad5db5c1366
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/Precedence.grading.amy
@@ -0,0 +1,3 @@
+object Precedence
+  val v: Int(32) = 1 * 2 < x || y + 3;  u match { case foo => bar } ; w == -(if(x) { y } else { z })
+end Precedence
diff --git a/info/labs/lab03/test/resources/parser/passing/QualifiedNames.grading.amy b/info/labs/lab03/test/resources/parser/passing/QualifiedNames.grading.amy
new file mode 100644
index 0000000000000000000000000000000000000000..742c4f26858ed884655313e570a991fe86eab131
--- /dev/null
+++ b/info/labs/lab03/test/resources/parser/passing/QualifiedNames.grading.amy
@@ -0,0 +1,7 @@
+object QualifiedNames
+  val x: Faz.boo = Foo.bar( x match { 
+    case Foo.baz(foo) => 1
+    case Foo.baz(Foo.bar(), foo) => 2
+  });
+  42
+end QualifiedNames
diff --git a/info/labs/lab03/test/scala/amyc/test/CompilerTest.scala b/info/labs/lab03/test/scala/amyc/test/CompilerTest.scala
new file mode 100644
index 0000000000000000000000000000000000000000..1024cfb7ef93b16acb940b9c150b461b19c537ab
--- /dev/null
+++ b/info/labs/lab03/test/scala/amyc/test/CompilerTest.scala
@@ -0,0 +1,94 @@
+package amyc.test
+
+import amyc.utils._
+import java.io.File
+
+import org.junit.Assert.fail
+
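+/** Base class for compiler tests: runs a compiler [[Pipeline]] with redirected IO
+  * and provides helpers to check or compare the output it produces. */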
+abstract class CompilerTest extends TestUtils {
+  private def runPipeline(pipeline: Pipeline[List[File], Unit], fileNames: List[String]) = {
+    val ctx = Context(new Reporter, fileNames)
+    val files = ctx.files.map(new File(_))
+    pipeline.run(ctx)(files)
+    ctx.reporter.terminateIfErrors()
+  }
+
+  private def runPipelineRedirected(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String
+  ): String = {
+    testWithRedirectedIO(runPipeline(pipeline, compiledFiles), input)
+  }
+
+  private def assertEqual(output: String, expected: String) = {
+    val rejectLine = (s: String) =>
+      s.isEmpty ||
+        s.startsWith("[ Info  ]") ||
+        s.startsWith("[Warning]") ||
+        s.startsWith("[ Error ]") ||
+        s.startsWith("[ Fatal ]")
+    def filtered(s: String) = s.linesIterator.filterNot(rejectLine).mkString("\n")
+    val filteredOutput = filtered(output)
+    val filteredExpected = filtered(expected)
+    if (filteredOutput != filteredExpected) {
+      val sb = new StringBuffer()
+      sb.append("\nOutput is different:\n")
+      sb.append("\nOutput: \n")
+      sb.append(filteredOutput)
+      sb.append("\n\nExpected output: \n")
+      sb.append(filteredExpected)
+      sb.append("\n")
+      fail(sb.toString)
+    }
+  }
+
+  protected def compareOutputs(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    expectedFile: String,
+    input: String = ""
+  ) = {
+    try {
+      val output = runPipelineRedirected(pipeline, compiledFiles, input)
+      val expected = scala.io.Source.fromFile(new File(expectedFile)).mkString
+      assertEqual(output, expected)
+    } catch {
+      // We only want to catch AmycFatalError gracefully; the rest can propagate
+      case AmycFatalError(msg) =>
+        fail(s"\n  $msg\n")
+    }
+  }
+
+  protected def demandPass(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String = ""
+  ) = {
+    try {
+      runPipelineRedirected(pipeline, compiledFiles, input)
+    } catch {
+      case AmycFatalError(msg) =>
+        fail(s"\n  $msg\n")
+    }
+  }
+
+  protected def demandFailure(
+    pipeline: Pipeline[List[File], Unit],
+    compiledFiles: List[String],
+    input: String = ""
+  ) = {
+    try {
+      runPipelineRedirected(pipeline, compiledFiles, input)
+      fail("Test should fail but it passed!")
+    } catch {
+      case AmycFatalError(_) =>
+      // Ok, this is what we wanted. Other exceptions should propagate though
+    }
+
+  }
+
+
+}
diff --git a/info/labs/lab03/test/scala/amyc/test/LexerTests.scala b/info/labs/lab03/test/scala/amyc/test/LexerTests.scala
new file mode 100644
index 0000000000000000000000000000000000000000..b4fbceff44045d3807180d286f2d7d8aea0d4178
--- /dev/null
+++ b/info/labs/lab03/test/scala/amyc/test/LexerTests.scala
@@ -0,0 +1,53 @@
+package amyc.test
+
+import amyc.parsing._
+import org.junit.Test
+
+class LexerTests extends TestSuite {
+  val pipeline = AmyLexer.andThen(DisplayTokens)
+
+  val baseDir = "lexer"
+
+  val outputExt = "txt"
+
+  @Test def testKeywords = shouldOutput("Keywords")
+
+  @Test def testIdentifiers = shouldOutput("Identifiers")
+
+  @Test def testOperators = shouldOutput("Operators")
+
+  @Test def testDelimiters = shouldOutput("Delimiters")
+
+  @Test def testCombinations = shouldOutput("Combinations")
+
+  @Test def testComments = shouldOutput("Comments")
+
+  @Test def testIntLiterals = shouldOutput("IntLiterals")
+
+  @Test def testStringLiterals = shouldOutput("StringLiterals")
+
+  @Test def testTwoFiles = shouldOutput(List("Keywords", "Operators"), "TwoFiles")
+
+  @Test def testSingleAmp = shouldFail("SingleAmp")
+
+  @Test def testSingleBar = shouldFail("SingleBar")
+
+  @Test def testUnclosedComment = shouldFail("UnclosedComment")
+
+  @Test def testUnclosedComment2 = shouldFail("UnclosedComment2")
+
+  @Test def testUnclosedComment3 = shouldFail("UnclosedComment3")
+
+  @Test def testCommentClosedTwice = shouldOutput("CommentClosedTwice")
+
+  @Test def testUnclosedString1 = shouldFail("UnclosedString1")
+
+  @Test def testUnclosedString2 = shouldFail("UnclosedString2")
+
+  @Test def testInvalid = shouldFail("Invalid")
+
+  @Test def testTooBigInt = shouldFail("TooBigInt")
+
+  @Test def testWhitespace = shouldOutput("Whitespace")
+
+}
diff --git a/info/labs/lab03/test/scala/amyc/test/ParserTests.scala b/info/labs/lab03/test/scala/amyc/test/ParserTests.scala
new file mode 100644
index 0000000000000000000000000000000000000000..7419cb399457819d517356123368d933db8132fd
--- /dev/null
+++ b/info/labs/lab03/test/scala/amyc/test/ParserTests.scala
@@ -0,0 +1,100 @@
+package amyc.test
+
+import amyc.utils._
+import amyc.ast._
+import amyc.parsing._
+import org.junit.Test
+
+class ParserTests extends TestSuite {
+
+  import NominalTreeModule.{Program => NP}
+
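+  // Pipeline stage that pretty-prints the parsed program with the nominal printer,
+  // so the resulting tree can be compared against the expected files in parser/outputs.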
+  def treePrinterN(title: String): Pipeline[NP, Unit] = {
+    new Pipeline[NP, Unit] {
+      def run(ctx: Context)(v: NP) = {
+        println(title)
+        println(NominalPrinter(v))
+      }
+    }
+  }
+  val pipeline = AmyLexer.andThen(Parser).andThen(treePrinterN(""))
+
+  val baseDir = "parser"
+
+  val outputExt = "amy"
+
+  @Test def testLL1 = {
+    assert(Parser.program.isLL1)
+  }
+
+  
+  @Test def testEmpty = shouldOutput("Empty")
+
+  @Test def testErrorToken1 = shouldOutput("ErrorToken1")
+
+  @Test def testErrorToken2 = shouldOutput("ErrorToken2")
+
+  @Test def testLiterals = shouldOutput("Literals")
+
+  @Test def testPrecedence = shouldOutput("Precedence")
+
+  @Test def testAssoc = shouldOutput("Assoc")
+
+  @Test def testAssocSemicolon = shouldOutput("AssocSemicolon")
+
+  @Test def testFunDefs = shouldOutput("FunDefs")
+
+  @Test def testFunCalls = shouldOutput("FunCalls")
+
+  @Test def testClassDefs = shouldOutput("ClassDefs")
+
+  @Test def testPatterns = shouldOutput("Patterns")
+
+  @Test def testNestedMatch = shouldOutput("NestedMatch")
+
+  @Test def testParens = shouldOutput("Parens")
+
+  @Test def testQualifiedNames = shouldOutput("QualifiedNames")
+
+  @Test def testList = shouldOutput("List")
+
+  @Test def testIfCondition = shouldOutput("IfCondition")
+  @Test def testMatchScrutinee = shouldOutput("MatchScrutinee")
+  @Test def testChainedMatch = shouldOutput("ChainedMatch")
+
+  @Test def testArgsError1 = shouldFail("ArgsError1")
+  @Test def testArgsError2 = shouldFail("ArgsError2")
+  @Test def testClassDefError1 = shouldFail("ClassDefError1")
+  @Test def testClassDefError2 = shouldFail("ClassDefError2")
+  @Test def testClassDefError3 = shouldFail("ClassDefError3")
+  @Test def testClassDefError4 = shouldFail("ClassDefError4")
+  @Test def testCommentClosedTwice = shouldFail("CommentClosedTwice")
+  @Test def testEmptyFile = shouldFail("EmptyFile")
+  @Test def testFunDefError1 = shouldFail("FunDefError1")
+  @Test def testFunDefError2 = shouldFail("FunDefError2")
+  @Test def testFunDefError3 = shouldFail("FunDefError3")
+  @Test def testFunDefError4 = shouldFail("FunDefError4")
+  @Test def testFunDefError5 = shouldFail("FunDefError5")
+  @Test def testFunDefError6 = shouldFail("FunDefError6")
+  @Test def testIfPrecedence = shouldFail("IfPrecedence")
+  @Test def testIntError1 = shouldFail("IntError1")
+  @Test def testIntError2 = shouldFail("IntError2")
+  @Test def testIntError3 = shouldFail("IntError3")
+  @Test def testTypeWidth = shouldFail("TypeWidth")
+  @Test def testMatchAsOperand = shouldFail("MatchAsOperand")
+  @Test def testMissingOperand = shouldFail("MissingOperand")
+  @Test def testUnaryWithinUnary = shouldFail("UnaryWithinUnary")
+  @Test def testUnmatchedModule = shouldFail("UnmatchedModule")
+  @Test def testUnmatchedModuleName = shouldFail("UnmatchedModuleName")
+  @Test def testUnmatchedParen = shouldFail("UnmatchedParen")
+  @Test def testValAsOperand = shouldFail("ValAsOperand")
+  @Test def testValError = shouldFail("ValError")
+  @Test def testValInVal = shouldFail("ValInVal")
+  @Test def testWrongQName1 = shouldFail("WrongQName1")
+  @Test def testWrongQName2 = shouldFail("WrongQName2")
+  @Test def testWrongQName3 = shouldFail("WrongQName3")
+  @Test def testWrongQName4 = shouldFail("WrongQName4")
+
+}
diff --git a/info/labs/lab03/test/scala/amyc/test/TestSuite.scala b/info/labs/lab03/test/scala/amyc/test/TestSuite.scala
new file mode 100644
index 0000000000000000000000000000000000000000..3ce9ebf63be767da33d155a3b191db745a8b0109
--- /dev/null
+++ b/info/labs/lab03/test/scala/amyc/test/TestSuite.scala
@@ -0,0 +1,84 @@
+package amyc.test
+
+import amyc.utils.Pipeline
+import java.io.File
+import java.nio.file.Files
+import java.nio.file.Path
+import java.nio.file.StandardCopyOption
+
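+/** Convenience layer on top of [[CompilerTest]]: resolves test resources under
+  * `effectiveBaseDir`, copies them into a temporary directory and exposes the
+  * shouldOutput / shouldPass / shouldFail helpers used by the concrete suites. */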
+abstract class TestSuite extends CompilerTest {
+  val pipeline: Pipeline[List[File], Unit]
+
+  val baseDir: String
+  lazy val effectiveBaseDir: String =
+    // getClass.getResource(s"/$baseDir").getPath
+    s"test/resources/$baseDir"
+
+  val passing = "passing"
+  val failing = "failing"
+  val outputs = "outputs"
+
+  val tmpDir = Files.createTempDirectory("amyc")
+
+  val outputExt: String
+
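+  /** Copies the given test resource into the temporary directory and returns its
+    * absolute path, falling back to `otherPath` (e.g. the `.grading` variant of a
+    * file) when `relativePath` does not exist under `effectiveBaseDir`. */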
+  def getResourcePath(relativePath: String, otherPath: Option[String] = None): String =
+    val firstPath = Path.of(effectiveBaseDir, relativePath)
+
+    val (stream, path) = 
+      if Files.exists(firstPath) then
+        (Files.newInputStream(firstPath), relativePath)
+      else
+        otherPath match
+          case Some(p) =>
+            val secondPath = Path.of(effectiveBaseDir, p)
+            (Files.newInputStream(secondPath), p)
+          case None =>
+            assert(false, s"cannot read $effectiveBaseDir/$relativePath")
+            (null, "")
+
+    val targetPath = tmpDir.resolve(path)
+    Files.createDirectories(targetPath.getParent())
+    Files.copy(stream, targetPath, StandardCopyOption.REPLACE_EXISTING)
+    targetPath.toAbsolutePath().toString()
+
+  def shouldOutput(inputFiles: List[String], outputFile: String, input: String = ""): Unit = {
+    compareOutputs(
+      pipeline,
+      inputFiles map (f => getResourcePath(s"$passing/$f.amy", Some(s"$passing/$f.grading.amy"))),
+      getResourcePath(s"$outputs/$outputFile.$outputExt", Some(s"$outputs/$outputFile.grading.$outputExt")),
+      input
+    )
+  }
+
+  def shouldOutput(inputFile: String): Unit = {
+    shouldOutput(List(inputFile), inputFile)
+  }
+
+  def shouldFail(inputFiles: List[String], input: String = ""): Unit = {
+    demandFailure(
+      pipeline,
+      inputFiles map (f => getResourcePath(s"$failing/$f.amy", Some(s"$failing/$f.grading.amy"))),
+      input
+    )
+  }
+
+  def shouldFail(inputFile: String): Unit = {
+    shouldFail(List(inputFile))
+  }
+
+  def shouldPass(inputFiles: List[String], input: String = ""): Unit = {
+    demandPass(pipeline, inputFiles map (f => getResourcePath(s"$passing/$f.amy", Some(s"$passing/$f.grading.amy"))), input)
+  }
+
+  def shouldPass(inputFile: String): Unit = {
+    shouldPass(List(inputFile))
+  }
+
+}
diff --git a/info/labs/lab03/test/scala/amyc/test/TestUtils.scala b/info/labs/lab03/test/scala/amyc/test/TestUtils.scala
new file mode 100644
index 0000000000000000000000000000000000000000..6fe74a037e1b04c7fb8ec0a3dffdc920a3db5f42
--- /dev/null
+++ b/info/labs/lab03/test/scala/amyc/test/TestUtils.scala
@@ -0,0 +1,24 @@
+package amyc.test
+
+import java.io._
+
+/** Some utilities for running tests */
+trait TestUtils {
+  /** Runs a test with standard input redirected from the given String,
+    * and with standard output and error captured instead of printed;
+    * the captured output is returned as a String.
+    */
+  def testWithRedirectedIO[T](test: => T, input: String): String = {
+    import scala.Console._
+    val inputS  = new StringReader(input)
+    val outputS = new ByteArrayOutputStream()
+    withOut(outputS) {
+      withErr(outputS) {
+        withIn(inputS) {
+          test
+        }
+      }
+    }
+    outputS.toString()
+  }
+}