Browse Source

feat: Adding ECAL parser

Matthias Ladkau 3 years ago
parent
commit
d8a3618752

+ 27 - 20
lang/ecal/parser/const.go

@@ -165,18 +165,27 @@ Available parser AST node types
 const (
 	NodeEOF = "EOF"
 
-	NodeVALUE = "value" // Simple value
+	NodeCOMMENT    = "comment"    // Comment
+	NodeSTRING     = "string"     // String constant
+	NodeNUMBER     = "number"     // Number constant
+	NodeIDENTIFIER = "identifier" // Identifier
 
 	// Constructed tokens
 
 	NodeSTATEMENTS = "statements" // List of statements
-	NodeLIST       = "list"       // List value
-	NodeMAP        = "map"        // Map value
-	NodeGUARD      = "guard"      // Guard expressions for conditional statements
 
-	// Map entries
+	// Assignment statement
 
-	NodeMAPENTRY = "entry" // Map entry value
+	NodeASSIGN = ":="
+
+	// Arithmetic operators
+
+	NodePLUS   = "plus"
+	NodeMINUS  = "minus"
+	NodeTIMES  = "times"
+	NodeDIV    = "div"
+	NodeMODINT = "modint"
+	NodeDIVINT = "divint"
 
 	// Boolean operators
 
@@ -186,11 +195,11 @@ const (
 
 	// Condition operators
 
-	NodeLIKE       = "like"
-	NodeIN         = "in"
-	NodeBEGINSWITH = "beginswith"
-	NodeENDSWITH   = "endswith"
-	NodeNOTIN      = "notin"
+	NodeLIKE      = "like"
+	NodeIN        = "in"
+	NodeHASPREFIX = "hasPrefix"
+	NodeHASSUFFIX = "hasSuffix"
+	NodeNOTIN     = "notin"
 
 	NodeGEQ = ">="
 	NodeLEQ = "<="
@@ -205,18 +214,15 @@ const (
 	NodeFALSE = "false"
 	NodeNULL  = "null"
 
-	// Arithmetic operators
+/*
 
-	NodePLUS   = "plus"
-	NodeMINUS  = "minus"
-	NodeTIMES  = "times"
-	NodeDIV    = "div"
-	NodeMODINT = "modint"
-	NodeDIVINT = "divint"
+	NodeLIST       = "list"       // List value
+	NodeMAP        = "map"        // Map value
+	NodeGUARD      = "guard"      // Guard expressions for conditional statements
 
-	// Assignment statement
+	// Map entries
 
-	NodeASSIGN = ":="
+	NodeMAPENTRY = "entry" // Map entry value
 
 	// Function call statement
 
@@ -244,4 +250,5 @@ const (
 
 	NodeBREAK    = "break"
 	NodeCONTINUE = "continue"
+*/
 )

+ 151 - 0
lang/ecal/parser/helper.go

@@ -0,0 +1,151 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+
+	"devt.de/krotik/common/datautil"
+	"devt.de/krotik/common/stringutil"
+)
+
+// AST Nodes
+// =========
+
/*
ASTNode models a node in the AST
*/
type ASTNode struct {
	Name     string     // Name of the node
	Token    *LexToken  // Lexer token of this ASTNode
	Children []*ASTNode // Child nodes
	Runtime  Runtime    // Runtime component for this ASTNode

	binding        int                                                             // Binding power of this node - higher values bind more tightly
	nullDenotation func(p *parser, self *ASTNode) (*ASTNode, error)                // Configure token as beginning node
	leftDenotation func(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) // Configure token as left node
}
+
/*
instance creates a new instance of this ASTNode which is connected to a
concrete lexer token. If the parser has a runtime provider the new node is
also decorated with a runtime component.
*/
func (n *ASTNode) instance(p *parser, t *LexToken) *ASTNode {

	ret := &ASTNode{n.Name, t, make([]*ASTNode, 0, 2), nil, n.binding, n.nullDenotation, n.leftDenotation}

	if p.rp != nil {
		ret.Runtime = p.rp.Runtime(ret)
	}

	return ret
}
+
+/*
+String returns a string representation of this token.
+*/
+func (n *ASTNode) String() string {
+	var buf bytes.Buffer
+	n.levelString(0, &buf)
+	return buf.String()
+}
+
+/*
+levelString function to recursively print the tree.
+*/
+func (n *ASTNode) levelString(indent int, buf *bytes.Buffer) {
+
+	// Print current level
+
+	buf.WriteString(stringutil.GenerateRollingString(" ", indent*2))
+
+	if n.Name == NodeCOMMENT {
+		buf.WriteString(fmt.Sprintf("%v: %20v", n.Name, n.Token.Val))
+	} else if n.Name == NodeSTRING {
+		buf.WriteString(fmt.Sprintf("%v: '%v'", n.Name, n.Token.Val))
+	} else if n.Name == NodeNUMBER {
+		buf.WriteString(fmt.Sprintf("%v: %v", n.Name, n.Token.Val))
+	} else if n.Name == NodeIDENTIFIER {
+		buf.WriteString(fmt.Sprintf("%v: %v", n.Name, n.Token.Val))
+	} else {
+		buf.WriteString(n.Name)
+	}
+
+	buf.WriteString("\n")
+
+	// Print children
+
+	for _, child := range n.Children {
+		child.levelString(indent+1, buf)
+	}
+}
+
+// Look ahead buffer
+// =================
+
/*
LABuffer is a look-ahead buffer. It reads tokens from a lexer token channel
and buffers a fixed number of upcoming tokens so they can be inspected
before being consumed.
*/
type LABuffer struct {
	tokens chan LexToken        // Source channel of lexer tokens
	buffer *datautil.RingBuffer // Ring buffer holding the look-ahead tokens
}
+
/*
NewLABuffer creates a new look-ahead buffer instance which reads from a
given token channel. The buffer is immediately filled with up to size
tokens (a minimum of one) so look-ahead is possible right away.
*/
func NewLABuffer(c chan LexToken, size int) *LABuffer {

	// A look-ahead of less than one token is not useful - enforce a minimum

	if size < 1 {
		size = 1
	}

	ret := &LABuffer{c, datautil.NewRingBuffer(size)}

	// Pre-fill the buffer - stop early if the channel closes or EOF arrives

	v, more := <-ret.tokens
	ret.buffer.Add(v)

	for ret.buffer.Size() < size && more && v.ID != TokenEOF {
		v, more = <-ret.tokens
		ret.buffer.Add(v)
	}

	return ret
}
+
+/*
+Next returns the next item.
+*/
+func (b *LABuffer) Next() (LexToken, bool) {
+
+	ret := b.buffer.Poll()
+
+	if v, more := <-b.tokens; more {
+		b.buffer.Add(v)
+	}
+
+	if ret == nil {
+		return LexToken{ID: TokenEOF}, false
+	}
+
+	return ret.(LexToken), true
+}
+
+/*
+Peek looks inside the buffer starting with 0 as the next item.
+*/
+func (b *LABuffer) Peek(pos int) (LexToken, bool) {
+
+	if pos >= b.buffer.Size() {
+		return LexToken{ID: TokenEOF}, false
+	}
+
+	return b.buffer.Get(pos).(LexToken), true
+}

+ 191 - 0
lang/ecal/parser/helper_test.go

@@ -0,0 +1,191 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"testing"
+)
+
+func TestLABuffer(t *testing.T) {
+
+	buf := NewLABuffer(Lex("test", "1 2 3 4 5 6 7 8 9"), 3)
+
+	if token, ok := buf.Next(); token.Val != "1" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "2" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Check Peek
+
+	if token, ok := buf.Peek(0); token.Val != "3" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(1); token.Val != "4" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(2); token.Val != "5" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(3); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Continue
+
+	if token, ok := buf.Next(); token.Val != "3" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "4" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "5" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "6" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "7" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "8" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Check Peek
+
+	if token, ok := buf.Peek(0); token.Val != "9" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(1); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(2); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Continue
+
+	if token, ok := buf.Next(); token.Val != "9" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Check Peek
+
+	if token, ok := buf.Peek(0); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(1); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Continue
+
+	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// New Buffer
+
+	buf = NewLABuffer(Lex("test", "1 2 3"), 3)
+
+	if token, ok := buf.Next(); token.Val != "1" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "2" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// Check Peek
+
+	if token, ok := buf.Peek(0); token.Val != "3" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(1); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(2); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.Val != "3" || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	// New Buffer - test edge case
+
+	buf = NewLABuffer(Lex("test", ""), 0)
+
+	if token, ok := buf.Peek(0); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Peek(0); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+
+	if token, ok := buf.Next(); token.ID != TokenEOF || ok {
+		t.Error("Unexpected result: ", token, ok)
+		return
+	}
+}

+ 391 - 0
lang/ecal/parser/parser.go

@@ -0,0 +1,391 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"fmt"
+)
+
/*
astNodeMap maps lexer tokens to their corresponding AST node templates.
The fifth field of each entry is the binding power (precedence) - higher
values bind more tightly; the last two fields are the null and left
denotation functions used by the parser.
*/
var astNodeMap map[LexTokenID]*ASTNode

func init() {
	astNodeMap = map[LexTokenID]*ASTNode{
		TokenEOF: {NodeEOF, nil, nil, nil, 0, ndTerm, nil},

		// Value tokens

		TokenCOMMENT:    {NodeCOMMENT, nil, nil, nil, 0, ndTerm, nil},
		TokenSTRING:     {NodeSTRING, nil, nil, nil, 0, ndTerm, nil},
		TokenNUMBER:     {NodeNUMBER, nil, nil, nil, 0, ndTerm, nil},
		TokenIDENTIFIER: {NodeIDENTIFIER, nil, nil, nil, 0, ndTerm, nil},

		// Constructed tokens

		TokenSTATEMENTS: {NodeSTATEMENTS, nil, nil, nil, 0, nil, nil},
		TokenSEMICOLON:  {"", nil, nil, nil, 0, nil, nil},
		/*
			TokenLIST:       {NodeLIST, nil, nil, nil, 0, nil, nil},
			TokenMAP:        {NodeMAP, nil, nil, nil, 0, nil, nil},
			TokenGUARD:      {NodeGUARD, nil, nil, nil, 0, nil, nil},
		*/

		// Grouping symbols

		TokenLPAREN: {"", nil, nil, nil, 150, ndInner, nil},
		TokenRPAREN: {"", nil, nil, nil, 0, nil, nil},

		// Separators

		TokenCOMMA: {"", nil, nil, nil, 0, nil, nil},

		// Assignment statement

		TokenASSIGN: {NodeASSIGN, nil, nil, nil, 10, nil, ldInfix},

		// Simple arithmetic expressions

		TokenPLUS:   {NodePLUS, nil, nil, nil, 110, ndPrefix, ldInfix},
		TokenMINUS:  {NodeMINUS, nil, nil, nil, 110, ndPrefix, ldInfix},
		TokenTIMES:  {NodeTIMES, nil, nil, nil, 120, nil, ldInfix},
		TokenDIV:    {NodeDIV, nil, nil, nil, 120, nil, ldInfix},
		TokenDIVINT: {NodeDIVINT, nil, nil, nil, 120, nil, ldInfix},
		TokenMODINT: {NodeMODINT, nil, nil, nil, 120, nil, ldInfix},

		// Boolean operators

		TokenOR:  {NodeOR, nil, nil, nil, 30, nil, ldInfix},
		TokenAND: {NodeAND, nil, nil, nil, 40, nil, ldInfix},
		TokenNOT: {NodeNOT, nil, nil, nil, 20, ndPrefix, nil},

		// Condition operators

		TokenLIKE:      {NodeLIKE, nil, nil, nil, 60, nil, ldInfix},
		TokenIN:        {NodeIN, nil, nil, nil, 60, nil, ldInfix},
		TokenHASPREFIX: {NodeHASPREFIX, nil, nil, nil, 60, nil, ldInfix},
		TokenHASSUFFIX: {NodeHASSUFFIX, nil, nil, nil, 60, nil, ldInfix},
		TokenNOTIN:     {NodeNOTIN, nil, nil, nil, 60, nil, ldInfix},

		TokenGEQ: {NodeGEQ, nil, nil, nil, 60, nil, ldInfix},
		TokenLEQ: {NodeLEQ, nil, nil, nil, 60, nil, ldInfix},
		TokenNEQ: {NodeNEQ, nil, nil, nil, 60, nil, ldInfix},
		TokenEQ:  {NodeEQ, nil, nil, nil, 60, nil, ldInfix},
		TokenGT:  {NodeGT, nil, nil, nil, 60, nil, ldInfix},
		TokenLT:  {NodeLT, nil, nil, nil, 60, nil, ldInfix},

		// Constants

		TokenFALSE: {NodeFALSE, nil, nil, nil, 0, ndTerm, nil},
		TokenTRUE:  {NodeTRUE, nil, nil, nil, 0, ndTerm, nil},
		TokenNULL:  {NodeNULL, nil, nil, nil, 0, ndTerm, nil},
	}
}
+
+// Parser
+// ======
+
/*
parser holds the state of a single parse run.
*/
type parser struct {
	name   string          // Name to identify the input
	node   *ASTNode        // Current ast node
	tokens *LABuffer       // Buffer which is connected to the channel which contains lex tokens
	rp     RuntimeProvider // Runtime provider which creates runtime components (may be nil)
}
+
/*
Parse parses a given input string and returns an AST.
*/
func Parse(name string, input string) (*ASTNode, error) {
	// Convenience wrapper - parse without decorating the AST with runtime components
	return ParseWithRuntime(name, input, nil)
}
+
/*
ParseWithRuntime parses a given input string and returns an AST decorated with
runtime components. If the input contains more than one statement the result
is wrapped in a statements node.
*/
func ParseWithRuntime(name string, input string, rp RuntimeProvider) (*ASTNode, error) {

	// Create a new parser with a look-ahead buffer of 3

	p := &parser{name, nil, NewLABuffer(Lex(name, input), 3), rp}

	// Read and set initial AST node

	node, err := p.next()

	if err != nil {
		return nil, err
	}

	p.node = node

	n, err := p.run(0)

	if err == nil && hasMoreStatements(p, n) {

		// Multiple statements are collected under a constructed statements node

		st := astNodeMap[TokenSTATEMENTS].instance(p, nil)
		st.Children = append(st.Children, n)

		for err == nil && hasMoreStatements(p, n) {

			// Skip semicolons

			if p.node.Token.ID == TokenSEMICOLON {
				skipToken(p, TokenSEMICOLON)
			}

			n, err = p.run(0)
			st.Children = append(st.Children, n)
		}

		n = st
	}

	// The whole input must have been consumed - leftover tokens are an error

	if err == nil && p.node != nil && p.node.Token.ID != TokenEOF {
		token := *p.node.Token
		err = p.newParserError(ErrUnexpectedEnd, fmt.Sprintf("extra token id:%v (%v)",
			token.ID, token), token)
	}

	return n, err
}
+
/*
run models the main parser function. It implements a top-down operator
precedence (Pratt) parser: rightBinding is the right binding power of the
caller - left denotations are collected as long as the upcoming token binds
more tightly.
*/
func (p *parser) run(rightBinding int) (*ASTNode, error) {
	var err error

	n := p.node

	p.node, err = p.next()
	if err != nil {
		return nil, err
	}

	// Start with the null denotation of this statement / expression

	if n.nullDenotation == nil {
		return nil, p.newParserError(ErrImpossibleNullDenotation,
			n.Token.String(), *n.Token)
	}

	left, err := n.nullDenotation(p, n)
	if err != nil {
		return nil, err
	}

	// Collect left denotations as long as the left binding power is greater
	// than the initial right one

	for rightBinding < p.node.binding {
		var nleft *ASTNode

		n = p.node

		if n.leftDenotation == nil {

			if left.Token.Lline < n.Token.Lline {

				// If the impossible left denotation is on a new line
				// we might be parsing a new statement

				return left, nil
			}

			return nil, p.newParserError(ErrImpossibleLeftDenotation,
				n.Token.String(), *n.Token)
		}

		p.node, err = p.next()

		if err != nil {
			return nil, err
		}

		// Get the next left denotation

		nleft, err = n.leftDenotation(p, n, left)

		left = nleft

		if err != nil {
			return nil, err
		}
	}

	return left, nil
}
+
+/*
+next retrieves the next lexer token.
+*/
+func (p *parser) next() (*ASTNode, error) {
+
+	token, more := p.tokens.Next()
+
+	if !more {
+
+		// Unexpected end of input - the associated token is an empty error token
+
+		return nil, p.newParserError(ErrUnexpectedEnd, "", token)
+
+	} else if token.ID == TokenError {
+
+		// There was a lexer error wrap it in a parser error
+
+		return nil, p.newParserError(ErrLexicalError, token.Val, token)
+
+	} else if node, ok := astNodeMap[token.ID]; ok {
+
+		// We got a normal AST component
+
+		return node.instance(p, &token), nil
+	}
+
+	return nil, p.newParserError(ErrUnknownToken, fmt.Sprintf("id:%v (%v)", token.ID, token), token)
+}
+
+// Standard null denotation functions
+// ==================================
+
/*
ndTerm is used for terminals. A terminal carries all of its information in
its token so the node is returned unchanged.
*/
func ndTerm(p *parser, self *ASTNode) (*ASTNode, error) {
	return self, nil
}
+
+/*
+ndInner returns the inner expression of an enclosed block and discard the
+block token. This method is used for brackets.
+*/
+func ndInner(p *parser, self *ASTNode) (*ASTNode, error) {
+
+	// Get the inner expression
+
+	exp, err := p.run(0)
+	if err != nil {
+		return nil, err
+	}
+
+	// We return here the inner expression - discarding the bracket tokens
+
+	return exp, skipToken(p, TokenRPAREN)
+}
+
+/*
+ndPrefix is used for prefix operators.
+*/
+func ndPrefix(p *parser, self *ASTNode) (*ASTNode, error) {
+
+	// Make sure a prefix will only prefix the next item
+
+	val, err := p.run(self.binding + 20)
+	if err != nil {
+		return nil, err
+	}
+
+	self.Children = append(self.Children, val)
+
+	return self, nil
+}
+
+// Standard left denotation functions
+// ==================================
+
+/*
+ldInfix is used for infix operators.
+*/
+func ldInfix(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) {
+
+	right, err := p.run(self.binding)
+	if err != nil {
+		return nil, err
+	}
+
+	self.Children = append(self.Children, left)
+	self.Children = append(self.Children, right)
+
+	return self, nil
+}
+
+// Helper functions
+// ================
+
+/*
+hasMoreStatements returns true if there are more statements to parse.
+*/
+func hasMoreStatements(p *parser, currentNode *ASTNode) bool {
+	nextNode := p.node
+
+	if nextNode == nil || nextNode.Token.ID == TokenEOF {
+		return false
+	} else if nextNode.Token.ID == TokenSEMICOLON {
+		return true
+	}
+
+	return currentNode != nil && currentNode.Token.Lline < nextNode.Token.Lline
+}
+
+/*
+skipToken skips over a given token.
+*/
+func skipToken(p *parser, ids ...LexTokenID) error {
+	var err error
+
+	canSkip := func(id LexTokenID) bool {
+		for _, i := range ids {
+			if i == id {
+				return true
+			}
+		}
+		return false
+	}
+
+	if !canSkip(p.node.Token.ID) {
+		if p.node.Token.ID == TokenEOF {
+			return p.newParserError(ErrUnexpectedEnd, "", *p.node.Token)
+		}
+		return p.newParserError(ErrUnexpectedToken, p.node.Token.Val, *p.node.Token)
+	}
+
+	// This should never return an error unless we skip over EOF or complex tokens
+	// like values
+
+	p.node, err = p.next()
+
+	return err
+}
+
+/*
+acceptChild accepts the current token as a child.
+*/
+func acceptChild(p *parser, self *ASTNode, id LexTokenID) error {
+	var err error
+
+	current := p.node
+
+	p.node, err = p.next()
+	if err != nil {
+		return err
+	}
+
+	if current.Token.ID == id {
+		self.Children = append(self.Children, current)
+		return nil
+	}
+
+	return p.newParserError(ErrUnexpectedToken, current.Token.Val, *current.Token)
+}

+ 63 - 0
lang/ecal/parser/parser_helper_test.go

@@ -0,0 +1,63 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+)
+
// Main function for all tests in this package

func TestMain(m *testing.M) {
	flag.Parse()

	res := m.Run()

	// Check if all nodes have been tested - report (but do not fail on) any
	// AST node type which was never produced during the test run

	for _, n := range astNodeMap {
		if _, ok := usedNodes[n.Name]; !ok {
			fmt.Println("Not tested node: ", n.Name)
		}
	}

	os.Exit(res)
}
+
// Used nodes map which is filled during unit testing. Prefilled with tokens which
// will not be generated by the parser. TestMain reports any astNodeMap entry
// missing from this map after the tests have run.
//
var usedNodes = map[string]bool{
	NodeEOF: true, // Only used as end term
	"":      true, // No node e.g. semicolon - These nodes should never be part of an AST
}
+
+func UnitTestParse(name string, input string) (*ASTNode, error) {
+	n, err := ParseWithRuntime(name, input, &DummyRuntimeProvider{})
+
+	return n, err
+}
+
// Helper objects

/*
DummyRuntimeProvider is a runtime provider for unit testing which records
the AST node types produced by the parser.
*/
type DummyRuntimeProvider struct {
}

/*
Runtime records the given node as used and returns no actual runtime
component.
*/
func (d *DummyRuntimeProvider) Runtime(n *ASTNode) Runtime {

	// Mark the encountered node as used

	usedNodes[n.Name] = true

	return nil
}

+ 81 - 0
lang/ecal/parser/parser_main_test.go

@@ -0,0 +1,81 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"fmt"
+	"testing"
+)
+
/*
TestSimpleExpressionParsing checks the parser's error reporting (exact error
strings including positions) and prefix operator handling.
*/
func TestSimpleExpressionParsing(t *testing.T) {

	// Test error output

	input := `"bl\*a"conversion`
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Lexical error (invalid syntax while parsing string) (Line:1 Pos:1)" {
		t.Error(err)
		return
	}

	// Test incomplete expression

	input = `a *`
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Unexpected end" {
		t.Error(err)
		return
	}

	input = `not ==`
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Term cannot start an expression (==) (Line:1 Pos:5)" {
		t.Error(err)
		return
	}

	input = `(==)`
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Term cannot start an expression (==) (Line:1 Pos:2)" {
		t.Error(err)
		return
	}

	input = "5 ( 5"
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Term can only start an expression (() (Line:1 Pos:3)" {
		t.Error(err)
		return
	}

	input = "5 + \""
	if _, err := UnitTestParse("mytest", input); err.Error() !=
		"Parse error in mytest: Lexical error (Unexpected end while reading string value (unclosed quotes)) (Line:1 Pos:5)" {
		t.Error(err)
		return
	}

	// Test prefix operator

	input = ` + a - -5`
	expectedOutput := `
minus
  plus
    identifier: a
  minus
    number: 5
`[1:]

	if res, err := UnitTestParse("mytest", input); err != nil || fmt.Sprint(res) != expectedOutput {
		t.Error("Unexpected parser output:\n", res, "expected was:\n", expectedOutput, "Error:", err)
		return
	}

}

+ 64 - 0
lang/ecal/parser/parsererror.go

@@ -0,0 +1,64 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"errors"
+	"fmt"
+)
+
/*
newParserError creates a new ParserError object.
*/
func (p *parser) newParserError(t error, d string, token LexToken) error {
	// Position information is taken from the lexer token which caused the error
	return &Error{p.name, t, d, token.Lline, token.Lpos}
}
+
/*
Error models a parser related error.
*/
type Error struct {
	Source string // Name of the source which was given to the parser
	Type   error  // Error type (to be used for equal checks)
	Detail string // Details of this error
	Line   int    // Line of the error
	Pos    int    // Position of the error
}

/*
Error returns a human-readable string representation of this error.
*/
func (pe *Error) Error() string {

	// Start with the error type, appending detail information if available

	msg := fmt.Sprintf("Parse error in %s: %v", pe.Source, pe.Type)

	if pe.Detail != "" {
		msg = fmt.Sprintf("%s (%v)", msg, pe.Detail)
	}

	// Append the position in the source if it is known

	if pe.Line != 0 {
		msg = fmt.Sprintf("%s (Line:%d Pos:%d)", msg, pe.Line, pe.Pos)
	}

	return msg
}

/*
Parser related error types
*/
var (
	ErrUnexpectedEnd            = errors.New("Unexpected end")
	ErrLexicalError             = errors.New("Lexical error")
	ErrUnknownToken             = errors.New("Unknown term")
	ErrImpossibleNullDenotation = errors.New("Term cannot start an expression")
	ErrImpossibleLeftDenotation = errors.New("Term can only start an expression")
	ErrUnexpectedToken          = errors.New("Unexpected term")
)

+ 67 - 0
lang/ecal/parser/runtime.go

@@ -0,0 +1,67 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
/*
RuntimeProvider provides runtime components for a parse tree. It is
consulted for every AST node the parser creates.
*/
type RuntimeProvider interface {

	/*
	   Runtime returns a runtime component for a given ASTNode.
	*/
	Runtime(node *ASTNode) Runtime
}
+
/*
Runtime provides the runtime for an ASTNode.
*/
type Runtime interface {

	/*
	   Validate this runtime component and all its child components.
	*/
	Validate() error

	/*
		Eval evaluate this runtime component. It gets passed the current variable
		scope and the instance state.

		The instance state is created per execution instance and can be used
		for generator functions to store their current state.
	*/
	Eval(VarsScope, map[string]interface{}) (interface{}, error)
}
+
/*
VarsScope is used to store variable data and keep track of scoping.
Child scopes can see values of their parents while parents cannot see
values of their children (standard lexical scoping) - NOTE(review): this
behaviour is implied by NewChild but implemented elsewhere; confirm against
the concrete implementation.
*/
type VarsScope interface {

	/*
	   NewChild creates a new child variable scope.
	*/
	NewChild(name string) VarsScope

	/*
	   SetValue sets a new value for a variable.
	*/
	SetValue(varName string, varValue interface{}) error

	/*
	   GetValue gets the current value of a variable.
	*/
	GetValue(varName string) (interface{}, bool, error)

	/*
	   String returns a string representation of this variable scope.
	*/
	String() string
}