
fix: Remove ECAL parser and fix some race conditions

Matthias Ladkau, 8 months ago · commit d6510dd9cd

+ 13 - 6
flowutil/eventpump.go

@@ -75,22 +75,29 @@ func (ep *EventPump) PostEvent(event string, eventSource interface{}) {
 		panic("Posting an event requires the event and its source")
 	}
 
-	ep.eventsObserversLock.Lock()
-	defer ep.eventsObserversLock.Unlock()
-
 	postEvent := func(event string, eventSource interface{}) {
 
-		if sources, ok := ep.eventsObservers[event]; ok {
+		ep.eventsObserversLock.Lock()
+		sources, ok := ep.eventsObservers[event]
+		if ok {
+			origsources := sources
+			sources = make(map[interface{}][]EventCallback)
+			for source, callbacks := range origsources {
+				sources[source] = callbacks
+			}
+		}
+		ep.eventsObserversLock.Unlock()
+
+		if ok {
 			for source, callbacks := range sources {
 				if source == eventSource || source == nil {
 					for _, callback := range callbacks {
-						ep.eventsObserversLock.Unlock()
 						callback(event, eventSource)
-						ep.eventsObserversLock.Lock()
 					}
 				}
 			}
 		}
+
 	}
 
 	postEvent(event, eventSource)
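
The hunk above is the heart of the race-condition fix. Previously, `PostEvent` held `eventsObserversLock` for the whole loop and temporarily unlocked it around each callback invocation, which let another goroutine (or the callback itself) mutate `eventsObservers` while the map was being iterated. The new code copies the observer map for the event while holding the lock, then runs the callbacks without it. A minimal, self-contained sketch of this copy-under-lock pattern (the names `EventPump` and `EventCallback` mirror the real code; the mutex field, constructor literal, and demo are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

// EventCallback mirrors the callback signature used by the event pump.
type EventCallback func(event string, eventSource interface{})

// EventPump holds observers per event name and per event source.
type EventPump struct {
	mu        sync.Mutex
	observers map[string]map[interface{}][]EventCallback
}

// PostEvent snapshots the observers for an event under the lock, then
// releases the lock before invoking callbacks. A callback can therefore
// safely register or remove observers without deadlocking or racing
// against the iteration.
func (ep *EventPump) PostEvent(event string, source interface{}) {
	ep.mu.Lock()
	snapshot := make(map[interface{}][]EventCallback)
	for src, cbs := range ep.observers[event] { // ranging over a nil map is fine
		snapshot[src] = cbs
	}
	ep.mu.Unlock()

	for src, cbs := range snapshot {
		if src == source || src == nil { // nil observes every source
			for _, cb := range cbs {
				cb(event, source) // runs without the lock held
			}
		}
	}
}

func main() {
	ep := &EventPump{observers: map[string]map[interface{}][]EventCallback{
		"ping": {nil: {func(e string, s interface{}) { fmt.Println("received", e) }}},
	}}
	ep.PostEvent("ping", "demo")
}
```

Observers registered after the snapshot is taken will not see the event currently being posted; that is the usual trade-off of this pattern and is normally acceptable for event dispatch.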

File diff suppressed because it is too large
+ 0 - 371
lang/ecal/README.md


+ 0 - 285
lang/ecal/parser/const.go

@@ -1,285 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-/*
-Package parser contains an ECAL parser.
-
-Lexer for Source Text
-
-Lex() is a lexer function to convert a given input string into a list of tokens.
-
-Based on a talk by Rob Pike: Lexical Scanning in Go
-
-https://www.youtube.com/watch?v=HxaD_trXwRE
-
-The lexer's output is pushed into a channel which is consumed by the parser.
-This design enables the concurrent processing of the input text by lexer and
-parser.
-
-Parser
-
-Parse() is a parser which produces a parse tree from a given set of lexer tokens.
-
-Based on an article by Douglas Crockford: Top Down Operator Precedence
-
-http://crockford.com/javascript/tdop/tdop.html
-
-which is based on the ideas of Vaughan Pratt and his paper: Top Down Operator Precedence
-
-http://portal.acm.org/citation.cfm?id=512931
-https://tdop.github.io/
-
-ParseWithRuntime() parses a given input and decorates the resulting parse tree
-with runtime components which can be used to interpret the parsed input.
-*/
-package parser
-
-/*
-LexTokenID represents a unique lexer token ID
-*/
-type LexTokenID int
-
-/*
-Available meta data types
-*/
-const (
-	MetaDataPreComment  = "MetaDataPreComment"
-	MetaDataPostComment = "MetaDataPostComment"
-	MetaDataGeneral     = "MetaDataGeneral"
-)
-
-/*
-Available lexer token types
-*/
-const (
-	TokenError LexTokenID = iota // Lexing error token with a message as val
-	TokenEOF                     // End-of-file token
-	TokenANY                     // Unspecified token (used when building an AST from a Go map structure)
-
-	TokenPRECOMMENT  // Comment /* ... */
-	TokenPOSTCOMMENT // Comment # ...
-
-	// Value tokens
-
-	TokenSTRING     // String constant
-	TokenNUMBER     // Number constant
-	TokenIDENTIFIER // Identifier
-
-	// Constructed tokens which are generated by the parser not the lexer
-
-	TokenSTATEMENTS // A code block
-	TokenFUNCCALL   // A function call
-	TokenCOMPACCESS // Access to a composition structure
-	TokenLIST       // List value
-	TokenMAP        // MAP value
-	TokenMAP        // Map value
-	TokenGUARD      // Conditional statements
-
-	TOKENodeSYMBOLS // Used to separate symbols from other tokens in this list
-
-	// Condition operators
-
-	TokenGEQ
-	TokenLEQ
-	TokenNEQ
-	TokenEQ
-	TokenGT
-	TokenLT
-
-	// Grouping symbols
-
-	TokenLPAREN
-	TokenRPAREN
-	TokenLBRACK
-	TokenRBRACK
-	TokenLBRACE
-	TokenRBRACE
-
-	// Separators
-
-	TokenDOT
-	TokenCOMMA
-	TokenSEMICOLON
-
-	// Grouping
-
-	TokenCOLON
-	TokenEQUAL
-
-	// Arithmetic operators
-
-	TokenPLUS
-	TokenMINUS
-	TokenTIMES
-	TokenDIV
-	TokenDIVINT
-	TokenMODINT
-
-	// Assignment statement
-
-	TokenASSIGN
-
-	TOKENodeKEYWORDS // Used to separate keywords from other tokens in this list
-
-	// Import statement
-
-	TokenIMPORT
-	TokenAS
-
-	// Sink definition
-
-	TokenSINK
-	TokenKINDMATCH
-	TokenSCOPEMATCH
-	TokenSTATEMATCH
-	TokenPRIORITY
-	TokenSUPPRESSES
-
-	// Function definition
-
-	TokenFUNC
-	TokenRETURN
-
-	// Boolean operators
-
-	TokenAND
-	TokenOR
-	TokenNOT
-
-	// Condition operators
-
-	TokenLIKE
-	TokenIN
-	TokenHASPREFIX
-	TokenHASSUFFIX
-	TokenNOTIN
-
-	// Constant terminals
-
-	TokenFALSE
-	TokenTRUE
-	TokenNULL
-
-	// Conditional statements
-
-	TokenIF
-	TokenELIF
-	TokenELSE
-
-	// Loop statements
-
-	TokenFOR
-	TokenBREAK
-	TokenCONTINUE
-
-	TokenENDLIST
-)
-
-/*
-IsValidTokenID checks if a given token ID is valid.
-*/
-func IsValidTokenID(value int) bool {
-	return value < int(TokenENDLIST)
-}
-
-/*
-Available parser AST node types
-*/
-const (
-	NodeEOF = "EOF"
-
-	NodeSTRING     = "string"     // String constant
-	NodeNUMBER     = "number"     // Number constant
-	NodeIDENTIFIER = "identifier" // Identifier
-
-	// Constructed tokens
-
-	NodeSTATEMENTS = "statements" // List of statements
-	NodeFUNCCALL   = "funccall"   // Function call
-	NodeCOMPACCESS = "compaccess" // Composition structure access
-	NodeLIST       = "list"       // List value
-	NodeMAP        = "map"        // Map value
-	NodePARAMS     = "params"     // Function parameters
-	NodeGUARD      = "guard"      // Guard expressions for conditional statements
-
-	// Condition operators
-
-	NodeGEQ = ">="
-	NodeLEQ = "<="
-	NodeNEQ = "!="
-	NodeEQ  = "=="
-	NodeGT  = ">"
-	NodeLT  = "<"
-
-	// Separators
-
-	NodeKVP    = "kvp"    // Key-value pair
-	NodePRESET = "preset" // Preset value
-
-	// Arithmetic operators
-
-	NodePLUS   = "plus"
-	NodeMINUS  = "minus"
-	NodeTIMES  = "times"
-	NodeDIV    = "div"
-	NodeMODINT = "modint"
-	NodeDIVINT = "divint"
-
-	// Assignment statement
-
-	NodeASSIGN = ":="
-
-	// Import statement
-
-	NodeIMPORT = "import"
-
-	// Sink definition
-
-	NodeSINK       = "sink"
-	NodeKINDMATCH  = "kindmatch"
-	NodeSCOPEMATCH = "scopematch"
-	NodeSTATEMATCH = "statematch"
-	NodePRIORITY   = "priority"
-	NodeSUPPRESSES = "suppresses"
-
-	// Function definition
-
-	NodeFUNC   = "function"
-	NodeRETURN = "return"
-
-	// Boolean operators
-
-	NodeAND = "and"
-	NodeOR  = "or"
-	NodeNOT = "not"
-
-	// Condition operators
-
-	NodeLIKE      = "like"
-	NodeIN        = "in"
-	NodeHASPREFIX = "hasprefix"
-	NodeHASSUFFIX = "hassuffix"
-	NodeNOTIN     = "notin"
-
-	// Constant terminals
-
-	NodeTRUE  = "true"
-	NodeFALSE = "false"
-	NodeNULL  = "null"
-
-	// Conditional statements
-
-	NodeIF = "if"
-
-	// Loop statements
-
-	NodeLOOP     = "loop"
-	NodeBREAK    = "break"
-	NodeCONTINUE = "continue"
-)
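
The package documentation at the top of this file describes a Pratt-style (top-down operator precedence) parser; the binding powers and null/left denotations it mentions appear in the `astNodeMap` table in parser.go further down. For readers unfamiliar with the technique, here is a minimal, hypothetical TDOP expression evaluator showing the core loop (none of these names come from the ECAL code, and a real parser builds AST nodes instead of evaluating directly):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Binding power per infix operator: higher binds tighter.
var bp = map[string]int{"+": 10, "*": 20}

type tdop struct {
	toks []string
	pos  int
}

func (p *tdop) next() string {
	t := p.toks[p.pos]
	p.pos++
	return t
}

func (p *tdop) peek() string {
	if p.pos < len(p.toks) {
		return p.toks[p.pos]
	}
	return ""
}

// expression is the heart of TDOP: consume a "null denotation" (here
// always a number literal), then keep consuming "left denotations"
// (infix operators) while their binding power exceeds rbp.
func (p *tdop) expression(rbp int) int {
	left, _ := strconv.Atoi(p.next()) // nud: assume a number literal
	for bp[p.peek()] > rbp {
		op := p.next() // led: infix operator
		right := p.expression(bp[op])
		if op == "+" {
			left += right
		} else {
			left *= right
		}
	}
	return left
}

func main() {
	p := &tdop{toks: strings.Fields("1 + 2 * 3 + 4")}
	fmt.Println(p.expression(0)) // 11: precedence handled by binding powers
}
```

The single `expression(rbp)` loop replaces the usual cascade of one grammar function per precedence level; precedence is data (the `bp` map), which is why parser.go can drive the whole grammar from one token table.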

+ 0 - 454
lang/ecal/parser/helper.go

@@ -1,454 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"bytes"
-	"fmt"
-	"strconv"
-
-	"devt.de/krotik/common/datautil"
-	"devt.de/krotik/common/stringutil"
-)
-
-// AST Nodes
-// =========
-
-/*
-MetaData is auxiliary data which can be attached to ASTs.
-*/
-type MetaData interface {
-
-	/*
-		Type returns the type of the meta data.
-	*/
-	Type() string
-
-	/*
-		Value returns the value of the meta data.
-	*/
-	Value() string
-}
-
-/*
-metaData is a minimal MetaData implementation.
-*/
-type metaData struct {
-	metatype  string
-	metavalue string
-}
-
-/*
-Type returns the type of the meta data.
-*/
-func (m *metaData) Type() string {
-	return m.metatype
-}
-
-/*
-Value returns the value of the meta data.
-*/
-func (m *metaData) Value() string {
-	return m.metavalue
-}
-
-/*
-ASTNode models a node in the AST
-*/
-type ASTNode struct {
-	Name     string     // Name of the node
-	Token    *LexToken  // Lexer token of this ASTNode
-	Meta     []MetaData // Meta data for this ASTNode (e.g. comments)
-	Children []*ASTNode // Child nodes
-	Runtime  Runtime    // Runtime component for this ASTNode
-
-	binding        int                                                             // Binding power of this node
-	nullDenotation func(p *parser, self *ASTNode) (*ASTNode, error)                // Configure token as beginning node
-	leftDenotation func(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) // Configure token as left node
-}
-
-/*
-Create a new instance of this ASTNode which is connected to a concrete lexer token.
-*/
-func (n *ASTNode) instance(p *parser, t *LexToken) *ASTNode {
-
-	ret := &ASTNode{n.Name, t, nil, make([]*ASTNode, 0, 2), nil, n.binding, n.nullDenotation, n.leftDenotation}
-
-	if p.rp != nil {
-		ret.Runtime = p.rp.Runtime(ret)
-	}
-
-	return ret
-}
-
-/*
-Equals checks if this AST data equals another AST data. It also returns a message
-describing any difference found.
-*/
-func (n *ASTNode) Equals(other *ASTNode, ignoreTokenPosition bool) (bool, string) {
-	return n.equalsPath(n.Name, other, ignoreTokenPosition)
-}
-
-/*
-equalsPath checks if this AST data equals another AST data while preserving the search path.
-It also returns a message describing any difference found.
-*/
-func (n *ASTNode) equalsPath(path string, other *ASTNode, ignoreTokenPosition bool) (bool, string) {
-	var res = true
-	var msg = ""
-
-	if n.Name != other.Name {
-		res = false
-		msg = fmt.Sprintf("Name is different %v vs %v\n", n.Name, other.Name)
-	}
-
-	if n.Token != nil && other.Token != nil {
-		if ok, tokenMSG := n.Token.Equals(*other.Token, ignoreTokenPosition); !ok {
-			res = false
-			msg += fmt.Sprintf("Token is different:\n%v\n", tokenMSG)
-		}
-	}
-
-	if len(n.Meta) != len(other.Meta) {
-		res = false
-		msg = fmt.Sprintf("Number of meta data entries is different %v vs %v\n",
-			len(n.Meta), len(other.Meta))
-	} else {
-		for i, meta := range n.Meta {
-
-			// Check for differences in meta entries
-
-			if meta.Type() != other.Meta[i].Type() {
-				res = false
-				msg += fmt.Sprintf("Meta data type is different %v vs %v\n", meta.Type(), other.Meta[i].Type())
-			} else if meta.Value() != other.Meta[i].Value() {
-				res = false
-				msg += fmt.Sprintf("Meta data value is different %v vs %v\n", meta.Value(), other.Meta[i].Value())
-			}
-		}
-	}
-
-	if len(n.Children) != len(other.Children) {
-		res = false
-		msg = fmt.Sprintf("Number of children is different %v vs %v\n",
-			len(n.Children), len(other.Children))
-	} else {
-		for i, child := range n.Children {
-
-			// Check for differences in children
-
-			if ok, childMSG := child.equalsPath(fmt.Sprintf("%v > %v", path, child.Name),
-				other.Children[i], ignoreTokenPosition); !ok {
-				return ok, childMSG
-			}
-		}
-	}
-
-	if msg != "" {
-		var buf bytes.Buffer
-		buf.WriteString("AST Nodes:\n")
-		n.levelString(0, &buf, 1)
-		buf.WriteString("vs\n")
-		other.levelString(0, &buf, 1)
-		msg = fmt.Sprintf("Path to difference: %v\n\n%v\n%v", path, msg, buf.String())
-	}
-
-	return res, msg
-}
-
-/*
-String returns a string representation of this token.
-*/
-func (n *ASTNode) String() string {
-	var buf bytes.Buffer
-	n.levelString(0, &buf, -1)
-	return buf.String()
-}
-
-/*
-levelString recursively prints the tree.
-*/
-func (n *ASTNode) levelString(indent int, buf *bytes.Buffer, printChildren int) {
-
-	// Print current level
-
-	buf.WriteString(stringutil.GenerateRollingString(" ", indent*2))
-
-	if n.Name == NodeSTRING {
-		buf.WriteString(fmt.Sprintf("%v: '%v'", n.Name, n.Token.Val))
-	} else if n.Name == NodeNUMBER {
-		buf.WriteString(fmt.Sprintf("%v: %v", n.Name, n.Token.Val))
-	} else if n.Name == NodeIDENTIFIER {
-		buf.WriteString(fmt.Sprintf("%v: %v", n.Name, n.Token.Val))
-	} else {
-		buf.WriteString(n.Name)
-	}
-
-	if len(n.Meta) > 0 {
-		buf.WriteString(" # ")
-		for i, c := range n.Meta {
-			buf.WriteString(c.Value())
-			if i < len(n.Meta)-1 {
-				buf.WriteString(" ")
-			}
-		}
-	}
-
-	buf.WriteString("\n")
-
-	if printChildren == -1 || printChildren > 0 {
-
-		if printChildren != -1 {
-			printChildren--
-		}
-
-		// Print children
-
-		for _, child := range n.Children {
-			child.levelString(indent+1, buf, printChildren)
-		}
-	}
-}
-
-/*
-ToJSONObject returns this ASTNode and all its children as a JSON object.
-*/
-func (n *ASTNode) ToJSONObject() map[string]interface{} {
-	ret := make(map[string]interface{})
-
-	ret["name"] = n.Name
-
-	lenMeta := len(n.Meta)
-
-	if lenMeta > 0 {
-		meta := make([]map[string]interface{}, lenMeta)
-		for i, metaChild := range n.Meta {
-			meta[i] = map[string]interface{}{
-				"type":  metaChild.Type(),
-				"value": metaChild.Value(),
-			}
-		}
-
-		ret["meta"] = meta
-	}
-
-	lenChildren := len(n.Children)
-
-	if lenChildren > 0 {
-		children := make([]map[string]interface{}, lenChildren)
-		for i, child := range n.Children {
-			children[i] = child.ToJSONObject()
-		}
-
-		ret["children"] = children
-	}
-
-	// The value is what the lexer found in the source
-
-	if n.Token != nil {
-		ret["id"] = n.Token.ID
-		if n.Token.Val != "" {
-			ret["value"] = n.Token.Val
-		}
-		ret["identifier"] = n.Token.Identifier
-		ret["pos"] = n.Token.Pos
-		ret["line"] = n.Token.Lline
-		ret["linepos"] = n.Token.Lpos
-	}
-
-	return ret
-}
-
-/*
-ASTFromJSONObject creates an AST from a JSON Object.
-The following nested map structure is expected:
-
-	{
-		name     : <name of node>
-
-		// Optional node information
-		value    : <value of node>
-		children : [ <child nodes> ]
-
-		// Optional token information
-		id       : <token id>
-	}
-*/
-func ASTFromJSONObject(jsonAST map[string]interface{}) (*ASTNode, error) {
-	var astMeta []MetaData
-	var astChildren []*ASTNode
-	var pos, line, linepos int
-
-	nodeID := TokenANY
-
-	name, ok := jsonAST["name"]
-	if !ok {
-		return nil, fmt.Errorf("Found json ast node without a name: %v", jsonAST)
-	}
-
-	if nodeIDString, ok := jsonAST["id"]; ok {
-		if nodeIDInt, err := strconv.Atoi(fmt.Sprint(nodeIDString)); err == nil && IsValidTokenID(nodeIDInt) {
-			nodeID = LexTokenID(nodeIDInt)
-		}
-	}
-
-	value, ok := jsonAST["value"]
-	if !ok {
-		value = ""
-	}
-
-	identifier, ok := jsonAST["identifier"]
-	if !ok {
-		identifier = false
-	}
-
-	if posString, ok := jsonAST["pos"]; ok {
-		pos, _ = strconv.Atoi(fmt.Sprint(posString))
-	} else {
-		pos = 0
-	}
-
-	if lineString, ok := jsonAST["line"]; ok {
-		line, _ = strconv.Atoi(fmt.Sprint(lineString))
-	} else {
-		line = 0
-	}
-
-	if lineposString, ok := jsonAST["linepos"]; ok {
-		linepos, _ = strconv.Atoi(fmt.Sprint(lineposString))
-	} else {
-		linepos = 0
-	}
-
-	// Create meta data
-
-	if meta, ok := jsonAST["meta"]; ok {
-
-		if ic, ok := meta.([]interface{}); ok {
-
-			// Do a list conversion if necessary - this is necessary when we parse
-			// JSON with map[string]interface{}
-
-			metaList := make([]map[string]interface{}, len(ic))
-			for i := range ic {
-				metaList[i] = ic[i].(map[string]interface{})
-			}
-
-			meta = metaList
-		}
-
-		for _, metaChild := range meta.([]map[string]interface{}) {
-			astMeta = append(astMeta, &metaData{
-				fmt.Sprint(metaChild["type"]), fmt.Sprint(metaChild["value"])})
-		}
-	}
-
-	// Create children
-
-	if children, ok := jsonAST["children"]; ok {
-
-		if ic, ok := children.([]interface{}); ok {
-
-			// Do a list conversion if necessary - this is necessary when we parse
-			// JSON with map[string]interface{}
-
-			childrenList := make([]map[string]interface{}, len(ic))
-			for i := range ic {
-				childrenList[i] = ic[i].(map[string]interface{})
-			}
-
-			children = childrenList
-		}
-
-		for _, child := range children.([]map[string]interface{}) {
-
-			astChild, err := ASTFromJSONObject(child)
-			if err != nil {
-				return nil, err
-			}
-
-			astChildren = append(astChildren, astChild)
-		}
-	}
-
-	token := &LexToken{
-		nodeID,             // ID
-		pos,                // Pos
-		fmt.Sprint(value),  // Val
-		identifier == true, // Identifier
-		line,               // Lline
-		linepos,            // Lpos
-	}
-
-	return &ASTNode{fmt.Sprint(name), token, astMeta, astChildren, nil, 0, nil, nil}, nil
-}
-
-// Look ahead buffer
-// =================
-
-/*
-LABuffer models a look-ahead buffer.
-*/
-type LABuffer struct {
-	tokens chan LexToken
-	buffer *datautil.RingBuffer
-}
-
-/*
-NewLABuffer creates a new LABuffer instance.
-*/
-func NewLABuffer(c chan LexToken, size int) *LABuffer {
-
-	if size < 1 {
-		size = 1
-	}
-
-	ret := &LABuffer{c, datautil.NewRingBuffer(size)}
-
-	v, more := <-ret.tokens
-	ret.buffer.Add(v)
-
-	for ret.buffer.Size() < size && more && v.ID != TokenEOF {
-		v, more = <-ret.tokens
-		ret.buffer.Add(v)
-	}
-
-	return ret
-}
-
-/*
-Next returns the next item.
-*/
-func (b *LABuffer) Next() (LexToken, bool) {
-
-	ret := b.buffer.Poll()
-
-	if v, more := <-b.tokens; more {
-		b.buffer.Add(v)
-	}
-
-	if ret == nil {
-		return LexToken{ID: TokenEOF}, false
-	}
-
-	return ret.(LexToken), true
-}
-
-/*
-Peek looks ahead inside the buffer; position 0 refers to the next item.
-*/
-func (b *LABuffer) Peek(pos int) (LexToken, bool) {
-
-	if pos >= b.buffer.Size() {
-		return LexToken{ID: TokenEOF}, false
-	}
-
-	return b.buffer.Get(pos).(LexToken), true
-}
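
LABuffer above adapts the lexer's token channel, which only supports sequential receives, into a window the parser can peek into: the constructor pre-fills a ring buffer from the channel, `Next` pops one token and pulls one more in, and `Peek(0)` inspects the upcoming token without consuming it. The same idea reduced to its essentials, using a plain slice in place of `datautil.RingBuffer` (illustrative names, not from the ECAL code):

```go
package main

import "fmt"

// lookahead adapts a channel into a window of up to size buffered items.
type lookahead struct {
	ch  chan int
	buf []int
}

func newLookahead(ch chan int, size int) *lookahead {
	la := &lookahead{ch: ch}
	for len(la.buf) < size { // pre-fill the window
		v, ok := <-ch
		if !ok {
			break
		}
		la.buf = append(la.buf, v)
	}
	return la
}

// Peek inspects the buffer; position 0 is the next item.
func (la *lookahead) Peek(pos int) (int, bool) {
	if pos >= len(la.buf) {
		return 0, false
	}
	return la.buf[pos], true
}

// Next pops the next item and refills the window from the channel.
func (la *lookahead) Next() (int, bool) {
	if len(la.buf) == 0 {
		return 0, false
	}
	v := la.buf[0]
	la.buf = la.buf[1:]
	if nv, ok := <-la.ch; ok {
		la.buf = append(la.buf, nv)
	}
	return v, true
}

func main() {
	ch := make(chan int, 5)
	for _, v := range []int{1, 2, 3, 4, 5} {
		ch <- v
	}
	close(ch)

	la := newLookahead(ch, 3)
	v, _ := la.Peek(2) // look two items ahead without consuming
	fmt.Println(v)     // 3
	for {
		v, ok := la.Next()
		if !ok {
			break
		}
		fmt.Print(v, " ") // 1 2 3 4 5
	}
}
```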

+ 0 - 450
lang/ecal/parser/helper_test.go

@@ -1,450 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"testing"
-)
-
-func TestASTNode(t *testing.T) {
-
-	n, err := ParseWithRuntime("", "- 1", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err := ParseWithRuntime("", "-2", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, false); ok || msg != `Path to difference: minus > number
-
-Token is different:
-Pos is different 2 vs 1
-Val is different 1 vs 2
-Lpos is different 3 vs 2
-{
-  "ID": 6,
-  "Pos": 2,
-  "Val": "1",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 3
-}
-vs
-{
-  "ID": 6,
-  "Pos": 1,
-  "Val": "2",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 2
-}
-
-AST Nodes:
-number: 1
-vs
-number: 2
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	n, err = ParseWithRuntime("", "-1", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err = ParseWithRuntime("", "-a", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, true); ok || msg != `Path to difference: minus > number
-
-Name is different number vs identifier
-Token is different:
-ID is different 6 vs 7
-Val is different 1 vs a
-Identifier is different false vs true
-{
-  "ID": 6,
-  "Pos": 1,
-  "Val": "1",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 2
-}
-vs
-{
-  "ID": 7,
-  "Pos": 1,
-  "Val": "a",
-  "Identifier": true,
-  "Lline": 1,
-  "Lpos": 2
-}
-
-AST Nodes:
-number: 1
-vs
-identifier: a
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	n, err = ParseWithRuntime("", "- 1", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err = ParseWithRuntime("", "a - b", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, false); ok || msg != `Path to difference: minus
-
-Number of children is different 1 vs 2
-
-AST Nodes:
-minus
-  number: 1
-vs
-minus
-  identifier: a
-  identifier: b
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	n, err = ParseWithRuntime("", "-1 #test", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err = ParseWithRuntime("", "-1", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, false); ok || msg != `Path to difference: minus > number
-
-Number of meta data entries is different 1 vs 0
-
-AST Nodes:
-number: 1 # test
-vs
-number: 1
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	n, err = ParseWithRuntime("", "-1 #test", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err = ParseWithRuntime("", "-1 #wurst", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, false); ok || msg != `Path to difference: minus > number
-
-Meta data value is different test vs wurst
-
-AST Nodes:
-number: 1 # test
-vs
-number: 1 # wurst
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	n, err = ParseWithRuntime("", "1 #test", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	n2, err = ParseWithRuntime("", "/*test*/ 1", &DummyRuntimeProvider{})
-	if err != nil {
-		t.Error("Cannot parse test AST:", err)
-		return
-	}
-
-	if ok, msg := n.Equals(n2, false); ok || msg != `Path to difference: number
-
-Token is different:
-Pos is different 0 vs 9
-Lpos is different 1 vs 10
-{
-  "ID": 6,
-  "Pos": 0,
-  "Val": "1",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 1
-}
-vs
-{
-  "ID": 6,
-  "Pos": 9,
-  "Val": "1",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 10
-}
-Meta data type is different MetaDataPostComment vs MetaDataPreComment
-
-AST Nodes:
-number: 1 # test
-vs
-number: 1 # test
-` {
-		t.Error("Unexpected result: ", msg)
-		return
-	}
-
-	// Test building an AST from an invalid JSON object
-
-	if _, err := ASTFromJSONObject(map[string]interface{}{
-		"value": "foo",
-	}); err == nil || err.Error() != "Found json ast node without a name: map[value:foo]" {
-		t.Error("Unexpected result: ", err)
-		return
-	}
-
-	if _, err := ASTFromJSONObject(map[string]interface{}{
-		"name": "foo",
-		"children": []map[string]interface{}{
-			{
-				"value": "bar",
-			},
-		},
-	}); err == nil || err.Error() != "Found json ast node without a name: map[value:bar]" {
-		t.Error("Unexpected result: ", err)
-		return
-	}
-
-	// Test population of missing information
-
-	if ast, err := ASTFromJSONObject(map[string]interface{}{
-		"name": "foo",
-	}); err != nil || ast.String() != "foo\n" || ast.Token.String() != `v:""` {
-		t.Error("Unexpected result: ", ast.Token.String(), ast.String(), err)
-		return
-	}
-
-	if ast, err := ASTFromJSONObject(map[string]interface{}{
-		"name": "foo",
-		"children": []map[string]interface{}{
-			{
-				"name": "bar",
-			},
-		},
-	}); err != nil || ast.String() != "foo\n  bar\n" || ast.Token.String() != `v:""` {
-		t.Error("Unexpected result: ", ast.Token.String(), ast.String(), err)
-		return
-	}
-}
-
-func TestLABuffer(t *testing.T) {
-
-	buf := NewLABuffer(Lex("test", "1 2 3 4 5 6 7 8 9"), 3)
-
-	if token, ok := buf.Next(); token.Val != "1" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "2" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Check Peek
-
-	if token, ok := buf.Peek(0); token.Val != "3" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(1); token.Val != "4" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(2); token.Val != "5" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(3); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Continue
-
-	if token, ok := buf.Next(); token.Val != "3" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "4" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "5" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "6" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "7" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "8" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Check Peek
-
-	if token, ok := buf.Peek(0); token.Val != "9" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(1); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(2); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Continue
-
-	if token, ok := buf.Next(); token.Val != "9" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Check Peek
-
-	if token, ok := buf.Peek(0); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(1); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Continue
-
-	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// New Buffer
-
-	buf = NewLABuffer(Lex("test", "1 2 3"), 3)
-
-	if token, ok := buf.Next(); token.Val != "1" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "2" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// Check Peek
-
-	if token, ok := buf.Peek(0); token.Val != "3" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(1); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(2); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.Val != "3" || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	// New Buffer - test edge case
-
-	buf = NewLABuffer(Lex("test", ""), 0)
-
-	if token, ok := buf.Peek(0); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.ID != TokenEOF || !ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Peek(0); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-
-	if token, ok := buf.Next(); token.ID != TokenEOF || ok {
-		t.Error("Unexpected result: ", token, ok)
-		return
-	}
-}

+ 0 - 767
lang/ecal/parser/lexer.go

@@ -1,767 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"regexp"
-	"strconv"
-	"strings"
-	"unicode"
-	"unicode/utf8"
-)
-
-var namePattern = regexp.MustCompile("^[A-Za-z][A-Za-z0-9]*$")
-var numberPattern = regexp.MustCompile("^[0-9].*$")
-
-/*
-LexToken represents a token which is returned by the lexer.
-*/
-type LexToken struct {
-	ID         LexTokenID // Token kind
-	Pos        int        // Starting position (in bytes)
-	Val        string     // Token value
-	Identifier bool       // Flag if the value is an identifier (not quoted and not a number)
-	Lline      int        // Line in the input this token appears
-	Lpos       int        // Position in the input line this token appears
-}
-
-/*
-NewLexTokenInstance creates a new LexToken object instance from given LexToken values.
-*/
-func NewLexTokenInstance(t LexToken) *LexToken {
-	return &LexToken{
-		t.ID,
-		t.Pos,
-		t.Val,
-		t.Identifier,
-		t.Lline,
-		t.Lpos,
-	}
-}
-
-/*
-Equals checks if this LexToken equals another LexToken. It also returns a message
-describing any difference found.
-*/
-func (t LexToken) Equals(other LexToken, ignorePosition bool) (bool, string) {
-	var res = true
-	var msg = ""
-
-	if t.ID != other.ID {
-		res = false
-		msg += fmt.Sprintf("ID is different %v vs %v\n", t.ID, other.ID)
-	}
-
-	if !ignorePosition && t.Pos != other.Pos {
-		res = false
-		msg += fmt.Sprintf("Pos is different %v vs %v\n", t.Pos, other.Pos)
-	}
-
-	if t.Val != other.Val {
-		res = false
-		msg += fmt.Sprintf("Val is different %v vs %v\n", t.Val, other.Val)
-	}
-
-	if t.Identifier != other.Identifier {
-		res = false
-		msg += fmt.Sprintf("Identifier is different %v vs %v\n", t.Identifier, other.Identifier)
-	}
-
-	if !ignorePosition && t.Lline != other.Lline {
-		res = false
-		msg += fmt.Sprintf("Lline is different %v vs %v\n", t.Lline, other.Lline)
-	}
-
-	if !ignorePosition && t.Lpos != other.Lpos {
-		res = false
-		msg += fmt.Sprintf("Lpos is different %v vs %v\n", t.Lpos, other.Lpos)
-	}
-
-	if msg != "" {
-		var buf bytes.Buffer
-		out, _ := json.MarshalIndent(t, "", "  ")
-		buf.WriteString(string(out))
-		buf.WriteString("\nvs\n")
-		out, _ = json.MarshalIndent(other, "", "  ")
-		buf.WriteString(string(out))
-		msg = fmt.Sprintf("%v%v", msg, buf.String())
-	}
-
-	return res, msg
-}
-
-/*
-PosString returns the position of this token in the original input as a string.
-*/
-func (t LexToken) PosString() string {
-	return fmt.Sprintf("Line %v, Pos %v", t.Lline, t.Lpos)
-}
-
-/*
-String returns a string representation of a token.
-*/
-func (t LexToken) String() string {
-
-	prefix := ""
-
-	if !t.Identifier {
-		prefix = "v:" // Value is not an identifier
-	}
-
-	switch {
-
-	case t.ID == TokenEOF:
-		return "EOF"
-
-	case t.ID == TokenError:
-		return fmt.Sprintf("Error: %s (%s)", t.Val, t.PosString())
-
-	case t.ID == TokenPRECOMMENT:
-		return fmt.Sprintf("/* %s */", t.Val)
-
-	case t.ID == TokenPOSTCOMMENT:
-		return fmt.Sprintf("# %s", t.Val)
-
-	case t.ID > TOKENodeSYMBOLS && t.ID < TOKENodeKEYWORDS:
-		return fmt.Sprintf("%s", strings.ToUpper(t.Val))
-
-	case t.ID > TOKENodeKEYWORDS:
-		return fmt.Sprintf("<%s>", strings.ToUpper(t.Val))
-
-	case len(t.Val) > 20:
-
-		// Special case for very long values
-
-		return fmt.Sprintf("%s%.10q...", prefix, t.Val)
-	}
-
-	return fmt.Sprintf("%s%q", prefix, t.Val)
-}
-
-// Meta data interface
-
-/*
-Type returns the meta data type.
-*/
-func (t LexToken) Type() string {
-	if t.ID == TokenPRECOMMENT {
-		return MetaDataPreComment
-	} else if t.ID == TokenPOSTCOMMENT {
-		return MetaDataPostComment
-	}
-	return MetaDataGeneral
-}
-
-/*
-Value returns the meta data value.
-*/
-func (t LexToken) Value() string {
-	return t.Val
-}
-
-/*
-KeywordMap is a map of keywords - these require spaces between them
-*/
-var KeywordMap = map[string]LexTokenID{
-
-	// Import statement
-
-	"import": TokenIMPORT,
-	"as":     TokenAS,
-
-	// Sink definition
-
-	"sink":       TokenSINK,
-	"kindmatch":  TokenKINDMATCH,
-	"scopematch": TokenSCOPEMATCH,
-	"statematch": TokenSTATEMATCH,
-	"priority":   TokenPRIORITY,
-	"suppresses": TokenSUPPRESSES,
-
-	// Function definition
-
-	"func":   TokenFUNC,
-	"return": TokenRETURN,
-
-	// Boolean operators
-
-	"and": TokenAND,
-	"or":  TokenOR,
-	"not": TokenNOT,
-
-	// String operators
-
-	"like":      TokenLIKE,
-	"hasprefix": TokenHASPREFIX,
-	"hassuffix": TokenHASSUFFIX,
-
-	// List operators
-
-	"in":    TokenIN,
-	"notin": TokenNOTIN,
-
-	// Constant terminals
-
-	"false": TokenFALSE,
-	"true":  TokenTRUE,
-	"null":  TokenNULL,
-
-	// Conditional statements
-
-	"if":   TokenIF,
-	"elif": TokenELIF,
-	"else": TokenELSE,
-
-	// Loop statements
-
-	"for":      TokenFOR,
-	"break":    TokenBREAK,
-	"continue": TokenCONTINUE,
-}
-
-/*
-SymbolMap is a map of special symbols which will always be unique - these will separate unquoted strings.
-Symbols can be at most two characters long.
-*/
-var SymbolMap = map[string]LexTokenID{
-
-	// Condition operators
-
-	">=": TokenGEQ,
-	"<=": TokenLEQ,
-	"!=": TokenNEQ,
-	"==": TokenEQ,
-	">":  TokenGT,
-	"<":  TokenLT,
-
-	// Grouping symbols
-
-	"(": TokenLPAREN,
-	")": TokenRPAREN,
-	"[": TokenLBRACK,
-	"]": TokenRBRACK,
-	"{": TokenLBRACE,
-	"}": TokenRBRACE,
-
-	// Separators
-
-	".": TokenDOT,
-	",": TokenCOMMA,
-	";": TokenSEMICOLON,
-
-	// Grouping
-
-	":": TokenCOLON,
-	"=": TokenEQUAL,
-
-	// Arithmetic operators
-
-	"+":  TokenPLUS,
-	"-":  TokenMINUS,
-	"*":  TokenTIMES,
-	"/":  TokenDIV,
-	"//": TokenDIVINT,
-	"%":  TokenMODINT,
-
-	// Assignment statement
-
-	":=": TokenASSIGN,
-}
-
-// Lexer
-// =====
-
-/*
-RuneEOF is a special rune which represents the end of the input
-*/
-const RuneEOF = -1
-
-/*
-Function which represents the current state of the lexer and returns the next state
-*/
-type lexFunc func(*lexer) lexFunc
-
-/*
-Lexer data structure
-*/
-type lexer struct {
-	name   string        // Name to identify the input
-	input  string        // Input string of the lexer
-	pos    int           // Current rune pointer
-	line   int           // Current line pointer
-	lastnl int           // Last newline position
-	width  int           // Width of last rune
-	start  int           // Start position of the current read token
-	tokens chan LexToken // Channel for lexer output
-}
-
-/*
-Lex lexes a given input. Returns a channel which contains tokens.
-*/
-func Lex(name string, input string) chan LexToken {
-	l := &lexer{name, input, 0, 0, 0, 0, 0, make(chan LexToken)}
-	go l.run()
-	return l.tokens
-}
-
-/*
-LexToList lexes a given input. Returns a list of tokens.
-*/
-func LexToList(name string, input string) []LexToken {
-	var tokens []LexToken
-
-	for t := range Lex(name, input) {
-		tokens = append(tokens, t)
-	}
-
-	return tokens
-}
-
-/*
-Main loop of the lexer.
-*/
-func (l *lexer) run() {
-
-	if skipWhiteSpace(l) {
-		for state := lexToken; state != nil; {
-			state = state(l)
-
-			if !skipWhiteSpace(l) {
-				break
-			}
-		}
-	}
-
-	close(l.tokens)
-}
-
-/*
-next returns the next rune in the input and advances the current rune pointer
-if peek is 0. If peek is >0 then the nth character is returned without advancing
-the rune pointer.
-*/
-func (l *lexer) next(peek int) rune {
-
-	// Check if we reached the end
-
-	if int(l.pos) >= len(l.input) {
-		return RuneEOF
-	}
-
-	// Decode the next rune
-
-	pos := l.pos
-	if peek > 0 {
-		pos += peek - 1
-	}
-
-	r, w := utf8.DecodeRuneInString(l.input[pos:])
-
-	if peek == 0 {
-		l.width = w
-		l.pos += l.width
-	}
-
-	return r
-}
-
-/*
-backup sets the pointer one rune back. Can only be called once per next call.
-*/
-func (l *lexer) backup(width int) {
-	if width == 0 {
-		width = l.width
-	}
-	l.pos -= width
-}
-
-/*
-startNew starts a new token.
-*/
-func (l *lexer) startNew() {
-	l.start = l.pos
-}
-
-/*
-emitToken passes a token back to the client.
-*/
-func (l *lexer) emitToken(t LexTokenID) {
-	if t == TokenEOF {
-		l.emitTokenAndValue(t, "", false)
-		return
-	}
-
-	if l.tokens != nil {
-		l.tokens <- LexToken{t, l.start, l.input[l.start:l.pos], false,
-			l.line + 1, l.start - l.lastnl + 1}
-	}
-}
-
-/*
-emitTokenAndValue passes a token with a given value back to the client.
-*/
-func (l *lexer) emitTokenAndValue(t LexTokenID, val string, identifier bool) {
-	if l.tokens != nil {
-		l.tokens <- LexToken{t, l.start, val, identifier, l.line + 1, l.start - l.lastnl + 1}
-	}
-}
-
-/*
-emitError passes an error token back to the client.
-*/
-func (l *lexer) emitError(msg string) {
-	if l.tokens != nil {
-		l.tokens <- LexToken{TokenError, l.start, msg, false, l.line + 1, l.start - l.lastnl + 1}
-	}
-}
-
-// Helper functions
-// ================
-
-/*
-skipWhiteSpace skips any number of whitespace characters. Returns false if the lexer
-reaches EOF while skipping whitespace.
-*/
-func skipWhiteSpace(l *lexer) bool {
-	r := l.next(0)
-
-	for unicode.IsSpace(r) || unicode.IsControl(r) || r == RuneEOF {
-		if r == '\n' {
-			l.line++
-			l.lastnl = l.pos
-		}
-		r = l.next(0)
-
-		if r == RuneEOF {
-			l.emitToken(TokenEOF)
-			return false
-		}
-	}
-
-	l.backup(0)
-	return true
-}
-
-/*
-lexTextBlock lexes a block of text without whitespace. Optionally
-interprets all one or two character tokens.
-*/
-func lexTextBlock(l *lexer, interpretToken bool) {
-
-	r := l.next(0)
-
-	if interpretToken {
-
-		// Check if we start with a known symbol
-
-		nr := l.next(1)
-		if _, ok := SymbolMap[strings.ToLower(string(r)+string(nr))]; ok {
-			l.next(0)
-			return
-		}
-
-		if _, ok := SymbolMap[strings.ToLower(string(r))]; ok {
-			return
-		}
-	}
-
-	for !unicode.IsSpace(r) && !unicode.IsControl(r) && r != RuneEOF {
-
-		if interpretToken {
-
-			// Check if we find a token in the block
-
-			if _, ok := SymbolMap[strings.ToLower(string(r))]; ok {
-				l.backup(0)
-				return
-			}
-
-			nr := l.next(1)
-			if _, ok := SymbolMap[strings.ToLower(string(r)+string(nr))]; ok {
-				l.backup(0)
-				return
-			}
-		}
-
-		r = l.next(0)
-	}
-
-	if r != RuneEOF {
-		l.backup(0)
-	}
-}
-
-/*
-lexNumberBlock lexes a block potentially containing a number.
-*/
-func lexNumberBlock(l *lexer) {
-
-	r := l.next(0)
-
-	for !unicode.IsSpace(r) && !unicode.IsControl(r) && r != RuneEOF {
-
-		if !unicode.IsNumber(r) && r != '.' {
-			if r == 'e' {
-
-				l1 := l.next(1)
-				l2 := l.next(2)
-				if l1 != '+' || !unicode.IsNumber(l2) {
-					break
-				}
-				l.next(0)
-				l.next(0)
-			} else {
-				break
-			}
-		}
-		r = l.next(0)
-	}
-
-	if r != RuneEOF {
-		l.backup(0)
-	}
-}
-
-// State functions
-// ===============
-
-/*
-lexToken is the main entry function for the lexer.
-*/
-func lexToken(l *lexer) lexFunc {
-
-	// Check if we got a quoted value or a comment
-
-	n1 := l.next(1)
-	n2 := l.next(2)
-
-	// Parse comments
-
-	if (n1 == '/' && n2 == '*') || n1 == '#' {
-		return lexComment
-	}
-
-	// Parse strings
-
-	if (n1 == '"' || n1 == '\'') || (n1 == 'r' && (n2 == '"' || n2 == '\'')) {
-		return lexValue
-	}
-
-	// Lex a block of text and emit any found tokens
-
-	l.startNew()
-
-	// First try to parse a number
-
-	lexNumberBlock(l)
-	identifierCandidate := l.input[l.start:l.pos]
-	keywordCandidate := strings.ToLower(identifierCandidate)
-
-	// Check for number
-
-	if numberPattern.MatchString(keywordCandidate) {
-		_, err := strconv.ParseFloat(keywordCandidate, 64)
-
-		if err == nil {
-			l.emitTokenAndValue(TokenNUMBER, keywordCandidate, false)
-			return lexToken
-		}
-	}
-
-	if len(keywordCandidate) > 0 {
-		l.backup(l.pos - l.start)
-	}
-	lexTextBlock(l, true)
-	identifierCandidate = l.input[l.start:l.pos]
-	keywordCandidate = strings.ToLower(identifierCandidate)
-
-	// Check for keyword
-
-	token, ok := KeywordMap[keywordCandidate]
-
-	if !ok {
-
-		// Check for symbol
-
-		token, ok = SymbolMap[keywordCandidate]
-	}
-
-	if ok {
-
-		// A known token was found
-
-		l.emitToken(token)
-
-	} else {
-
-		if !namePattern.MatchString(keywordCandidate) {
-			l.emitError(fmt.Sprintf("Cannot parse identifier '%v'. Identifiers may only contain [a-zA-Z] and [a-zA-Z0-9] from the second character", keywordCandidate))
-			return nil
-		}
-
-		// An identifier was found
-
-		l.emitTokenAndValue(TokenIDENTIFIER, identifierCandidate, true)
-	}
-
-	return lexToken
-}
-
-/*
-lexValue lexes a string value.
-
-Values can be declared in different ways:
-
-' ... ' or " ... "
-Characters are parsed between quotes (escape sequences are interpreted)
-
-r' ... ' or r" ... "
-Characters are parsed plain between quotes
-*/
-func lexValue(l *lexer) lexFunc {
-	var endToken rune
-
-	l.startNew()
-
-	allowEscapes := false
-
-	r := l.next(0)
-
-	// Check if we have a raw quoted string
-
-	if q := l.next(1); r == 'r' && (q == '"' || q == '\'') {
-		endToken = q
-		l.next(0)
-	} else {
-		allowEscapes = true
-		endToken = r
-	}
-
-	r = l.next(0)
-	rprev := ' '
-	lLine := l.line
-	lLastnl := l.lastnl
-
-	for (!allowEscapes && r != endToken) ||
-		(allowEscapes && (r != endToken || rprev == '\\')) {
-
-		if r == '\n' {
-			lLine++
-			lLastnl = l.pos
-		}
-		rprev = r
-		r = l.next(0)
-
-		if r == RuneEOF {
-			l.emitError("Unexpected end while reading string value (unclosed quotes)")
-			return nil
-		}
-	}
-
-	if allowEscapes {
-		val := l.input[l.start+1 : l.pos-1]
-
-		// Interpret escape sequences right away
-
-		if endToken == '\'' {
-
-			// Escape double quotes in a single quoted string
-
-			val = strings.Replace(val, "\"", "\\\"", -1)
-		}
-
-		s, err := strconv.Unquote("\"" + val + "\"")
-		if err != nil {
-			l.emitError(err.Error() + " while parsing string")
-			return nil
-		}
-
-		l.emitTokenAndValue(TokenSTRING, s, true)
-
-	} else {
-		l.emitTokenAndValue(TokenSTRING, l.input[l.start+2:l.pos-1], true)
-	}
-
-	//  Set newline
-
-	l.line = lLine
-	l.lastnl = lLastnl
-
-	return lexToken
-}
-
-/*
-lexComment lexes comments.
-*/
-func lexComment(l *lexer) lexFunc {
-
-	// Consume initial /*
-
-	r := l.next(0)
-
-	if r == '#' {
-
-		l.startNew()
-
-		for r != '\n' && r != RuneEOF {
-			r = l.next(0)
-		}
-
-		l.emitTokenAndValue(TokenPOSTCOMMENT, l.input[l.start:l.pos], false)
-
-		if r == RuneEOF {
-			return nil
-		}
-
-		l.line++
-
-	} else {
-
-		l.next(0)
-
-		lLine := l.line
-		lLastnl := l.lastnl
-
-		l.startNew()
-
-		r = l.next(0)
-
-		for r != '*' && l.next(1) != '/' {
-
-			if r == '\n' {
-				lLine++
-				lLastnl = l.pos
-			}
-			r = l.next(0)
-
-			if r == RuneEOF {
-				l.emitError("Unexpected end while reading comment")
-				return nil
-			}
-		}
-
-		l.emitTokenAndValue(TokenPRECOMMENT, l.input[l.start:l.pos-1], false)
-
-		// Consume final /
-
-		l.next(0)
-
-		//  Set newline
-
-		l.line = lLine
-		l.lastnl = lLastnl
-
-	}
-
-	return lexToken
-}
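
lexer.go follows the state-function design from Rob Pike's talk cited in the package documentation: every state is a function that emits tokens on a channel and returns the next state (`type lexFunc func(*lexer) lexFunc`), and the run loop simply iterates until a state returns nil. A stripped-down, hypothetical version of that machinery, lexing only words and numbers, with ASCII-only indexing for brevity:

```go
package main

import (
	"fmt"
	"unicode"
)

type lexer struct {
	input  string
	pos    int
	tokens chan string
}

// stateFn is the core of the pattern: a state that returns the next state.
type stateFn func(*lexer) stateFn

func (l *lexer) run() {
	for state := lexAny; state != nil; {
		state = state(l)
	}
	close(l.tokens)
}

// lexAny dispatches to a more specific state based on the next character.
func lexAny(l *lexer) stateFn {
	for l.pos < len(l.input) {
		r := rune(l.input[l.pos])
		switch {
		case unicode.IsSpace(r):
			l.pos++ // skip whitespace
		case unicode.IsDigit(r):
			return lexNumber
		default:
			return lexWord
		}
	}
	return nil // end of input terminates the run loop
}

func lexNumber(l *lexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && unicode.IsDigit(rune(l.input[l.pos])) {
		l.pos++
	}
	l.tokens <- "NUM:" + l.input[start:l.pos]
	return lexAny
}

func lexWord(l *lexer) stateFn {
	start := l.pos
	for l.pos < len(l.input) && !unicode.IsSpace(rune(l.input[l.pos])) &&
		!unicode.IsDigit(rune(l.input[l.pos])) {
		l.pos++
	}
	l.tokens <- "WORD:" + l.input[start:l.pos]
	return lexAny
}

func main() {
	l := &lexer{input: "foo 42 bar", tokens: make(chan string)}
	go l.run() // lexer and consumer run concurrently, as in the package docs
	for t := range l.tokens {
		fmt.Println(t) // WORD:foo NUM:42 WORD:bar
	}
}
```

Because the tokens travel over an unbuffered channel, the lexer goroutine naturally pauses until the consumer (here the `range` loop, in ECAL the parser) is ready for the next token.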

+ 0 - 309
lang/ecal/parser/lexer_test.go

@@ -1,309 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"fmt"
-	"testing"
-)
-
-func TestNextItem(t *testing.T) {
-
-	l := &lexer{"Test", "1234", 0, 0, 0, 0, 0, make(chan LexToken)}
-
-	r := l.next(1)
-
-	if r != '1' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(0); r != '1' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(0); r != '2' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(1); r != '3' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(2); r != '4' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(0); r != '3' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(0); r != '4' {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-
-	if r := l.next(0); r != RuneEOF {
-		t.Errorf("Unexpected token: %q", r)
-		return
-	}
-}
-
-func TestEquals(t *testing.T) {
-	l := LexToList("mytest", "not\n test")
-
-	if mt := l[0].Type(); mt != "MetaDataGeneral" {
-		t.Error("Unexpected meta type:", mt)
-		return
-	}
-
-	if ok, msg := l[0].Equals(l[1], false); ok || msg != `ID is different 53 vs 7
-Pos is different 0 vs 5
-Val is different not vs test
-Identifier is different false vs true
-Lline is different 1 vs 2
-Lpos is different 1 vs 2
-{
-  "ID": 53,
-  "Pos": 0,
-  "Val": "not",
-  "Identifier": false,
-  "Lline": 1,
-  "Lpos": 1
-}
-vs
-{
-  "ID": 7,
-  "Pos": 5,
-  "Val": "test",
-  "Identifier": true,
-  "Lline": 2,
-  "Lpos": 2
-}` {
-		t.Error("Unexpected result:", msg)
-		return
-	}
-}
-
-func TestBasicTokenLexing(t *testing.T) {
-
-	// Test empty string parsing
-
-	if res := fmt.Sprint(LexToList("mytest", "    \t   ")); res != "[EOF]" {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-
-	// Test arithmetics
-
-	input := `name := a + 1 and (ver+x!=1) * 5 > name2`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`["name" := "a" + v:"1" <AND> ( "ver" + "x" != v:"1" ) * v:"5" > "name2" EOF]` {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-
-	input = `test := not a * 1.3 or (12 / aa) * 5 DiV 3 % 1 > trUe`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`["test" := <NOT> "a" * v:"1.3" <OR> ( v:"12" / "aa" ) * v:"5" "DiV" v:"3" % v:"1" > <TRUE> EOF]` {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-
-	input = `-1.234560e+02+5+2.123 // 1`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[- v:"1.234560e+02" + v:"5" + v:"2.123" // v:"1" EOF]` {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-
-	// Test invalid identifier
-
-	input = `5test`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[v:"5" "test" EOF]` {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-
-	input = `@test`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[Error: Cannot parse identifier '@test'. Identifiers may only contain [a-zA-Z] and [a-zA-Z0-9] from the second character (Line 1, Pos 1) EOF]` {
-		t.Error("Unexpected lexer result:\n  ", res)
-		return
-	}
-}
-
-func TestAssignmentLexing(t *testing.T) {
-
-	input := `name := a + 1`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`["name" := "a" + v:"1" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name := a.a + a.b`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`["name" := "a" . "a" + "a" . "b" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name:=a[1] + b["d"] + c[a]`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`["name" := "a" [ v:"1" ] + "b" [ "d" ] + "c" [ "a" ] EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-}
-
-func TestBlockLexing(t *testing.T) {
-
-	input := `
-if a == 1 {
-    print("xxx")
-} elif b > 2 {
-    print("yyy")
-} else {
-    print("zzz")
-}
-`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[<IF> "a" == v:"1" { "print" ( "xxx" ) } <ELIF> "b" > v:"2" { "print" ( "yyy" ) } <ELSE> { "print" ( "zzz" ) } EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `
-for a, b in enum(blist) {
-    do(a)
-}
-`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[<FOR> "a" , "b" <IN> "enum" ( "blist" ) { "do" ( "a" ) } EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `
-for true {
-	x := "1"
-	break; continue
-}
-`
-	if res := LexToList("mytest", input); fmt.Sprint(res) !=
-		`[<FOR> <TRUE> { "x" := "1" <BREAK> ; <CONTINUE> } EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-}
-
-func TestStringLexing(t *testing.T) {
-
-	// Test unclosed quotes
-
-	input := `name "test  bla`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" Error: Unexpected end while reading string value (unclosed quotes) (Line 1, Pos 6) EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name "test"  'bla'`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" "test" "bla" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name "te
-	st"  'bla'`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" Error: invalid syntax while parsing string (Line 1, Pos 6)]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name r"te
-	st"  'bla'`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" "te\n\tst" "bla" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	// Parsing with escape sequences
-
-	input = `"test\n\ttest"  '\nfoo\u0028bar' "test{foo}.5w3f"`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["test\n\ttest" "\nfoo(bar" "test{foo}.5w3f" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-}
-
-func TestCommentLexing(t *testing.T) {
-
-	input := `name /* foo
-		bar
-	x*/ 'b/* - */la' /*test*/`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" /*  foo
-		bar
-	x */ "b/* - */la" /* test */ EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `name /* foo
-		bar`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["name" Error: Unexpected end while reading comment (Line 1, Pos 8) EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `foo
-   1+ 2 # Some comment
-bar`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `["foo" v:"1" + v:"2" #  Some comment
- "bar" EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-
-	input = `1+ 2 # Some comment`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `[v:"1" + v:"2" #  Some comment EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-}
-
-func TestSinkLexing(t *testing.T) {
-
-	input := `sink "mysink"
-r"
-A comment describing the sink.
-"
-kindmatch [ foo.bar.* ],
-scopematch [ "data.read", "data.write" ],
-statematch { a : 1, b : NULL },
-priority 0,
-suppresses [ "myothersink" ]
-{
-  a := 1
-}`
-	if res := LexToList("mytest", input); fmt.Sprint(res) != `[<SINK> "mysink" "\nA comment"... <KINDMATCH> `+
-		`[ "foo" . "bar" . * ] , <SCOPEMATCH> [ "data.read" , "data.write" ] , <STATEMATCH> `+
-		`{ "a" : v:"1" , "b" : <NULL> } , <PRIORITY> v:"0" , <SUPPRESSES> [ "myothersink" ] `+
-		`{ "a" := v:"1" } EOF]` {
-		t.Error("Unexpected lexer result:", res)
-		return
-	}
-}

+ 0 - 182
lang/ecal/parser/main_test.go

@@ -1,182 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"encoding/json"
-	"flag"
-	"fmt"
-	"os"
-	"testing"
-)
-
-// Main function for all tests in this package
-
-func TestMain(m *testing.M) {
-	flag.Parse()
-
-	res := m.Run()
-
-	// Check if all nodes have been tested
-
-	for _, n := range astNodeMap {
-		if _, ok := usedNodes[n.Name]; !ok {
-			fmt.Println("Not tested node: ", n.Name)
-		}
-	}
-
-	// Check if all nodes have been pretty printed
-
-	for k := range prettyPrinterMap {
-		if _, ok := usedPrettyPrinterNodes[k]; !ok {
-			fmt.Println("Not tested pretty printer: ", k)
-		}
-	}
-
-	os.Exit(res)
-}
-
-// Used nodes map which is filled during unit testing. Prefilled with tokens which
-// will not be generated by the parser
-//
-var usedNodes = map[string]bool{
-	NodeEOF: true, // Only used as end term
-	"":      true, // No node e.g. semicolon - These nodes should never be part of an AST
-}
-
-func UnitTestParse(name string, input string) (*ASTNode, error) {
-	return UnitTestParseWithPPResult(name, input, "")
-}
-
-func UnitTestParseWithPPResult(name string, input string, expectedPPRes string) (*ASTNode, error) {
-	n, err := ParseWithRuntime(name, input, &DummyRuntimeProvider{})
-
-	// Test AST serialization
-
-	if err == nil {
-		var unmarshaledJSONObject map[string]interface{}
-
-		astString, err := json.Marshal(n.ToJSONObject())
-		if err != nil {
-			return nil, fmt.Errorf("Could not marshal AST: %v", err)
-		}
-
-		if err := json.Unmarshal(astString, &unmarshaledJSONObject); err != nil {
-			return nil, fmt.Errorf("Could not unmarshal JSON object: %v", err)
-		}
-
-		unmarshaledAST, err := ASTFromJSONObject(unmarshaledJSONObject)
-		if err != nil {
-			return nil, fmt.Errorf("Could not create AST from unmarshaled JSON object: %v", err)
-		}
-
-		// Compare the ASTs
-
-		if ok, msg := n.Equals(unmarshaledAST, false); !ok {
-			return nil, fmt.Errorf(
-				"Parsed AST is different from the unmarshaled AST."+
-					"\n%v\n", msg)
-		}
-	}
-
-	// Test Pretty printing
-
-	if err == nil {
-
-		ppres, err := PrettyPrint(n)
-		if err != nil {
-			return nil, fmt.Errorf("Error while pretty printing: %v (input: %v)", err, input)
-		}
-
-		if expectedPPRes == "" {
-
-			n2, err := ParseWithRuntime(name, ppres, &DummyRuntimeProvider{})
-			if err != nil {
-				return nil, fmt.Errorf("Error while parsing pretty print result: %v (result: %v)", err, ppres)
-			}
-
-			// Compare the ASTs
-
-			if ok, msg := n.Equals(n2, true); !ok {
-				return nil, fmt.Errorf(
-					"Parsed AST from pretty printer is different from the originally parsed AST."+
-						"\nOriginal input: %v\nPretty printed: %v\nPretty AST: %v\n%v\n",
-					input, ppres, n2, msg)
-			}
-
-		} else if ppres != expectedPPRes {
-
-			return nil, fmt.Errorf("Expected pretty printer result is different:\nExpected "+
-				"result: %v\nActual result: %v\n", expectedPPRes, ppres)
-		}
-
-		markASTNodesAsPrettyPrinted(n)
-	}
-
-	return n, err
-}
-
-// Used pretty printer nodes map which is filled during unit testing.
-//
-var usedPrettyPrinterNodes = map[string]bool{}
-
-func markASTNodesAsPrettyPrinted(n *ASTNode) {
-
-	// Mark the encountered node as used
-
-	numChildren := len(n.Children)
-	if numChildren > 0 {
-		usedPrettyPrinterNodes[fmt.Sprintf("%v_%v", n.Name, numChildren)] = true
-	} else {
-		usedPrettyPrinterNodes[n.Name] = true
-	}
-
-	for _, c := range n.Children {
-		markASTNodesAsPrettyPrinted(c)
-	}
-}
-
-func UnitTestPrettyPrinting(input, astOutput, ppOutput string) error {
-	astres, err := ParseWithRuntime("mytest", input, &DummyRuntimeProvider{})
-	if err != nil || fmt.Sprint(astres) != astOutput {
-		return fmt.Errorf("Unexpected parser output:\n%v expected was:\n%v Error: %v", astres, astOutput, err)
-	}
-
-	markASTNodesAsPrettyPrinted(astres)
-
-	ppres, err := PrettyPrint(astres)
-	if err != nil || ppres != ppOutput {
-		return fmt.Errorf("Unexpected result: %v (expected: %v) error: %v", ppres, ppOutput, err)
-	}
-
-	// Make sure the pretty printed result is valid and gets the same parse tree
-
-	astres2, err := ParseWithRuntime("mytest", ppres, &DummyRuntimeProvider{})
-	if err != nil || fmt.Sprint(astres2) != astOutput {
-		return fmt.Errorf("Unexpected parser output from pretty print string:\n%v expected was:\n%v Error: %v", astres2, astOutput, err)
-	}
-
-	return nil
-}
-
-// Helper objects
-
-type DummyRuntimeProvider struct {
-}
-
-func (d *DummyRuntimeProvider) Runtime(n *ASTNode) Runtime {
-
-	// Mark the encountered node as used
-
-	usedNodes[n.Name] = true
-
-	return nil
-}
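
The test harness above enforces two round-trip properties for every parsed input: an AST marshaled to JSON and rebuilt via `ASTFromJSONObject` must equal the original, and the pretty-printed source must parse back to the same AST. The same property reduced to a small, self-contained check (the `node` type and helper are illustrative, not from the ECAL code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"reflect"
)

type node struct {
	Name     string `json:"name"`
	Children []node `json:"children,omitempty"`
}

// checkRoundTrip verifies decode(encode(v)) == v, the property that
// main_test.go asserts for both the JSON form and the pretty-printed
// form of an AST.
func checkRoundTrip(v node) error {
	data, err := json.Marshal(v)
	if err != nil {
		return fmt.Errorf("encode failed: %v", err)
	}
	var back node
	if err := json.Unmarshal(data, &back); err != nil {
		return fmt.Errorf("decode failed: %v", err)
	}
	if !reflect.DeepEqual(v, back) {
		return fmt.Errorf("round trip changed the value: %v vs %v", v, back)
	}
	return nil
}

func main() {
	ast := node{Name: "minus", Children: []node{{Name: "number"}}}
	fmt.Println(checkRoundTrip(ast)) // <nil>
}
```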

+ 0 - 910
lang/ecal/parser/parser.go

@@ -1,910 +0,0 @@
-/*
- * Public Domain Software
- *
- * I (Matthias Ladkau) am the author of the source code in this file.
- * I have placed the source code in this file in the public domain.
- *
- * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
- */
-
-package parser
-
-import (
-	"fmt"
-)
-
-/*
-Map of AST nodes corresponding to lexer tokens. The map determines how a given
-sequence of lexer tokens is organized into an AST.
-*/
-var astNodeMap map[LexTokenID]*ASTNode
-
-func init() {
-	astNodeMap = map[LexTokenID]*ASTNode{
-		TokenEOF: {NodeEOF, nil, nil, nil, nil, 0, ndTerm, nil},
-
-		// Value tokens
-
-		TokenSTRING:     {NodeSTRING, nil, nil, nil, nil, 0, ndTerm, nil},
-		TokenNUMBER:     {NodeNUMBER, nil, nil, nil, nil, 0, ndTerm, nil},
-		TokenIDENTIFIER: {NodeIDENTIFIER, nil, nil, nil, nil, 0, ndIdentifier, nil},
-
-		// Constructed tokens
-
-		TokenSTATEMENTS: {NodeSTATEMENTS, nil, nil, nil, nil, 0, nil, nil},
-		TokenFUNCCALL:   {NodeFUNCCALL, nil, nil, nil, nil, 0, nil, nil},
-		TokenCOMPACCESS: {NodeCOMPACCESS, nil, nil, nil, nil, 0, nil, nil},
-		TokenLIST:       {NodeLIST, nil, nil, nil, nil, 0, nil, nil},
-		TokenMAP:        {NodeMAP, nil, nil, nil, nil, 0, nil, nil},
-		TokenPARAMS:     {NodePARAMS, nil, nil, nil, nil, 0, nil, nil},
-		TokenGUARD:      {NodeGUARD, nil, nil, nil, nil, 0, nil, nil},
-
-		// Condition operators
-
-		TokenGEQ: {NodeGEQ, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenLEQ: {NodeLEQ, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenNEQ: {NodeNEQ, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenEQ:  {NodeEQ, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenGT:  {NodeGT, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenLT:  {NodeLT, nil, nil, nil, nil, 60, nil, ldInfix},
-
-		// Grouping symbols
-
-		TokenLPAREN: {"", nil, nil, nil, nil, 150, ndInner, nil},
-		TokenRPAREN: {"", nil, nil, nil, nil, 0, nil, nil},
-		TokenLBRACK: {"", nil, nil, nil, nil, 150, ndList, nil},
-		TokenRBRACK: {"", nil, nil, nil, nil, 0, nil, nil},
-		TokenLBRACE: {"", nil, nil, nil, nil, 150, ndMap, nil},
-		TokenRBRACE: {"", nil, nil, nil, nil, 0, nil, nil},
-
-		// Separators
-
-		TokenDOT:       {"", nil, nil, nil, nil, 0, nil, nil},
-		TokenCOMMA:     {"", nil, nil, nil, nil, 0, nil, nil},
-		TokenSEMICOLON: {"", nil, nil, nil, nil, 0, nil, nil},
-
-		// Grouping
-
-		TokenCOLON: {NodeKVP, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenEQUAL: {NodePRESET, nil, nil, nil, nil, 60, nil, ldInfix},
-
-		// Arithmetic operators
-
-		TokenPLUS:   {NodePLUS, nil, nil, nil, nil, 110, ndPrefix, ldInfix},
-		TokenMINUS:  {NodeMINUS, nil, nil, nil, nil, 110, ndPrefix, ldInfix},
-		TokenTIMES:  {NodeTIMES, nil, nil, nil, nil, 120, nil, ldInfix},
-		TokenDIV:    {NodeDIV, nil, nil, nil, nil, 120, nil, ldInfix},
-		TokenDIVINT: {NodeDIVINT, nil, nil, nil, nil, 120, nil, ldInfix},
-		TokenMODINT: {NodeMODINT, nil, nil, nil, nil, 120, nil, ldInfix},
-
-		// Assignment statement
-
-		TokenASSIGN: {NodeASSIGN, nil, nil, nil, nil, 10, nil, ldInfix},
-
-		// Import statement
-
-		TokenIMPORT: {NodeIMPORT, nil, nil, nil, nil, 0, ndImport, nil},
-		TokenAS:     {"", nil, nil, nil, nil, 0, ndImport, nil},
-
-		// Sink definition
-
-		TokenSINK:       {NodeSINK, nil, nil, nil, nil, 0, ndSink, nil},
-		TokenKINDMATCH:  {NodeKINDMATCH, nil, nil, nil, nil, 150, ndPrefix, nil},
-		TokenSCOPEMATCH: {NodeSCOPEMATCH, nil, nil, nil, nil, 150, ndPrefix, nil},
-		TokenSTATEMATCH: {NodeSTATEMATCH, nil, nil, nil, nil, 150, ndPrefix, nil},
-		TokenPRIORITY:   {NodePRIORITY, nil, nil, nil, nil, 150, ndPrefix, nil},
-		TokenSUPPRESSES: {NodeSUPPRESSES, nil, nil, nil, nil, 150, ndPrefix, nil},
-
-		// Function definition
-
-		TokenFUNC:   {NodeFUNC, nil, nil, nil, nil, 0, ndFunc, nil},
-		TokenRETURN: {NodeRETURN, nil, nil, nil, nil, 0, ndReturn, nil},
-
-		// Boolean operators
-
-		TokenAND: {NodeAND, nil, nil, nil, nil, 40, nil, ldInfix},
-		TokenOR:  {NodeOR, nil, nil, nil, nil, 30, nil, ldInfix},
-		TokenNOT: {NodeNOT, nil, nil, nil, nil, 20, ndPrefix, nil},
-
-		// Condition operators
-
-		TokenLIKE:      {NodeLIKE, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenIN:        {NodeIN, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenHASPREFIX: {NodeHASPREFIX, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenHASSUFFIX: {NodeHASSUFFIX, nil, nil, nil, nil, 60, nil, ldInfix},
-		TokenNOTIN:     {NodeNOTIN, nil, nil, nil, nil, 60, nil, ldInfix},
-
-		// Constant terminals
-
-		TokenFALSE: {NodeFALSE, nil, nil, nil, nil, 0, ndTerm, nil},
-		TokenTRUE:  {NodeTRUE, nil, nil, nil, nil, 0, ndTerm, nil},
-		TokenNULL:  {NodeNULL, nil, nil, nil, nil, 0, ndTerm, nil},
-
-		// Conditional statements
-
-		TokenIF:   {NodeIF, nil, nil, nil, nil, 0, ndGuard, nil},
-		TokenELIF: {"", nil, nil, nil, nil, 0, nil, nil},
-		TokenELSE: {"", nil, nil, nil, nil, 0, nil, nil},
-
-		// Loop statements
-
-		TokenFOR:      {NodeLOOP, nil, nil, nil, nil, 0, ndLoop, nil},
-		TokenBREAK:    {NodeBREAK, nil, nil, nil, nil, 0, ndTerm, nil},
-		TokenCONTINUE: {NodeCONTINUE, nil, nil, nil, nil, 0, ndTerm, nil},
-	}
-}
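
The eight positional fields in each literal mirror the ASTNode fields used in
this file: name, lexer token, meta data, runtime component, children, binding
power, null denotation and left denotation (the order of the middle nil fields
is an assumption here; the name, binding power and the two denotation functions
are evident from the code below). The binding powers implement operator
precedence in the Pratt style. A minimal sketch of the effect, assuming a
placeholder import path and that AST nodes print one per line with two-space
indentation:

    package main

    import (
        "fmt"

        "example.com/ecal/parser" // Placeholder import path, not the real module
    )

    func main() {

        // TokenTIMES binds at 120 and TokenPLUS at 110, so the
        // multiplication is grouped first: 1 + (2 * 3)
        ast, err := parser.Parse("example", "1 + 2 * 3")
        if err != nil {
            panic(err)
        }

        fmt.Print(ast)
        // Assumed output shape:
        // plus
        //   number: 1
        //   times
        //     number: 2
        //     number: 3
    }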
-
-// Parser
-// ======
-
-/*
-Parser data structure
-*/
-type parser struct {
-	name   string          // Name to identify the input
-	node   *ASTNode        // Current ast node
-	tokens *LABuffer       // Buffer which is connected to the channel which contains lex tokens
-	rp     RuntimeProvider // Runtime provider which creates runtime components
-}
-
-/*
-Parse parses a given input string and returns an AST.
-*/
-func Parse(name string, input string) (*ASTNode, error) {
-	return ParseWithRuntime(name, input, nil)
-}
-
-/*
-ParseWithRuntime parses a given input string and returns an AST decorated with
-runtime components.
-*/
-func ParseWithRuntime(name string, input string, rp RuntimeProvider) (*ASTNode, error) {
-
-	// Create a new parser with a look-ahead buffer of 3
-
-	p := &parser{name, nil, NewLABuffer(Lex(name, input), 3), rp}
-
-	// Read and set initial AST node
-
-	node, err := p.next()
-
-	if err != nil {
-		return nil, err
-	}
-
-	p.node = node
-
-	n, err := p.run(0)
-
-	if err == nil && hasMoreStatements(p, n) {
-
-		st := astNodeMap[TokenSTATEMENTS].instance(p, nil)
-		st.Children = append(st.Children, n)
-
-		for err == nil && hasMoreStatements(p, n) {
-
-			// Skip semicolons
-
-			if p.node.Token.ID == TokenSEMICOLON {
-				skipToken(p, TokenSEMICOLON)
-			}
-
-			n, err = p.run(0)
-			st.Children = append(st.Children, n)
-		}
-
-		n = st
-	}
-
-	if err == nil && p.node != nil && p.node.Token.ID != TokenEOF {
-		token := *p.node.Token
-		err = p.newParserError(ErrUnexpectedEnd, fmt.Sprintf("extra token id:%v (%v)",
-			token.ID, token), token)
-	}
-
-	return n, err
-}
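
The loop above only introduces a statements root when the input contains more
than one top-level statement. Continuing the previous sketch (same placeholder
imports):

    // Two top-level statements are wrapped in a common statements node;
    // the separating semicolon is consumed and produces no AST node.
    ast, _ := parser.Parse("example", "a := 1; b := 2")
    fmt.Print(ast)
    // Assumed output shape:
    // statements
    //   :=
    //     identifier: a
    //     number: 1
    //   :=
    //     identifier: b
    //     number: 2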
-
-/*
-run is the main parser function.
-*/
-func (p *parser) run(rightBinding int) (*ASTNode, error) {
-	var err error
-
-	n := p.node
-
-	p.node, err = p.next()
-	if err != nil {
-		return nil, err
-	}
-
-	// Start with the null denotation of this statement / expression
-
-	if n.nullDenotation == nil {
-		return nil, p.newParserError(ErrImpossibleNullDenotation,
-			n.Token.String(), *n.Token)
-	}
-
-	left, err := n.nullDenotation(p, n)
-	if err != nil {
-		return nil, err
-	}
-
-	// Collect left denotations as long as the left binding power is greater
-	// than the initial right one
-
-	for rightBinding < p.node.binding {
-		var nleft *ASTNode
-
-		n = p.node
-
-		if n.leftDenotation == nil {
-
-			if left.Token.Lline < n.Token.Lline {
-
-				// If the impossible left denotation is on a new line
-				// we might be parsing a new statement
-
-				return left, nil
-			}
-
-			return nil, p.newParserError(ErrImpossibleLeftDenotation,
-				n.Token.String(), *n.Token)
-		}
-
-		p.node, err = p.next()
-
-		if err != nil {
-			return nil, err
-		}
-
-		// Get the next left denotation
-
-		nleft, err = n.leftDenotation(p, n, left)
-
-		left = nleft
-
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return left, nil
-}
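
Assuming the usual Pratt convention that an infix left denotation recurses with
its own binding power (ldInfix is defined elsewhere in this package), a trace
of run(0) for "1 + 2 * 3" would look like this:

    // run(0) on "1 + 2 * 3" (assumed trace):
    //
    //   null denotation of 1          -> left = number:1
    //   0 < binding("+") = 110        -> left denotation of "+"
    //     "+" calls run(110) for its right-hand side:
    //       null denotation of 2      -> left = number:2
    //       110 < binding("*") = 120  -> left denotation of "*"
    //         "*" calls run(120)      -> number:3 (EOF binds at 0, loop ends)
    //       -> times(number:2, number:3)
    //   -> plus(number:1, times(number:2, number:3))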
-
-/*
-next retrieves the next lexer token.
-*/
-func (p *parser) next() (*ASTNode, error) {
-	var preComments []MetaData
-	var postComments []MetaData
-
-	token, more := p.tokens.Next()
-
-	// Skip over pre comment tokens
-
-	for more && token.ID == TokenPRECOMMENT {
-		preComments = append(preComments, NewLexTokenInstance(token))
-		token, more = p.tokens.Next()
-	}
-
-	// Skip over post comment tokens
-
-	for more && token.ID == TokenPOSTCOMMENT {
-		postComments = append(postComments, NewLexTokenInstance(token))
-		token, more = p.tokens.Next()
-	}
-
-	if !more {
-
-		// Unexpected end of input - the associated token is an empty error token
-
-		return nil, p.newParserError(ErrUnexpectedEnd, "", token)
-
-	} else if token.ID == TokenError {
-
-		// There was a lexer error; wrap it in a parser error
-
-		return nil, p.newParserError(ErrLexicalError, token.Val, token)
-
-	} else if node, ok := astNodeMap[token.ID]; ok {
-
-		// We got a normal AST component
-
-		ret := node.instance(p, &token)
-
-		ret.Meta = append(ret.Meta, preComments...) // Attach pre comments to the next AST node
-		if len(postComments) > 0 && p.node != nil {
-			p.node.Meta = append(p.node.Meta, postComments...) // Attach post comments to the previous AST node
-		}
-
-		return ret, nil
-	}
-
-	return nil, p.newParserError(ErrUnknownToken, fmt.Sprintf("id:%v (%v)", token.ID, token), token)
-}
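
The effect is that comments travel with the statement they belong to. A hedged
sketch (comment syntax as declared in const.go):

    // a := 1  # a post comment is attached to the previous AST node
    //
    // /* a pre comment is attached to the next AST node, */
    // b := 2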
-
-// Standard null denotation functions
-// ==================================
-
-/*
-ndTerm is used for terminals.
-*/
-func ndTerm(p *parser, self *ASTNode) (*ASTNode, error) {
-	return self, nil
-}
-
-/*
-ndInner returns the inner expression of an enclosed block and discards the
-block token. This function is used for brackets.
-*/
-func ndInner(p *parser, self *ASTNode) (*ASTNode, error) {
-
-	// Get the inner expression
-
-	exp, err := p.run(0)
-	if err != nil {
-		return nil, err
-	}
-
-	// Return the inner expression here, discarding the bracket tokens
-
-	return exp, skipToken(p, TokenRPAREN)
-}
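
Since the bracket tokens are discarded, parentheses regroup the tree without
adding a node of their own. Continuing the sketch:

    // Parentheses override precedence but leave no trace in the AST.
    ast, _ := parser.Parse("example", "(1 + 2) * 3")
    fmt.Print(ast)
    // Assumed output shape:
    // times
    //   plus
    //     number: 1
    //     number: 2
    //   number: 3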
-
-/*
-ndPrefix is used for prefix operators.
-*/
-func ndPrefix(p *parser, self *ASTNode) (*ASTNode, error) {
-
-	// Make sure a prefix operator only binds to the immediately following item
-
-	val, err := p.run(self.binding + 20)
-	if err != nil {
-		return nil, err
-	}
-
-	self.Children = append(self.Children, val)
-
-	return self, nil
-}
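
The + 20 makes a prefix use of a token bind tighter than its infix use: unary
minus runs at 110 + 20 = 130, above TokenTIMES at 120, so the prefix captures
only the immediately following item. Continuing the sketch:

    // "-2 * 3" groups as (-2) * 3, not as -(2 * 3).
    ast, _ := parser.Parse("example", "-2 * 3")
    fmt.Print(ast)
    // Assumed output shape:
    // times
    //   minus
    //     number: 2
    //   number: 3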
-
-// Null denotation functions for specific expressions
-// ==================================================
-
-/*
-ndImport is used to parse imports.
-*/
-func ndImport(p *parser, self *ASTNode) (*ASTNode, error) {
-
-	// Must specify a file path
-
-	err := acceptChild(p, self, TokenSTRING)
-
-	if err == nil {
-
-		// Must specify AS
-
-		if err = skipToken(p, TokenAS); err == nil {
-
-			// Must specify an identifier
-
-			err = acceptChild(p, self, TokenIDENTIFIER)
-		}
-	}
-
-	return self, err
-}
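
An import therefore always yields exactly two children, the path string and
the alias identifier, while the AS keyword is dropped. Continuing the sketch
(the quoting of string values in the output is an assumption):

    ast, _ := parser.Parse("example", `import "foo/bar.ecal" as bar`)
    fmt.Print(ast)
    // Assumed output shape:
    // import
    //   string: 'foo/bar.ecal'
    //   identifier: bar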
-
-/*
-ndSink is used to parse sinks.
-*/
-func ndSink(p *parser, self *ASTNode) (*ASTNode, error) {
-	var exp, ret *ASTNode
-
-	// Must specify a name
-
-	err := acceptChild(p, self, TokenIDENTIFIER)
-
-	if err == nil {
-
-		// Parse the rest of the parameters as children until we reach the body
-
-		for err == nil && IsNotEndAndNotToken(p, TokenLBRACE) {
-			if exp, err = p.run(150); err == nil {
-				self.Children = append(self.Children, exp)
-
-				// Skip commas
-
-				if p.node.Token.ID == TokenCOMMA {
-					err = skipToken(p, TokenCOMMA)
-				}
-			}
-		}
-
-		if err == nil {
-
-			// Parse the body
-
-			ret, err = parseInnerStatements(p, self)
-		}
-	}
-
-	return ret, err
-}
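
A sink definition parses into a sink node whose children are the name
identifier, one prefix node per clause (kindmatch, priority and so on, see the
token map above) and a trailing statements node for the body. A hedged sketch,
with the surface syntax reconstructed from the tokens handled here rather than
taken from the removed documentation:

    input := `
    sink mysink
        kindmatch [ "foo.*" ],
        priority 10
    {
        a := 1
    }`

    ast, _ := parser.Parse("example", input)
    fmt.Print(ast)
    // Assumed output shape:
    // sink
    //   identifier: mysink
    //   kindmatch
    //     list
    //       string: 'foo.*'
    //   priority
    //     number: 10
    //   statements
    //     :=
    //       identifier: a
    //       number: 1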
-
-/*
-ndFunc is used to parse function definitions.
-*/
-func ndFunc(p *parser, self *ASTNode) (*ASTNode, error) {
-	var exp *ASTNode
-	var err error
-
-	// Might specify a function name
-
-	if p.node.Token.ID == TokenIDENTIFIER {
-		err = acceptChild(p, self, TokenIDENTIFIER)
-	}
-
-	// Read in parameters
-
-	if err == nil {
-		err = skipToken(p, TokenLPAREN)
-
-		params := astNodeMap[TokenPARAMS].instance(p, nil)
-		self.Children = append(self.Children, params)
-
-		for err == nil && IsNotEndAndNotToken(p, TokenRPAREN) {
-
-			// Parse all the expressions inside
-
-			if exp, err = p.run(0); err == nil {
-				params.Children = append(params.Children, exp)
-
-				if p.node.Token.ID == TokenCOMMA {
-					err = skipToken(p, TokenCOMMA)
-				}
-			}
-		}
-
-		if err == nil {
-			err = skipToken(p, TokenRPAREN)
-		}
-	}
-
-	if err == nil {
-
-		// Parse the body
-
-		self, err = parseInnerStatements(p, self)
-	}
-
-	return self, err
-}
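
Note that the params node is created even for an empty parameter list, so a
function node has a predictable shape: optional name identifier, params,
statements. Continuing the sketch:

    ast, _ := parser.Parse("example", `
    func add(a, b) {
        return a + b
    }`)
    fmt.Print(ast)
    // Assumed output shape:
    // func
    //   identifier: add
    //   params
    //     identifier: a
    //     identifier: b
    //   statements
    //     return
    //       plus
    //         identifier: a
    //         identifier: b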
-
-/*
-ndReturn is used to parse return statements.
-*/
-func ndReturn(p *parser, self *ASTNode) (*ASTNode, error) {
-	var err error
-
-	if self.Token.Lline == p.node.Token.Lline {
-		var val *ASTNode
-
-		// Consume the next expression only if it is on the same line
-
-		val, err = p.run(0)
-
-		if err == nil {
-			self.Children = append(self.Children, val)
-		}
-	}
-
-	return self, err
-}
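
The same-line check gives return its newline-sensitive behaviour:

    // return a + b   -> return node with one child (the plus expression)
    //
    // return
    // a + b           -> bare return node; "a + b" is parsed as the next,
    //                    separate statement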
-
-/*
-ndIdentifier is used to parse identifiers and function calls.
-*/
-func ndIdentifier(p *parser, self *ASTNode) (*ASTNode, error) {
-	var parseMore, parseSegment, parseFuncCall, parseCompositionAccess func(parent *ASTNode) error
-
-	parseMore = func(current *ASTNode) error {
-		var err error
-
-		if p.node.Token.ID == TokenDOT {
-			err = parseSegment(current)
-		} else if p.node.Token.ID == TokenLPAREN {
-			err = parseFuncCall(current)
-		} else if p.node.Token.ID == TokenLBRACK && p.node.Token.Lline == self.Token.Lline {
-
-			// Composition access needs to be on the same line as the identifier
-			// as we might otherwise have a list
-
-			err = parseCompositionAccess(current)
-		}
-
-		return err
-	}
-
-	parseSegment = func(current *ASTNode) error {
-		var err error
-		var next *ASTNode
-
-		if err = skipToken(p, TokenDOT); err == nil {
-			next = p.node
-			if err = acceptChild(p, current, TokenIDENTIFIER); err == nil {
-				err = parseMore(next)
-			}
-		}
-
-		return err
-	}
-
-	parseFuncCall = func(current *ASTNode) error {
-		var exp *ASTNode
-
-		err := skipToken(p, TokenLPAREN)
-
-		fc := astNodeMap[TokenFUNCCALL].instance(p, nil)
-		current.Children = append(current.Children, fc)
-
-		// Read in parameters
-
-		for err == nil && IsNotEndAndNotToken(p, TokenRPAREN) {
-
-			// Parse all the parameter expressions
-
-			if exp, err = p.run(0); err == nil {