Browse Source

feat: Adding various common code fragments

Matthias Ladkau 4 years ago
parent
commit
80031ffe1d
100 changed files with 21971 additions and 2 deletions
  1. 23 2
      README.md
  2. 121 0
      bitutil/bitutil.go
  3. 81 0
      bitutil/bitutil_test.go
  4. 89 0
      bitutil/murmurhash3.go
  5. 75 0
      bitutil/murmurhash3_test.go
  6. 416 0
      bitutil/packedlist.go
  7. 360 0
      bitutil/packedlist_test.go
  8. 189 0
      cryptutil/gencert.go
  9. 149 0
      cryptutil/gencert_test.go
  10. 109 0
      cryptutil/stringcrypt.go
  11. 53 0
      cryptutil/stringcrypt_test.go
  12. 37 0
      cryptutil/uuid.go
  13. 22 0
      cryptutil/uuid_test.go
  14. 109 0
      cryptutil/x509util.go
  15. 94 0
      cryptutil/x509util_test.go
  16. 65 0
      datautil/datacopy.go
  17. 80 0
      datautil/datacopy_test.go
  18. 246 0
      datautil/mapcache.go
  19. 148 0
      datautil/mapcache_test.go
  20. 52 0
      datautil/nesting.go
  21. 79 0
      datautil/nesting_test.go
  22. 102 0
      datautil/nonce.go
  23. 55 0
      datautil/nonce_test.go
  24. 112 0
      datautil/persistentmap.go
  25. 146 0
      datautil/persistentmap_test.go
  26. 181 0
      datautil/ringbuffer.go
  27. 115 0
      datautil/ringbuffer_test.go
  28. 701 0
      datautil/userdb.go
  29. 484 0
      datautil/userdb_test.go
  30. 30 0
      defs/rambazamba/eventsource.go
  31. 40 0
      defs/rumble/func.go
  32. 45 0
      defs/rumble/globals.go
  33. 22 0
      defs/rumble/runtime.go
  34. 27 0
      defs/rumble/variables.go
  35. 68 0
      errorutil/errorutil.go
  36. 64 0
      errorutil/errorutil_test.go
  37. 418 0
      fileutil/config.go
  38. 221 0
      fileutil/config_test.go
  39. 134 0
      fileutil/fileutil.go
  40. 135 0
      fileutil/fileutil_test.go
  41. 439 0
      fileutil/multifilebuffer.go
  42. 388 0
      fileutil/multifilebuffer_test.go
  43. 89 0
      fileutil/zip.go
  44. 79 0
      fileutil/zip_test.go
  45. 127 0
      flowutil/eventpump.go
  46. 238 0
      flowutil/eventpump_test.go
  47. 3 0
      go.mod
  48. 1284 0
      httputil/access/acl.go
  49. 1271 0
      httputil/access/acl_test.go
  50. 45 0
      httputil/auth/auth.go
  51. 194 0
      httputil/auth/auth_test.go
  52. 139 0
      httputil/auth/basic.go
  53. 222 0
      httputil/auth/basic_test.go
  54. 300 0
      httputil/auth/cookie.go
  55. 243 0
      httputil/auth/cookie_test.go
  56. 278 0
      httputil/httpserver.go
  57. 290 0
      httputil/httpserver_test.go
  58. 113 0
      httputil/user/session.go
  59. 151 0
      httputil/user/session_test.go
  60. 258 0
      httputil/user/user.go
  61. 186 0
      httputil/user/user_test.go
  62. 114 0
      httputil/util.go
  63. 144 0
      httputil/util_test.go
  64. 141 0
      imageutil/asciiraster.go
  65. 163 0
      imageutil/asciiraster_test.go
  66. 314 0
      imageutil/rasterfont1.go
  67. 852 0
      imageutil/rasterfont2.go
  68. 149 0
      lang/graphql/parser/const.go
  69. 487 0
      lang/graphql/parser/lexer.go
  70. 246 0
      lang/graphql/parser/lexer_test.go
  71. 126 0
      lang/graphql/parser/node.go
  72. 830 0
      lang/graphql/parser/parser.go
  73. 1236 0
      lang/graphql/parser/parser_test.go
  74. 66 0
      lang/graphql/parser/parsererrors.go
  75. 37 0
      lang/graphql/parser/runtime.go
  76. 227 0
      lockutil/lockfile.go
  77. 139 0
      lockutil/lockfile_test.go
  78. 111 0
      logutil/formatter.go
  79. 72 0
      logutil/formatter_test.go
  80. 299 0
      logutil/logger.go
  81. 179 0
      logutil/logger_test.go
  82. 34 0
      pools/pools.go
  83. 56 0
      pools/pools_test.go
  84. 514 0
      pools/threadpool.go
  85. 415 0
      pools/threadpool_test.go
  86. 83 0
      sortutil/heap.go
  87. 101 0
      sortutil/heap_test.go
  88. 227 0
      sortutil/priorityqueue.go
  89. 196 0
      sortutil/priorityqueue_test.go
  90. 62 0
      sortutil/sortutil.go
  91. 48 0
      sortutil/sortutil_test.go
  92. 118 0
      sortutil/vectorclock.go
  93. 97 0
      sortutil/vectorclock_test.go
  94. 710 0
      stringutil/stringutil.go
  95. 596 0
      stringutil/stringutil_test.go
  96. 163 0
      stringutil/transform.go
  97. 103 0
      stringutil/transform_test.go
  98. 246 0
      termutil/autoterm.go
  99. 236 0
      termutil/autoterm_test.go
  100. 0 0
      termutil/fileterm.go

+ 23 - 2
README.md

@@ -1,3 +1,24 @@
-# common
+Common
+--
+Common is a collection of common algorithms and functions which are used across a multitude of projects on devt.de.
 
-Common algorithms and functions
+|Package|Description|
+| --- | --- |
+| bitutil | Byte processing helper functions. |
+| cryptutil | Processing certificates, uuids and encryption/decryption helpers. |
+| datautil | Datastructures and data storage helpers. |
+| defs | Definitions used across devt.de projects. |
+| errorutil | Helper functions around error processing. |
+| fileutil | File handling utilities. |
+| flowutil | Datastructures for flow control. |
+| httputil | Helpers for HTTP handling. |
+| imageutil | Image and pixel processing. |
+| lang | Lexer and parsers. |
+| lockutil | Utilities for locking. |
+| logutil | Simple logging infrastructure. |
+| pools | Pooling helpers. |
+| sortutil | Datastructures and utilities around sorting. |
+| stringutil | String processing helpers. |
+| termutil | Unix/Windows terminal helpers. |
+| testutil | Utilities for code testing. |
+| timeutil | Helpers for time processing. |

+ 121 - 0
bitutil/bitutil.go

@@ -0,0 +1,121 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package bitutil contains common functions for bit-level operations.
+
+Pack and Unpack functions are used to pack and unpack a list of non-zero numbers
+very efficiently.
+*/
+package bitutil
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+)
+
/*
CompareByteArray compares the contents of two byte array slices. Returns true
if both slices are equivalent in terms of size and content. The capacity may
be different.
*/
func CompareByteArray(arr1 []byte, arr2 []byte) bool {

	// bytes.Equal performs the same length check and element-wise
	// comparison as the former hand-rolled loop but uses the optimized
	// standard library implementation.

	return bytes.Equal(arr1, arr2)
}
+
/*
ByteSizeString takes a numeric byte size and returns it in human readable form.
The useISU parameter selects the unit system: true uses decimal units (kB, MB,
... as defined in the International System of Units), false uses the more
common binary units (KiB, MiB, ... as defined by the International
Electrotechnical Commission in 1998).
*/
func ByteSizeString(size int64, useISU bool) string {
	unit := float64(1024)
	if useISU {
		unit = 1000
	}

	value := float64(size)

	// Values below one unit are reported as plain bytes

	if value < unit {
		return fmt.Sprintf("%d B", int(value))
	}

	// Determine the magnitude (1 = kilo/kibi, 2 = mega/mebi, ...)

	exp := math.Floor(math.Log(value) / math.Log(unit))
	scaled := value / math.Pow(unit, exp)

	var prefix string
	if useISU {
		prefix = string("kMGTPE"[int(exp-1)])
	} else {
		prefix = fmt.Sprintf("%vi", string("KMGTPE"[int(exp-1)]))
	}

	return fmt.Sprintf("%.1f %sB", scaled, prefix)
}
+
/*
HexDump produces a more-or-less human readable hex dump from a given byte array
slice. Each row shows up to 10 bytes as hex values followed by their character
representation.
*/
func HexDump(data []byte) string {
	const bytesPerRow = 10

	var out bytes.Buffer
	var ascii bytes.Buffer

	out.WriteString("====\n000000  ")

	for pos, b := range data {

		// Close the previous row once it holds bytesPerRow bytes and
		// start a new one labelled with the current offset

		if pos != 0 && pos%bytesPerRow == 0 {
			fmt.Fprintf(&out, " %s\n%06x  ", ascii.String(), pos)
			ascii.Reset()
		}

		fmt.Fprintf(&out, "%02X ", b)
		fmt.Fprintf(&ascii, "%c", b)
	}

	// Pad a partially filled last row so the character column lines up

	if rem := len(data) % bytesPerRow; rem != 0 {
		for i := rem; i < bytesPerRow; i++ {
			out.WriteString("   ")
		}
	}

	fmt.Fprintf(&out, " %s\n====\n", ascii.String())

	return out.String()
}

+ 81 - 0
bitutil/bitutil_test.go

@@ -0,0 +1,81 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package bitutil
+
+import (
+	"testing"
+)
+
+func TestCompareByteArray(t *testing.T) {
+	testdata1 := []byte("Test")
+	testdata2 := make([]byte, 4, 5)
+	testdata3 := make([]byte, 3, 3)
+
+	if CompareByteArray(testdata1, testdata2) {
+		t.Error("Byte arrays should not be considered equal before copying data.")
+	}
+
+	if CompareByteArray(testdata1, testdata3) {
+		t.Error("Byte arrays should not be considered equal if the length is different.")
+	}
+
+	copy(testdata2, testdata1)
+
+	if cap(testdata1) == cap(testdata2) {
+		t.Error("Capacity of testdata sclices should be different.")
+	}
+
+	if !CompareByteArray(testdata1, testdata2) {
+		t.Error("Byte arrays should be considered equal.")
+	}
+}
+
+func TestByteSizeString(t *testing.T) {
+	// Test byte sizes
+	testdata := []int64{10000, 1024, 500, 1233456, 44166037, 84166037, 5000000000}
+
+	// non-ISU values
+	expected1 := []string{"9.8 KiB", "1.0 KiB", "500 B", "1.2 MiB", "42.1 MiB", "80.3 MiB", "4.7 GiB"}
+
+	// ISU values
+	expected2 := []string{"10.0 kB", "1.0 kB", "500 B", "1.2 MB", "44.2 MB", "84.2 MB", "5.0 GB"}
+
+	for i, test := range testdata {
+		res := ByteSizeString(test, false)
+		if res != expected1[i] {
+			t.Error("Unexpected value for non-isu value:", test,
+				"got:", res, "expected:", expected1[i])
+			return
+		}
+
+		res = ByteSizeString(test, true)
+		if res != expected2[i] {
+			t.Error("Unexpected value for isu value:", test,
+				"got:", res, "expected:", expected2[i])
+			return
+		}
+	}
+}
+
+func TestHexDump(t *testing.T) {
+	testdata := []byte("This is a test text. This is a test text.")
+
+	res := HexDump(testdata)
+	if res != "====\n"+
+		"000000  54 68 69 73 20 69 73 20 61 20  This is a \n"+
+		"00000a  74 65 73 74 20 74 65 78 74 2E  test text.\n"+
+		"000014  20 54 68 69 73 20 69 73 20 61   This is a\n"+
+		"00001e  20 74 65 73 74 20 74 65 78 74   test text\n"+
+		"000028  2E                             .\n"+
+		"====\n" {
+
+		t.Error("Invalid boundaries should cause an error")
+	}
+}

+ 89 - 0
bitutil/murmurhash3.go

@@ -0,0 +1,89 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package bitutil
+
+import "fmt"
+
/*
c1 and c2 are the two 32 bit multiplication constants used by the
MurmurHash3 (32 bit) mixing function below.
*/
const (
	c1 uint32 = 0xcc9e2d51
	c2 uint32 = 0x1b873593
)
+
/*
MurMurHashData hashes a given array of bytes. This is an implementation
of Austin Appleby's MurmurHash3 (32bit) function.

Reference implementation: http://code.google.com/p/smhasher/wiki/MurmurHash3

data   - Byte slice to hash.
offset - Index of the first byte to include.
size   - Number of bytes to include.
seed   - Seed value for the hash.

Returns the 32 bit hash value or an error if the given boundaries are
invalid.
*/
func MurMurHashData(data []byte, offset int, size int, seed int) (uint32, error) {

	// Check parameters

	if offset < 0 || size < 0 {
		return 0, fmt.Errorf("Invalid data boundaries; offset: %v; size: %v",
			offset, size)
	}

	h1 := uint32(seed)

	// end is the (absolute) index just past the last full 4 byte block

	end := offset + size
	end -= end % 4

	// Check length of available data
	//
	// NOTE(review): this check looks suspicious. It rejects input where
	// len(data) == end although the block loop only reads up to
	// data[end-1], and it does not account for the up to 3 tail bytes
	// read below (data[end] .. data[end+2]). Also end is aligned to an
	// absolute multiple of 4 rather than relative to offset, which only
	// matches the reference implementation for aligned offsets. Confirm
	// against the reference before changing - the unit tests pin the
	// current error behavior.

	if len(data) <= end {
		return 0, fmt.Errorf("Data out of bounds; set boundary: %v; data length: %v",
			end, len(data))
	}

	// Body: mix in all full 4 byte blocks (assembled little-endian)

	for i := offset; i < end; i += 4 {

		var k1 = uint32(data[i])
		k1 |= uint32(data[i+1]) << 8
		k1 |= uint32(data[i+2]) << 16
		k1 |= uint32(data[i+3]) << 24

		k1 *= c1
		k1 = (k1 << 15) | (k1 >> 17) // ROTL32(k1,15);
		k1 *= c2

		h1 ^= k1
		h1 = (h1 << 13) | (h1 >> 19) // ROTL32(h1,13);
		h1 = h1*5 + 0xe6546b64
	}

	// Tail - mix in the remaining 1 to 3 bytes (the switch cases fall
	// through so the bytes accumulate into a single k1 value)

	var k1 uint32

	switch size & 3 {
	case 3:
		k1 = uint32(data[end+2]) << 16
		fallthrough
	case 2:
		k1 |= uint32(data[end+1]) << 8
		fallthrough
	case 1:
		k1 |= uint32(data[end])
		k1 *= c1
		k1 = (k1 << 15) | (k1 >> 17) // ROTL32(k1,15);
		k1 *= c2
		h1 ^= k1
	}

	// Finalization: avalanche the bits of h1

	h1 ^= uint32(size)

	h1 ^= h1 >> 16
	h1 *= 0x85ebca6b
	h1 ^= h1 >> 13
	h1 *= 0xc2b2ae35
	h1 ^= h1 >> 16

	return h1, nil
}

+ 75 - 0
bitutil/murmurhash3_test.go

@@ -0,0 +1,75 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package bitutil
+
+import (
+	"testing"
+)
+
// testData is the classic MurmurHash sample sentence.
//
// NOTE(review): this package-level variable appears unused - the test below
// builds the same byte slice locally; confirm before removing.
var testData = []byte("Now is the time for all good men to come to the aid of their country")

// resultArray1 holds the expected 32 bit hash value (seed 4) for every
// prefix length of the sample sentence, i.e. resultArray1[i] is the hash
// of the first i bytes.
var resultArray1 = []uint32{
	0x249cb285, 0xcae32c45, 0x49cc6fdd, 0x3c89b814, 0xdc9778bb, 0x6db6607a,
	0x736df8ad, 0xd367e257, 0x59b32232, 0x2496a9b4, 0x01d69f33, 0x08454378,
	0x4ad4f630, 0x0ae1ca05, 0x042bdb5b, 0xbf3592e8, 0x0ed8b048, 0xb86958db,
	0xa74ca5b6, 0xb7982271, 0x10a77c40, 0x8caba8ef, 0xe5085ab6, 0x8ee964b8,
	0x170f0222, 0x42dec76d, 0xc4ebe4e5, 0x3d246566, 0x64f1133e, 0x8a0597dd,
	0x5b13cdb8, 0x1c723636, 0xc8b60a2f, 0xb572fe46, 0xb801f177, 0x71d44c64,
	0x755aeff1, 0x66ba2eeb, 0x5cfec249, 0x5b9d603f, 0x4e916049, 0x07622306,
	0x57d4271f, 0x3fa8e56a, 0x4b4fe703, 0x995e958d, 0xdaf48fbb, 0xbe381e68,
	0xd4af5452, 0x6b8e4cdc, 0x3c7bbc57, 0xd834a3e0, 0x78665c77, 0x5ab0d747,
	0x4b34afb7, 0xbce90104, 0x25a31264, 0xa348c314, 0xab9fb213, 0x48f40ea9,
	0xa232f18e, 0xda12f11a, 0x7dcdfcfb, 0x24381ba8, 0x1a15737d, 0x32b1ea01,
	0x7ed7f6c6, 0xd16ab3ed}
+
+func TestMurMurHashData(t *testing.T) {
+
+	data := []byte{0xf6, 0x02, 0x03, 0x04}
+
+	// Test invalid data boundaries
+
+	_, err := MurMurHashData(data, 1, -3, 6)
+
+	if err == nil {
+		t.Error("Invalid boundaries should cause an error")
+	} else if err.Error() != "Invalid data boundaries; offset: 1; size: -3" {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	_, err = MurMurHashData(data, 1, 5, 6)
+
+	if err == nil {
+		t.Error("Invalid boundaries should cause an error")
+	} else if err.Error() != "Data out of bounds; set boundary: 4; data length: 4" {
+		t.Errorf("Unexpected error: %v", err)
+	}
+
+	// Test against data
+
+	// Go source code is always UTF-8, so the string literal is UTF-8 text.
+	data = []byte("Now is the time for all good men to come to the aid of their country")
+
+	doTest := func(offset, size int) uint32 {
+		res, err := MurMurHashData(data, offset, size, 4)
+
+		if err != nil {
+			t.Errorf("Unexpected error: %v", err)
+		}
+
+		return res
+	}
+
+	for i := 0; i < len(resultArray1); i++ {
+		res := doTest(0, i)
+		if res != resultArray1[i] {
+			t.Errorf("Unexpected result; Expected: 0x%x; Got: 0x%x", resultArray1[i], res)
+		}
+	}
+}

+ 416 - 0
bitutil/packedlist.go

@@ -0,0 +1,416 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package bitutil
+
+import (
+	"bytes"
+	"encoding/binary"
+	"math"
+)
+
/*
Different types of list packing. The type is stored in the two most
significant bits of the first byte of a packed list.
*/
const (
	packListType2Bit = 0x1 // 2 bit items (values 1-3)
	packListType3Bit = 0x2 // 3 bit items (values 1-7)
	packListType6Bit = 0x3 // 6 bit items (values 1-63)
	packListTypeVar  = 0x0 // Variable size items (8/16/32/64 bit)
)
+
+/*
+PackList packs a given list to a string. Depending on the given highest number the
+list is packed in the most efficient way.
+*/
+func PackList(unpackedlist []uint64, highest uint64) string {
+
+	// Depending on the highest number convert to given list
+
+	switch {
+	case highest <= 3:
+		list := make([]byte, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = byte(num)
+		}
+		return PackList2Bit(list)
+
+	case highest <= 7:
+		list := make([]byte, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = byte(num)
+		}
+		return PackList3Bit(list)
+
+	case highest <= 63:
+		list := make([]byte, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = byte(num)
+		}
+		return PackList6Bit(list)
+
+	case highest <= math.MaxUint8:
+		list := make([]byte, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = byte(num)
+		}
+		return PackList8Bit(list)
+
+	case highest <= math.MaxUint16:
+		list := make([]uint16, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = uint16(num)
+		}
+		return PackList16Bit(list)
+
+	case highest <= math.MaxUint32:
+		list := make([]uint32, len(unpackedlist))
+		for i, num := range unpackedlist {
+			list[i] = uint32(num)
+		}
+		return PackList32Bit(list)
+	}
+
+	return PackList64Bit(unpackedlist)
+}
+
+/*
+UnpackList unpacks a list from a packed string.
+*/
+func UnpackList(packedlist string) []uint64 {
+	plist := []byte(packedlist)
+
+	if len(plist) == 0 {
+		return nil
+	}
+
+	if plist[0]&0xC0 == packListTypeVar {
+		return UnpackBigList(packedlist)
+	}
+
+	res := UnpackSmallList(packedlist)
+	ret := make([]uint64, len(res))
+
+	for i, item := range res {
+		ret[i] = uint64(item)
+	}
+
+	return ret
+}
+
/*
PackList8Bit packs a list of 8 bit numbers.
*/
func PackList8Bit(list []uint8) string {
	var bb bytes.Buffer

	// Leading type byte 0x00 marks an 8 bit packed list

	bb.WriteByte(0x00)

	// binary.Write encodes the whole slice element by element

	binary.Write(&bb, binary.LittleEndian, list)

	return bb.String()
}
+
/*
PackList16Bit packs a list of 16 bit numbers.
*/
func PackList16Bit(list []uint16) string {
	var bb bytes.Buffer

	// Leading type byte 0x01 marks a 16 bit packed list

	bb.WriteByte(0x01)

	// binary.Write encodes the whole slice element by element

	binary.Write(&bb, binary.LittleEndian, list)

	return bb.String()
}
+
/*
PackList32Bit packs a list of 32 bit numbers.
*/
func PackList32Bit(list []uint32) string {
	var bb bytes.Buffer

	// Leading type byte 0x02 marks a 32 bit packed list

	bb.WriteByte(0x02)

	// binary.Write encodes the whole slice element by element

	binary.Write(&bb, binary.LittleEndian, list)

	return bb.String()
}
+
/*
PackList64Bit packs a list of 64 bit numbers.
*/
func PackList64Bit(list []uint64) string {
	var bb bytes.Buffer

	// Leading type byte 0x03 marks a 64 bit packed list

	bb.WriteByte(0x03)

	// binary.Write encodes the whole slice element by element

	binary.Write(&bb, binary.LittleEndian, list)

	return bb.String()
}
+
/*
UnpackBigList unpacks a list which has large values. The first byte of the
packed string selects the item width (0x00 = 8 bit, 0x01 = 16 bit,
0x02 = 32 bit, 0x03 = 64 bit).
*/
func UnpackBigList(packedlist string) []uint64 {
	var result []uint64

	raw := []byte(packedlist)
	payload := raw[1:]
	reader := bytes.NewReader(payload)

	switch raw[0] {

	case 0x00: // 8 bit items
		var item uint8
		result = make([]uint64, len(payload))
		for i := range result {
			binary.Read(reader, binary.LittleEndian, &item)
			result[i] = uint64(item)
		}

	case 0x01: // 16 bit items
		var item uint16
		result = make([]uint64, len(payload)/2)
		for i := range result {
			binary.Read(reader, binary.LittleEndian, &item)
			result[i] = uint64(item)
		}

	case 0x02: // 32 bit items
		var item uint32
		result = make([]uint64, len(payload)/4)
		for i := range result {
			binary.Read(reader, binary.LittleEndian, &item)
			result[i] = uint64(item)
		}

	case 0x03: // 64 bit items can be read directly into the result slice
		result = make([]uint64, len(payload)/8)
		binary.Read(reader, binary.LittleEndian, result)
	}

	return result
}
+
+/*
+PackList2Bit packs a list of bytes into a string using 2 bits for each item.
+(Items must be between 1 and 3)
+*/
+func PackList2Bit(list []byte) string {
+	if len(list) == 0 {
+		return ""
+	}
+
+	// Packing the list with 2 bit items reduces the size by a factor of 4
+
+	ret := make([]byte, int(math.Ceil(float64(1)/3+float64(len(list)-1)/4)))
+
+	if len(list) == 1 {
+		ret[0] = list2byte2bit(packListType2Bit, list[0], 0, 0)
+	} else if len(list) == 2 {
+		ret[0] = list2byte2bit(packListType2Bit, list[0], list[1], 0)
+	} else {
+		ret[0] = list2byte2bit(packListType2Bit, list[0], list[1], list[2])
+
+		j := 1
+		for i := 3; i < len(list); i += 4 {
+			if len(list[i:]) == 1 {
+				ret[j] = list2byte2bit(list[i], 0, 0, 0)
+			} else if len(list[i:]) == 2 {
+				ret[j] = list2byte2bit(list[i], list[i+1], 0, 0)
+			} else if len(list[i:]) == 3 {
+				ret[j] = list2byte2bit(list[i], list[i+1], list[i+2], 0)
+			} else {
+				ret[j] = list2byte2bit(list[i], list[i+1], list[i+2], list[i+3])
+			}
+			j++
+		}
+	}
+
+	return string(ret)
+}
+
/*
PackList3Bit packs a list of bytes into a string using 3 bits for each item.
(Items must be between 1 and 7)
*/
func PackList3Bit(list []byte) string {
	if len(list) == 0 {
		return ""
	}

	// Packing the list with 3 bit items reduces the size by a factor of 2
	// (the list type header and the first two items share the first byte)

	ret := make([]byte, int(math.Ceil(float64(len(list))/2)))

	if len(list) == 1 {
		ret[0] = list2byte3bitAndHeader(packListType3Bit, list[0], 0)
	} else {
		ret[0] = list2byte3bitAndHeader(packListType3Bit, list[0], list[1])

		// Remaining items are packed in pairs; a 0 marks an unused slot

		j := 1
		for i := 2; i < len(list); i += 2 {
			if len(list[i:]) == 1 {
				ret[j] = list2byte3bitAndHeader(0, list[i], 0)
			} else {
				ret[j] = list2byte3bitAndHeader(0, list[i], list[i+1])
			}
			j++
		}
	}

	return string(ret)
}
+
+/*
+PackList6Bit packs a list of bytes into a string using 6 bits for each item.
+(Items must be between 1 and 63)
+*/
+func PackList6Bit(list []byte) string {
+	if len(list) == 0 {
+		return ""
+	}
+
+	// Packing the list with 6 bit items does not reduce the factor
+
+	ret := make([]byte, len(list))
+
+	if len(list) == 1 {
+		ret[0] = list2byte6bitAndHeader(packListType6Bit, list[0])
+	} else {
+		ret[0] = list2byte6bitAndHeader(packListType6Bit, list[0])
+
+		for i := 1; i < len(list); i++ {
+			ret[i] = list2byte6bitAndHeader(0, list[i])
+		}
+	}
+
+	return string(ret)
+}
+
+/*
+UnpackSmallList unpacks a string into a list of bytes. Returns the list of bytes
+or a list of a single 0x00 byte if the numbers in the list are too big.
+*/
+func UnpackSmallList(packedlist string) []byte {
+	plist := []byte(packedlist)
+
+	if len(plist) == 0 {
+		return []byte{}
+	}
+
+	ltype := plist[0] & 0xC0 >> 6
+
+	if ltype == packListType2Bit {
+		return unpacklist2bit(plist)
+	} else if ltype == packListType3Bit {
+		return unpacklist3bit(plist)
+	} else if ltype == packListType6Bit {
+		return unpacklist6bit(plist)
+	}
+
+	// Must be gob encoded
+
+	return []byte{00}
+}
+
+func unpacklist2bit(packedlist []byte) []byte {
+	ret := make([]byte, 0, len(packedlist)*3)
+
+	for i := 0; i < len(packedlist); i++ {
+		b1, b2, b3, b4 := byte2list2bit(packedlist[i])
+		if i > 0 && b1 != 0 {
+			ret = append(ret, b1)
+		}
+		if b2 != 0 {
+			ret = append(ret, b2)
+		}
+		if b3 != 0 {
+			ret = append(ret, b3)
+		}
+		if b4 != 0 {
+			ret = append(ret, b4)
+		}
+	}
+
+	return ret
+}
+
+func unpacklist3bit(packedlist []byte) []byte {
+	ret := make([]byte, 0, len(packedlist)*2)
+
+	for i := 0; i < len(packedlist); i++ {
+		b1, b2 := byte2list3bit(packedlist[i])
+		if b1 != 0 {
+			ret = append(ret, b1)
+		}
+		if b2 != 0 {
+			ret = append(ret, b2)
+		}
+	}
+
+	return ret
+}
+
/*
unpacklist6bit decodes a 6 bit packed list; every byte carries one item in
its low 6 bits (the top 2 bits of the first byte hold the header).
*/
func unpacklist6bit(packedlist []byte) []byte {
	result := make([]byte, 0, len(packedlist))

	for _, packed := range packedlist {
		// Inlined byte2list6bit: keep only the low 6 bits
		result = append(result, packed&0x3F)
	}

	return result
}
+
/*
byte2list2bit splits a byte into four 2 bit items, most significant first.
*/
func byte2list2bit(b byte) (b1 byte, b2 byte, b3 byte, b4 byte) {
	return (b >> 6) & 0x03, (b >> 4) & 0x03, (b >> 2) & 0x03, b & 0x03
}
+
/*
list2byte2bit packs four 2 bit items into one byte, first item in the most
significant position. Only the low 2 bits of each item are used.
*/
func list2byte2bit(b1 byte, b2 byte, b3 byte, b4 byte) byte {
	res := (b1 & 0x03) << 6
	res |= (b2 & 0x03) << 4
	res |= (b3 & 0x03) << 2
	res |= b4 & 0x03
	return res
}
+
/*
list2byte3bitAndHeader packs a 2 bit header and two 3 bit items into one
byte (header in the top 2 bits).
*/
func list2byte3bitAndHeader(b1 byte, b2 byte, b3 byte) byte {
	res := (b1 & 0x03) << 6
	res |= (b2 & 0x07) << 3
	res |= b3 & 0x07
	return res
}
+
/*
byte2list3bit extracts the two 3 bit items from a byte (ignoring the top
2 header bits).
*/
func byte2list3bit(b byte) (b2 byte, b3 byte) {
	return (b >> 3) & 0x07, b & 0x07
}
+
/*
list2byte6bitAndHeader packs a 2 bit header and one 6 bit item into one
byte (header in the top 2 bits).
*/
func list2byte6bitAndHeader(b1 byte, b2 byte) byte {
	return ((b1 & 0x03) << 6) | (b2 & 0x3F)
}
+
/*
byte2list6bit extracts the 6 bit item from a byte by masking off the two
header bits.
*/
func byte2list6bit(b byte) byte {
	return b & 0x3F
}

+ 360 - 0
bitutil/packedlist_test.go

@@ -0,0 +1,360 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package bitutil
+
+import (
+	"fmt"
+	"math"
+	"testing"
+)
+
/*
TestListPacking checks that PackList selects a suitable packing for a given
highest value and that UnpackList restores the numbers. Only the prefix of
the list whose values fit the declared highest value is checked after each
round trip.
*/
func TestListPacking(t *testing.T) {
	// Boundary value for every supported packing variant
	mylist := make([]uint64, 7)
	mylist[0] = 3
	mylist[1] = 7
	mylist[2] = 63
	mylist[3] = math.MaxUint8
	mylist[4] = math.MaxUint16
	mylist[5] = math.MaxUint32
	mylist[6] = math.MaxUint64

	res := UnpackList(PackList(mylist, 3))
	if res[0] != 3 {
		t.Error("Unexpected result:", res)
		return
	}

	res = UnpackList(PackList(mylist, 7))
	if fmt.Sprint(res[:2]) != "[3 7]" {
		t.Error("Unexpected result:", res[:2])
		return
	}

	res = UnpackList(PackList(mylist, 63))
	if fmt.Sprint(res[:3]) != "[3 7 63]" {
		t.Error("Unexpected result:", res[:3])
		return
	}

	res = UnpackList(PackList(mylist, math.MaxUint8))
	if fmt.Sprint(res[:4]) != "[3 7 63 255]" {
		t.Error("Unexpected result:", res[:4])
		return
	}

	res = UnpackList(PackList(mylist, math.MaxUint16))
	if fmt.Sprint(res[:5]) != "[3 7 63 255 65535]" {
		t.Error("Unexpected result:", res[:5])
		return
	}

	res = UnpackList(PackList(mylist, math.MaxUint32))
	if fmt.Sprint(res[:6]) != "[3 7 63 255 65535 4294967295]" {
		t.Error("Unexpected result:", res[:6])
		return
	}

	res = UnpackList(PackList(mylist, math.MaxUint64))
	if fmt.Sprint(res[:7]) != "[3 7 63 255 65535 4294967295 18446744073709551615]" {
		t.Error("Unexpected result:", res[:7])
		return
	}

	// Values that need more than 6 bits go through the 8 bit packing
	res = UnpackList(PackList([]uint64{10, 12, 80}, 80))
	if fmt.Sprint(res) != "[10 12 80]" {
		t.Error("Unexpected result:", res)
		return
	}
}
+
/*
TestListPacking8 checks round trips through the 3 bit and 16 bit packings
and the nil result of UnpackList for an empty packed string.
*/
func TestListPacking8(t *testing.T) {
	list1 := PackList3Bit([]byte{1, 2, 3, 4, 5, 6, 7})
	list2 := PackList16Bit([]uint16{1, 2, 3, 4})

	// 7 items at 3 bits pack into 4 bytes (header shares the first byte);
	// 4 items at 16 bits need 8 bytes plus the type byte
	if len(list1) != 4 || len(list2) != 9 {
		t.Error("Unexpected lengths:", len(list1), len(list2))
		return
	}

	res1 := UnpackList(list1)
	res2 := UnpackList(list2)

	if fmt.Sprint(res1) != "[1 2 3 4 5 6 7]" {
		t.Error("Unexpected result:", res1)
		return
	}
	if fmt.Sprint(res2) != "[1 2 3 4]" {
		t.Error("Unexpected result:", res2)
		return
	}

	if UnpackList("") != nil {
		t.Error("Unexpected result")
		return
	}
}
+
+func TestVarBitListPacking8(t *testing.T) {
+	scale := 3
+
+	testlist := make([]uint8, scale)
+
+	for i := 0; i < scale; i++ {
+		testlist[i] = math.MaxUint8
+	}
+
+	res := PackList8Bit(testlist)
+
+	if len(res) != scale+1 {
+		t.Error("Unexpected length:", len(res))
+		return
+	}
+
+	res2 := UnpackBigList(res)
+
+	for i := 0; i < scale; i++ {
+		if testlist[i] != uint8(res2[i]) {
+			t.Error("Unexpected result at:", i)
+		}
+	}
+}
+
+func TestVarBitListPacking16(t *testing.T) {
+	scale := 3
+
+	testlist := make([]uint16, scale)
+
+	for i := 0; i < scale; i++ {
+		testlist[i] = math.MaxUint16
+	}
+
+	res := PackList16Bit(testlist)
+
+	if len(res) != scale*2+1 {
+		t.Error("Unexpected length:", len(res))
+		return
+	}
+
+	res2 := UnpackBigList(res)
+
+	for i := 0; i < scale; i++ {
+		if testlist[i] != uint16(res2[i]) {
+			t.Error("Unexpected result at:", i)
+		}
+	}
+}
+
+func TestVarBitListPacking32(t *testing.T) {
+	scale := 3
+
+	testlist := make([]uint32, scale)
+
+	for i := 0; i < scale; i++ {
+		testlist[i] = math.MaxUint32
+	}
+
+	res := PackList32Bit(testlist)
+
+	if len(res) != scale*4+1 {
+		t.Error("Unexpected length:", len(res))
+		return
+	}
+
+	res2 := UnpackBigList(res)
+
+	for i := 0; i < scale; i++ {
+		if testlist[i] != uint32(res2[i]) {
+			t.Error("Unexpected result at:", i)
+		}
+	}
+}
+
+func TestVarBitListPacking64(t *testing.T) {
+	scale := 3
+
+	testlist := make([]uint64, scale)
+
+	for i := 0; i < scale; i++ {
+		testlist[i] = math.MaxUint64
+	}
+
+	res := PackList64Bit(testlist)
+
+	if len(res) != scale*8+1 {
+		t.Error("Unexpected length:", len(res))
+		return
+	}
+
+	res2 := UnpackBigList(res)
+
+	for i := 0; i < scale; i++ {
+		if testlist[i] != uint64(res2[i]) {
+			t.Error("Unexpected result at:", i)
+		}
+	}
+}
+
+func TestSmallListPacking(t *testing.T) {
+
+	// Test simple cases
+
+	if PackList2Bit([]byte{}) != "" {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if PackList3Bit([]byte{}) != "" {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if PackList6Bit([]byte{}) != "" {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if string(UnpackSmallList("")) != "" {
+		t.Error("Unexpected result")
+		return
+	}
+
+	// Simulates a gob encoded string
+
+	if string(UnpackSmallList(string([]byte{0x00}))) != string(0x00) {
+		t.Error("Unexpected result")
+		return
+	}
+
+	// Test normal cases
+
+	checkListAndPresentation2bit(t, []byte{1, 2, 3, 1, 2, 3}, []byte{0x5b, 0x6c}, 2)
+	checkListAndPresentation2bit(t, []byte{1}, []byte{0x50}, 1)
+	checkListAndPresentation2bit(t, []byte{1, 2}, []byte{0x58}, 1)
+	checkListAndPresentation2bit(t, []byte{1, 2, 3}, []byte{0x5B}, 1)
+	checkListAndPresentation2bit(t, []byte{1, 2, 3, 3}, []byte{0x5B, 0xC0}, 2)
+	checkListAndPresentation2bit(t, []byte{1, 2, 3, 3, 2}, []byte{0x5B, 0xE0}, 2)
+	checkListAndPresentation2bit(t, []byte{1, 2, 3, 3, 2, 1, 3}, []byte{0x5B, 0xE7}, 2)
+
+	checkListAndPresentation3bit(t, []byte{1, 2, 3, 1, 2, 3}, []byte{0x8A, 0x19, 0x13}, 3)
+	checkListAndPresentation3bit(t, []byte{1}, []byte{0x88}, 1)
+	checkListAndPresentation3bit(t, []byte{1, 2}, []byte{0x8A}, 1)
+	checkListAndPresentation3bit(t, []byte{1, 2, 3}, []byte{0x8A, 0x18}, 2)
+	checkListAndPresentation3bit(t, []byte{1, 2, 3, 3}, []byte{0x8A, 0x1B}, 2)
+	checkListAndPresentation3bit(t, []byte{1, 2, 3, 4, 5, 6, 7}, []byte{0x8A, 0x1C, 0x2E, 0x38}, 4)
+
+	checkListAndPresentation6bit(t, []byte{1, 2, 3, 1, 2, 3})
+	checkListAndPresentation6bit(t, []byte{1})
+	checkListAndPresentation6bit(t, []byte{1, 2})
+	checkListAndPresentation6bit(t, []byte{1, 2, 3})
+	checkListAndPresentation6bit(t, []byte{1, 2, 3, 3})
+	checkListAndPresentation6bit(t, []byte{1, 2, 3, 4, 35, 45, 63})
+}
+
+func checkListAndPresentation2bit(t *testing.T, list []byte, packedlist []byte, packedLen int) {
+	res := PackList2Bit(list)
+	if res != string(packedlist) {
+		t.Errorf("Unexpected result: %X", []byte(res))
+		return
+	}
+	if len(res) != packedLen {
+		t.Error("Unexpected size", len(res))
+		return
+	}
+	if dres := UnpackSmallList(res); string(dres) != string(list) {
+		t.Errorf("Unexpected result: %X", []byte(dres))
+		return
+	}
+}
+
+func checkListAndPresentation3bit(t *testing.T, list []byte, packedlist []byte, packedLen int) {
+	res := PackList3Bit(list)
+	if res != string(packedlist) {
+		t.Errorf("Unexpected result: %X", []byte(res))
+		return
+	}
+	if len(res) != packedLen {
+		t.Error("Unexpected size", len(res))
+		return
+	}
+	if dres := UnpackSmallList(res); string(dres) != string(list) {
+		t.Errorf("Unexpected result: %X", []byte(dres))
+		return
+	}
+}
+
+func checkListAndPresentation6bit(t *testing.T, list []byte) {
+	res := PackList6Bit(list)
+
+	packedlist := make([]byte, len(list))
+	copy(packedlist, list)
+	packedlist[0] = packedlist[0] | 0xC0
+
+	if res != string(packedlist) {
+		t.Errorf("Unexpected result: %X vs %X", []byte(res), packedlist)
+		return
+	}
+	if len(res) != len(list) {
+		t.Error("Unexpected size", len(res))
+		return
+	}
+	if dres := UnpackSmallList(res); string(dres) != string(list) {
+		t.Errorf("Unexpected result: %X", []byte(dres))
+		return
+	}
+}
+
/*
TestList2byte2bit spot-checks the low level byte assembly helpers.

NOTE(review): despite the test name, the second check exercises
list2byte3bitAndHeader rather than list2byte2bit.
*/
func TestList2byte2bit(t *testing.T) {
	if res := list2byte2bit(0x01, 0x2, 0x03, 0x01); res != 0x6D {
		t.Errorf("Unexpected result: %X", res)
		return
	}
	if res := list2byte3bitAndHeader(0x00, 0x07, 0x03); res != 0x3B {
		t.Errorf("Unexpected result: %X", res)
		return
	}
}
+
+func TestByte2list2bit(t *testing.T) {
+	if a, b, c, d := byte2list2bit(0x30); a != 00 || b != 03 || c != 00 || d != 00 {
+		t.Error("Unexpected result:", a, b, c, d)
+		return
+	}
+	if a, b, c, d := byte2list2bit(0x80); a != 02 || b != 00 || c != 00 || d != 00 {
+		t.Error("Unexpected result:", a, b, c, d)
+		return
+	}
+	if a, b, c, d := byte2list2bit(0x01); a != 00 || b != 00 || c != 00 || d != 01 {
+		t.Error("Unexpected result:", a, b, c, d)
+		return
+	}
+	if a, b, c, d := byte2list2bit(0x31); a != 00 || b != 03 || c != 00 || d != 01 {
+		t.Error("Unexpected result:", a, b, c, d)
+		return
+	}
+	if a, b, c, d := byte2list2bit(0x05); a != 00 || b != 00 || c != 01 || d != 01 {
+		t.Error("Unexpected result:", a, b, c, d)
+		return
+	}
+}
+
+func TestByte2list3bit(t *testing.T) {
+	if a, b := byte2list3bit(0x01); a != 00 || b != 01 {
+		t.Error("Unexpected result:", a, b)
+		return
+	}
+	if a, b := byte2list3bit(0x31); a != 06 || b != 01 {
+		t.Error("Unexpected result:", a, b)
+		return
+	}
+	if a, b := byte2list3bit(0x05); a != 00 || b != 05 {
+		t.Error("Unexpected result:", a, b)
+		return
+	}
+}

+ 189 - 0
cryptutil/gencert.go

@@ -0,0 +1,189 @@
+/*
+Package cryptutil contains cryptographic utility functions.
+
+Certificate generation code based on:
+go source src/crypto/tls/generate_cert.go
+
+Copyright 2009 The Go Authors. All rights reserved.
+Use of this source code is governed by a BSD-style license.
+*/
+package cryptutil
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"math/big"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+/*
+GenCert generates certificate files in a given path.
+
+path       - Path to generate the certificate in.
+certFile   - Certificate file to generate.
+keyFile    - Key file to generate.
+host       - Comma-separated hostnames and IPs to generate a certificate for.
+validFrom  - Creation date formatted as Jan 1 15:04:05 2011. Default is empty string which means now.
+validFor   - Duration that certificate is valid for. Default is 365*24*time.Hour.
+isCA       - Flag whether this cert should be its own Certificate Authority.
+rsaBits    - Size of RSA key to generate. Ignored if ecdsa-curve is set. Default is 2048.
+ecdsaCurve - ECDSA curve to use to generate a key. Valid values are P224, P256, P384, P521 or empty string (not set).
+*/
+func GenCert(path string, certFile string, keyFile string, host string,
+	validFrom string, validFor time.Duration, isCA bool, rsaBits int, ecdsaCurve string) error {
+
+	var err error
+
+	// Check parameters
+
+	if path != "" && !strings.HasSuffix(path, "/") {
+		path += "/"
+	}
+
+	if host == "" {
+		return errors.New("Host required for certificate generation")
+	}
+
+	var notBefore time.Time
+
+	if validFrom == "" {
+		notBefore = time.Now()
+	} else {
+		notBefore, err = time.Parse("Jan 2 15:04:05 2006", validFrom)
+		if err != nil {
+			return fmt.Errorf("Failed to parse creation date: %s", err)
+		}
+	}
+
+	notAfter := notBefore.Add(validFor)
+
+	// Generate private key
+
+	var priv interface{}
+
+	switch ecdsaCurve {
+	case "":
+		priv, err = rsa.GenerateKey(rand.Reader, rsaBits)
+	case "P224":
+		priv, err = ecdsa.GenerateKey(elliptic.P224(), rand.Reader)
+	case "P256":
+		priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+	case "P384":
+		priv, err = ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
+	case "P521":
+		priv, err = ecdsa.GenerateKey(elliptic.P521(), rand.Reader)
+	default:
+		err = fmt.Errorf("Unrecognized elliptic curve: %q", ecdsaCurve)
+	}
+
+	if err != nil {
+		return fmt.Errorf("Failed to generate private key: %s", err)
+	}
+
+	// Generate serial random number
+
+	serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
+	serialNumber, _ := rand.Int(rand.Reader, serialNumberLimit)
+
+	// Create and populate the certificate template
+
+	template := x509.Certificate{
+
+		SerialNumber: serialNumber,
+		Subject: pkix.Name{
+			Organization: []string{"Local"},
+		},
+		NotBefore: notBefore,
+		NotAfter:  notAfter,
+
+		KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
+		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
+		BasicConstraintsValid: true,
+	}
+
+	// Add hosts
+
+	hosts := strings.Split(host, ",")
+	for _, h := range hosts {
+		if ip := net.ParseIP(h); ip != nil {
+			template.IPAddresses = append(template.IPAddresses, ip)
+		} else {
+			template.DNSNames = append(template.DNSNames, h)
+		}
+	}
+
+	// Set the CA flag
+
+	if isCA {
+		template.IsCA = isCA
+		template.KeyUsage |= x509.KeyUsageCertSign
+	}
+
+	// Create the certificate and write it out
+
+	derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, publicKey(priv), priv)
+
+	if err == nil {
+
+		certOut, err := os.Create(path + certFile)
+		defer certOut.Close()
+
+		if err != nil {
+			return fmt.Errorf("Failed to open %s for writing: %s", certFile, err)
+		}
+
+		pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+
+		// Write out private key
+
+		keyOut, err := os.OpenFile(path+keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+		defer keyOut.Close()
+
+		if err != nil {
+			return fmt.Errorf("Failed to open %v for writing: %v", keyFile, err)
+		}
+
+		pem.Encode(keyOut, pemBlockForKey(priv))
+	}
+
+	return err
+}
+
/*
Return public key from a given key pair. Unknown key types yield nil.
*/
func publicKey(priv interface{}) interface{} {
	if rsaKey, ok := priv.(*rsa.PrivateKey); ok {
		return &rsaKey.PublicKey
	}

	if ecdsaKey, ok := priv.(*ecdsa.PrivateKey); ok {
		return &ecdsaKey.PublicKey
	}

	return nil
}
+
/*
Return private key pem block for a given key pair. Unknown key types yield nil.
*/
func pemBlockForKey(priv interface{}) *pem.Block {
	if rsaKey, ok := priv.(*rsa.PrivateKey); ok {
		return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(rsaKey)}
	}

	if ecdsaKey, ok := priv.(*ecdsa.PrivateKey); ok {
		der, _ := x509.MarshalECPrivateKey(ecdsaKey)
		return &pem.Block{Type: "EC PRIVATE KEY", Bytes: der}
	}

	return nil
}

+ 149 - 0
cryptutil/gencert_test.go

@@ -0,0 +1,149 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"errors"
+	"flag"
+	"fmt"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"devt.de/krotik/common/fileutil"
+)
+
/*
certDir is the directory the tests write generated certificates into.
*/
const certDir = "certs"

/*
invalidFileName is a file name which is invalid on all supported platforms
(it contains a NUL byte) - used to provoke file creation errors.
The previous form "**" + string(0x0) used an int-to-string conversion
which go vet flags; the literal below has the identical value.
*/
const invalidFileName = "**\x00"
+
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	// Setup
+	if res, _ := fileutil.PathExists(certDir); res {
+		os.RemoveAll(certDir)
+	}
+
+	err := os.Mkdir(certDir, 0770)
+	if err != nil {
+		fmt.Print("Could not create test directory:", err.Error())
+		os.Exit(1)
+	}
+
+	// Run the tests
+	res := m.Run()
+
+	// Teardown
+	err = os.RemoveAll(certDir)
+	if err != nil {
+		fmt.Print("Could not remove test directory:", err.Error())
+	}
+
+	os.Exit(res)
+}
+
/*
TestGenCert generates certificates with all supported key types and then
exercises the documented error paths of GenCert.
*/
func TestGenCert(t *testing.T) {

	// checkGeneration generates a certificate / key pair for the given
	// ECDSA curve ("" selects RSA) and verifies the generated files.

	checkGeneration := func(ecdsaCurve string) error {

		// Generate a certificate and private key

		err := GenCert(certDir, "cert.pem", "key.pem", "localhost,127.0.0.1", "", 365*24*time.Hour, true, 2048, ecdsaCurve)
		if err != nil {
			return err
		}

		// Check that the files were generated

		if ok, _ := fileutil.PathExists(certDir + "/key.pem"); !ok {
			return errors.New("Private key was not generated")
		}

		if ok, _ := fileutil.PathExists(certDir + "/cert.pem"); !ok {
			return errors.New("Certificate was not generated")
		}

		// The written certificate must be readable again

		_, err = ReadX509CertsFromFile(certDir + "/cert.pem")
		if err != nil {
			return err
		}

		return nil
	}

	if err := checkGeneration(""); err != nil {
		t.Error(err)
		return
	}

	if err := checkGeneration("P224"); err != nil {
		t.Error(err)
		return
	}

	if err := checkGeneration("P256"); err != nil {
		t.Error(err)
		return
	}

	if err := checkGeneration("P384"); err != nil {
		t.Error(err)
		return
	}

	if err := checkGeneration("P521"); err != nil {
		t.Error(err)
		return
	}

	// Test error cases

	// Missing host

	err := GenCert(certDir, "cert.pem", "key.pem", "", "", 365*24*time.Hour, true, 2048, "")
	if err.Error() != "Host required for certificate generation" {
		t.Error(err)
		return
	}

	// Unknown elliptic curve name

	err = GenCert(certDir, "cert.pem", "key.pem", "localhost", "", 365*24*time.Hour, true, 2048, "xxx")
	if err.Error() != `Failed to generate private key: Unrecognized elliptic curve: "xxx"` {
		t.Error(err)
		return
	}

	// Unparsable creation date

	err = GenCert(certDir, "cert.pem", "key.pem", "localhost", "xxx", 365*24*time.Hour, true, 2048, "")
	if err.Error() != `Failed to parse creation date: parsing time "xxx" as "Jan 2 15:04:05 2006": cannot parse "xxx" as "Jan"` {
		t.Error(err)
		return
	}

	// Unwritable key file (invalidFileName contains a NUL byte)

	err = GenCert(certDir, "cert.pem", invalidFileName, "localhost", "", 365*24*time.Hour, true, 2048, "")
	if !strings.HasPrefix(err.Error(), "Failed to open") {
		t.Error(err)
		return
	}

	// Unwritable certificate file

	err = GenCert(certDir, invalidFileName, "key.pem", "localhost", "", 365*24*time.Hour, true, 2048, "")
	if !strings.HasPrefix(err.Error(), "Failed to open") {
		t.Error(err)
		return
	}

	// The key helpers return nil for unknown key types

	if publicKey(nil) != nil {
		t.Error("Unexpected result")
		return
	}

	if pemBlockForKey(nil) != nil {
		t.Error("Unexpected result")
		return
	}
}

+ 109 - 0
cryptutil/stringcrypt.go

@@ -0,0 +1,109 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/base64"
+	"fmt"
+	"io"
+)
+
/*
EncryptString encrypts a given string using AES (cfb mode). The returned
string consists of a random initialization vector followed by the encrypted,
base64-encoded text.
*/
func EncryptString(passphrase, text string) (string, error) {

	// Derive an AES-256 key from the passphrase

	key := sha256.Sum256([]byte(passphrase))

	block, err := aes.NewCipher(key[:])
	if err != nil {
		return "", err
	}

	// Base64 encode the plain text

	encoded := base64.StdEncoding.EncodeToString([]byte(text))

	out := make([]byte, aes.BlockSize+len(encoded))

	// Fill the first block with a random initialization vector

	iv := out[:aes.BlockSize]

	if _, err = io.ReadFull(rand.Reader, iv); err != nil {
		return "", err
	}

	// Do the encryption into the remainder of the output buffer

	cipher.NewCFBEncrypter(block, iv).XORKeyStream(out[aes.BlockSize:], []byte(encoded))

	return string(out), nil
}
+
/*
DecryptString decrypts a given string using AES (cfb mode). The input must
start with the initialization vector followed by the encrypted,
base64-encoded text.
*/
func DecryptString(passphrase, text string) (string, error) {

	// The input must at least contain the initialization vector

	if len(text) < aes.BlockSize {
		return "", fmt.Errorf("Ciphertext is too short - must be at least: %v", aes.BlockSize)
	}

	// Derive the AES-256 key from the passphrase

	key := sha256.Sum256([]byte(passphrase))

	block, err := aes.NewCipher(key[:])
	if err != nil {
		return "", err
	}

	// Separate initialization vector and actual encrypted text

	iv, payload := text[:aes.BlockSize], text[aes.BlockSize:]

	// Do the decryption

	decoded := make([]byte, len(payload))
	cipher.NewCFBDecrypter(block, []byte(iv)).XORKeyStream(decoded, []byte(payload))

	// Decode text from base64 - a failure here indicates a wrong key
	// or corrupted input

	plain, err := base64.StdEncoding.DecodeString(string(decoded))
	if err != nil {
		return "", fmt.Errorf("Could not decrypt data")
	}

	return string(plain), nil
}

+ 53 - 0
cryptutil/stringcrypt_test.go

@@ -0,0 +1,53 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"testing"
+)
+
+func TestStringEncryption(t *testing.T) {
+
+	secret := "This is a test"
+
+	encString, err := EncryptString("foo", secret)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	decString, err := DecryptString("foo", encString)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if decString != secret {
+		t.Error("Unexpected result:", decString, secret)
+		return
+	}
+
+	decString, err = DecryptString("foo1", encString)
+	if err.Error() != "Could not decrypt data" {
+		t.Error(err)
+		return
+	}
+
+	if decString != "" {
+		t.Error("Unexpected result:", decString)
+		return
+	}
+
+	decString, err = DecryptString("foo1", "bar")
+	if err.Error() != "Ciphertext is too short - must be at least: 16" {
+		t.Error(err)
+		return
+	}
+}

+ 37 - 0
cryptutil/uuid.go

@@ -0,0 +1,37 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"crypto/rand"
+
+	"devt.de/krotik/common/errorutil"
+)
+
+/*
+GenerateUUID generates a version 4 (randomly generated) UUID according to RFC4122.
+*/
+func GenerateUUID() [16]byte {
+	var u [16]byte
+
+	_, err := rand.Read(u[:])
+	errorutil.AssertOk(err)
+
+	// Set version 4
+
+	u[6] = (u[6] & 0x0f) | 0x40
+
+	// Set variant bits - variant of RFC 4122
+
+	u[8] = (u[8] & 0xbf) | 0x80
+
+	return u
+
+}

+ 22 - 0
cryptutil/uuid_test.go

@@ -0,0 +1,22 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestUUID(t *testing.T) {
+	if fmt.Sprint(GenerateUUID()) == "" {
+		t.Error("Unexpected result")
+		return
+	}
+}

+ 109 - 0
cryptutil/x509util.go

@@ -0,0 +1,109 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/sha1"
+	"crypto/sha256"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+/*
+ReadX509CertsFromFile reads a list of pem encoded certificates from a given file.
+*/
+func ReadX509CertsFromFile(filename string) ([]*x509.Certificate, error) {
+	var err error
+	var certs []*x509.Certificate
+
+	file, err := os.OpenFile(filename, os.O_RDONLY, 0660)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	certsString, err := ioutil.ReadAll(file)
+	if err == nil {
+		certs, err = ReadX509Certs(certsString)
+	}
+
+	return certs, err
+}
+
/*
ReadX509Certs reads a list of pem encoded certificates from a byte array.
At least one PEM block must be present.
*/
func ReadX509Certs(certs []byte) ([]*x509.Certificate, error) {

	var der []byte

	// Collect the DER bytes of all PEM blocks

	for {
		var block *pem.Block

		if block, certs = pem.Decode(certs); block == nil {
			return nil, errors.New("PEM not parsed")
		}

		der = append(der, block.Bytes...)

		if len(certs) == 0 {
			break
		}
	}

	parsed, err := x509.ParseCertificates(der)
	if err != nil {
		return nil, err
	}

	return parsed, nil
}
+
/*
Sha1CertFingerprint computes a sha1 fingerprint for a certificate.
*/
func Sha1CertFingerprint(cert *x509.Certificate) string {
	sum := sha1.Sum(cert.Raw)
	return formatFingerprint(fmt.Sprintf("%x", sum))
}

/*
Sha256CertFingerprint computes a sha256 fingerprint for a certificate.
*/
func Sha256CertFingerprint(cert *x509.Certificate) string {
	sum := sha256.Sum256(cert.Raw)
	return formatFingerprint(fmt.Sprintf("%x", sum))
}

/*
Md5CertFingerprint computes a md5 fingerprint for a certificate.
*/
func Md5CertFingerprint(cert *x509.Certificate) string {
	sum := md5.Sum(cert.Raw)
	return formatFingerprint(fmt.Sprintf("%x", sum))
}

/*
Format a given fingerprint string by inserting a colon between every
pair of hex digits.
*/
func formatFingerprint(raw string) string {
	var buf bytes.Buffer

	for idx, ch := range raw {
		buf.WriteByte(byte(ch))

		// Separate pairs with a colon except after the last character

		if idx%2 == 1 && idx != len(raw)-1 {
			buf.WriteByte(':')
		}
	}

	return buf.String()
}

+ 94 - 0
cryptutil/x509util_test.go

@@ -0,0 +1,94 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package cryptutil
+
+import (
+	"strings"
+	"testing"
+)
+
+func TestCertificateDecoding(t *testing.T) {
+
+	_, err := ReadX509CertsFromFile(invalidFileName)
+	if err == nil {
+		t.Error("Attempting to load an invalid file should result in an error")
+		return
+	}
+
+	googleCert := `
+-----BEGIN CERTIFICATE-----
+MIIEgDCCA2igAwIBAgIIORWTXMrZJggwDQYJKoZIhvcNAQELBQAwSTELMAkGA1UE
+BhMCVVMxEzARBgNVBAoTCkdvb2dsZSBJbmMxJTAjBgNVBAMTHEdvb2dsZSBJbnRl
+cm5ldCBBdXRob3JpdHkgRzIwHhcNMTYwNzEzMTMxODU2WhcNMTYxMDA1MTMxNjAw
+WjBoMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwN
+TW91bnRhaW4gVmlldzETMBEGA1UECgwKR29vZ2xlIEluYzEXMBUGA1UEAwwOd3d3
+Lmdvb2dsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkNYMd
+9AGxMuv6wC7XBkzi6G7l+jqq+xoxs3zW+8jmGntRh/ggnTNLTQiwLPquusGbPo4n
+bVX2UQV7ATyWeg7WZQuVjgeeF7WG++xwtLUtW3noSCmePSasWx0mcJu2tiuMWqsm
+PbR08k14tz4jiqmRDQQfttffVS1wk0Oul6+x7hN8AyZ24gUWzb+L5ILA+8CtsZB/
+u9XFtf+yEr277J7vH7GyEJxYt3u2dxy/nrNlF8o2wUl+U1bvUnQVRPNiFXLK2uiQ
+4XkL7F3Uk19q09snjHcOixYHSYgyGYATCfV/d6hQ+RSKzd7TQp/YHtT1LgmUUefH
+Hu04LXVnuhKUYYZnAgMBAAGjggFLMIIBRzAdBgNVHSUEFjAUBggrBgEFBQcDAQYI
+KwYBBQUHAwIwGQYDVR0RBBIwEIIOd3d3Lmdvb2dsZS5jb20waAYIKwYBBQUHAQEE
+XDBaMCsGCCsGAQUFBzAChh9odHRwOi8vcGtpLmdvb2dsZS5jb20vR0lBRzIuY3J0
+MCsGCCsGAQUFBzABhh9odHRwOi8vY2xpZW50czEuZ29vZ2xlLmNvbS9vY3NwMB0G
+A1UdDgQWBBRU6a8Q+y3AwMTsYpTXqT+xJ6n9bzAMBgNVHRMBAf8EAjAAMB8GA1Ud
+IwQYMBaAFErdBhYbvPZotXb1gba7Yhq6WoEvMCEGA1UdIAQaMBgwDAYKKwYBBAHW
+eQIFATAIBgZngQwBAgIwMAYDVR0fBCkwJzAloCOgIYYfaHR0cDovL3BraS5nb29n
+bGUuY29tL0dJQUcyLmNybDANBgkqhkiG9w0BAQsFAAOCAQEAiw4H269LfRl/Vrm6
+BmTCS5ipvbE6qMbwdB++eA/NaHU29bbFzRIRIo7T6nHynAE6QTUS0fRoZ2bnoaxY
+Z98hSqnPlpDC3D2IImcrSywIejS0aFcT6UZT57QUm7iANDs3N7XHsXXLT0wrvXZS
+GPKxS2JtOS3J5lRoN4fbYLuAHEzBn7zAqtrd98EEaYGdDerMo8kAyIDHqV4OiukI
+YkefRqQpi1B8hPFuFw8KDGuAHdfHOoUmuRo4yxs5Br7FhoLLtdN+5UD3tbWYGZo4
+9dl+K2ZqYOiNIHSTg78YaLM2s82G0WcL3oSzZg/ne+HZdhTu2YNFbGnoBIrgPjiP
+TV6Wsg==
+-----END CERTIFICATE-----
+`
+
+	c, err := ReadX509Certs([]byte(googleCert))
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if len(c) != 1 {
+		t.Error("Only one certificate should have been read")
+		return
+	}
+
+	if res := Sha256CertFingerprint(c[0]); res != "d0:88:88:3c:7b:b3:da:b4:9e:d8:bf:ec:43:aa:92:cb:29:58:e8:e2:e1:c3:89:8d:73:50:6a:b8:c8:f1:12:21" {
+		t.Error("Unexpected fingerprint:", res)
+		return
+	}
+
+	if res := Sha1CertFingerprint(c[0]); res != "ee:b6:d4:d8:88:e5:75:5f:ff:c0:19:27:b6:67:9c:77:e8:0d:2c:7f" {
+		t.Error("Unexpected fingerprint:", res)
+		return
+	}
+
+	if res := Md5CertFingerprint(c[0]); res != "5c:a6:bd:96:9c:96:79:a7:90:ee:89:a6:ee:1a:04:a8" {
+		t.Error("Unexpected fingerprint:", res)
+		return
+	}
+
+	// Test error cases
+
+	_, err = ReadX509Certs([]byte(googleCert[2:]))
+	if err.Error() != "PEM not parsed" {
+		t.Error("PEM parsing error expected:", err)
+		return
+	}
+
+	_, err = ReadX509Certs([]byte(googleCert[0:29] + "Mi" + googleCert[31:]))
+	if strings.HasPrefix("asn1: structure error", err.Error()) {
+		t.Error("asn1 parsing error expected:", err)
+		return
+	}
+}

+ 65 - 0
datautil/datacopy.go

@@ -0,0 +1,65 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package datautil contains general data handling objects and helper methods.
+*/
+package datautil
+
+import (
+	"bytes"
+	"encoding/gob"
+
+	"devt.de/krotik/common/pools"
+)
+
+/*
+bufferPool holds buffers which are used to copy objects.
+*/
+var bufferPool = pools.NewByteBufferPool()
+
+/*
+CopyObject copies contents of a given object reference to another given object reference.
+*/
+func CopyObject(src interface{}, dest interface{}) error {
+	bb := bufferPool.Get().(*bytes.Buffer)
+
+	err := gob.NewEncoder(bb).Encode(src)
+
+	if err != nil {
+		return err
+	}
+
+	err = gob.NewDecoder(bb).Decode(dest)
+
+	if err != nil {
+		return err
+	}
+
+	bb.Reset()
+	bufferPool.Put(bb)
+
+	return nil
+}
+
/*
MergeMaps merges all given maps into a new map. Contents are shallow copies
and conflicts are resolved as last-one-wins.
*/
func MergeMaps(maps ...map[string]interface{}) map[string]interface{} {
	merged := map[string]interface{}{}

	for _, src := range maps {
		for key, value := range src {
			merged[key] = value
		}
	}

	return merged
}

+ 80 - 0
datautil/datacopy_test.go

@@ -0,0 +1,80 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"testing"
+
+	"devt.de/krotik/common/testutil"
+)
+
+func TestMergeMaps(t *testing.T) {
+	m := MergeMaps(map[string]interface{}{
+		"a": 1,
+		"b": 2,
+	}, map[string]interface{}{
+		"b": 3,
+		"c": 4,
+	})
+
+	if len(m) != 3 {
+		t.Error("Unexpected number of result entries:", len(m))
+		return
+	}
+
+	if m["a"] != 1 || m["b"] != 3 || m["c"] != 4 {
+		t.Error("Unexpected entries:", m)
+		return
+	}
+}
+
+func TestCopyObject(t *testing.T) {
+
+	var ret2 string
+
+	if err := CopyObject("test", &ret2); err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Test encoding errors
+
+	var ret3 testutil.GobTestObject
+
+	gobtest := &testutil.GobTestObject{Name: "test", EncErr: true, DecErr: false}
+
+	if err := CopyObject(gobtest, &ret3); err == nil || err.Error() != "Encode error" {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+	gobtest = &testutil.GobTestObject{Name: "test", EncErr: false, DecErr: false}
+	ret3 = testutil.GobTestObject{Name: "test", EncErr: false, DecErr: true}
+
+	if err := CopyObject(gobtest, &ret3); err == nil || err.Error() != "Decode error" {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+	ret3 = testutil.GobTestObject{Name: "test", EncErr: true, DecErr: false}
+
+	if err := CopyObject(&ret3, gobtest); err == nil || err.Error() != "Encode error" {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+	ret3 = testutil.GobTestObject{Name: "test", EncErr: false, DecErr: false}
+	gobtest = &testutil.GobTestObject{Name: "test", EncErr: false, DecErr: true}
+
+	if err := CopyObject(&ret3, gobtest); err == nil || err.Error() != "Decode error" {
+		t.Error("Unexpected result:", err)
+		return
+	}
+}

+ 246 - 0
datautil/mapcache.go

@@ -0,0 +1,246 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+	"time"
+)
+
/*
MapCache is a map based cache object storing string->interface{}. It is possible
to specify a maximum size, which when reached causes the oldest entries to be
removed. It is also possible to set an expiry time for values. Values which are
old are purged on the next access to the object.
*/
type MapCache struct {
	data    map[string]interface{} // Data for the cache
	ts      map[string]int64       // Timestamps (Unix seconds) for values
	size    uint64                 // Current number of entries in the cache
	maxsize uint64                 // Max size of the cache (0 means no size limit)
	maxage  int64                  // Max age of entries in seconds (0 means no expiry)
	mutex   *sync.RWMutex          // Mutex to protect atomic map operations
}
+
+/*
+NewMapCache creates a new MapCache object. The calling function can specify
+the maximum size and the maximum age in seconds for entries. A value of 0
+means no size constraint and no age constraint.
+*/
+func NewMapCache(maxsize uint64, maxage int64) *MapCache {
+	return &MapCache{make(map[string]interface{}), make(map[string]int64),
+		0, maxsize, maxage, &sync.RWMutex{}}
+}
+
+/*
+Clear removes all entries.
+*/
+func (mc *MapCache) Clear() {
+
+	// Take writer lock
+
+	mc.mutex.Lock()
+	defer mc.mutex.Unlock()
+
+	mc.data = make(map[string]interface{})
+	mc.ts = make(map[string]int64)
+
+	mc.size = 0
+}
+
+/*
+Size returns the current size of the MapCache.
+*/
+func (mc *MapCache) Size() uint64 {
+	return mc.size
+}
+
/*
Put stores an item in the MapCache. If the cache is at its maximum size the
oldest entry is evicted to make room.
*/
func (mc *MapCache) Put(k string, v interface{}) {

	// Do cache maintenance - this purges expired entries and reports the
	// key of the oldest remaining entry for possible eviction below.
	// NOTE(review): the oldest key is determined before the writer lock is
	// taken, so a concurrent writer could change the cache in between -
	// confirm this window is acceptable for the intended usage.

	oldest := mc.maintainCache()

	// Take writer lock

	mc.mutex.Lock()
	defer mc.mutex.Unlock()

	// Check if the entry is a new entry

	if _, exists := mc.data[k]; !exists {

		// If the list is full remove the oldest item otherwise increase the size

		if mc.maxsize != 0 && mc.size == mc.maxsize {
			delete(mc.data, oldest)
			delete(mc.ts, oldest)
		} else {
			mc.size++
		}
	}

	// Do the actual map operation

	mc.data[k] = v
	mc.ts[k] = time.Now().Unix()
}
+
+/*
+Remove removes an item in the MapCache.
+*/
+func (mc *MapCache) Remove(k string) bool {
+
+	// Do cache maintenance
+
+	mc.maintainCache()
+
+	// Take writer lock
+
+	mc.mutex.Lock()
+	defer mc.mutex.Unlock()
+
+	// Check if the entry exists
+
+	_, exists := mc.data[k]
+
+	if exists {
+
+		// Do the actual map operation
+
+		delete(mc.data, k)
+		delete(mc.ts, k)
+
+		mc.size--
+	}
+
+	return exists
+}
+
+/*
+Get retrieves an item from the MapCache.
+*/
+func (mc *MapCache) Get(k string) (interface{}, bool) {
+
+	// Do cache maintenance
+
+	mc.maintainCache()
+
+	// Take reader lock
+
+	mc.mutex.RLock()
+	defer mc.mutex.RUnlock()
+
+	// Do the actual map operation
+
+	v, ok := mc.data[k]
+
+	return v, ok
+}
+
+/*
+GetAll retrieves all items from the MapCache.
+*/
+func (mc *MapCache) GetAll() map[string]interface{} {
+
+	// Do cache maintenance
+
+	mc.maintainCache()
+
+	// Take reader lock
+
+	mc.mutex.RLock()
+	defer mc.mutex.RUnlock()
+
+	// Create return map
+
+	cp := make(map[string]interface{})
+
+	for k, v := range mc.data {
+		cp[k] = v
+	}
+
+	return cp
+}
+
+/*
+String returns a string representation of this MapCache.
+*/
+func (mc *MapCache) String() string {
+
+	mc.mutex.RLock()
+	defer mc.mutex.RUnlock()
+
+	// Sort keys before printing the map
+
+	var keys []string
+	for k := range mc.data {
+		keys = append(keys, k)
+	}
+	sort.Sort(sort.StringSlice(keys))
+
+	buf := &bytes.Buffer{}
+	for _, k := range keys {
+		buf.WriteString(fmt.Sprint(k, ":", mc.data[k], "\n"))
+	}
+
+	return buf.String()
+}
+
+/*
+maintainCache removes expired items and returns the oldest entry.
+*/
+func (mc *MapCache) maintainCache() string {
+
+	mc.mutex.RLock()
+
+	oldestTS := int64(math.MaxInt64)
+	oldestK := ""
+
+	now := time.Now().Unix()
+
+	for k, v := range mc.ts {
+
+		// Check if the entry has expired
+
+		if mc.maxage != 0 && now-v > mc.maxage {
+
+			// Remove entry if it has expired
+
+			mc.mutex.RUnlock()
+			mc.mutex.Lock()
+
+			delete(mc.data, k)
+			delete(mc.ts, k)
+			mc.size--
+
+			mc.mutex.Unlock()
+			mc.mutex.RLock()
+		}
+
+		// Gather oldest entry
+
+		if v < oldestTS {
+			oldestTS = v
+			oldestK = k
+		}
+	}
+
+	mc.mutex.RUnlock()
+
+	return oldestK
+}

+ 148 - 0
datautil/mapcache_test.go

@@ -0,0 +1,148 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"testing"
+	"time"
+)
+
/*
TestMapCache exercises size limiting, expiry and removal of the MapCache.
It writes to the internal ts map directly to simulate entries of different
ages without sleeping.
*/
func TestMapCache(t *testing.T) {

	// Create a map cache which can hold a maximum of 3 items for no longer than
	// 5 seconds

	mc := NewMapCache(3, 5)

	mc.Put("k1", "aaa")
	mc.Put("k2", "bbb")
	mc.Put("k3", "ccc")

	if s := mc.Size(); s != 3 {
		t.Error("Unexpected size:", s)
		return
	}

	mc.Clear()

	if s := mc.Size(); s != 0 {
		t.Error("Unexpected size:", s)
		return
	}

	mc.Put("k1", "aaa")
	mc.Put("k2", "bbb")
	mc.Put("k3", "ccc")

	if s := mc.Size(); s != 3 {
		t.Error("Unexpected size:", s)
		return
	}

	// Test copy

	cp := mc.GetAll()

	if len(cp) != 3 {
		t.Error("Unexpected copy result:", cp)
		return
	}

	// Simulate different timings by backdating the internal timestamps

	mc.ts["k1"] = time.Now().Unix() - 6 // Expired
	mc.ts["k2"] = time.Now().Unix() - 3 // Oldest entry

	if mc.String() != `
k1:aaa
k2:bbb
k3:ccc
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	// Do a read operation on an expired entry - this triggers the purge

	if e, ok := mc.Get("k1"); e != nil || ok {
		t.Error("Expired entry should not be returned", ok, e)
		return
	}

	if mc.String() != `
k2:bbb
k3:ccc
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	// Do a read operation on a live entry

	if e, ok := mc.Get("k2"); e != "bbb" || !ok {
		t.Error("Live entry should be returned", ok, e)
		return
	}

	if mc.String() != `
k2:bbb
k3:ccc
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	// Add 1 entry and update another

	mc.Put("k3", "updateccc")
	mc.Put("k4", "ddd")

	if mc.String() != `
k2:bbb
k3:updateccc
k4:ddd
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	// Add another entry which should push out the oldest (k2)

	mc.Put("k5", "eee")

	if mc.String() != `
k3:updateccc
k4:ddd
k5:eee
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	// Remove items

	if !mc.Remove("k3") {
		t.Error("Live item should be deleted")
		return
	}

	if mc.String() != `
k4:ddd
k5:eee
`[1:] {
		t.Error("Unexpected cache content:", mc)
		return
	}

	if mc.Remove("k0") {
		t.Error("Removal of non-existing item should not return success")
		return
	}
}

+ 52 - 0
datautil/nesting.go

@@ -0,0 +1,52 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import "fmt"
+
/*
GetNestedValue gets a value from a nested object structure. All path elements
except the last must resolve to map[string]interface{} values; otherwise an
error is returned. A missing final key yields nil without error.
*/
func GetNestedValue(d map[string]interface{}, path []string) (interface{}, error) {

	if len(path) == 0 {
		return nil, nil
	}

	// Walk down the intermediate levels of the object structure

	current := d

	for _, key := range path[:len(path)-1] {
		val := current[key]

		sub, ok := val.(map[string]interface{})
		if !ok {
			return nil, fmt.Errorf("Unexpected data type %T as value of %v", val, key)
		}

		current = sub
	}

	// Return the requested value from the innermost map

	return current[path[len(path)-1]], nil
}

+ 79 - 0
datautil/nesting_test.go

@@ -0,0 +1,79 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"bytes"
+	"encoding/gob"
+	"fmt"
+	"testing"
+)
+
/*
TestNesting serializes a nested map through gob and verifies that
GetNestedValue retrieves values from the decoded result at various depths.
*/
func TestNesting(t *testing.T) {

	// Create a nested piece of data which is serialized and deserialized

	var testData1 = map[string]interface{}{
		"level1": map[string]interface{}{
			"level2": map[string]interface{}{
				"atom": 42,
			},
			"atom2": "test5",
		},
	}

	var bb1 bytes.Buffer

	// Only register the generic map[string]interface{}

	gob.Register(map[string]interface{}{})

	if err := gob.NewEncoder(&bb1).Encode(testData1); err != nil {
		t.Error(err)
		return
	}

	var testOut map[string]interface{}

	if err := gob.NewDecoder(&bb1).Decode(&testOut); err != nil {
		t.Error(err)
		return
	}

	// Lookup of a deeply nested atom value

	val, err := GetNestedValue(testOut, []string{"level1", "level2", "atom"})
	if val != 42 || err != nil {
		t.Error("Unexpected result:", val, err)
		return
	}

	// Lookup of an intermediate map value

	val, err = GetNestedValue(testOut, []string{"level1", "level2"})
	if fmt.Sprint(val) != "map[atom:42]" || err != nil {
		t.Error("Unexpected result:", val, err)
		return
	}

	val, err = GetNestedValue(testOut, []string{"level1", "atom2"})
	if val != "test5" || err != nil {
		t.Error("Unexpected result:", val, err)
		return
	}

	// A missing final key yields nil without error

	val, err = GetNestedValue(testOut, []string{"level1", "atom3"})
	if val != nil || err != nil {
		t.Error("Unexpected result:", val, err)
		return
	}

	// Descending through a non-map value yields an error

	val, err = GetNestedValue(testOut, []string{"level1", "level2", "atom", "test"})
	if val != nil || err.Error() != "Unexpected data type int as value of atom" {
		t.Error("Unexpected result:", val, err)
		return
	}
}

+ 102 - 0
datautil/nonce.go

@@ -0,0 +1,102 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"crypto/sha256"
+	"errors"
+	"fmt"
+
+	"devt.de/krotik/common/cryptutil"
+	"devt.de/krotik/common/timeutil"
+)
+
+/*
+MaxNonceLifetime is the maximum lifetime for nonces in seconds. Changing
+this value only has an effect before the first call to NewNonce since the
+backing MapCache is created once with the value which is current then.
+*/
+var MaxNonceLifetime int64 = 3600 // One hour
+
+/*
+Default nonce related errors
+
+NOTE(review): "Invlaid" misspells "Invalid" - the name is exported and
+callers compare against it, so renaming would be a breaking change.
+*/
+var (
+	ErrInvlaidNonce = errors.New("Invalid nonce value")
+)
+
+/*
+nonces is an internal map which holds all valid nonces. It is created
+lazily by NewNonce; entries expire after MaxNonceLifetime seconds.
+*/
+var nonces *MapCache
+
+/*
+NewNonce generates a new nonce value. The nonce is invalidated either
+after it was consumed or automatically after MaxNonceLifetime seconds.
+The returned string has the form <64 hex chars of a SHA256 sum>-<timestamp>.
+
+NOTE(review): the lazy creation of the nonce cache below is not guarded
+by a lock - concurrent first calls could race; confirm that first use is
+serialised or consider moving the creation into an init step.
+*/
+func NewNonce() string {
+
+	if nonces == nil {
+
+		// Create nonce cache if it doesn't exist yet
+
+		nonces = NewMapCache(0, MaxNonceLifetime)
+	}
+
+	// Get a timestamp
+
+	ts := timeutil.MakeTimestamp()
+
+	// Calculate a hash based on a UUID
+
+	uuid := cryptutil.GenerateUUID()
+	secPart := sha256.Sum256(uuid[:])
+
+	// Construct the actual nonce and save it (value is irrelevant - only
+	// the key's presence in the cache matters)
+
+	ret := fmt.Sprintf("%x-%s", secPart, ts)
+
+	nonces.Put(ret, nil)
+
+	return ret
+}
+
+/*
+CheckNonce checks if a given nonce is valid. The nonce is still valid
+after this operation. Returns ErrInvlaidNonce for unknown, expired or
+malformed nonce values.
+*/
+func CheckNonce(nonce string) error {
+
+	// Check length - 78 characters are the 64 hex digits of the SHA256
+	// sum plus the "-" separator and the timestamp (presumably 13 digits
+	// of a millisecond timestamp - TODO confirm with timeutil.MakeTimestamp)
+
+	if len(nonce) == 78 && nonces != nil {
+
+		// Check if the nonce is still valid
+
+		if _, ok := nonces.Get(nonce); ok {
+			return nil
+		}
+	}
+
+	return ErrInvlaidNonce
+}
+
+/*
+ConsumeNonce consumes a given nonce. The nonce will no longer be valid
+after this operation. Returns ErrInvlaidNonce if the nonce was unknown,
+already consumed or has expired.
+*/
+func ConsumeNonce(nonce string) error {
+
+	err := CheckNonce(nonce)
+
+	if err == nil {
+		nonces.Remove(nonce)
+	}
+
+	// Propagate the validation result - returning nil unconditionally
+	// (as before) would hide invalid nonces from the caller
+
+	return err
+}

+ 55 - 0
datautil/nonce_test.go

@@ -0,0 +1,55 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"testing"
+)
+
+/*
+TestNonces checks the create / check / consume lifecycle of nonces and
+the behaviour after the internal cache went away (simulated timeout).
+*/
+func TestNonces(t *testing.T) {
+
+	n1 := NewNonce()
+	n2 := NewNonce()
+
+	// Test normal check
+
+	if err := CheckNonce(n1); err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Test consumption
+
+	if err := ConsumeNonce(n1); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckNonce(n1); err != ErrInvlaidNonce {
+		t.Error("Nonce should no longer be valid")
+		return
+	}
+
+	// Simulate timeout by dropping the whole cache
+
+	nonces = nil
+
+	if err := CheckNonce(n2); err != ErrInvlaidNonce {
+		t.Error("Nonce should no longer be valid")
+		return
+	}
+
+	// Test error case - a value with the wrong length is rejected early
+
+	if err := CheckNonce("test"); err != ErrInvlaidNonce {
+		t.Error("Nonce should no longer be valid")
+		return
+	}
+}

+ 112 - 0
datautil/persistentmap.go

@@ -0,0 +1,112 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"encoding/gob"
+	"os"
+)
+
+/*
+PersistentMap is a persistent map storing arbitrary values under string
+keys. Since the data is serialized via encoding/gob this implementation
+returns more encoding / decoding errors since not all possible values
+are supported.
+*/
+type PersistentMap struct {
+	filename string                 // File of the persistent map
+	Data     map[string]interface{} // Data of the persistent map
+}
+
+/*
+NewPersistentMap creates a new persistent map. The initial Flush call
+creates the backing file immediately so creation errors surface here.
+*/
+func NewPersistentMap(filename string) (*PersistentMap, error) {
+	pm := &PersistentMap{filename, make(map[string]interface{})}
+	return pm, pm.Flush()
+}
+
+/*
+LoadPersistentMap loads a persistent map from a file. Note that the file
+is opened with O_CREATE - loading a non-existing file creates an empty
+file and the subsequent gob decode then fails (presumably with io.EOF -
+confirm against encoding/gob).
+*/
+func LoadPersistentMap(filename string) (*PersistentMap, error) {
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0660)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	pm := &PersistentMap{filename, make(map[string]interface{})}
+
+	de := gob.NewDecoder(file)
+
+	// Decode errors are passed to the caller; the map may be partially
+	// populated in that case
+
+	return pm, de.Decode(&pm.Data)
+}
+
+/*
+Flush writes contents of the persistent map to the disk.
+
+NOTE(review): the file is opened without O_TRUNC, so if the encoded data
+shrinks, stale trailing bytes remain in the file. A single gob Decode
+only reads one value so this appears harmless here - confirm.
+*/
+func (pm *PersistentMap) Flush() error {
+	file, err := os.OpenFile(pm.filename, os.O_CREATE|os.O_RDWR, 0660)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	en := gob.NewEncoder(file)
+
+	return en.Encode(pm.Data)
+}
+
+/*
+PersistentStringMap is a persistent map storing string values under
+string keys. Unlike PersistentMap all values are gob-serializable by
+construction.
+*/
+type PersistentStringMap struct {
+	filename string            // File of the persistent map
+	Data     map[string]string // Data of the persistent map
+}
+
+/*
+NewPersistentStringMap creates a new persistent map. The initial Flush
+call creates the backing file immediately so creation errors surface here.
+*/
+func NewPersistentStringMap(filename string) (*PersistentStringMap, error) {
+	pm := &PersistentStringMap{filename, make(map[string]string)}
+	return pm, pm.Flush()
+}
+
+/*
+LoadPersistentStringMap loads a persistent map from a file. Decode
+errors are reported to the caller; the map may be partially populated
+in that case.
+*/
+func LoadPersistentStringMap(filename string) (*PersistentStringMap, error) {
+	file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0660)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	pm := &PersistentStringMap{filename, make(map[string]string)}
+
+	de := gob.NewDecoder(file)
+
+	// Report decode errors - previously they were silently dropped which
+	// could hand back an empty or partially populated map. This also makes
+	// the function consistent with LoadPersistentMap above.
+
+	return pm, de.Decode(&pm.Data)
+}
+
+/*
+Flush writes contents of the persistent map to the disk. Both encoding
+and file close errors are reported.
+*/
+func (pm *PersistentStringMap) Flush() error {
+	file, err := os.OpenFile(pm.filename, os.O_CREATE|os.O_RDWR, 0660)
+	if err != nil {
+		return err
+	}
+
+	// Report encode errors - previously a failed encode was silently
+	// ignored and only the Close error was returned
+
+	if err := gob.NewEncoder(file).Encode(pm.Data); err != nil {
+		file.Close()
+		return err
+	}
+
+	return file.Close()
+}

+ 146 - 0
datautil/persistentmap_test.go

@@ -0,0 +1,146 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+
+	"devt.de/krotik/common/fileutil"
+)
+
+// Directory holding the files created by the tests of this package
+const testdbdir = "test"
+
+// invalidFileName contains a NUL byte (string(0x0)) which no filesystem
+// accepts - it is used to provoke file creation errors in the tests.
+// NOTE(review): go vet flags string(int) conversions in newer Go versions.
+const invalidFileName = "**" + string(0x0)
+
+/*
+TestMain creates the test file directory before the test run of this
+package and removes it afterwards.
+*/
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	// Setup
+	if res, _ := fileutil.PathExists(testdbdir); res {
+		os.RemoveAll(testdbdir)
+	}
+
+	err := os.Mkdir(testdbdir, 0770)
+	if err != nil {
+		fmt.Print("Could not create test directory:", err.Error())
+		os.Exit(1)
+	}
+
+	// Run the tests
+	res := m.Run()
+
+	// Teardown
+	err = os.RemoveAll(testdbdir)
+	if err != nil {
+		fmt.Print("Could not remove test directory:", err.Error())
+	}
+
+	os.Exit(res)
+
+}
+
+/*
+TestPersistentMap checks storing, flushing and reloading of a
+PersistentMap as well as the error cases around invalid file names.
+*/
+func TestPersistentMap(t *testing.T) {
+
+	// Test main scenario
+
+	pm, err := NewPersistentMap(testdbdir + "/testmap.map")
+	if err != nil {
+		t.Error(err) // was t.Error(nil) which reports nothing useful
+		return
+	}
+
+	pm.Data["test1"] = "test1data"
+	pm.Data["test2"] = "test2data"
+
+	if err := pm.Flush(); err != nil {
+		t.Error(err)
+		return
+	}
+
+	pm2, err := LoadPersistentMap(testdbdir + "/testmap.map")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if len(pm2.Data) != 2 {
+		t.Error("Unexpected size of map")
+		return
+	}
+
+	// Check the loaded instance pm2 - checking pm (as before) would not
+	// prove that the data actually round-tripped through the file
+
+	if pm2.Data["test1"] != "test1data" || pm2.Data["test2"] != "test2data" {
+		t.Error("Unexpected data in map:", pm2.Data)
+		return
+	}
+
+	// Test error cases
+
+	pm, err = NewPersistentMap(invalidFileName)
+	if err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+
+	pm, err = LoadPersistentMap(invalidFileName)
+	if err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+
+	pm = &PersistentMap{invalidFileName, make(map[string]interface{})}
+	if err := pm.Flush(); err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+}
+
+/*
+TestPersistentStringMap checks storing, flushing and reloading of a
+PersistentStringMap as well as the error cases around invalid file names.
+*/
+func TestPersistentStringMap(t *testing.T) {
+
+	// Test main scenario
+
+	pm, err := NewPersistentStringMap(testdbdir + "/teststringmap.map")
+	if err != nil {
+		t.Error(err) // was t.Error(nil) which reports nothing useful
+		return
+	}
+
+	pm.Data["test1"] = "test1data"
+	pm.Data["test2"] = "test2data"
+
+	if err := pm.Flush(); err != nil {
+		t.Error(err)
+		return
+	}
+
+	pm2, err := LoadPersistentStringMap(testdbdir + "/teststringmap.map")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if len(pm2.Data) != 2 {
+		t.Error("Unexpected size of map")
+		return
+	}
+
+	// Check the loaded instance pm2 - checking pm (as before) would not
+	// prove that the data actually round-tripped through the file
+
+	if pm2.Data["test1"] != "test1data" || pm2.Data["test2"] != "test2data" {
+		t.Error("Unexpected data in map:", pm2.Data)
+		return
+	}
+
+	// Test error cases
+
+	pm, err = NewPersistentStringMap(invalidFileName)
+	if err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+
+	pm, err = LoadPersistentStringMap(invalidFileName)
+	if err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+
+	pm = &PersistentStringMap{invalidFileName, make(map[string]string)}
+	if err := pm.Flush(); err == nil {
+		t.Error("Unexpected result of new map")
+		return
+	}
+}

+ 181 - 0
datautil/ringbuffer.go

@@ -0,0 +1,181 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package datautil contains general data handling objects and helper methods.
+*/
+package datautil
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+/*
+RingBuffer is a classic thread-safe ringbuffer implementation. It stores
+abstract interface{} objects. It has specific methods so it can be used as
+a print logger. Once the buffer is full, adding further items overwrites
+the oldest ones.
+*/
+type RingBuffer struct {
+	data     []interface{} // Elements of this ring buffer
+	size     int           // Current number of stored items (not the capacity)
+	first    int           // Index of the first (oldest) item
+	last     int           // Index where the next item will be written
+	modCount int           // Check for modifications during iterations
+	lock     *sync.RWMutex // Lock for RingBuffer (pointer - copies share the lock)
+}
+
+/*
+NewRingBuffer creates a new ringbuffer with a given fixed capacity.
+*/
+func NewRingBuffer(size int) *RingBuffer {
+	return &RingBuffer{make([]interface{}, size), 0, 0, 0, 0, &sync.RWMutex{}}
+}
+
+/*
+Reset removes all content from the ringbuffer. The capacity is kept -
+a fresh backing slice of the same size is allocated so old items can be
+garbage collected.
+*/
+func (rb *RingBuffer) Reset() {
+	rb.lock.Lock()
+	defer rb.lock.Unlock()
+
+	rb.data = make([]interface{}, cap(rb.data))
+	rb.size = 0
+	rb.first = 0
+	rb.last = 0
+	rb.modCount = 0
+}
+
+/*
+IsEmpty reports whether this ringbuffer is empty.
+*/
+func (rb *RingBuffer) IsEmpty() bool {
+	rb.lock.RLock()
+	defer rb.lock.RUnlock()
+
+	return rb.size == 0
+}
+
+/*
+Size returns the number of items currently stored in the ringbuffer
+(not its capacity).
+*/
+func (rb *RingBuffer) Size() int {
+	rb.lock.RLock()
+	defer rb.lock.RUnlock()
+
+	return rb.size
+}
+
+/*
+Get returns an element of the ringbuffer from a given position. The
+position is relative to the oldest element (0 is the head).
+
+NOTE(review): there is no bounds check - a position >= Size() wraps
+around and may return nil or stale slots; callers must keep p in range.
+*/
+func (rb *RingBuffer) Get(p int) interface{} {
+	rb.lock.RLock()
+	defer rb.lock.RUnlock()
+
+	return rb.data[(rb.first+p)%len(rb.data)]
+}
+
+/*
+Add adds an item to the ringbuffer. If the buffer is full the oldest
+item is overwritten.
+*/
+func (rb *RingBuffer) Add(e interface{}) {
+	rb.lock.Lock()
+	defer rb.lock.Unlock()
+
+	ld := len(rb.data)
+
+	rb.data[rb.last] = e
+	rb.last = (rb.last + 1) % ld
+
+	if rb.size == ld {
+		// Buffer is full - advance first to drop the oldest item
+		rb.first = (rb.first + 1) % ld
+	} else {
+		rb.size++
+	}
+
+	rb.modCount++
+}
+
+/*
+Poll removes and returns the head (oldest item) of the ringbuffer.
+It returns nil if the buffer is empty.
+*/
+func (rb *RingBuffer) Poll() interface{} {
+	rb.lock.Lock()
+	defer rb.lock.Unlock()
+
+	if rb.size == 0 {
+		return nil
+	}
+
+	i := rb.data[rb.first]
+
+	// Clear the slot so the removed item can be garbage collected
+
+	rb.data[rb.first] = nil
+
+	rb.size--
+	rb.first = (rb.first + 1) % len(rb.data)
+	rb.modCount++
+
+	return i
+}
+
+/*
+Log writes the given arguments as strings into the ring buffer. Each line is a
+separate item. Every line is added individually, so lines of concurrent
+Log calls may interleave.
+*/
+func (rb *RingBuffer) Log(v ...interface{}) {
+	lines := strings.Split(fmt.Sprint(v...), "\n")
+
+	for _, line := range lines {
+		rb.Add(line)
+	}
+}
+
+/*
+Slice returns the contents of the buffer as a slice. The returned slice
+is a snapshot copy in oldest-to-newest order.
+*/
+func (rb *RingBuffer) Slice() []interface{} {
+	rb.lock.RLock()
+	defer rb.lock.RUnlock()
+
+	ld := len(rb.data)
+	ret := make([]interface{}, rb.size)
+
+	for i := 0; i < rb.size; i++ {
+		ret[i] = rb.data[(i+rb.first)%ld]
+	}
+
+	return ret
+}
+
+/*
+StringSlice returns the contents of the buffer as a slice of strings in
+oldest-to-newest order. Each item of the buffer is rendered via
+fmt.Sprint into a separate string.
+*/
+func (rb *RingBuffer) StringSlice() []string {
+	rb.lock.RLock()
+	defer rb.lock.RUnlock()
+
+	ld := len(rb.data)
+	ret := make([]string, rb.size)
+
+	for i := 0; i < rb.size; i++ {
+		ret[i] = fmt.Sprint(rb.data[(i+rb.first)%ld])
+	}
+
+	return ret
+}
+
+/*
+String returns the contents of the buffer as a string. Each item of the
+buffer is treated as a separate line.
+*/
+func (rb *RingBuffer) String() string {
+	return strings.Join(rb.StringSlice(), "\n")
+}

+ 115 - 0
datautil/ringbuffer_test.go

@@ -0,0 +1,115 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"fmt"
+	"testing"
+)
+
+/*
+TestRingBuffer checks adding, polling, wrap-around overwriting, logging
+and resetting of RingBuffer instances.
+*/
+func TestRingBuffer(t *testing.T) {
+
+	rb := NewRingBuffer(3)
+
+	if !rb.IsEmpty() {
+		t.Error("Initial buffer should be empty")
+		return
+	}
+
+	if rb.Poll() != nil {
+		t.Error("Initial buffer should be empty")
+		return
+	}
+
+	if rb.Size() != 0 {
+		t.Error("Unexpected size:", rb.Size())
+		return
+	}
+
+	rb.Add("AAA")
+
+	if rb.Size() != 1 {
+		t.Error("Unexpected size:", rb.Size())
+		return
+	}
+
+	rb.Add("BBB")
+	rb.Add("CCC")
+
+	if rb.Size() != 3 {
+		t.Error("Unexpected size:", rb.Size())
+		return
+	}
+
+	if rb.String() != `
+AAA
+BBB
+CCC`[1:] {
+		t.Error("Unexpected result:", rb.String())
+		return
+	}
+
+	rb.Log("DDD\nEEE")
+	if rb.Size() != 3 {
+		t.Error("Unexpected size:", rb.Size())
+		return
+	}
+
+	if rb.String() != `
+CCC
+DDD
+EEE`[1:] {
+		t.Error("Unexpected result:", rb.String())
+		return
+	}
+
+	if p := rb.Poll(); p != "CCC" {
+		t.Error("Unexpected result:", p)
+		return
+	}
+
+	if rb.Size() != 2 {
+		t.Error("Unexpected size:", rb.Size())
+		return
+	}
+
+	if p := rb.Get(rb.Size() - 1); p != "EEE" {
+		t.Error("Unexpected result:", p)
+		return
+	}
+
+	rb = NewRingBuffer(100)
+
+	rb.Add("AAA")
+
+	if s := rb.String(); s != "AAA" {
+		t.Error("Unexpected result:", s)
+		return
+	}
+
+	rb.Add("BBB")
+
+	if s := rb.String(); s != "AAA\nBBB" {
+		t.Error("Unexpected result:", s)
+		return
+	}
+
+	if s := rb.Slice(); fmt.Sprint(s) != "[AAA BBB]" {
+		t.Error("Unexpected result:", s)
+		return
+	}
+
+	rb.Reset()
+
+	// Fixed typo in the failure message ("shoudl" -> "should")
+
+	if !rb.IsEmpty() {
+		t.Error("Buffer should be empty after a reset")
+		return
+	}
+}

File diff suppressed because it is too large
+ 701 - 0
datautil/userdb.go


+ 484 - 0
datautil/userdb_test.go

@@ -0,0 +1,484 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package datautil
+
+import (
+	"fmt"
+	"path"
+	"testing"
+)
+
+/*
+TestUserDB checks storing, reloading, updating and removing of user
+credentials and user data across several UserDB instances which share
+the same backing file.
+*/
+func TestUserDB(t *testing.T) {
+
+	// Create user DB instance and store a credential
+
+	ud, err := NewUserDB(path.Join(testdbdir, "testuserdb"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	err = ud.AddUserEntry("fred", "s3cret", map[string]interface{}{
+		"field1": "foo",
+		"field2": 2,
+	})
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Create a second user DB instance
+
+	ud2, err := NewUserDB(path.Join(testdbdir, "testuserdb"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check that the data was loaded
+
+	if res := fmt.Sprint(ud2.AllUsers()); res != "[fred]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Get the user data
+
+	data, ok := ud2.UserData("fred")
+
+	if !ok || data["field1"] != "foo" || data["field2"] != 2 {
+		t.Error("Unexpected result:", ok, data)
+		return
+	}
+
+	// Check user password
+
+	if ok := ud2.CheckUserPassword("fred", "s3cret"); !ok || err != nil {
+		t.Error("Unexpected result:", ok, err)
+		return
+	}
+
+	if ok := ud2.CheckUserPassword("fred", "s4cret"); ok || err != nil {
+		t.Error("Unexpected result:", ok, err)
+		return
+	}
+
+	// Change data and password
+
+	err = ud2.UpdateUserPassword("fred", "secret55")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	err = ud2.UpdateUserData("fred", map[string]interface{}{
+		"field5": "bar",
+		"field2": 2,
+	})
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// ... and another instance
+
+	ud3, err := NewUserDB(path.Join(testdbdir, "testuserdb"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check that all updated information are correct
+
+	data, ok = ud3.UserData("fred")
+
+	if !ok || data["field5"] != "bar" || data["field2"] != 2 {
+		t.Error("Unexpected result:", ok, data)
+		return
+	}
+
+	// Check user password
+
+	if ok := ud3.CheckUserPassword("fred", "s3cret"); ok || err != nil {
+		t.Error("Unexpected result:", ok, err)
+		return
+	}
+
+	if ok := ud3.CheckUserPassword("fred", "secret55"); !ok || err != nil {
+		t.Error("Unexpected result:", ok, err)
+		return
+	}
+
+	// Remove now the entry - previously the returned error was ignored
+
+	if err = ud3.RemoveUserEntry("fred"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	ud4, err := NewUserDB(path.Join(testdbdir, "testuserdb"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check that the data was removed
+
+	if res := fmt.Sprint(ud4.AllUsers()); res != "[]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+/*
+TestUserDBPasswordHistory checks that only the last MaxPassHistory
+password hashes are kept and matched by CheckUserPasswordHistory.
+Previously several UpdateUserPassword errors were ignored and err.Error()
+was called without a nil guard (panicking on unexpected success).
+*/
+func TestUserDBPasswordHistory(t *testing.T) {
+	oldMaxPassHistory := MaxPassHistory
+	MaxPassHistory = 3
+	defer func() {
+		MaxPassHistory = oldMaxPassHistory
+	}()
+
+	// Create user DB instance and store a credential
+
+	ud, err := NewUserDB(path.Join(testdbdir, "testuserdbhistory"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err = ud.AddUserEntry("fred", "s3cret1", nil); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 0 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret1"); err == nil || err.Error() != "Cannot reuse current password" {
+		t.Error(err)
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret2"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 1 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret1"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret2"); ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret3"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret1"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret2"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 2 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret4"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret1"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret2"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret3"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 3 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret5"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret2"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret3"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret4"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 3 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret1"); ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if err = ud.UpdateUserPassword("fred", "s3cret6"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret3"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret4"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret5"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if len(ud.Data["fred"].PasshashHistory) != 3 {
+		t.Error("Unexpected result:", ud.Data["fred"].PasshashHistory)
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret2"); ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPasswordHistory("fred", "s3cret6"); ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if ok := ud.CheckUserPassword("fred", "s3cret6"); !ok || err != nil {
+		t.Error("Unexpected result")
+		return
+	}
+}
+
+/*
+TestUserDBErrorCases checks the error paths of UserDB: invalid backing
+file names, duplicate user creation and operations on unknown users.
+*/
+func TestUserDBErrorCases(t *testing.T) {
+
+	ud, err := NewUserDB(path.Join(testdbdir, invalidFileName), "test123")
+
+	if err == nil || ud != nil {
+		t.Error("Unexpected result:", err, ud)
+		return
+	}
+
+	ud, err = NewUserDB(path.Join(testdbdir, "errtest"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	err = ud.AddUserEntry("foo", "bar", nil)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	err = ud.AddUserEntry("foo", "bar", nil)
+	if err == nil || err.Error() != "User foo already exists" {
+		t.Error(err)
+		return
+	}
+
+	err = ud.UpdateUserData("fred", nil)
+	if err == nil || err.Error() != "Unknown user fred" {
+		t.Error(err)
+		return
+	}
+
+	err = ud.UpdateUserPassword("fred", "")
+	if err == nil || err.Error() != "Unknown user fred" {
+		t.Error(err)
+		return
+	}
+
+	err = ud.RemoveUserEntry("fred")
+	if err == nil || err.Error() != "Unknown user fred" {
+		t.Error(err)
+		return
+	}
+
+}
+
+/*
+TestEnforcedUserDB checks password policy enforcement: check parameters,
+history based rejections, dictionary similarity and strength evaluation.
+Previously three checks called err.Error() without a nil guard which
+would panic (instead of fail) if the operation unexpectedly succeeded.
+*/
+func TestEnforcedUserDB(t *testing.T) {
+
+	// Create user DB instance and store a credential
+
+	eud, err := NewEnforcedUserDB(path.Join(testdbdir, "testenforceuserdb"), "test123")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	eud.SetPasswordCheckParam("NotContainSequence", false)
+
+	if err := eud.AddUserEntry("fritz", "#Secr3taaa", nil); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if eud.UserExists("foo") {
+		t.Error("User foo should not exist")
+		return
+	}
+
+	if !eud.UserExists("fritz") {
+		t.Error("User fritz should exist")
+		return
+	}
+
+	eud.SetPasswordCheckParam("NotContainSequence", true)
+
+	if res := len(eud.PasswordCheckParams()); res != 8 {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if err := eud.UpdateUserPassword("fritz", "#Secr3tbbb"); err == nil || err.Error() != "Password must not contain a same character sequence" {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.UpdateUserPassword("fritz", "#Secr3tabc"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.UpdateUserPassword("fritz", "#Secr3taaa"); err == nil || err.Error() != "Password was used before within the last 10 changes; Password must not contain a same character sequence" {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.AddUserEntry("hans", "aaa", nil); err == nil || err.Error() != "Password matches a common dictionary password; Password must be at least 8 characters long; Password must contain an upper case character; Password must contain a number; Password must contain a special character; Password must not contain a same character sequence" {
+		t.Error(err)
+		return
+	}
+
+	// Test multiple errors
+
+	if err := eud.UpdateUserPassword("fritz", "aaa"); err == nil || err.Error() != "Password matches a common dictionary password; Password must be at least 8 characters long; Password must contain an upper case character; Password must contain a number; Password must contain a special character; Password must not contain a same character sequence" {
+		t.Error(err)
+		return
+	}
+	if err := eud.IsAcceptablePassword("fritz", "#Secr3tabc"); err == nil || err.Error() != "Cannot reuse current password" {
+		t.Error(err)
+		return
+	}
+	if err := eud.IsAcceptablePassword("fritz", "AA1"); err == nil || err.Error() != "Password is too similar to the common dictionary password aa1234 (50% match); Password must be at least 8 characters long; Password must contain a lower case character; Password must contain a special character" {
+		t.Error(err)
+		return
+	}
+	if err := eud.IsAcceptablePassword("fritz", "xxx"); err == nil || err.Error() != "Password must be at least 8 characters long; Password must contain an upper case character; Password must contain a number; Password must contain a special character; Password must not contain a same character sequence" {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.IsAcceptablePassword("fritz", "AA2"); err == nil || err.Error() != "Password is too similar to the common dictionary password aaa (66% match); Password must be at least 8 characters long; Password must contain a lower case character; Password must contain a special character" {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.IsAcceptablePassword("fritz", "Test1234#"); err == nil || err.Error() != "Password is too similar to the common dictionary password test12345 (88% match)" {
+		t.Error(err)
+		return
+	}
+
+	if err := eud.IsAcceptablePassword("fritz", "#Test1234"); err == nil || err.Error() != "Password is too similar to the common dictionary password test1234 (88% match)" {
+		t.Error(err)
+		return
+	}
+
+	// Test EvalPasswordStrength
+
+	if score, warn, err := eud.EvalPasswordStrength("fritz", "aaa"); fmt.Sprintf("%v#%v#%v", score, warn, err) != "0#[]#Password matches a common dictionary password; Password must be at least 8 characters long; Password must contain an upper case character; Password must contain a number; Password must contain a special character; Password must not contain a same character sequence" {
+		t.Error("Unexpected result:", fmt.Sprintf("%v#%v#%v", score, warn, err))
+		return
+	}
+
+	if score, warn, err := eud.EvalPasswordStrength("fritz", "#Secr3ttest"); fmt.Sprintf("%v#%v#%v", score, warn, err) != "1#[Password should be at least 12 characters long Password should contain at least 2 upper case characters Password should contain at least 2 numbers Password should contain at least 2 special characters Password is vaguely similar to the common dictionary password secre (45% match)]#<nil>" {
+		t.Error("Unexpected result:", fmt.Sprintf("%v#%v#%v", score, warn, err))
+		return
+	}
+
+	if score, warn, err := eud.EvalPasswordStrength("fritz", "#SECR3TTEsT"); fmt.Sprintf("%v#%v#%v", score, warn, err) != "1#[Password should be at least 12 characters long Password should contain at least 2 lower case characters Password should contain at least 2 numbers Password should contain at least 2 special characters Password is vaguely similar to the common dictionary password secre (45% match)]#<nil>" {
+		t.Error("Unexpected result:", fmt.Sprintf("%v#%v#%v", score, warn, err))
+		return
+	}
+
+	if score, warn, err := eud.EvalPasswordStrength("fritz", "#ArchBoo0815!"); fmt.Sprintf("%v#%v#%v", score, warn, err) != "10#[]#<nil>" {
+		t.Error("Unexpected result:", fmt.Sprintf("%v#%v#%v", score, warn, err))
+		return
+	}
+}
+
+/*
+TestDictPasswordDetection checks CheckForDictPassword for a non-match,
+a direct dictionary match (distance 0) and partial matches with their
+edit distances.
+*/
+func TestDictPasswordDetection(t *testing.T) {
+
+	// No match
+
+	match, word, dist := CheckForDictPassword("ZYxzzyxzzy55xz#")
+
+	if res := fmt.Sprintf("%v#%v#%v", match, word, dist); res != "false##-1" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Direct match
+
+	match, word, dist = CheckForDictPassword("fireball")
+
+	if res := fmt.Sprintf("%v#%v#%v", match, word, dist); res != "true#fireball#0" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Partial match
+
+	match, word, dist = CheckForDictPassword("testfire")
+
+	if res := fmt.Sprintf("%v#%v#%v", match, word, dist); res != "false#testibil#4" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	match, word, dist = CheckForDictPassword("tuberbla")
+
+	if res := fmt.Sprintf("%v#%v#%v", match, word, dist); res != "false#erbol#5" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}

+ 30 - 0
defs/rambazamba/eventsource.go

@@ -0,0 +1,30 @@
+/*
+ * Rambazamba
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the MIT
+ * License. If a copy of the MIT License was not distributed with this
+ * file, You can obtain one at https://opensource.org/licenses/MIT.
+ */
+
+package rambazamba
+
+/*
+EventPublisher is the API for external event sources to publish events
+to Rambazamba engines. The event source should use a given EventPublisher
+object to inject events. Use api.RegisterEventSource to create a new
+EventPublisher object.
+*/
+type EventPublisher interface {
+
+	/*
+		AddEvent adds a new event to one or more Rambazamba engines.
+		Expects 3 parameters: Name - a name which identifies the event,
+		Kind - an event kind which is checked against the kind match of
+		sinks and State - an event state which contains additional data.
+		All of the given parameters will be accessible from Rumble if
+		the event triggers a Rumble sink.
+	*/
+	AddEvent(name string, kind []string, state map[interface{}]interface{}) error
+}

+ 40 - 0
defs/rumble/func.go

@@ -0,0 +1,40 @@
+/*
+ * Rambazamba
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the MIT
+ * License. If a copy of the MIT License was not distributed with this
+ * file, You can obtain one at https://opensource.org/licenses/MIT.
+ */
+
+/*
+Package rumble contains all definitions which external code should use to
+integrate with Rambazamba.
+*/
+package rumble
+
+/*
+Function is a function in Rumble. Implementations must be safe for
+concurrent use since Execute may be called by several threads at once.
+*/
+type Function interface {
+
+	/*
+		Name returns the name of the function. A function name should be
+		camelCase and should only contain alphanumerical characters.
+	*/
+	Name() string
+
+	/*
+		Validate is called to validate the number of arguments, check the
+		environment and to execute any initialisation code which might be
+		necessary for the function.
+	*/
+	Validate(argsNum int, runtime Runtime) RuntimeError
+
+	/*
+		Execute executes the rumble function. This function might be called
+		by several threads concurrently.
+	*/
+	Execute(argsVal []interface{}, vars Variables, runtime Runtime) (interface{}, RuntimeError)
+}

+ 45 - 0
defs/rumble/globals.go

@@ -0,0 +1,45 @@
+/*
+ * Rambazamba
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the MIT
+ * License. If a copy of the MIT License was not distributed with this
+ * file, You can obtain one at https://opensource.org/licenses/MIT.
+ */
+
+package rumble
+
+import (
+	"errors"
+)
+
+/*
+Default variables for sinks
+*/
+const (
+	VarProcessor = "processor" // Current event processor (new sinks will be added to this)
+	VarMonitor   = "monitor"   // Current event monitor (new events will be using this)
+	VarEvent     = "event"     // Current event which triggered a sink
+)
+
+/*
+Runtime related error types - these errors are generic errors of Rumble
+where the code will not check for object equality. They are meant to be
+used as the type parameter of Runtime.NewRuntimeError.
+*/
+var (
+	ErrInvalidConstruct = errors.New("Invalid construct")
+	ErrInvalidState     = errors.New("Invalid state")
+	ErrVarAccess        = errors.New("Cannot access variable")
+	ErrNotANumber       = errors.New("Operand is not a number")
+	ErrNotABoolean      = errors.New("Operand is not a boolean")
+	ErrNotAList         = errors.New("Operand is not a list")
+	ErrNotAMap          = errors.New("Operand is not a map")
+	ErrNotAListOrMap    = errors.New("Operand is not a list nor a map")
+)
+
+/*
+RuntimeError is a special error which contains additional internal
+information which are not exposed (e.g. code line). Note that this is a
+named alias of the error interface, not a distinct struct type.
+*/
+type RuntimeError error

+ 22 - 0
defs/rumble/runtime.go

@@ -0,0 +1,22 @@
+/*
+ * Rambazamba
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the MIT
+ * License, If a copy of the MIT License was not distributed with this
+ * file, You can obtain one at https://opensource.org/licenses/MIT.
+ */
+
+package rumble
+
+/*
+Runtime accesses the runtime environment of the function.
+*/
+type Runtime interface {
+
+	/*
+	   NewRuntimeError creates a new runtime error.
+	*/
+	NewRuntimeError(t error, d string) RuntimeError
+}

+ 27 - 0
defs/rumble/variables.go

@@ -0,0 +1,27 @@
+/*
+ * Rambazamba
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the MIT
+ * License, If a copy of the MIT License was not distributed with this
+ * file, You can obtain one at https://opensource.org/licenses/MIT.
+ */
+
+package rumble
+
+/*
+Variables accesses the variable scope of the function.
+*/
+type Variables interface {
+
+	/*
+	   SetValue sets a new value for a variable.
+	*/
+	SetValue(varName string, varValue interface{}) error
+
+	/*
+	   GetValue gets the current value of a variable.
+	*/
+	GetValue(varName string) (interface{}, bool, error)
+}

+ 68 - 0
errorutil/errorutil.go

@@ -0,0 +1,68 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package errorutil contains common error objects and functions.
+*/
+package errorutil
+
+import "strings"
+
+/*
+AssertOk will panic on any non-nil error parameter.
+*/
+func AssertOk(err error) {
+	if err != nil {
+		panic(err.Error())
+	}
+}
+
+/*
+AssertTrue will panic if the given condition is negative.
+*/
+func AssertTrue(condition bool, errString string) {
+	if !condition {
+		panic(errString)
+	}
+}
+
+/*
+CompositeError can collect multiple errors in a single error object.
+*/
+type CompositeError struct {
+	Errors []string
+}
+
+/*
+NewCompositeError creates a new composite error object.
+*/
+func NewCompositeError() *CompositeError {
+	return &CompositeError{make([]string, 0)}
+}
+
+/*
+Add adds an error.
+*/
+func (ce *CompositeError) Add(e error) {
+	ce.Errors = append(ce.Errors, e.Error())
+}
+
+/*
+HasErrors returns true if any errors have been collected.
+*/
+func (ce *CompositeError) HasErrors() bool {
+	return len(ce.Errors) > 0
+}
+
+/*
+Error returns all collected errors as a string.
+*/
+func (ce *CompositeError) Error() string {
+	return strings.Join(ce.Errors, "; ")
+}

+ 64 - 0
errorutil/errorutil_test.go

@@ -0,0 +1,64 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package errorutil
+
+import (
+	"errors"
+	"testing"
+)
+
+func TestAssertOk(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Error("Giving AssertOk an error should cause a panic.")
+		}
+	}()
+
+	AssertOk(errors.New("test"))
+}
+
+func TestAssertTrue(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Error("Giving AssertTrue a negative condition should cause a panic.")
+		}
+	}()
+
+	AssertTrue(false, "bla")
+}
+
+func TestCompositeError(t *testing.T) {
+
+	ce := NewCompositeError()
+
+	if ce.HasErrors() {
+		t.Error("CompositeError object shouldn't have any errors yet")
+		return
+	}
+
+	ce.Add(errors.New("test1"))
+
+	if !ce.HasErrors() {
+		t.Error("CompositeError object should have one error by now")
+		return
+	}
+
+	ce.Add(errors.New("test2"))
+
+	// Add a CompositeError to a CompositeError
+
+	ce2 := NewCompositeError()
+	ce2.Add(errors.New("test3"))
+	ce.Add(ce2)
+
+	if ce.Error() != "test1; test2; test3" {
+		t.Error("Unexpected output:", ce.Error())
+	}
+}

+ 418 - 0
fileutil/config.go

@@ -0,0 +1,418 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package fileutil contains file based utilities and helper functions.
+*/
+package fileutil
+
+import (
+	"crypto/sha256"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"sync"
+	"time"
+
+	"devt.de/krotik/common/stringutil"
+)
+
+/*
+LoadConfig loads or creates a JSON based configuration file. Missing settings
+from the config file will be filled with default settings. This function provides
+a simple mechanism for programs to handle user-defined configuration files which
+should be loaded at start time.
+*/
+func LoadConfig(filename string, defaultConfig map[string]interface{}) (map[string]interface{}, error) {
+	var mdata []byte
+	var data map[string]interface{}
+	var err error
+	var ok bool
+
+	if ok, err = PathExists(filename); err != nil {
+		return nil, err
+
+	} else if ok {
+
+		// Load config
+
+		mdata, err = ioutil.ReadFile(filename)
+		if err == nil {
+
+			err = json.Unmarshal(mdata, &data)
+			if err == nil {
+
+				// Make sure all required configuration values are set
+
+				for k, v := range defaultConfig {
+					if dv, ok := data[k]; !ok || dv == nil {
+						data[k] = v
+					}
+				}
+			}
+		}
+
+	} else if err == nil {
+
+		// Write config
+
+		data = defaultConfig
+
+		mdata, err = json.MarshalIndent(data, "", "    ")
+		if err == nil {
+
+			err = ioutil.WriteFile(filename, mdata, 0644)
+		}
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+/*
+ConfStr reads a config value as a string value.
+*/
+func ConfStr(config map[string]interface{}, key string) string {
+	return fmt.Sprint(config[key])
+}
+
+/*
+ConfBool reads a config value as a boolean value.
+*/
+func ConfBool(config map[string]interface{}, key string) bool {
+	return strings.ToLower(fmt.Sprint(config[key])) == "true"
+}
+
+// Watched Config
+// ==============
+
+/*
+WatchedConfigErrRetries is the number of times the code will try to
+read the disk configuration before overwriting it with the current
+(working) configuration. Set to -1 if it should never attempt to overwrite.
+*/
+var WatchedConfigErrRetries = 10
+
+/*
+watchSleep is the sleep which is used by the watch thread
+*/
+var watchSleep = time.Sleep
+
+/*
+Defined error codes for WatchedConfig
+*/
+var (
+	ErrClosed = errors.New("Config file was closed")
+)
+
+/*
+WatchedConfig is a helper object which continuously watches a given config file.
+The file and the memory config are kept in sync.
+*/
+type WatchedConfig struct {
+	config     map[string]interface{} // Internal in memory config
+	configLock *sync.RWMutex          // Lock for config
+	interval   time.Duration          // Interval with which the file should be watched
+	filename   string                 // File which stores the config
+	SyncError  error                  // Synchronization errors
+	shutdown   chan bool              // Signal channel for thread shutdown
+}
+
+/*
+NewWatchedConfig returns a new watcher object for a given config file.
+*/
+func NewWatchedConfig(filename string, defaultConfig map[string]interface{},
+	interval time.Duration) (*WatchedConfig, error) {
+
+	var ret *WatchedConfig
+
+	config, err := LoadConfig(filename, defaultConfig)
+
+	if err == nil {
+		wc := &WatchedConfig{config, &sync.RWMutex{}, interval, filename, nil, nil}
+
+		err = wc.start()
+
+		if err == nil {
+			ret = wc
+		}
+	}
+
+	return ret, err
+}
+
+/*
+GetValue returns a single config value.
+*/
+func (wc *WatchedConfig) GetValue(k string) (interface{}, bool, error) {
+	wc.configLock.Lock()
+	defer wc.configLock.Unlock()
+
+	if wc.SyncError != nil {
+		return nil, false, wc.SyncError
+	}
+
+	val, ok := wc.config[k]
+
+	return val, ok, nil
+}
+
+/*
+GetConfig returns the current config.
+*/
+func (wc *WatchedConfig) GetConfig() (map[string]interface{}, error) {
+	wc.configLock.Lock()
+	defer wc.configLock.Unlock()
+
+	if wc.SyncError != nil {
+		return nil, wc.SyncError
+	}
+
+	cconfig := make(map[string]interface{})
+
+	for k, v := range wc.config {
+		cconfig[k] = v
+	}
+
+	return cconfig, nil
+}
+
+/*
+start kicks off the file watcher background thread.
+*/
+func (wc *WatchedConfig) start() error {
+
+	// Sync from file - if the file exists. No need to hold a lock since
+	// we are in the startup
+
+	err := wc.sync(true)
+
+	if err == nil {
+
+		// Kick off watcher
+
+		wc.shutdown = make(chan bool)
+
+		go wc.watch()
+	}
+
+	return err
+}
+
+/*
+watch is the internal file watch goroutine function.
+*/
+func (wc *WatchedConfig) watch() {
+	err := wc.SyncError
+	errCnt := 0
+
+	defer func() {
+		wc.shutdown <- true
+	}()
+
+	for wc.SyncError != ErrClosed {
+
+		// Wakeup every interval
+
+		watchSleep(wc.interval)
+
+		// Run the sync
+
+		wc.configLock.Lock()
+
+		// Sync from file
+
+		if err = wc.sync(true); err != nil && wc.SyncError != ErrClosed {
+
+			// Increase the error count
+
+			err = fmt.Errorf("Could not sync config from disk: %v",
+				err.Error())
+
+			errCnt++
+
+		} else {
+
+			// Reset the error count
+
+			errCnt = 0
+		}
+
+		// Update the sync error
+
+		if wc.SyncError != ErrClosed {
+			wc.SyncError = err
+		}
+
+		if errCnt == WatchedConfigErrRetries {
+
+			// We can't read the disk configuration after
+			// WatchedConfigErrRetries attempts - try to overwrite
+			// it with the working memory configuration
+
+			wc.sync(false)
+		}
+
+		wc.configLock.Unlock()
+	}
+}
+
+/*
+Close closes this config watcher.
+*/
+func (wc *WatchedConfig) Close() error {
+	var err error
+
+	wc.configLock.Lock()
+
+	if wc.SyncError != nil {
+
+		// Preserve any old error
+
+		err = wc.SyncError
+	}
+
+	// Set the table into the closed state
+
+	wc.SyncError = ErrClosed
+
+	wc.configLock.Unlock()
+
+	// Wait for watcher shutdown if it was started
+
+	if wc.shutdown != nil {
+		<-wc.shutdown
+		wc.shutdown = nil
+	}
+
+	return err
+}
+
+/*
+Attempt to synchronize the memory config with the file. Depending on the
+checkFile flag either the file (true) or the memory config (false) is
+regarded as up-to-date.
+
+It is assumed that the configLock (write) is held before calling this
+function.
+
+The table is in an undefined state if an error is returned.
+*/
+func (wc *WatchedConfig) sync(checkFile bool) error {
+	var checksumFile, checksumMemory string
+
+	stringMemoryTable := func() ([]byte, error) {
+		return json.MarshalIndent(wc.config, "", "  ")
+	}
+
+	writeMemoryTable := func() error {
+		res, err := stringMemoryTable()
+
+		if err == nil {
+			err = ioutil.WriteFile(wc.filename, res, 0644)
+		}
+
+		return err
+	}
+
+	readMemoryTable := func() (map[string]interface{}, error) {
+		var conf map[string]interface{}
+
+		res, err := ioutil.ReadFile(wc.filename)
+
+		if err == nil {
+			err = json.Unmarshal(stringutil.StripCStyleComments(res), &conf)
+		}
+
+		return conf, err
+	}
+
+	// Check if the file can be opened
+
+	file, err := os.OpenFile(wc.filename, os.O_RDONLY, 0660)
+
+	if err != nil {
+
+		if os.IsNotExist(err) {
+
+			// Just ignore not found errors
+
+			err = nil
+		}
+
+		// File does not exist - no checksum
+
+		checksumFile = ""
+
+	} else {
+
+		hashFactory := sha256.New()
+
+		if _, err = io.Copy(hashFactory, file); err == nil {
+
+			// Create the checksum of the present file
+
+			checksumFile = fmt.Sprintf("%x", hashFactory.Sum(nil))
+		}
+
+		file.Close()
+	}
+
+	if err == nil {
+
+		// At this point we know everything about the file now check
+		// the memory table
+
+		var cString []byte
+
+		if cString, err = stringMemoryTable(); err == nil {
+			hashFactory := sha256.New()
+
+			hashFactory.Write(cString)
+
+			checksumMemory = fmt.Sprintf("%x", hashFactory.Sum(nil))
+		}
+	}
+
+	if err == nil {
+
+		// At this point we also know everything about the memory table
+
+		if checkFile {
+
+			// File is up-to-date - we should build the memory table
+
+			if checksumFile != checksumMemory {
+				var conf map[string]interface{}
+
+				if conf, err = readMemoryTable(); err == nil {
+					wc.config = conf
+				}
+			}
+
+		} else {
+
+			// Memory is up-to-date - we should write a new file
+
+			if checksumFile != checksumMemory {
+
+				err = writeMemoryTable()
+			}
+		}
+	}
+
+	return err
+}

+ 221 - 0
fileutil/config_test.go

@@ -0,0 +1,221 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package fileutil
+
+import (
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+	"time"
+)
+
+const InvalidFileName = "**" + string(0x0)
+
+var testDefaultConfig = map[string]interface{}{
+	"MemoryOnlyStorage": false,
+	"DatastoreLocation": "db",
+}
+
+func TestLoadingConfig(t *testing.T) {
+
+	configFile := "test.config.json"
+
+	if res, _ := PathExists(configFile); res {
+		os.Remove(configFile)
+	}
+
+	// Test config creation
+
+	config, err := LoadConfig(configFile, testDefaultConfig)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if res, _ := PathExists(configFile); !res {
+		t.Error("Config should have been created")
+		return
+	}
+
+	// We should have now created a default config file
+
+	compareConfig(t, config, testDefaultConfig)
+
+	// Test reload of config creation
+
+	config, err = LoadConfig(configFile, testDefaultConfig)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	compareConfig(t, config, testDefaultConfig)
+
+	ioutil.WriteFile(configFile, []byte("{ \"wrong"), 0644)
+
+	_, err = LoadConfig(configFile, testDefaultConfig)
+	if err.Error() != "unexpected end of JSON input" {
+		t.Error(err)
+		return
+	}
+
+	// Write partial config - Make sure all is loaded
+
+	ioutil.WriteFile(configFile, []byte(`{"MemoryOnlyStorage":false}`), 0644)
+
+	config, err = LoadConfig(configFile, testDefaultConfig)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check that the default values have been added
+
+	compareConfig(t, config, testDefaultConfig)
+
+	// Test value retrieval
+
+	if res := ConfBool(config, "MemoryOnlyStorage"); res {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConfStr(config, "DatastoreLocation"); res != "db" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res, _ := PathExists(configFile); res {
+		os.Remove(configFile)
+	}
+
+	// Check invalid config file
+
+	configFile = "**" + string(0x0)
+
+	_, err = LoadConfig(configFile, testDefaultConfig)
+	if !strings.Contains(strings.ToLower(err.Error()), string(0)+": invalid argument") {
+		t.Error(err)
+		return
+	}
+}
+
+func compareConfig(t *testing.T, config1 map[string]interface{}, config2 map[string]interface{}) {
+	if len(config1) != len(config2) {
+		t.Error("Given config has different elements to loaded config:",
+			config1, config2)
+		return
+	}
+	for k, v := range config1 {
+		if v != config2[k] {
+			t.Error("Different values for:", k, " -> ", v, "vs", config2[k])
+			return
+		}
+	}
+}
+
+func TestPersistedConfig(t *testing.T) {
+	testFile := "persist_tester.cfg"
+	defer func() {
+		os.Remove(testFile)
+	}()
+
+	// Test the most basic start and stop
+
+	pt, err := NewWatchedConfig(testFile, testDefaultConfig, time.Millisecond)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	v, ok, err := pt.GetValue("MemoryOnlyStorage")
+	if !ok || err != nil || v != false {
+		t.Error("Unexpected stored value:", v, ok, err)
+		return
+	}
+
+	v, ok, err = pt.GetValue("foo")
+	if ok || err != nil || v != nil {
+		t.Error("Unexpected stored value:", v, ok, err)
+		return
+	}
+
+	c, err := pt.GetConfig()
+	if err != nil {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+	if len(c) != 2 {
+		t.Error("Unexpected result:", c)
+		return
+	}
+
+	ioutil.WriteFile(testFile, []byte(`{"MemoryOnlyStorage":true}`), 0644)
+
+	time.Sleep(100 * time.Millisecond)
+
+	v, ok, err = pt.GetValue("MemoryOnlyStorage")
+	if !ok || err != nil || v != true {
+		t.Error("Unexpected stored value:", v, ok, err)
+		return
+	}
+
+	// Check error state
+
+	pt.filename = InvalidFileName
+
+	WatchedConfigErrRetries = 2
+
+	time.Sleep(10 * time.Millisecond)
+
+	_, _, err = pt.GetValue("MemoryOnlyStorage")
+	if err == nil || err.Error() != "Could not sync config from disk: open **"+string(0)+": invalid argument" {
+		t.Error("Unexpected stored value:", err)
+		return
+	}
+
+	_, err = pt.GetConfig()
+	if err == nil || err.Error() != "Could not sync config from disk: open **"+string(0)+": invalid argument" {
+		t.Error("Unexpected stored value:", err)
+		return
+	}
+
+	err = pt.Close()
+	if err == nil || err.Error() != "Could not sync config from disk: open **"+string(0)+": invalid argument" {
+		t.Error("Unexpected stored value:", err)
+		return
+	}
+
+	pt, err = NewWatchedConfig(testFile, testDefaultConfig, time.Millisecond)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	time.Sleep(100 * time.Millisecond)
+
+	os.Remove(testFile)
+
+	time.Sleep(100 * time.Millisecond)
+
+	v, ok, err = pt.GetValue("MemoryOnlyStorage")
+	if !ok || err != nil || v != true {
+		t.Error("Unexpected stored value:", v, ok, err)
+		return
+	}
+
+	err = pt.Close()
+	if err != nil {
+		t.Error("Unexpected stored value:", err)
+		return
+	}
+}

+ 134 - 0
fileutil/fileutil.go

@@ -0,0 +1,134 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package fileutil
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+
+	"devt.de/krotik/common/bitutil"
+	"devt.de/krotik/common/pools"
+)
+
+/*
+PathExists returns whether the given file or directory exists.
+*/
+func PathExists(path string) (bool, error) {
+	_, err := os.Stat(path)
+	if err == nil {
+		return true, nil
+	}
+	if os.IsNotExist(err) {
+		return false, nil
+	}
+	return false, err
+}
+
+/*
+IsDir returns whether the given path is a directory.
+*/
+func IsDir(path string) (bool, error) {
+	stat, err := os.Stat(path)
+	if err != nil {
+		return false, err
+	}
+
+	return stat.IsDir(), nil
+}
+
+/*
+CheckSumFile calculates a sha256 checksum of a given file. This function
+will read in the whole file.
+*/
+func CheckSumFile(path string) (string, error) {
+	var checksum = ""
+
+	f, err := os.Open(path)
+
+	if err == nil {
+		defer f.Close()
+
+		hashFactory := sha256.New()
+
+		if _, err = io.Copy(hashFactory, f); err == nil {
+			checksum = fmt.Sprintf("%x", hashFactory.Sum(nil))
+		}
+	}
+
+	return checksum, err
+}
+
+/*
+fastSumSampleSize is the sample size for fast checksum
+*/
+const fastSumSampleSize = 16 * 1024
+
+/*
+bufferPool holds buffers which are used for fast checksums.
+*/
+var fastChecksumBigBufferPool = pools.NewByteBufferPool()
+var fastChecksumSmallBufferPool = pools.NewByteSlicePool(fastSumSampleSize * 3)
+
+/*
+CheckSumFileFast calculates a 32bit MurmurHash3 checksum from a portion
+of the given file.
+*/
+func CheckSumFileFast(path string) (string, error) {
+	var fi os.FileInfo
+	var checksum = ""
+
+	f, err := os.Open(path)
+
+	if err == nil {
+		defer f.Close()
+
+		if fi, err = f.Stat(); err == nil {
+			var res uint32
+
+			if fi.Size() < int64(fastSumSampleSize*8) {
+				buf := fastChecksumBigBufferPool.Get().(*bytes.Buffer)
+
+				// Read in the whole file
+
+				if _, err = io.Copy(buf, f); err == nil {
+
+					if res, err = bitutil.MurMurHashData(buf.Bytes(), 0, buf.Len(), 42); err == nil {
+						checksum = fmt.Sprintf("%x", res)
+					}
+				}
+
+				buf.Reset()
+				fastChecksumBigBufferPool.Put(buf)
+
+			} else {
+
+				sr := io.NewSectionReader(f, 0, fi.Size())
+				buf := fastChecksumSmallBufferPool.Get().([]byte)
+
+				sr.Read(buf[:fastSumSampleSize])
+				sr.Seek(sr.Size()/2, 0)
+				sr.Read(buf[fastSumSampleSize : fastSumSampleSize*2])
+				sr.Seek(int64(-fastSumSampleSize), 2)
+				sr.Read(buf[fastSumSampleSize*2:])
+
+				if res, err = bitutil.MurMurHashData(buf, 0, len(buf)-1, 42); err == nil {
+					checksum = fmt.Sprintf("%x", res)
+				}
+
+				fastChecksumSmallBufferPool.Put(buf)
+			}
+		}
+	}
+
+	return checksum, err
+}

+ 135 - 0
fileutil/fileutil_test.go

@@ -0,0 +1,135 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package fileutil
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+)
+
+const TESTPATH = "fileutiltestpath"
+
+func TestDirectoryExists(t *testing.T) {
+	os.Remove(TESTPATH)
+
+	res, err := PathExists(TESTPATH)
+	if err != nil {
+		t.Error(err.Error())
+		return
+	}
+	if res {
+		t.Error("Path test should not exist")
+	}
+
+	os.Mkdir(TESTPATH, 0770)
+	defer func() {
+		os.RemoveAll(TESTPATH)
+	}()
+
+	res, err = PathExists(TESTPATH)
+	if err != nil {
+		t.Error(err.Error())
+		return
+	}
+	if !res {
+		t.Error("Path test should exist after it was created")
+		return
+	}
+
+	_, err = PathExists("**" + string(0x0))
+	if err == nil {
+		t.Error("Incorrect paths should throw an error")
+		return
+	}
+}
+
+func TestIsDir(t *testing.T) {
+	os.Remove(TESTPATH)
+
+	res, err := IsDir(TESTPATH)
+	if err != nil && !os.IsNotExist(err) {
+		t.Error(err.Error())
+		return
+	}
+	if res {
+		t.Error("Path test should not exist")
+	}
+
+	os.Mkdir(TESTPATH, 0770)
+	defer func() {
+		os.RemoveAll(TESTPATH)
+	}()
+
+	res, err = IsDir(TESTPATH)
+	if err != nil {
+		t.Error(err.Error())
+		return
+	}
+	if !res {
+		t.Error("Dir test should exist after it was created")
+		return
+	}
+
+	_, err = IsDir("**" + string(0x0))
+	if err == nil {
+		t.Error("Incorrect paths should throw an error")
+		return
+	}
+}
+
+func TestCheckSumFiles(t *testing.T) {
+	os.Remove(TESTPATH)
+
+	res, err := IsDir(TESTPATH)
+	if err != nil && !os.IsNotExist(err) {
+		t.Error(err.Error())
+		return
+	}
+	if res {
+		t.Error("Path test should not exist")
+	}
+
+	os.Mkdir(TESTPATH, 0770)
+	defer func() {
+		os.RemoveAll(TESTPATH)
+	}()
+
+	testfile := filepath.Join(TESTPATH, "testfile.txt")
+
+	ioutil.WriteFile(testfile, []byte("Omnium enim rerum\nprincipia parva sunt"), 0660)
+
+	if res, err := CheckSumFile(testfile); res != "90a258b01ceab4058906318bf0b34a31f2ff7ac2268c7bf3df9168f1f6ca5bc6" || err != nil {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	// Test fast checksum
+
+	if res, err := CheckSumFileFast(testfile); res != "6f05b934" || err != nil {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	testfile = filepath.Join(TESTPATH, "testfile2.txt")
+
+	buf := make([]byte, fastSumSampleSize*8)
+	for i := 0; i < fastSumSampleSize*8; i++ {
+		buf[i] = byte(i % 10)
+	}
+
+	ioutil.WriteFile(testfile, buf, 0660)
+
+	if res, err := CheckSumFileFast(testfile); res != "14294b07" || err != nil {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+}

+ 439 - 0
fileutil/multifilebuffer.go

@@ -0,0 +1,439 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package fileutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"devt.de/krotik/common/timeutil"
+)
+
+/*
+MultiFileBuffer is a file-persistent buffer which can be split over multiple files.
+
+A specified file is opened and used as backend storage for a byte buffer. By
+default, the file grows indefinitely. It is possible to specify a rollover
+condition to allow the file to rollover once the condition is satisfied.
+If the condition is satisfied, the file is closed and a new file is silently
+opened for output. The buffer will save old log files by appending the
+extensions ‘.1’, ‘.2’ etc., to the file name. The rollover condition is only
+checked once at the beginning of a write operation.
+
+For example, with a base file name of app.log, the buffer would create
+app.log, app.log.1, app.log.2, etc. The file being written to is always app.log.
+When this file is filled, it is closed and renamed to app.log.1, and if files
+app.log.1, app.log.2, etc. exist, then they are renamed to app.log.2, app.log.3
+etc. respectively.
+*/
+type MultiFileBuffer struct {
+	lock     *sync.Mutex       // Lock for reading and writing
+	filename string            // File name for buffer
+	basename string            // Base file name (file name + iterator decoration)
+	iterator FilenameIterator  // Iterator for file names
+	cond     RolloverCondition // Rollover condition
+	fp       *os.File          // Current file handle
+}
+
+/*
+NewMultiFileBuffer creates a new MultiFileBuffer with a given file name
+iterator and rollover condition. An error is returned if the initial
+rollover check fails or an existing buffer file cannot be opened.
+*/
+func NewMultiFileBuffer(filename string, it FilenameIterator, cond RolloverCondition) (*MultiFileBuffer, error) {
+	var err error
+
+	mfb := &MultiFileBuffer{&sync.Mutex{}, filename, it.Basename(filename), it, cond, nil}
+
+	if err = mfb.checkrollover(); err != nil {
+		return nil, err
+	}
+
+	if mfb.fp == nil {
+
+		// File existed and can be continued
+
+		mfb.lock.Lock()
+		mfb.fp, err = os.OpenFile(mfb.basename, os.O_APPEND|os.O_RDWR, 0660)
+		mfb.lock.Unlock()
+
+		// Report an open error instead of silently returning a buffer
+		// with a nil file handle (the error was previously discarded)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return mfb, nil
+}
+
+/*
+Write writes len(p) bytes from p to the underlying data stream. It returns
+the number of bytes written from p (0 <= n <= len(p)) and any error
+encountered that caused the write to stop early.
+*/
+func (mfb *MultiFileBuffer) Write(output []byte) (int, error) {
+	var b int
+
+	err := mfb.checkrollover()
+
+	if err == nil && mfb.fp == nil {
+
+		// File existed and can be continued
+
+		mfb.lock.Lock()
+		mfb.fp, err = os.OpenFile(mfb.basename, os.O_APPEND|os.O_RDWR, 0660)
+		mfb.lock.Unlock()
+	}
+
+	if err == nil {
+
+		// Only write once the file handle is known to be valid - avoids
+		// a nil pointer dereference when opening the file failed (the
+		// open error was previously overwritten by the Write call)
+
+		mfb.lock.Lock()
+		b, err = mfb.fp.Write(output)
+		mfb.lock.Unlock()
+	}
+
+	return b, err
+}
+
+/*
+checkrollover checks if the buffer files should be switched.
+*/
+func (mfb *MultiFileBuffer) checkrollover() error {
+	mfb.lock.Lock()
+	defer mfb.lock.Unlock()
+
+	//  Update basename here
+
+	mfb.basename = mfb.iterator.Basename(mfb.filename)
+
+	// Rollover if the base file does not exist
+
+	ex, err := PathExists(mfb.basename)
+
+	if err == nil && (!ex || mfb.cond.CheckRollover(mfb.basename)) {
+
+		// Rollover if either the base file does not exist or the
+		// rollover condition is satisfied
+
+		err = mfb.rollover()
+	}
+
+	return err
+}
+
+/*
+Close closes the buffer.
+*/
+func (mfb *MultiFileBuffer) Close() error {
+	var err error
+
+	if mfb.fp != nil {
+		err = mfb.fp.Close()
+		mfb.fp = nil
+	}
+
+	return err
+}
+
+/*
+rollover switches the buffer files.
+*/
+func (mfb *MultiFileBuffer) rollover() error {
+	var err error
+
+	// Recursive file renaming function
+
+	var ensureFileSlot func(fn string) error
+
+	ensureFileSlot = func(fn string) error {
+
+		// Check if the file exists already
+
+		ex, err := PathExists(fn)
+
+		if ex && err == nil {
+
+			// Determine new file name
+
+			newfn := mfb.iterator.NextName(fn)
+
+			if newfn == "" {
+
+				// If it is the end of the iteration just delete the file
+
+				err = os.Remove(fn)
+
+			} else {
+
+				// Ensure the new file name is usable
+
+				err = ensureFileSlot(newfn)
+
+				// Rename file according to iterator.NextName()
+
+				if err == nil {
+					err = os.Rename(fn, newfn)
+				}
+			}
+		}
+
+		return err
+	}
+
+	// Close existing file
+
+	err = mfb.Close()
+
+	// Create file handle
+
+	if err == nil {
+
+		err = ensureFileSlot(mfb.basename)
+
+		if err == nil {
+
+			// Overwrite existing base file
+
+			mfb.fp, err = os.OpenFile(mfb.basename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
+		}
+	}
+
+	return err
+}
+
+// Rollover conditions
+// ===================
+
+/*
+RolloverCondition is used by the MultiFileBuffer to check if the buffer files
+should be switched.
+*/
+type RolloverCondition interface {
+
+	/*
+	   CheckRollover checks if the buffer files should be switched.
+	*/
+	CheckRollover(basename string) bool
+}
+
+/*
+EmptyRolloverCondition creates a rollover condition which is never true.
+*/
+func EmptyRolloverCondition() RolloverCondition {
+	return &emptyRolloverCondition{}
+}
+
+/*
+emptyRolloverCondition is a rollover condition which is never true.
+*/
+type emptyRolloverCondition struct {
+}
+
+/*
+CheckRollover checks if the buffer files should be switched. This
+implementation never triggers a rollover.
+*/
+func (rc *emptyRolloverCondition) CheckRollover(basename string) bool {
+	return false
+}
+
+/*
+SizeBasedRolloverCondition creates a new rollover condition based on file
+size. The condition is satisfied if the base file exceeds a certain file size.
+*/
+func SizeBasedRolloverCondition(maxSize int64) RolloverCondition {
+	return &sizeBasedRolloverCondition{maxSize}
+}
+
+/*
+sizeBasedRolloverCondition is the implementation of the size based rollover
+condition.
+*/
+type sizeBasedRolloverCondition struct {
+	maxSize int64
+}
+
+/*
+CheckRollover checks if the buffer files should be switched. The rollover
+triggers once the base file reaches the configured maximum size.
+*/
+func (rc *sizeBasedRolloverCondition) CheckRollover(basename string) bool {
+	ret := false
+
+	if info, err := os.Stat(basename); err == nil {
+		ret = info.Size() >= rc.maxSize
+	}
+
+	return ret
+}
+
+// FilenameIterator
+// ================
+
+/*
+FilenameIterator is used by the MultiFileBuffer to determine the new file name
+when rotating the buffer files. Basename is called before doing any calculation.
+This function should do general filename decoration. If the decoration changes
+over time then the function needs to also handle the cleanup.
+*/
+type FilenameIterator interface {
+
+	/*
+		Basename decorates the initial file name.
+	*/
+	Basename(filename string) string
+
+	/*
+		NextName returns the next file name based on the current file name.
+		An empty string means the end of the iteration.
+	*/
+	NextName(currentName string) string
+}
+
+/*
+ConsecutiveNumberIterator creates a new file name iterator which adds numbers
+at the end of files. Up to maxNum files will be created. A maxNum parameter
+< 1 means there is no limit.
+*/
+func ConsecutiveNumberIterator(maxNum int) FilenameIterator {
+	return &consecutiveNumberIterator{maxNum}
+}
+
+/*
+consecutiveNumberIterator is the implementation of the consecutive number
+file iterator.
+*/
+type consecutiveNumberIterator struct {
+	maxNum int
+}
+
+/*
+Basename decorates the initial file name.
+*/
+func (it *consecutiveNumberIterator) Basename(filename string) string {
+	return filename
+}
+
+/*
+NextName returns the next file name based on the current file name.
+An empty string means the end of the iteration.
+*/
+func (it *consecutiveNumberIterator) NextName(currentName string) string {
+
+	if i := strings.LastIndex(currentName, "."); i > 0 {
+
+		if num, err := strconv.ParseInt(currentName[i+1:], 10, 64); err == nil {
+
+			nextNum := int(num + 1)
+
+			if it.maxNum > 0 && nextNum > it.maxNum {
+				return ""
+			}
+
+			return fmt.Sprintf("%s.%v", currentName[:i], nextNum)
+		}
+	}
+
+	return fmt.Sprintf("%s.1", currentName)
+}
+
+/*
+DailyDateIterator creates a new file name iterator which adds dates at the
+end of files. The log will be switched at least once every day. Up to maxNumPerDay
+files will be created per day. A maxNumPerDay parameter < 1 means there is no limit.
+Up to maxDays different days will be kept (oldest ones are deleted). A maxDays
+parameter < 1 means everything is kept.
+*/
+func DailyDateIterator(maxNumPerDay int, maxDays int) FilenameIterator {
+	return &dailyDateIterator{&consecutiveNumberIterator{maxNumPerDay}, maxDays, timeutil.MakeTimestamp}
+}
+
+/*
+dailyDateIterator is the implementation of the daily date file iterator.
+*/
+type dailyDateIterator struct {
+	*consecutiveNumberIterator
+	maxDays int
+	tsFunc  func() string // Timestamp function
+}
+
+/*
+Basename decorates the initial file name with today's date and removes
+files older than the configured maximum number of days.
+*/
+func (it *dailyDateIterator) Basename(filename string) string {
+
+	// Get todays date
+
+	ts := it.tsFunc()
+	today, _ := timeutil.TimestampString(ts, "UTC")
+	today = today[:10]
+
+	// Cleanup old files
+
+	if it.maxDays > 0 {
+
+		prefix := path.Base(filename)
+		dir := path.Dir(filename)
+
+		if files, err := ioutil.ReadDir(dir); err == nil {
+			var datesToConsider []string
+
+			// Collect all relevant files
+
+			foundToday := false
+
+			for _, f := range files {
+
+				if strings.HasPrefix(f.Name(), prefix) && len(f.Name()) > len(prefix) {
+
+					dateString := f.Name()[len(prefix)+1:]
+					if !strings.ContainsRune(dateString, '.') {
+						datesToConsider = append(datesToConsider, dateString)
+						if !foundToday {
+							foundToday = dateString == today
+						}
+					}
+				}
+			}
+
+			// Make sure today is one of the dates
+
+			if !foundToday {
+				datesToConsider = append(datesToConsider, today)
+			}
+
+			// Sort them so the newest ones are kept
+
+			sort.Strings(datesToConsider)
+
+			//  Check if files need to be removed
+
+			if len(datesToConsider) > it.maxDays {
+				datesToRemove := datesToConsider[:len(datesToConsider)-it.maxDays]
+
+				for _, f := range files {
+					for _, dateToRemove := range datesToRemove {
+
+						if strings.HasPrefix(f.Name(), fmt.Sprintf("%s.%s", prefix, dateToRemove)) {
+
+							os.Remove(path.Join(dir, f.Name()))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return fmt.Sprintf("%s.%s", filename, today)
+}

+ 388 - 0
fileutil/multifilebuffer_test.go

@@ -0,0 +1,388 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package fileutil
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path"
+	"testing"
+)
+
+const BUFTESTPATH = "filebuftestpath"
+
+func TestMultiFileBufferErrors(t *testing.T) {
+
+	buf, err := NewMultiFileBuffer("**"+string(0x0),
+		ConsecutiveNumberIterator(5), EmptyRolloverCondition())
+
+	if buf != nil || err == nil {
+		t.Error("Unexpected result:", buf, err)
+		return
+	}
+}
+
// TestMultiFileBufferDateDailyDate exercises a MultiFileBuffer with the
// daily date iterator: per-day rollover, reopening an existing buffer,
// date change and cleanup of old dates.
func TestMultiFileBufferDateDailyDate(t *testing.T) {
	os.RemoveAll(BUFTESTPATH)
	os.Mkdir(BUFTESTPATH, 0770)
	defer func() {
		if res, _ := PathExists(BUFTESTPATH); res {
			os.RemoveAll(BUFTESTPATH)
		}
	}()

	filename := path.Join(BUFTESTPATH, "testdate.log")

	it := DailyDateIterator(-1, 2) // No per-day file limit, keep at most 2 days

	// Fix the today day

	it.(*dailyDateIterator).tsFunc = func() string {
		return "512800001234" // 1986-04-02
	}

	buf, err := NewMultiFileBuffer(filename,
		it, SizeBasedRolloverCondition(3))

	if err != nil {
		t.Error(err)
		return
	}

	buf.Write([]byte("a"))
	buf.Write([]byte("b"))
	buf.Write([]byte("c"))

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-04-02": "abc",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()

	// Create a new buffer - it should pick up the existing files

	buf, err = NewMultiFileBuffer(filename,
		it, SizeBasedRolloverCondition(3))

	if err != nil {
		t.Error(err)
		return
	}

	buf.Write([]byte("d"))
	buf.Write([]byte("e"))
	buf.Write([]byte("fg"))
	buf.Write([]byte("h"))

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-04-02":   "h",
		"testdate.log.1986-04-02.1": "defg",
		"testdate.log.1986-04-02.2": "abc",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()

	// A new day - writing to the closed buffer is expected to reopen it
	// (confirmed by the "Write into a closed file" section below)

	it.(*dailyDateIterator).tsFunc = func() string {
		return "512900001234" // 1986-04-03
	}

	buf.Write([]byte("123"))
	buf.Write([]byte("4"))

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-04-03":   "4",
		"testdate.log.1986-04-03.1": "123",
		"testdate.log.1986-04-02":   "h",
		"testdate.log.1986-04-02.1": "defg",
		"testdate.log.1986-04-02.2": "abc",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()

	// Test cleanup - Move months into the future
	// (only the 2 newest dates survive since maxDays is 2)

	it.(*dailyDateIterator).tsFunc = func() string {
		return "522800001234" // 1986-07-26
	}

	buf.Write([]byte("x"))

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-07-26":   "x",
		"testdate.log.1986-04-03":   "4",
		"testdate.log.1986-04-03.1": "123",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()

	// Last test writer without restriction

	buf, err = NewMultiFileBuffer(filename,
		it, EmptyRolloverCondition())

	if err != nil {
		t.Error(err)
		return
	}

	for i := 0; i < 10; i++ {
		buf.Write([]byte("x"))
	}

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-07-26":   "xxxxxxxxxxx",
		"testdate.log.1986-04-03":   "4",
		"testdate.log.1986-04-03.1": "123",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()

	// Write into a closed file - the buffer should reopen transparently

	for i := 0; i < 10; i++ {
		buf.Write([]byte("x"))
	}

	if err = checkDirLayout(BUFTESTPATH, map[string]string{
		"testdate.log.1986-07-26":   "xxxxxxxxxxxxxxxxxxxxx",
		"testdate.log.1986-04-03":   "4",
		"testdate.log.1986-04-03.1": "123",
	}); err != nil {
		t.Error(err)
		return
	}

	buf.Close()
}
+
+func TestMultiFileBufferSimpleNumbering(t *testing.T) {
+	os.RemoveAll(BUFTESTPATH)
+	os.Mkdir(BUFTESTPATH, 0770)
+	defer func() {
+		if res, _ := PathExists(BUFTESTPATH); res {
+			os.RemoveAll(BUFTESTPATH)
+		}
+	}()
+
+	filename := path.Join(BUFTESTPATH, "test1.log")
+
+	buf, err := NewMultiFileBuffer(filename,
+		ConsecutiveNumberIterator(3), SizeBasedRolloverCondition(4))
+
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	buf.Write([]byte("a"))
+	buf.Write([]byte("b"))
+
+	if err = checkDirLayout(BUFTESTPATH, map[string]string{
+		"test1.log": "ab",
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	buf.Close()
+
+	// Create a new buffer
+
+	buf, err = NewMultiFileBuffer(filename,
+		ConsecutiveNumberIterator(3), SizeBasedRolloverCondition(4))
+
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	buf.Write([]byte("c"))
+	buf.Write([]byte("d"))
+
+	if err = checkDirLayout(BUFTESTPATH, map[string]string{
+		"test1.log": "abcd",
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Now fill up the files
+
+	for i := 0; i < 10; i++ {
+		if _, err := buf.Write([]byte(fmt.Sprint(i))); err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	if err = checkDirLayout(BUFTESTPATH, map[string]string{
+		"test1.log":   "89",
+		"test1.log.1": "4567",
+		"test1.log.2": "0123",
+		"test1.log.3": "abcd",
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Fill up some more and see that the oldest entries disappear
+
+	buf.Write([]byte("xxx"))
+
+	for i := 0; i < 7; i++ {
+		if _, err := buf.Write([]byte(fmt.Sprint(i))); err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	if err = checkDirLayout(BUFTESTPATH, map[string]string{
+		"test1.log":   "456",
+		"test1.log.1": "0123",
+		"test1.log.2": "89xxx",
+		"test1.log.3": "4567",
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	buf.Close()
+
+	// Create a new buffer
+
+	buf, err = NewMultiFileBuffer(filename,
+		ConsecutiveNumberIterator(3), SizeBasedRolloverCondition(4))
+
+	for i := 0; i < 4; i++ {
+		if _, err := buf.Write([]byte(fmt.Sprint(i))); err != nil {
+			t.Error(err)
+			return
+		}
+	}
+
+	if err = checkDirLayout(BUFTESTPATH, map[string]string{
+		"test1.log":   "123",
+		"test1.log.1": "4560",
+		"test1.log.2": "0123",
+		"test1.log.3": "89xxx",
+	}); err != nil {
+		t.Error(err)
+		return
+	}
+
+	buf.Close()
+}
+
+func checkDirLayout(dir string, expected map[string]string) error {
+
+	files, err := ioutil.ReadDir(dir)
+	if err != nil {
+		return err
+	}
+
+	if len(files) != len(expected) {
+
+		foundFiles := make([]string, 0, len(files))
+		for _, f := range files {
+			foundFiles = append(foundFiles, f.Name())
+		}
+
+		return fmt.Errorf("Unexpected layout found files: %v", foundFiles)
+	}
+
+	for _, f := range files {
+		content, err := ioutil.ReadFile(path.Join(dir, f.Name()))
+		if err != nil {
+			return err
+		}
+		expectedContent, ok := expected[f.Name()]
+		if !ok {
+			return fmt.Errorf("File %v not in list of expected files", f.Name())
+		}
+
+		if expectedContent != string(content) {
+			return fmt.Errorf("Content of file %v is not as expected: %v",
+				f.Name(), string(content))
+		}
+	}
+
+	return nil
+}
+
+func TestConsecutiveNumberIterator(t *testing.T) {
+
+	it := ConsecutiveNumberIterator(5)
+
+	if res := it.NextName("foo"); res != "foo.1" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+
+	if res := it.NextName("foo.1"); res != "foo.2" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+
+	if res := it.NextName("foo.4"); res != "foo.5" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+
+	if res := it.NextName("foo.5"); res != "" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+}
+
+func TestDailyDateIterator(t *testing.T) {
+
+	it := DailyDateIterator(-1, -1)
+	it.(*dailyDateIterator).tsFunc = func() string {
+		return "512800001234" // 1986-04-02
+	}
+
+	filename := "foo"
+
+	basename := it.Basename(filename)
+
+	if res := it.NextName(basename); res != "foo.1986-04-02.1" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+
+	it.(*dailyDateIterator).tsFunc = func() string {
+		return "522800001234" // 1986-07-26
+	}
+
+	basename = it.Basename(filename)
+
+	if res := it.NextName(basename + ".51"); res != "foo.1986-07-26.52" {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+}

+ 89 - 0
fileutil/zip.go

@@ -0,0 +1,89 @@
+package fileutil
+
+import (
+	"archive/zip"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+/*
+UnzipFile extracts a given zip file into a given output folder.
+*/
+func UnzipFile(name string, dest string, overwrite bool) error {
+	var f *os.File
+
+	stat, err := os.Stat(name)
+
+	if err == nil {
+
+		if f, err = os.Open(name); err == nil {
+			defer f.Close()
+
+			err = UnzipReader(f, stat.Size(), dest, overwrite)
+		}
+	}
+
+	return err
+}
+
+/*
+UnzipReader extracts a given zip archive into a given output folder.
+Size is the size of the archive.
+*/
+func UnzipReader(reader io.ReaderAt, size int64, dest string, overwrite bool) error {
+	var rc io.ReadCloser
+
+	r, err := zip.NewReader(reader, size)
+
+	if err == nil {
+
+		for _, f := range r.File {
+
+			if rc, err = f.Open(); err == nil {
+				var e bool
+
+				fpath := filepath.Join(dest, f.Name)
+
+				if e, _ = PathExists(fpath); e && !overwrite {
+					err = fmt.Errorf("Path already exists: %v", fpath)
+
+				} else if f.FileInfo().IsDir() {
+
+					// Create folder
+
+					err = os.MkdirAll(fpath, os.ModePerm)
+
+				} else {
+					var fdir string
+
+					// Create File
+
+					if lastIndex := strings.LastIndex(fpath, string(os.PathSeparator)); lastIndex > -1 {
+						fdir = fpath[:lastIndex]
+					}
+
+					if err = os.MkdirAll(fdir, os.ModePerm); err == nil {
+						f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode())
+
+						if err == nil {
+							_, err = io.Copy(f, rc)
+
+							f.Close()
+						}
+					}
+				}
+
+				rc.Close()
+			}
+
+			if err != nil {
+				break
+			}
+		}
+	}
+
+	return err
+}

+ 79 - 0
fileutil/zip_test.go

@@ -0,0 +1,79 @@
+package fileutil
+
+import (
+	"encoding/base64"
+	"io/ioutil"
+	"os"
+	"path"
+	"strings"
+	"testing"
+)
+
+var testZipFile = "UEsDBBQAAAAAAAlhM0sAAAAAAAAAAAAAAAALAAAAdGVzdGZvbGRlci" +
+	"9QSwMECgAAAAAA/WAzS9JjSIgDAAAAAwAAABQAAAB0ZXN0Zm9sZGVyL3Rlc3QxLnR4dDEyM1" +
+	"BLAwQKAAAAAAAMYTNLccOosQMAAAADAAAAFAAAAHRlc3Rmb2xkZXIvdGVzdDIudHh0NDU2UE" +
+	"sBAj8AFAAAAAAACWEzSwAAAAAAAAAAAAAAAAsAJAAAAAAAAAAQAAAAAAAAAHRlc3Rmb2xkZX" +
+	"IvCgAgAAAAAAABABgAynC8mDcx0wG6nMOYNzHTAcpwvJg3MdMBUEsBAj8ACgAAAAAA/WAzS9" +
+	"JjSIgDAAAAAwAAABQAJAAAAAAAAAAgAAAAKQAAAHRlc3Rmb2xkZXIvdGVzdDEudHh0CgAgAA" +
+	"AAAAABABgAAgkxjDcx0wFqBhKVNzHTAQIJMYw3MdMBUEsBAj8ACgAAAAAADGEzS3HDqLEDAA" +
+	"AAAwAAABQAJAAAAAAAAAAgAAAAXgAAAHRlc3Rmb2xkZXIvdGVzdDIudHh0CgAgAAAAAAABAB" +
+	"gArtRMnDcx0wE68M6gNzHTAXrDTJw3MdMBUEsFBgAAAAADAAMAKQEAAJMAAAAAAA=="
+
+func TestUnzipFile(t *testing.T) {
+
+	data, _ := base64.StdEncoding.DecodeString(testZipFile)
+
+	ioutil.WriteFile("ziptest.zip", data, 0660)
+	ioutil.WriteFile("ziptest2.zip", data[:5], 0660)
+
+	defer func() {
+		os.Remove("ziptest.zip")
+		os.Remove("ziptest2.zip")
+		os.RemoveAll("foo")
+	}()
+
+	if err := UnzipFile("ziptest.zip", "foo", false); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := UnzipFile("ziptest.zip", "foo", false); !strings.Contains(err.Error(), "Path already exists:") {
+		t.Error(err)
+		return
+	}
+
+	if err := UnzipFile("ziptest2.zip", "foo", false); err.Error() != "zip: not a valid zip file" {
+		t.Error(err)
+		return
+	}
+
+	if e, err := PathExists("foo"); !e {
+		t.Error("Unexpected result:", e, err)
+		return
+	}
+
+	if e, err := PathExists(path.Join("foo", "testfolder")); !e {
+		t.Error("Unexpected result:", e, err)
+		return
+	}
+
+	if e, err := PathExists(path.Join("foo", "testfolder", "test1.txt")); !e {
+		t.Error("Unexpected result:", e, err)
+		return
+	}
+
+	if e, err := PathExists(path.Join("foo", "testfolder", "test2.txt")); !e {
+		t.Error("Unexpected result:", e, err)
+		return
+	}
+
+	if c, err := ioutil.ReadFile(path.Join("foo", "testfolder", "test1.txt")); string(c) != "123" {
+		t.Error("Unexpected result:", string(c), err)
+		return
+	}
+
+	if c, err := ioutil.ReadFile(path.Join("foo", "testfolder", "test2.txt")); string(c) != "456" {
+		t.Error("Unexpected result:", string(c), err)
+		return
+	}
+}

+ 127 - 0
flowutil/eventpump.go

@@ -0,0 +1,127 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package flowutil contains utilities to manage control flow.
+*/
+package flowutil
+
+import "sync"
+
/*
EventPump implements the observer pattern. Observers can subscribe to receive
notifications on certain events. Observed objects can send notifications.
*/
type EventPump struct {
	eventsObservers     map[string]map[interface{}][]EventCallback // event -> source -> callbacks ("" / nil act as wildcards)
	eventsObserversLock *sync.Mutex                                // Guards eventsObservers
}

/*
EventCallback is the callback function which is called when an event was observed.
*/
type EventCallback func(event string, eventSource interface{})

/*
NewEventPump creates a new event pump.
*/
func NewEventPump() *EventPump {
	return &EventPump{make(map[string]map[interface{}][]EventCallback), &sync.Mutex{}}
}
+
+/*
+AddObserver adds a new observer to the event pump. An observer can subscribe to
+a given event from a given event source. If the event is an empty string then
+the observer subscribes to all events from the event source. If the
+eventSource is nil then the observer subscribes to all event sources.
+*/
+func (ep *EventPump) AddObserver(event string, eventSource interface{}, callback EventCallback) {
+
+	// Ignore requests with non-existent callbacks
+
+	if callback == nil {
+		return
+	}
+
+	ep.eventsObserversLock.Lock()
+	defer ep.eventsObserversLock.Unlock()
+
+	sources, ok := ep.eventsObservers[event]
+	if !ok {
+		sources = make(map[interface{}][]EventCallback)
+		ep.eventsObservers[event] = sources
+	}
+
+	callbacks, ok := sources[eventSource]
+	if !ok {
+		callbacks = []EventCallback{callback}
+		sources[eventSource] = callbacks
+	} else {
+		sources[eventSource] = append(callbacks, callback)
+	}
+}
+
+/*
+PostEvent posts an event to this event pump from a given event source.
+*/
+func (ep *EventPump) PostEvent(event string, eventSource interface{}) {
+	if event == "" || eventSource == nil {
+		panic("Posting an event requires the event and its source")
+	}
+
+	ep.eventsObserversLock.Lock()
+	defer ep.eventsObserversLock.Unlock()
+
+	postEvent := func(event string, eventSource interface{}) {
+
+		if sources, ok := ep.eventsObservers[event]; ok {
+			for source, callbacks := range sources {
+				if source == eventSource || source == nil {
+					for _, callback := range callbacks {
+						ep.eventsObserversLock.Unlock()
+						callback(event, eventSource)
+						ep.eventsObserversLock.Lock()
+					}
+				}
+			}
+		}
+	}
+
+	postEvent(event, eventSource)
+	postEvent("", eventSource)
+}
+
+/*
+RemoveObservers removes observers from the event pump. If the event is an
+empty string then the observer is removed from all events. If the
+eventSource is nil then all observers of the event are dropped.
+*/
+func (ep *EventPump) RemoveObservers(event string, eventSource interface{}) {
+	ep.eventsObserversLock.Lock()
+	defer ep.eventsObserversLock.Unlock()
+
+	// Clear everything
+
+	if event == "" && eventSource == nil {
+		ep.eventsObservers = make(map[string]map[interface{}][]EventCallback)
+
+	} else if eventSource == nil {
+		delete(ep.eventsObservers, event)
+
+	} else if event == "" {
+		for _, sources := range ep.eventsObservers {
+			delete(sources, eventSource)
+		}
+
+	} else {
+		if sources, ok := ep.eventsObservers[event]; ok {
+			delete(sources, eventSource)
+		}
+	}
+}

+ 238 - 0
flowutil/eventpump_test.go

@@ -0,0 +1,238 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package flowutil
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"sort"
+	"testing"
+)
+
// TestEventPump runs a sequence of subscribe / post / unsubscribe steps.
// Each observer appends its number to res; res is sorted after each append
// so the expected values below are order-independent.
func TestEventPump(t *testing.T) {
	var res []string

	source1 := &bytes.Buffer{}
	source2 := errors.New("TEST")

	ep := NewEventPump()

	// Add observer 1

	ep.AddObserver("event1", source1, func(event string, eventSource interface{}) {
		if eventSource != source1 {
			t.Error("Unexpected event source:", eventSource)
			return
		}
		res = append(res, "1")
		sort.Strings(res)

	})

	// Add observer 2

	ep.AddObserver("event2", source2, func(event string, eventSource interface{}) {
		if eventSource != source2 {
			t.Error("Unexpected event source:", eventSource)
			return
		}
		res = append(res, "2")
		sort.Strings(res)

	})

	// Add observer 3

	ep.AddObserver("event2", source2, func(event string, eventSource interface{}) {
		if eventSource != source2 {
			t.Error("Unexpected event source:", eventSource)
			return
		}
		res = append(res, "3")
		sort.Strings(res)

	})

	// Run the tests

	// Test 1 straight forward case

	ep.PostEvent("event1", source1)

	if fmt.Sprint(res) != "[1]" {
		t.Error("Unexpected result:", res)
		return
	}

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[2 3]" {
		t.Error("Unexpected result:", res)
		return
	}

	res = make([]string, 0) // Reset res

	ep.PostEvent("event1", source2)

	if fmt.Sprint(res) != "[]" {
		t.Error("Unexpected result:", res)
		return
	}

	// Add observer 4 - catch-all for events of source1

	ep.AddObserver("", source1, func(event string, eventSource interface{}) {
		if eventSource != source1 {
			t.Error("Unexpected event source:", eventSource)
			return
		}
		res = append(res, "4")
		sort.Strings(res)
	})

	// Add observer 5 - catch-all for all events of all sources

	ep.AddObserver("", nil, func(event string, eventSource interface{}) {
		res = append(res, "5")
		sort.Strings(res)
	})

	// Add observer 6 - catch-all for events of source2

	ep.AddObserver("", source2, func(event string, eventSource interface{}) {
		if eventSource != source2 {
			t.Error("Unexpected event source:", eventSource)
			return
		}
		res = append(res, "6")
		sort.Strings(res)
	})

	res = make([]string, 0) // Reset res

	ep.PostEvent("event1", source2)

	if fmt.Sprint(res) != "[5 6]" {
		t.Error("Unexpected result:", res)
		return
	}

	res = make([]string, 0) // Reset res

	ep.PostEvent("event3", source2)

	if fmt.Sprint(res) != "[5 6]" {
		t.Error("Unexpected result:", res)
		return
	}

	res = make([]string, 0) // Reset res

	ep.PostEvent("event3", source1)

	if fmt.Sprint(res) != "[4 5]" {
		t.Error("Unexpected result:", res)
		return
	}

	res = make([]string, 0) // Reset res

	ep.PostEvent("event3", errors.New("test"))

	if fmt.Sprint(res) != "[5]" {
		t.Error("Unexpected result:", res)
		return
	}

	// Remove observers

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[2 3 5 6]" {
		t.Error("Unexpected result:", res)
		return
	}
	ep.RemoveObservers("event2", source2)

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[5 6]" {
		t.Error("Unexpected result:", res)
		return
	}

	ep.RemoveObservers("", source2) // Remove all handlers specific to source 2

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[5]" {
		t.Error("Unexpected result:", res)
		return
	}

	ep.PostEvent("event1", source1)

	if fmt.Sprint(res) != "[1 4 5 5]" {
		t.Error("Unexpected result:", res)
		return
	}

	ep.RemoveObservers("event1", nil) // Remove all handlers for event1 (any source)

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[5]" {
		t.Error("Unexpected result:", res)
		return
	}

	ep.RemoveObservers("", nil) // Remove all handlers

	res = make([]string, 0) // Reset res

	ep.PostEvent("event2", source2)

	if fmt.Sprint(res) != "[]" {
		t.Error("Unexpected result:", res)
		return
	}

	// This call should be ignored

	ep.AddObserver("event1", source1, nil)

	if fmt.Sprint(ep.eventsObservers) != "map[]" {
		t.Error("Event map should be empty at this point:", ep.eventsObservers)
		return
	}
}
+
+func TestWrongPostEvent(t *testing.T) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Error("Posting events with empty values shouldn't work.")
+		}
+	}()
+
+	ep := NewEventPump()
+	ep.PostEvent("", nil)
+}

+ 3 - 0
go.mod

@@ -0,0 +1,3 @@
+module devt.de/krotik/common
+
+go 1.12

File diff suppressed because it is too large
+ 1284 - 0
httputil/access/acl.go


File diff suppressed because it is too large
+ 1271 - 0
httputil/access/acl_test.go


+ 45 - 0
httputil/auth/auth.go

@@ -0,0 +1,45 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package auth contains user authentication code for webservers.
+
+Basic access authentication requires a client to provide a user name and password
+with each request. Most browsers will directly support this method.
+See: https://en.wikipedia.org/wiki/Basic_access_authentication
+
+Cookie based authentication requires the client to login once and create a unique
+access token. The access token is then used to authenticate each request.
+*/
+package auth
+
+import "net/http"
+
/*
HandleFuncWrapper is an abstract wrapper for handle functions to add authentication features.
*/
type HandleFuncWrapper interface {

	/*
		SetAuthFunc gives an authentication function which can be used by the
		wrapper to authenticate users.
	*/
	SetAuthFunc(authFunc func(user, pass string) bool)

	/*
	   HandleFunc is the new handle func which wraps an original handle function to do an authentication check.
	*/
	HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request))

	/*
	   CheckAuth checks the user authentication of an incoming request. Returns
	   if the authentication is correct and the given username.
	*/
	CheckAuth(r *http.Request) (string, bool)
}

+ 194 - 0
httputil/auth/auth_test.go

@@ -0,0 +1,194 @@
+package auth
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+
+	"devt.de/krotik/common/httputil"
+	"devt.de/krotik/common/httputil/user"
+)
+
+const TESTPORT = ":9090"
+
+const TESTQUERYURL = "http://localhost" + TESTPORT + "/foo"
+
// handleCallback is invoked by originalHandleFunction before writing the
// response - individual tests swap it in to observe or manipulate requests.
var handleCallback = func(w http.ResponseWriter, r *http.Request) {}

// originalHandleFunction writes test content and reports the user of an
// existing session (it only looks up sessions, it never creates one).
var originalHandleFunction = func(w http.ResponseWriter, r *http.Request) {
	session, _ := user.UserSessionManager.GetSession("", w, r, false)

	handleCallback(w, r)

	if session == nil {
		w.Write([]byte("Content"))
	} else {
		w.Write([]byte(fmt.Sprint("Content - User session: ", session.User())))
	}
}

// wrappedHandleFunction is what the test server actually serves - auth
// wrapper tests replace it with a wrapped originalHandleFunction.
var wrappedHandleFunction = originalHandleFunction
+
// TestMain starts a shared test webserver for all tests in this package
// and makes sure it is shut down again afterwards.
func TestMain(m *testing.M) {
	flag.Parse()

	// Create a test file
	// (presumably served/used by other tests in this package - not
	// referenced in this file; TODO confirm)

	ioutil.WriteFile("test.jpg", []byte("testpic"), 0777)

	// Setup a simple webserver

	hs, wg := startServer()
	if hs == nil {
		return
	}

	// Make sure the webserver shuts down

	defer stopServer(hs, wg)

	// Register a simple content delivery function

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {

		// Call the wrapped handle function which then adds the authentication

		wrappedHandleFunction(w, r)
	})

	// Run the tests

	res := m.Run()

	// Remove test file

	os.Remove("test.jpg")

	os.Exit(res)
}
+
+func TestNoAuthNoSession(t *testing.T) {
+
+	// By default there is no session and no authentication
+
+	res, _ := sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Content" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Trying to create an anonymous session should fail
+
+	r, _ := http.NewRequest("GET", "", nil)
+	_, err := user.UserSessionManager.GetSession("", nil, r, true)
+
+	if err.Error() != "Cannot create a session without a user" {
+		t.Error("Unexpected error:", err)
+		return
+	}
+}
+
/*
sendTestRequest sends a request to the HTTP test server. Optional headers,
cookies and a request body can be given. It returns the (JSON-indented if
possible) response body and the raw response. Transport errors panic since
this is a test helper.
*/
func sendTestRequest(url string, method string, headers map[string]string,
	cookies []*http.Cookie, content []byte) (string, *http.Response) {

	var req *http.Request
	var err error

	// Create request

	if content != nil {
		req, err = http.NewRequest(method, url, bytes.NewBuffer(content))
	} else {
		req, err = http.NewRequest(method, url, nil)
	}

	// Fail fast on a malformed request - the original ignored this error
	// and would have dereferenced a nil req below

	if err != nil {
		panic(err)
	}

	// Add headers

	req.Header.Set("Content-Type", "application/json")

	for k, v := range headers {
		req.Header.Set(k, v)
	}

	// Add cookies

	for _, v := range cookies {
		req.AddCookie(v)
	}

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	bodyStr := strings.Trim(string(body), " \n")

	// Try json decoding first

	out := bytes.Buffer{}
	err = json.Indent(&out, []byte(bodyStr), "", "  ")
	if err == nil {
		return out.String(), resp
	}

	// Just return the body

	return bodyStr, resp
}
+
/*
Start a HTTP test server. Panics if the server cannot be started
(wg is used by RunHTTPServer to signal readiness - the Wait below
returns once the server is up or has failed).
*/
func startServer() (*httputil.HTTPServer, *sync.WaitGroup) {
	hs := &httputil.HTTPServer{}

	var wg sync.WaitGroup
	wg.Add(1)

	go hs.RunHTTPServer(TESTPORT, &wg)

	wg.Wait()

	// Server is started

	if hs.LastError != nil {
		panic(hs.LastError)
	}

	return hs, &wg
}
+
+/*
+Stop a started HTTP test server.
+*/
+func stopServer(hs *httputil.HTTPServer, wg *sync.WaitGroup) {
+
+	if hs.Running == true {
+
+		wg.Add(1)
+
+		// Server is shut down
+
+		hs.Shutdown()
+
+		wg.Wait()
+
+	} else {
+
+		panic("Server was not running as expected")
+	}
+}

+ 139 - 0
httputil/auth/basic.go

@@ -0,0 +1,139 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package auth
+
+import (
+	"encoding/base64"
+	"net/http"
+	"strings"
+
+	"devt.de/krotik/common/httputil/user"
+)
+
/*
Realm is the authentication realm which is presented in the
WWW-Authenticate header of 401 responses.
*/
var Realm = "RestrictedAccessRealm"
+
/*
BashicAuthHandleFuncWrapper datastructure. Wrapper for HandleFunc to add
basic authentication to all added endpoints.

NOTE(review): "Bashic" appears to be a typo for "Basic"; the name is kept
since it is part of the package's exported API.
*/
type BashicAuthHandleFuncWrapper struct {
	origHandleFunc func(pattern string, handler func(http.ResponseWriter, *http.Request)) // Underlying route registration (e.g. http.HandleFunc)
	authFunc       func(user, pass string) bool                                           // Credential check - nil means all auth attempts fail
	accessFunc     func(http.ResponseWriter, *http.Request, string) bool                  // Optional per-request authorization check - nil means allow

	// Callbacks

	CallbackSessionExpired func(w http.ResponseWriter, r *http.Request)
	CallbackUnauthorized   func(w http.ResponseWriter, r *http.Request)
}
+
+/*
+NewBashicAuthHandleFuncWrapper creates a new HandleFunc wrapper.
+*/
+func NewBashicAuthHandleFuncWrapper(origHandleFunc func(pattern string,
+	handler func(http.ResponseWriter, *http.Request))) *BashicAuthHandleFuncWrapper {
+
+	return &BashicAuthHandleFuncWrapper{
+		origHandleFunc,
+		nil,
+		nil,
+
+		// Session expired callback
+
+		func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("WWW-Authenticate", `Basic realm="`+Realm+`"`)
+			w.WriteHeader(http.StatusUnauthorized)
+			w.Write([]byte("Session expired\n"))
+		},
+		func(w http.ResponseWriter, r *http.Request) {
+			w.Header().Set("WWW-Authenticate", `Basic realm="`+Realm+`"`)
+			w.WriteHeader(http.StatusUnauthorized)
+			w.Write([]byte("Unauthorized\n"))
+		},
+	}
+}
+
/*
SetAuthFunc gives an authentication function which can be used by the wrapper
to authenticate users. A nil function means all authentication attempts fail.
*/
func (bw *BashicAuthHandleFuncWrapper) SetAuthFunc(authFunc func(user, pass string) bool) {
	bw.authFunc = authFunc
}
+
/*
SetAccessFunc sets an access function which can be used by the wrapper to
check the user access rights. A nil function means all access is granted.
*/
func (bw *BashicAuthHandleFuncWrapper) SetAccessFunc(accessFunc func(http.ResponseWriter, *http.Request, string) bool) {
	bw.accessFunc = accessFunc
}
+
+/*
+HandleFunc is the new handle func which wraps an original handle functions to do an authentication check.
+*/
+func (bw *BashicAuthHandleFuncWrapper) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
+
+	bw.origHandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
+
+		if name, res := bw.CheckAuth(r); res {
+
+			session, err := user.UserSessionManager.GetSession(name, w, r, true)
+
+			if session != nil && err == nil {
+
+				// Check authorization
+
+				if bw.accessFunc == nil || bw.accessFunc(w, r, name) {
+
+					// Handle the request
+
+					handler(w, r)
+				}
+
+				return
+			}
+
+			bw.CallbackSessionExpired(w, r)
+
+			return
+		}
+
+		bw.CallbackUnauthorized(w, r)
+	})
+}
+
+/*
+CheckAuth checks the user authentication of an incomming request. Returns
+if the authentication is correct and the given username.
+*/
+func (bw *BashicAuthHandleFuncWrapper) CheckAuth(r *http.Request) (string, bool) {
+	var user string
+	var ok bool
+
+	if s := strings.SplitN(r.Header.Get("Authorization"), " ", 2); len(s) == 2 {
+
+		if b, err := base64.StdEncoding.DecodeString(s[1]); err == nil {
+
+			if pair := strings.Split(string(b), ":"); len(pair) == 2 {
+
+				user = pair[0]
+				pass := pair[1]
+
+				ok = bw.authFunc != nil && bw.authFunc(user, pass)
+			}
+		}
+	}
+
+	return user, ok
+}

+ 222 - 0
httputil/auth/basic_test.go

@@ -0,0 +1,222 @@
+package auth
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	"devt.de/krotik/common/httputil/user"
+)
+
+/*
+TestBasicAuth exercises the BashicAuthHandleFuncWrapper end-to-end:
+unauthenticated access, successful login with session creation, session
+expiry, session destruction, access restriction and malformed credential
+headers. It relies on test fixtures (sendTestRequest, TESTQUERYURL,
+originalHandleFunction, wrappedHandleFunction) defined elsewhere in this
+package.
+*/
+func TestBasicAuth(t *testing.T) {
+
+	// Set a very fast session expiry
+
+	user.UserSessionManager.Provider.(*user.MemorySessionProvider).SetExpiry(1)
+
+	// Create a wrapper for basic auth
+
+	ba := NewBashicAuthHandleFuncWrapper(func(pattern string,
+		handler func(http.ResponseWriter, *http.Request)) {
+
+		// Ignore the pattern and just replace the wrappedHandleFunction
+
+		wrappedHandleFunction = handler
+	})
+
+	// Wrap the originalHandleFunction and let the previous code set it
+	// as wrappedHandleFunction
+
+	ba.HandleFunc("/", originalHandleFunction)
+
+	// Test that basic authentication is active
+
+	res, _ := sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Register credentials and try to authenticate
+
+	ba.SetAuthFunc(func(user, pass string) bool {
+		return user == "yams" && pass == "yams"
+	})
+
+	passStr := base64.StdEncoding.EncodeToString([]byte("yams:yams"))
+
+	res, resp := sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, nil, nil)
+
+	if res != "Content - User session: yams" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Check we have a valid session
+
+	cookies := resp.Cookies()
+	sessions, _ := user.UserSessionManager.Provider.GetAll()
+
+	if len(sessions) != 1 {
+		t.Error("Unexpected number of active sessions:", sessions)
+		return
+	}
+
+	var theSession user.Session
+	for _, v := range sessions {
+		theSession = v.(user.Session)
+		break
+	}
+
+	if len(cookies) != 1 ||
+		cookies[0].Raw != fmt.Sprintf("~sid=%v; Path=/; Max-Age=%v; HttpOnly",
+			theSession.ID(), CookieMaxLifetime) {
+
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+	// The next request will have access to a session
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, cookies, nil)
+
+	if res != "Content - User session: yams" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test expiry (provider expiry was set to 1 second above)
+
+	time.Sleep(2 * time.Second)
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, cookies, nil)
+
+	if res != "Session expired" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test destroying session
+
+	res, resp = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, nil, nil)
+
+	if res != "Content - User session: yams" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	cookies = resp.Cookies()
+	sessions, _ = user.UserSessionManager.Provider.GetAll()
+
+	if len(sessions) != 1 {
+		t.Error("Unexpected number of active sessions:", sessions)
+		return
+	}
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, cookies, nil)
+
+	if res != "Content - User session: yams" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test access denied
+
+	ba.SetAccessFunc(func(w http.ResponseWriter, r *http.Request, user string) bool {
+
+		if strings.HasPrefix(r.URL.Path, "/foo/bar") {
+			http.Error(w, "Page is restricted", http.StatusForbidden)
+			return false
+		}
+		return true
+	})
+
+	res, resp = sendTestRequest(TESTQUERYURL+"/bar", "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, cookies, nil)
+
+	if res != "Page is restricted" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Destroy all sessions so the next authenticated request is expired
+
+	for _, k := range sessions {
+		user.UserSessionManager.Provider.Destroy(k.ID())
+	}
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr,
+	}, cookies, nil)
+
+	if res != "Session expired" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test error cases
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStr + "wrong",
+	}, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic" + passStr,
+	}, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	passStrWrong := base64.StdEncoding.EncodeToString([]byte("yams:yams1"))
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStrWrong,
+	}, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	passStrWrong = base64.StdEncoding.EncodeToString([]byte("yamsyams"))
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStrWrong,
+	}, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	passStrWrong = base64.StdEncoding.EncodeToString([]byte("yams1:yams"))
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", map[string]string{
+		"Authorization": "Basic " + passStrWrong,
+	}, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}

+ 300 - 0
httputil/auth/cookie.go

@@ -0,0 +1,300 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package auth
+
+import (
+	"crypto/rand"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+
+	"devt.de/krotik/common/datautil"
+	"devt.de/krotik/common/errorutil"
+	"devt.de/krotik/common/httputil/user"
+)
+
+/*
+cookieNameAuth defines the auth cookie name
+*/
+const cookieNameAuth = "~aid"
+
+/*
+CookieMaxLifetime is the max life time of an auth cookie in seconds. The
+value is read when a wrapper is constructed (NewCookieAuthHandleFuncWrapper);
+use SetExpiry to change the lifetime of an existing wrapper.
+*/
+var CookieMaxLifetime = 3600
+
+/*
+TestCookieAuthDisabled is a flag to disable cookie based authentication temporarily
+(should only be used by unit tests)
+*/
+var TestCookieAuthDisabled = false
+
+/*
+CookieAuthHandleFuncWrapper datastructure. Wrapper for HandleFunc to add
+cookie authentication to all added endpoints.
+*/
+type CookieAuthHandleFuncWrapper struct {
+	origHandleFunc func(pattern string, handler func(http.ResponseWriter, *http.Request)) // Wrapped handle func registration (e.g. http.HandleFunc)
+	authFunc       func(user, pass string) bool                                           // Credential check - nil rejects all logins
+	accessFunc     func(http.ResponseWriter, *http.Request, string) bool                  // Optional authorization check - nil grants access
+	tokenMap       *datautil.MapCache                                                     // Expiring map of auth token -> username
+	expiry         int                                                                    // Auth token/cookie lifetime in seconds
+	publicURL      map[string]func(http.ResponseWriter, *http.Request)                    // Handlers reachable without authentication
+
+	// Callbacks
+
+	CallbackSessionExpired func(w http.ResponseWriter, r *http.Request) // Called when a token exists but the session is gone
+	CallbackUnauthorized   func(w http.ResponseWriter, r *http.Request) // Called when no valid auth token is presented
+}
+
+/*
+NewCookieAuthHandleFuncWrapper creates a new HandleFunc wrapper. The token
+lifetime is initialised from CookieMaxLifetime and default callbacks are
+installed which answer with 401 and a short plain text message.
+*/
+func NewCookieAuthHandleFuncWrapper(origHandleFunc func(pattern string,
+	handler func(http.ResponseWriter, *http.Request))) *CookieAuthHandleFuncWrapper {
+
+	return &CookieAuthHandleFuncWrapper{
+		origHandleFunc: origHandleFunc,
+		tokenMap:       datautil.NewMapCache(0, int64(CookieMaxLifetime)),
+		expiry:         CookieMaxLifetime,
+		publicURL:      make(map[string]func(http.ResponseWriter, *http.Request)),
+
+		// Default callback if a session has expired
+
+		CallbackSessionExpired: func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusUnauthorized)
+			w.Write([]byte("Session expired\n"))
+		},
+
+		// Default callback if a request is not authorized
+
+		CallbackUnauthorized: func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusUnauthorized)
+			w.Write([]byte("Unauthorized\n"))
+		},
+	}
+}
+
+/*
+AddPublicPage adds a page which should be accessible without authentication
+using a special handler. Public pages bypass all auth checks in HandleFunc.
+*/
+func (cw *CookieAuthHandleFuncWrapper) AddPublicPage(url string, handler func(http.ResponseWriter, *http.Request)) {
+	cw.publicURL[url] = handler
+}
+
+/*
+Expiry returns the current authentication expiry time in seconds.
+*/
+func (cw *CookieAuthHandleFuncWrapper) Expiry() int {
+	return cw.expiry
+}
+
+/*
+SetExpiry sets the authentication expiry time in seconds. All existing
+authentications are retracted during this function call since the token
+map is replaced with a fresh one.
+*/
+func (cw *CookieAuthHandleFuncWrapper) SetExpiry(secs int) {
+	cw.expiry = secs
+	cw.tokenMap = datautil.NewMapCache(0, int64(secs))
+}
+
+/*
+SetAuthFunc sets an authentication function which can be used by the wrapper
+to authenticate users. A nil function causes AuthUser to always fail.
+*/
+func (cw *CookieAuthHandleFuncWrapper) SetAuthFunc(authFunc func(user, pass string) bool) {
+	cw.authFunc = authFunc
+}
+
+/*
+SetAccessFunc sets an access function which can be used by the wrapper to
+check the user access rights. A nil access function grants access to every
+authenticated user (see HandleFunc).
+*/
+func (cw *CookieAuthHandleFuncWrapper) SetAccessFunc(accessFunc func(http.ResponseWriter, *http.Request, string) bool) {
+	cw.accessFunc = accessFunc
+}
+
+/*
+AuthUser authenticates a user and creates an auth token unless testOnly is
+true. Returns an empty string if the authentication was not successful and
+"ok" for a successful test-only check.
+*/
+func (cw *CookieAuthHandleFuncWrapper) AuthUser(user, pass string, testOnly bool) string {
+
+	// Reject immediately if no auth function is set or it refuses the credentials
+
+	if cw.authFunc == nil || !cw.authFunc(user, pass) {
+		return ""
+	}
+
+	if testOnly {
+		return "ok"
+	}
+
+	// Generate a valid auth token and remember the user for it
+
+	aid := cw.newAuthID()
+	cw.tokenMap.Put(aid, user)
+
+	return aid
+}
+
+/*
+CheckAuth checks the user authentication of an incoming request. Returns
+if the authentication is correct and the given username. The check is done
+by looking up the request's auth cookie value in the token map.
+*/
+func (cw *CookieAuthHandleFuncWrapper) CheckAuth(r *http.Request) (string, bool) {
+	var name string
+	var ok bool
+
+	cookie, _ := r.Cookie(cookieNameAuth)
+
+	if cookie != nil && cookie.Value != "" {
+		var user interface{}
+		if user, ok = cw.tokenMap.Get(cookie.Value); ok {
+			name = fmt.Sprint(user)
+		}
+	}
+
+	return name, ok
+}
+
+/*
+SetAuthCookie sets the auth cookie in a given response object. Calling it
+with an empty auth id is a no-op.
+*/
+func (cw *CookieAuthHandleFuncWrapper) SetAuthCookie(yaid string, w http.ResponseWriter) {
+
+	// Nothing to do if no auth id is given
+
+	if yaid == "" {
+		return
+	}
+
+	http.SetCookie(w, &http.Cookie{
+		Name:     cookieNameAuth,
+		Value:    url.QueryEscape(yaid),
+		Path:     "/",
+		HttpOnly: true,
+		MaxAge:   cw.expiry,
+	})
+}
+
+/*
+RemoveAuthCookie removes the auth cookie in a given response object and
+invalidates it (MaxAge of -1 instructs the client to delete the cookie).
+*/
+func (cw *CookieAuthHandleFuncWrapper) RemoveAuthCookie(w http.ResponseWriter) {
+	http.SetCookie(w, &http.Cookie{
+		Name:     cookieNameAuth,
+		Value:    "",
+		Path:     "/",
+		HttpOnly: true,
+		MaxAge:   -1,
+	})
+}
+
+/*
+InvalidateAuthCookie invalidates the authentication of an incoming request
+by removing its auth token from the token map.
+*/
+func (cw *CookieAuthHandleFuncWrapper) InvalidateAuthCookie(r *http.Request) {
+	cookie, _ := r.Cookie(cookieNameAuth)
+
+	if cookie != nil && cookie.Value != "" {
+		cw.tokenMap.Remove(cookie.Value)
+	}
+}
+
+/*
+newAuthID creates a new auth id consisting of an "A-" prefix followed by
+32 hex encoded bytes from the secure random source.
+*/
+func (cw *CookieAuthHandleFuncWrapper) newAuthID() string {
+	buf := make([]byte, 32)
+
+	// Read exactly 32 random bytes - a short read is a fatal error
+
+	if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+		errorutil.AssertOk(err)
+	}
+
+	return fmt.Sprintf("A-%x", buf)
+}
+
+/*
+HandleFunc is the new handle func which wraps an original handle function to
+do an authentication check. Requests are served directly for public URLs or
+while TestCookieAuthDisabled is set; otherwise a valid auth cookie, a user
+session and (optionally) a positive access check are required.
+*/
+func (cw *CookieAuthHandleFuncWrapper) HandleFunc(pattern string, handler func(http.ResponseWriter, *http.Request)) {
+
+	cw.origHandleFunc(pattern, func(w http.ResponseWriter, r *http.Request) {
+
+		// Check if this is a public URL
+
+		if chandler, ok := cw.publicURL[r.URL.Path]; ok {
+			chandler(w, r)
+			return
+		}
+
+		// Check if authentication is disabled
+
+		if TestCookieAuthDisabled {
+			handler(w, r)
+			return
+		}
+
+		// Retrieve the cookie value
+
+		cookie, _ := r.Cookie(cookieNameAuth)
+
+		if cookie != nil && cookie.Value != "" {
+
+			// Check in the token map if the user was authenticated
+
+			if name, ok := cw.tokenMap.Get(cookie.Value); ok {
+				nameString := fmt.Sprint(name)
+
+				// Create or retrieve the user session (this call sets the session
+				// cookie in the response) - a session is considered expired if
+				// a session cookie is found in the request but no corresponding
+				// session can be found by the UserSessionManager
+
+				session, err := user.UserSessionManager.GetSession(nameString, w, r, true)
+
+				if session != nil && err == nil && session.User() == nameString {
+
+					// Set the auth cookie in the response
+
+					cw.SetAuthCookie(cookie.Value, w)
+
+					// Check authorization
+
+					if cw.accessFunc == nil || cw.accessFunc(w, r, nameString) {
+
+						// Handle the request
+
+						handler(w, r)
+					}
+
+					return
+				}
+
+				// Remove auth token entry since the session has expired
+				// (deferred so the callback below still sees the entry)
+
+				defer cw.tokenMap.Remove(cookie.Value)
+
+				cw.CallbackSessionExpired(w, r)
+
+				return
+			}
+		}
+
+		cw.CallbackUnauthorized(w, r)
+	})
+}

+ 243 - 0
httputil/auth/cookie_test.go

@@ -0,0 +1,243 @@
+package auth
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+	"testing"
+
+	"devt.de/krotik/common/httputil"
+	"devt.de/krotik/common/httputil/user"
+)
+
+/*
+TestCookieAuth exercises the CookieAuthHandleFuncWrapper: unauthenticated
+access, the TestCookieAuthDisabled switch, public pages, login/logout via
+auth cookies, access restriction, session expiry and failed logins. It
+relies on fixtures (sendTestRequest, TESTQUERYURL, originalHandleFunction,
+wrappedHandleFunction, handleCallback) defined elsewhere in this package.
+*/
+func TestCookieAuth(t *testing.T) {
+
+	// Set a very fast session expiry
+
+	user.UserSessionManager.Provider.(*user.MemorySessionProvider).SetExpiry(1)
+
+	// Create a wrapper for basic auth
+
+	ca := NewCookieAuthHandleFuncWrapper(func(pattern string,
+		handler func(http.ResponseWriter, *http.Request)) {
+
+		// Ignore the pattern and just replace the wrappedHandleFunction
+
+		wrappedHandleFunction = handler
+	})
+
+	ca.SetExpiry(42)
+
+	if res := ca.Expiry(); res != 42 {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Ensure custom handle function is set back
+
+	defer func() { handleCallback = func(w http.ResponseWriter, r *http.Request) {} }()
+
+	// Wrap the originalHandleFunction and let the previous code set it
+	// as wrappedHandleFunction
+
+	ca.HandleFunc("/", originalHandleFunction)
+
+	// Test that basic authentication is active
+
+	res, _ := sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test disabling authentication temporarily
+
+	TestCookieAuthDisabled = true
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Content" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	TestCookieAuthDisabled = false
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Register credentials and try to authenticate
+
+	ca.SetAuthFunc(func(user, pass string) bool {
+		return user == "yams" && pass == "yams"
+	})
+
+	// Test authentication
+
+	if testres := ca.AuthUser("yams", "yams", true); testres != "ok" {
+		t.Error("Unexpected result:", testres)
+	}
+
+	ca.AddPublicPage("/foo/pic", httputil.SingleFileServer("test.jpg", nil).ServeHTTP)
+
+	// Simulate authentication
+
+	ca.AddPublicPage("/foo/login", func(w http.ResponseWriter, r *http.Request) {
+
+		// Create a token
+
+		token := ca.AuthUser(r.Header.Get("user1"), r.Header.Get("pass1"), false)
+
+		// Set the cookie
+
+		ca.SetAuthCookie(token, w)
+	})
+
+	ca.AddPublicPage("/foo/logout", func(w http.ResponseWriter, r *http.Request) {
+		ca.InvalidateAuthCookie(r)
+		ca.RemoveAuthCookie(w)
+	})
+
+	// Get some public content
+
+	res, resp := sendTestRequest(TESTQUERYURL+"/pic", "GET", nil, nil, nil)
+
+	if res != "testpic" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Login request
+
+	res, resp = sendTestRequest(TESTQUERYURL+"/login", "GET", map[string]string{
+		"user1": "yams",
+		"pass1": "yams",
+	}, nil, nil)
+
+	// Send first request which creates a session
+
+	res, resp = sendTestRequest(TESTQUERYURL, "GET", nil, resp.Cookies(), nil)
+
+	if res != "Content - User session: yams" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test access denied
+
+	ca.SetAccessFunc(func(w http.ResponseWriter, r *http.Request, user string) bool {
+
+		if strings.HasPrefix(r.URL.Path, "/foo/bar") {
+			http.Error(w, "Page is restricted", http.StatusForbidden)
+			return false
+		}
+		return true
+	})
+
+	res, resp = sendTestRequest(TESTQUERYURL+"/bar", "GET", nil, resp.Cookies(), nil)
+
+	if res != "Page is restricted" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Check we have a valid session
+
+	cookies := resp.Cookies()
+	sessions, _ := user.UserSessionManager.Provider.GetAll()
+
+	if len(sessions) != 1 {
+		t.Error("Unexpected number of active sessions:", sessions)
+		return
+	}
+
+	if user, ok := ca.CheckAuth(resp.Request); !ok || user != "yams" {
+		t.Error("Unexpected result:", ok, user)
+		return
+	}
+
+	var theSession user.Session
+	for _, v := range sessions {
+		theSession = v.(user.Session)
+		break
+	}
+
+	var theAuth string
+	for k := range ca.tokenMap.GetAll() {
+		theAuth = k
+		break
+	}
+
+	if len(cookies) != 2 ||
+		cookies[0].Raw != fmt.Sprintf("~sid=%v; Path=/; Max-Age=%v; HttpOnly",
+			theSession.ID(), CookieMaxLifetime) ||
+		cookies[1].Raw != fmt.Sprintf("~aid=%v; Path=/; Max-Age=42; HttpOnly", theAuth) {
+
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+	// Test session expiry
+
+	user.UserSessionManager.Provider.Destroy(theSession.ID())
+
+	res, _ = sendTestRequest(TESTQUERYURL, "GET", nil, cookies, nil)
+
+	if res != "Session expired" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Test a logout
+
+	_, resp2 := sendTestRequest(TESTQUERYURL+"/logout", "GET", nil, resp.Cookies(), nil)
+
+	cookies = resp2.Cookies()
+
+	if len(cookies) != 1 ||
+		cookies[0].Raw != "~aid=; Path=/; Max-Age=0; HttpOnly" {
+
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+	cookies = resp.Cookies()
+
+	// The next request will no longer have access to a session
+
+	res, resp = sendTestRequest(TESTQUERYURL, "GET", nil, cookies, nil)
+
+	if res != "Unauthorized" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	cookies = resp.Cookies()
+
+	if len(cookies) != 0 {
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+	// Test error cases
+
+	// Wrong credentials - error message depends on custom handler
+
+	res, resp = sendTestRequest(TESTQUERYURL+"/login", "GET", map[string]string{
+		"user1": "yams",
+		"pass1": "yams1",
+	}, nil, nil)
+
+	cookies = resp.Cookies()
+
+	if len(cookies) != 0 {
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+}

+ 278 - 0
httputil/httpserver.go

@@ -0,0 +1,278 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package httputil contains a HTTP/HTTPS Server which can be stopped via signals
+or a Shutdown() call.
+*/
+package httputil
+
+import (
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"os/signal"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+)
+
+/*
+HTTPServer data structure
+*/
+type HTTPServer struct {
+	signalling chan os.Signal    // Channel for receiving signals
+	LastError  error             // Last recorded error
+	Running    bool              // Flag if the server is running (NOTE(review): written by the serve goroutine and read by callers without synchronization - confirm this status flag use is acceptable)
+	listener   signalTCPListener // TCP listener of the server
+}
+
+/*
+Shutdown sends a shutdown signal (SIGINT) on the server's signalling
+channel. It is a no-op if the server was never started.
+*/
+func (hs *HTTPServer) Shutdown() {
+	if hs.signalling != nil {
+		hs.signalling <- syscall.SIGINT
+	}
+}
+
+/*
+RunHTTPServer starts a HTTP Server which can be stopped via ^C (Control-C).
+It is assumed that all routes have been added prior to this call.
+
+laddr should be the local address which should be given to net.Listen.
+wgStatus is an optional wait group which will be notified once the server is listening
+and once the server has shutdown. Note that wgStatus is also notified (Done)
+on the error path so callers waiting on it never block forever.
+
+This function will not return unless the server is shutdown.
+*/
+func (hs *HTTPServer) RunHTTPServer(laddr string, wgStatus *sync.WaitGroup) error {
+
+	hs.Running = false
+
+	// Create normal TCP listener
+
+	originalListener, err := net.Listen("tcp", laddr)
+	if err != nil {
+		hs.LastError = err
+
+		if wgStatus != nil {
+			wgStatus.Done()
+		}
+
+		return err
+	}
+
+	// Wrap listener in a signal aware listener
+
+	sl := newSignalTCPListener(originalListener, originalListener.(*net.TCPListener), wgStatus)
+
+	return hs.runServer(sl, wgStatus)
+}
+
+/*
+RunHTTPSServer starts a HTTPS Server which can be stopped via ^C (Control-C).
+It is assumed that all routes have been added prior to this call.
+
+keypath should be set to a path containing the TLS certificate and key.
+certFile should be the file containing the TLS certificate.
+keyFile should be the file containing the private key for the TLS connection.
+laddr should be the local address which should be given to net.Listen.
+wgStatus is an optional wait group which will be notified once the server is listening
+and once the server has shutdown. It is also notified (Done) on every error
+path so callers waiting on it never block forever.
+
+This function will not return unless the server is shutdown.
+*/
+func (hs *HTTPServer) RunHTTPSServer(keypath string, certFile string, keyFile string,
+	laddr string, wgStatus *sync.WaitGroup) error {
+
+	// Check parameters - ensure the key path ends with a separator
+
+	if keypath != "" && !strings.HasSuffix(keypath, "/") {
+		keypath += "/"
+	}
+
+	// Load key pair and create a TLS config
+
+	cert, err := tls.LoadX509KeyPair(keypath+certFile, keypath+keyFile)
+	if err != nil {
+		hs.LastError = err
+
+		if wgStatus != nil {
+			wgStatus.Done()
+		}
+
+		return err
+	}
+
+	hs.Running = false
+
+	// Create normal TCP listener
+
+	originalListener, err := net.Listen("tcp", laddr)
+	if err != nil {
+		hs.LastError = err
+
+		if wgStatus != nil {
+			wgStatus.Done()
+		}
+
+		return err
+	}
+
+	// Wrap the listener in a TLS listener
+
+	config := tls.Config{Certificates: []tls.Certificate{cert}}
+
+	originalTLSListener := tls.NewListener(originalListener, &config)
+
+	// Wrap listeners in a signal aware listener
+
+	sl := newSignalTCPListener(originalTLSListener, originalListener.(*net.TCPListener), wgStatus)
+
+	return hs.runServer(sl, wgStatus)
+}
+
+/*
+runServer starts the actual server on the given listener and blocks until
+a SIGINT signal (sent by the OS or by Shutdown) is received. The optional
+wait group is notified once the shutdown has completed. Always returns nil.
+*/
+func (hs *HTTPServer) runServer(sl *signalTCPListener, wgStatus *sync.WaitGroup) error {
+
+	// Use the http server from the standard library
+
+	server := http.Server{}
+
+	// Attach SIGINT handler - on unix and windows this is send
+	// when the user presses ^C (Control-C). The channel must be
+	// buffered (go vet flags an unbuffered channel passed to
+	// signal.Notify) so a signal arriving while this goroutine
+	// is not receiving is not dropped.
+
+	hs.signalling = make(chan os.Signal, 1)
+	signal.Notify(hs.signalling, syscall.SIGINT)
+
+	// Put the serve call into a wait group so we can wait until shutdown
+	// completed
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	go func() {
+		defer wg.Done()
+
+		hs.Running = true
+		server.Serve(sl)
+	}()
+
+	for {
+		sig := <-hs.signalling
+
+		if sig == syscall.SIGINT {
+
+			// Shutdown the server
+
+			sl.Shutdown()
+
+			// Wait until the server has shut down
+
+			wg.Wait()
+
+			hs.Running = false
+
+			break
+		}
+	}
+
+	if wgStatus != nil {
+		wgStatus.Done()
+	}
+
+	return nil
+}
+
+/*
+signalTCPListener models a TCPListener which can receive signals.
+*/
+type signalTCPListener struct {
+	net.Listener                  // Wrapped net.Listener
+	tcpListener  *net.TCPListener // TCP listener which accepts connections
+	Signals      chan int         // Channel used for signalling
+	wgStatus     *sync.WaitGroup  // Optional Waitgroup to be notified after start
+}
+
+/*
+SigShutdown is used to signal a request for shutdown
+*/
+const SigShutdown = 1
+
+/*
+ErrSigShutdown indicates that a shutdown signal was received; it is the
+error returned by Accept after Shutdown was called.
+*/
+var ErrSigShutdown = errors.New("Server was shut down")
+
+/*
+newSignalTCPListener wraps a given TCPListener in a signal aware listener.
+*/
+func newSignalTCPListener(l net.Listener, tl *net.TCPListener, wgStatus *sync.WaitGroup) *signalTCPListener {
+	return &signalTCPListener{
+		Listener:    l,
+		tcpListener: tl,
+		Signals:     make(chan int),
+		wgStatus:    wgStatus,
+	}
+}
+
+/*
+Accept waits for a new connection. This accept call will check every
+second if a signal or other shutdown event was received.
+*/
+func (sl *signalTCPListener) Accept() (net.Conn, error) {
+	for {
+
+		// Wait up to a second for a new connection so the signal channel
+		// is polled regularly (NOTE(review): the SetDeadline error is
+		// ignored - a failure here would surface via Accept below)
+
+		sl.tcpListener.SetDeadline(time.Now().Add(time.Second))
+		newConn, err := sl.Listener.Accept()
+
+		// Notify wgStatus if it was specified - only once, after the
+		// first accept cycle, hence the reset to nil
+
+		if sl.wgStatus != nil {
+			sl.wgStatus.Done()
+			sl.wgStatus = nil
+		}
+
+		// Check for a received signal
+
+		select {
+		case sig := <-sl.Signals:
+
+			// Check which signal was received
+
+			if sig == SigShutdown {
+				return nil, ErrSigShutdown
+			}
+
+			panic(fmt.Sprintf("Unknown signal received: %v", sig))
+
+		default:
+
+			netErr, ok := err.(net.Error)
+
+			// If we got a connection or error at this point return it;
+			// a temporary timeout error just restarts the accept loop
+
+			if (err != nil && (!ok || !(netErr.Timeout() && netErr.Temporary()))) || newConn != nil {
+				return newConn, err
+			}
+		}
+	}
+}
+
+/*
+Shutdown sends a shutdown signal to the Accept loop. The signal channel is
+closed afterwards, so Shutdown must only be called once per listener.
+*/
+func (sl *signalTCPListener) Shutdown() {
+	sl.Signals <- SigShutdown
+	close(sl.Signals)
+}

+ 290 - 0
httputil/httpserver_test.go

@@ -0,0 +1,290 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package httputil
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"flag"
+	"fmt"
+	"html"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"sync"
+	"syscall"
+	"testing"
+	"time"
+
+	"devt.de/krotik/common/cryptutil"
+	"devt.de/krotik/common/fileutil"
+)
+
+// certdir is the directory in which test certificates are generated
+const certdir = "certs"
+
+// Local ports used by the test HTTP and HTTPS servers
+const testporthttp = ":9050"
+const testporthttps = ":9051"
+
+// invalidFileName contains characters which are invalid in file names
+// (NOTE(review): string(0x0) uses the legacy integer-to-string conversion
+// and yields a NUL rune - confirm this is intended)
+const invalidFileName = "**" + string(0x0)
+
+/*
+TestMain creates a fresh certificate directory before running the tests
+of this package and removes it afterwards.
+*/
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	// Setup - start with an empty certificate directory
+
+	if res, _ := fileutil.PathExists(certdir); res {
+		os.RemoveAll(certdir)
+	}
+
+	err := os.Mkdir(certdir, 0770)
+	if err != nil {
+		fmt.Print("Could not create test directory:", err.Error())
+		os.Exit(1)
+	}
+
+	// Run the tests
+
+	res := m.Run()
+
+	// Teardown
+
+	err = os.RemoveAll(certdir)
+	if err != nil {
+		fmt.Print("Could not remove test directory:", err.Error())
+	}
+
+	os.Exit(res)
+}
+
+/*
+TestHTTPSServer starts an HTTPS server with a freshly generated self-signed
+certificate, verifies that a second server cannot bind the same port or use
+missing key files, sends a request over TLS and shuts the server down.
+*/
+func TestHTTPSServer(t *testing.T) {
+
+	// Generate a certificate and private key
+
+	err := cryptutil.GenCert(certdir, "cert.pem", "key.pem", "localhost", "", 365*24*time.Hour, true, 2048, "")
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Add dummy handler
+
+	http.HandleFunc("/httpsserver_test", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "Hello over HTTPS, %q", html.EscapeString(r.URL.Path))
+	})
+
+	hs := &HTTPServer{}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	go hs.RunHTTPSServer(certdir, "cert.pem", "key.pem", testporthttps, &wg)
+
+	wg.Wait()
+
+	// HTTPS Server has started
+
+	if hs.LastError != nil {
+		t.Error(hs.LastError)
+		return
+
+	}
+	// Check we can't start two servers
+
+	var wg2 sync.WaitGroup
+	hs2 := &HTTPServer{}
+
+	wg2.Add(1)
+
+	err = hs2.RunHTTPSServer(certdir, "c.pem", "k.pem", testporthttps, &wg2)
+	if hs2.LastError == nil ||
+		(hs2.LastError.Error() != "open certs/c.pem: no such file or directory" &&
+			hs2.LastError.Error() != "open certs/c.pem: The system cannot find the file specified.") ||
+		err != hs2.LastError {
+		t.Error("Unexpected error return:", hs2.LastError)
+		return
+	}
+
+	// Add again to wait group so we can try again
+
+	wg2.Add(1)
+
+	err = hs2.RunHTTPSServer(certdir, "cert.pem", "key.pem", testporthttps, &wg2)
+	if hs2.LastError == nil || (hs2.LastError.Error() != "listen tcp "+testporthttps+
+		": bind: address already in use" && hs2.LastError.Error() != "listen tcp "+testporthttps+
+		": bind: Only one usage of each socket address (protocol/network address/port) is normally permitted.") ||
+		err != hs2.LastError {
+		t.Error("Unexpected error return:", hs2.LastError)
+	}
+
+	// Add to the wait group so we can wait for the shutdown
+
+	wg.Add(1)
+
+	// Send something to the server
+
+	if res := sendTestHTTPSRequest(certdir + "/cert.pem"); res != `Hello over HTTPS, "/httpsserver_test"` {
+		t.Error("Unexpected request response:", res)
+		return
+	}
+
+	// Server is shut down
+
+	hs.Shutdown()
+
+	if hs.Running == true {
+		wg.Wait()
+	} else {
+		t.Error("Server was not running as expected")
+	}
+}
+
+/*
+TestSignalling starts a plain HTTP server, verifies double-bind errors,
+checks that non-SIGINT signals do not stop the server, shuts it down and
+finally verifies that an unknown listener signal causes a panic.
+*/
+func TestSignalling(t *testing.T) {
+
+	// Add dummy handler
+
+	http.HandleFunc("/httpserver_test", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "Hello, %q", html.EscapeString(r.URL.Path))
+	})
+
+	hs := &HTTPServer{}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	go hs.RunHTTPServer(testporthttp, &wg)
+
+	wg.Wait()
+
+	// Server is started
+
+	if hs.LastError != nil {
+		t.Error(hs.LastError)
+		return
+
+	}
+
+	// Check we can't start two servers
+
+	var wg2 sync.WaitGroup
+	wg2.Add(1)
+	hs2 := &HTTPServer{}
+	err := hs2.RunHTTPServer(testporthttp, &wg2)
+	if hs2.LastError == nil || (hs2.LastError.Error() != "listen tcp "+testporthttp+
+		": bind: address already in use" && hs2.LastError.Error() != "listen tcp "+testporthttp+
+		": bind: Only one usage of each socket address (protocol/network address/port) is normally permitted.") ||
+		err != hs2.LastError {
+		t.Error("Unexpected error return:", hs2.LastError)
+	}
+
+	// Add to the wait group so we can wait for the shutdown
+
+	wg.Add(1)
+
+	// Send something to the server
+
+	if res := sendTestRequest(); res != `Hello, "/httpserver_test"` {
+		t.Error("Unexpected request response:", res)
+		return
+	}
+
+	// Check we can send other signals - they must not stop the server
+
+	hs.signalling <- syscall.SIGHUP
+
+	time.Sleep(time.Duration(50) * time.Millisecond)
+	if hs.Running != true {
+		t.Error("Server should still be running after sending wrong shutdown signal")
+		return
+	}
+
+	// Server is shut down
+
+	hs.Shutdown()
+
+	if hs.Running == true {
+		wg.Wait()
+	} else {
+		t.Error("Server was not running as expected")
+	}
+
+	// Test listener panic
+
+	originalListener, _ := net.Listen("tcp", testporthttp)
+	sl := newSignalTCPListener(originalListener, originalListener.(*net.TCPListener), nil)
+
+	go testUnknownSignalPanic(t, sl)
+	sl.Signals <- -1
+}
+
+/*
+testUnknownSignalPanic calls Accept on the given listener and fails the
+test unless the call panics (expected for an unknown signal value).
+*/
+func testUnknownSignalPanic(t *testing.T, sl *signalTCPListener) {
+	defer func() {
+		if r := recover(); r == nil {
+			t.Error("Sending an unknown signal did not cause a panic.")
+		}
+	}()
+	sl.Accept()
+}
+
+/*
+sendTestRequest posts a small JSON payload to the test HTTP endpoint and
+returns the response body as a string. As a test helper it panics on any
+error.
+*/
+func sendTestRequest() string {
+	url := "http://localhost" + testporthttp + "/httpserver_test"
+
+	var jsonStr = []byte(`{"msg":"Hello!"}`)
+
+	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
+	if err != nil {
+
+		// Previously this error was silently dropped which would have
+		// caused a confusing nil pointer panic in client.Do below
+
+		panic(err)
+	}
+
+	req.Header.Set("X-Custom-Header", "myvalue")
+	req.Header.Set("Content-Type", "application/json")
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := ioutil.ReadAll(resp.Body)
+
+	return string(body)
+}
+
+/*
+sendTestHTTPSRequest posts a small JSON payload to the test HTTPS endpoint
+using the given CA certificate file to verify the server and returns the
+response body as a string. As a test helper it panics on any error.
+*/
+func sendTestHTTPSRequest(caCert string) string {
+
+	// Build ca cert pool
+
+	caPool := x509.NewCertPool()
+	serverCert, err := ioutil.ReadFile(caCert)
+	if err != nil {
+		panic(err)
+	}
+	caPool.AppendCertsFromPEM(serverCert)
+
+	tr := &http.Transport{
+		TLSClientConfig:    &tls.Config{RootCAs: caPool},
+		DisableCompression: true,
+	}
+
+	url := "https://localhost" + testporthttps + "/httpsserver_test"
+
+	var jsonStr = []byte(`{"msg":"Hello!"}`)
+
+	req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonStr))
+	if err != nil {
+
+		// Previously this error was silently dropped which would have
+		// caused a confusing nil pointer panic in client.Do below
+
+		panic(err)
+	}
+
+	req.Header.Set("X-Custom-Header", "myvalue")
+	req.Header.Set("Content-Type", "application/json")
+
+	client := &http.Client{Transport: tr}
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := ioutil.ReadAll(resp.Body)
+
+	return string(body)
+}

+ 113 - 0
httputil/user/session.go

@@ -0,0 +1,113 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package user contains user session management for webservers. Sessions are
+identified via session cookies and stored in memory on the server side.
+*/
+package user
+
+import "fmt"
+
+/*
+Session models a user session object. A session is identified by a unique
+id, belongs to a user and carries an arbitrary set of key/value pairs.
+*/
+type Session interface {
+
+	/*
+		ID returns the session id.
+	*/
+	ID() string
+
+	/*
+		User returns the user of the session.
+	*/
+	User() string
+
+	/*
+		GetAll returns all known session values.
+	*/
+	GetAll() map[string]interface{}
+
+	/*
+		Get returns a stored session value and whether the
+		given key exists.
+	*/
+	Get(key string) (interface{}, bool)
+
+	/*
+		Set sets a session value. A nil value deletes a value
+		from the session.
+	*/
+	Set(key string, value interface{})
+
+	/*
+		String returns a string representation of the session.
+	*/
+	String() string
+}
+
+/*
+NewDefaultSession creates a new default session object for the given id
+and user with an initially empty value map.
+*/
+func NewDefaultSession(id string, user string) Session {
+	return &DefaultSession{
+		id:     id,
+		user:   user,
+		values: map[string]interface{}{},
+	}
+}
+
+/*
+DefaultSession is the default manager for web sessions.
+*/
+type DefaultSession struct {
+	id     string                 // Unique session id
+	user   string                 // User the session belongs to
+	values map[string]interface{} // Arbitrary session key/value data
+}
+
+/*
+ID returns the session id.
+*/
+func (ds *DefaultSession) ID() string {
+	return ds.id
+}
+
+/*
+User returns the user of the session.
+*/
+func (ds *DefaultSession) User() string {
+	return ds.user
+}
+
+/*
+GetAll returns all known session values. Note that the internal map is
+returned directly - mutations by the caller are visible in the session.
+*/
+func (ds *DefaultSession) GetAll() map[string]interface{} {
+	return ds.values
+}
+
+/*
+Get returns a stored session value and whether the given key exists.
+*/
+func (ds *DefaultSession) Get(key string) (interface{}, bool) {
+	ret, ok := ds.values[key]
+	return ret, ok
+}
+
+/*
+Set sets a session value. A nil value deletes a value
+from the session.
+*/
+func (ds *DefaultSession) Set(key string, value interface{}) {
+	if value == nil {
+
+		// Honour the documented contract: nil removes the entry instead of
+		// storing a nil value (which would still show up in GetAll)
+
+		delete(ds.values, key)
+		return
+	}
+	ds.values[key] = value
+}
+
+/*
+String returns a string representation of the session.
+*/
+func (ds *DefaultSession) String() string {
+	return fmt.Sprintf("Session: %v (User:%v Values:%v)", ds.id, ds.user, ds.values)
+}

+ 151 - 0
httputil/user/session_test.go

@@ -0,0 +1,151 @@
+package user
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+)
+
+// TestDefaultSession exercises the basic getter/setter and string
+// representation behaviour of DefaultSession.
+func TestDefaultSession(t *testing.T) {
+
+	ds := NewDefaultSession("test", "user1")
+
+	if res := ds.ID(); res != "test" {
+		t.Error("Unexpected id:", res)
+		return
+	}
+
+	ds.Set("key1", "value1")
+
+	if res, ok := ds.Get("key1"); !ok || res != "value1" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ds.GetAll(); fmt.Sprint(res) != "map[key1:value1]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(ds); res != "Session: test (User:user1 Values:map[key1:value1])" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+// TestSessionCreation runs a full session lifecycle (login via ?user=...,
+// authenticated request with the returned cookie, logout via ?logout=1)
+// against the test server set up in TestMain.
+func TestSessionCreation(t *testing.T) {
+
+	handleCallback = func(w http.ResponseWriter, r *http.Request) {
+		param := r.URL.Query()
+		name, ok := param["user"]
+		if ok {
+
+			// Register the new user
+
+			UserSessionManager.GetSession(name[0], w, r, true)
+
+			if hasCookie, isActive := UserSessionManager.CheckSessionCookie(r); !hasCookie || !isActive {
+				t.Error("Unexpected result:", hasCookie, isActive)
+				return
+			}
+		}
+
+		session, _ := UserSessionManager.GetSession("", w, r, false)
+
+		_, ok = param["logout"]
+		if ok && session != nil {
+
+			if hasCookie, isActive := UserSessionManager.CheckSessionCookie(r); !hasCookie || !isActive {
+				t.Error("Unexpected result:", hasCookie, isActive)
+				return
+			}
+
+			UserSessionManager.RemoveSessionCookie(w)
+			UserSessionManager.Provider.Destroy(session.ID())
+		}
+	}
+
+	res, resp := sendTestRequest(TESTQUERYURL+"?user=fred", "GET", nil, nil, nil)
+
+	if res != "Content" {
+		t.Error("Unexpected response:", res)
+		return
+	}
+
+	// Check we have a valid session
+
+	cookies := resp.Cookies()
+	sessions, _ := UserSessionManager.Provider.GetAll()
+
+	if len(sessions) != 1 {
+		t.Error("Unexpected number of active sessions:", sessions)
+		return
+	}
+
+	var theSession Session
+	for _, v := range sessions {
+		theSession = v.(Session)
+		break
+	}
+
+	if len(cookies) != 1 ||
+		cookies[0].Raw != fmt.Sprintf("~sid=%v; Path=/; Max-Age=%v; HttpOnly",
+			theSession.ID(), CookieMaxLifetime) {
+
+		t.Error("Unexpected cookie:", cookies)
+		return
+	}
+
+	// The next request will have access to a session
+
+	res, resp = sendTestRequest(TESTQUERYURL, "GET", nil, cookies, nil)
+
+	if res != "Content - User session: fred" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	session, _ := UserSessionManager.GetSession("", nil, resp.Request, false)
+	if session == nil {
+		t.Error("Unexpected result")
+		return
+	}
+
+	res, resp = sendTestRequest(TESTQUERYURL+"?logout=1", "GET", nil, cookies, nil)
+	cookies = resp.Cookies()
+
+	if res != "Content - User session: fred" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if len(cookies) != 1 ||
+		fmt.Sprint(cookies[0].Raw) != "~sid=; Path=/; Max-Age=0; HttpOnly" {
+
+		t.Error("Unexpected cookie:", cookies[0])
+		return
+	}
+
+	// Check the user is no longer identified in the session
+
+	res, resp = sendTestRequest(TESTQUERYURL, "GET", nil, cookies, nil)
+
+	if res != "Content" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// We can still see the cookie in the session but it has now an invalid value
+
+	if hasCookie, isActive := UserSessionManager.CheckSessionCookie(resp.Request); !hasCookie || isActive {
+		t.Error("Unexpected result:", hasCookie, isActive)
+		return
+	}
+
+	session, err := UserSessionManager.GetSession("", nil, resp.Request, false)
+	if session != nil || err != nil {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+}

+ 258 - 0
httputil/user/user.go

@@ -0,0 +1,258 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package user
+
+import (
+	"crypto/rand"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"sync"
+
+	"devt.de/krotik/common/datautil"
+	"devt.de/krotik/common/errorutil"
+)
+
+/*
+cookieNameSession defines the name of the session cookie
+*/
+const cookieNameSession = "~sid"
+
+/*
+CookieMaxLifetime is the max life time of a session cookie in seconds
+*/
+var CookieMaxLifetime = 3600
+
+/*
+UserSessionManager is the single manager for all user sessions. It uses
+an in-memory session provider by default.
+*/
+var UserSessionManager = &SessionManager{sync.Mutex{},
+	NewMemorySessionProvider()}
+
+/*
+SessionManager manages web sessions.
+*/
+type SessionManager struct {
+	Lock     sync.Mutex      // Guards session creation/lookup in GetSession
+	Provider SessionProvider // Backend which stores the actual sessions
+}
+
+/*
+newSessionID creates a new unique session id of the form "S-<64 hex chars>"
+from 32 cryptographically random bytes.
+*/
+func (manager *SessionManager) newSessionID() string {
+	b := make([]byte, 32)
+	_, err := io.ReadFull(rand.Reader, b)
+
+	errorutil.AssertOk(err)
+
+	return fmt.Sprintf("S-%x", b)
+}
+
+/*
+CheckSessionCookie checks if a request contains a session cookie and if the
+session is active. Returns whether the request has a session cookie and
+whether that session is currently known to the provider.
+*/
+func (manager *SessionManager) CheckSessionCookie(r *http.Request) (bool, bool) {
+	var session Session
+
+	cookie, _ := r.Cookie(cookieNameSession)
+
+	if cookie != nil {
+		sid, _ := url.QueryUnescape(cookie.Value)
+		session, _ = manager.Provider.Get(sid)
+	}
+
+	return cookie != nil, session != nil
+}
+
+/*
+RemoveSessionCookie removes the session cookie in a given response object.
+*/
+func (manager *SessionManager) RemoveSessionCookie(w http.ResponseWriter) {
+
+	// A negative MaxAge produces a "Max-Age: 0" header which instructs
+	// the client to discard the cookie immediately.
+
+	http.SetCookie(w, &http.Cookie{
+		Name:     cookieNameSession,
+		Value:    "",
+		Path:     "/",
+		HttpOnly: true,
+		MaxAge:   -1,
+	})
+}
+
+/*
+GetSession retrieves an existing or creates a new session. If create is
+false and no session cookie is present, (nil, nil) is returned. Creating
+a session requires a non-empty user name.
+*/
+func (manager *SessionManager) GetSession(user string, w http.ResponseWriter,
+	r *http.Request, create bool) (Session, error) {
+
+	manager.Lock.Lock()
+	defer manager.Lock.Unlock()
+
+	var session Session
+	var err error
+	var sid string
+
+	// Retrieve the cookie
+
+	cookie, cerr := r.Cookie(cookieNameSession)
+
+	if cookie == nil || cookie.Value == "" {
+
+		if !create {
+
+			// Session is not present and it should not be created
+
+			return nil, nil
+		}
+
+		// Session is not created if no user is present
+
+		if user == "" {
+			return nil, errors.New("Cannot create a session without a user")
+		}
+
+		// No cookie present - create a new session
+
+		sid = manager.newSessionID()
+
+		session, _ = manager.Provider.Init(sid, user)
+
+	} else {
+
+		// Session should be available
+
+		sid, _ = url.QueryUnescape(cookie.Value)
+		session, err = manager.Provider.Get(sid)
+	}
+
+	if create {
+
+		// Write the session cookie in the response
+		// NOTE(review): w must not be nil when create is true - SetCookie
+		// would panic otherwise
+
+		cookie = &http.Cookie{
+			Name:     cookieNameSession,
+			Value:    url.QueryEscape(sid),
+			Path:     "/",
+			HttpOnly: true,
+			MaxAge:   CookieMaxLifetime,
+		}
+
+		http.SetCookie(w, cookie)
+	}
+
+	if cerr == http.ErrNoCookie {
+
+		// Also register the cookie in the request so the session can
+		// can be found by subsequent calls
+
+		r.AddCookie(cookie)
+	}
+
+	return session, err
+}
+
+/*
+SessionProvider is a session storage provider. Sessions should expire
+after a certain amount of time.
+*/
+type SessionProvider interface {
+
+	/*
+		Init creates a new session for a given user. The session has an
+		explicit expiry time after which a get will fail.
+	*/
+	Init(sid string, user string) (Session, error)
+
+	/*
+		Get retrieves a session.
+	*/
+	Get(sid string) (Session, error)
+
+	/*
+		GetAll returns a list of all sessions.
+	*/
+	GetAll() ([]Session, error)
+
+	/*
+		Destroy destroys a session.
+	*/
+	Destroy(sid string) error
+}
+
+/*
+MemorySessionProvider keeps all session related data in memory.
+*/
+type MemorySessionProvider struct {
+	sessions *datautil.MapCache // Thread safe memory cache holding sid -> Session
+}
+
+/*
+NewMemorySessionProvider creates a new memory session provider. By default
+sessions have the same expiry time as cookies (the CookieMaxLifetime value
+at the time of this call).
+*/
+func NewMemorySessionProvider() SessionProvider {
+	ret := &MemorySessionProvider{}
+	ret.SetExpiry(CookieMaxLifetime)
+	return ret
+}
+
+/*
+SetExpiry sets the session expiry time in seconds. All existing sessions
+are deleted during this function call. This call is not thread safe - only
+use it during initialisation!
+*/
+func (ms *MemorySessionProvider) SetExpiry(secs int) {
+	// Replacing the MapCache implicitly drops all previously stored sessions
+	ms.sessions = datautil.NewMapCache(0, int64(secs))
+}
+
+/*
+Init creates a new session for a given user. The session has an explicit
+expiry time after which a get will fail.
+*/
+func (ms *MemorySessionProvider) Init(sid string, user string) (Session, error) {
+	newSession := NewDefaultSession(sid, user)
+	ms.sessions.Put(sid, newSession)
+	return newSession, nil
+}
+
+/*
+Get retrieves a session. An unknown or expired sid yields (nil, nil).
+*/
+func (ms *MemorySessionProvider) Get(sid string) (Session, error) {
+	stored, _ := ms.sessions.Get(sid)
+
+	if stored == nil {
+		return nil, nil
+	}
+
+	return stored.(Session), nil
+}
+
+/*
+GetAll returns a list of all sessions.
+*/
+func (ms *MemorySessionProvider) GetAll() ([]Session, error) {
+	all := ms.sessions.GetAll()
+	ret := make([]Session, 0, len(all))
+
+	for _, entry := range all {
+		ret = append(ret, entry.(Session))
+	}
+
+	return ret, nil
+}
+
+/*
+Destroy destroys a session (presumably a no-op for unknown session ids -
+depends on datautil.MapCache.Remove behaviour).
+*/
+func (ms *MemorySessionProvider) Destroy(sid string) error {
+	ms.sessions.Remove(sid)
+	return nil
+}

+ 186 - 0
httputil/user/user_test.go

@@ -0,0 +1,186 @@
+package user
+
+import (
+	"bytes"
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strings"
+	"sync"
+	"testing"
+
+	"devt.de/krotik/common/httputil"
+)
+
+// TESTPORT is the port the test HTTP server listens on.
+const TESTPORT = ":9090"
+
+// TESTQUERYURL is the base URL used by the tests to query the test server.
+const TESTQUERYURL = "http://localhost" + TESTPORT + "/foo"
+
+// handleCallback can be replaced by individual tests to hook into request handling.
+var handleCallback = func(w http.ResponseWriter, r *http.Request) {}
+
+/*
+handleFunction is the content handler of the test server. It reports in
+the response body whether a user session was found for the request.
+*/
+var handleFunction = func(w http.ResponseWriter, r *http.Request) {
+
+	// Check if a valid session cookie is there
+
+	session, _ := UserSessionManager.GetSession("", w, r, false)
+
+	handleCallback(w, r)
+
+	if session == nil {
+		w.Write([]byte("Content"))
+	} else {
+		w.Write([]byte(fmt.Sprint("Content - User session: ", session.User())))
+	}
+}
+
+/*
+TestMain sets up a test HTTP server for all tests in this package and
+shuts it down again once the tests have run.
+*/
+func TestMain(m *testing.M) {
+	flag.Parse()
+
+	// Setup a simple webserver
+
+	hs, wg := startServer()
+	if hs == nil {
+		return
+	}
+
+	// Register a simple content delivery function
+
+	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+
+		// Call the wrapped handle function which then adds the authentication
+
+		handleFunction(w, r)
+	})
+
+	// Run the tests
+
+	res := m.Run()
+
+	// Make sure the webserver shuts down. This must happen before the call
+	// to os.Exit - os.Exit does not run deferred functions, so a deferred
+	// stopServer would never have been executed.
+
+	stopServer(hs, wg)
+
+	os.Exit(res)
+}
+
+// TestNoAuthNoSession verifies that requests without credentials get no
+// session and that anonymous session creation is rejected.
+func TestNoAuthNoSession(t *testing.T) {
+
+	// By default there is no session and no authentication
+
+	res, _ := sendTestRequest(TESTQUERYURL, "GET", nil, nil, nil)
+
+	if res != "Content" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Trying to create an anonymous session should fail
+
+	r, _ := http.NewRequest("GET", "", nil)
+	_, err := UserSessionManager.GetSession("", nil, r, true)
+
+	if err.Error() != "Cannot create a session without a user" {
+		t.Error("Unexpected error:", err)
+		return
+	}
+}
+
+/*
+sendTestRequest sends a request to an HTTP test server and returns the
+response body (pretty-printed if it was JSON) together with the raw
+response object. Any error causes a panic since this is test support code.
+*/
+func sendTestRequest(url string, method string, headers map[string]string,
+	cookies []*http.Cookie, content []byte) (string, *http.Response) {
+
+	var req *http.Request
+	var err error
+
+	// Create request
+
+	if content != nil {
+		req, err = http.NewRequest(method, url, bytes.NewBuffer(content))
+	} else {
+		req, err = http.NewRequest(method, url, nil)
+	}
+
+	if err != nil {
+		// Fail early on a malformed request instead of dereferencing a nil req
+		panic(err)
+	}
+
+	// Add headers
+
+	req.Header.Set("Content-Type", "application/json")
+
+	for k, v := range headers {
+		req.Header.Set(k, v)
+	}
+
+	// Add cookies
+
+	for _, v := range cookies {
+		req.AddCookie(v)
+	}
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := ioutil.ReadAll(resp.Body)
+	bodyStr := strings.Trim(string(body), " \n")
+
+	// Try json decoding first
+
+	out := bytes.Buffer{}
+	err = json.Indent(&out, []byte(bodyStr), "", "  ")
+	if err == nil {
+		return out.String(), resp
+	}
+
+	// Just return the body
+
+	return bodyStr, resp
+}
+
+/*
+startServer starts an HTTP test server on TESTPORT and blocks until it is
+listening. It panics if the server reports a startup error.
+*/
+func startServer() (*httputil.HTTPServer, *sync.WaitGroup) {
+	hs := &httputil.HTTPServer{}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	go hs.RunHTTPServer(TESTPORT, &wg)
+
+	wg.Wait()
+
+	// Server is started
+
+	if hs.LastError != nil {
+		panic(hs.LastError)
+	}
+
+	return hs, &wg
+}
+
+/*
+stopServer stops a started HTTP test server and waits for its shutdown.
+It panics if the server was not running.
+*/
+func stopServer(hs *httputil.HTTPServer, wg *sync.WaitGroup) {
+
+	if hs.Running == true {
+
+		wg.Add(1)
+
+		// Server is shut down
+
+		hs.Shutdown()
+
+		wg.Wait()
+
+	} else {
+
+		panic("Server was not running as expected")
+	}
+}

+ 114 - 0
httputil/util.go

@@ -0,0 +1,114 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package httputil
+
+import (
+	"errors"
+	"io/ioutil"
+	"math/rand"
+	"mime"
+	"net/http"
+	"net/url"
+	"path/filepath"
+	"time"
+
+	"devt.de/krotik/common/fileutil"
+)
+
+/*
+CheckLocalRedirect checks if a given redirect URL is a local redirect.
+The function returns an error in all other cases.
+*/
+func CheckLocalRedirect(urlString string) error {
+	u, err := url.Parse(urlString)
+
+	if err != nil {
+		return err
+	}
+
+	if u.IsAbs() {
+		return errors.New("Redirection URL must not be an absolute URL")
+	}
+
+	return nil
+}
+
+/*
+singleFileHandler is a handler for a single file.
+*/
+type singleFileHandler struct {
+	path       string          // File served for every request
+	errHandler func(err error) // Callback invoked on read/write errors
+}
+
+/*
+SingleFileServer returns a handler that serves all HTTP requests
+with the contents of a single file. A nil errHandler is replaced
+by a no-op callback.
+*/
+func SingleFileServer(path string, errHandler func(err error)) http.Handler {
+	handler := &singleFileHandler{path: path, errHandler: errHandler}
+
+	if handler.errHandler == nil {
+		handler.errHandler = func(error) {}
+	}
+
+	return handler
+}
+
+/*
+ServeHTTP serves HTTP requests with the contents of the configured file.
+If the file cannot be read the registered error handler is called and a
+401 response is written.
+NOTE(review): 401 Unauthorized is an unusual status for a missing file -
+404 might be more appropriate, but the existing tests rely on this.
+*/
+func (f *singleFileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+
+	ok, err := fileutil.PathExists(f.path)
+
+	if ok {
+		var content []byte
+
+		// Derive the content type from the file extension
+
+		ctype := mime.TypeByExtension(filepath.Ext(f.path))
+		w.Header().Set("Content-Type", ctype)
+
+		if content, err = ioutil.ReadFile(f.path); err == nil {
+			if _, err = w.Write(content); err == nil {
+				return
+			}
+		}
+	}
+
+	if err != nil {
+		f.errHandler(err)
+	}
+
+	w.WriteHeader(http.StatusUnauthorized)
+	w.Write([]byte("Unauthorized\n"))
+}
+
+/*
+randomFileHandler is a handler for a random file.
+*/
+type randomFileHandler struct {
+	*singleFileHandler          // Embedded handler doing the actual serving
+	paths              []string // Candidate files to pick from
+}
+
+/*
+RandomFileServer returns a handler that serves all HTTP requests
+with the contents of a random file. The file is picked from a predefined
+list. A nil errHandler is replaced by a no-op callback (consistent with
+SingleFileServer - otherwise the error path would panic).
+*/
+func RandomFileServer(paths []string, errHandler func(err error)) http.Handler {
+	if errHandler == nil {
+		errHandler = func(err error) {}
+	}
+	return &randomFileHandler{&singleFileHandler{"", errHandler}, paths}
+}
+
+/*
+ServeHTTP serves HTTP requests with the contents of a randomly picked file
+from the configured list.
+*/
+func (f *randomFileHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	rand.Seed(int64(time.Now().Nanosecond()))
+
+	selected := f.paths[rand.Intn(len(f.paths))]
+
+	// Serve via a per-request handler value instead of mutating the shared
+	// path field - concurrent requests would otherwise race on it.
+
+	handler := singleFileHandler{selected, f.errHandler}
+	handler.ServeHTTP(w, r)
+}

+ 144 - 0
httputil/util_test.go

@@ -0,0 +1,144 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package httputil
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"testing"
+)
+
+const InvalidFileName = "**" + string(0x0)
+
+/*
+dummyResponse is a dummy object for http response testing. It records
+written headers and the response body in memory.
+*/
+type dummyResponse struct {
+	out    *bytes.Buffer
+	header map[string][]string
+}
+
+// Header returns the recorded response headers.
+func (dr *dummyResponse) Header() http.Header {
+	return dr.header
+}
+
+// Write captures the response body in the internal buffer.
+func (dr *dummyResponse) Write(b []byte) (int, error) {
+	return dr.out.Write(b)
+}
+
+// WriteHeader discards the status code - it is not inspected by these tests.
+func (dr *dummyResponse) WriteHeader(int) {
+}
+
+// TestCheckLocalRedirect checks that relative URLs are accepted and
+// absolute or unparsable URLs are rejected.
+func TestCheckLocalRedirect(t *testing.T) {
+
+	// Check local redirects
+
+	if err := CheckLocalRedirect("/foo/bar"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckLocalRedirect("foo/bar"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckLocalRedirect("x"); err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check absolute redirects
+
+	if err := CheckLocalRedirect("http://hans.foo/bla"); err == nil || err.Error() != "Redirection URL must not be an absolute URL" {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckLocalRedirect("file://hans.foo/bla"); err == nil || err.Error() != "Redirection URL must not be an absolute URL" {
+		t.Error(err)
+		return
+	}
+
+	// NOTE(review): the expected message below is Go-version dependent -
+	// newer net/url versions quote the URL in the error string
+
+	if err := CheckLocalRedirect("://hans.foo/bla"); err == nil || err.Error() != "parse ://hans.foo/bla: missing protocol scheme" {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckLocalRedirect("https:www.foo.co.uk"); err == nil || err.Error() != "Redirection URL must not be an absolute URL" {
+		t.Error(err)
+		return
+	}
+
+	if err := CheckLocalRedirect("https:3627733859"); err == nil || err.Error() != "Redirection URL must not be an absolute URL" {
+		t.Error(err)
+		return
+	}
+}
+
+// TestSingleFileServer checks serving an existing file and the 401
+// fallback for an unreadable path.
+func TestSingleFileServer(t *testing.T) {
+
+	ioutil.WriteFile("foo.txt", []byte("foo test"), 0666)
+	defer os.Remove("foo.txt")
+
+	sfs := SingleFileServer("foo.txt", nil)
+	dr := &dummyResponse{&bytes.Buffer{}, make(map[string][]string)}
+
+	sfs.ServeHTTP(dr, nil)
+
+	if res := fmt.Sprint(dr.header); res != "map[Content-Type:[text/plain; charset=utf-8]]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(dr.out); res != "foo test" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	sfs = SingleFileServer(InvalidFileName, nil)
+	dr = &dummyResponse{&bytes.Buffer{}, make(map[string][]string)}
+
+	sfs.ServeHTTP(dr, nil)
+
+	if res := fmt.Sprint(dr.header); res != "map[]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(dr.out); res != "Unauthorized\n" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+// TestRandomFileServer checks the random file handler by offering the same
+// file three times so the served content is deterministic.
+func TestRandomFileServer(t *testing.T) {
+
+	ioutil.WriteFile("foo.txt", []byte("foo test"), 0666)
+	defer os.Remove("foo.txt")
+
+	rfs := RandomFileServer([]string{"foo.txt", "foo.txt", "foo.txt"}, nil)
+	dr := &dummyResponse{&bytes.Buffer{}, make(map[string][]string)}
+
+	rfs.ServeHTTP(dr, nil)
+
+	if res := fmt.Sprint(dr.header); res != "map[Content-Type:[text/plain; charset=utf-8]]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(dr.out); res != "foo test" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}

+ 141 - 0
imageutil/asciiraster.go

@@ -0,0 +1,141 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package imageutil contains utility function to create/manipulate images.
+
+Asciiraster contains support for raster fonts for images. Using RenderSymbols you
+can add text and symbols to an image. By specifying a symbol map containing ASCII art
+it is possible to define how each rune should be rendered.
+*/
+package imageutil
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"image"
+	"image/color"
+	"unicode"
+)
+
+/*
+SymbolSpacing defines the spacing in pixels between two symbols.
+*/
+var SymbolSpacing = 1
+
+/*
+SpaceSymbolSpacing defines the space in pixels of a space character if the
+character is not defined in the font map.
+*/
+var SpaceSymbolSpacing = 5
+
+/*
+RenderSymbols renders the symbols in the given string str at the given point p in the
+given Image img in the color col using smap as symbol mapping. It returns a
+wrapped image holding the modified pixels - the original image is not
+changed. An error is returned for runes without a mapping (except spaces).
+*/
+func RenderSymbols(img image.Image, p image.Point, str string,
+	col color.Color, smap map[rune]string) (image.Image, error) {
+
+	var offset int
+
+	imgc := wrapImage(img)
+
+	// Iterate over the string
+
+	for _, r := range str {
+
+		sym, ok := smap[r]
+		if !ok {
+
+			if unicode.IsSpace(r) {
+
+				// If a space character is encountered and it is not defined in the map
+				// then just move the offset and continue
+
+				offset += SpaceSymbolSpacing
+				continue
+			}
+
+			return nil, fmt.Errorf("Cannot find mapping for rune: %q", r)
+		}
+
+		sline := 0
+		rwidth := 0
+
+		// Go through the symbol line by line
+
+		scanner := bufio.NewScanner(bytes.NewBufferString(sym))
+		for scanner.Scan() {
+
+			line := scanner.Text()
+
+			// Set max width of symbol (len counts bytes which is adequate
+			// for the expected ASCII art)
+
+			if l := len(line); rwidth < l {
+				rwidth = l
+			}
+
+			soffset := 0
+
+			for _, sr := range line {
+
+				// Draw each pixel which is not whitespace or a control rune
+
+				if !(unicode.IsSpace(sr) || unicode.IsControl(sr)) {
+					imgc.Set(offset+soffset+p.X, sline+p.Y, col)
+				}
+
+				soffset++
+			}
+
+			sline++
+		}
+
+		// Advance the offset
+
+		offset += rwidth + SymbolSpacing
+	}
+
+	return imgc, nil
+}
+
+/*
+wrapImage wraps a given image so single pixels can be overridden.
+*/
+func wrapImage(img image.Image) *imageWrapper {
+	return &imageWrapper{img, make(map[image.Point]color.Color)}
+}
+
+/*
+imageWrapper is a wrapper class for images which allows setting single
+pixels without modifying the underlying image.
+*/
+type imageWrapper struct {
+	image.Image                             // Original image
+	pixMap      map[image.Point]color.Color // Modified pixels
+}
+
+/*
+Set sets the color of the pixel at (x, y). The underlying image is not
+modified - the pixel is recorded in the overlay map.
+*/
+func (m *imageWrapper) Set(x, y int, c color.Color) {
+	m.pixMap[image.Point{x, y}] = c
+}
+
+/*
+At returns the color of the pixel at (x, y). Modified pixels take
+precedence over the wrapped image.
+*/
+func (m *imageWrapper) At(x, y int) color.Color {
+	point := image.Point{X: x, Y: y}
+
+	if override := m.pixMap[point]; override != nil {
+		return override
+	}
+
+	return m.Image.At(x, y)
+}

+ 163 - 0
imageutil/asciiraster_test.go

@@ -0,0 +1,163 @@
+package imageutil
+
+import (
+	"bytes"
+	"encoding/base64"
+	"image"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"testing"
+
+	"devt.de/krotik/common/fileutil"
+)
+
+import (
+	"image/color"
+	"image/jpeg"
+	"image/png"
+)
+
+// TestRenderSymbols renders two test glyphs onto a decoded JPEG and spot
+// checks individual pixels of the result.
+func TestRenderSymbols(t *testing.T) {
+
+	reader := base64.NewDecoder(base64.StdEncoding, strings.NewReader(data))
+
+	m, err := jpeg.Decode(reader)
+	if err != nil {
+		// NOTE(review): log.Fatal aborts the whole test binary - t.Fatal
+		// would be the idiomatic choice here
+		log.Fatal(err)
+	}
+
+	imageFile := "test.png"
+
+	if res, _ := fileutil.PathExists(imageFile); res {
+		os.Remove(imageFile)
+	}
+
+	colBlack := color.RGBA{0, 0, 0, 255}
+
+	_, err = RenderSymbols(m, image.Point{5, 2}, "c", colBlack,
+		testCharMap)
+
+	if err.Error() != "Cannot find mapping for rune: 'c'" {
+		t.Error("Unexpected error response: ", err)
+		return
+	}
+
+	m, err = RenderSymbols(m, image.Point{5, 2}, "aba a", colBlack,
+		testCharMap)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Check that at least some pixels were set correctly
+
+	if m.At(7, 2) == colBlack {
+		t.Error("Expected color was not found")
+	}
+	if m.At(8, 2) != colBlack {
+		t.Error("Expected color was not found")
+	}
+	if m.At(13, 5) != colBlack {
+		t.Error("Expected color was not found")
+	}
+	if m.At(14, 5) == colBlack {
+		t.Error("Expected color was not found")
+	}
+
+	var buf bytes.Buffer
+
+	png.Encode(&buf, m)
+
+	ioutil.WriteFile(imageFile, buf.Bytes(), 0644)
+
+	if res, _ := fileutil.PathExists(imageFile); res {
+		os.Remove(imageFile)
+	} else {
+		t.Error("Expected image file did not exist")
+	}
+}
+
+// Test characters - a minimal two-glyph ASCII art font used by TestRenderSymbols
+
+var testCharMap = map[rune]string{
+
+	'a': `
+   ****
+ **    **
+ ********
+ **    **
+ **    **
+`[1:],
+
+	'b': `
+ ******
+ **    *
+ ******
+ **    *
+ ******
+`[1:],
+}
+
+// Test image - a small base64-encoded JPEG used as the drawing canvas in TestRenderSymbols
+
+const data = `
+/9j/4AAQSkZJRgABAQIAHAAcAAD/2wBDABALDA4MChAODQ4SERATGCgaGBYWGDEjJR0oOjM9PDkzODdA
+SFxOQERXRTc4UG1RV19iZ2hnPk1xeXBkeFxlZ2P/2wBDARESEhgVGC8aGi9jQjhCY2NjY2NjY2NjY2Nj
+Y2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2NjY2P/wAARCABnAJYDASIAAhEBAxEB/8QA
+HwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIh
+MUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVW
+V1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXG
+x8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQF
+BgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAV
+YnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOE
+hYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq
+8vP09fb3+Pn6/9oADAMBAAIRAxEAPwDlwKMD0pwzSiuK57QzGDxS7D6in8Y5ximnAPUfSlcq4m3ilUYp
+2OKXHvRcVxnTtS7c07HNFK4DQPakC4PNOA+tOx70XAjK/So5gBGP94fzqfvUVx/qxx/EP51UXqRP4WSE
+cmgjilP3jSEZqS0IO/NGDnpUiocDg/McDjvV6HTPOdVWYgsM5KcfzzQ2JySM2jp6VYu7SWzmMUwG4cgj
+kMPUVBjjtTGtRu0Zopw+lFFxhinrGzuqqMsxAA9yaXFSRv5cqSEcIwYj6GpuZ30O30fSLKzhUpbpNMv3
+5XGTn29BV28jt7pPLuIVljPBBFVreYx+VbqAjycgt3x14zRcNOxGyVFHQkIc/wA61exyKLbuzjdZ046d
+ftEuTEw3Rk9SPT8P8Kpbea3tchbyVae4JkjbbGpGdwOM89Af6ViFTWUtGdcXoM2+woK1JtpNtTcoZt+l
+Jt7ZqTbRtouFyPFRXI/c9D94fzqzioLsfuD/ALw/nVReqIn8LJCOTSY+tSMOTmkIpXLRu+F0t5pJxPHG
+wjjUAuBjJJz1+laD6Pai+WaK9SBX6puzn6ZP+NV/Dkdtc6ZNbyAFwxLAHDYPv6VoQ21nPNEEiQGEFRtk
+Gf0NaWTOeW7Of8QwGG4MRZnEbYXPJwRnOR0zWNXW+KrqBLUWi5EjbWCgcAA9c/gRXKYqZaGlK/LqMH0F
+FLtHvRSNiYD2pSDTgpp6p0ywUHoTULXYxcktzrdCf7Xo8LP/AKyEmMNjJ46dfbFWJ5TDGNwB9lFUvDV9
+YrbfYGbyrjcWG88S57g+vtV26ZIvMlumKwwjLZ6V0WfU54yTvYwtbubea2WNWbzg4bYQeBgj8OtYeKhj
+u4y2HQxqxOD1xzxmrWAQCCGB6EGsaikndmsJxeiYzBo280/Z7UbayuaXGY5oIp+2lx9KLjIsVDeD/Rj/
+ALy/zq1t96r3y4tT/vL/ADq4P3kRP4WSleTSFKkkKoCW4GaqNcMxIjXj1pxjKT0FKrGC1Nrw3vGrKkYz
+5kTAr6455/HH510UdwPtRgWCbzF5+YYUf4Vwun39xpmoR3qASMmQUJwGU9Rnt/8AWrpbrxhb8/ZdOmaQ
+gAGZwFH5ZJrpVKVlY5ZYhN6kXiu2eO/ikZlIljAAB5yM549OawSOOlPuLqe+umuLqTfM4OSOAo7ADsKh
+hl/cRsTuJHPv7mlKi3sVTxNtGP20VJhThgSQaK52mnZnUqsWrpkyeUrr5pABOAPU1AGaXUCWJISHGPfP
+P8qL7BiKnsMg46H3qrbzupbj5mPTPTpXVSglG551SpzSsXJ4/MBUgYIxyKpySyGBYJriV1D7kRpCVH4V
+bSeNJ4xchni3DeqnBI+td7F4b0mKIRjT45VbktJlzk455+n6VtYzv2PNwFZWBHBGKVJDGVC54/nXQeMN
+NttLNkba1jgWVWDmM8bhg4/nzXLSSbXVj6fyNKUdNRp21RtIRJGrjuM0u3FQ2DbodvcEkfQmrW2vLqLl
+k0ejCXNFMj2/jQV9qkxSYNRcsZiq2oI32N2CkhWXJxwOe9XMcVt6hoPn6dFaW0wgRpNzvKDlz6+/0rai
+ryv2Jm9LHJai+ZRGCBjnr71ErdAxAY9B611t1Y2cunbbaOQ3FvKZI3UqGlZMbiWwfcfhV231iwvLSM3U
+lt5Uq52TuZG+hGMA12xXJGxxzjzybOQtNOvb5j9ktZJhnBIHyg+5PFX38JayqK/2eLJIBUTgkDA9q7ex
+itrSHFpGsUbndhRgc+g7VNIyfZJAoJZUbb3I46CtFJMylBo8sdWhmYMuCnylc9wef5VUT7+1chc5NS7h
+sUZO5RtIPUH3pkBDOxxxmqM9TQtn+WilhHfHaik43KTG3Z4IyPyrNVjGCsZ+dmwv6V3cXhSG8sYpJLud
+JJIwxChdoJGcYx/Wkg8DafA4knvLiQr/ALqj+VQpKw3FtnFFfvbiSMgZJ6/jXp2n3d9cQRBTFsKD96EP
+oOxPU/8A68VVtbbRtMVntbePKDLTSHJH/Aj/AEqHTvE66rq72VugMMcbSGTnL4wMAfjT5n0HyW3L+s6b
+baxaJBdzN+7bcrxkAhun0rz3VNCv7e7lgigknWI43xLu6jjIHTjtXqfkpPGVYsBkghTikgsYIN/lhgXb
+cxLkknp/ShczQ7xtY8vtEmhkj8yGRBuCnehUcnHcVtmwfJ/fQ8e7f/E12txZW91C0U6b42xlST2OR/Ko
+Bo1gM/uW55/1jf41nOipu7LhV5FZHIGzI6zwj/vr/Ck+yr3uYf8Ax7/CutbQdMb71tn/ALaN/jSf8I/p
+X/PoP++2/wAan6rAr6wzkWt0II+1Rc/7Lf4Vd1eeCSKBbdZDdShYoiZNoyfY10P/AAj2lf8APmP++2/x
+oPh/SjKspsozIuNrZORjp3qo0FHYPb3OZt7ae3SzjuItsiRSAgnccl/UA+3Q1yNjKLR4ZZYY5VD7tkv3
+WwO/+e1evPp9nI257aJm6bioz1z1+tY+s6Hplnot9PbWMMcqwOFcLyOO1bJWMZSTOPHi+9w3mosrlyd2
+9lCj02g9P/1e9a3hzxAbl2ikZRcdQueHHt7j864Y8Z4I4oRzG6urFWU5BHBB7HNJxTFGbR6he6Vpmtgm
+eLy5zwZI/lb8fX8azIvBUUTHdfSFP4QsYB/HNZ+k+KEnRY75hHOvAk6K/v7H9K6yyvlnQBmDZ6GsnzR0
+N0oy1RzOtaN/Y1tHNFO06u+zYy4I4Jzx9KKveJblXuordSGES5b6n/62PzorKVdp2LjQTVyWz8UWEWlq
+jSgyxfJt6EgdDzWTdeLIZGO7zHI/hVajGmWWP+PWL8qwlAIURrhpMAHHJA71pRcZrToZzcoEuo6heakA
+GHk245CZ6/X1qPTLq40q+W5t2QybSpDAkEEc55/zilk5k2r91eKhLDzWz2rpsczbbuemeD76fUNG865I
+MiysmQMZAAwa3a5j4ftu0ByP+fh/5CulkLLG7INzhSVHqe1Fh3uOoqn9qQQxyhndmHIxwOmSR2xQ13KD
+KoiBZOV9JBnt707MVy5RWdNdy7wRGf3bfMinnO1jg+vY03WXLaJO3mhQ20b0zwpYf0qlG7S7icrJs08U
+VwumgC+YiQyeVtZH567hzj8aSL949oGhE/2v5pJCDkksQwBHC4/+vXQ8LZ2uYxxCavY7us/xCcaBfn0h
+b+VP0bnSrb94ZMJgOecj1rl/GfidUE2k2gy5+SeQjgA/wj3rlas2jdao48qrjLAGkSKPk4Gc1WMj92I+
+lIJnU8OfxPWo5inBokmtQTmM4OOh71b0q6vbFmWCbaxHyqQGAP0PT8KhSTzVyo5ocSKA5VfTOTmqsmRd
+pl99XjPzThzK3zOeOSeveirNmkgg/fIpYsTkYORxRXmzlTjJqx6EVUcU7mhkKCzdAK59QI9zYxtG1fYU
+UVtgtmY4nZEa8Ak9aqFv3rfSiiu1nMeifDv/AJF+T/r4f+QrqqKKQwzQenNFFMCOKFIgNuThdoJ5OPSk
+ubeK6t3gnXdG4wwziiii/UTKMOg6dbzJLFE4dSCP3rEdeOM8805tDsGMvySgSsS6rM6gk9eAcUUVftZt
+3uyVGNthuq3Eei6DK8H7sRR7YuMgHtXkc8rzTNLM26RyWY+p70UVnLY0iEsUipG7rhZBlDkc1HgYoorM
+0HwyBXGeRjmrcUhMg2ghezd//rUUVcTKW5s2jZtY/QDaOKKKK8ip8bPRj8KP/9k=
+`

+ 314 - 0
imageutil/rasterfont1.go

@@ -0,0 +1,314 @@
+package imageutil
+
+/*
+Basic1CharMap is a default character map for RenderSymbols.
+
+It only has lowercase letters, numbers, the underscore and the colon rune.
+*/
+var Basic1CharMap = map[rune]string{
+
+	':': `
+
+
+**
+
+**
+`[1:],
+
+	'_': `
+
+
+
+
+*******
+`[1:],
+
+	'a': `
+ *****
+**   **
+*******
+**   **
+**   **
+`[1:],
+
+	'b': `
+******
+**   **
+******
+**   **
+******
+`[1:],
+
+	'c': `
+ ******
+**
+**
+**
+ ******
+`[1:],
+
+	'd': `
+******
+**   **
+**   **
+**   **
+******
+`[1:],
+
+	'e': `
+*******
+**
+*****
+**
+*******
+`[1:],
+
+	'f': `
+*******
+**
+*****
+**
+**
+`[1:],
+
+	'g': `
+ ******
+**
+**   ***
+**    **
+ ******
+`[1:],
+
+	'h': `
+**   **
+**   **
+*******
+**   **
+**   **
+`[1:],
+
+	'i': `
+**
+**
+**
+**
+**
+`[1:],
+
+	'j': `
+     **
+     **
+     **
+**   **
+ *****
+`[1:],
+
+	'k': `
+**   **
+**  **
+*****
+**  **
+**   **
+`[1:],
+
+	'l': `
+**
+**
+**
+**
+*******
+`[1:],
+
+	'm': `
+***    ***
+****  ****
+** **** **
+**  **  **
+**      **
+`[1:],
+
+	'n': `
+***    **
+****   **
+** **  **
+**  ** **
+**   ****
+`[1:],
+
+	'o': `
+ ******
+**    **
+**    **
+**    **
+ ******
+`[1:],
+
+	'p': `
+******
+**   **
+******
+**
+**
+`[1:],
+
+	'q': `
+ ******
+**    **
+**    **
+** ** **
+ ******
+    **
+`[1:],
+
+	'r': `
+******
+**   **
+******
+**   **
+**   **
+`[1:],
+
+	's': `
+ ******
+**
+*******
+     **
+******
+`[1:],
+
+	't': `
+********
+   **
+   **
+   **
+   **
+`[1:],
+
+	'u': `
+**    **
+**    **
+**    **
+**    **
+ ******
+`[1:],
+
+	'v': `
+**    **
+**    **
+**    **
+ **  **
+  ****
+`[1:],
+
+	'w': `
+**     **
+**     **
+**  *  **
+** *** **
+ *** ***
+`[1:],
+
+	'x': `
+**   **
+ ** **
+  ***
+ ** **
+**   **
+`[1:],
+
+	'y': `
+**    **
+ **  **
+  ****
+   **
+   **
+`[1:],
+
+	'z': `
+*******
+   ***
+  ***
+ ***
+*******
+`[1:],
+
+	'1': `
+ **
+***
+ **
+ **
+ **
+`[1:],
+
+	'2': `
+******
+     **
+ *****
+**
+*******
+`[1:],
+
+	'3': `
+******
+     **
+ *****
+     **
+******
+`[1:],
+
+	'4': `
+**   **
+**   **
+*******
+     **
+     **
+`[1:],
+
+	'5': `
+*******
+**
+******
+     **
+******
+`[1:],
+
+	'6': `
+ ******
+**
+*******
+**    **
+ ******
+`[1:],
+
+	'7': `
+*******
+     **
+    **
+   **
+   **
+`[1:],
+
+	'8': `
+ *****
+**   **
+ *****
+**   **
+ *****
+`[1:],
+
+	'9': `
+ *****
+**   **
+ ******
+     **
+ *****
+`[1:],
+
+	'0': `
+ ******
+**  ****
+** ** **
+****  **
+ ******
+`[1:],
+}

+ 852 - 0
imageutil/rasterfont2.go

@@ -0,0 +1,852 @@
+package imageutil
+
+/*
+Basic2CharMap is a default character map for RenderSymbols.
+
+A more elaborate font having upper and lower case banner text with numbers
+and some special characters.
+*/
+var Basic2CharMap = map[rune]string{
+
+	'A': `
+   *
+  * *
+ *   *
+*     *
+*******
+*     *
+*     *
+
+`[1:],
+	'B': `
+******
+*     *
+*     *
+******
+*     *
+*     *
+******
+
+`[1:],
+	'C': `
+ *****
+*     *
+*
+*
+*
+*     *
+ *****
+
+`[1:],
+	'D': `
+******
+*     *
+*     *
+*     *
+*     *
+*     *
+******
+
+`[1:],
+	'E': `
+*******
+*
+*
+*****
+*
+*
+*******
+
+`[1:],
+	'F': `
+*******
+*
+*
+*****
+*
+*
+*
+
+`[1:],
+	'G': `
+ *****
+*     *
+*
+*  ****
+*     *
+*     *
+ *****
+
+`[1:],
+	'H': `
+*     *
+*     *
+*     *
+*******
+*     *
+*     *
+*     *
+
+`[1:],
+	'I': `
+***
+ *
+ *
+ *
+ *
+ *
+***
+
+`[1:],
+	'J': `
+      *
+      *
+      *
+      *
+*     *
+*     *
+ *****
+
+`[1:],
+	'K': `
+*    *
+*   *
+*  *
+***
+*  *
+*   *
+*    *
+
+`[1:],
+	'L': `
+*
+*
+*
+*
+*
+*
+*******
+
+`[1:],
+	'M': `
+*     *
+**   **
+* * * *
+*  *  *
+*     *
+*     *
+*     *
+
+`[1:],
+	'N': `
+*     *
+**    *
+* *   *
+*  *  *
+*   * *
+*    **
+*     *
+
+`[1:],
+	'O': `
+*******
+*     *
+*     *
+*     *
+*     *
+*     *
+*******
+
+`[1:],
+	'P': `
+******
+*     *
+*     *
+******
+*
+*
+*
+
+`[1:],
+	'Q': `
+ *****
+*     *
+*     *
+*     *
+*   * *
+*    *
+ **** *
+
+`[1:],
+	'R': `
+******
+*     *
+*     *
+******
+*   *
+*    *
+*     *
+
+`[1:],
+	'S': `
+ *****
+*     *
+*
+ *****
+      *
+*     *
+ *****
+
+`[1:],
+	'T': `
+*******
+   *
+   *
+   *
+   *
+   *
+   *
+
+`[1:],
+	'U': `
+*     *
+*     *
+*     *
+*     *
+*     *
+*     *
+ *****
+
+`[1:],
+	'V': `
+*     *
+*     *
+*     *
+*     *
+ *   *
+  * *
+   *
+
+`[1:],
+	'W': `
+*     *
+*  *  *
+*  *  *
+*  *  *
+*  *  *
+*  *  *
+ ** **
+
+`[1:],
+	'X': `
+*     *
+ *   *
+  * *
+   *
+  * *
+ *   *
+*     *
+
+`[1:],
+	'Y': `
+*     *
+ *   *
+  * *
+   *
+   *
+   *
+   *
+
+`[1:],
+	'Z': `
+*******
+     *
+    *
+   *
+  *
+ *
+*******
+
+`[1:],
+	'a': `
+
+  **
+ *  *
+*    *
+******
+*    *
+*    *
+
+`[1:],
+	'b': `
+
+*****
+*    *
+*****
+*    *
+*    *
+*****
+
+`[1:],
+	'c': `
+
+ ****
+*    *
+*
+*
+*    *
+ ****
+
+`[1:],
+	'd': `
+
+*****
+*    *
+*    *
+*    *
+*    *
+*****
+
+`[1:],
+	'e': `
+
+******
+*
+*****
+*
+*
+******
+
+`[1:],
+	'f': `
+
+******
+*
+*****
+*
+*
+*
+
+`[1:],
+	'g': `
+
+ ****
+*    *
+*
+*  ***
+*    *
+ ****
+
+`[1:],
+	'h': `
+
+*    *
+*    *
+******
+*    *
+*    *
+*    *
+
+`[1:],
+	'i': `
+
+*
+*
+*
+*
+*
+*
+
+`[1:],
+	'j': `
+
+     *
+     *
+     *
+     *
+*    *
+ ****
+
+`[1:],
+	'k': `
+
+*    *
+*   *
+****
+*  *
+*   *
+*    *
+
+`[1:],
+	'l': `
+
+*
+*
+*
+*
+*
+******
+
+`[1:],
+	'm': `
+
+*    *
+**  **
+* ** *
+*    *
+*    *
+*    *
+
+`[1:],
+	'n': `
+
+*    *
+**   *
+* *  *
+*  * *
+*   **
+*    *
+
+`[1:],
+	'o': `
+
+ ****
+*    *
+*    *
+*    *
+*    *
+ ****
+
+`[1:],
+	'p': `
+
+*****
+*    *
+*    *
+*****
+*
+*
+
+`[1:],
+	'q': `
+
+ ****
+*    *
+*    *
+*  * *
+*   *
+ *** *
+
+`[1:],
+	'r': `
+
+*****
+*    *
+*    *
+*****
+*   *
+*    *
+
+`[1:],
+	's': `
+
+ ****
+*
+ ****
+     *
+*    *
+ ****
+
+`[1:],
+	't': `
+
+*****
+  *
+  *
+  *
+  *
+  *
+
+`[1:],
+	'u': `
+
+*    *
+*    *
+*    *
+*    *
+*    *
+ ****
+
+`[1:],
+	'v': `
+
+*    *
+*    *
+*    *
+*    *
+ *  *
+  **
+
+`[1:],
+	'w': `
+
+*    *
+*    *
+*    *
+* ** *
+**  **
+*    *
+
+`[1:],
+	'x': `
+
+*    *
+ *  *
+  **
+  **
+ *  *
+*    *
+
+`[1:],
+	'y': `
+
+*   *
+ * *
+  *
+  *
+  *
+  *
+
+`[1:],
+	'z': `
+
+******
+    *
+   *
+  *
+ *
+******
+
+`[1:],
+	'1': `
+  *
+ **
+* *
+  *
+  *
+  *
+*****
+
+`[1:],
+	'2': `
+ *****
+*     *
+      *
+ *****
+*
+*
+*******
+
+`[1:],
+	'3': `
+ *****
+*     *
+      *
+ *****
+      *
+*     *
+ *****
+
+`[1:],
+	'4': `
+*
+*    *
+*    *
+*    *
+*******
+     *
+     *
+
+`[1:],
+	'5': `
+*******
+*
+*
+******
+      *
+*     *
+ *****
+
+`[1:],
+	'6': `
+ *****
+*     *
+*
+******
+*     *
+*     *
+ *****
+
+`[1:],
+	'7': `
+*******
+*    *
+    *
+   *
+  *
+  *
+  *
+
+`[1:],
+	'8': `
+ *****
+*     *
+*     *
+ *****
+*     *
+*     *
+ *****
+
+`[1:],
+	'9': `
+ *****
+*     *
+*     *
+ ******
+      *
+*     *
+ *****
+
+`[1:],
+	'0': `
+  ***
+ *   *
+*     *
+*     *
+*     *
+ *   *
+  ***
+
+`[1:],
+	':': `
+
+**
+**
+
+**
+**
+
+
+`[1:],
+	';': `
+
+***
+***
+
+***
+***
+ *
+*
+`[1:],
+
+	',': `
+
+
+
+
+***
+***
+ *
+*
+`[1:],
+	'.': `
+
+
+
+
+***
+***
+***
+
+`[1:],
+	'\'': `
+***
+***
+ *
+*
+
+
+
+
+`[1:],
+	'_': `
+
+
+
+
+
+
+
+*******
+`[1:],
+	'(': `
+  **
+ *
+*
+*
+*
+ *
+  **
+
+`[1:],
+	')': `
+**
+  *
+   *
+   *
+   *
+  *
+**
+
+`[1:],
+	'!': `
+***
+***
+***
+ *
+
+***
+***
+
+`[1:],
+	'*': `
+
+ *   *
+  * *
+*******
+  * *
+ *   *
+
+
+`[1:],
+	'%': `
+***   *
+* *  *
+*** *
+   *
+  * ***
+ *  * *
+*   ***
+
+`[1:],
+	'?': `
+ *****
+*     *
+      *
+   ***
+   *
+
+   *
+
+`[1:],
+	'/': `
+      *
+     *
+    *
+   *
+  *
+ *
+*
+
+`[1:],
+	'@': `
+ *****
+*     *
+* *** *
+* *** *
+* ****
+*
+ *****
+
+`[1:],
+	'#': `
+  * *
+  * *
+*******
+  * *
+*******
+  * *
+  * *
+
+`[1:],
+	'-': `
+
+
+
+*****
+
+
+
+
+`[1:],
+	'+': `
+
+  *
+  *
+*****
+  *
+  *
+
+
+`[1:],
+	'=': `
+
+
+*****
+
+*****
+
+
+
+`[1:],
+	'&': `
+  **
+ *  *
+  **
+ ***
+*   * *
+*    *
+ ***  *
+
+`[1:],
+	'|': `
+*
+*
+*
+
+*
+*
+*
+
+`[1:],
+	'\\': `
+*
+ *
+  *
+   *
+    *
+     *
+      *
+
+`[1:],
+	'~': `
+
+
+ **
+*  *  *
+    **
+
+
+
+`[1:],
+}

+ 149 - 0
lang/graphql/parser/const.go

@@ -0,0 +1,149 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package parser contains a GraphQL parser. Based on GraphQL spec June 2018.
+
+Lexer for Source Text - @spec 2.1
+
+Lex() is a lexer function to convert a given search query into a list of tokens.
+
+Based on a talk by Rob Pike: Lexical Scanning in Go
+
+https://www.youtube.com/watch?v=HxaD_trXwRE
+
+The lexer's output is pushed into a channel which is consumed by the parser.
+This design enables the concurrent processing of the input text by lexer and
+parser.
+
+Parser
+
+Parse() is a parser which produces a parse tree from a given set of lexer tokens.
+
+Based on an article by Douglas Crockford: Top Down Operator Precedence
+
+http://crockford.com/javascript/tdop/tdop.html
+
+which is based on the ideas of Vaughan Pratt and his paper: Top Down Operator Precedence
+
+http://portal.acm.org/citation.cfm?id=512931
+https://tdop.github.io/
+
+ParseWithRuntime() parses a given input and decorates the resulting parse tree
+with runtime components which can be used to interpret the parsed query.
+*/
+package parser
+
/*
LexTokenID represents a unique lexer token ID
*/
type LexTokenID int

/*
Available lexer token types
*/
const (
	TokenError LexTokenID = iota // Lexing error token with a message as val
	TokenEOF                     // End-of-file token (emitted once the input is exhausted)

	// Punctuators - @spec 2.1.8

	// GraphQL documents include punctuation in order to describe structure.
	// GraphQL is a data description language and not a programming language,
	// therefore GraphQL lacks the punctuation often used to describe mathematical expressions.

	TokenPunctuator

	// Names - @spec 2.1.9

	// GraphQL Documents are full of named things: operations, fields, arguments, types,
	// directives, fragments, and variables. All names must follow the same grammatical
	// form. Names in GraphQL are case‐sensitive. That is to say name, Name, and NAME
	// all refer to different names. Underscores are significant, which means
	// other_name and othername are two different names. Names in GraphQL are limited
	// to this ASCII subset of possible characters to support interoperation with as
	// many other systems as possible.

	TokenName

	// Integer value - @spec 2.9.1

	// An Integer number is specified without a decimal point or exponent (ex. 1).

	TokenIntValue

	// Float value - @spec 2.9.2

	// A Float number includes either a decimal point (ex. 1.0) or an exponent
	// (ex. 1e50) or both (ex. 6.0221413e23).

	TokenFloatValue

	// String Value - @spec 2.9.4

	// Strings are sequences of characters wrapped in double‐quotes (").
	// (ex. "Hello World"). White space and other otherwise‐ignored characters are
	// significant within a string value. Unicode characters are allowed within String
	// value literals, however SourceCharacter must not contain some ASCII control
	// characters so escape sequences must be used to represent these characters.

	TokenStringValue
)
+
/*
Available parser AST node types. The string values are used as the Name
field of ASTNode objects produced by the parser.
*/
const (
	NodeAlias                = "Alias"
	NodeArgument             = "Argument"
	NodeArguments            = "Arguments"
	NodeDefaultValue         = "DefaultValue"
	NodeDirective            = "Directive"
	NodeDirectives           = "Directives"
	NodeDocument             = "Document"
	NodeEnumValue            = "EnumValue"
	NodeEOF                  = "EOF"
	NodeExecutableDefinition = "ExecutableDefinition"
	NodeField                = "Field"
	NodeFragmentDefinition   = "FragmentDefinition"
	NodeFragmentName         = "FragmentName"
	NodeFragmentSpread       = "FragmentSpread"
	NodeInlineFragment       = "InlineFragment"
	NodeListValue            = "ListValue"
	NodeName                 = "Name"
	NodeObjectField          = "ObjectField"
	NodeObjectValue          = "ObjectValue"
	NodeOperationDefinition  = "OperationDefinition"
	NodeOperationType        = "OperationType"
	NodeSelectionSet         = "SelectionSet"
	NodeType                 = "Type"
	NodeTypeCondition        = "TypeCondition"
	NodeValue                = "Value"
	NodeVariable             = "Variable"
	NodeVariableDefinition   = "VariableDefinition"
	NodeVariableDefinitions  = "VariableDefinitions"
)
+
/*
ValueNodes are AST nodes which contain a significant value. For these node
types the underlying lexer token value is included when the AST is
serialized or printed (see ASTNode.Plain and ASTNode.levelString).
*/
var ValueNodes = []string{
	NodeAlias,
	NodeDefaultValue,
	NodeEnumValue,
	NodeFragmentName,
	NodeFragmentSpread,
	NodeName,
	NodeObjectField,
	NodeOperationType,
	NodeType,
	NodeTypeCondition,
	NodeValue,
	NodeVariable,
}

+ 487 - 0
lang/graphql/parser/lexer.go

@@ -0,0 +1,487 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"devt.de/krotik/common/stringutil"
+)
+
/*
LexToken represents a token which is returned by the lexer.
*/
type LexToken struct {
	ID    LexTokenID // Token kind
	Pos   int        // Starting position of the token in the input (in runes)
	Val   string     // Token value
	Lline int        // Line in the input this token appears (1-based, see emitToken)
	Lpos  int        // Position in the input line this token appears (1-based)
}
+
+/*
+PosString returns the position of this token in the origianl input as a string.
+*/
+func (t LexToken) PosString() string {
+	return fmt.Sprintf("Line %v, Pos %v", t.Lline, t.Lpos)
+}
+
+/*
+String returns a string representation of a token.
+*/
+func (t LexToken) String() string {
+
+	switch {
+
+	case t.ID == TokenEOF:
+		return "EOF"
+
+	case t.ID == TokenError:
+		return fmt.Sprintf("Error: %s (%s)", t.Val, t.PosString())
+
+	case t.ID == TokenName:
+		return fmt.Sprintf("<%s>", t.Val)
+
+	case t.ID == TokenStringValue:
+		return fmt.Sprintf("\"%s\"", t.Val)
+
+	case t.ID == TokenIntValue:
+		return fmt.Sprintf("int(%s)", t.Val)
+
+	case t.ID == TokenFloatValue:
+		return fmt.Sprintf("flt(%s)", t.Val)
+	}
+
+	return fmt.Sprintf("%s", t.Val)
+}
+
/*
SymbolMap is a map of special symbols. All single character punctuators
defined by the GraphQL spec (@spec 2.1.8) map to TokenPunctuator; the
three character punctuator "..." cannot be expressed as a single rune
and is handled as a special case by the lexer.
*/
var SymbolMap = map[string]LexTokenID{
	"!": TokenPunctuator,
	"$": TokenPunctuator,
	"(": TokenPunctuator,
	")": TokenPunctuator,
	":": TokenPunctuator,
	"=": TokenPunctuator,
	"@": TokenPunctuator,
	"[": TokenPunctuator,
	"]": TokenPunctuator,
	"{": TokenPunctuator,
	"|": TokenPunctuator,
	"}": TokenPunctuator,
	// "..." Is checked as a special case
}
+
// Lexer
// =====

/*
RuneEOF is a special rune which represents the end of the input
*/
const RuneEOF = -1

/*
RuneComma is the rune for a comma. Commas are insignificant in GraphQL
and are treated like whitespace (see isIgnoredRune).
*/
const RuneComma = ','

/*
Function which represents the current state of the lexer and returns the next state
*/
type lexFunc func() lexFunc

/*
Lexer data structure
*/
type lexer struct {
	name   string        // Name to identify the input
	input  string        // Input string of the lexer
	pos    int           // Current rune pointer (byte offset into input)
	line   int           // Current line pointer
	lastnl int           // Last newline position
	width  int           // Width of last rune
	start  int           // Start position of the currently read token
	tokens chan LexToken // Channel for lexer output
}
+
+/*
+Lex lexes a given input. Returns a channel which contains tokens.
+*/
+func Lex(name string, input string) chan LexToken {
+
+	l := &lexer{name, input, 0, 0, 0, 0, 0, make(chan LexToken)}
+	go l.run()
+
+	return l.tokens
+}
+
+/*
+LexToList lexes a given input. Returns a list of tokens.
+*/
+func LexToList(name string, input string) []LexToken {
+	var tokens []LexToken
+
+	for t := range Lex(name, input) {
+		tokens = append(tokens, t)
+	}
+
+	return tokens
+}
+
/*
run is the main loop of the lexer. It drives the state machine: starting
with lexToken, each state function returns the next state (or nil to
stop, e.g. on EOF inside a comment or an unterminated string). Whitespace
is skipped between states; skipWhiteSpace returning false signals that
the end of the input was reached. The token channel is closed when the
loop terminates.
*/
func (l *lexer) run() {

	if l.skipWhiteSpace() {
		for state := l.lexToken; state != nil; {
			state = state()

			if !l.skipWhiteSpace() {
				break
			}
		}
	}

	close(l.tokens)
}
+
+/*
+next returns the next rune in the input and advances the current rune pointer if the
+peek value is -1 or smaller. If the peek value is 0 or greater then the nth token from the current
+position is returned without advancing the current rune pointer.
+*/
+func (l *lexer) next(peek int) rune {
+	var r rune
+	var w, peekw int
+
+	// Check if we reached the end
+
+	if int(l.pos) >= len(l.input) {
+		return RuneEOF
+	}
+
+	// Decode the next rune
+
+	peeklen := 1 + peek
+	if peeklen < 1 {
+		peeklen = 1
+	}
+
+	for i := 0; i < peeklen; i++ {
+		r, w = utf8.DecodeRuneInString(l.input[l.pos+peekw:])
+		peekw += w
+	}
+
+	if peek == -1 {
+		l.width = w
+		l.pos += l.width
+	}
+
+	return r
+}
+
+/*
+hasSequence checks if the next characters are of the following sequence.
+*/
+func (l *lexer) hasSequence(s string) bool {
+	runes := stringutil.StringToRuneSlice(s)
+	for i := 0; i < len(runes); i++ {
+		if l.next(i) != runes[i] {
+			return false
+		}
+	}
+	return true
+}
+
/*
startNew starts a new token by moving the token start marker to the
current position of the rune pointer.
*/
func (l *lexer) startNew() {
	l.start = l.pos
}
+
/*
emitToken passes a token with a given value back to the client. Line and
line position are reported 1-based, relative to the last seen newline.
(The original comment referred to this function as emitTokenAndValue.)
*/
func (l *lexer) emitToken(i LexTokenID, val string) {
	if l.tokens != nil {
		l.tokens <- LexToken{i, l.start, val, l.line + 1, l.start - l.lastnl + 1}
	}
}
+
+// State functions
+// ===============
+
+/*
+lexToken is the main entry function for the lexer.
+*/
+func (l *lexer) lexToken() lexFunc {
+
+	l.startNew()
+	l.lexTextBlock()
+
+	token := l.input[l.start:l.pos]
+
+	// Check for Comment - @spec 2.1.4, 2.1.7
+
+	if token == "#" {
+		return l.skipRestOfLine()
+	}
+
+	// Lexical tokens - @spec 2.1.6
+
+	// Check for String
+
+	if token == "\"" {
+		return l.lexStringValue()
+	}
+
+	// Check for Punctuator - @spec 2.1.8
+
+	if _, ok := SymbolMap[token]; ok || token == "..." {
+		l.emitToken(TokenPunctuator, token)
+		return l.lexToken
+	}
+
+	// Check for Name - @spec 2.1.9
+
+	isName, _ := regexp.MatchString("^[_A-Za-z][_0-9A-Za-z]*$", token)
+	if isName {
+		l.emitToken(TokenName, token)
+		return l.lexToken
+	}
+
+	// Check for IntValue - @spec 2.9.1
+
+	isZero, _ := regexp.MatchString("^-?0$", token)
+	isInt, _ := regexp.MatchString("^-?[1-9][0-9]*$", token)
+	if isZero || isInt {
+		l.emitToken(TokenIntValue, token)
+		return l.lexToken
+	}
+
+	// Check for FloatValue - @spec 2.9.2
+
+	isFloat1, _ := regexp.MatchString("^[0-9]*\\.[0-9]*$", token)
+	isFloat2, _ := regexp.MatchString("^[0-9][eE][+-]?[0-9]*$", token)
+	isFloat3, _ := regexp.MatchString("^[0-9]*\\.[0-9][eE][+-]?[0-9]*$", token)
+
+	if isFloat1 || isFloat2 || isFloat3 {
+		l.emitToken(TokenFloatValue, strings.ToLower(token))
+		return l.lexToken
+	}
+
+	// Everything else is an error
+
+	l.emitToken(TokenError, token)
+
+	return l.lexToken
+}
+
+/*
+lexTextBlock lexes a block of text without whitespaces. Interprets
+optionally all one or two letter tokens.
+*/
+func (l *lexer) lexTextBlock() {
+
+	r := l.next(0)
+
+	// Check if we start with a known symbol
+
+	if _, ok := SymbolMap[strings.ToLower(string(r))]; ok || r == '#' || r == '"' {
+		l.next(-1)
+		return
+	} else if r == '.' && l.hasSequence("...") {
+		l.next(-1)
+		l.next(-1)
+		l.next(-1)
+		return
+	}
+
+	for !l.isIgnoredRune(r) {
+		l.next(-1)
+
+		r = l.next(0)
+
+		// Check if we find a token in the block
+
+		if _, ok := SymbolMap[strings.ToLower(string(r))]; ok || r == '#' || r == '"' {
+			return
+		} else if r == '.' && l.hasSequence("...") {
+			return
+		}
+	}
+}
+
/*
lexStringValue lexes a string value either as a simple string or a block string.

Values can be declared in different ways:

" ... " A normal string (escape sequences are interpreted)

""" ... """ A multi-line string (escape sequences are not interpreted)
*/
func (l *lexer) lexStringValue() lexFunc {
	var isEnd func(rune) bool

	// String value lexing - @spec 2.9.4

	// Lookahead 2 tokens to distinguish "..." from """...""" - the
	// initial quote has already been consumed by lexToken

	r1 := l.next(0)
	r2 := l.next(1)

	isBlockString := r1 == '"' && r2 == '"'

	if isBlockString {

		// Consume the initial quotes for blockstrings

		l.next(-1)
		l.next(-1)

		// A block string ends on three consecutive quotes

		isEnd = func(r rune) bool {
			r1 := l.next(0)
			r2 := l.next(1)
			return r == '"' && r1 == '"' && r2 == '"'
		}

	} else {

		isEnd = func(r rune) bool {
			return r == '"'
		}
	}

	// Line tracking is done on local copies so that an error emitted
	// inside the string still reports the position of the opening quote

	r := l.next(-1)
	lLine := l.line
	lLastnl := l.lastnl

	for !isEnd(r) {

		if r == '\n' {
			lLine++
			lLastnl = l.pos
		}

		r = l.next(-1)

		if r == RuneEOF {
			l.emitToken(TokenError, "EOF inside quotes")
			return nil
		}
	}

	if !isBlockString {

		// Strip the surrounding quotes and interpret escape sequences

		val := l.input[l.start+1 : l.pos-1]

		s, err := strconv.Unquote("\"" + val + "\"")
		if err != nil {
			l.emitToken(TokenError, "Could not interpret escape sequence: "+err.Error())
			return nil
		}

		l.emitToken(TokenStringValue, s)

	} else {

		// Consume the final quotes for blockstrings

		l.next(-1)
		l.next(-1)

		token := l.input[l.start+3 : l.pos-3]

		// Since block strings represent freeform text often used in indented
		// positions, the string value semantics of a block string excludes uniform
		// indentation and blank initial and trailing lines
		// (from spec about 'Block Strings')

		token = stringutil.StripUniformIndentation(token)
		token = stringutil.TrimBlankLines(token)

		l.emitToken(TokenStringValue, token)
	}

	//  Set newline

	l.line = lLine
	l.lastnl = lLastnl

	return l.lexToken
}
+
+/*
+isIgnoredRune checks if a given rune should be ignored.
+*/
+func (l *lexer) isIgnoredRune(r rune) bool {
+
+	// Ignored tokens - @spec 2.1.1, 2.1.2, 2.1.3, 2.1.3, 2.1.5, 2.1.7
+
+	return unicode.IsSpace(r) || unicode.IsControl(r) || r == RuneEOF ||
+		r == RuneComma || r == '\ufeff'
+}
+
/*
skipWhiteSpace skips any number of whitespace characters. Returns false if the parser
reaches EOF while skipping whitespaces. In that case a TokenEOF is emitted
before returning.
*/
func (l *lexer) skipWhiteSpace() bool {
	r := l.next(0)

	for l.isIgnoredRune(r) {

		// Track newlines for position reporting

		if r == '\n' {
			l.line++
			l.lastnl = l.pos
		}

		l.next(-1)

		if r == RuneEOF {

			// NOTE(review): start is moved one position back so the EOF
			// token position compensates for the +1 offset applied in
			// emitToken - confirm against emitToken

			l.startNew()
			l.start--
			l.emitToken(TokenEOF, "")
			return false
		}

		r = l.next(0)
	}

	return true
}
+
+/*
+skipRestOfLine skips all characters until the next newline character.
+*/
+func (l *lexer) skipRestOfLine() lexFunc {
+	r := l.next(-1)
+
+	for r != '\n' && r != RuneEOF {
+		r = l.next(-1)
+	}
+
+	if r == RuneEOF {
+		return nil
+	}
+
+	l.line++
+	l.lastnl = l.pos - 1
+
+	return l.lexToken
+}

+ 246 - 0
lang/graphql/parser/lexer_test.go

@@ -0,0 +1,246 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"fmt"
+	"testing"
+)
+
// TestNextAndPeek verifies that lexer.next with a non-negative peek value
// looks ahead without moving the rune pointer, while peek == -1 consumes
// the rune and advances the pointer.
func TestNextAndPeek(t *testing.T) {
	l := &lexer{"", "Test", 0, 0, 0, 0, 0, make(chan LexToken)}

	if res := fmt.Sprintf("%c", l.next(0)); res != "T" {
		t.Error("Unexpected result:", res)
	}

	if res := fmt.Sprintf("%c", l.next(1)); res != "e" {
		t.Error("Unexpected result:", res)
	}

	if res := fmt.Sprintf("%c", l.next(2)); res != "s" {
		t.Error("Unexpected result:", res)
	}

	if res := fmt.Sprintf("%c", l.next(3)); res != "t" {
		t.Error("Unexpected result:", res)
	}

	if l.pos != 0 {
		t.Error("Lexer moved forward when it shouldn't: ", l.pos)
		return
	}

	if res := fmt.Sprintf("%c", l.next(-1)); res != "T" {
		t.Error("Unexpected result:", res)
	}

	if l.pos != 1 {
		t.Error("Lexer moved forward when it shouldn't: ", l.pos)
		return
	}

	if res := fmt.Sprintf("%c", l.next(-1)); res != "e" {
		t.Error("Unexpected result:", res)
	}

	if l.pos != 2 {
		t.Error("Lexer moved forward when it shouldn't: ", l.pos)
		return
	}
}
+
// TestSimpleLexing verifies basic tokenization: BOM handling, int/float
// classification (including exponent normalisation to lower case), the
// "..." punctuator and the leading-zero error case.
func TestSimpleLexing(t *testing.T) {

	if res := fmt.Sprint(LexToList("test", "\ufeff1!23")); res != `[int(1) ! int(23) EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", "1!23.4e+11 3E-5 11.1 .4$")); res !=
		`[int(1) ! flt(23.4e+11) flt(3e-5) flt(11.1) flt(.4) $ EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", "12!foo...bar99")); res !=
		`[int(12) ! <foo> ... <bar99> EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", "-0 0 1230 0123")); res !=
		`[int(-0) int(0) int(1230) Error: 0123 (Line 1, Pos 11) EOF]` {
		t.Error("Unexpected result:", res)
		return
	}
}
+
// TestLexingErrors verifies that an unterminated string produces an
// "EOF inside quotes" error token pointing at the opening quote.
func TestLexingErrors(t *testing.T) {

	if res := fmt.Sprint(LexToList("test", `"te`)); res != `[Error: EOF inside quotes (Line 1, Pos 1) EOF]` {
		t.Error("Unexpected result:", res)
		return
	}
}
+
// TestMultilineLexing verifies multi-line inputs: comments spanning to
// end of line, block strings ("""..."""), escape sequence handling in
// normal strings and position reporting across newlines.
func TestMultilineLexing(t *testing.T) {

	if res := fmt.Sprint(LexToList("test", `1!23#...4e+11
123
true
`)); res != `[int(1) ! int(23) int(123) <true> EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", `"""
123
"""
"["
[
"123"
"123\u2318"
"""123\u2318"""
"""
  bla
"""
`)); res != `["123" "[" [ "123" "123⌘" "123\u2318" "bla" EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", `"""
    Hello,
      World!

    Yours,
      GraphQL.
  """
`)); res != `["Hello,
  World!

Yours,
  GraphQL." EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	if res := fmt.Sprint(LexToList("test", `"Hello,\n  World!\n\nYours,\n  GraphQL."
`)); res != `["Hello,
  World!

Yours,
  GraphQL." EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	ll := LexToList("test", `"Hello,\n  World!\n\nYours,\n  GraphQL."
`)
	if res := ll[len(ll)-1].PosString(); res != "Line 2, Pos 1" {
		t.Error("Unexpected result:", res)
		return
	}
}
+
// TestIgnoredLexing verifies that commas, whitespace and comments are
// ignored and that an incomplete ".." sequence is reported as an error.
func TestIgnoredLexing(t *testing.T) {

	res := fmt.Sprint(LexToList("test", "1,2,3...abc\t\r\n#123\n"))

	if res != `[int(1) int(2) int(3) ... <abc> EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	res = fmt.Sprint(LexToList("test", "1,2,3 .. x abc\r\n#123\n"))

	if res != `[int(1) int(2) int(3) Error: .. (Line 1, Pos 7) <x> <abc> EOF]` {
		t.Error("Unexpected result:", res)
		return
	}

	res = fmt.Sprint(LexToList("test", "1,2,3 .. x abc\r\n#123"))

	if res != `[int(1) int(2) int(3) Error: .. (Line 1, Pos 7) <x> <abc> EOF]` {
		t.Error("Unexpected result:", res)
		return
	}
}
+
// TestSampleQueries runs the lexer over realistic GraphQL documents
// (queries with pagination/sort/filter arguments, fragments and a JSON
// style variables block) and compares the full token stream.
func TestSampleQueries(t *testing.T) {

	sampleQueries := [][]string{{`
query StudentsNormal {
  allStudents(pagination: {offset: 0, limit: 10}, sort: {fields: [{field: "studentNumber", order: ASC}]}, 
                           filter: {fields: [{op: NIN, value: "[Harry]", field: "name"}]}) {
    result {
      ...studentFields
      subjects {
        name
        classroom
      }
    }
    pagination {
      offset
      limit
      total
    }
  }
}
`, `[<query> <StudentsNormal> { <allStudents> ( <pagination> : { <offset> : int(0) <limit> : int(10) } <sort> : { <fields> : [ { <field> : "studentNumber" <order> : <ASC> } ] } <filter> : { <fields> : [ { <op> : <NIN> <value> : "[Harry]" <field> : "name" } ] } ) { <result> { ... <studentFields> <subjects> { <name> <classroom> } } <pagination> { <offset> <limit> <total> } } } EOF]`},

		{`
query StudentsJPA {
  allStudentsJPA(pagination: {offset: 0, limit: 10}, sort: {fields: [{field: "studentNumber", order: ASC}]}, filter: {fields: [{op: NIN, value: "[Harry]", field: "name"}]}) {
    ... on PaginationWrapper_Student {
      result {
        name
      }
    }
    result {
      ...studentFields
      ... on Student {
        enrolled
      }
      subjects {
        name
        classroom
      }
    }
    pagination {
      offset
      limit
      total
    }
  }
}
`, `[<query> <StudentsJPA> { <allStudentsJPA> ( <pagination> : { <offset> : int(0) <limit> : int(10) } <sort> : { <fields> : [ { <field> : "studentNumber" <order> : <ASC> } ] } <filter> : { <fields> : [ { <op> : <NIN> <value> : "[Harry]" <field> : "name" } ] } ) { ... <on> <PaginationWrapper_Student> { <result> { <name> } } <result> { ... <studentFields> ... <on> <Student> { <enrolled> } <subjects> { <name> <classroom> } } <pagination> { <offset> <limit> <total> } } } EOF]`},

		{`

# query variables
{
  "st": {
    "studentNumber": 63170004,
    "studentLoan": 631700.04,
    "name": "Latest",
    "surname": "Greatest"
  }
`, `[{ "st" : { "studentNumber" : int(63170004) "studentLoan" : flt(631700.04) "name" : "Latest" "surname" : "Greatest" } EOF]`}}

	for _, sampleQuery := range sampleQueries {

		if res := fmt.Sprint(LexToList("test", sampleQuery[0])); res != sampleQuery[1] {
			t.Error("Unexpected result\nGiven:\n", sampleQuery[0], "\nGot:\n", res, "\nExpected:\n", sampleQuery[1])
			return
		}
	}
}

+ 126 - 0
lang/graphql/parser/node.go

@@ -0,0 +1,126 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"bytes"
+	"fmt"
+
+	"devt.de/krotik/common/stringutil"
+)
+
/*
ASTNode models a node in the AST
*/
type ASTNode struct {
	Name     string     // Name of the node (one of the Node* constants)
	Token    *LexToken  // Lexer token of this ASTNode
	Children []*ASTNode // Child nodes
	Runtime  Runtime    // Runtime component for this ASTNode

	binding        int                                                             // Binding power of this node
	nullDenotation func(p *parser, self *ASTNode) (*ASTNode, error)                // Configure token as beginning node
	leftDenotation func(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) // Configure token as left node
}
+
+/*
+newAstNode creates an instance of this ASTNode which is connected to a concrete lexer token.
+*/
+func newAstNode(name string, p *parser, t *LexToken) *ASTNode {
+	ret := &ASTNode{name, t, make([]*ASTNode, 0, 2), nil, 0, nil, nil}
+	if p.rp != nil {
+		ret.Runtime = p.rp.Runtime(ret)
+	}
+	return ret
+}
+
+/*
+changeAstNode changes the name of a given ASTNode.
+*/
+func changeAstNode(node *ASTNode, newname string, p *parser) *ASTNode {
+	node.Name = newname
+	node.Runtime = nil
+	if p.rp != nil {
+		node.Runtime = p.rp.Runtime(node)
+	}
+	return node
+}
+
/*
instance creates a new instance of this ASTNode which is connected to a
concrete lexer token. Binding power and denotation functions are copied
from the template node; children and runtime start fresh.
*/
func (n *ASTNode) instance(p *parser, t *LexToken) *ASTNode {
	ret := &ASTNode{n.Name, t, make([]*ASTNode, 0, 2), nil, n.binding, n.nullDenotation, n.leftDenotation}
	if p.rp != nil {
		ret.Runtime = p.rp.Runtime(ret)
	}
	return ret
}
+
+/*
+Plain returns this ASTNode and all its children as plain AST. A plain AST
+only contains map objects, lists and primitive types which can be serialized
+with JSON.
+*/
+func (n *ASTNode) Plain() map[string]interface{} {
+	ret := make(map[string]interface{})
+
+	ret["name"] = n.Name
+
+	lenChildren := len(n.Children)
+
+	if lenChildren > 0 {
+		children := make([]map[string]interface{}, lenChildren)
+		for i, child := range n.Children {
+			children[i] = child.Plain()
+		}
+
+		ret["children"] = children
+	}
+
+	if stringutil.IndexOf(n.Name, ValueNodes) != -1 {
+		ret["value"] = n.Token.Val
+	}
+
+	return ret
+}
+
+/*
+String returns a string representation of this token.
+*/
+func (n *ASTNode) String() string {
+	var buf bytes.Buffer
+	n.levelString(0, &buf)
+	return buf.String()
+}
+
+/*
+levelString function to recursively print the tree.
+*/
+func (n *ASTNode) levelString(indent int, buf *bytes.Buffer) {
+
+	// Print current level
+
+	buf.WriteString(stringutil.GenerateRollingString(" ", indent*2))
+
+	if stringutil.IndexOf(n.Name, ValueNodes) != -1 {
+		buf.WriteString(fmt.Sprintf(n.Name+": %v", n.Token.Val))
+	} else {
+		buf.WriteString(n.Name)
+	}
+
+	buf.WriteString("\n")
+
+	// Print children
+
+	for _, child := range n.Children {
+		child.levelString(indent+1, buf)
+	}
+}

+ 830 - 0
lang/graphql/parser/parser.go

@@ -0,0 +1,830 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"fmt"
+
+	"devt.de/krotik/common/errorutil"
+)
+
+// Parser Rules
+// ============
+
/*
Maps of AST nodes corresponding to lexer tokens. The nodes are templates
from which concrete AST nodes are created via ASTNode.instance().
*/
var astNodeMapValues map[string]*ASTNode     // Template lookup by token value
var astNodeMapTokens map[LexTokenID]*ASTNode // Template lookup by token ID
// NOTE(review): astNodeMapIgnoredValues is declared but never assigned or
// referenced in the visible code - confirm whether it can be removed.
var astNodeMapIgnoredValues map[string]*ASTNode

func init() {
	// Each template entry is: name, token, children, runtime, binding,
	// null denotation, left denotation
	astNodeMapValues = map[string]*ASTNode{
		"query":        {NodeOperationDefinition, nil, nil, nil, 0, ndOperationDefinition, nil},
		"mutation":     {NodeOperationDefinition, nil, nil, nil, 0, ndOperationDefinition, nil},
		"subscription": {NodeOperationDefinition, nil, nil, nil, 0, ndOperationDefinition, nil},
		"fragment":     {NodeFragmentDefinition, nil, nil, nil, 0, ndFragmentDefinition, nil},
		"{":            {NodeSelectionSet, nil, nil, nil, 0, ndSelectionSet, nil},
		"(":            {NodeArguments, nil, nil, nil, 0, ndArgsOrVarDef, nil},
		"@":            {NodeDirectives, nil, nil, nil, 0, ndDirectives, nil},
		"$":            {NodeVariable, nil, nil, nil, 0, ndVariable, nil},
		"...":          {NodeFragmentSpread, nil, nil, nil, 0, ndFragmentSpread, nil},
		"[":            {NodeListValue, nil, nil, nil, 0, ndListValue, nil},

		// Tokens which are not part of the AST (can be retrieved by next but not be inserted by run)

		"}": {"", nil, nil, nil, 0, nil, nil},
		":": {"", nil, nil, nil, 0, nil, nil},
		")": {"", nil, nil, nil, 0, nil, nil},
		"=": {"", nil, nil, nil, 0, nil, nil},
		"]": {"", nil, nil, nil, 0, nil, nil},
	}
	astNodeMapTokens = map[LexTokenID]*ASTNode{
		TokenName:        {NodeName, nil, nil, nil, 0, ndTerm, nil},
		TokenIntValue:    {NodeValue, nil, nil, nil, 0, ndTerm, nil},
		TokenStringValue: {NodeValue, nil, nil, nil, 0, ndTerm, nil},
		TokenFloatValue:  {NodeValue, nil, nil, nil, 0, ndTerm, nil},
		TokenEOF:         {NodeEOF, nil, nil, nil, 0, ndTerm, nil},
	}
}
+
+// Parser
+// ======
+
/*
Parser data structure. The parser consumes lexer tokens from a channel and
builds the AST via Pratt-style null denotation functions. The two flags
below temporarily change how the next expression is interpreted.
*/
type parser struct {
	name   string          // Name to identify the input
	node   *ASTNode        // Current ast node
	tokens chan LexToken   // Channel which contains lex tokens
	rp     RuntimeProvider // Runtime provider which creates runtime components

	// Flags

	isVarDef bool // The next Arguments block is parsed as a VariableDefinition
	isValue  bool // The next expression is parsed as a value
}
+
/*
Parse parses a given input string and returns an AST. The returned AST has
no runtime components - use ParseWithRuntime to attach them.
*/
func Parse(name string, input string) (*ASTNode, error) {
	return ParseWithRuntime(name, input, nil)
}
+
/*
ParseWithRuntime parses a given input string and returns an AST decorated with
runtime components. The root of the returned AST is always a Document node
containing one ExecutableDefinition per parsed definition.
*/
func ParseWithRuntime(name string, input string, rp RuntimeProvider) (*ASTNode, error) {
	p := &parser{name, nil, Lex(name, input), rp, false, false}

	// Fetch the first token so the main loop has a current node

	node, err := p.next()

	if err != nil {
		return nil, err
	}

	p.node = node

	doc := newAstNode(NodeDocument, p, node.Token)

	// Parse definitions until the input is exhausted or an error occurs

	for err == nil && p.node.Name != NodeEOF {

		if node, err = p.run(0); err == nil {

			if node != nil && node.Name == NodeSelectionSet {

				// Handle query shorthand

				if len(doc.Children) == 0 {
					ed := newAstNode(NodeExecutableDefinition, p, node.Token)
					doc.Children = append(doc.Children, ed)
					od := newAstNode(NodeOperationDefinition, p, node.Token)
					ed.Children = append(ed.Children, od)
					od.Children = append(od.Children, node)

				} else {

					// The shorthand form is only valid for a single query operation

					return nil, p.newParserError(ErrMultipleShorthand,
						node.Token.String(), *node.Token)
				}
			} else {

				// All other definitions are wrapped in an ExecutableDefinition

				ed := newAstNode(NodeExecutableDefinition, p, node.Token)
				doc.Children = append(doc.Children, ed)
				ed.Children = append(ed.Children, node)
			}
		}
	}

	if err == nil {
		return doc, nil
	}

	return nil, err
}
+
/*
run is the main parser function. It applies the null denotation of the
current node and returns the resulting subtree.
*/
func (p *parser) run(rightBinding int) (*ASTNode, error) {
	var err error
	var left *ASTNode

	n := p.node

	// Get the next ASTNode

	if p.node, err = p.next(); err == nil {

		// All nodes have a null denotation

		// NOTE(review): this error reports the token of the *following*
		// node (p.node) rather than the offending node n - confirm this is
		// intentional; tests may pin the reported position.

		if n.nullDenotation == nil {
			return nil, p.newParserError(ErrImpossibleNullDenotation, p.node.Token.Val, *p.node.Token)
		}

		left, err = n.nullDenotation(p, n)
	}

	if err != nil {
		return nil, err
	}

	// At this point we would normally collect left denotations but this
	// parser has only null denotations

	errorutil.AssertTrue(rightBinding == p.node.binding, "Unexpected right binding")

	return left, nil
}
+
/*
next retrieves the next lexer token and returns it wrapped in an ASTNode
created from the matching template node.
*/
func (p *parser) next() (*ASTNode, error) {

	token, more := <-p.tokens

	if !more {

		// Unexpected end of input - the associated token is an empty error token

		return nil, p.newParserError(ErrUnexpectedEnd, "", token)

	} else if token.ID == TokenError {

		// There was a lexer error wrap it in a parser error

		return nil, p.newParserError(ErrLexicalError, token.Val, token)

	} else if node, ok := astNodeMapValues[token.Val]; ok &&
		(!p.isValue || token.ID == TokenPunctuator) && token.ID != TokenStringValue {

		// Parse complex expressions unless we parse a value (then just deal with punctuators).
		// String values are excluded so a string which happens to equal a keyword
		// (e.g. "query") is not mistaken for one.

		return node.instance(p, &token), nil

	} else if node, ok := astNodeMapTokens[token.ID]; ok {

		// Fall back to a lookup by token ID (names, values, EOF)

		return node.instance(p, &token), nil
	}

	return nil, p.newParserError(ErrUnknownToken, fmt.Sprintf("id:%v (%v)", token.ID, token), token)
}
+
+// Null denotation functions
+// =========================
+
/*
ndTerm is used for terminals. Terminals are returned as-is and consume no
further tokens.
*/
func ndTerm(p *parser, self *ASTNode) (*ASTNode, error) {
	return self, nil
}
+
/*
ndVariable is used for variables. (@spec 2.10)
If the "$" is not followed by a name the node keeps the "$" token.
*/
func ndVariable(p *parser, self *ASTNode) (*ASTNode, error) {
	var err error

	if p.node.Token.ID == TokenName {

		// Adopt the variable name token and move on

		self.Token = p.node.Token
		p.node, err = p.next()
	}

	return self, err
}
+
+/*
+ndListValue parses a list value. (@spec 2.9.7)
+*/
+func ndListValue(p *parser, self *ASTNode) (*ASTNode, error) {
+	var current *ASTNode
+	var err error
+
+	for p.node.Token.ID != TokenEOF && p.node.Token.Val != "]" {
+
+		// Parse list values
+
+		if current, err = parseValue(p); err == nil {
+			self.Children = append(self.Children, current)
+		} else {
+			return nil, err
+		}
+	}
+
+	return self, skipToken(p, "]")
+}
+
/*
ndInputObject parses an input object literal. (@spec 2.9.8)
The node is renamed from SelectionSet to ObjectValue and each "name: value"
pair becomes an ObjectField child.
*/
func ndInputObject(p *parser, self *ASTNode) (*ASTNode, error) {
	var current *ASTNode
	var err error

	changeAstNode(self, NodeObjectValue, p)

	for p.node.Token.ID != TokenEOF && p.node.Token.Val != "}" {

		if current, err = p.run(0); err == nil {

			// Each field must start with a name

			if current.Name != NodeName {

				err = p.newParserError(ErrNameExpected,
					current.Token.String(), *current.Token)

			} else {

				of := newAstNode(NodeObjectField, p, current.Token)
				self.Children = append(self.Children, of)

				if err = skipToken(p, ":"); err == nil {

					// Parse object value

					if current, err = parseValue(p); err == nil {
						of.Children = append(of.Children, current)
					}
				}
			}
		}

		if err != nil {
			return nil, err
		}
	}

	return self, skipToken(p, "}")
}
+
/*
ndFragmentSpread is used for fragment spreads and inline fragments.
(@spec 2.8, 2.8.2)
*/
func ndFragmentSpread(p *parser, self *ASTNode) (*ASTNode, error) {
	var current, expectedNameNode *ASTNode
	var err error

	if p.node.Token.Val == "on" {

		// We might have an inline fragment

		onToken := p.node.Token
		p.node, err = p.next()

		if err == nil && p.node.Name == NodeName {

			// Append the type condition

			changeAstNode(p.node, NodeTypeCondition, p)
			self.Children = append(self.Children, p.node)
			p.node, err = p.next()

		} else {

			// No name followed - keep the "on" token for error reporting

			self.Token = onToken
		}

	} else if p.node.Token.ID == TokenName {

		// Append the query name and move on

		self.Token = p.node.Token
		p.node, err = p.next()

	} else {

		// Remember the node so a missing name can be reported later

		expectedNameNode = p.node
	}

	if err == nil && p.node.Token.Val == "@" {

		// Parse directives

		if current, err = p.run(0); err == nil {
			self.Children = append(self.Children, current)
		}
	}

	if err == nil && p.node.Token.Val == "{" {

		// Parse selection set

		if current, err = p.run(0); err == nil {
			self.Children = append(self.Children, current)

			// If there is a selection set we must have an inline fragment

			changeAstNode(self, NodeInlineFragment, p)
		}

	} else if err == nil && expectedNameNode != nil {

		// Using the fragment spread operator without specifying a name nor
		// a selection set is an error

		err = p.newParserError(ErrNameExpected,
			expectedNameNode.Token.String(), *expectedNameNode.Token)
	}

	return self, err
}
+
/*
ndOperationDefinition parses an operation definition. Each operation is
represented by an optional operation name and a selection set. (@spec 2.3)
*/
func ndOperationDefinition(p *parser, self *ASTNode) (*ASTNode, error) {
	var err error

	// current starts as p.node so the mandatory-selection-set error below
	// always has a token to report

	var current = p.node

	ot := newAstNode(NodeOperationType, p, self.Token)
	self.Children = append(self.Children, ot)

	if p.node.Token.ID == TokenName {

		// Append the query name and move on

		self.Children = append(self.Children, p.node)
		p.node, err = p.next()
	}

	if err == nil && p.node.Token.Val == "(" {

		// Parse variable definition

		p.isVarDef = true

		if current, err = p.run(0); err == nil {
			self.Children = append(self.Children, current)
		}

		p.isVarDef = false
	}

	if err == nil && p.node.Token.Val == "@" {

		// Parse directive

		if current, err = p.run(0); err == nil {
			self.Children = append(self.Children, current)
		}
	}

	if err == nil && p.node.Token.Val == "{" {

		// Parse selection set

		if current, err = p.run(0); err == nil {
			self.Children = append(self.Children, current)
		}

	} else if err == nil {

		// Selection Set is mandatory

		err = p.newParserError(ErrSelectionSetExpected,
			current.Token.String(), *current.Token)
	}

	return self, err
}
+
+/*
+ndFragmentDefinition parses a fragment definition. Each fragment is
+represented by an optional fragment name, a type condition and a selection set.
+(@spec 2.8)
+*/
+func ndFragmentDefinition(p *parser, self *ASTNode) (*ASTNode, error) {
+	var err error
+	var current = p.node
+
+	if p.node.Token.ID == TokenName {
+
+		// Append the fragment name and move on
+
+		changeAstNode(p.node, NodeFragmentName, p)
+		self.Children = append(self.Children, p.node)
+
+		p.node, err = p.next()
+
+	} else {
+
+		err = p.newParserError(ErrNameExpected,
+			p.node.Token.String(), *p.node.Token)
+	}
+
+	if err == nil {
+
+		if p.node.Token.Val != "on" {
+
+			// Type conditions must start with on
+
+			err = p.newParserError(ErrOnExpected,
+				p.node.Token.String(), *p.node.Token)
+
+		} else {
+			p.node, err = p.next()
+
+			if p.node.Token.ID == TokenName {
+
+				// Append the fragment name
+
+				changeAstNode(p.node, NodeTypeCondition, p)
+				self.Children = append(self.Children, p.node)
+				p.node, err = p.next()
+
+			} else {
+
+				err = p.newParserError(ErrNameExpected,
+					p.node.Token.String(), *p.node.Token)
+			}
+		}
+	}
+
+	if err == nil && p.node.Token.Val == "@" {
+
+		// Parse directive
+
+		if current, err = p.run(0); err == nil {
+			self.Children = append(self.Children, current)
+		}
+	}
+
+	if err == nil && p.node.Token.Val == "{" {
+
+		// Parse selection set
+
+		if current, err = p.run(0); err == nil {
+			self.Children = append(self.Children, current)
+		}
+
+	} else if err == nil {
+
+		// Selection Set is mandatory
+
+		err = p.newParserError(ErrSelectionSetExpected,
+			p.node.Token.String(), *p.node.Token)
+	}
+
+	return self, err
+}
+
/*
ndSelectionSet parses a selection set. An operation selects the set of
information it needs. (@spec 2.4)
*/
func ndSelectionSet(p *parser, self *ASTNode) (*ASTNode, error) {
	var current *ASTNode
	var err error

	// Special case if we are parsing an input object literal (@spec 2.9.8)

	if p.isValue {
		return ndInputObject(p, self)
	}

	// Parse entries until the closing brace (or EOF) is reached

	for p.node.Token.ID != TokenEOF && p.node.Token.Val != "}" {

		if p.node.Token.Val == "..." {

			// Add a simple fragment spread

			if current, err = p.run(0); err == nil {
				self.Children = append(self.Children, current)
			}

		} else {

			// Everything else must be a field expression

			err = acceptFieldExpression(p, self)
		}

		if err != nil {
			return nil, err
		}
	}

	return self, skipToken(p, "}")
}
+
/*
acceptFieldExpression parses a field expression and appends it to the given
selection set node. A field is an optional alias, a name and then optional
arguments, directives and a nested selection set. (@spec 2.5, 2.6, 2.7)
*/
func acceptFieldExpression(p *parser, self *ASTNode) error {
	var err error

	// Field node gets the first token in the field expression

	fe := newAstNode(NodeField, p, p.node.Token)
	self.Children = append(self.Children, fe)

	current := p.node

	if p.node, err = p.next(); err == nil && p.node.Name != NodeEOF {

		if p.node.Token.Val == ":" {

			// Last node was an Alias not a name

			changeAstNode(current, NodeAlias, p)

			// Append Alias to Field children and move on

			fe.Children = append(fe.Children, current)

			// Skip the ":" and fetch the actual name node

			if p.node, err = p.next(); err == nil && p.node.Name != NodeEOF {
				current = p.node
				p.node, err = p.next()
			}
		}

		if err == nil && p.node.Name != NodeEOF {

			// Next node must be a Name

			if current.Name == NodeName {

				// Append Name to Field children and move on

				fe.Children = append(fe.Children, current)

			} else {

				err = p.newParserError(ErrNameExpected,
					current.Token.String(), *current.Token)
			}

			if err == nil && p.node.Token.Val == "(" {

				// Parse arguments

				if current, err = p.run(0); err == nil {
					fe.Children = append(fe.Children, current)
				}
			}

			if err == nil && p.node.Token.Val == "@" {

				// Parse directives

				if current, err = p.run(0); err == nil {
					fe.Children = append(fe.Children, current)
				}
			}

			if err == nil && p.node.Token.Val == "{" {

				// Parse nested selection set

				if current, err = p.run(0); err == nil {
					fe.Children = append(fe.Children, current)
				}
			}
		}
	}

	return err
}
+
+/*
+ndArgsOrVarDef parses an argument or variable definition expression. (@spec 2.6, 2.10)
+*/
+func ndArgsOrVarDef(p *parser, self *ASTNode) (*ASTNode, error) {
+	var err error
+	var args, arg, current *ASTNode
+
+	// Create a list token
+
+	if p.isVarDef {
+		args = newAstNode(NodeVariableDefinitions, p, p.node.Token)
+	} else {
+		args = newAstNode(NodeArguments, p, p.node.Token)
+	}
+
+	for err == nil && p.node.Token.ID != TokenEOF && p.node.Token.Val != ")" {
+
+		if p.isVarDef {
+			arg = newAstNode(NodeVariableDefinition, p, p.node.Token)
+		} else {
+			arg = newAstNode(NodeArgument, p, p.node.Token)
+		}
+
+		args.Children = append(args.Children, arg)
+
+		if current, err = p.run(0); err == nil {
+
+			if !p.isVarDef && current.Name != NodeName {
+				err = p.newParserError(ErrNameExpected,
+					current.Token.String(), *current.Token)
+
+			} else if p.isVarDef && current.Name != NodeVariable {
+				err = p.newParserError(ErrVariableExpected,
+					current.Token.String(), *current.Token)
+
+			} else {
+
+				// Add name
+
+				arg.Children = append(arg.Children, current)
+
+				if err = skipToken(p, ":"); err == nil {
+
+					// Add value
+
+					if p.isVarDef {
+						if current, err = p.run(0); err == nil {
+							changeAstNode(current, NodeType, p)
+							arg.Children = append(arg.Children, current)
+						}
+					} else {
+						if current, err = parseValue(p); err == nil {
+							arg.Children = append(arg.Children, current)
+						}
+					}
+
+					if err == nil && p.isVarDef && p.node.Token.Val == "=" {
+
+						skipToken(p, "=")
+
+						// Parse default value
+
+						if current, err = parseValue(p); err == nil {
+							changeAstNode(current, NodeDefaultValue, p)
+							arg.Children = append(arg.Children, current)
+						}
+					}
+
+				}
+			}
+		}
+	}
+
+	// Must have a closing bracket
+
+	if err == nil {
+		return args, skipToken(p, ")")
+	}
+
+	return nil, err
+}
+
+/*
+parseValue parses a value and returns the result. (@spec 2.9)
+*/
+func parseValue(p *parser) (*ASTNode, error) {
+	p.isValue = true
+	current, err := p.run(0)
+	p.isValue = false
+
+	if err == nil {
+
+		if current.Token.Val == "true" ||
+			current.Token.Val == "false" ||
+			current.Token.Val == "null" ||
+			current.Token.ID == TokenIntValue ||
+			current.Token.ID == TokenFloatValue ||
+			current.Token.ID == TokenStringValue {
+
+			// Simple constant values
+
+			changeAstNode(current, NodeValue, p)
+
+		} else if current.Name == NodeName {
+
+			// Enum values
+
+			changeAstNode(current, NodeEnumValue, p)
+
+		} else {
+
+			// Everything else must be a variable or a complex data type
+
+			errorutil.AssertTrue(current.Name == NodeVariable ||
+				current.Name == NodeListValue ||
+				current.Name == NodeObjectValue, fmt.Sprint("Unexpected value node:", current))
+		}
+
+		if err == nil {
+			return current, err
+		}
+	}
+
+	return nil, err
+}
+
/*
ndDirectives parses a directive expression. Consecutive directives are
handled by recursing after each parsed directive. (@spec 2.12)
*/
func ndDirectives(p *parser, self *ASTNode) (*ASTNode, error) {
	var err error
	var current = p.node

	dir := newAstNode(NodeDirective, p, p.node.Token)

	// A directive is a name followed by an (argument) expression

	if err = acceptChild(p, dir, TokenName); err == nil {

		if current, err = p.run(0); err == nil {

			dir.Children = append(dir.Children, current)
			self.Children = append(self.Children, dir)

			// Further "@" tokens start another directive on the same node

			if p.node.Token.Val == "@" {

				if p.node, err = p.next(); err == nil {
					return ndDirectives(p, self)
				}
			}
		}
	}

	return self, err
}
+
+// Helper functions
+// ================
+
+/*
+skipToken skips over a token if it has one of the given valid values.
+*/
+func skipToken(p *parser, validValues ...string) error {
+	var err error
+
+	canSkip := func(val string) bool {
+		for _, i := range validValues {
+			if i == val {
+				return true
+			}
+		}
+		return false
+	}
+
+	if !canSkip(p.node.Token.Val) {
+
+		if p.node.Token.ID == TokenEOF {
+			return p.newParserError(ErrUnexpectedEnd, "", *p.node.Token)
+		}
+
+		return p.newParserError(ErrUnexpectedToken, p.node.Token.Val, *p.node.Token)
+	}
+
+	// This should never return an error unless we skip over EOF or complex tokens
+	// like values
+
+	p.node, err = p.next()
+
+	return err
+}
+
+/*
+acceptChild accepts the current token as a child.
+*/
+func acceptChild(p *parser, self *ASTNode, id LexTokenID) error {
+	var err error
+
+	current := p.node
+
+	if p.node, err = p.next(); err == nil {
+
+		if current.Token.ID == id {
+			self.Children = append(self.Children, current)
+		} else {
+			err = p.newParserError(ErrUnexpectedToken, current.Token.Val, *current.Token)
+		}
+	}
+
+	return err
+}

File diff suppressed because it is too large
+ 1236 - 0
lang/graphql/parser/parser_test.go


+ 66 - 0
lang/graphql/parser/parsererrors.go

@@ -0,0 +1,66 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
+import (
+	"errors"
+	"fmt"
+)
+
+/*
+newParserError creates a new ParserError object.
+*/
+func (p *parser) newParserError(t error, d string, token LexToken) error {
+	return &Error{p.name, t, d, token.Lline, token.Lpos}
+}
+
/*
Error models a parser related error
*/
type Error struct {
	Source string // Name of the source which was given to the parser
	Type   error  // Error type (to be used for equal checks)
	Detail string // Details of this error
	Line   int    // Line of the error
	Pos    int    // Position of the error
}

/*
Error returns a human-readable string representation of this error.
*/
func (pe *Error) Error() string {

	// Start with the base message and append the detail if there is one

	msg := fmt.Sprintf("Parse error in %s: %v", pe.Source, pe.Type)

	if pe.Detail != "" {
		msg = fmt.Sprintf("%s (%v)", msg, pe.Detail)
	}

	return fmt.Sprintf("%s (Line:%d Pos:%d)", msg, pe.Line, pe.Pos)
}
+
/*
Parser related error types. These are used as the Type field of Error so
callers can compare against them.

NOTE(review): the error strings are capitalized which staticcheck (ST1005)
flags; they are left unchanged here since tests may pin the exact messages.
*/
var (
	ErrImpossibleLeftDenotation = errors.New("Term can only start an expression")
	ErrImpossibleNullDenotation = errors.New("Term cannot start an expression")
	ErrLexicalError             = errors.New("Lexical error")
	ErrNameExpected             = errors.New("Name expected")
	ErrOnExpected               = errors.New("Type condition starting with 'on' expected")
	ErrSelectionSetExpected     = errors.New("Selection Set expected")
	ErrMultipleShorthand        = errors.New("Query shorthand only allowed for one query operation")
	ErrUnexpectedEnd            = errors.New("Unexpected end")
	ErrUnexpectedToken          = errors.New("Unexpected term")
	ErrUnknownToken             = errors.New("Unknown term")
	ErrValueOrVariableExpected  = errors.New("Value or variable expected")
	ErrVariableExpected         = errors.New("Variable expected")
)

+ 37 - 0
lang/graphql/parser/runtime.go

@@ -0,0 +1,37 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package parser
+
/*
RuntimeProvider provides runtime components for a parse tree. It is handed
to ParseWithRuntime which attaches a component to every created ASTNode.
*/
type RuntimeProvider interface {

	/*
	   Runtime returns a runtime component for a given ASTNode.
	*/
	Runtime(node *ASTNode) Runtime
}
+
/*
Runtime provides the runtime for an ASTNode.
*/
type Runtime interface {

	/*
	   Validate validates this runtime component and all its child components.
	*/
	Validate() error

	/*
	   Eval evaluates this runtime component.
	*/
	Eval() (map[string]interface{}, error)
}

+ 227 - 0
lockutil/lockfile.go

@@ -0,0 +1,227 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package lockutil contains a file based lock which can be used to lock file resources
+across different processes. The lock file is monitored by a Go routine. Invalidating
+the lock file (e.g. just writing a single character to it) causes the Go routine
+to exit. A client can check if the lockfile is still being monitored by calling
+WatcherRunning().
+*/
+package lockutil
+
import (
	"encoding/binary"
	"errors"
	"fmt"
	"os"
	"time"
)
+
/*
LockFile data structure. The timestamp identifies the owning process - the
watcher goroutine repeatedly rewrites it and detects a competing process
when a different value appears in the file.
*/
type LockFile struct {
	filename  string        // Filename for LockFile
	timestamp int64         // Timestamp to uniquely identify the lockfile
	interval  time.Duration // Interval with which the file should be watched
	errorChan chan error    // Error communication channel with watcher goroutine
	running   bool          // Flag to indicate that a lockfile is being watched
}
+
+/*
+NewLockFile creates a new LockFile which and watch it in given intervals.
+*/
+func NewLockFile(filename string, interval time.Duration) *LockFile {
+	return &LockFile{filename, time.Now().UnixNano(), interval, nil, false}
+}
+
/*
watch is the internal watcher goroutine function. It claims the lockfile,
reports the outcome on errorChan and then keeps rewriting the timestamp
every interval until running is set to false or reading the file fails.

NOTE(review): running is read and written from both the watcher goroutine
and the public methods without synchronization - confirm whether this
unguarded access is acceptable for the intended use.
*/
func (lf *LockFile) watch() {

	// Attempt to read the lockfile - no error checking since the next write
	// lockfile call will catch any file related errors

	res, _ := lf.checkLockfile()

	if err := lf.writeLockfile(); err != nil {
		lf.errorChan <- err
		return
	}

	if res != 0 {

		time.Sleep(lf.interval * 10)

		// If we have overwritten an existing timestamp then check
		// if it was overwritten again by another process after some time

		res, err := lf.checkLockfile()

		if res != lf.timestamp || err != nil {

			lf.errorChan <- errors.New(fmt.Sprint(
				"Could not write lockfile - read result after writing: ", res,
				"(expected: ", lf.timestamp, ")", err))
			return
		}
	}

	// Signal that all is well

	lf.running = true
	lf.errorChan <- nil

	for lf.running {

		// Wakeup every interval and read the file

		time.Sleep(lf.interval)

		res, err := lf.checkLockfile()
		if err != nil {

			// Shut down if we get an error back

			lf.running = false
			lf.errorChan <- err

			return
		}

		if res != lf.timestamp {

			// Attempt to write the timestamp again - no error checking
			// if it fails we'll try again next time

			lf.writeLockfile()
		}
	}

	// At this point lf.running is false - remove lockfile and return

	lf.errorChan <- os.Remove(lf.filename)
}
+
+/*
+Write a timestamp to the lockfile
+*/
+func (lf *LockFile) writeLockfile() error {
+	file, err := os.OpenFile(lf.filename, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	data := make([]byte, 8)
+
+	data[0] = byte(lf.timestamp >> 56)
+	data[1] = byte(lf.timestamp >> 48)
+	data[2] = byte(lf.timestamp >> 40)
+	data[3] = byte(lf.timestamp >> 32)
+	data[4] = byte(lf.timestamp >> 24)
+	data[5] = byte(lf.timestamp >> 16)
+	data[6] = byte(lf.timestamp >> 8)
+	data[7] = byte(lf.timestamp >> 0)
+
+	_, err = file.Write(data)
+
+	return err
+}
+
+/*
+Try to read a timestamp from a lockfile
+*/
+func (lf *LockFile) checkLockfile() (int64, error) {
+	file, err := os.OpenFile(lf.filename, os.O_RDONLY, 0660)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return 0, nil
+		}
+		return 0, err
+	}
+	defer file.Close()
+
+	// Read timestamp
+	timestamp := make([]byte, 8)
+	i, err := file.Read(timestamp)
+
+	if i != 8 {
+		return 0, errors.New(fmt.Sprint("Unexpected timestamp value found in lockfile:", timestamp))
+	}
+
+	return (int64(timestamp[0]) << 56) |
+		(int64(timestamp[1]) << 48) |
+		(int64(timestamp[2]) << 40) |
+		(int64(timestamp[3]) << 32) |
+		(int64(timestamp[4]) << 24) |
+		(int64(timestamp[5]) << 16) |
+		(int64(timestamp[6]) << 8) |
+		(int64(timestamp[7]) << 0), err
+}
+
/*
Start creates the lockfile and starts watching it. The call blocks until
the watcher goroutine reports whether the lockfile could be claimed.
*/
func (lf *LockFile) Start() error {

	// Do nothing if the lockfile is already being watched

	if lf.running {
		return nil
	}

	// Set the running flag and kick off the watcher goroutine

	lf.errorChan = make(chan error)

	go lf.watch()

	// The first message on the channel is the startup result

	return <-lf.errorChan
}
+
/*
WatcherRunning returns if the watcher goroutine is running. It becomes
false once the watcher shuts down, e.g. after the lockfile was invalidated.
*/
func (lf *LockFile) WatcherRunning() bool {
	return lf.running
}
+
/*
Finish stops watching a lockfile and returns once the watcher goroutine
has finished. On a clean shutdown the watcher removes the lockfile.
*/
func (lf *LockFile) Finish() error {
	var err error

	// Do nothing if the lockfile is not being watched

	if !lf.running {

		// Clean up if there is a channel still open

		if lf.errorChan != nil {
			err = <-lf.errorChan
			lf.errorChan = nil
		}

		return err
	}

	// Signal the watcher goroutine to stop

	lf.running = false

	// Wait for the goroutine to finish

	err = <-lf.errorChan
	lf.errorChan = nil

	return err
}

+ 139 - 0
lockutil/lockfile_test.go

@@ -0,0 +1,139 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package lockutil
+
+import (
+	"flag"
+	"fmt"
+	"os"
+	"testing"
+	"time"
+
+	"devt.de/krotik/common/fileutil"
+)
+
// lfdir is the directory in which test lockfiles are created.
const lfdir = "lockfiletest"

// invalidFileName is an invalid filename on all platforms. The NUL byte is
// written as an explicit escape - the previous string(0x0) conversion of an
// untyped integer constant is flagged by go vet (use string(rune(...))).
const invalidFileName = "**\x00"
+
// TestMain creates a fresh test directory, runs all tests and removes the
// directory again afterwards.
func TestMain(m *testing.M) {
	flag.Parse()

	// Setup - remove any leftovers from a previous run
	if res, _ := fileutil.PathExists(lfdir); res {
		os.RemoveAll(lfdir)
	}

	err := os.Mkdir(lfdir, 0770)
	if err != nil {
		fmt.Print("Could not create test directory:", err.Error())
		os.Exit(1)
	}

	// Run the tests
	res := m.Run()

	// Teardown
	err = os.RemoveAll(lfdir)
	if err != nil {
		fmt.Print("Could not remove test directory:", err.Error())
	}

	os.Exit(res)
}
+
// TestLockFile exercises the happy path, contention between two LockFile
// instances on the same file and various error cases.
func TestLockFile(t *testing.T) {

	duration := time.Duration(3) * time.Millisecond

	// Straight case

	lf := NewLockFile(lfdir+"/test1.lck", duration)

	if err := lf.Start(); err != nil {
		t.Error(err)
		return
	}

	if err := lf.Finish(); err != nil {
		t.Error(err)
		return
	}

	// Simulate 2 process opening the same lockfile

	lf1 := &LockFile{lfdir + "/test2.lck", 1, duration, nil, false}
	if err := lf1.Start(); err != nil {
		t.Error(err)
		return
	}

	// The second instance must fail to claim the already watched lockfile

	lf2 := &LockFile{lfdir + "/test2.lck", 2, duration, nil, false}
	if err := lf2.Start(); err == nil {
		t.Error("Unexpected result while starting lockfile watch:", err)
		return
	}

	if err := lf1.Finish(); err != nil {
		t.Error(err)
		return
	}

	// Test error cases

	lf3 := &LockFile{lfdir + "/" + invalidFileName, 1, duration, nil, false}
	if err := lf3.Start(); err == nil {
		t.Error("Unexpected result while starting lockfile watch:", err)
		return
	}

	lf = &LockFile{lfdir + "/test3.lck", 1, duration, nil, false}
	if err := lf.Start(); err != nil {
		t.Error(err)
		return
	}

	// Calling start twice should have no effect

	if err := lf.Start(); err != nil {
		t.Error(err)
		return
	}

	// Switching to an invalid filename makes the next rewrite fail and
	// shuts the watcher down

	lf.filename = lfdir + "/" + invalidFileName

	for lf.WatcherRunning() {
		time.Sleep(lf.interval * 2)
	}

	// NOTE(review): this check can never trigger - the loop above only
	// exits once WatcherRunning() is false

	if lf.WatcherRunning() {
		t.Error("Watcher is still running")
		return
	}

	if err := lf.Finish(); err == nil {
		t.Error("Unexpected finish result")
		return
	}

	// A lockfile with fewer than 8 bytes must be rejected

	file, err := os.OpenFile(lfdir+"/test4.lck", os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
	if err != nil {
		t.Error(err)
		return
	}
	file.Write(make([]byte, 3))
	file.Close()

	lf = &LockFile{lfdir + "/test4.lck", 1, duration, nil, false}
	if _, err := lf.checkLockfile(); err == nil || err.Error() != "Unexpected timestamp value found in lockfile:[0 0 0 0 0 0 0 0]" {
		t.Error("Unexpected checkLockfile result:", err)
		return
	}
}

+ 111 - 0
logutil/formatter.go

@@ -0,0 +1,111 @@
+package logutil
+
+import (
+	"fmt"
+	"strings"
+
+	"devt.de/krotik/common/testutil"
+	"devt.de/krotik/common/timeutil"
+)
+
/*
Formatter is used to format log messages before they are written to a sink.
*/
type Formatter interface {

	/*
	   Format formats a given log message into a string.
	*/
	Format(level Level, scope string, msg ...interface{}) string
}
+
/*
ConsoleFormatter returns a simple formatter which does a simple fmt.Sprintln
on all log messages. It only adds the log level.
*/
func ConsoleFormatter() Formatter {
	return &consoleFormatter{}
}
+
/*
consoleFormatter is the console formatter implementation. It is stateless.
*/
type consoleFormatter struct {
}
+
+/*
+Format formats a given log message into a string.
+*/
+func (sf *consoleFormatter) Format(level Level, scope string, msg ...interface{}) string {
+	return fmt.Sprintln(fmt.Sprintf("%v:", level), fmt.Sprint(msg...))
+}
+
/*
SimpleFormatter returns a simple formatter which does a simple fmt.Sprintln
on all log messages. It also adds a current timestamp, the message scope and
log level.
*/
func SimpleFormatter() Formatter {
	return &simpleFormatter{timeutil.MakeTimestamp}
}
+
/*
simpleFormatter is the simple formatter implementation. The timestamp
function is a field so tests can substitute a deterministic clock.
*/
type simpleFormatter struct {
	tsFunc func() string // Timestamp function
}
+
+/*
+Format formats a given log message into a string.
+*/
+func (sf *simpleFormatter) Format(level Level, scope string, msg ...interface{}) string {
+	if scope == "" {
+		return fmt.Sprintln(sf.tsFunc(), level, fmt.Sprint(msg...))
+	}
+
+	return fmt.Sprintln(sf.tsFunc(), level, scope, fmt.Sprint(msg...))
+}
+
/*
TemplateFormatter returns a formatter which produces log messages according to
a given template string. The template string may contain one or more of the
following directives:

%s         The scope of the log message
%l         The level of the log message
%t         Current timestamp (milliseconds elapsed since January 1, 1970 UTC)
%f         Function in which the log message was issued e.g. foo.bar.MyFunc()
%c         Code location of the log statement which issuing the log message e.g. package/somefile.go:12
%m         The log message and its arguments formatted with fmt.Sprintf()
*/
func TemplateFormatter(template string) Formatter {
	return &templateFormatter{template, timeutil.MakeTimestamp}
}
+
/*
templateFormatter is the template formatter implementation. The timestamp
function is a field so tests can substitute a deterministic clock.
*/
type templateFormatter struct {
	template string        // Template for a log message
	tsFunc   func() string // Timestamp function
}
+
+/*
+Format formats a given log message into a string.
+*/
+func (sf *templateFormatter) Format(level Level, scope string, msg ...interface{}) string {
+
+	name, loc := testutil.GetCaller(2)
+
+	out := sf.template
+
+	out = strings.Replace(out, "%s", scope, -1)
+	out = strings.Replace(out, "%l", fmt.Sprint(level), -1)
+	out = strings.Replace(out, "%t", sf.tsFunc(), -1)
+	out = strings.Replace(out, "%f", name, -1)
+	out = strings.Replace(out, "%c", loc, -1)
+	out = strings.Replace(out, "%m", fmt.Sprint(msg...), -1)
+
+	return fmt.Sprintln(out)
+}

+ 72 - 0
logutil/formatter_test.go

@@ -0,0 +1,72 @@
+package logutil
+
+import (
+	"bytes"
+	"strings"
+	"testing"
+)
+
// TestFormatting checks the template formatter: directive substitution with a
// stubbed timestamp and the %c code-location directive.
//
// WARNING: the "%c" assertion below expects a logging call on line 47 of this
// file - do not add or remove lines above that call.
func TestFormatting(t *testing.T) {
	ClearLogSinks()

	sf := TemplateFormatter("%t [%l] %s %m") // empty scope leaves a double space in the expected output

	sf.(*templateFormatter).tsFunc = func() string {
		return "0000000000000" // Timestamp for testing is always 0
	}

	rootBuf := &bytes.Buffer{}
	logger := GetLogger("") // root scope logger

	logger.AddLogSink(Debug, sf, rootBuf)

	logger.Info("foo")
	logger.Warning("bar")

	if rootBuf.String() != `
0000000000000 [Info]  foo
0000000000000 [Warning]  bar
`[1:] {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	ClearLogSinks()

	sf = TemplateFormatter("%c - %m") // %c expands to file:line of the log call

	sf.(*templateFormatter).tsFunc = func() string {
		return "0000000000000" // Timestamp for testing is always 0
	}

	rootBuf = &bytes.Buffer{}
	logger = GetLogger("")

	logger.AddLogSink(Debug, sf, rootBuf)

	logger.Info("foo") // NOTE: must be on line 47 - the assertion below depends on it
	logger.Warning("bar")

	if !strings.Contains(rootBuf.String(), "formatter_test.go:47") {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	// The console formatter only prints level and message

	ClearLogSinks()

	rootBuf = &bytes.Buffer{}
	logger = GetLogger("")

	logger.AddLogSink(Debug, ConsoleFormatter(), rootBuf)

	logger.Info("foo")
	logger.Warning("bar")

	if rootBuf.String() != `
Info: foo
Warning: bar
`[1:] {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}
}

+ 299 - 0
logutil/logger.go

@@ -0,0 +1,299 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package logutil contains a simple leveled logging infrastructure supporting
+different log levels, package scopes, formatters and handlers.
+
+The main object is the Logger object which requires a scope. Use
+GetLogger(scope string) to get an instance. Log messages are published
+by various log methods (e.g. Info).
+
+The logger object is also used to add sinks which consume log messages.
Each sink requires a formatter which formats / decorates incoming log
+messages. Log messages are handled by the most specific scoped sinks which
+allow the message level.
+
+Example:
+
+	logger = GetLogger("foo.bar")
+
+	logger.AddLogSink(Info, SimpleFormatter(), myLogFile)
+
+	logger.Info("A log message")
+*/
+package logutil
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"runtime/debug"
+	"sort"
+	"strings"
+	"sync"
+)
+
/*
fallbackLogger is used if there are errors during regular logging or if no
sink handles a message (replaceable for testing).
*/
var fallbackLogger = log.Print
+
/*
Level represents a logging level
*/
type Level string

/*
Log levels. Each constant carries an explicit type - in the original
declaration only Debug was typed Level while Info, Warning and Error were
untyped string constants.
*/
const (
	Debug   Level = "Debug"
	Info    Level = "Info"
	Warning Level = "Warning"
	Error   Level = "Error"
)
+
/*
logLevelPriority is a map assigning priorities to log levels (lower number
means a higher priority). A sink handles a message if the sink's priority
number is less than or equal to the message's (see publishLog).
*/
var logLevelPriority = map[Level]int{
	Debug:   1,
	Info:    2,
	Warning: 3,
	Error:   4,
}
+
/*
stringToLoglevel is a map assigning log levels to their lowercase string
representations (used for case-insensitive lookups).
*/
var stringToLoglevel = map[string]Level{
	strings.ToLower(fmt.Sprint(Debug)):   Debug,
	strings.ToLower(fmt.Sprint(Info)):    Info,
	strings.ToLower(fmt.Sprint(Warning)): Warning,
	strings.ToLower(fmt.Sprint(Error)):   Error,
}
+
+/*
+StringToLoglevel tries to turn a given string into a log level.
+*/
+func StringToLoglevel(loglevelString string) Level {
+	level, _ := stringToLoglevel[strings.ToLower(loglevelString)]
+	return level
+}
+
/*
Logger is the main logging object which is used to add sinks and publish
log messages. A log messages is only handled by the most appropriate sink
in terms of level and scope. Multiple sinks can be registered for the same
level and scope.
*/
type Logger interface {

	/*
	   AddLogSink adds a log sink to a logger. A log sink can be a file or console
	   which satisfies the io.Writer interface.
	*/
	AddLogSink(loglevel Level, formatter Formatter, appender io.Writer)

	/*
		Debug logs a message at debug level.
	*/
	Debug(msg ...interface{})

	/*
		Info logs a message at info level.
	*/
	Info(msg ...interface{})

	/*
		Warning logs a message at warning level.
	*/
	Warning(msg ...interface{})

	/*
		Error logs a message at error level.
	*/
	Error(msg ...interface{})

	/*
		LogStackTrace logs a message at the given level together with a
		stacktrace of the calling goroutine.
	*/
	LogStackTrace(loglevel Level, msg ...interface{})
}
+
/*
GetLogger returns a logger of a certain scope. Use the empty string '' for the
root scope. Loggers are cheap stateless handles - all sink registrations are
kept in package-level state.
*/
func GetLogger(scope string) Logger {
	return &logger{scope}
}
+
/*
ClearLogSinks removes all configured log sinks. After this call all messages
go to the fallback logger until new sinks are registered.
*/
func ClearLogSinks() {
	logSinksLock.Lock()
	defer logSinksLock.Unlock()

	logSinks = make([][]*logSink, 0)
}
+
/*
logger is the main Logger interface implementation. It only carries the
scope string; all sink state lives at package level.
*/
type logger struct {
	scope string
}
+
/*
AddLogSink adds a log sink to a logger. A log sink can be a file or console
which satisfies the io.Writer interface. The sink is registered for this
logger's scope.
*/
func (l *logger) AddLogSink(loglevel Level, formatter Formatter, appender io.Writer) {
	addLogSink(loglevel, l.scope, formatter, appender)
}
+
/*
Debug logs a message at debug level. The message is dispatched to the most
specific registered sink group matching this logger's scope (see publishLog).
*/
func (l *logger) Debug(msg ...interface{}) {
	publishLog(Debug, l.scope, msg...)
}

/*
Info logs a message at info level.
*/
func (l *logger) Info(msg ...interface{}) {
	publishLog(Info, l.scope, msg...)
}

/*
Warning logs a message at warning level.
*/
func (l *logger) Warning(msg ...interface{}) {
	publishLog(Warning, l.scope, msg...)
}

/*
Error logs a message at error level.
*/
func (l *logger) Error(msg ...interface{}) {
	publishLog(Error, l.scope, msg...)
}
+
/*
LogStackTrace logs a message at the given level together with a stacktrace
of the calling goroutine (appended after a newline).
*/
func (l *logger) LogStackTrace(loglevel Level, msg ...interface{}) {
	msg = append(msg, fmt.Sprintln())
	msg = append(msg, string(debug.Stack()))
	publishLog(loglevel, l.scope, msg...)
}
+
+// Singleton logger
+// ================
+
/*
logSink models a single log sink: an output writer together with the
minimum level, the scope it was registered for and the formatter used to
render messages.
*/
type logSink struct {
	io.Writer
	level     Level
	scope     string
	formatter Formatter
}
+
/*
Implementation of sort interface for logSinks. Groups are ordered by scope
in descending lexicographic order so the most specific scope (longest
prefix) is visited first by publishLog.
*/
type sinkSlice [][]*logSink

func (p sinkSlice) Len() int           { return len(p) }
func (p sinkSlice) Less(i, j int) bool { return p[i][0].scope > p[j][0].scope }
func (p sinkSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
/*
logSinks contains all registered log sinks, grouped into one inner slice
per scope. All access must be guarded by logSinksLock.
*/
var logSinks = make([][]*logSink, 0)
var logSinksLock = sync.RWMutex{}
+
/*
addLogSink adds a new logging sink. Sinks of the same scope are kept in one
group; groups are sorted so the most specific scope comes first.
*/
func addLogSink(level Level, scope string, formatter Formatter, sink io.Writer) {
	logSinksLock.Lock()
	defer logSinksLock.Unlock()

	// First see if the new sink can be appended to an existing list

	for i, scopeSinks := range logSinks {
		if scopeSinks[0].scope == scope {
			scopeSinks = append(scopeSinks, &logSink{sink, level, scope, formatter})
			logSinks[i] = scopeSinks
			return
		}
	}

	// Insert the new sink in the appropriate place

	logSinks = append(logSinks, []*logSink{{sink, level, scope, formatter}})
	sort.Sort(sinkSlice(logSinks))
}
+
+/*
+publishLog publishes a log message.
+*/
+func publishLog(loglevel Level, scope string, msg ...interface{}) {
+
+	// Go through the sorted list of sinks
+
+	for _, sinks := range logSinks {
+
+		// Check if the log scope is within the message scope
+
+		if strings.HasPrefix(scope, sinks[0].scope) {
+
+			handled := false
+
+			for _, sink := range sinks {
+
+				// Check if the level is ok
+
+				if logLevelPriority[sink.level] <= logLevelPriority[loglevel] {
+
+					handled = true
+
+					fmsg := sink.formatter.Format(loglevel, scope, msg...)
+
+					if _, err := sink.Write([]byte(fmsg)); err != nil {
+
+						// Something went wrong use the fallback logger
+
+						fallbackLogger(fmt.Sprintf(
+							"Cloud not publish log message: %v (message: %v)",
+							err, fmsg))
+					}
+				}
+			}
+
+			if handled {
+				return
+			}
+		}
+	}
+
+	// No handler for log message use the fallback logger
+
+	fmsg := SimpleFormatter().Format(loglevel, scope, msg...)
+
+	fallbackLogger(fmt.Sprintf("No log handler for log message: %v", fmsg))
+}

+ 179 - 0
logutil/logger_test.go

@@ -0,0 +1,179 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package logutil
+
+import (
+	"bytes"
+	"fmt"
+	"strings"
+	"testing"
+)
+
/*
brokenSink is a test sink whose Write call always fails.
*/
type brokenSink struct {
}

/*
Write always reports an error to simulate a failing log sink.
*/
func (bs *brokenSink) Write(b []byte) (int, error) {
	return 0, fmt.Errorf("testerror")
}
+
// TestLogging exercises level parsing, scoped sink dispatch (root, sub and
// sub-sub scope), duplicate sink registration and the fallback logger paths.
func TestLogging(t *testing.T) {

	if StringToLoglevel("iNfO") != Info {
		t.Error("Unexpected result")
		return
	}

	ClearLogSinks()

	sf := SimpleFormatter()

	sf.(*simpleFormatter).tsFunc = func() string {
		return "0000000000000" // Timestamp for testing is always 0
	}

	// Test straight forward case doing root logging

	rootBuf := &bytes.Buffer{}
	logger := GetLogger("")

	logger.AddLogSink(Debug, sf, rootBuf)

	logger.Info("foo")
	logger.Warning("bar")

	if rootBuf.String() != `
0000000000000 Info foo
0000000000000 Warning bar
`[1:] {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	logger.LogStackTrace(Error, "test123")
	logger.Warning("next")

	// The stacktrace must mention this test file

	if !strings.Contains(rootBuf.String(), "logger_test.go") {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	rootBuf.Reset()

	logger.Info("foo")
	logger.Warning("bar")

	// Add a sub package logger

	subBuf := &bytes.Buffer{}
	logger = GetLogger("foo")

	logger.AddLogSink(Info, sf, subBuf)

	logger.Debug("debugmsg")
	logger.Info("foo")
	logger.Warning("bar")

	// Debug message was handled in root logger

	if rootBuf.String() != `
0000000000000 Info foo
0000000000000 Warning bar
0000000000000 Debug foo debugmsg
`[1:] {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	// Info and warning where handled in the sub logger

	if subBuf.String() != `
0000000000000 Info foo foo
0000000000000 Warning foo bar
`[1:] {
		t.Error("Unexpected output:", subBuf.String())
		return
	}

	// Add a sub sub package logger

	subsubBuf := &bytes.Buffer{}
	logger = GetLogger("foo.bar")

	//  Add the logger twice

	logger.AddLogSink(Error, sf, subsubBuf)
	logger.AddLogSink(Error, sf, subsubBuf)

	logger = GetLogger("foo.bar.bla")

	logger.Error("test1")
	logger.Info("test2")
	logger.Debug("test3")

	// Check that the messages were distributed correctly

	if rootBuf.String() != `
0000000000000 Info foo
0000000000000 Warning bar
0000000000000 Debug foo debugmsg
0000000000000 Debug foo.bar.bla test3
`[1:] {
		t.Error("Unexpected output:", rootBuf.String())
		return
	}

	if subBuf.String() != `
0000000000000 Info foo foo
0000000000000 Warning foo bar
0000000000000 Info foo.bar.bla test2
`[1:] {
		t.Error("Unexpected output:", subBuf.String())
		return
	}

	// Log message is duplicated as we have the same sink twice

	if subsubBuf.String() != `
0000000000000 Error foo.bar.bla test1
0000000000000 Error foo.bar.bla test1
`[1:] {
		t.Error("Unexpected output:", subsubBuf.String())
		return
	}

	// Remove all log sinks and test error cases

	ClearLogSinks()

	fallbackBuf := &bytes.Buffer{}
	fallbackLogger = func(v ...interface{}) {
		fallbackBuf.WriteString(fmt.Sprint(v...))
	}

	logger = GetLogger("foo.bar.bla")

	logger.Error("test1")

	// With no sinks registered the message goes to the fallback logger

	if !strings.Contains(fallbackBuf.String(), "Error foo.bar.bla test1") {
		t.Error("Unexpected output:", fallbackBuf.String())
		return
	}

	logger = GetLogger("foo.bar")

	logger.AddLogSink(Info, sf, &brokenSink{})

	logger.Info("test")

	// A failing sink write is also reported via the fallback logger

	if !strings.Contains(fallbackBuf.String(), "testerror") {
		t.Error("Unexpected output:", fallbackBuf.String())
		return
	}
}

+ 34 - 0
pools/pools.go

@@ -0,0 +1,34 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package pools contains object pooling utilities.
+*/
+package pools
+
+import (
+	"bytes"
+	"sync"
+)
+
+/*
+NewByteBufferPool creates a new pool of bytes.Buffer objects. The pool creates
+new ones if it runs empty.
+*/
+func NewByteBufferPool() *sync.Pool {
+	return &sync.Pool{New: func() interface{} { return &bytes.Buffer{} }}
+}
+
/*
NewByteSlicePool creates a new pool of []byte objects of a certain size.
When the pool runs empty a fresh slice of the given size is allocated on
demand.
*/
func NewByteSlicePool(size int) *sync.Pool {
	newSlice := func() interface{} {
		return make([]byte, size)
	}

	return &sync.Pool{New: newSlice}
}

+ 56 - 0
pools/pools_test.go

@@ -0,0 +1,56 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package pools
+
+import (
+	"bytes"
+	"testing"
+)
+
+func TestByteBufferPool(t *testing.T) {
+
+	pool := NewByteBufferPool()
+
+	buf1 := pool.Get().(*bytes.Buffer)
+	buf2 := pool.Get()
+	buf3 := pool.Get()
+
+	if buf1 == nil || buf2 == nil || buf3 == nil {
+		t.Error("Initialisation didn't work")
+		return
+	}
+
+	buf1.Write(make([]byte, 10, 10))
+
+	buf1.Reset()
+
+	pool.Put(buf1)
+}
+
+func TestByteSlicePool(t *testing.T) {
+
+	pool := NewByteSlicePool(5)
+
+	buf1 := pool.Get().([]byte)
+	buf2 := pool.Get()
+	buf3 := pool.Get()
+
+	if buf1 == nil || buf2 == nil || buf3 == nil {
+		t.Error("Initialisation didn't work")
+		return
+	}
+
+	if s := len(buf1); s != 5 {
+		t.Error("Unexpected size:", s)
+		return
+	}
+
+	pool.Put(buf1)
+}

+ 514 - 0
pools/threadpool.go

@@ -0,0 +1,514 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package pools contains object pooling utilities.
+*/
+package pools
+
+import (
+	"math"
+	"sync"
+	"time"
+)
+
/*
Different states of a thread pool as reported by ThreadPool.Status.
*/
const (
	StatusRunning  = "Running"
	StatusStopping = "Stopping"
	StatusStopped  = "Stopped"
)
+
/*
Task is a task which should be run in a thread.
*/
type Task interface {

	/*
		Run the task.
	*/
	Run() error

	/*
		HandleError handles an error which occurred during the run method.
	*/
	HandleError(e error)
}
+
/*
TaskQueue is a queue of tasks for a thread pool. Implementations are not
expected to be thread-safe - the pool serialises access via its queue lock.
*/
type TaskQueue interface {

	/*
		Clear the queue of all pending tasks
	*/
	Clear()

	/*
		Pop returns the next task from the queue.
	*/
	Pop() Task
	/*
		Push adds another task to the queue.
	*/
	Push(t Task)

	/*
		Size returns the size of the queue.
	*/
	Size() int
}
+
/*
DefaultTaskQueue implements a simple (FIFO) task queue for a thread pool.
The zero value is ready to use.
*/
type DefaultTaskQueue struct {
	queue []Task // Pending tasks in FIFO order
}
+
+/*
+Clear the queue of all pending tasks
+*/
+func (tq *DefaultTaskQueue) Clear() {
+	tq.queue = make([]Task, 0)
+}
+
+/*
+Pop returns the next task from the queue.
+*/
+func (tq *DefaultTaskQueue) Pop() Task {
+	var task Task
+
+	if len(tq.queue) > 0 {
+		task = tq.queue[0]
+		tq.queue = tq.queue[1:]
+	}
+
+	return task
+}
+
/*
Push adds another task to the end of the queue.
*/
func (tq *DefaultTaskQueue) Push(t Task) {
	tq.queue = append(tq.queue, t)
}
+
/*
Size returns the number of pending tasks in the queue.
*/
func (tq *DefaultTaskQueue) Size() int {
	return len(tq.queue)
}
+
/*
ThreadPool creates a pool of threads which process tasks according to a given
task queue. The threads are kept in an idle state if no more tasks are available.
They resume immediately once a new task is added.
*/
type ThreadPool struct {

	// Task regulation

	queue     TaskQueue   // Task queue for thread pool
	queueLock *sync.Mutex // Lock for queue

	// Worker regulation

	workerIDCount uint64                       // Id counter for worker tasks
	workerMap     map[uint64]*ThreadPoolWorker // Map of all workers
	workerIdleMap map[uint64]*ThreadPoolWorker // Map of all idle workers
	workerMapLock *sync.Mutex                  // Lock for worker map
	workerKill    int                          // Count of workers which should die (-1 means all should die once the queue is drained)
	newTaskCond   *sync.Cond                   // Waiting condition for new tasks

	// Callbacks to regulate load

	RegulationLock *sync.Mutex // Lock for regulation variables

	TooManyThreshold int    // Threshold for too many tasks
	TooManyCallback  func() // Callback for too many tasks
	tooManyTriggered bool   // Flag if too many tasks threshold was passed

	TooFewThreshold int    // Threshold for too few tasks
	TooFewCallback  func() // Callback for too few tasks
	tooFewTriggered bool   // Flag if too few tasks threshold was passed
}
+
/*
NewThreadPool creates a new thread pool with a default FIFO task queue.
*/
func NewThreadPool() *ThreadPool {
	return NewThreadPoolWithQueue(&DefaultTaskQueue{})
}
+
+/*
+NewThreadPoolWithQueue creates a new thread pool with a specific task queue.
+*/
+func NewThreadPoolWithQueue(q TaskQueue) *ThreadPool {
+	return &ThreadPool{q, &sync.Mutex{},
+		0, make(map[uint64]*ThreadPoolWorker),
+		make(map[uint64]*ThreadPoolWorker), &sync.Mutex{},
+		0, sync.NewCond(&sync.Mutex{}), &sync.Mutex{},
+		math.MaxInt32, func() {}, false, 0, func() {}, false}
+}
+
/*
AddTask adds a task to the thread pool and wakes up one idle worker.
May invoke the TooManyCallback if the queue grows past TooManyThreshold.
*/
func (tp *ThreadPool) AddTask(t Task) {
	tp.queueLock.Lock()
	defer tp.queueLock.Unlock()

	tp.queue.Push(t)

	// Reset too few flag
	// NOTE: RegulationLock is acquired while queueLock is held - all other
	// code paths must use the same lock order to avoid deadlocks.

	tp.RegulationLock.Lock()

	if tp.tooFewTriggered && tp.TooFewThreshold < tp.queue.Size() {
		tp.tooFewTriggered = false
	}

	// Check too many

	if !tp.tooManyTriggered && tp.TooManyThreshold <= tp.queue.Size() {
		tp.tooManyTriggered = true
		tp.TooManyCallback()
	}

	tp.RegulationLock.Unlock()

	// Wake up a waiting worker

	tp.newTaskCond.Signal()
}
+
+/*
+getTask is called by a worker to request a new task. The worker is expected to finish
+if this function returns nil.
+*/
+func (tp *ThreadPool) getTask() Task {
+	var returnIdleTask = true
+
+	// Check if tasks should be stopped
+
+	tp.workerMapLock.Lock()
+	if tp.workerKill > 0 {
+		tp.workerKill--
+		tp.workerMapLock.Unlock()
+		return nil
+
+	} else if tp.workerKill == -1 {
+
+		// Check for special worker kill value which is used when workers should
+		// be killed when no more tasks are available.
+
+		returnIdleTask = false
+	}
+	tp.workerMapLock.Unlock()
+
+	// Check if there is a task available
+
+	tp.queueLock.Lock()
+	task := tp.queue.Pop()
+	tp.queueLock.Unlock()
+
+	if task != nil {
+		return task
+	}
+
+	tp.RegulationLock.Lock()
+
+	// Reset too many flag
+
+	if tp.tooManyTriggered && tp.TooManyThreshold > tp.queue.Size() {
+		tp.tooManyTriggered = false
+	}
+
+	// Check too few
+
+	if !tp.tooFewTriggered && tp.TooFewThreshold >= tp.queue.Size() {
+		tp.tooFewTriggered = true
+		tp.TooFewCallback()
+	}
+
+	tp.RegulationLock.Unlock()
+
+	if returnIdleTask {
+
+		// No new task available return idle task
+
+		return &idleTask{tp}
+	}
+
+	return nil
+}
+
+/*
+SetWorkerCount sets the worker count of this pool. If the wait flag is true then
+this call will return after the pool has reached the requested worker count.
+*/
+func (tp *ThreadPool) SetWorkerCount(count int, wait bool) {
+
+	tp.workerMapLock.Lock()
+	workerCount := len(tp.workerMap)
+	tp.workerMapLock.Unlock()
+
+	if count < 0 {
+		count = 0
+	}
+
+	if workerCount < count {
+
+		// More workers are needed
+
+		tp.workerMapLock.Lock()
+
+		// Make sure no more workers are killed
+
+		tp.workerKill = 0
+
+		for len(tp.workerMap) != count {
+			worker := &ThreadPoolWorker{tp.workerIDCount, tp}
+			go worker.run()
+			tp.workerMap[tp.workerIDCount] = worker
+			tp.workerIDCount++
+		}
+
+		tp.workerMapLock.Unlock()
+
+	} else if workerCount > count {
+
+		// Fewer workers are needed
+
+		tp.workerMapLock.Lock()
+		tp.workerKill = workerCount - count
+		tp.workerMapLock.Unlock()
+
+		tp.newTaskCond.Broadcast()
+
+		if wait {
+			for true {
+				tp.workerMapLock.Lock()
+				workerCount = len(tp.workerMap)
+				tp.workerMapLock.Unlock()
+
+				if workerCount == count {
+					break
+				}
+
+				time.Sleep(5 * time.Nanosecond)
+
+				// Broadcast again since sine workers might be now waiting
+
+				tp.newTaskCond.Broadcast()
+			}
+		}
+	}
+}
+
+/*
+Status returns the current status of the thread pool.
+*/
+func (tp *ThreadPool) Status() string {
+	var status string
+
+	tp.workerMapLock.Lock()
+	workerCount := len(tp.workerMap)
+	workerKill := tp.workerKill
+	tp.workerMapLock.Unlock()
+
+	if workerCount > 0 {
+		if workerKill == -1 {
+			status = StatusStopping
+		} else {
+			status = StatusRunning
+		}
+	} else {
+		status = StatusStopped
+	}
+
+	return status
+}
+
/*
WorkerCount returns the current count of workers (idle workers included).
*/
func (tp *ThreadPool) WorkerCount() int {
	tp.workerMapLock.Lock()
	defer tp.workerMapLock.Unlock()
	return len(tp.workerMap)
}
+
+/*
+WaitAll waits for all workers to become idle.
+*/
+func (tp *ThreadPool) WaitAll() {
+
+	// Wake up all workers
+
+	tp.newTaskCond.Broadcast()
+
+	time.Sleep(5 * time.Nanosecond)
+
+	for true {
+
+		tp.workerMapLock.Lock()
+		tp.queueLock.Lock()
+
+		// Get total number of workers and idle workers
+
+		workerCount := len(tp.workerMap)
+		workerIdleCount := len(tp.workerIdleMap)
+
+		// Get number of pending tasks
+
+		tasks := tp.queue.Size()
+
+		tp.queueLock.Unlock()
+		tp.workerMapLock.Unlock()
+
+		// Only leave this loop if either no workers are left or if all
+		// tasks are done and all workers are idle
+
+		if workerCount == 0 || (workerCount == workerIdleCount && tasks == 0) {
+			break
+		}
+
+		time.Sleep(5 * time.Nanosecond)
+
+		// Broadcast again and again until all workers are idle
+
+		tp.newTaskCond.Broadcast()
+	}
+}
+
+/*
+JoinAll processes all remaining tasks and kills off all workers afterwards.
+*/
+func (tp *ThreadPool) JoinAll() {
+
+	// Tell all workers to die
+
+	tp.workerMapLock.Lock()
+	tp.workerKill = -1
+	tp.workerMapLock.Unlock()
+
+	tp.newTaskCond.Broadcast()
+
+	for true {
+
+		tp.workerMapLock.Lock()
+		tp.queueLock.Lock()
+
+		// Get total number of workers
+
+		workerCount := len(tp.workerMap)
+
+		// Get number of pending tasks
+
+		tasks := tp.queue.Size()
+
+		tp.queueLock.Unlock()
+		tp.workerMapLock.Unlock()
+
+		// Only leave this loop if no workers are existing and all tasks are done
+
+		if workerCount == 0 && tasks == 0 {
+			break
+		}
+
+		time.Sleep(5 * time.Nanosecond)
+
+		// Broadcast again and again until all workers are dead
+
+		tp.newTaskCond.Broadcast()
+	}
+}
+
/*
ThreadPoolWorker models a worker in the thread pool. Each worker runs in
its own goroutine (started by SetWorkerCount).
*/
type ThreadPoolWorker struct {
	id   uint64      // ID of the thread pool worker
	pool *ThreadPool // Thread pool of this worker
}
+
+/*
+run lets this worker run tasks.
+*/
+func (w *ThreadPoolWorker) run() {
+
+	for true {
+
+		// Try to get the next task
+
+		task := w.pool.getTask()
+
+		// Exit if there is not new task
+
+		if task == nil {
+			break
+		}
+
+		_, isIdleTask := task.(*idleTask)
+
+		if isIdleTask {
+
+			// Register this worker as idle
+
+			w.pool.workerMapLock.Lock()
+			w.pool.workerIdleMap[w.id] = w
+			w.pool.workerMapLock.Unlock()
+		}
+
+		// Run the task
+
+		if err := task.Run(); err != nil {
+			task.HandleError(err)
+		}
+
+		if isIdleTask {
+			w.pool.workerMapLock.Lock()
+			delete(w.pool.workerIdleMap, w.id)
+			w.pool.workerMapLock.Unlock()
+		}
+	}
+
+	// Remove worker from workerMap
+
+	w.pool.workerMapLock.Lock()
+	delete(w.pool.workerMap, w.id)
+	w.pool.workerMapLock.Unlock()
+}
+
/*
idleTask is the internal task which parks a worker on the pool's new-task
condition until more work arrives.
*/
type idleTask struct {
	tp *ThreadPool
}
+
/*
Run the idle task. Blocks on the pool's condition variable until a Signal
or Broadcast wakes the worker up.
*/
func (t *idleTask) Run() error {
	t.tp.newTaskCond.L.Lock()
	t.tp.newTaskCond.Wait()
	t.tp.newTaskCond.L.Unlock()
	return nil
}
+
/*
HandleError panics - Run of the idle task never returns an error, so being
called here indicates a programming error.
*/
func (t *idleTask) HandleError(e error) {
	panic(e.Error())
}

+ 415 - 0
pools/threadpool_test.go

@@ -0,0 +1,415 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package pools
+
+import (
+	"bytes"
+	"errors"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+)
+
// testTask is a configurable Task implementation for the tests below.
type testTask struct {
	task         func() error // Body executed by Run
	errorHandler func(e error) // Invoked by HandleError (may be nil if Run never fails)
}

// Run executes the configured task body.
func (t *testTask) Run() error {
	return t.task()
}

// HandleError delegates to the configured error handler.
func (t *testTask) HandleError(e error) {
	t.errorHandler(e)
}
+
// TestDefaultTaskQueue checks the FIFO queue on its zero value: empty
// behaviour, Clear, Push/Pop ordering and size accounting.
func TestDefaultTaskQueue(t *testing.T) {
	var taskFinishCounter int
	var tq DefaultTaskQueue

	if res := tq.Size(); res != 0 {
		t.Error("Initial size should be empty not: ", res)
		return
	}

	if res := tq.Pop(); res != nil {
		t.Error("Unexpected result: ", res)
		return
	}

	tq.Clear()

	if res := tq.Size(); res != 0 {
		t.Error("Initial size should be empty not: ", res)
		return
	}

	if res := tq.Pop(); res != nil {
		t.Error("Unexpected result: ", res)
		return
	}

	tq.Push(&testTask{func() error {
		taskFinishCounter++
		return nil
	}, nil})
	tq.Push(&testTask{func() error {
		taskFinishCounter++
		return nil
	}, nil})
	tq.Push(&testTask{func() error {
		taskFinishCounter++
		return nil
	}, nil})

	if res := tq.Size(); res != 3 {
		t.Error("Unexpected result: ", res)
		return
	}

	// Execute the functions

	tq.Pop().Run()

	if res := tq.Size(); res != 2 {
		t.Error("Unexpected result: ", res)
		return
	}

	tq.Pop().Run()

	if res := tq.Size(); res != 1 {
		t.Error("Unexpected result: ", res)
		return
	}

	tq.Pop().Run()

	if res := tq.Size(); res != 0 {
		t.Error("Unexpected result: ", res)
		return
	}

	if res := tq.Pop(); res != nil {
		t.Error("Unexpected result: ", res)
		return
	}

	if taskFinishCounter != 3 {
		t.Error("Unexpected result: ", taskFinishCounter)
		return
	}
}
+
// TestThreadPool exercises the pool lifecycle: status transitions, worker
// scale-up/down, JoinAll, WaitAll and the internal kill flag.
// NOTE(review): the test uses short sleeps to let workers reach the idle
// state - timing-sensitive by design.
func TestThreadPool(t *testing.T) {
	var taskFinishCounter int
	taskFinishCounterLock := &sync.Mutex{}

	tp := NewThreadPool()

	tp.SetWorkerCount(-10, true) // negative counts are clamped to 0
	tp.TooManyThreshold = 1

	if status := tp.Status(); status != StatusStopped {
		t.Error("Unexpected status:", status)
		return
	}

	tp.SetWorkerCount(3, true)

	if status := tp.Status(); status != StatusRunning {
		t.Error("Unexpected status:", status)
		return
	}

	if workers := len(tp.workerMap); workers != 3 {
		t.Error("Unepxected state:", workers)
		return
	}

	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})

	tp.JoinAll()

	if workers := len(tp.workerMap); workers != 0 {
		t.Error("Unepxected state:", workers)
		return
	}

	if taskFinishCounter != 3 {
		t.Error("Unexpected result: ", taskFinishCounter)
		return
	}

	if status := tp.Status(); status != StatusStopped {
		t.Error("Unexpected status:", status)
		return
	}

	// Tasks added to a stopped pool stay queued until workers are started

	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		time.Sleep(10 * time.Millisecond)
		return nil
	}, nil})

	if status := tp.Status(); status != StatusStopped {
		t.Error("Unexpected status:", status)
		return
	}

	tp.SetWorkerCount(3, false)

	if workers := len(tp.workerMap); workers != 3 {
		t.Error("Unepxected state:", workers)
		return
	}

	// Let the workers go into the idle state

	time.Sleep(20 * time.Millisecond)

	// Reduce the number of workers

	tp.SetWorkerCount(1, true)

	if workers := len(tp.workerMap); workers != 1 {
		t.Error("Unepxected state:", workers)
		return
	}

	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})
	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		time.Sleep(10 * time.Millisecond)
		return nil
	}, nil})

	// Set the kill value

	tp.workerKill = -1

	if status := tp.Status(); status != StatusStopping {
		t.Error("Unexpected status:", status)
		return
	}

	tp.WaitAll()

	tp.SetWorkerCount(-5, true)

	if workers := len(tp.workerMap); workers != 0 {
		t.Error("Unepxected state:", workers)
		return
	}

	tp.AddTask(&testTask{func() error {
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil})

	tp.WaitAll()

	// NOTE(review): counter reads below rely on WaitAll/JoinAll having
	// quiesced all workers - they happen without taking the lock

	if taskFinishCounter != 9 {
		t.Error("Unexpected result: ", taskFinishCounter)
		return
	}

	tp.SetWorkerCount(1, false)

	tp.WaitAll()

	if taskFinishCounter != 10 {
		t.Error("Unexpected result: ", taskFinishCounter)
		return
	}

	tp.SetWorkerCount(0, true)

	if status := tp.Status(); status != StatusStopped {
		t.Error("Unexpected status:", status)
		return
	}
}
+
// TestThreadPoolThresholds checks that the TooFew/TooMany regulation
// callbacks fire while tasks flow through the pool.
func TestThreadPoolThresholds(t *testing.T) {
	var taskFinishCounter int
	taskFinishCounterLock := &sync.Mutex{}

	task := &testTask{func() error {
		time.Sleep(time.Millisecond * 5)
		taskFinishCounterLock.Lock()
		taskFinishCounter++
		taskFinishCounterLock.Unlock()
		return nil
	}, nil}

	var buf bytes.Buffer

	tp := NewThreadPool()

	tp.TooFewThreshold = 1
	tp.TooManyThreshold = 5

	// The callbacks share taskFinishCounterLock to keep buf writes serialised

	tp.TooFewCallback = func() {
		taskFinishCounterLock.Lock()
		buf.WriteString("low")
		taskFinishCounterLock.Unlock()
	}
	tp.TooManyCallback = func() {
		taskFinishCounterLock.Lock()
		buf.WriteString("high")
		taskFinishCounterLock.Unlock()
	}

	tp.SetWorkerCount(10, false)

	for i := 0; i < 10; i++ {
		tp.AddTask(task)
	}

	if wc := tp.WorkerCount(); wc != 10 {
		t.Error("Unexpected result:", wc)
		return
	}

	tp.SetWorkerCount(10, false)

	tp.WaitAll()

	if wc := tp.WorkerCount(); wc != 10 {
		t.Error("Unexpected result:", wc)
		return
	}

	tp.SetWorkerCount(10, false)

	for i := 0; i < 10; i++ {
		tp.AddTask(task)
	}

	tp.WaitAll()

	if wc := tp.WorkerCount(); wc != 10 {
		t.Error("Unexpected result:", wc)
		return
	}

	if taskFinishCounter != 20 {
		t.Error("Unexpected result:", taskFinishCounter)
		return
	}

	tp.JoinAll()

	if wc := tp.WorkerCount(); wc != 0 {
		t.Error("Unexpected result:", wc)
		return
	}

	// Check that the callbacks where triggered twice each

	if !strings.Contains(buf.String(), "high") {
		t.Error("Unexpected result:", buf.String())
		return
	}
	if !strings.Contains(buf.String(), "low") {
		t.Error("Unexpected result:", buf.String())
		return
	}
}
+
// TestThreadPoolIdleTaskPanic verifies that the internal idle task's
// HandleError panics (it should never be invoked in normal operation).
func TestThreadPoolIdleTaskPanic(t *testing.T) {

	defer func() {
		if r := recover(); r == nil {
			t.Error("Error handling on the idle task did not cause a panic")
		}
	}()

	// Run error handling function of idle task

	idleTask := &idleTask{}
	idleTask.HandleError(nil)
}
+
// TestThreadPoolErrorHandling checks that a task error is routed to the
// task's HandleError callback once a worker runs it.
func TestThreadPoolErrorHandling(t *testing.T) {

	// Test error normal task handling

	var buf bytes.Buffer

	task := &testTask{func() error {
		return errors.New("testerror")
	}, func(e error) {
		buf.WriteString(e.Error())
	}}

	tp := NewThreadPool()

	tp.AddTask(task)

	// No worker has run yet, so the handler must not have fired

	if buf.String() != "" {
		t.Error("Unexpected result:", buf.String())
	}

	tp.SetWorkerCount(1, false)
	tp.JoinAll()

	if buf.String() != "testerror" {
		t.Error("Unexpected result:", buf.String())
	}
}

+ 83 - 0
sortutil/heap.go

@@ -0,0 +1,83 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import "container/heap"
+
/*
IntHeap is a classic min-heap with int values.
*/
type IntHeap []int

func (h IntHeap) Len() int           { return len(h) }
func (h IntHeap) Less(i, j int) bool { return h[i] < h[j] }
func (h IntHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

/*
Push adds an item to the heap (called via heap.Push).
*/
func (h *IntHeap) Push(x interface{}) {
	*h = append(*h, x.(int))
}

/*
Pop removes an item from the heap (called via heap.Pop).
*/
func (h *IntHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]

	*h = old[0 : n-1]

	return x
}

/*
Peek returns the next item but does not remove it like Pop. Panics on an
empty heap.
*/
func (h *IntHeap) Peek() int {
	return (*h)[0]
}

/*
RemoveFirst removes the first occurrence of item r from the IntHeap.
*/
func (h *IntHeap) RemoveFirst(r int) {
	heapList := *h

	for i, item := range heapList {
		if item == r {
			n := len(heapList) - 1

			// Swap the element with the trailing element and truncate.
			// The previous implementation cut the element out with append
			// which shifted all following elements by one position and
			// could leave the heap invalid (heap.Fix only repairs a
			// single position, not a whole shifted subtree).

			heapList.Swap(i, n)
			*h = heapList[:n]

			// Restore the heap invariant unless the removed element was
			// the trailing one (nothing was displaced in that case)

			if i < n {
				heap.Fix(h, i)
			}

			break
		}
	}
}

/*
RemoveAll removes all occurrences of item r from the IntHeap.
*/
func (h *IntHeap) RemoveAll(r int) {
	newHeap := &IntHeap{}

	// Drain the heap and keep everything which is not r

	for len(*h) > 0 {
		item := heap.Pop(h)
		if item != r {
			heap.Push(newHeap, item)
		}
	}

	(*h) = *newHeap
}

+ 101 - 0
sortutil/heap_test.go

@@ -0,0 +1,101 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import (
+	"bytes"
+	"container/heap"
+	"fmt"
+	"testing"
+)
+
+func TestIntHeap(t *testing.T) {
+	h := &IntHeap{2, 1, 5}
+
+	heap.Init(h)
+	heap.Push(h, 3)
+	heap.Push(h, 8)
+
+	if (*h)[0] != 1 {
+		t.Error("Unexpected minimum:", (*h)[0])
+		return
+	}
+
+	if (*h)[len(*h)-1] != 8 {
+		t.Error("Unexpected maximum:", (*h)[len(*h)-1])
+		return
+	}
+
+	if res := h.Peek(); res != (*h)[0] {
+		t.Error("Unexpected peek result:", res)
+		return
+	}
+
+	var buf bytes.Buffer
+
+	for h.Len() > 0 {
+		buf.WriteString(fmt.Sprintf("%d ", heap.Pop(h)))
+	}
+
+	if buf.String() != "1 2 3 5 8 " {
+		t.Error("Unexpected sort order:", buf.String())
+	}
+
+	buf.Reset()
+
+	h = &IntHeap{2, 1, 5}
+
+	heap.Init(h)
+	heap.Push(h, 3)
+	heap.Push(h, 3)
+	heap.Push(h, 8)
+
+	h.RemoveAll(3)
+
+	for h.Len() > 0 {
+		buf.WriteString(fmt.Sprintf("%d ", heap.Pop(h)))
+	}
+
+	if buf.String() != "1 2 5 8 " {
+		t.Error("Unexpected sort order:", buf.String())
+	}
+
+	buf.Reset()
+
+	h = &IntHeap{2, 1, 5}
+
+	heap.Init(h)
+	heap.Push(h, 3)
+	heap.Push(h, 3)
+	heap.Push(h, 8)
+
+	h.RemoveFirst(3)
+
+	for h.Len() > 0 {
+		buf.WriteString(fmt.Sprintf("%d ", heap.Pop(h)))
+	}
+
+	if buf.String() != "1 2 3 5 8 " {
+		t.Error("Unexpected sort order:", buf.String())
+	}
+
+	heap.Push(h, 3)
+	heap.Push(h, 3)
+	heap.Push(h, 8)
+
+	h.RemoveFirst(3)
+	h.RemoveFirst(3)
+	h.RemoveFirst(8)
+
+	if h.Len() != 0 {
+		t.Error("Unexpected size:", h.Len())
+		return
+	}
+}

+ 227 - 0
sortutil/priorityqueue.go

@@ -0,0 +1,227 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import (
+	"bytes"
+	"container/heap"
+	"fmt"
+)
+
/*
PriorityQueue is like a regular queue where each element has a priority. Items with
higher priority are served first. Items with the same priority are returned in the
order they were added. Priority 0 is the highest priority with the priority
decreasing as the priority number increases.

It is possible to set a minimum priority function on the PriorityQueue object.
The function returns the current minimum priority level which should be returned
by the queue. If the current available priority is lower than this then len()
will return 0 and pop will return nil. If the function returns a negative value
then the value is ignored.
*/
type PriorityQueue struct {
	heap         *priorityQueueHeap // Heap which holds the values
	orderCounter int                // Insertion counter to keep FIFO order within one priority
	MinPriority  func() int         // Function returning the minimum priority
}

/*
NewPriorityQueue creates a new priority queue.
*/
func NewPriorityQueue() *PriorityQueue {

	pqheap := make(priorityQueueHeap, 0)
	pq := &PriorityQueue{&pqheap, 0, func() int { return -1 }}

	heap.Init(pq.heap)

	return pq
}

/*
Clear clears the current queue contents.
*/
func (pq *PriorityQueue) Clear() {
	pqheap := make(priorityQueueHeap, 0)
	pq.heap = &pqheap
	pq.orderCounter = 0
	heap.Init(pq.heap)
}

/*
CurrentPriority returns the priority of the next item (0 if the queue is empty).
*/
func (pq *PriorityQueue) CurrentPriority() int {
	if len(*pq.heap) == 0 {
		return 0
	}

	return pq.heap.Peek().(*pqItem).priority
}

/*
Push adds a new element to the queue.
*/
func (pq *PriorityQueue) Push(value interface{}, priority int) {

	// Highest priority is 0 we can't go higher

	if priority < 0 {
		priority = 0
	}

	heap.Push(pq.heap, &pqItem{value, priority, pq.orderCounter, 0})
	pq.orderCounter++
}

/*
gated reports whether the queue should present itself as empty - either
because it holds no items or because the priority of the next item does
not meet the minimum priority returned by MinPriority. This check was
previously duplicated in Peek, Pop, Size and SizeCurrentPriority.
*/
func (pq *PriorityQueue) gated() bool {
	minPriority := pq.MinPriority()

	return len(*pq.heap) == 0 ||
		(minPriority > 0 && pq.heap.Peek().(*pqItem).priority > minPriority)
}

/*
Peek returns the next item of the queue but does not remove it.
*/
func (pq *PriorityQueue) Peek() interface{} {
	if pq.gated() {
		return nil
	}

	return pq.heap.Peek().(*pqItem).value
}

/*
Pop removes the next element from the queue and returns it.
*/
func (pq *PriorityQueue) Pop() interface{} {
	if pq.gated() {
		return nil
	}

	return heap.Pop(pq.heap).(*pqItem).value
}

/*
Size returns the current queue size. Note: once the minimum priority gate
is passed this is the total number of queued items, not only the items at
the permitted priority levels.
*/
func (pq *PriorityQueue) Size() int {
	if pq.gated() {
		return 0
	}

	return len(*pq.heap)
}

/*
SizeCurrentPriority returns the queue size of all elements of the highest priority.
*/
func (pq *PriorityQueue) SizeCurrentPriority() int {
	if pq.gated() {
		return 0
	}

	highestPriority := pq.heap.Peek().(*pqItem).priority
	counter := 0

	for _, item := range *pq.heap {
		if item.priority == highestPriority {
			counter++
		}
	}

	return counter
}

/*
String returns a string representation of the queue.
*/
func (pq *PriorityQueue) String() string {
	var ret bytes.Buffer

	ret.WriteString("[ ")

	for _, item := range *pq.heap {
		ret.WriteString(fmt.Sprintf("%v (%v) ", item.value, item.priority))
	}

	ret.WriteString("]")

	return ret.String()
}

// Internal datastructures
// =======================

/*
pqItem models an item in the priority queue.
*/
type pqItem struct {
	value    interface{} // Value which is held in the queue
	priority int         // Priority of the item
	order    int         // Order of adding
	index    int         // Item index in the heap (required by heap).
}

/*
priorityQueueHeap implements the heap.Interface and is the datastructure which
actually holds items.
*/
type priorityQueueHeap []*pqItem

func (pq priorityQueueHeap) Len() int { return len(pq) }
func (pq priorityQueueHeap) Less(i, j int) bool {
	if pq[i].priority != pq[j].priority {
		return pq[i].priority < pq[j].priority
	}

	// Same priority - preserve insertion order

	return pq[i].order < pq[j].order
}
func (pq priorityQueueHeap) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

/*
Push adds an item to the queue.
*/
func (pq *priorityQueueHeap) Push(x interface{}) {
	n := len(*pq)
	item := x.(*pqItem)

	item.index = n

	*pq = append(*pq, item)
}

/*
Pop removes an item from the queue.
*/
func (pq *priorityQueueHeap) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]

	item.index = -1
	*pq = old[0 : n-1]

	return item
}

/*
Peek returns the next item but does not remove it from the queue.
*/
func (pq *priorityQueueHeap) Peek() interface{} {
	return (*pq)[0]
}

+ 196 - 0
sortutil/priorityqueue_test.go

@@ -0,0 +1,196 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import (
+	"fmt"
+	"testing"
+)
+
+func TestPriorityQueue(t *testing.T) {
+
+	pq := NewPriorityQueue()
+
+	if pq.CurrentPriority() != 0 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	pq.Push("test1", 1)
+	pq.Push("test8", 8)
+	pq.Push("test2", 2)
+	pq.Push("test5", 5)
+
+	// Check contents:
+
+	if res := fmt.Sprint(pq); res != "[ test1 (1) test5 (5) test2 (2) test8 (8) ]" {
+		t.Error("Unexpected queue layout:", res)
+		return
+	}
+
+	if pq.CurrentPriority() != 1 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	if pq.Size() != 4 {
+		t.Error("Unexpected size:", pq.Size())
+		return
+	}
+
+	if pq.SizeCurrentPriority() != 1 {
+		t.Error("Unexpected size:", pq.SizeCurrentPriority())
+		return
+	}
+
+	// Set minpriority function
+
+	pq.MinPriority = func() int {
+		return 1
+	}
+
+	peek := pq.Peek()
+	if res := pq.Pop(); res != "test1" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(pq); res != "[ test2 (2) test5 (5) test8 (8) ]" {
+		t.Error("Unexpected queue layout:", res)
+		return
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != nil && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := pq.Size(); res != 0 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := pq.SizeCurrentPriority(); res != 0 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	pq.MinPriority = func() int { return -1 }
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test2" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	peek = pq.Peek()
+
+	if pq.CurrentPriority() != 5 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	if res := pq.Pop(); res != "test5" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if pq.CurrentPriority() != 8 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test8" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	pq.Push("test2", 9)
+
+	if pq.CurrentPriority() != 9 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	pq.Clear()
+
+	if pq.CurrentPriority() != 0 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+	if res := pq.Size(); res != 0 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := fmt.Sprint(pq); res != "[ ]" {
+		t.Error("Unexpected queue layout:", res)
+		return
+	}
+
+	// Test we can use it as a normal queue
+
+	pq.Push("test1", 0)
+	pq.Push("test8", -1)
+	pq.Push("test2", 0)
+	pq.Push("test5", 0)
+
+	if res := pq.Size(); res != 4 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := pq.SizeCurrentPriority(); res != 4 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	pq.MinPriority = func() int {
+		return 0
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test1" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if res := pq.Size(); res != 3 {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test8" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test2" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	peek = pq.Peek()
+	if res := pq.Pop(); res != "test5" && res == peek {
+		t.Error("Unexpected pop result:", res)
+		return
+	}
+
+	if pq.CurrentPriority() != 0 {
+		t.Error("Unexpected priority:", pq.CurrentPriority())
+		return
+	}
+
+}

+ 62 - 0
sortutil/sortutil.go

@@ -0,0 +1,62 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package sortutil contains common sorting definitions and utilities for sorting data.
+*/
+package sortutil
+
+import (
+	"fmt"
+	"sort"
+)
+
/*
Int64Slice is a special type implementing the sort interface for int64
*/
type Int64Slice []int64

// Len returns the number of elements.
func (s Int64Slice) Len() int { return len(s) }

// Less reports whether element i sorts before element j.
func (s Int64Slice) Less(i, j int) bool { return s[i] < s[j] }

// Swap exchanges the elements at i and j.
func (s Int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

/*
Int64s sorts a slice of int64s in increasing order.
*/
func Int64s(a []int64) { sort.Sort(Int64Slice(a)) }
+
/*
UInt64Slice is a special type implementing the sort interface for uint64
*/
type UInt64Slice []uint64

// Len returns the number of elements.
func (s UInt64Slice) Len() int { return len(s) }

// Less reports whether element i sorts before element j.
func (s UInt64Slice) Less(i, j int) bool { return s[i] < s[j] }

// Swap exchanges the elements at i and j.
func (s UInt64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

/*
UInt64s sorts a slice of uint64s in increasing order.
*/
func UInt64s(a []uint64) { sort.Sort(UInt64Slice(a)) }
+
/*
AbstractSlice is a special type implementing the sort interface for interface{}
(Sorting is by string value)
*/
type AbstractSlice []interface{}

// Len returns the number of elements.
func (s AbstractSlice) Len() int { return len(s) }

// Less compares elements i and j by their default string representation.
func (s AbstractSlice) Less(i, j int) bool { return fmt.Sprint(s[i]) < fmt.Sprint(s[j]) }

// Swap exchanges the elements at i and j.
func (s AbstractSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

/*
InterfaceStrings sorts a slice of interface{} in increasing order by their string
values.
*/
func InterfaceStrings(a []interface{}) { sort.Sort(AbstractSlice(a)) }

+ 48 - 0
sortutil/sortutil_test.go

@@ -0,0 +1,48 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestInt64s(t *testing.T) {
+	testSlice := []int64{5, 2, 3, 0xFFFFFFFF, 1}
+
+	Int64s(testSlice)
+
+	if !reflect.DeepEqual(testSlice, []int64{1, 2, 3, 5, 0xFFFFFFFF}) {
+		t.Error("Unexpected sorted order:", testSlice)
+		return
+	}
+}
+
+func TestUInt64s(t *testing.T) {
+	testSlice := []uint64{5, 2, 3, 0xFFFFFFFF, 1}
+
+	UInt64s(testSlice)
+
+	if !reflect.DeepEqual(testSlice, []uint64{1, 2, 3, 5, 0xFFFFFFFF}) {
+		t.Error("Unexpected sorted order:", testSlice)
+		return
+	}
+}
+
+func TestAbstractSlice(t *testing.T) {
+	testSlice := []interface{}{5, 2, "bla", 0xFFFFFFFF, 1}
+
+	InterfaceStrings(testSlice)
+
+	if !reflect.DeepEqual(testSlice, []interface{}{1, 2, 0xFFFFFFFF, 5, "bla"}) {
+		t.Error("Unexpected sorted order:", testSlice)
+		return
+	}
+}

+ 118 - 0
sortutil/vectorclock.go

@@ -0,0 +1,118 @@
+package sortutil
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+)
+
/*
VectorClock implements a vector clock object. The clock can record actions
of actors. Each action produces a new version which can be queried.
*/
type VectorClock struct {
	versionVector map[string]uint64 // Version number per actor (the previous comment "Data for the cache" was a copy-paste error)
}

/*
NewVectorClock creates a new vector clock datastructure.
*/
func NewVectorClock() *VectorClock {
	return &VectorClock{make(map[string]uint64)}
}

/*
CloneVectorClock clones an existing vector clock.
*/
func CloneVectorClock(vc *VectorClock) *VectorClock {
	newVC := NewVectorClock()

	for actor, version := range vc.versionVector {
		newVC.versionVector[actor] = version
	}

	return newVC
}

/*
NewDescendant creates a vector clock which is a descendant of all given vector clocks.
*/
func NewDescendant(otherVCs ...*VectorClock) *VectorClock {
	newVC := NewVectorClock()

	// For every actor take the highest version seen in any of the clocks

	for _, otherVC := range otherVCs {
		for actor, version := range otherVC.versionVector {
			if newVC.Version(actor) < version {
				newVC.versionVector[actor] = version
			}
		}
	}

	return newVC
}

/*
Act records an action of an actor.
*/
func (vc *VectorClock) Act(actor string) {

	// Incrementing works for unknown actors as well since the map yields
	// the zero value (0) for missing keys

	vc.versionVector[actor]++
}

/*
Version returns the current version for a given actor (0 if the actor
never acted).
*/
func (vc *VectorClock) Version(actor string) uint64 {
	return vc.versionVector[actor]
}

/*
IsDescendent determines if another vector clock is a descendent of this vector clock.
*/
func (vc *VectorClock) IsDescendent(otherVC *VectorClock) bool {

	// In order for vc to be considered a descendant of otherVC, each marker
	// in otherVC must have a corresponding marker in vc that has a revision
	// number greater than or equal to the marker in otherVC.

	for actor, version := range otherVC.versionVector {
		if vc.Version(actor) < version {
			return false
		}
	}

	return true
}

/*
IsConflicting determines if another vector clock is conflicting with this vector clock.
*/
func (vc *VectorClock) IsConflicting(otherVC *VectorClock) bool {
	return !(vc.IsDescendent(otherVC) || otherVC.IsDescendent(vc))
}

/*
String returns a string representation of this vector clock. Actors are
listed in sorted order, one "actor:version" entry per line.
*/
func (vc *VectorClock) String() string {
	actors := make([]string, 0, len(vc.versionVector))

	for actor := range vc.versionVector {
		actors = append(actors, actor)
	}

	sort.Strings(actors)

	buf := &bytes.Buffer{}

	for _, actor := range actors {
		buf.WriteString(fmt.Sprint(actor, ":", vc.versionVector[actor], "\n"))
	}

	return buf.String()
}

+ 97 - 0
sortutil/vectorclock_test.go

@@ -0,0 +1,97 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package sortutil
+
+import "testing"
+
+type dinnerDay struct {
+	day string
+	vc  *VectorClock
+}
+
+const (
+	actorAlice = "Alice"
+	actorBen   = "Ben"
+	actorCathy = "Cathy"
+	actorDave  = "Dave"
+)
+
+/*
+The dinner agreement example was taken from:
+http://basho.com/posts/technical/why-vector-clocks-are-easy/
+*/
+
+func TestDinnerAgreement(t *testing.T) {
+
+	// Test how Alice, Ben Cathy and Dave are meeting for dinner at Dave's place
+
+	// Start by Alice suggesting to meet on Wednesday
+
+	dd := &dinnerDay{"Wednesday", NewVectorClock()}
+	dd.vc.Act(actorAlice)
+
+	dd2 := &dinnerDay{dd.day, CloneVectorClock(dd.vc)}
+
+	// Ben suggests now Tuesday
+
+	dd.day = "Tuesday"
+	dd.vc.Act(actorBen)
+
+	// Dave confirms the day
+
+	dd.vc.Act(actorDave)
+
+	// Check descendancy
+
+	if !dd.vc.IsDescendent(dd2.vc) {
+		t.Error("dd should be a descendent of dd2")
+		return
+	} else if dd2.vc.IsDescendent(dd.vc) {
+		t.Error("dd2 should not be a descendent of dd")
+		return
+	}
+
+	// Cathy has an old version and suggests Thursday
+
+	dd2.day = "Thursday"
+	dd2.vc.Act(actorCathy)
+
+	// Detect conflict
+
+	if !dd.vc.IsConflicting(dd2.vc) {
+		t.Error("Vector clocks should be conflicting")
+		return
+	}
+
+	// Dave makes a decision and chooses Thursday
+
+	dd3 := &dinnerDay{dd2.day, NewDescendant(dd.vc, dd2.vc)}
+	dd3.vc.Act(actorDave)
+
+	// Check descendancy
+
+	if !dd3.vc.IsDescendent(dd.vc) || dd3.vc.IsConflicting(dd.vc) {
+		t.Error("dd3 should be a descendent of dd")
+		return
+	} else if !dd3.vc.IsDescendent(dd2.vc) || dd3.vc.IsConflicting(dd2.vc) {
+		t.Error("dd3 should be a descendent of dd2")
+		return
+	}
+
+	if out := dd3.vc.String(); out != `
+Alice:1
+Ben:1
+Cathy:1
+Dave:2
+`[1:] {
+		t.Error("Unexpected output:", out)
+		return
+	}
+}

+ 710 - 0
stringutil/stringutil.go

@@ -0,0 +1,710 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+/*
+Package stringutil contains common function for string operations.
+*/
+package stringutil
+
+import (
+	"bytes"
+	"crypto/md5"
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
/*
LongestCommonPrefix determines the longest common prefix of a given list of strings.
*/
func LongestCommonPrefix(s []string) string {
	var res string

	// commonPrefix returns the longest common prefix of two strings.
	// The comparison is done rune-wise so multi-byte characters are
	// handled correctly. Note: the previous implementation did not stop
	// at the first mismatch (collecting later coincidental matches, e.g.
	// "abc"/"adc" yielded "ac") and mixed byte indexes with rune indexes.

	commonPrefix := func(str1, str2 string) string {
		rs1 := []rune(str1)
		rs2 := []rune(str2)

		max := len(rs1)
		if len(rs2) < max {
			max = len(rs2)
		}

		i := 0
		for i < max && rs1[i] == rs2[i] {
			i++
		}

		return string(rs1[:i])
	}

	if len(s) > 0 {

		// Fold the pairwise prefix over the whole list

		res = s[0]

		for _, item := range s[1:] {
			res = commonPrefix(res, item)
		}
	}

	return res
}
+
/*
PrintStringTable prints a given list of strings as table with c columns.
Rows are filled left to right and each column is padded to its widest
entry (width measured in runes). Returns an empty string if c < 1.
*/
func PrintStringTable(ss []string, c int) string {
	var ret bytes.Buffer

	if c < 1 {
		return ""
	}

	//  Determine max widths of columns

	maxWidths := make(map[int]int)

	for i, s := range ss {
		col := i % c

		if l := utf8.RuneCountInString(s); l > maxWidths[col] {
			maxWidths[col] = l
		}
	}

	for i, s := range ss {
		col := i % c

		if i < len(ss)-1 {
			var formatString string

			// Pad every column of a row except the last one

			if col != c-1 {
				formatString = fmt.Sprintf("%%-%vv ", maxWidths[col])
			} else {
				formatString = "%v"
			}

			ret.WriteString(fmt.Sprintf(formatString, s))

		} else {

			// Last element overall - terminate the line and stop

			ret.WriteString(fmt.Sprintln(s))
			break
		}

		// End of a full row

		if col == c-1 {
			ret.WriteString(fmt.Sprintln())
		}
	}

	return ret.String()
}
+
/*
GraphicStringTableSymbols defines how to draw a graphic table.
*/
type GraphicStringTableSymbols struct {
	BoxHorizontal string // Horizontal line segment
	BoxVertical   string // Vertical line segment
	BoxMiddle     string // Crossing of a horizontal and a vertical line

	BoxCornerTopLeft     string
	BoxCornerTopRight    string
	BoxCornerBottomLeft  string
	BoxCornerBottomRight string

	BoxTopMiddle    string // T-junction on the top border
	BoxLeftMiddle   string // T-junction on the left border
	BoxRightMiddle  string // T-junction on the right border
	BoxBottomMiddle string // T-junction on the bottom border
}

/*
Standard graphic table drawing definitions.
*/
var (
	SingleLineTable       = &GraphicStringTableSymbols{"─", "│", "┼", "┌", "┐", "└", "┘", "┬", "├", "┤", "┴"}
	DoubleLineTable       = &GraphicStringTableSymbols{"═", "║", "╬", "╔", "╗", "╚", "╝", "╦", "╠", "╣", "╩"}
	SingleDoubleLineTable = &GraphicStringTableSymbols{"═", "│", "╪", "╒", "╕", "╘", "╛", "╤", "╞", "╡", "╧"}
	DoubleSingleLineTable = &GraphicStringTableSymbols{"─", "║", "╫", "╓", "╖", "╙", "╜", "╥", "╟", "╢", "╨"}
	MonoTable             = &GraphicStringTableSymbols{"#", "#", "#", "#", "#", "#", "#", "#", "#", "#", "#"}
)
+
+/*
+PrintGraphicStringTable prints a given list of strings in a graphic table
+with c columns - creates a header after n rows using syms as drawing symbols.
+*/
+func PrintGraphicStringTable(ss []string, c int, n int, syms *GraphicStringTableSymbols) string {
+	var topline, bottomline, middleline, ret bytes.Buffer
+
+	if c < 1 {
+		return ""
+	}
+
+	if syms == nil {
+		syms = MonoTable
+	}
+
+	//  Determine max widths of columns
+
+	maxWidths := make(map[int]int)
+
+	for i, s := range ss {
+		col := i % c
+
+		l := utf8.RuneCountInString(s)
+
+		if l > maxWidths[col] {
+			maxWidths[col] = l
+		}
+	}
+
+	// Determine total width and create top, middle and bottom line
+
+	totalWidth := 1
+	topline.WriteString(syms.BoxCornerTopLeft)
+	bottomline.WriteString(syms.BoxCornerBottomLeft)
+	middleline.WriteString(syms.BoxLeftMiddle)
+
+	for i := 0; i < len(maxWidths); i++ {
+		totalWidth += maxWidths[i] + 2
+
+		topline.WriteString(GenerateRollingString(syms.BoxHorizontal, maxWidths[i]+1))
+		bottomline.WriteString(GenerateRollingString(syms.BoxHorizontal, maxWidths[i]+1))
+		middleline.WriteString(GenerateRollingString(syms.BoxHorizontal, maxWidths[i]+1))
+
+		if i < len(maxWidths)-1 {
+			topline.WriteString(syms.BoxTopMiddle)
+			bottomline.WriteString(syms.BoxBottomMiddle)
+			middleline.WriteString(syms.BoxMiddle)
+		}
+	}
+
+	topline.WriteString(syms.BoxCornerTopRight)
+	bottomline.WriteString(syms.BoxCornerBottomRight)
+	middleline.WriteString(syms.BoxRightMiddle)
+
+	// Draw the table
+
+	ret.WriteString(topline.String())
+	ret.WriteString(fmt.Sprintln())
+
+	row := 0
+	for i, s := range ss {
+		col := i % c
+
+		ret.WriteString(syms.BoxVertical)
+
+		if i < len(ss)-1 {
+			formatString := fmt.Sprintf("%%-%vv ", maxWidths[col])
+			ret.WriteString(fmt.Sprintf(formatString, s))
+		} else {
+			formatString := fmt.Sprintf("%%-%vv ", maxWidths[col])
+			ret.WriteString(fmt.Sprintf(formatString, s))
+
+			for col < c-1 && col < len(ss)-1 {
+				col++
+				ret.WriteString(syms.BoxVertical)
+				ret.WriteString(GenerateRollingString(" ", maxWidths[col]))
+				ret.WriteString(" ")
+			}
+
+			ret.WriteString(syms.BoxVertical)
+			ret.WriteString(fmt.Sprintln())
+
+			break
+		}
+
+		if col == c-1 {
+			ret.WriteString(syms.BoxVertical)
+			ret.WriteString(fmt.Sprintln())
+			row++
+
+			if row == n {
+				ret.WriteString(middleline.String())
+				ret.WriteString(fmt.Sprintln())
+			}
+		}
+	}
+
+	ret.WriteString(bottomline.String())
+	ret.WriteString(fmt.Sprintln())
+
+	return ret.String()
+}
+
/*
PrintCSVTable prints a given list of strings in a CSV table with c
columns.
*/
func PrintCSVTable(ss []string, c int) string {
	if c < 1 || len(ss) == 0 {
		return ""
	}

	var out bytes.Buffer
	var col int

	// Write the table

	for i, s := range ss {
		col = i % c

		out.WriteString(strings.TrimSpace(fmt.Sprint(s)))

		switch {
		case col == c-1:
			out.WriteString(fmt.Sprintln())
		case i < len(ss)-1:
			out.WriteString(", ")
		}
	}

	// Terminate the last line if the final row was not full

	if col != c-1 {
		out.WriteString(fmt.Sprintln())
	}

	return out.String()
}
+
/*
RuneSliceToString converts a slice of runes into a string.
*/
func RuneSliceToString(buf []rune) string {

	// A direct conversion encodes every rune as UTF-8 which is exactly
	// what the previous fmt.Fprintf("%c") loop produced - just without
	// the per-rune formatting overhead.

	return string(buf)
}
+
/*
StringToRuneSlice converts a string into a slice of runes.
*/
func StringToRuneSlice(s string) []rune {

	// Preserve the previous behaviour of returning nil for an empty
	// string (the append loop never appended in that case)

	if s == "" {
		return nil
	}

	// Built-in conversion replaces the previous manual append loop

	return []rune(s)
}
+
/*
Plural returns the string 's' if the parameter is greater than one or
if the parameter is 0.
*/
func Plural(l int) string {
	switch {
	case l == 0, l > 1:
		return "s"
	default:
		// One item (and, as before, any negative count) stays singular
		return ""
	}
}
+
/*
GlobParseError describes a failure to parse a glob expression
and gives the offending expression.
*/
type GlobParseError struct {
	Msg  string
	Pos  int
	Glob string
}

/*
Error returns a string representation of the error.
*/
func (e *GlobParseError) Error() string {
	return fmt.Sprint(e.Msg, " at ", e.Pos, " of ", e.Glob)
}
+
/*
GlobToRegex converts a given glob expression into a regular expression.

Translation rules: '*' becomes '.*' and '?' becomes '.'; '{a,b}' groups
become non-capturing alternations '(?:a|b)'; '[...]' character classes are
passed through with '[!' translated to '[^'; regex metacharacters without
glob meaning are escaped. Returns a GlobParseError for an unbalanced
character class or group, or for a trailing backslash.
*/
func GlobToRegex(glob string) (string, error) {

	buf := new(bytes.Buffer)
	brackets, braces := 0, 0
	n := len(glob)

	for i := 0; i < n; i++ {
		char := glob[i]

		// Cases ending in 'continue' have already written their full
		// translation; all other cases fall through to the final
		// buf.WriteByte(char) below (so '*' emits ".*" etc.)

		switch char {
		case '\\':
			// Escapes
			i++
			if i >= n {
				return "", &GlobParseError{"Missing escaped character", i, glob}
			}
			buf.WriteByte(char)
			buf.WriteByte(glob[i])
			continue

		case '*':
			// Wildcard match multiple characters
			buf.WriteByte('.')
		case '?':
			// Wildcard match any single character
			buf.WriteByte('.')
			continue
		case '{':
			// Group (always non-capturing)
			buf.WriteString("(?:")
			braces++
			continue
		case '}':
			// End of group
			if braces > 0 {
				braces--
				buf.WriteByte(')')
				continue
			}
		case '[':
			// Character class
			if brackets > 0 {
				return "", &GlobParseError{"Unclosed character class", i, glob}
			}
			brackets++
		case ']':
			// End of character class
			brackets = 0
		case ',':
			// OR in groups
			if braces > 0 {
				buf.WriteByte('|')
			} else {
				buf.WriteByte(char)
			}
			continue
		case '^':
			// Beginning of line in character classes otherwise normal
			// escaped character
			if brackets == 0 {
				buf.WriteByte('\\')
			}
		case '!':
			// [! is the equivalent of [^ in glob
			if brackets > 0 && glob[i-1] == '[' {
				buf.WriteByte('^')
			} else {
				buf.WriteByte('!')
			}
			continue
		case '.', '$', '(', ')', '|', '+':
			// Escape all regex characters which are not glob characters
			buf.WriteByte('\\')
		}

		buf.WriteByte(char)
	}

	if brackets > 0 {
		return "", &GlobParseError{"Unclosed character class", n, glob}
	} else if braces > 0 {
		return "", &GlobParseError{"Unclosed group", n, glob}
	}

	return buf.String(), nil
}
+
/*
GlobStartingLiterals gets the first literals of a glob string.
*/
func GlobStartingLiterals(glob string) string {

	// A glob metacharacter terminates the literal prefix

	isMeta := func(b byte) bool {
		return b == '\\' || b == '*' || b == '?' || b == '{' || b == '['
	}

	end := len(glob)

	for i := 0; i < len(glob); i++ {
		if isMeta(glob[i]) {
			end = i
			break
		}
	}

	return glob[:end]
}
+
/*
LevenshteinDistance computes the Levenshtein distance between two strings
(two-row Wagner-Fischer algorithm, O(len(str2)) extra memory).
*/
func LevenshteinDistance(str1, str2 string) int {
	if str1 == str2 {
		return 0
	}

	// Operate on runes so multi-byte characters count as single edits

	rslice1 := []rune(str1)
	rslice2 := []rune(str2)

	n, m := len(rslice1), len(rslice2)

	if n == 0 {
		return m
	} else if m == 0 {
		return n
	}

	// v0 holds the previous row of the edit matrix, v1 the current row

	v0 := make([]int, m+1)
	v1 := make([]int, m+1)

	for i := 0; i <= m; i++ {
		v0[i] = i
	}

	for i := 0; i < n; i++ {
		v1[0] = i + 1

		for j := 0; j < m; j++ {
			cost := 1
			if rslice1[i] == rslice2[j] {
				cost = 0
			}

			// Minimum of insertion, deletion and substitution
			// (inlined to avoid a function call per matrix cell)

			d := v1[j] + 1
			if v0[j+1]+1 < d {
				d = v0[j+1] + 1
			}
			if v0[j]+cost < d {
				d = v0[j] + cost
			}

			v1[j+1] = d
		}

		v0, v1 = v1, v0
	}

	return v0[m]
}
+
/*
3 way min for computing the Levenshtein distance.
*/
func min3(a, b, c int) int {
	if a <= b {
		if a <= c {
			return a
		}
		return c
	}
	if b <= c {
		return b
	}
	return c
}
+
/*
VersionStringCompare compares two version strings. Returns: 0 if the strings are
equal; -1 if the first string is smaller; 1 if the first string is greater.

Versions are compared component-wise ("1.2.3" -> "1", "2", "3"); the first
differing component decides the result and a version with more components
is considered greater (e.g. "1.2.1" > "1.2").
*/
func VersionStringCompare(str1, str2 string) int {
	val1 := strings.Split(str1, ".")
	val2 := strings.Split(str2, ".")

	idx := 0

	// Skip over components which are identical

	for idx < len(val1) && idx < len(val2) && val1[idx] == val2[idx] {
		idx++
	}

	switch {
	case idx < len(val1) && idx < len(val2):
		// Both versions have a differing component - compare it
		return versionStringPartCompare(val1[idx], val2[idx])
	case len(val1) > len(val2):
		return 1
	case len(val1) < len(val2):
		return -1
	}
	return 0
}
+
/*
versionStringPartPattern splits a version string part into a leading numeric
component and an arbitrary suffix. Compiled once at package level so the
comparison function does not recompile it on every call.
*/
var versionStringPartPattern = regexp.MustCompile(`^([0-9]+)(\D.*)?`)

/*
versionStringPartCompare compares two version string parts. Returns: 0 if the
strings are equal; -1 if the first string is smaller; 1 if the first string is
greater.
*/
func versionStringPartCompare(str1, str2 string) int {
	res1 := versionStringPartPattern.FindStringSubmatch(str1)
	res2 := versionStringPartPattern.FindStringSubmatch(str2)

	switch {
	case res1 == nil && res2 == nil:
		// Neither part starts with a number - plain string comparison
		return strings.Compare(str1, str2)
	case res1 == nil && res2 != nil:
		return -1
	case res1 != nil && res2 == nil:
		return 1
	}

	// Both parts start with a number - compare numerically first

	v1, _ := strconv.Atoi(res1[1])
	v2, _ := strconv.Atoi(res2[1])

	res := 0

	switch {
	case v1 > v2:
		res = 1
	case v1 < v2:
		res = -1
	}

	if res == 0 {

		// Numbers are equal - a part with a suffix is considered greater

		switch {
		case res1[2] != "" && res2[2] == "":
			return 1
		case res1[2] == "" && res2[2] != "":
			return -1
		case res1[2] != "" && res2[2] != "":
			return strings.Compare(res1[2], res2[2])
		}
	}

	return res
}
+
/*
alphaNumericPattern matches strings consisting entirely of ASCII letters,
digits and underscores. Compiled once at package level to avoid
recompilation on every IsAlphaNumeric call.
*/
var alphaNumericPattern = regexp.MustCompile("^[a-zA-Z0-9_]*$")

/*
IsAlphaNumeric checks if a string contains only alpha numerical characters or "_".
The empty string is considered alphanumeric.
*/
func IsAlphaNumeric(str string) bool {
	return alphaNumericPattern.MatchString(str)
}
+
/*
IsTrueValue checks if a given string is a true value.
*/
func IsTrueValue(str string) bool {

	// Comparison is case-insensitive

	switch strings.ToLower(str) {
	case "true", "yes", "on", "ok", "1", "active", "enabled":
		return true
	}

	return false
}
+
/*
IndexOf returns the index of str in slice or -1 if it does not exist.
*/
func IndexOf(str string, slice []string) int {
	for i := 0; i < len(slice); i++ {
		if slice[i] == str {
			return i
		}
	}

	return -1
}
+
/*
MapKeys returns the keys of a map as a sorted list.
*/
func MapKeys(m map[string]interface{}) []string {
	keys := make([]string, len(m))

	i := 0
	for k := range m {
		keys[i] = k
		i++
	}

	sort.Strings(keys)

	return keys
}
+
/*
GenerateRollingString creates a string by repeating a given string pattern.
The pattern is truncated after size runes; an empty pattern or a
non-positive size yields an empty string.
*/
func GenerateRollingString(seq string, size int) string {
	rs := []rune(seq)

	if len(rs) == 0 || size <= 0 {
		return ""
	}

	out := make([]rune, size)
	for i := range out {
		out[i] = rs[i%len(rs)]
	}

	return string(out)
}
+
/*
ConvertToString tries to convert a given object into a stable string. This
function can be used to display nested maps.
*/
func ConvertToString(v interface{}) string {

	// Objects which know how to render themselves take precedence

	if vStringer, ok := v.(fmt.Stringer); ok {
		return vStringer.String()
	}

	// Probe whether the value is JSON-serializable as-is; if not, convert
	// container contents (e.g. map[interface{}]interface{} keys) into
	// string-keyed equivalents first

	if _, err := json.Marshal(v); err != nil {
		v = containerStringConvert(v)
	}

	// Plain strings are returned verbatim, everything else as JSON

	if vString, ok := v.(string); ok {
		return vString
	} else if res, err := json.Marshal(v); err == nil {
		return string(res)
	}

	// Fall back to Go's default formatting

	return fmt.Sprint(v)
}
+
/*
containerStringConvert converts container contents into strings. Maps with
interface{} keys are rewritten as string-keyed maps (so they can be JSON
marshalled) and lists are converted element by element; any other value is
returned unchanged.
*/
func containerStringConvert(v interface{}) interface{} {
	res := v

	if mapContainer, ok := v.(map[interface{}]interface{}); ok {

		// Convert keys via ConvertToString and recurse into the values

		newRes := make(map[string]interface{})

		for mk, mv := range mapContainer {
			newRes[ConvertToString(mk)] = containerStringConvert(mv)
		}

		res = newRes

	} else if mapList, ok := v.([]interface{}); ok {

		// Recurse into every list element

		newRes := make([]interface{}, len(mapList))

		for i, lv := range mapList {
			newRes[i] = containerStringConvert(lv)
		}

		res = newRes
	}

	return res
}
+
/*
MD5HexString calculates the MD5 sum of a string and returns it as hex string.
*/
func MD5HexString(str string) string {
	sum := md5.Sum([]byte(str))

	return fmt.Sprintf("%x", sum)
}
+
/*
LengthConstantEquals compares two strings in length-constant time. This
function is deliberately inefficient in that it does not stop at the earliest
possible time. This is to prevent timing attacks when comparing password
hashes.
*/
func LengthConstantEquals(str1 []byte, str2 []byte) bool {

	// Start with the length difference so unequal lengths always fail

	res := len(str1) ^ len(str2)

	limit := len(str1)
	if len(str2) < limit {
		limit = len(str2)
	}

	// Accumulate every byte difference instead of returning early

	for i := 0; i < limit; i++ {
		res |= int(str1[i] ^ str2[i])
	}

	return res == 0
}

+ 596 - 0
stringutil/stringutil_test.go

@@ -0,0 +1,596 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package stringutil
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"sync"
+	"testing"
+)
+
+// TestLongestCommonPrefix checks prefix extraction for empty, single
+// element, subset and disjoint input slices.
+func TestLongestCommonPrefix(t *testing.T) {
+
+	if res := LongestCommonPrefix([]string{}); res != "" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := LongestCommonPrefix([]string{"test"}); res != "test" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := LongestCommonPrefix([]string{"tester", "test"}); res != "test" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := LongestCommonPrefix([]string{"foo", "test"}); res != "" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// NOTE(review): duplicate of the previous check - kept as-is
+	if res := LongestCommonPrefix([]string{"foo", "test"}); res != "" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := LongestCommonPrefix([]string{"foo2", "foo1", "footest"}); res != "foo" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+// TestPrintStringTable checks plain table layout for nil input and for
+// wrapping seven entries into 4 and 3 columns.
+func TestPrintStringTable(t *testing.T) {
+
+	if res := PrintStringTable(nil, 0); res != "" {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	test1 := []string{"foo", "bar", "tester", "1", "xxx", "test", "te"}
+
+	if res := PrintStringTable(test1, 4); res != `
+foo bar  tester 1
+xxx test te
+`[1:] {
+		t.Error("Unexpected result:\n", "#"+res+"#")
+		return
+	}
+
+	if res := PrintStringTable(test1, 3); res != `
+foo bar tester
+1   xxx test
+te
+`[1:] {
+		t.Error("Unexpected result:\n", "#"+res+"#")
+		return
+	}
+}
+
+// TestRuneSlice checks that string -> rune slice -> string round-trips.
+func TestRuneSlice(t *testing.T) {
+	sl := StringToRuneSlice("test")
+
+	if fmt.Sprint(sl) != "[116 101 115 116]" {
+		t.Error("Unexpected result:", sl)
+		return
+	}
+
+	if RuneSliceToString(sl) != "test" {
+		t.Error("Unexpected result:", sl)
+		return
+	}
+}
+
+// TestPluralCompareByteArray checks the plural suffix for 0, 1 and 2 items.
+func TestPluralCompareByteArray(t *testing.T) {
+	if fmt.Sprintf("There are 2 test%s", Plural(2)) != "There are 2 tests" {
+		t.Error("2 items should have an 's'")
+		return
+	}
+	if fmt.Sprintf("There is 1 test%s", Plural(1)) != "There is 1 test" {
+		t.Error("1 item should have no 's'")
+		return
+	}
+
+	if fmt.Sprintf("There are 0 test%s", Plural(0)) != "There are 0 tests" {
+		t.Error("0 items should have an 's'")
+		return
+	}
+}
+
+// TestGlobToRegex checks glob translation: wildcards, escapes, character
+// classes (both ^ and ! negation), groups, starting-literal extraction and
+// the error messages for malformed globs.
+func TestGlobToRegex(t *testing.T) {
+	globMatch(t, true, "*", "^$", "foo", "bar")
+	globMatch(t, true, "?", "?", "^", "[", "]", "$")
+	globMatch(t, true, "foo*", "foo", "food", "fool")
+	globMatch(t, true, "f*d", "fud", "food")
+	globMatch(t, true, "*d", "good", "bad")
+	globMatch(t, true, "\\*\\?\\[\\{\\\\", "*?[{\\")
+	globMatch(t, true, "[]^-]", "]", "-", "^")
+	globMatch(t, true, "]", "]")
+	globMatch(t, true, "^.$()|+", "^.$()|+")
+	globMatch(t, true, "[^^]", ".", "$", "[", "]")
+	globMatch(t, false, "[^^]", "^")
+	globMatch(t, true, "[!!-]", "^", "?")
+	globMatch(t, false, "[!!-]", "!", "-")
+	globMatch(t, true, "{[12]*,[45]*,[78]*}", "1", "2!", "4", "42", "7", "7$")
+	globMatch(t, false, "{[12]*,[45]*,[78]*}", "3", "6", "9ß")
+	globMatch(t, true, "}", "}")
+	globMatch(t, true, "abc,", "abc,")
+
+	globMatch(t, true, "myfile[^9]", "myfile1")
+	globMatch(t, true, "myfile[!9]", "myfile1")
+	globMatch(t, false, "myfile[^9]", "myfile9")
+	globMatch(t, false, "myfile[!9]", "myfile9")
+
+	globMatch(t, true, "*.*", "tester/bla.txt")
+	globMatch(t, false, "*.tmp", "tester/bla.txt")
+
+	// Leading literal prefix extraction
+
+	testdata := []string{"foo*test", "f?t", "*d", "all"}
+	expected := []string{"foo", "f", "", "all"}
+
+	for i, str := range testdata {
+		res := GlobStartingLiterals(str)
+
+		if res != expected[i] {
+			t.Error("Unexpected starting literal for glob:", res, "str:",
+				str, "expected:", expected[i])
+		}
+	}
+
+	// Malformed globs must produce descriptive errors
+
+	testdata = []string{"[", "{", "\\", "*.*\\", "[["}
+	expected = []string{"Unclosed character class at 1 of [",
+		"Unclosed group at 1 of {",
+		"Missing escaped character at 1 of \\",
+		"Missing escaped character at 4 of *.*\\",
+		"Unclosed character class at 1 of [["}
+
+	for i, str := range testdata {
+		_, err := GlobToRegex(str)
+
+		if err.Error() != expected[i] {
+			t.Error("Unexpected error for glob:", err, "str:",
+				str, "expected error:", expected[i])
+		}
+	}
+
+	if str, err := GlobToRegex("[][]"); str != "[][]" || err != nil {
+		t.Error("Unecpected glob parsing result:", str, err)
+	}
+
+	if str, err := GlobToRegex(")"); str != "\\)" || err != nil {
+		t.Error("Unecpected glob parsing result:", str, err)
+	}
+}
+
+// globMatch translates glob to a regex and asserts that every given test
+// string does (or does not, per expectedResult) match it.
+func globMatch(t *testing.T, expectedResult bool, glob string, testStrings ...string) {
+	re, err := GlobToRegex(glob)
+	if err != nil {
+		t.Error("Glob parsing error:", err)
+	}
+	for _, testString := range testStrings {
+		res, err := regexp.MatchString(re, testString)
+		if err != nil {
+			t.Error("Regexp", re, "parsing error:", err, "from glob", glob)
+		}
+		if res != expectedResult {
+			t.Error("Unexpected evaluation result. Glob:", glob, "testString:",
+				testString, "expectedResult:", expectedResult)
+		}
+	}
+}
+
+// TestLevenshteinDistance checks edit distances over a table of word pairs
+// including empty strings and equal strings.
+func TestLevenshteinDistance(t *testing.T) {
+	testdata1 := []string{"", "a", "", "abc", "", "a", "abc", "a", "b", "ac",
+		"abcdefg", "a", "ab", "example", "sturgeon", "levenshtein", "distance"}
+	testdata2 := []string{"", "", "a", "", "abc", "a", "abc", "ab", "ab", "abc",
+		"xabxcdxxefxgx", "b", "ac", "samples", "urgently", "frankenstein", "difference"}
+	expected := []int{0, 1, 1, 3, 3, 0, 0, 1, 1, 1, 6, 1, 1,
+		3, 6, 6, 5}
+
+	for i, str1 := range testdata1 {
+		res := LevenshteinDistance(str1, testdata2[i])
+
+		if res != expected[i] {
+			t.Error("Unexpected Levenshtein distance result:", res, "str1:",
+				str1, "str2:", testdata2[i], "expected:", expected[i])
+		}
+	}
+}
+
+// TestVersionStringCompare checks ordering of dotted version strings
+// including alphanumeric suffixes (expected: -1 / 0 / 1).
+func TestVersionStringCompare(t *testing.T) {
+	testdata1 := []string{"1", "1.1", "1.1", "2.1", "5.4.3.2.1", "1.674.2.18",
+		"1.674.2", "1.674.2.5", "2.4.18.14smp", "2.4.18.15smp", "1.2.3a1",
+		"2.18.15smp"}
+	testdata2 := []string{"2", "2.0", "1.1", "2.0", "6.5.4.3.2", "1.674.2.5",
+		"1.674.2.5", "1.674.2", "2.4.18.14smp", "2.4.18.14smp", "1.2.3b1",
+		"2.4.18.14smp"}
+
+	expected := []int{-1, -1, 0, 1, -1, 1, -1, 1, 0, 1, -1, 1}
+
+	for i, str1 := range testdata1 {
+		res := VersionStringCompare(str1, testdata2[i])
+
+		if res != expected[i] {
+			t.Error("Unexpected version string compare result:", res, "str1:",
+				str1, "str2:", testdata2[i])
+		}
+	}
+}
+
+// TestVersionStringPartCompare checks comparison of single version
+// components (numeric prefix first, then the alphanumeric remainder).
+func TestVersionStringPartCompare(t *testing.T) {
+
+	testdata1 := []string{"", "", "1", "1", "a", "1a", "a", "1a", "1a", "1", "12a", "12a1",
+		"12a1"}
+	testdata2 := []string{"", "1", "", "2", "b", "b", "2b", "2b", "1", "1b", "12b", "12a2",
+		"12b1"}
+	expected := []int{0, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1}
+
+	for i, str1 := range testdata1 {
+		res := versionStringPartCompare(str1, testdata2[i])
+
+		if res != expected[i] {
+			t.Error("Unexpected version string compare result:", res, "str1:",
+				str1, "str2:", testdata2[i])
+		}
+	}
+}
+
+// TestIsAlphaNumeric checks that letters, digits and underscores pass
+// while '#' and '-' fail.
+func TestIsAlphaNumeric(t *testing.T) {
+	testdata := []string{"test", "123test", "test1234_123", "test#", "test-"}
+	expected := []bool{true, true, true, false, false}
+
+	for i, str := range testdata {
+		if IsAlphaNumeric(str) != expected[i] {
+			t.Error("Unexpected result for alphanumeric test:", str)
+		}
+	}
+}
+
+func TestIsTrueValue(t *testing.T) {
+	testdata := []string{"1", "ok", "1", "FaLse", "0"}
+	expected := []bool{true, true, true, false, false}
+
+	for i, str := range testdata {
+		if IsTrueValue(str) != expected[i] {
+			t.Error("Unexpected result for alphanumeric test:", str)
+		}
+	}
+}
+
+// TestIndexOf checks that IndexOf finds each slice element at its position
+// and returns -1 for a missing element.
+func TestIndexOf(t *testing.T) {
+	slice := []string{"foo", "bar", "test"}
+
+	if res := IndexOf("foo", slice); res != 0 {
+		t.Error("Unexpected result", res)
+		return
+	}
+	if res := IndexOf("bar", slice); res != 1 {
+		t.Error("Unexpected result", res)
+		return
+	}
+	if res := IndexOf("test", slice); res != 2 {
+		t.Error("Unexpected result", res)
+		return
+	}
+	if res := IndexOf("hans", slice); res != -1 {
+		t.Error("Unexpected result", res)
+		return
+	}
+}
+
+// TestMapKeys checks that MapKeys returns the keys of a map in sorted order.
+func TestMapKeys(t *testing.T) {
+	testMap := map[string]interface{}{
+		"1": "2",
+		"3": "4",
+		"5": "6",
+	}
+
+	if res := MapKeys(testMap); fmt.Sprint(res) != "[1 3 5]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+func TestGenerateRollingString(t *testing.T) {
+	testdata := []string{"_-=-_", "abc", "=", ""}
+	testlen := []int{20, 4, 5, 100}
+	expected := []string{"_-=-__-=-__-=-__-=-_", "abca", "=====", ""}
+
+	for i, str := range testdata {
+		res := GenerateRollingString(str, testlen[i])
+		if res != expected[i] {
+			t.Error("Unexpected result for creating a roling string from:", str,
+				"result:", res, "expected:", expected[i])
+		}
+	}
+}
+
+// TestConvertToString checks stable string conversion of plain values,
+// JSON-serializable containers, maps with interface{} keys, values that
+// defeat the JSON encoder (sync.Mutex, struct-keyed maps) and Stringers.
+func TestConvertToString(t *testing.T) {
+
+	if res := ConvertToString(""); res != "" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString("test"); res != "test" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(4.123); res != "4.123" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(6); res != "6" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(map[string]int{"z": 1, "d": 2, "a": 4}); res != `{"a":4,"d":2,"z":1}` {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString([]int{1, 2, 3}); res != "[1,2,3]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(map[interface{}]interface{}{"z": 1, "d": 2, "a": 4}); res != `{"a":4,"d":2,"z":1}` {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(map[interface{}]interface{}{"z": []interface{}{1, 2, 3}, "d": 2, "a": 4}); res != `{"a":4,"d":2,"z":[1,2,3]}` {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString([]interface{}{1, sync.Mutex{}, 3}); res != `[1,{},3]` {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString([]interface{}{1, map[interface{}]interface{}{1: 2}, 3}); res != `[1,{"1":2},3]` {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if res := ConvertToString(&bytes.Buffer{}); res != "" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// Not much to do with such a construct but we shouldn't fail!
+
+	type foo struct{ i int }
+
+	x := make(map[foo]foo)
+	x[foo{1}] = foo{2}
+
+	if res := ConvertToString(x); res != "map[{1}:{2}]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+// TestMD5HexString checks the hex encoded MD5 digest against a known value.
+func TestMD5HexString(t *testing.T) {
+	res := MD5HexString("This is a test")
+	if res != "ce114e4501d2f4e2dcea3e17b546f339" {
+		t.Error("Unexpected md5 hex result", res)
+
+	}
+}
+
+// TestLengthConstantEquals checks equality, inequality and mismatched
+// lengths for the timing-safe comparison.
+func TestLengthConstantEquals(t *testing.T) {
+
+	if !LengthConstantEquals([]byte("test1"), []byte("test1")) {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if LengthConstantEquals([]byte("test1"), []byte("test2")) {
+		t.Error("Unexpected result")
+		return
+	}
+
+	if LengthConstantEquals([]byte("test1"), []byte("test2test123")) {
+		t.Error("Unexpected result")
+		return
+	}
+}
+
+// TestPrintGraphicStringTable checks box-drawing table output for all four
+// border styles (default '#', single, single/double, double/single, double),
+// CSV output, padded/unpadded last rows and header-row separators.
+func TestPrintGraphicStringTable(t *testing.T) {
+
+	if res := PrintGraphicStringTable(nil, 0, 5, nil); res != "" {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintGraphicStringTable([]string{}, 4, 5, SingleLineTable); res != `
+┌┐
+└┘
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintCSVTable([]string{}, 4); res != "" {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	test1 := []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo",
+		"bar", "tester", "1"}
+
+	if res := PrintGraphicStringTable(test1, 4, 5, SingleLineTable); res != `
+┌────┬───────┬───────┬────┐
+│foo │bar    │tester │1   │
+│xxx │test   │te     │foo │
+│bar │tester │1      │    │
+└────┴───────┴───────┴────┘
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintCSVTable(test1, 4); res != `
+foo, bar, tester, 1
+xxx, test, te, foo
+bar, tester, 1
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	test1 = []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo",
+		"bar"}
+
+	if res := PrintGraphicStringTable(test1, 4, 5, nil); res != `
+#########################
+#foo #bar  #tester #1   #
+#xxx #test #te     #foo #
+#bar #     #       #    #
+#########################
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	test1 = []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo"}
+
+	if res := PrintGraphicStringTable(test1, 4, 5, nil); res != `
+#########################
+#foo #bar  #tester #1   #
+#xxx #test #te     #foo #
+#########################
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+	test1 = []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo"}
+
+	if res := PrintGraphicStringTable(test1, 1, 2, SingleLineTable); res != `
+┌───────┐
+│foo    │
+│bar    │
+├───────┤
+│tester │
+│1      │
+│xxx    │
+│test   │
+│te     │
+│foo    │
+└───────┘
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintCSVTable(test1, 1); res != `
+foo
+bar
+tester
+1
+xxx
+test
+te
+foo
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintGraphicStringTable(test1, 100, 0, nil); res != `
+##########################################
+#foo #bar #tester #1 #xxx #test #te #foo #
+##########################################
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	test1 = []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo"}
+
+	if res := PrintGraphicStringTable(test1, 4, 5, SingleLineTable); res != `
+┌────┬─────┬───────┬────┐
+│foo │bar  │tester │1   │
+│xxx │test │te     │foo │
+└────┴─────┴───────┴────┘
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+	test1 = []string{"foo", "bar", "tester", "1", "xxx", "test", "te", "foo"}
+
+	if res := PrintGraphicStringTable(test1, 1, 2, SingleDoubleLineTable); res != `
+╒═══════╕
+│foo    │
+│bar    │
+╞═══════╡
+│tester │
+│1      │
+│xxx    │
+│test   │
+│te     │
+│foo    │
+╘═══════╛
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintGraphicStringTable(test1, 1, 2, DoubleSingleLineTable); res != `
+╓───────╖
+║foo    ║
+║bar    ║
+╟───────╢
+║tester ║
+║1      ║
+║xxx    ║
+║test   ║
+║te     ║
+║foo    ║
+╙───────╜
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintGraphicStringTable(test1, 1, 2, DoubleLineTable); res != `
+╔═══════╗
+║foo    ║
+║bar    ║
+╠═══════╣
+║tester ║
+║1      ║
+║xxx    ║
+║test   ║
+║te     ║
+║foo    ║
+╚═══════╝
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+	if res := PrintGraphicStringTable(test1, 100, 0, SingleLineTable); res != `
+┌────┬────┬───────┬──┬────┬─────┬───┬────┐
+│foo │bar │tester │1 │xxx │test │te │foo │
+└────┴────┴───────┴──┴────┴─────┴───┴────┘
+`[1:] {
+		t.Error("Unexpected result:\n", "#\n"+res+"#")
+		return
+	}
+
+}

+ 163 - 0
stringutil/transform.go

@@ -0,0 +1,163 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package stringutil
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"math"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+// Matches C style line comments (including the terminating newline) and
+// block comments; (?s) lets '.' span newlines inside block comments.
+// NOTE(review): identifier is missing a 't' ("cSyle") - kept for compatibility.
+var cSyleCommentsRegexp = regexp.MustCompile("(?s)//.*?\n|/\\*.*?\\*/")
+
+/*
+StripCStyleComments strips out C-Style comments from a given string.
+The newline terminating a line comment is removed together with the comment.
+*/
+func StripCStyleComments(text []byte) []byte {
+	return cSyleCommentsRegexp.ReplaceAll(text, nil)
+}
+
+/*
+CreateDisplayString changes all "_" characters into spaces and properly capitalizes
+the resulting string.
+*/
+func CreateDisplayString(str string) string {
+	if len(str) == 0 {
+		return ""
+	}
+
+	return ProperTitle(strings.Replace(str, "_", " ", -1))
+}
+
// Articles, coordinating conjunctions and short prepositions which should
// stay lowercase inside a title
//
var notCapitalize = map[string]string{
	"a": "", "an": "", "and": "", "at": "", "but": "", "by": "",
	"for": "", "from": "", "in": "", "nor": "", "on": "", "of": "",
	"or": "", "the": "", "to": "", "with": "",
}

/*
ProperTitle will properly capitalize a title string by capitalizing the first, last
and any important words. Not capitalized are articles: a, an, the; coordinating
conjunctions: and, but, or, for, nor; prepositions (fewer than five
letters): on, at, to, from, by.
*/
func ProperTitle(input string) string {
	words := strings.Fields(strings.ToLower(input))
	last := len(words) - 1

	for i, w := range words {
		_, keepLower := w, false
		_, keepLower = notCapitalize[w]

		// First and last words are always capitalized

		if !keepLower || i == 0 || i == last {
			words[i] = strings.Title(w)
		}
	}

	return strings.Join(words, " ")
}
+
/*
ToUnixNewlines converts all newlines in a given string to unix newlines.
*/
func ToUnixNewlines(s string) string {

	// Windows newlines first, then any remaining classic Mac newlines

	res := strings.ReplaceAll(s, "\r\n", "\n")
	return strings.ReplaceAll(res, "\r", "\n")
}
+
/*
TrimBlankLines removes blank initial and trailing lines.
*/
func TrimBlankLines(s string) string {

	// Strip any mix of leading/trailing carriage return and newline runes

	return strings.Trim(s, "\n\r")
}
+
/*
StripUniformIndentation removes uniform indentation from a string.

The smallest leading whitespace count over all non-blank lines is determined
and that many characters are removed from the start of every line. Blank
(whitespace-only) lines are emptied. A trailing newline is only present in
the output if the input ended in one.
*/
func StripUniformIndentation(s string) string {
	leadingWhitespace := func(line string) int {
		var count int

		// Count leading whitespaces in a string

		for _, r := range line {
			if unicode.IsSpace(r) || unicode.IsControl(r) {
				count++
			} else {
				return count
			}
		}

		return -1 // Special case line is full of whitespace
	}

	// Count the minimum number of leading whitespace excluding
	// empty lines

	minCount := math.MaxInt16
	reader := strings.NewReader(s)
	scanner := bufio.NewScanner(reader)

	for scanner.Scan() {
		if lw := leadingWhitespace(scanner.Text()); lw != -1 {
			if lw < minCount {
				minCount = lw
			}
		}
	}

	// Go through the string again and build up the output

	var buf bytes.Buffer

	reader.Seek(0, io.SeekStart)
	scanner = bufio.NewScanner(reader)

	for scanner.Scan() {
		line := scanner.Text()

		if strings.TrimSpace(line) != "" {

			// Skip the uniform indent; i is a byte index which matches
			// minCount for single-byte (ASCII) whitespace indentation

			for i, r := range line {
				if i >= minCount {
					buf.WriteRune(r)
				}
			}
		}

		buf.WriteString("\n")
	}

	// Prepare output string - remove the newline added by the loop above
	// if the input did not end in one; guard against empty input which
	// produces no output lines at all (previously this sliced an empty
	// string and panicked)

	ret := buf.String()

	if ret != "" && !strings.HasSuffix(s, "\n") {
		ret = ret[:len(ret)-1]
	}

	return ret
}

+ 103 - 0
stringutil/transform_test.go

@@ -0,0 +1,103 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package stringutil
+
+import (
+	"testing"
+)
+
+// TestStripCStyleComments checks removal of both line comments (including
+// their newline) and multi-line block comments.
+func TestStripCStyleComments(t *testing.T) {
+
+	test := `
+// Comment1
+This is a test
+/* A
+comment
+// Comment2
+  */ bla
+`
+
+	if out := string(StripCStyleComments([]byte(test))); out != `
+This is a test
+ bla
+` {
+		t.Error("Unexpected return:", out)
+		return
+	}
+}
+
+// TestCreateDisplayString checks underscore replacement and title
+// capitalization including small words which stay lowercase.
+func TestCreateDisplayString(t *testing.T) {
+	testdata := []string{"this is a tEST", "_bla", "a_bla", "a__bla", "a__b_la", "",
+		"a fool a to be to"}
+	expected := []string{"This Is a Test", "Bla", "A Bla", "A Bla", "A B La", "",
+		"A Fool a to Be To"}
+
+	for i, str := range testdata {
+		res := CreateDisplayString(str)
+		if res != expected[i] {
+			t.Error("Unexpected result for creating a display string from:", str,
+				"result:", res, "expected:", expected[i])
+		}
+	}
+}
+
+// TestStripUniformIndentation checks uniform indent removal with blank
+// lines, whitespace-only lines and a zero-indent baseline line.
+func TestStripUniformIndentation(t *testing.T) {
+
+	testdata := []string{`
+
+    aaa
+  aaa
+      aaa
+
+`, `
+  bbb
+    
+    xx xx
+  bbb
+  bbb`, `
+  ccc
+ccc
+    ccc
+ `}
+
+	expected := []string{`
+
+  aaa
+aaa
+    aaa
+
+`, `
+bbb
+
+  xx xx
+bbb
+bbb`, `
+  ccc
+ccc
+    ccc
+`}
+
+	for i, str := range testdata {
+		res := StripUniformIndentation(str)
+		if res != expected[i] {
+			t.Error("Unexpected result:", str,
+				"result: '"+res+"' expected:", expected[i])
+			return
+		}
+	}
+}
+
+// TestNewLineTransform checks newline normalization combined with blank
+// line trimming (inner whitespace is preserved).
+func TestNewLineTransform(t *testing.T) {
+	res := TrimBlankLines(ToUnixNewlines("\r\n  test123\r\ntest123\r\n"))
+	if res != "  test123\ntest123" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}

+ 246 - 0
termutil/autoterm.go

@@ -0,0 +1,246 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package termutil
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"devt.de/krotik/common/stringutil"
+	"devt.de/krotik/common/termutil/getch"
+)
+
+/*
+Dict is a dictionary object used by the AutoCompleteMixin to look up word
+suggestions while the user is typing.
+*/
+type Dict interface {
+
+	/*
+	   Suggest returns dictionary suggestions based on a given prefix. Returns if there
+	   is a direct match and a list of suggestions.
+	   NOTE(review): the signature returns only the suggestion list and an
+	   error; a direct match is represented by the prefix itself appearing
+	   in the returned list.
+	*/
+	Suggest(prefix string) ([]string, error)
+}
+
+/*
+autocompleteLineTerminalMixin adds auto-complete functionality to a given
+ConsoleLineTerminal. It decorates the embedded terminal with a key handler
+reacting to tab presses.
+*/
+type autocompleteLineTerminalMixin struct {
+	ConsoleLineTerminal      // Terminal which is being extended
+	dict                Dict // Dictionary to use for suggestions
+	tabCount            int  // Counter for tab presses (reset once a completion is applied)
+}
+
+/*
+AddAutoCompleteMixin adds auto-complete support for a given ConsoleLineTerminal.
+The auto-complete function operates on a given Dict object which suggests either
+a direct match or a list of matches. A single tab auto-completes if there is a
+direct match. Two tabs and the console outputs all suggestions.
+*/
+func AddAutoCompleteMixin(term ConsoleLineTerminal, dict Dict) (ConsoleLineTerminal, error) {
+
+	autoterm := &autocompleteLineTerminalMixin{term, dict, 0}
+
+	// Add key handler
+
+	autoterm.AddKeyHandler(autoterm.handleKeyInput)
+
+	return autoterm, nil
+}
+
+/*
+handleKeyInput handles the key input for the auto-complete mixin. On tab it
+asks the dictionary for suggestions on the current line: a single suggestion
+is applied directly, a common prefix of several suggestions is completed and
+a double tab prints the full suggestion table. Returns whether the line
+buffer was modified, the replacement buffer and any dictionary error.
+*/
+func (at *autocompleteLineTerminalMixin) handleKeyInput(e *getch.KeyEvent, buf []rune) (bool, []rune, error) {
+	var err error
+	var ret []rune
+
+	if e.Code == getch.KeyTab {
+		var suggestions []string
+
+		at.tabCount++
+
+		// Split the line into the already finished words and the word
+		// which is currently being completed
+
+		currentLine := stringutil.RuneSliceToString(buf)
+		words := strings.Split(currentLine, " ")
+		prefix := strings.Join(words[:len(words)-1], " ")
+		lastWord := words[len(words)-1]
+
+		if suggestions, err = at.dict.Suggest(currentLine); err == nil {
+			num := len(suggestions)
+
+			if num == 1 {
+				var newline string
+
+				if suggestions[0] == lastWord {
+
+					// Nothing more to auto-complete insert a space for next level suggestions
+
+					newline = fmt.Sprintf("%v ", currentLine)
+
+				} else {
+
+					// If there is only one suggestion we can use it
+
+					if prefix != "" {
+						newline = fmt.Sprintf("%v ", prefix)
+					}
+
+					newline = fmt.Sprintf("%v%v ", newline, suggestions[0])
+				}
+
+				ret = stringutil.StringToRuneSlice(newline)
+
+			} else if len(suggestions) > 1 {
+
+				// Several suggestions - auto-complete as far as the common
+				// prefix of all suggestions reaches
+
+				cp := stringutil.LongestCommonPrefix(suggestions)
+
+				if len(cp) > len(lastWord) {
+					var newline string
+
+					if prefix != "" {
+						newline = fmt.Sprintf("%v ", prefix)
+					}
+
+					ret = stringutil.StringToRuneSlice(fmt.Sprintf("%v%v", newline, cp))
+				}
+
+				if at.tabCount > 1 || ret == nil {
+
+					// There are multiple suggestions and tab was pressed more than once
+
+					at.WriteString(fmt.Sprintln())
+					at.WriteString(stringutil.PrintStringTable(suggestions, 4))
+
+					if at.tabCount == 2 {
+
+						// Check if at least one suggestion is the full string
+
+						for _, s := range suggestions {
+							if s == lastWord {
+								ret = stringutil.StringToRuneSlice(currentLine + " ")
+								break
+							}
+						}
+					}
+				}
+			}
+		}
+
+		// Reset the tab counter once a completion was produced
+
+		if ret != nil {
+			at.tabCount = 0
+		}
+	}
+
+	return ret != nil, ret, err
+}
+
+// Dictionaries
+// ============
+
+/*
+MultiWordDict models a dictionary which can present suggestions based on multiple
+words. Only suggestions for the last word are returned. However, these suggestions
+may depend on the preceding words.
+*/
+type MultiWordDict struct {
+	chooser DictChooser
+	dicts   map[string]Dict
+}
+
+/*
+DictChooser chooses a WordListDict based on given prefix words. The function
+also gets a presisted map of WordListDicts which can be used as a cache.
+*/
+type DictChooser func([]string, map[string]Dict) (Dict, error)
+
+/*
+NewMultiWordDict returns a new MultiWordDict. The client code needs to specify a
+function to retrieve WordListDicts for given prefix words and can optionally
+supply an initial map of WordListDicts.
+*/
+func NewMultiWordDict(chooser DictChooser, dicts map[string]Dict) *MultiWordDict {
+	if dicts == nil {
+		dicts = make(map[string]Dict)
+	}
+	return &MultiWordDict{chooser, dicts}
+}
+
+/*
+Suggest returns dictionary suggestions based on a given prefix. Returns if there
+is a direct match and a list of suggestions.
+*/
+func (md *MultiWordDict) Suggest(prefix string) ([]string, error) {
+
+	// Split prefix into words
+
+	prefixWords := strings.Split(prefix, " ")
+
+	dict, err := md.chooser(prefixWords, md.dicts)
+
+	if err == nil && dict != nil {
+		return dict.Suggest(prefixWords[len(prefixWords)-1])
+	}
+
+	return nil, err
+}
+
/*
WordListDict is a simple dictionary which looks up suggestions based on an
internal word list.
*/
type WordListDict struct {
	words []string // Sorted list of known words
}

/*
NewWordListDict returns a new WordListDict from a given list of words. The list
of words will be sorted (in place - the caller's slice is modified).
*/
func NewWordListDict(words []string) *WordListDict {
	sort.Strings(words)
	return &WordListDict{words}
}

/*
Suggest returns dictionary suggestions based on a given prefix. Returns if there
is a direct match and a list of suggestions.
*/
func (wd *WordListDict) Suggest(prefix string) ([]string, error) {
	var suggestions []string

	// Binary search for the first word which is not smaller than the prefix

	index := sort.SearchStrings(wd.words, prefix)

	if index < len(wd.words) && strings.HasPrefix(wd.words[index], prefix) {

		// Since the word list is sorted, all words sharing the prefix form a
		// contiguous run starting at index - collect until the run ends
		// (previously the scan continued over the whole remaining list)

		for _, word := range wd.words[index:] {
			if !strings.HasPrefix(word, prefix) {
				break // Past the run - no further word can match
			}

			suggestions = append(suggestions, word)
		}
	}

	return suggestions, nil
}

+ 236 - 0
termutil/autoterm_test.go

@@ -0,0 +1,236 @@
+/*
+ * Public Domain Software
+ *
+ * I (Matthias Ladkau) am the author of the source code in this file.
+ * I have placed the source code in this file in the public domain.
+ *
+ * For further information see: http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package termutil
+
+import (
+	"bytes"
+	"fmt"
+	"testing"
+
+	"devt.de/krotik/common/termutil/getch"
+)
+
+// TestAutoCompleteConsoleLineTerminal drives a console terminal wrapped in the
+// auto-complete mixin with prerecorded key events and checks the completed lines.
+func TestAutoCompleteConsoleLineTerminal(t *testing.T) {
+	var out bytes.Buffer
+
+	// Setup mock getch - the package-level getch hooks are replaced so key
+	// events come from a simple in-memory buffer instead of the keyboard
+
+	getchStart = func() error { return nil }
+
+	var getchbuffer []*getch.KeyEvent
+	addTestKeyEvent := func(kc getch.KeyCode, r rune) {
+		getchbuffer = append(getchbuffer, &getch.KeyEvent{
+			Code: kc,
+			Rune: r,
+		})
+	}
+
+	// Mock getch delivers the recorded events one by one
+
+	getchGetch = func() (*getch.KeyEvent, error) {
+		e := getchbuffer[0]
+		getchbuffer = getchbuffer[1:]
+		return e, nil
+	}
+
+	ct, err := NewConsoleLineTerminal(&out)
+
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Dictionary used for the first word of a line
+
+	rootDict := NewWordListDict([]string{"ll", "dir", "get", "put", "test", "test1", "test2"})
+
+	// Chooser returns the root dictionary for the first word and a generated
+	// file name dictionary for all subsequent words
+
+	chooser := func(lineWords []string, dictCache map[string]Dict) (Dict, error) {
+
+		if len(lineWords) == 1 {
+			return rootDict, nil
+		}
+
+		return NewWordListDict([]string{fmt.Sprintf("file4-%v", len(lineWords)), "file2", "file3", "file1", "test"}), nil
+	}
+
+	dict := NewMultiWordDict(chooser, nil)
+
+	// Wrap the console terminal in a Auto Complete Mixin
+
+	ct, err = AddAutoCompleteMixin(ct, dict)
+
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	// Test normal auto complete - "d" should complete to "dir "
+
+	addTestKeyEvent(getch.KeyT, 'd')
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyEnter, 0x00)
+
+	if line, err := ct.NextLine(); err != nil || line != "dir " {
+		t.Error("Unexpected result:", "#"+line+"#", err)
+		return
+	}
+
+	// Second word "t" completes via the generated file dictionary to "test"
+
+	addTestKeyEvent(getch.KeyT, 'd')
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyT, 't')
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyEnter, 0x00)
+
+	if line, err := ct.NextLine(); err != nil || line != "dir test " {
+		t.Error("Unexpected result:", "#"+line+"#", err)
+		return
+	}
+
+	// Test auto complete with multiple suggestion and picking one by pressing tab
+
+	addTestKeyEvent(getch.KeyT, 't')
+	addTestKeyEvent(getch.KeyE, 'e')
+	addTestKeyEvent(getch.KeyTab, 0x00) // Auto complete to test
+	addTestKeyEvent(getch.KeyTab, 0x00) // See suggestions ("test", "test1", "test2")
+	addTestKeyEvent(getch.KeyTab, 0x00) // Produce final space - "test" was accepted
+	addTestKeyEvent(getch.KeyEnter, 0x00)
+
+	if line, err := ct.NextLine(); err != nil || line != "test " {
+		t.Error("Unexpected result:", "#"+line+"#", err)
+		return
+	}
+
+	// Check second level suggestion
+
+	addTestKeyEvent(getch.KeyT, 't')
+	addTestKeyEvent(getch.KeyE, 'e')
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyE, ' ') // NOTE(review): key code KeyE with a space rune - presumably only the rune matters here; confirm against the terminal implementation
+	addTestKeyEvent(getch.KeyT, 'f')
+	addTestKeyEvent(getch.KeyTab, 0x00) // No effect since there is no "file"
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyT, '1')
+	addTestKeyEvent(getch.KeyTab, 0x00)
+	addTestKeyEvent(getch.KeyEnter, 0x00)
+
+	if line, err := ct.NextLine(); err != nil || line != "test file1 " {
+		t.Error("Unexpected result:", "#"+line+"#", err)
+		return
+	}
+
+}
+
+// TestWordListDict checks prefix suggestions against a fixed word list.
+func TestWordListDict(t *testing.T) {
+
+	wld := NewWordListDict([]string{"bar", "foo", "test", "test1", "test2", "test3", "zanas"})
+
+	// Check expected suggestions for a number of prefixes
+
+	for _, testcase := range []struct {
+		prefix string
+		want   string
+	}{
+		{"zanas", "[zanas]"},
+		{"zan", "[zanas]"},
+		{"zap", "[]"},
+		{"t", "[test test1 test2 test3]"},
+		{"test", "[test test1 test2 test3]"},
+		{"b", "[bar]"},
+	} {
+		if res, _ := wld.Suggest(testcase.prefix); fmt.Sprint(res) != testcase.want {
+			t.Error("Unexpected result:", res)
+			return
+		}
+	}
+
+	// Special case of empty dictionary
+
+	wld = NewWordListDict([]string{})
+
+	if res, _ := wld.Suggest("b"); fmt.Sprint(res) != "[]" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+}
+
+func TestMultiWordDict(t *testing.T) {
+
+	rootDict := NewWordListDict([]string{"bar", "foo"})
+
+	md := NewMultiWordDict(func(p []string, c map[string]Dict) (Dict, error) {
+		var dict Dict
+		var ok bool
+
+		if p[0] == "" {
+			return nil, nil
+		}
+
+		if p[0] == "foo" {
+			return nil, fmt.Errorf("Testerror")
+		}
+
+		if dict, ok = c[p[0]]; !ok {
+			dict = rootDict
+		}
+
+		return dict, nil
+	}, nil)
+
+	md.dicts["bar"] = NewWordListDict([]string{"bar", "foo", "test", "test1", "test2", "test3", "zanas"})
+
+	if res, err := md.Suggest(""); err != nil || fmt.Sprint(res) != "[]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("f"); err != nil || fmt.Sprint(res) != "[foo]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("foo"); err == nil || err.Error() != "Testerror" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("b"); err != nil || fmt.Sprint(res) != "[bar]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("bar"); err != nil || fmt.Sprint(res) != "[bar]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("bar "); err != nil || fmt.Sprint(res) != "[bar foo test test1 test2 test3 zanas]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("bar b"); err != nil || fmt.Sprint(res) != "[bar]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+
+	if res, err := md.Suggest("bar test"); err != nil || fmt.Sprint(res) != "[test test1 test2 test3]" {
+		t.Error("Unexpected result:", res, err)
+		return
+	}
+}

+ 0 - 0
termutil/fileterm.go


Some files were not shown because too many files changed in this diff