Browse Source

Merge branch 'fix/add-cluster-example' of krotik/eliasdb into master

Matthias Ladkau 4 years ago
parent
commit
72cc278bbf
55 changed files with 1496 additions and 64 deletions
  1. 14 11
      .gitignore
  2. 9 3
      README.md
  3. 144 0
      api/v1/cluster_test.go
  4. 46 0
      cluster.md
  5. 4 4
      cluster/distributedstoragemanager.go
  6. 14 5
      cluster/memberaddresstable.go
  7. 4 4
      cluster/memberaddresstable_test.go
  8. 25 24
      cluster/memberstorage.go
  9. 1 1
      cluster/rebalance.go
  10. 5 5
      cluster/transfer.go
  11. 32 0
      cluster/util.go
  12. 26 0
      cluster/util_test.go
  13. BIN
      cluster_join.png
  14. BIN
      cluster_term.png
  15. BIN
      cluster_working.png
  16. 2 2
      eliasdb_design.md
  17. 32 0
      examples/data-mining/build.sh
  18. 66 0
      examples/data-mining/doc/data-mining.md
  19. BIN
      examples/data-mining/doc/data-mining_chart.png
  20. BIN
      examples/data-mining/doc/eliasdb_graphiql.png
  21. BIN
      examples/data-mining/doc/eliasdb_term.png
  22. 46 0
      examples/data-mining/docker-compose.yml
  23. 13 0
      examples/data-mining/docker-images/collector/Dockerfile
  24. 71 0
      examples/data-mining/docker-images/collector/app/main.py
  25. 8 0
      examples/data-mining/docker-images/collector/build.sh
  26. 11 0
      examples/data-mining/docker-images/collector/etc/supervisord.conf
  27. 14 0
      examples/data-mining/docker-images/eliasdb/Dockerfile
  28. 13 0
      examples/data-mining/docker-images/eliasdb/build.sh
  29. 6 0
      examples/data-mining/docker-images/eliasdb/cluster.config.json.1
  30. 6 0
      examples/data-mining/docker-images/eliasdb/cluster.config.json.2
  31. 6 0
      examples/data-mining/docker-images/eliasdb/cluster.config.json.3
  32. BIN
      examples/data-mining/docker-images/eliasdb/cluster.stateinfo.1
  33. BIN
      examples/data-mining/docker-images/eliasdb/cluster.stateinfo.2
  34. BIN
      examples/data-mining/docker-images/eliasdb/cluster.stateinfo.3
  35. 31 0
      examples/data-mining/docker-images/eliasdb/docker-compose.yml
  36. 25 0
      examples/data-mining/docker-images/eliasdb/eliasdb.config.json
  37. 5 0
      examples/data-mining/docker-images/frontend/Dockerfile
  38. 11 0
      examples/data-mining/docker-images/frontend/app/dist/frontend.js
  39. 15 0
      examples/data-mining/docker-images/frontend/app/index.html
  40. 33 0
      examples/data-mining/docker-images/frontend/app/package.json
  41. BIN
      examples/data-mining/docker-images/frontend/app/pig.gif
  42. 28 0
      examples/data-mining/docker-images/frontend/app/src/component/LineChart.vue
  43. 102 0
      examples/data-mining/docker-images/frontend/app/src/component/LineResult.vue
  44. 21 0
      examples/data-mining/docker-images/frontend/app/src/index.ts
  45. 231 0
      examples/data-mining/docker-images/frontend/app/src/lib/eliasdb-graphql.ts
  46. 4 0
      examples/data-mining/docker-images/frontend/app/src/vue-shims.d.ts
  47. 66 0
      examples/data-mining/docker-images/frontend/app/tsconfig.json
  48. 104 0
      examples/data-mining/docker-images/frontend/app/webpack.config.js
  49. 15 0
      examples/data-mining/docker-images/frontend/build.sh
  50. 25 0
      examples/data-mining/docker-images/frontend/etc/default.conf
  51. 154 0
      graph/graphmanager_cluster_test.go
  52. 4 1
      graph/graphmanager_test.go
  53. 0 0
      graph/import_export.go
  54. 0 0
      graph/import_export_test.go
  55. 4 4
      server/pages.go

+ 14 - 11
.gitignore

@@ -1,12 +1,15 @@
-.cache
-.cover
-.eliasdb_console_history
-coverage.txt
-coverage.out
-coverage.html
-test
+/.cache
+/.cover
+/.eliasdb_console_history
+/coverage.txt
+/coverage.out
+/coverage.html
+/test
 /dist
-build
-eliasdb
-examples/tutorial/run/
-examples/chat/run/
+/build
+/eliasdb
+/examples/tutorial/run/
+/examples/chat/run/
+/examples/data-mining/docker-images/eliasdb/eliasdb
+/examples/data-mining/docker-images/frontend/app/node_modules
+/examples/data-mining/docker-images/frontend/app/graphiql

+ 9 - 3
README.md

@@ -58,14 +58,18 @@ docker run --rm --network="host" -it -v $PWD:/data --user $(id -u):$(id -g) -v $
 
 ### Tutorial:
 
-To get an idea of what EliasDB is about have a look at the [tutorial](https://devt.de/krotik/eliasdb/src/master/examples/tutorial/doc/tutorial.md). This tutorial will cover the basics of EQL and show how data is organized.
+To get an idea of what EliasDB is about have a look at the [tutorial](examples/tutorial/doc/tutorial.md). This tutorial will cover the basics of EQL and show how data is organized.
 
-There is a separate [tutorial](https://devt.de/krotik/eliasdb/src/master/examples/tutorial/doc/tutorial_graphql.md) on using ELiasDB with GraphQL.
+There is a separate [tutorial](examples/tutorial/doc/tutorial_graphql.md) on using EliasDB with GraphQL.
 
 ### REST API:
 
 The terminal uses a REST API to communicate with the backend. The REST API can be browsed using a dynamically generated swagger.json definition (https://localhost:9090/db/swagger.json). You can browse the API of EliasDB's latest version [here](http://petstore.swagger.io/?url=https://devt.de/krotik/eliasdb/raw/master/swagger.json).
 
+### Clustering:
+
+EliasDB supports being run in a cluster by joining multiple instances of EliasDB together. You can read more about it [here](cluster.md).
+
 ### Command line options
 The main EliasDB executable has two main tools:
 ```
@@ -212,13 +216,15 @@ docker build --tag krotik/eliasdb .
 
 Example Applications
 --------------------
-[Chat](https://devt.de/krotik/eliasdb/src/master/examples/chat/doc/chat.md) - A simple chat application showing user management and subscriptions.
+- [Chat](examples/chat/doc/chat.md) - A simple chat application showing user management and subscriptions.
+- [Data-mining](examples/data-mining/doc/data-mining.md) - A more complex application which uses the cluster feature of EliasDB and GraphQL for data queries.
 
 
 Further Reading
 ---------------
 - A design document which describes the different components of the graph database. [Link](https://devt.de/krotik/eliasdb/src/master/eliasdb_design.md)
 - A reference for the EliasDB query language EQL. [Link](https://devt.de/krotik/eliasdb/src/master/eql.md)
+- A reference for EliasDB's support for GraphQL. [Link](https://devt.de/krotik/eliasdb/src/master/graphql.md)
 - A quick overview of what you can do when you embed EliasDB in your own Go project. [Link](https://devt.de/krotik/eliasdb/src/master/embedding.md)
 
 License

+ 144 - 0
api/v1/cluster_test.go

@@ -424,6 +424,150 @@ func TestClusterQuery(t *testing.T) {
 	}
 }
 
+func TestClusterQueryBigCluster(t *testing.T) {
+	queryURL := "http://localhost" + TESTPORT + EndpointClusterQuery
+
+	// Create a big cluster
+
+	cluster3 := createCluster(3)
+
+	for _, dd := range cluster3 {
+		dd.Start()
+		defer dd.Close()
+	}
+
+	oldGM := api.GM
+	oldGS := api.GS
+	api.GS = cluster3[0]
+	api.GM = graph.NewGraphManager(cluster3[0])
+	api.DD = cluster3[0]
+	api.DDLog = datautil.NewRingBuffer(10)
+
+	defer func() {
+		api.GM = oldGM
+		api.GS = oldGS
+		api.DD = nil
+		api.DDLog = nil
+	}()
+
+	// We should now get back a state
+
+	st, _, res := sendTestRequest(queryURL, "GET", nil)
+
+	if st != "200 OK" || res != `
+{
+  "failed": null,
+  "members": [
+    "TestClusterMember-0",
+    "localhost:9020"
+  ],
+  "replication": 1,
+  "ts": [
+    "TestClusterMember-0",
+    "1"
+  ],
+  "tsold": [
+    "",
+    "0"
+  ]
+}`[1:] {
+		t.Error("Unexpected response:", st, res)
+		return
+	}
+
+	// Join more cluster members
+
+	api.DD = cluster3[1]
+	api.DDLog = datautil.NewRingBuffer(10)
+
+	jsonString, err := json.Marshal(map[string]interface{}{
+		"name":    cluster3[0].MemberManager.Name(),
+		"netaddr": cluster3[0].MemberManager.NetAddr(),
+	})
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	st, _, res = sendTestRequest(queryURL+"join", "PUT", jsonString)
+
+	if st != "200 OK" || res != "" {
+		t.Error("Unexpected response:", st, res)
+		return
+	}
+
+	st, _, res = sendTestRequest(queryURL, "GET", nil)
+
+	if st != "200 OK" || res != `
+{
+  "failed": null,
+  "members": [
+    "TestClusterMember-1",
+    "localhost:9021",
+    "TestClusterMember-0",
+    "localhost:9020"
+  ],
+  "replication": 1,
+  "ts": [
+    "TestClusterMember-0",
+    "2"
+  ],
+  "tsold": [
+    "TestClusterMember-0",
+    "1"
+  ]
+}`[1:] {
+		t.Error("Unexpected response:", st, res)
+		return
+	}
+
+	api.DD = cluster3[2]
+	api.DDLog = datautil.NewRingBuffer(10)
+
+	jsonString, err = json.Marshal(map[string]interface{}{
+		"name":    cluster3[0].MemberManager.Name(),
+		"netaddr": cluster3[0].MemberManager.NetAddr(),
+	})
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	st, _, res = sendTestRequest(queryURL+"join", "PUT", jsonString)
+
+	if st != "200 OK" || res != "" {
+		t.Error("Unexpected response:", st, res)
+		return
+	}
+
+	st, _, res = sendTestRequest(queryURL, "GET", nil)
+
+	if st != "200 OK" || res != `
+{
+  "failed": null,
+  "members": [
+    "TestClusterMember-2",
+    "localhost:9022",
+    "TestClusterMember-0",
+    "localhost:9020",
+    "TestClusterMember-1",
+    "localhost:9021"
+  ],
+  "replication": 1,
+  "ts": [
+    "TestClusterMember-0",
+    "3"
+  ],
+  "tsold": [
+    "TestClusterMember-0",
+    "2"
+  ]
+}`[1:] {
+		t.Error("Unexpected response:", st, res)
+		return
+	}
+}
+
 /*
 Create a cluster with n members (all storage is in memory)
 */

File diff suppressed because it is too large
+ 46 - 0
cluster.md


+ 4 - 4
cluster/distributedstoragemanager.go

@@ -80,7 +80,7 @@ func (dsm *DistributedStorageManager) Root(root int) uint64 {
 	dsm.rootError = err
 
 	if res != nil {
-		ret = res.(uint64)
+		ret = toUInt64(res)
 	}
 
 	return ret
@@ -192,7 +192,7 @@ func (dsm *DistributedStorageManager) insertOrUpdate(insert bool, loc uint64, o
 	cloc, err := dsm.ds.sendDataRequest(member, request)
 
 	if err == nil {
-		return cloc.(uint64), err
+		return toUInt64(cloc), err
 
 	}
 
@@ -210,7 +210,7 @@ func (dsm *DistributedStorageManager) insertOrUpdate(insert bool, loc uint64, o
 
 			cloc, nerr := dsm.ds.sendDataRequest(member, request)
 			if nerr == nil {
-				ret = cloc.(uint64)
+				ret = toUInt64(cloc)
 				err = nil
 				break
 			}
@@ -225,7 +225,7 @@ func (dsm *DistributedStorageManager) insertOrUpdate(insert bool, loc uint64, o
 		for _, member := range replicatingMembers {
 			cloc, nerr := dsm.ds.sendDataRequest(member, request)
 			if nerr == nil {
-				ret = cloc.(uint64)
+				ret = toUInt64(cloc)
 				err = nil
 				break
 			}

+ 14 - 5
cluster/memberaddresstable.go

@@ -11,6 +11,7 @@
 package cluster
 
 import (
+	"encoding/gob"
 	"errors"
 	"fmt"
 	"sync"
@@ -22,6 +23,14 @@ import (
 	"devt.de/krotik/eliasdb/storage"
 )
 
+func init() {
+
+	// Make sure we can use the relevant types in a gob operation
+
+	gob.Register(&translationRec{})
+	gob.Register(&transferRec{})
+}
+
 /*
 rootIDTranslationTree is the root id for the translation map
 */
@@ -47,16 +56,16 @@ translationRec is a translation record which stores a local storage location wit
 version number.
 */
 type translationRec struct {
-	loc uint64 // Local storage location
-	ver uint64 // Version of the local stored data
+	Loc uint64 // Local storage location
+	Ver uint64 // Version of the local stored data
 }
 
 /*
 transferRec is a transfer record which stores a data transfer request.
 */
 type transferRec struct {
-	members []string     // Target members
-	request *DataRequest // Data request
+	Members []string     // Target members
+	Request *DataRequest // Data request
 }
 
 /*
@@ -337,7 +346,7 @@ func (mat *memberAddressTable) newlocCounter(dsname string) (uint64, bool, error
 		return 1, false, err
 	}
 
-	ret := v.(uint64)
+	ret := toUInt64(v)
 
 	// Store counter in the cache
 

+ 4 - 4
cluster/memberaddresstable_test.go

@@ -144,7 +144,7 @@ func TestAddressTableClusterLoc(t *testing.T) {
 
 	// Now check the translation lookup
 
-	if tr, ok, err := ms1[0].at.TransClusterLoc("test1", 50); tr.loc != 123 || tr.ver != 1 || !ok || err != nil {
+	if tr, ok, err := ms1[0].at.TransClusterLoc("test1", 50); tr.Loc != 123 || tr.Ver != 1 || !ok || err != nil {
 		t.Error("Unexpected translation:", tr, ok, err)
 		return
 	}
@@ -154,17 +154,17 @@ func TestAddressTableClusterLoc(t *testing.T) {
 		return
 	}
 
-	if tr, ok, err := ms1[0].at.SetTransClusterLoc("test1", 50, 555, 2); tr.loc != 123 || tr.ver != 1 || !ok || err != nil {
+	if tr, ok, err := ms1[0].at.SetTransClusterLoc("test1", 50, 555, 2); tr.Loc != 123 || tr.Ver != 1 || !ok || err != nil {
 		t.Error("Unexpected translation:", tr, ok, err)
 		return
 	}
 
-	if tr, ok, err := ms1[0].at.TransClusterLoc("test1", 50); tr.loc != 555 || tr.ver != 2 || !ok || err != nil {
+	if tr, ok, err := ms1[0].at.TransClusterLoc("test1", 50); tr.Loc != 555 || tr.Ver != 2 || !ok || err != nil {
 		t.Error("Unexpected translation:", tr, ok, err)
 		return
 	}
 
-	if tr, ok, err := ms1[0].at.RemoveTransClusterLoc("test1", 50); tr.loc != 555 || tr.ver != 2 || !ok || err != nil {
+	if tr, ok, err := ms1[0].at.RemoveTransClusterLoc("test1", 50); tr.Loc != 555 || tr.Ver != 2 || !ok || err != nil {
 		t.Error("Unexpected translation:", tr, ok, err)
 		return
 	}

+ 25 - 24
cluster/memberstorage.go

@@ -189,7 +189,7 @@ func (ms *memberStorage) handleSetRootRequest(distTable *DistributionTable, requ
 
 	sm := ms.dataStorage(dsname, true)
 
-	sm.SetRoot(root, request.Value.(uint64))
+	sm.SetRoot(root, toUInt64(request.Value))
 
 	if !request.Transfer {
 		ms.at.AddTransferRequest(distTable.OtherReplicationMembers(0, ms.ds.MemberManager.Name()),
@@ -227,14 +227,15 @@ func (ms *memberStorage) handleInsertRequest(distTable *DistributionTable, reque
 
 		// If this is a transfer request we know already the cluster location
 
-		cloc = request.Args[RPLoc].(uint64)
+		cloc = toUInt64(request.Args[RPLoc])
 	}
 
 	if err == nil {
+		var loc uint64
 
 		// Insert into the local storage
 
-		loc, err := sm.Insert(request.Value)
+		loc, err = sm.Insert(request.Value)
 
 		if err == nil {
 
@@ -285,7 +286,7 @@ func (ms *memberStorage) handleUpdateRequest(distTable *DistributionTable, reque
 	var newVersion uint64
 
 	dsname := request.Args[RPStoreName].(string)
-	cloc := request.Args[RPLoc].(uint64)
+	cloc := toUInt64(request.Args[RPLoc])
 	*response = 0
 
 	// Get the translation
@@ -301,14 +302,14 @@ func (ms *memberStorage) handleUpdateRequest(distTable *DistributionTable, reque
 			// Update the local storage
 
 			if !request.Transfer {
-				err = sm.Update(transRec.loc, request.Value)
-				newVersion = transRec.ver + 1
+				err = sm.Update(transRec.Loc, request.Value)
+				newVersion = transRec.Ver + 1
 
 			} else {
-				newVersion = request.Args[RPVer].(uint64)
+				newVersion = toUInt64(request.Args[RPVer])
 
-				if newVersion >= transRec.ver {
-					err = sm.Update(transRec.loc, request.Value)
+				if newVersion >= transRec.Ver {
+					err = sm.Update(transRec.Loc, request.Value)
 
 				} else {
 
@@ -330,7 +331,7 @@ func (ms *memberStorage) handleUpdateRequest(distTable *DistributionTable, reque
 
 				// Increase the version of the translation record
 
-				_, _, err = ms.at.SetTransClusterLoc(dsname, cloc, transRec.loc, newVersion)
+				_, _, err = ms.at.SetTransClusterLoc(dsname, cloc, transRec.Loc, newVersion)
 
 				if err == nil {
 
@@ -380,7 +381,7 @@ func (ms *memberStorage) handleFreeRequest(distTable *DistributionTable, request
 	var err error
 
 	dsname := request.Args[RPStoreName].(string)
-	cloc := request.Args[RPLoc].(uint64)
+	cloc := toUInt64(request.Args[RPLoc])
 
 	// Get the translation
 
@@ -400,7 +401,7 @@ func (ms *memberStorage) handleFreeRequest(distTable *DistributionTable, request
 
 				// Remove from the local storage
 
-				err = sm.Free(transRec.loc)
+				err = sm.Free(transRec.Loc)
 
 				if !request.Transfer {
 
@@ -439,7 +440,7 @@ func (ms *memberStorage) handleFetchRequest(distTable *DistributionTable,
 	var err error
 
 	dsname := request.Args[RPStoreName].(string)
-	cloc := request.Args[RPLoc].(uint64)
+	cloc := toUInt64(request.Args[RPLoc])
 
 	// Get the translation
 
@@ -461,7 +462,7 @@ func (ms *memberStorage) handleFetchRequest(distTable *DistributionTable,
 		if sm != nil {
 			var res []byte
 
-			err = sm.Fetch(transRec.loc, &res)
+			err = sm.Fetch(transRec.Loc, &res)
 
 			if err == nil {
 
@@ -546,7 +547,7 @@ func (ms *memberStorage) handleRebalanceRequest(distTable *DistributionTable, re
 
 			// Check if the version is newer and update the local record if it is
 
-			if tr.ver < ver {
+			if tr.Ver < ver {
 
 				// Local record exists and needs to be updated
 
@@ -563,11 +564,11 @@ func (ms *memberStorage) handleRebalanceRequest(distTable *DistributionTable, re
 
 					// Update the local storage
 
-					if err = sm.Update(tr.loc, res); err == nil {
+					if err = sm.Update(tr.Loc, res); err == nil {
 
 						// Update the translation
 
-						_, _, err = ms.at.SetTransClusterLoc(smname, cloc, tr.loc, ver)
+						_, _, err = ms.at.SetTransClusterLoc(smname, cloc, tr.Loc, ver)
 
 						manager.LogDebug(ms.ds.MemberManager.Name(),
 							fmt.Sprintf("(Store): Rebalance updated %v location: %v", smname, cloc))
@@ -621,7 +622,7 @@ func (ms *memberStorage) handleRebalanceRequest(distTable *DistributionTable, re
 
 				manager.LogDebug(ms.ds.MemberManager.Name(),
 					fmt.Sprintf("(Store): Rebalance removes %v location: %v from member %v",
-						smname, tr.loc, rsource))
+						smname, tr.Loc, rsource))
 
 				_, err = ms.ds.sendDataRequest(rsource, &DataRequest{RTFree, map[DataRequestArg]interface{}{
 					RPStoreName: smname,
@@ -662,15 +663,15 @@ func (ms *memberStorage) dump(smname string) string {
 			if val != nil {
 				tr := val.(*transferRec)
 
-				args, _ := json.Marshal(tr.request.Args)
+				args, _ := json.Marshal(tr.Request.Args)
 
-				vals, ok := tr.request.Value.([]byte)
+				vals, ok := tr.Request.Value.([]byte)
 				if !ok {
-					vals, _ = json.Marshal(tr.request.Value)
+					vals, _ = json.Marshal(tr.Request.Value)
 				}
 
 				buf.WriteString(fmt.Sprintf("transfer: %v - %v %v %q\n",
-					tr.members, tr.request.RequestType, string(args), vals))
+					tr.Members, tr.Request.RequestType, string(args), vals))
 			}
 		}
 	}
@@ -724,8 +725,8 @@ func (ms *memberStorage) dump(smname string) string {
 				if strings.HasPrefix(key, fmt.Sprint(transPrefix, smname, "#")) {
 					key = string(key[len(fmt.Sprint(transPrefix, smname, "#")):])
 
-					locmap[v.(*translationRec).loc] = fmt.Sprintf("%v (v:%v)",
-						key, v.(*translationRec).ver)
+					locmap[v.(*translationRec).Loc] = fmt.Sprintf("%v (v:%v)",
+						key, v.(*translationRec).Ver)
 				}
 			}
 

+ 1 - 1
cluster/rebalance.go

@@ -108,7 +108,7 @@ func (ms *memberStorage) rebalanceWorker(forceRun bool) {
 
 				maintMgmts = append(maintMgmts, smname)
 				maintLocs = append(maintLocs, cloc)
-				maintVers = append(maintVers, tr.ver)
+				maintVers = append(maintVers, tr.Ver)
 			}
 		}
 

+ 5 - 5
cluster/transfer.go

@@ -73,15 +73,15 @@ func (ms *memberStorage) transferWorker() {
 
 			manager.LogDebug(ms.ds.Name(), "(TR): ",
 				fmt.Sprintf("Processing transfer request %v for %v from %v",
-					tr.request.RequestType, tr.members, ts))
+					tr.Request.RequestType, tr.Members, ts))
 
 			// Send the request to all members
 
 			var failedMembers []string
 
-			for _, member := range tr.members {
+			for _, member := range tr.Members {
 
-				if _, err := ms.ds.sendDataRequest(member, tr.request); err != nil {
+				if _, err := ms.ds.sendDataRequest(member, tr.Request); err != nil {
 					manager.LogDebug(ms.ds.Name(), "(TR): ",
 						fmt.Sprintf("Member %v Error: %v", member, err))
 
@@ -93,8 +93,8 @@ func (ms *memberStorage) transferWorker() {
 
 			if len(failedMembers) == 0 {
 				processed = append(processed, key)
-			} else if len(failedMembers) < len(tr.members) {
-				tr.members = failedMembers
+			} else if len(failedMembers) < len(tr.Members) {
+				tr.Members = failedMembers
 				ms.at.transfer.Put(key, tr)
 			}
 		}

+ 32 - 0
cluster/util.go

@@ -0,0 +1,32 @@
+/*
+ * EliasDB
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+package cluster
+
+import (
+	"fmt"
+	"strconv"
+
+	"devt.de/krotik/common/errorutil"
+)
+
+/*
+toUInt64 safely converts an interface{} to an uint64.
+*/
+func toUInt64(v interface{}) uint64 {
+	if vu, ok := v.(uint64); ok {
+		return vu
+	}
+
+	cloc, err := strconv.ParseInt(fmt.Sprint(v), 10, 64)
+	errorutil.AssertOk(err)
+
+	return uint64(cloc)
+}

+ 26 - 0
cluster/util_test.go

@@ -0,0 +1,26 @@
+/*
+ * EliasDB
+ *
+ * Copyright 2016 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ */
+
+package cluster
+
+import "testing"
+
+func TestToUInt64(t *testing.T) {
+
+	if res := toUInt64("1"); res != 1 {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+
+	if res := toUInt64(uint64(1)); res != 1 {
+		t.Error("Unexpected result: ", res)
+		return
+	}
+}

BIN
cluster_join.png


BIN
cluster_term.png


BIN
cluster_working.png


File diff suppressed because it is too large
+ 2 - 2
eliasdb_design.md


+ 32 - 0
examples/data-mining/build.sh

@@ -0,0 +1,32 @@
+#!/bin/sh
+cd "$(dirname "$0")"
+export ROOT_PATH=`pwd`
+
+# This build script should build the following images in the local Docker registry:
+#
+# data-mining/frontend
+# data-mining/eliasdb1
+# data-mining/eliasdb2
+# data-mining/eliasdb3
+# data-mining/collector
+
+echo Building Collector
+echo ==================
+cd ./docker-images/collector
+./build.sh
+cd $ROOT_PATH
+
+echo
+echo Building Eliasdb Cluster
+echo ========================
+cd docker-images/eliasdb
+./build.sh
+cd $ROOT_PATH
+
+echo
+echo Building Frontend
+echo =================
+cd docker-images/frontend
+./build.sh
+cd $ROOT_PATH
+

+ 66 - 0
examples/data-mining/doc/data-mining.md

@@ -0,0 +1,66 @@
+EliasDB Data Mining Example
+==
+This example demonstrates a more complex application which uses the cluster
+feature of EliasDB and GraphQL for data queries.
+
+The idea of the application is to provide a platform for data mining with 3 components for presentation, collection and storage of data. The data which is being collected are request response times of the domain `devt.de`.
+
+The tutorial assumes you have downloaded EliasDB, extracted and built it.
+It also assumes that you have a running docker environment with the `docker` and `docker-compose` commands being available. This tutorial will only work in unix-like environments.
+
+For this tutorial please execute `build.sh` in the subdirectory: examples/data-mining and run `docker-compose up` in the same directory.
+
+After running build.sh you should see the following docker images in the local docker registry:
+
+```
+> docker images
+
+REPOSITORY                   TAG                 IMAGE ID            CREATED             SIZE
+data-mining/collector        latest              3a159822c9e6        6 minutes ago       174MB
+data-mining/frontend         latest              c412dbd46dce        16 hours ago        22.7MB
+data-mining/eliasdb3         latest              c079c1ad876e        17 hours ago        20.9MB
+data-mining/eliasdb2         latest              b53ec5dfdcfb        17 hours ago        20.9MB
+data-mining/eliasdb1         latest              83fddb8783df        17 hours ago        20.9MB
+```
+
+After running `docker-compose up` you should see 5 containers starting with the collector container continuously gathering ping results and storing it into the running EliasDB cluster.
+
+You can query the state of the database by pointing a browser at:
+```
+http://localhost:4040/db/term.html
+```
+You can query for `PingResult` nodes:
+
+![](eliasdb_term.png)
+
+You can also use a GraphiQL interface by pointing a browser at:
+```
+http://localhost:4040/graphiql/
+```
+Here you can also query for `PingResult` nodes:
+
+![](eliasdb_graphiql.png)
+
+You can log into a running EliasDB container and query its disk usage:
+```
+> docker exec -it eliasdb1 sh
+
+/data # du -h
+2.2M	./db
+48.0K	./web/db
+56.0K	./web
+12.0K	./ssl
+2.3M	.
+
+/data # df -h
+Filesystem                Size      Used Available Use% Mounted on
+overlay                 240.1G     43.3G    184.5G  19% /
+...
+```
+
+Finally you can see a graph of the collected data by navigating to:
+```
+http://localhost:4040/
+```
+
+![](data-mining_chart.png)

BIN
examples/data-mining/doc/data-mining_chart.png


BIN
examples/data-mining/doc/eliasdb_graphiql.png


BIN
examples/data-mining/doc/eliasdb_term.png


+ 46 - 0
examples/data-mining/docker-compose.yml

@@ -0,0 +1,46 @@
+version: "3"
+
+services:
+  frontend:
+    image: data-mining/frontend:latest
+    container_name: frontend
+    ports:
+      - 4040:80
+    # Uncomment to directly edit the frontend
+    # volumes:
+    #   - ./docker-images/frontend/app:/usr/share/nginx/html
+    networks:
+      - back-tier
+
+  eliasdb1:
+    image: data-mining/eliasdb1:latest
+    container_name: eliasdb1
+    ports:
+      - 4041:9090
+    networks:
+      - back-tier
+
+  eliasdb2:
+    image: data-mining/eliasdb2:latest
+    container_name: eliasdb2
+    ports:
+      - 4042:9090
+    networks:
+      - back-tier
+
+  eliasdb3:
+    image: data-mining/eliasdb3:latest
+    container_name: eliasdb3
+    ports:
+      - 4043:9090
+    networks:
+      - back-tier
+
+  collector:
+    image: data-mining/collector:latest
+    container_name: collector
+    networks:
+      - back-tier
+
+networks:
+  back-tier:

+ 13 - 0
examples/data-mining/docker-images/collector/Dockerfile

@@ -0,0 +1,13 @@
+FROM python:alpine
+RUN apk update && apk add --no-cache supervisor
+
+RUN pip install schedule requests
+
+ADD etc/supervisord.conf /etc/supervisord.conf
+ADD app/main.py /app/main.py
+
+RUN adduser -D collector
+RUN chown collector:collector /app -R
+USER collector
+
+CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"]

+ 71 - 0
examples/data-mining/docker-images/collector/app/main.py

@@ -0,0 +1,71 @@
+#!/usr/bin/env python3
+#
+# EliasDB - Data mining collector example
+#
+# Copyright 2020 Matthias Ladkau. All rights reserved.
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+#
+
+import schedule
+import time
+import requests
+import json
+
+ELIASDB_URL = "eliasdb1:9090"
+
+requests.packages.urllib3.disable_warnings()
+
+def job():
+    global counter
+
+    url = "https://devt.de"
+
+    try:
+
+        now = int(time.time())
+        print("Running request for %s - timestamp: %s (%s)" %
+            (url, now, time.strftime("%d-%m-%Y %H:%M:%S", time.gmtime(now))))
+
+        r = requests.get(url)
+        res_time = r.elapsed
+
+        print ("    %s -> %s" % (url, res_time))
+
+        result = {
+            "key"     : str(now),
+            "kind"    : "PingResult",
+            "url"     : url,
+            "success" : True,
+            "result"  : str(res_time),
+        }
+
+    except Exception as e:
+        print("Error: %s", e)
+
+        result = {
+            "key"     : str(now),
+            "kind"    : "PingResult",
+            "url"     : url,
+            "success" : False,
+            "result"  : str(e),
+        }
+
+    try:
+        r = requests.post('https://%s/db/v1/graph/main/n' % ELIASDB_URL,
+            json.dumps([result]),  verify=False)
+
+        if r.status_code != 200:
+            print("Could not store result: %s", r.text)
+
+    except Exception as e:
+        print("Error storing result: %s", e)
+
+
+schedule.every(5).seconds.do(job)
+
+while True:
+    schedule.run_pending()
+    time.sleep(1)

+ 8 - 0
examples/data-mining/docker-images/collector/build.sh

@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# Build the collector component
+
+docker build --tag data-mining/collector .
+
+# Run an interactive shell on the build image with:
+# docker run -it data-mining/collector sh

+ 11 - 0
examples/data-mining/docker-images/collector/etc/supervisord.conf

@@ -0,0 +1,11 @@
+[supervisord]
+nodaemon=true
+logfile=/dev/null
+pidfile=/dev/null
+logfile_maxbytes=0
+
+[program:collector]
+command=python -u /app/main.py
+stdout_logfile=/dev/fd/1
+stdout_logfile_maxbytes=0
+redirect_stderr=true

+ 14 - 0
examples/data-mining/docker-images/eliasdb/Dockerfile

@@ -0,0 +1,14 @@
+FROM alpine:latest
+
+ARG cluster_id=""
+
+RUN echo "Building cluster member ${cluster_id}"
+
+COPY ./eliasdb /eliasdb
+COPY ./eliasdb.config.json /data/eliasdb.config.json
+COPY ./cluster.config.json.${cluster_id} /data/cluster.config.json
+COPY ./cluster.stateinfo.${cluster_id} /data/cluster.stateinfo
+
+WORKDIR /data
+
+CMD ["../eliasdb", "server"]

+ 13 - 0
examples/data-mining/docker-images/eliasdb/build.sh

@@ -0,0 +1,13 @@
+#!/bin/sh
+cd "$(dirname "$0")"
+export ROOT_PATH=`pwd`
+
+# Build the EliasDB cluster member images
+
+cp ../../../../eliasdb .
+docker build --build-arg cluster_id=1 --tag data-mining/eliasdb1 .
+docker build --build-arg cluster_id=2 --tag data-mining/eliasdb2 .
+docker build --build-arg cluster_id=3 --tag data-mining/eliasdb3 .
+
+# Run an interactive shell on the build image with:
+# docker run -it data-mining/eliasdb sh

+ 6 - 0
examples/data-mining/docker-images/eliasdb/cluster.config.json.1

@@ -0,0 +1,6 @@
+{
+    "ClusterMemberName": "eliasdb1",
+    "ClusterMemberRPC": "eliasdb1:9030",
+    "ClusterSecret": "secret123",
+    "ReplicationFactor": 2
+}

+ 6 - 0
examples/data-mining/docker-images/eliasdb/cluster.config.json.2

@@ -0,0 +1,6 @@
+{
+    "ClusterMemberName": "eliasdb2",
+    "ClusterMemberRPC": "eliasdb2:9030",
+    "ClusterSecret": "secret123",
+    "ReplicationFactor": 2
+}

+ 6 - 0
examples/data-mining/docker-images/eliasdb/cluster.config.json.3

@@ -0,0 +1,6 @@
+{
+    "ClusterMemberName": "eliasdb3",
+    "ClusterMemberRPC": "eliasdb3:9030",
+    "ClusterSecret": "secret123",
+    "ReplicationFactor": 2
+}

BIN
examples/data-mining/docker-images/eliasdb/cluster.stateinfo.1


BIN
examples/data-mining/docker-images/eliasdb/cluster.stateinfo.2


BIN
examples/data-mining/docker-images/eliasdb/cluster.stateinfo.3


+ 31 - 0
examples/data-mining/docker-images/eliasdb/docker-compose.yml

@@ -0,0 +1,31 @@
+version: "3"
+
+# Sample file to just spin up the eliasdb cluster
+
+services:
+  eliasdb1:
+    image: data-mining/eliasdb1:latest
+    container_name: eliasdb1
+    ports:
+      - 4041:9090
+    networks:
+      - back-tier
+
+  eliasdb2:
+    image: data-mining/eliasdb2:latest
+    container_name: eliasdb2
+    ports:
+      - 4042:9090
+    networks:
+      - back-tier
+
+  eliasdb3:
+    image: data-mining/eliasdb3:latest
+    container_name: eliasdb3
+    ports:
+      - 4043:9090
+    networks:
+      - back-tier
+
+networks:
+  back-tier:

+ 25 - 0
examples/data-mining/docker-images/eliasdb/eliasdb.config.json

@@ -0,0 +1,25 @@
+{
+    "ClusterConfigFile": "cluster.config.json",
+    "ClusterLogHistory": 100,
+    "ClusterStateInfoFile": "cluster.stateinfo",
+    "CookieMaxAgeSeconds": "86400",
+    "EnableAccessControl": false,
+    "EnableCluster": true,
+    "EnableClusterTerminal": true,
+    "EnableReadOnly": false,
+    "EnableWebFolder": true,
+    "EnableWebTerminal": true,
+    "HTTPSCertificate": "cert.pem",
+    "HTTPSHost": "127.0.0.1",
+    "HTTPSKey": "key.pem",
+    "HTTPSPort": "9090",
+    "LocationAccessDB": "access.db",
+    "LocationDatastore": "db",
+    "LocationHTTPS": "ssl",
+    "LocationUserDB": "users.db",
+    "LocationWebFolder": "web",
+    "LockFile": "eliasdb.lck",
+    "MemoryOnlyStorage": false,
+    "ResultCacheMaxAgeSeconds": 0,
+    "ResultCacheMaxSize": 0
+}

+ 5 - 0
examples/data-mining/docker-images/frontend/Dockerfile

@@ -0,0 +1,5 @@
+FROM nginx:alpine
+
+COPY etc/default.conf /etc/nginx/conf.d/default.conf
+COPY app/ /usr/share/nginx/html
+

File diff suppressed because it is too large
+ 11 - 0
examples/data-mining/docker-images/frontend/app/dist/frontend.js


+ 15 - 0
examples/data-mining/docker-images/frontend/app/index.html

@@ -0,0 +1,15 @@
+<!doctype html>
+<html>
+<head>
+ <meta charset="utf-8"> 
+    <link rel="shortcut icon" href="./pig.gif">
+    
+    <title>Data Mining</title> 
+</head>
+
+<body>
+    <div id="app"></div>
+</body>
+<script src="./dist/frontend.js"></script>
+
+</html>

+ 33 - 0
examples/data-mining/docker-images/frontend/app/package.json

@@ -0,0 +1,33 @@
+{
+  "name": "data-mining-frontend",
+  "version": "1.0.0",
+  "description": "The frontend of the data-mining example",
+  "main": "bundle.js",
+  "scripts": {
+    "build": "webpack",
+    "watch": "webpack --watch",
+    "pretty": "tsc --noEmit && eslint 'src/**/*.{js,ts,tsx}' --quiet --fix"
+  },
+  "author": "Matthias Ladkau",
+  "license": "ISC",
+  "devDependencies": {
+    "@typescript-eslint/eslint-plugin": "^1.13.0",
+    "@typescript-eslint/parser": "^1.13.0",
+    "css-loader": "^3.1.0",
+    "eslint": "^6.1.0",
+    "eslint-config-prettier": "^6.0.0",
+    "eslint-plugin-prettier": "^3.1.0",
+    "prettier": "^1.18.2",
+    "ts-loader": "^6.0.4",
+    "typescript": "^3.5.3",
+    "vue": "^2.6.10",
+    "vue-loader": "^15.7.1",
+    "vue-template-compiler": "^2.6.10",
+    "webpack": "^4.39.1",
+    "webpack-cli": "^3.3.6"
+  },
+  "dependencies": {
+    "chart.js": "^2.9.3",
+    "vue-chartjs": "^3.5.0"
+  }
+}

BIN
examples/data-mining/docker-images/frontend/app/pig.gif


+ 28 - 0
examples/data-mining/docker-images/frontend/app/src/component/LineChart.vue

@@ -0,0 +1,28 @@
+<!--
+*
+* EliasDB - Data mining frontend example
+*
+* Copyright 2020 Matthias Ladkau. All rights reserved.
+*
+* This Source Code Form is subject to the terms of the Mozilla Public
+* License, v. 2.0. If a copy of the MPL was not distributed with this
+* file, You can obtain one at http://mozilla.org/MPL/2.0/.
+*
+
+Simple line chart using vue-chartjs.
+-->
+<script>
+import { Line, mixins } from "vue-chartjs";
+
+export default {
+  extends: Line,
+  mixins: [mixins.reactiveProp],
+  props: ["options"],
+  mounted() {
+    this.renderChart(this.chartData, this.options);
+  }
+};
+</script>
+
+<style>
+</style>

+ 102 - 0
examples/data-mining/docker-images/frontend/app/src/component/LineResult.vue

@@ -0,0 +1,102 @@
+<!--
+*
+* EliasDB - Data mining frontend example
+*
+* Copyright 2020 Matthias Ladkau. All rights reserved.
+*
+* This Source Code Form is subject to the terms of the Mozilla Public
+* License, v. 2.0. If a copy of the MPL was not distributed with this
+* file, You can obtain one at http://mozilla.org/MPL/2.0/.
+*
+
+Graph which renders a result set as a line chart.
+-->
+<template>
+  <div class="small">
+    <line-chart :chart-data="linechartdata" :options="linechartoptions"></line-chart>
+    <div class="chat-msg-window" v-for="msg in messages" v-bind:key="msg.key">
+      <div>{{msg.result}}</div>
+    </div>
+  </div>
+</template>
+
+<script lang="ts">
+import Vue from "vue";
+import LineChart from "./LineChart.vue";
+import { EliasDBGraphQLClient } from "../lib/eliasdb-graphql";
+
+interface Message {
+  key: string;
+  result: string;
+}
+
+export default Vue.extend({
+  props: ["name", "last"],
+  data() {
+    return {
+      client: new EliasDBGraphQLClient(),
+      messages: [] as Message[],
+      linechartdata: null as any,
+      linechartoptions: {
+        responsive: true,
+        maintainAspectRatio: false
+      }
+    };
+  },
+  mounted: async function() {
+    // Query the stored ping results from EliasDB
+
+    let results: any[] = [];
+
+    try {
+      const response = await this.client.req(`
+{
+  PingResult(ascending: "key", last:${this.last}) {
+    key
+    result
+    success
+    url
+  }
+}`);
+      results = JSON.parse(response).data.PingResult;
+      console.log("Results:", results);
+    } catch (e) {
+      console.error("Could not query results:", e);
+    }
+
+    let labels: string[] = [];
+    let data: number[] = [];
+
+    results.forEach(r => {
+      const timestamp = new Date();
+      timestamp.setTime(parseInt(r["key"]) * 1000);
+      const secs = parseFloat("0." + r["result"].split(".")[1]);
+      if (!isNaN(secs)) {
+        labels.push(timestamp.toISOString());
+        data.push(secs);
+      }
+    });
+
+    this.linechartdata = {
+      labels: labels,
+      datasets: [
+        {
+          label: this.name,
+          backgroundColor: "#f87979",
+          data: data,
+          fill: false
+        }
+      ]
+    };
+  },
+  components: {
+    LineChart
+  }
+});
+</script>
+
+<style>
+.small {
+  margin: 50px auto;
+}
+</style>

+ 21 - 0
examples/data-mining/docker-images/frontend/app/src/index.ts

@@ -0,0 +1,21 @@
+import Vue from 'vue';
+import LineResult from './component/LineResult.vue';
+
+let v = new Vue({
+    el: '#app',
+    template: `
+    <div>
+        <h1>Ping results for devt.de</h1>
+        <line-result :name="name" :last="last" />
+    </div>
+    `,
+    data() {
+        return {
+            name: 'Ping Result',
+            last: '50',
+        };
+    },
+    components: {
+        LineResult,
+    },
+});

+ 231 - 0
examples/data-mining/docker-images/frontend/app/src/lib/eliasdb-graphql.ts

@@ -0,0 +1,231 @@
+/**
+ * EliasDB - JavaScript GraphQL client library (http version)
+ *
+ * Copyright 2019 Matthias Ladkau. All rights reserved.
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
+ *
+ */
+export enum RequestMetod {
+    Post = 'post',
+    Get = 'get',
+}
+
+export class EliasDBGraphQLClient {
+    /**
+     * Host this client is connected to.
+     */
+    protected host: string;
+
+    /**
+     * Partition this client is working on.
+     */
+    protected partition: string;
+
+    /**
+     * Websocket over which we can handle subscriptions.
+     */
+    private ws?: WebSocket;
+
+    /**
+     * EliasDB GraphQL endpoints.
+     */
+    private graphQLEndpoint: string;
+    private graphQLReadOnlyEndpoint: string;
+
+    /**
+     * List of operations to execute once the websocket connection is established.
+     */
+    private delayedOperations: {(): void}[] = [];
+
+    /**
+     * Queue of subscriptions which await an id.
+     */
+    private subscriptionQueue: {(data: any): void}[] = [];
+
+    /**
+     * Map of active subscriptions.
+     */
+    private subscriptionCallbacks: {[id: string]: {(data: any): void}} = {};
+
+    /**
+     * Create a new EliasDB GraphQL Client.
+     *
+     * @param host Host to connect to.
+     * @param partition Partition to query.
+     */
+    public constructor(
+        host: string = window.location.host,
+        partition: string = 'main',
+    ) {
+        this.host = host;
+        this.partition = partition;
+        this.graphQLEndpoint = `http://${host}/db/v1/graphql/${partition}`;
+        this.graphQLReadOnlyEndpoint = `http://${host}/db/v1/graphql-query/${partition}`;
+    }
+
+    /**
+     * Initialize a websocket to support subscriptions.
+     */
+    private initWebsocket() {
+        const url = `ws://${this.host}/db/v1/graphql-subscriptions/${this.partition}`;
+        this.ws = new WebSocket(url);
+        this.ws.onmessage = this.message.bind(this);
+
+        this.ws.onopen = () => {
+            if (this.ws) {
+                this.ws.send(
+                    JSON.stringify({
+                        type: 'init',
+                        payload: {},
+                    }),
+                );
+            }
+        };
+    }
+
+    /**
+     * Run a GraphQL query or mutation and return the response.
+     *
+     * @param query Query to run.
+     * @param variables List of variable values. The query must define these
+     *                  variables.
+     * @param operationName Name of the named operation to run. The query must
+     *                      specify this named operation.
+     * @param method  Request method to use. Get requests cannot run mutations.
+     */
+    public req(
+        query: string,
+        variables: {[key: string]: any} = {},
+        operationName: string = '',
+        method: RequestMetod = RequestMetod.Post,
+    ): Promise<any> {
+        const http = new XMLHttpRequest();
+
+        const toSend: {[key: string]: any} = {
+            operationName,
+            variables,
+            query,
+        };
+
+        // Send an async ajax call
+
+        if (method === RequestMetod.Post) {
+            http.open(method, this.graphQLEndpoint, true);
+        } else {
+            const params = Object.keys(toSend)
+                .map(key => {
+                    const val =
+                        key !== 'variables'
+                            ? toSend[key]
+                            : JSON.stringify(toSend[key]);
+                    return `${key}=${encodeURIComponent(val)}`;
+                })
+                .join('&');
+            const url = `${this.graphQLReadOnlyEndpoint}?${params}`;
+
+            http.open(method, url, true);
+        }
+
+        http.setRequestHeader('content-type', 'application/json');
+
+        return new Promise(function(resolve, reject) {
+            http.onload = function() {
+                try {
+                    if (http.status === 200) {
+                        resolve(http.response);
+                    } else {
+                        let err: string;
+                        try {
+                            err = JSON.parse(http.responseText)['errors'];
+                        } catch {
+                            err = http.responseText.trim();
+                        }
+                        reject(err);
+                    }
+                } catch (e) {
+                    reject(e);
+                }
+            };
+
+            if (method === RequestMetod.Post) {
+                http.send(JSON.stringify(toSend));
+            } else {
+                http.send();
+            }
+        });
+    }
+
+    /**
+     * Run a GraphQL subscription and receive updates if the data changes.
+     *
+     * @param query Query to run.
+     * @param update Update callback.
+     */
+    public subscribe(
+        query: string,
+        update: (data: any) => void,
+        variables: any = null,
+    ) {
+        if (!this.ws) {
+            this.initWebsocket();
+        }
+
+        if (this.ws) {
+            const that = this;
+            const subscribeCall = function() {
+                if (that.ws) {
+                    that.ws.send(
+                        JSON.stringify({
+                            id: that.subscriptionQueue.length,
+                            query,
+                            type: 'subscription_start',
+                            variables: null,
+                        }),
+                    );
+                    that.subscriptionQueue.push(update);
+                }
+            };
+
+            if (this.ws.readyState !== WebSocket.OPEN) {
+                this.delayedOperations.push(subscribeCall);
+            } else {
+                subscribeCall();
+            }
+        }
+    }
+
+    /**
+     * Process a new websocket message.
+     *
+     * @param msg New message.
+     */
+    protected message(msg: MessageEvent) {
+        const pmsg = JSON.parse(msg.data);
+
+        if (pmsg.type == 'init_success') {
+            // Execute the delayed operations
+
+            this.delayedOperations.forEach(c => c());
+            this.delayedOperations = [];
+        } else if (pmsg.type == 'subscription_success') {
+            const callback = this.subscriptionQueue.shift();
+            if (callback) {
+                const id = pmsg.id;
+                this.subscriptionCallbacks[id] = callback;
+            }
+        } else if (pmsg.type == 'subscription_data') {
+            const callback = this.subscriptionCallbacks[pmsg.id];
+            if (callback) {
+                callback(pmsg.payload);
+            }
+        } else if (pmsg.type == 'subscription_fail') {
+            console.error(
+                'Subscription failed: ',
+                pmsg.payload.errors.join('; '),
+            );
+        }
+    }
+}

+ 4 - 0
examples/data-mining/docker-images/frontend/app/src/vue-shims.d.ts

@@ -0,0 +1,4 @@
+declare module '*.vue' {
+    import Vue from 'vue';
+    export default Vue;
+}

+ 66 - 0
examples/data-mining/docker-images/frontend/app/tsconfig.json

@@ -0,0 +1,66 @@
+{
+  "compilerOptions": {
+    /* Basic Options */
+    // "incremental": true,                   /* Enable incremental compilation */
+    "target": "es2015",                          /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017', 'ES2018', 'ES2019' or 'ESNEXT'. */
+    "module": "es2015",                     /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', or 'ESNext'. */
+    // "lib": [],                             /* Specify library files to be included in the compilation. */
+    // "allowJs": true,                       /* Allow javascript files to be compiled. */
+    // "checkJs": true,                       /* Report errors in .js files. */
+    // "jsx": "preserve",                     /* Specify JSX code generation: 'preserve', 'react-native', or 'react'. */
+    // "declaration": true,                   /* Generates corresponding '.d.ts' file. */
+    // "declarationMap": true,                /* Generates a sourcemap for each corresponding '.d.ts' file. */
+    "sourceMap": true,                     /* Generates corresponding '.map' file. */
+    // "outFile": "./",                       /* Concatenate and emit output to single file. */
+    "outDir": "./build/",                        /* Redirect output structure to the directory. */
+    // "rootDir": "./",                       /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
+    // "composite": true,                     /* Enable project compilation */
+    // "tsBuildInfoFile": "./",               /* Specify file to store incremental compilation information */
+    // "removeComments": true,                /* Do not emit comments to output. */
+    // "noEmit": true,                        /* Do not emit outputs. */
+    // "importHelpers": true,                 /* Import emit helpers from 'tslib'. */
+    // "downlevelIteration": true,            /* Provide full support for iterables in 'for-of', spread, and destructuring when targeting 'ES5' or 'ES3'. */
+    // "isolatedModules": true,               /* Transpile each file as a separate module (similar to 'ts.transpileModule'). */
+
+    /* Strict Type-Checking Options */
+    "strict": true,                           /* Enable all strict type-checking options. */
+    // "noImplicitAny": true,                 /* Raise error on expressions and declarations with an implied 'any' type. */
+    // "strictNullChecks": true,              /* Enable strict null checks. */
+    // "strictFunctionTypes": true,           /* Enable strict checking of function types. */
+    // "strictBindCallApply": true,           /* Enable strict 'bind', 'call', and 'apply' methods on functions. */
+    // "strictPropertyInitialization": true,  /* Enable strict checking of property initialization in classes. */
+    // "noImplicitThis": true,                /* Raise error on 'this' expressions with an implied 'any' type. */
+    // "alwaysStrict": true,                  /* Parse in strict mode and emit "use strict" for each source file. */
+
+    /* Additional Checks */
+    // "noUnusedLocals": true,                /* Report errors on unused locals. */
+    // "noUnusedParameters": true,            /* Report errors on unused parameters. */
+    "noImplicitReturns": true,             /* Report error when not all code paths in function return a value. */
+    // "noFallthroughCasesInSwitch": true,    /* Report errors for fallthrough cases in switch statement. */
+
+    /* Module Resolution Options */
+    "moduleResolution": "node",            /* Specify module resolution strategy: 'node' (Node.js) or 'classic' (TypeScript pre-1.6). */
+    // "baseUrl": "./",                       /* Base directory to resolve non-absolute module names. */
+    // "paths": {},                           /* A series of entries which re-map imports to lookup locations relative to the 'baseUrl'. */
+    // "rootDirs": [],                        /* List of root folders whose combined content represents the structure of the project at runtime. */
+    // "typeRoots": [],                       /* List of folders to include type definitions from. */
+    // "types": [],                           /* Type declaration files to be included in compilation. */
+    // "allowSyntheticDefaultImports": true,  /* Allow default imports from modules with no default export. This does not affect code emit, just typechecking. */
+    "esModuleInterop": true                   /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
+    // "preserveSymlinks": true,              /* Do not resolve the real path of symlinks. */
+    // "allowUmdGlobalAccess": true,          /* Allow accessing UMD globals from modules. */
+
+    /* Source Map Options */
+    // "sourceRoot": "",                      /* Specify the location where debugger should locate TypeScript files instead of source locations. */
+    // "mapRoot": "",                         /* Specify the location where debugger should locate map files instead of generated locations. */
+    // "inlineSourceMap": true,               /* Emit a single file with source maps instead of having a separate file. */
+    // "inlineSources": true,                 /* Emit the source alongside the sourcemaps within a single file; requires '--inlineSourceMap' or '--sourceMap' to be set. */
+
+    /* Experimental Options */
+    // "experimentalDecorators": true,        /* Enables experimental support for ES7 decorators. */
+    // "emitDecoratorMetadata": true,         /* Enables experimental support for emitting type metadata for decorators. */
+  },
+  "include": [
+    "./src/**/*"
+  ]
+}

+ 104 - 0
examples/data-mining/docker-images/frontend/app/webpack.config.js

@@ -0,0 +1,104 @@
+var path = require('path')
+var webpack = require('webpack')
+const VueLoaderPlugin = require('vue-loader/lib/plugin')
+
+module.exports = {
+  entry: './src/index.ts',
+  output: {
+    path: path.resolve(__dirname, './dist'),
+    publicPath: '/dist/',
+    filename: 'frontend.js'
+  },
+  module: {
+    rules: [
+      {
+        test: /\.vue$/,
+        loader: 'vue-loader',
+        options: {
+          loaders: {
+            // Since sass-loader (weirdly) has SCSS as its default parse mode, we map
+            // the "scss" and "sass" values for the lang attribute to the right configs here.
+            // other preprocessors should work out of the box, no loader config like this necessary.
+            'scss': 'vue-style-loader!css-loader!sass-loader',
+            'sass': 'vue-style-loader!css-loader!sass-loader?indentedSyntax',
+          }
+          // other vue-loader options go here
+        }
+      },
+      {
+        test: /\.tsx?$/,
+        loader: 'ts-loader',
+        exclude: /node_modules/,
+        options: {
+          appendTsSuffixTo: [/\.vue$/],
+        }
+      },
+      {
+        test: /\.(png|jpg|gif|svg)$/,
+        loader: 'file-loader',
+        options: {
+          name: '[name].[ext]?[hash]'
+        }
+      },
+      {
+        test: /\.css$/,
+        use: [
+          'vue-style-loader',
+          'css-loader'
+        ]
+      }
+    ]
+  },
+  resolve: {
+    extensions: ['.ts', '.js', '.vue', '.json'],
+    alias: {
+      'vue$': 'vue/dist/vue.esm.js'
+    }
+  },
+  devServer: {
+    historyApiFallback: true,
+    noInfo: true
+  },
+  performance: {
+    hints: false
+  },
+  devtool: '#eval-source-map',
+  plugins: [
+    // make sure to include the plugin for the magic
+    new VueLoaderPlugin(),
+    // add a nice banner
+    new webpack.BannerPlugin({ 
+        banner: `
+ EliasDB - Data mining frontend example
+
+ Copyright 2020 Matthias Ladkau. All rights reserved.
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+`, 
+        entryOnly: true 
+    })
+  ]
+}
+
+if (process.env.NODE_ENV === 'production') {
+  module.exports.devtool = '#source-map'
+  // http://vue-loader.vuejs.org/en/workflow/production.html
+  module.exports.plugins = (module.exports.plugins || []).concat([
+    new webpack.DefinePlugin({
+      'process.env': {
+        NODE_ENV: '"production"'
+      }
+    }),
+    new webpack.optimize.UglifyJsPlugin({
+      sourceMap: true,
+      compress: {
+        warnings: false
+      }
+    }),
+    new webpack.LoaderOptionsPlugin({
+      minimize: true
+    })
+  ])
+}

+ 15 - 0
examples/data-mining/docker-images/frontend/build.sh

@@ -0,0 +1,15 @@
+#!/bin/sh
+
+# Copy graphiql from examples
+
+cp -fR ../../../tutorial/res/graphiql ./app
+
+# Build the frontend component
+
+docker build --tag data-mining/frontend .
+
+# Run container
+# docker run -p 8080:80 data-mining/frontend
+
+# Run an interactive shell on the build image with:
+# docker run -it data-mining/frontend sh

+ 25 - 0
examples/data-mining/docker-images/frontend/etc/default.conf

@@ -0,0 +1,25 @@
+server {
+    listen       80;
+    server_name  localhost;
+
+    #charset koi8-r;
+    #access_log  /var/log/nginx/host.access.log  main;
+
+    location / {
+        root   /usr/share/nginx/html;
+        index  index.html index.htm;
+    }
+
+    #error_page  404              /404.html;
+
+    # redirect server error pages to the static page /50x.html
+    #
+    error_page   500 502 503 504  /50x.html;
+    location = /50x.html {
+        root   /usr/share/nginx/html;
+    }
+
+    location /db/ {
+        proxy_pass   https://eliasdb1:9090/db/;
+    }
+}

+ 154 - 0
graph/graphmanager_cluster_test.go

@@ -23,6 +23,160 @@ import (
 	"devt.de/krotik/eliasdb/hash"
 )
 
+func TestClusterWithPhysicalStorage(t *testing.T) {
+
+	log.SetOutput(ioutil.Discard)
+
+	dgs1, err := graphstorage.NewDiskGraphStorage(GraphManagerTestDBDir5, false)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	ds1, _ := cluster.NewDistributedStorage(dgs1, map[string]interface{}{
+		manager.ConfigRPC:           fmt.Sprintf("localhost:%v", 9021),
+		manager.ConfigMemberName:    fmt.Sprintf("TestClusterMember-1"),
+		manager.ConfigClusterSecret: "test123",
+	}, manager.NewMemStateInfo())
+
+	ds1.Start()
+	defer ds1.Close()
+
+	dgs2, err := graphstorage.NewDiskGraphStorage(GraphManagerTestDBDir6, false)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	ds2, _ := cluster.NewDistributedStorage(dgs2, map[string]interface{}{
+		manager.ConfigRPC:           fmt.Sprintf("localhost:%v", 9022),
+		manager.ConfigMemberName:    fmt.Sprintf("TestClusterMember-2"),
+		manager.ConfigClusterSecret: "test123",
+	}, manager.NewMemStateInfo())
+
+	ds2.Start()
+	defer ds2.Close()
+
+	err = ds2.MemberManager.JoinCluster(ds1.MemberManager.Name(),
+		ds1.MemberManager.NetAddr())
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	sm := ds1.StorageManager("foo", true)
+	sm2 := ds2.StorageManager("foo", true)
+
+	loc, err := sm.Insert("test123")
+	if loc != 1 || err != nil {
+		t.Error("Unexpected result:", loc, err)
+		return
+	}
+
+	loc, err = sm2.Insert("test456")
+	if loc != 2 || err != nil {
+		t.Error("Unexpected result:", loc, err)
+		return
+	}
+
+	res := ""
+
+	if err := sm2.Fetch(1, &res); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if res != "test123" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	if err := sm2.Fetch(2, &res); err != nil {
+		t.Error(err)
+		return
+	}
+
+	if res != "test456" {
+		t.Error("Unexpected result:", res)
+		return
+	}
+
+	// *** HTree storage
+
+	// Use a HTree to insert to and fetch from a storage manager
+
+	sm = ds1.StorageManager("foo2", true)
+	sm2 = ds2.StorageManager("foo2", true)
+
+	htree, err := hash.NewHTree(sm)
+	if err != nil {
+		t.Error(err)
+		return
+	}
+
+	if valres, err := htree.Put([]byte("123"), "Test1"); err != nil || valres != nil {
+		t.Error("Unexpected result:", valres, err)
+		return
+	}
+
+	if valres, err := htree.Put([]byte("123"), "Test2"); err != nil || valres != "Test1" {
+		t.Error("Unexpected result:", valres, err)
+		return
+	}
+
+	// Try to retrieve the item again
+
+	cluster.WaitForTransfer()
+
+	if val, err := htree.Get([]byte("123")); err != nil || val != "Test2" {
+		t.Error("Unexpected result:", val, err)
+		return
+	}
+
+	htree2, err := hash.LoadHTree(sm2, 1)
+	if val, err := htree2.Get([]byte("123")); err != nil || val != "Test2" {
+		t.Error("Unexpected result:", val, err)
+		return
+	}
+
+	// *** GraphManager storage
+
+	gm1 := NewGraphManager(ds1)
+
+	if err := gm1.StoreNode("main", data.NewGraphNodeFromMap(map[string]interface{}{
+		"key":  "123",
+		"kind": "testnode",
+		"foo":  "bar",
+	})); err != nil {
+		t.Error("Unexpected result:", err)
+		return
+	}
+
+	cluster.WaitForTransfer()
+
+	if node, err := gm1.FetchNode("main", "123", "testnode"); err != nil ||
+		node.String() != `GraphNode:
+     key : 123
+    kind : testnode
+     foo : bar
+` {
+		t.Error("Unexpected result:", node, err)
+		return
+	}
+
+	gm2 := NewGraphManager(ds2)
+
+	if node, err := gm2.FetchNode("main", "123", "testnode"); err != nil ||
+		node.String() != `GraphNode:
+     key : 123
+    kind : testnode
+     foo : bar
+` {
+		t.Error("Unexpected result:", node, err)
+		return
+	}
+}
+
 func TestClusterStorage(t *testing.T) {
 
 	cluster2 := createCluster(2)

+ 4 - 1
graph/graphmanager_test.go

@@ -30,9 +30,12 @@ const GraphManagerTestDBDir1 = "gmtest1"
 const GraphManagerTestDBDir2 = "gmtest2"
 const GraphManagerTestDBDir3 = "gmtest3"
 const GraphManagerTestDBDir4 = "gmtest4"
+const GraphManagerTestDBDir5 = "gmtest5"
+const GraphManagerTestDBDir6 = "gmtest6"
 
 var DBDIRS = []string{GraphManagerTestDBDir1, GraphManagerTestDBDir2,
-	GraphManagerTestDBDir3, GraphManagerTestDBDir4}
+	GraphManagerTestDBDir3, GraphManagerTestDBDir4, GraphManagerTestDBDir5,
+	GraphManagerTestDBDir6}
 
 const InvlaidFileName = "**" + string(0x0)
 

+ 0 - 0
graph/import_export.go


+ 0 - 0
graph/import_export_test.go


+ 4 - 4
server/pages.go

@@ -806,7 +806,7 @@ const ClusterTermSRC = `
             padding: 10px;
             border: #000000 2px solid;
             border-radius: 10px;
-            margin: 3em 0 0 0;
+            margin: 3em 0 0 1em;
         }
 
         .c-term h2 {
@@ -821,7 +821,7 @@ const ClusterTermSRC = `
 
         .c-term input {
             padding: 2px;
-            width:100%;
+            width:95%;
         }
 
         .c-term .term_result {
@@ -853,7 +853,7 @@ const ClusterTermSRC = `
             padding: 10px;
             border: #000000 2px solid;
             border-radius: 10px;
-            margin: 3em 0 0 0;
+            margin: 3em 1em 0 0;
         }
 
         .c-log pre {
@@ -874,7 +874,7 @@ const ClusterTermSRC = `
             padding: 10px;
             border: #000000 2px solid;
             border-radius: 10px;
-            margin: 3em 0 0 0;
+            margin: 3em 0 0 1em;
             max-width: 80%;
             word-wrap: break-word;
         }