/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package api contains general REST API definitions.
The REST API provides an interface to EliasDB. It allows querying and modifying
the datastore. The API responds to GET, POST, PUT and DELETE requests with JSON
if the request was successful (return code 200 OK) and with plain text in all other cases.
Common API definitions
/about
Endpoint which returns an object with version information.
api_versions : List of available API versions, e.g. [ "v1" ]
product      : Name of the API provider (EliasDB)
version      : Version of the API provider
revision     : Revision of the API provider
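For illustration, a GET request to /about might return an object of the
following form (a sketch - actual values depend on the build):
{
api_versions : [ "v1" ],
product : "EliasDB",
version : "1.0.0"
}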
/swagger.json
Dynamically generated swagger definition file. See: http://swagger.io
*/
package api
import (
"encoding/json"
"net/http"
"devt.de/krotik/eliasdb/config"
)
/*
EndpointAbout is the about endpoint URL (rooted). Handles about/
*/
const EndpointAbout = APIRoot + "/about/"
/*
AboutEndpointInst creates a new endpoint handler.
*/
func AboutEndpointInst() RestEndpointHandler {
return &aboutEndpoint{}
}
/*
Handler object for about operations.
*/
type aboutEndpoint struct {
*DefaultEndpointHandler
}
/*
HandleGET returns about data for the REST API.
*/
func (a *aboutEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
data := map[string]interface{}{
"api_versions": []string{"v1"},
"product": "EliasDB",
"version": config.ProductVersion,
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package ac contains management code for access control.
*/
package ac
import (
"fmt"
"log"
"net/http"
"net/url"
"strings"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/httputil/access"
"devt.de/krotik/common/httputil/auth"
"devt.de/krotik/common/httputil/user"
"devt.de/krotik/eliasdb/api"
)
// Code and datastructures relating to access control
// ==================================================
/*
PublicAccessControlEndpointMap contains endpoints which should be publicly
available when access control is used.
*/
var PublicAccessControlEndpointMap = map[string]api.RestEndpointInst{
EndpointLogin: LoginEndpointInst,
EndpointLogout: LogoutEndpointInst,
EndpointWhoAmI: WhoAmIEndpointInst,
}
/*
AccessManagementEndpointMap contains endpoints which can manage access rights
*/
var AccessManagementEndpointMap = map[string]api.RestEndpointInst{
EndpointUser: UserEndpointInst,
}
/*
LogAccess is used to log access requests
*/
var LogAccess = log.Print
/*
UserDB is the global user database which holds the password hashes and user
details.
*/
var UserDB *datautil.EnforcedUserDB
/*
ACL is the global AccessControlLists object which should be used to check
user access rights.
*/
var ACL *AccessControlLists
/*
AuthHandler is a wrapper object which has a HandleFunc similar to http.HandleFunc.
The HandleFunc of this object should be used for all endpoints which should check
for authentication and authorization.
*/
var AuthHandler *auth.CookieAuthHandleFuncWrapper
/*
DefaultAccessDB is the default access table for EliasDB
*/
var DefaultAccessDB = []byte(`
/*
Access control file for EliasDB. This file controls the access rights for each user.
Rights to resources are assigned to groups. Users are assigned to groups.
This file is monitored by the server - any changes to this file are picked up
by the server immediately. Equally, any change on the server side is immediately
written to this file.
The comments in this file are for initial comprehension only. They will be
removed as soon as the users, groups or permissions are modified from the
server side.
*/
{
"groups": {
"public": {
// Page access
// ===========
"/": "-R--", // Access to the root page
// Resource access
// ===============
"/css/*": "-R--", // Access to CSS rules
"/js/*": "-R--", // Access to JavaScript files
"/img/*": "-R--", // Access to image files
"/vendor/*": "-R--", // Access to frontend libraries
// REST API access
// ===============
"/db/*": "-R--" // Access to database (read)
},
"admin": {
// REST API access
// ===============
"/db/*": "CRUD" // Access to database
}
},
"users": {
"elias": [ // Default EliasDB admin user
"public",
"admin"
],
"johndoe" : [ // Default EliasDB public user
"public"
]
}
}
`[1:])
/*
InitACLs initializes the access control list object.
*/
func InitACLs(tab access.ACLTable) {
ACL = &AccessControlLists{tab}
}
// Access request types
//
const (
CREATE = "create"
READ = "read"
UPDATE = "update"
DELETE = "delete"
)
// Access request results
//
const (
GRANTED = "granted"
DENIED = "denied"
)
// Mapping from http request method to access request type
//
var httpRequestMapping = map[string]string{
"": READ,
"get": READ,
"put": UPDATE,
"post": CREATE,
"delete": DELETE,
}
/*
AccessControlLists store the access rights of groups and which users are
member of which groups.
*/
type AccessControlLists struct {
access.ACLTable
}
/*
CheckHTTPRequest checks the request of a given user to a resource.
*/
func (a *AccessControlLists) CheckHTTPRequest(w http.ResponseWriter, r *http.Request, user string) bool {
var result = DENIED
var detail = "No rule which grants access was found"
// Extract request details
requestType := httpRequestMapping[strings.ToLower(r.Method)]
requestResource := r.URL.Path
// Build rights object
requestRights := &access.Rights{
Create: requestType == CREATE,
Read: requestType == READ,
Update: requestType == UPDATE,
Delete: requestType == DELETE,
}
// Check ACLTable
if res, resDetail, err := a.IsPermitted(user, requestResource, requestRights); res && err == nil {
result = GRANTED
detail = resDetail
} else if err != nil {
detail = err.Error()
}
// Log the result
text := fmt.Sprintf("User %v requested %v access to %v - %v (%v)",
user, requestType, requestResource, result, detail)
LogAccess(text)
if result != GRANTED {
http.Error(w, fmt.Sprintf("Requested %v access to %v was denied",
requestType, requestResource),
http.StatusForbidden)
}
return result == GRANTED
}
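/*
Example (illustrative sketch): with the DefaultAccessDB above, user "johndoe"
is a member of group public, which has read access to /db/*. A GET request by
this user maps to a read rights object and would be granted via the embedded
ACLTable (the resource path below is hypothetical):

rights := &access.Rights{Read: true}
granted, detail, err := ACL.IsPermitted("johndoe", "/db/v1/graph/main", rights)
*/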
// Default error handlers
/*
CallbackSessionExpired handles requests where the session has expired.
*/
var CallbackSessionExpired = func(w http.ResponseWriter, r *http.Request) {
u, ok := AuthHandler.CheckAuth(r)
// Remove all cookies
AuthHandler.RemoveAuthCookie(w)
user.UserSessionManager.RemoveSessionCookie(w)
if ok {
LogAccess("User ", u, " session expired")
}
origPath := r.URL.Path
if r.URL.RawQuery != "" {
origPath += "?" + r.URL.RawQuery
}
http.Redirect(w, r, fmt.Sprintf("/login.html?msg=Session+Expired&ref=%v",
url.QueryEscape(origPath)), http.StatusFound)
}
/*
CallbackUnauthorized handles requests which are unauthorized.
*/
var CallbackUnauthorized = func(w http.ResponseWriter, r *http.Request) {
LogAccess("Unauthorized request to ", r.URL.Path,
" from ", r.RemoteAddr, " (", r.UserAgent(), " Cookies: ", r.Cookies(), ")")
if strings.HasPrefix(r.URL.Path, api.APIRoot) {
// No redirect for REST clients
http.Error(w, "Valid credentials required", http.StatusForbidden)
} else {
origPath := r.URL.Path
if r.URL.RawQuery != "" {
origPath += "?" + r.URL.RawQuery
}
http.Redirect(w, r, fmt.Sprintf("/login.html?ref=%v",
url.QueryEscape(origPath)), http.StatusFound)
}
}
// Helper functions
// ================
/*
checkResources checks the number of given resources of a request against the given limits.
*/
func checkResources(w http.ResponseWriter, resources []string, requiredMin int, requiredMax int, errorMsg string) bool {
if len(resources) < requiredMin {
http.Error(w, errorMsg, http.StatusBadRequest)
return false
} else if len(resources) > requiredMax {
http.Error(w, "Invalid resource specification: "+strings.Join(resources[1:], "/"), http.StatusBadRequest)
return false
}
return true
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package ac
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"time"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/httputil"
"devt.de/krotik/common/httputil/auth"
"devt.de/krotik/eliasdb/api"
)
/*
EndpointLogin is the login endpoint definition (rooted). Handles login/
*/
const EndpointLogin = api.APIRoot + "/login/"
/*
DebounceTime is the default debounce time after a failed login
*/
var DebounceTime = 5 * time.Second
/*
LoginEndpointInst creates a new endpoint handler. Requires a CookieAuthHandleFuncWrapper
object to verify login requests.
*/
func LoginEndpointInst() api.RestEndpointHandler {
errorutil.AssertTrue(AuthHandler != nil, "AuthHandler not initialized")
return &loginEndpoint{
&api.DefaultEndpointHandler{},
AuthHandler,
3,  // allowedRetries
20, // bruteForceDebounce (seconds)
datautil.NewMapCache(0, int64(20)), // failedLogins
datautil.NewMapCache(0, int64(20)), // debounceUsers
}
}
/*
Handler object for cookie based login operations.
*/
type loginEndpoint struct {
*api.DefaultEndpointHandler
authHandler *auth.CookieAuthHandleFuncWrapper // AuthHandler object to verify login requests
allowedRetries int // Number of retries a user has to enter the correct password
bruteForceDebounce int // Time in seconds a user has to wait after too many failed attempts
failedLogins *datautil.MapCache // Map of failed login attempts per user
debounceUsers *datautil.MapCache // Map of users which have to wait after too many failed attempts
}
/*
HandlePOST tries to log a user in.
*/
func (le *loginEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
restClient := false
data := make(map[string]interface{})
ct := r.Header.Get("Content-Type")
// Decode body either as application/json or application/x-www-form-urlencoded
// This endpoint can be used by REST clients as well as pages using form submissions
if ct == "application/json" {
// The client is a REST client
restClient = true
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&data); err != nil {
http.Error(w, fmt.Sprintf("Could not decode request body: %v",
err.Error()), http.StatusBadRequest)
return
}
} else if err := r.ParseForm(); err == nil {
// Not a JSON request - try normal form data decoding
data["user"] = r.FormValue("user")
data["pass"] = r.FormValue("pass")
data["redirect_ok"] = r.FormValue("redirect_ok")
data["redirect_notok"] = r.FormValue("redirect_notok")
}
// Extract login request parameters
user, ok1 := data["user"]
pass, ok2 := data["pass"]
redirectOk, ok3 := data["redirect_ok"]
if !ok3 || redirectOk == "" {
redirectOk = "/"
}
redirectNotOk, ok4 := data["redirect_notok"]
if !ok4 || redirectNotOk == "" {
redirectNotOk = "/"
if u, err := url.Parse(r.Referer()); err == nil {
redirectNotOk = u.Path
}
}
if ok1 && ok2 && user != "" {
redirect := redirectNotOk
if aid := le.authHandler.AuthUser(fmt.Sprint(user), fmt.Sprint(pass), false); aid != "" {
redirect = redirectOk
le.authHandler.SetAuthCookie(aid, w)
} else {
LogAccess("Authentication for user ", user, " failed")
// Add a time delay for negative answers to make dictionary attacks
// more tedious
time.Sleep(DebounceTime)
}
if !restClient {
// Redirect if the other end is not a REST client
redirectString := fmt.Sprint(redirect)
// Make sure ok/notok redirect are local!
if err := httputil.CheckLocalRedirect(redirectString); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
http.Redirect(w, r, redirectString, http.StatusFound)
} else if redirect == redirectNotOk {
// The other end is a REST client and failed the authentication
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
}
// REST clients will just get a 200 with the cookie
return
}
http.Error(w, "Invalid authentication request", http.StatusBadRequest)
}
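/*
Example (sketch - host and credentials are illustrative only): a REST client
logs in by POSTing JSON credentials to /db/login/. On success the response is
a plain 200 carrying the auth cookie; on failure it is 401 Unauthorized.

body := strings.NewReader(`{"user": "elias", "pass": "some-secret"}`)
resp, err := http.Post("https://localhost:9090/db/login/", "application/json", body)
*/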
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (le *loginEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/login"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "Login as a user and create a session.",
"description": "The login endpoint can be used to log in and create a new user session.",
"consumes": []string{
"application/x-www-form-urlencoded",
"application/json",
},
"produces": []string{
"text/plain",
},
"parameters": []map[string]interface{}{
{
"name": "user",
"in": "formData",
"description": "Username to log in.",
"required": true,
"type": "string",
},
{
"name": "pass",
"in": "formData",
"description": "Cleartext password of the username.",
"required": true,
"type": "string",
},
{
"name": "redirect_ok",
"in": "formData",
"description": "Redirect URL if the log in is successful.",
"required": false,
"type": "string",
},
{
"name": "redirect_notok",
"in": "formData",
"description": "Redirect URL if the log in is not successful.",
"required": false,
"type": "string",
},
},
"responses": map[string]interface{}{
"302": map[string]interface{}{
"description": "Redirect depending on the log in result.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package ac
import (
"net/http"
"devt.de/krotik/common/httputil/user"
"devt.de/krotik/eliasdb/api"
)
/*
EndpointLogout is the logout endpoint URL (rooted). Handles logout/
*/
const EndpointLogout = api.APIRoot + "/logout/"
/*
LogoutEndpointInst creates a new endpoint handler.
*/
func LogoutEndpointInst() api.RestEndpointHandler {
return &logoutEndpoint{}
}
/*
Handler object for logout operations.
*/
type logoutEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandlePOST terminates the current user session.
*/
func (lo *logoutEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
// Remove all cookies - no check for valid authentication here so that
// old (invalid) cookies are removed as well
AuthHandler.InvalidateAuthCookie(r)
AuthHandler.RemoveAuthCookie(w)
user.UserSessionManager.RemoveSessionCookie(w)
ct := r.Header.Get("Content-Type")
if ct != "application/json" {
// Do a redirect for non-REST clients
http.Redirect(w, r, "/", http.StatusFound)
}
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (lo *logoutEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/logout"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "Logout the current user.",
"description": "The logout endpoint terminates the current user session.",
"consumes": []string{
"application/json",
},
"produces": []string{
"application/json",
},
"responses": map[string]interface{}{
"302": map[string]interface{}{
"description": "Redirect to /.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package ac
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/httputil/access"
"devt.de/krotik/eliasdb/api"
)
/*
EndpointUser is the user endpoint URL (rooted). Handles user/
*/
const EndpointUser = api.APIRoot + "/user/"
/*
EndpointWhoAmI is the current user endpoint URL (rooted). Handles whoami/
*/
const EndpointWhoAmI = api.APIRoot + "/whoami/"
/*
WhoAmIEndpointInst creates a new endpoint handler.
*/
func WhoAmIEndpointInst() api.RestEndpointHandler {
return &whoAmIEndpoint{}
}
/*
Handler object for whoami operations.
*/
type whoAmIEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles user queries.
*/
func (we *whoAmIEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
u, ok := AuthHandler.CheckAuth(r)
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(map[string]interface{}{
"username": u,
"logged_in": ok,
})
}
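/*
Example response (sketch): for an authenticated user the endpoint returns
something like {"username": "elias", "logged_in": true}; without a valid
session logged_in is false.
*/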
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (we *whoAmIEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/whoami"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about the current user.",
"description": "Returns information about the current user.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current user information.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"username": map[string]interface{}{
"description": "Name of the current user.",
"type": "string",
},
"logged_in": map[string]interface{}{
"description": "Flag if the current user is logged in.",
"type": "boolean",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
}
/*
UserEndpointInst creates a new endpoint handler.
*/
func UserEndpointInst() api.RestEndpointHandler {
return &userEndpoint{}
}
/*
Handler object for user operations.
*/
type userEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles user queries.
*/
func (ue *userEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var data interface{}
// Check parameters
if !checkResources(w, resources, 1, 2, "Need u or g (user/group) and optionally a name") {
return
}
if resources[0] == "u" {
var userData []map[string]interface{}
dataItem := func(u string) (map[string]interface{}, error) {
ud, ok := UserDB.UserData(u)
if !ok {
return nil, fmt.Errorf("User %s does not exist", u)
}
g, _ := ACL.GroupsOfUser(u)
if g == nil {
g = []string{}
}
return map[string]interface{}{
"username": u,
"groups": g,
"data": ud,
}, nil
}
if len(resources) > 1 {
// Return only a single user
item, err := dataItem(resources[1])
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(item)
return
}
users := UserDB.AllUsers()
sort.Strings(users)
for _, u := range users {
item, _ := dataItem(u)
userData = append(userData, item)
}
data = userData
} else if resources[0] == "g" {
groupData, _ := ACL.GetConfig()
if len(resources) > 1 {
var ok bool
groupPerm := groupData["groups"].(map[string]map[string]string)
if data, ok = groupPerm[resources[1]]; !ok {
data = map[string]interface{}{}
}
} else {
data = groupData["groups"]
}
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(data)
}
/*
HandlePOST handles a REST call to create new users and groups.
*/
func (ue *userEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 2, 2, "Need u or g (user/group) and a name") {
return
}
name := resources[1]
if resources[0] == "u" {
var userDataObject map[string]interface{}
var groupDataObject []interface{}
data := make(map[string]interface{})
dec := json.NewDecoder(r.Body)
if _, ok := UserDB.UserData(name); ok {
// Shortcut the tests if the user already exists
http.Error(w, fmt.Sprintf("Could not add user %s: User %s already exists", name, name),
http.StatusBadRequest)
return
}
if err := dec.Decode(&data); err != nil {
http.Error(w, "Could not decode request body as object: "+err.Error(),
http.StatusBadRequest)
return
}
password, ok := data["password"]
if !ok {
http.Error(w, "Password is missing in body object ", http.StatusBadRequest)
return
}
if userData, ok := data["user_data"]; ok {
if userDataObject, ok = userData.(map[string]interface{}); !ok {
http.Error(w, "User data is not an object", http.StatusBadRequest)
return
}
}
if groupData, ok := data["group_list"]; ok {
if groupDataObject, ok = groupData.([]interface{}); !ok {
http.Error(w, "Group list is not a list", http.StatusBadRequest)
return
}
names, _ := ACL.GroupNames()
for _, g := range groupDataObject {
group := fmt.Sprint(g)
if i := sort.SearchStrings(names, group); !(i < len(names) && names[i] == group) {
http.Error(w, fmt.Sprintf("Group %s does not exist", group), http.StatusBadRequest)
return
}
}
}
if err := UserDB.AddUserEntry(name, fmt.Sprint(password), userDataObject); err != nil {
http.Error(w, fmt.Sprintf("Could not add user %s: %s", name, err.Error()),
http.StatusBadRequest)
return
}
// Add user to various groups
for _, g := range groupDataObject {
ACL.AddUserToGroup(name, fmt.Sprint(g))
}
} else if resources[0] == "g" {
if err := ACL.AddGroup(name); err != nil {
http.Error(w, fmt.Sprintf("Could not add group %s: %s", name, err.Error()),
http.StatusBadRequest)
return
}
} else {
http.Error(w, "Need u or g (user/group) as first path element", http.StatusBadRequest)
return
}
}
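/*
Example request body (sketch) for creating a user via POST to user/u/<name>.
All groups in group_list must already exist; the user_data keys are
illustrative only:

{
"password" : "some-secret",
"user_data" : { "display_name" : "Some User" },
"group_list" : [ "public" ]
}
*/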
/*
HandlePUT handles a REST call to update an existing user or group.
*/
func (ue *userEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
var err error
// Check parameters
if !checkResources(w, resources, 2, 2, "Need u or g (user/group) and a name") {
return
}
name := resources[1]
if resources[0] == "u" {
var updates []func() error
var userDataObject map[string]interface{}
var groupDataObject []interface{}
if !UserDB.UserExists(name) {
http.Error(w, fmt.Sprintf("User %s does not exist", name), http.StatusBadRequest)
return
}
data := make(map[string]interface{})
dec := json.NewDecoder(r.Body)
if err = dec.Decode(&data); err != nil {
http.Error(w, "Could not decode request body as object: "+err.Error(),
http.StatusBadRequest)
return
}
if passwordObj, ok := data["password"]; ok {
password := fmt.Sprint(passwordObj)
if err = UserDB.IsAcceptablePassword(name, password); err == nil {
updates = append(updates, func() error {
return UserDB.UpdateUserPassword(name, password)
})
}
}
if err == nil {
if userData, ok := data["user_data"]; ok {
if userDataObject, ok = userData.(map[string]interface{}); !ok {
http.Error(w, "User data is not an object", http.StatusBadRequest)
return
}
updates = append(updates, func() error {
return UserDB.UpdateUserData(name, userDataObject)
})
}
if groupData, ok := data["group_list"]; ok {
var userGroups []string
if groupDataObject, ok = groupData.([]interface{}); !ok {
http.Error(w, "Group list is not a list", http.StatusBadRequest)
return
}
userGroups, _ = ACL.GroupsOfUser(name) // Ignore error here if the user does not exist
var names []string
names, err = ACL.GroupNames()
if err == nil {
for _, g := range groupDataObject {
group := fmt.Sprint(g)
if i := sort.SearchStrings(names, group); !(i < len(names) && names[i] == group) {
http.Error(w, fmt.Sprintf("Group %s does not exist", group), http.StatusBadRequest)
return
}
}
// No errors are expected when executing the transaction
for _, g := range userGroups {
errorutil.AssertOk(ACL.RemoveUserFromGroup(name, fmt.Sprint(g)))
}
for _, g := range groupDataObject {
errorutil.AssertOk(ACL.AddUserToGroup(name, fmt.Sprint(g)))
}
}
}
if err == nil {
// Execute the rest of the updates - no errors expected here
for _, f := range updates {
errorutil.AssertOk(f())
}
}
}
} else if resources[0] == "g" {
// Replace all permissions for a given group
if _, err = ACL.Permissions(name); err != nil {
http.Error(w, fmt.Sprintf("Group %s does not exist", name), http.StatusBadRequest)
return
}
data := make(map[string]interface{})
dec := json.NewDecoder(r.Body)
if err = dec.Decode(&data); err != nil {
http.Error(w, "Could not decode request body as object: "+err.Error(),
http.StatusBadRequest)
return
}
for _, perm := range data {
if _, err = access.RightsFromString(fmt.Sprint(perm)); err != nil {
break
}
}
if err == nil {
errorutil.AssertOk(ACL.ClearPermissions(name))
for path, perm := range data {
r, _ := access.RightsFromString(fmt.Sprint(perm))
errorutil.AssertOk(ACL.AddPermission(name, path, r))
}
}
} else {
err = fmt.Errorf("Need u or g (user/group) as first path element")
}
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
}
}
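/*
Example request body (sketch) for setting group permissions via PUT to
user/g/<name>. The object maps resource paths to CRUD permission strings;
the paths below are illustrative only:

{
"/" : "-R--",
"/db/*" : "CRUD"
}
*/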
/*
HandleDELETE handles a REST call to remove an existing user or group.
*/
func (ue *userEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 2, 2, "Need u or g (user/group) and a name") {
return
}
name := resources[1]
if resources[0] == "u" {
if err := UserDB.RemoveUserEntry(name); err != nil {
http.Error(w, fmt.Sprintf("Could not remove user %s: %s", name, err.Error()),
http.StatusBadRequest)
return
}
} else if resources[0] == "g" {
if err := ACL.RemoveGroup(name); err != nil {
http.Error(w, fmt.Sprintf("Could not remove group %s: %s", name, err.Error()),
http.StatusBadRequest)
return
}
} else {
http.Error(w, "Need u or g (user/group) as first path element", http.StatusBadRequest)
return
}
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ue *userEndpoint) SwaggerDefs(s map[string]interface{}) {
username := []map[string]interface{}{
{
"name": "name",
"in": "path",
"description": "Name of user.",
"required": true,
"type": "string",
},
}
groupname := []map[string]interface{}{
{
"name": "name",
"in": "path",
"description": "Name of group.",
"required": true,
"type": "string",
},
}
createParams := []map[string]interface{}{
{
"name": "user_creation_data",
"in": "body",
"description": "Additional data to create a user account",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"password": map[string]interface{}{
"description": "Password for the new user.",
"type": "string",
},
"user_data": map[string]interface{}{
"description": "Additional user data.",
"type": "object",
},
"group_list": map[string]interface{}{
"description": "List of groups.",
"type": "array",
"items": map[string]interface{}{
"type": "string",
},
},
},
},
},
}
updateParams := []map[string]interface{}{
{
"name": "user_update_data",
"in": "body",
"description": "Additional data to update a user account",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"password": map[string]interface{}{
"description": "New password for the user.",
"type": "string",
},
"user_data": map[string]interface{}{
"description": "New additional user data.",
"type": "object",
},
"group_list": map[string]interface{}{
"description": "New list of groups.",
"type": "array",
"items": map[string]interface{}{
"type": "string",
},
},
},
},
},
}
permParams := []map[string]interface{}{
{
"name": "permission_data",
"in": "body",
"description": "Resource paths and their permissions.",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"resource_path": map[string]interface{}{
"description": "Access rights to the resource path as CRUD (create, read, update and delete) string (e.g. '-RU-').",
"type": "string",
"example": "CRUD",
},
},
},
},
}
s["paths"].(map[string]interface{})["/user/u"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about all current known users.",
"description": "Returns all registered users.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "List of known users.",
"schema": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"username": map[string]interface{}{
"description": "Name of the user.",
"type": "string",
},
"groups": map[string]interface{}{
"description": "Groups of the user.",
"type": "array",
"items": map[string]interface{}{
"type": "string",
},
},
"data": map[string]interface{}{
"description": "Extra data for the user.",
"type": "object",
},
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/user/g"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about all known groups and their permissions.",
"description": "Returns all known groups.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Known group.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"group_name": map[string]interface{}{
"description": "Resource path.",
"type": "object",
"properties": map[string]interface{}{
"resource_path": map[string]interface{}{
"description": "Access rights to the resource path as CRUD (create, read, update and delete) string (e.g. '-RU-').",
"type": "string",
"example": "CRUD",
},
},
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/user/u/{name}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about a current known user.",
"description": "Returns a registered user.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": username,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Information about a single user.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"username": map[string]interface{}{
"description": "Name of the user.",
"type": "string",
},
"groups": map[string]interface{}{
"description": "Groups of the user.",
"type": "array",
"items": map[string]interface{}{
"type": "string",
},
},
"data": map[string]interface{}{
"description": "Extra data for the user.",
"type": "object",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"post": map[string]interface{}{
"summary": "Create a new user.",
"description": "Create a new user.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(username, createParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"put": map[string]interface{}{
"summary": "Update an existing user.",
"description": "Update an existing user.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(username, updateParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"delete": map[string]interface{}{
"summary": "Delete an existing user.",
"description": "Delete an existing user.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": username,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/user/g/{name}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about a group's permissions.",
"description": "Returns the permissions of a group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": groupname,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Resource paths and their permissions.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"resource_path": map[string]interface{}{
"description": "Access rights to the resource path as CRUD (create, read, update and delete) string (e.g. '-RU-').",
"type": "string",
"example": "CRUD",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"post": map[string]interface{}{
"summary": "Create a new group.",
"description": "Create a new group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": groupname,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"put": map[string]interface{}{
"summary": "Set permissions of an existing group.",
"description": "Set permissions of an existing group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(groupname, permParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"delete": map[string]interface{}{
"summary": "Delete an existing group.",
"description": "Delete an existing group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": groupname,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Request was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package api
import (
"net/http"
"strings"
"devt.de/krotik/common/datautil"
"devt.de/krotik/eliasdb/cluster"
"devt.de/krotik/eliasdb/ecal"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/graphstorage"
)
/*
APIVersion is the version of the REST API
*/
const APIVersion = "1.0.0"
/*
APIRoot is the root directory for the REST API
*/
const APIRoot = "/db"
/*
APISchemes is a list of supported protocol schemes
*/
var APISchemes = []string{"https"}
/*
APIHost is the host definition for the REST API
*/
var APIHost = "localhost:9090"
/*
GeneralEndpointMap contains general endpoints which should always be available
*/
var GeneralEndpointMap = map[string]RestEndpointInst{
EndpointAbout: AboutEndpointInst,
EndpointSwagger: SwaggerEndpointInst,
}
/*
RestEndpointInst models a factory function for REST endpoint handlers.
*/
type RestEndpointInst func() RestEndpointHandler
/*
RestEndpointHandler models a REST endpoint handler.
*/
type RestEndpointHandler interface {
/*
HandleGET handles a GET request.
*/
HandleGET(w http.ResponseWriter, r *http.Request, resources []string)
/*
HandlePOST handles a POST request.
*/
HandlePOST(w http.ResponseWriter, r *http.Request, resources []string)
/*
HandlePUT handles a PUT request.
*/
HandlePUT(w http.ResponseWriter, r *http.Request, resources []string)
/*
HandleDELETE handles a DELETE request.
*/
HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string)
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
SwaggerDefs(s map[string]interface{})
}
/*
GM is the GraphManager instance which should be used by the REST API.
*/
var GM *graph.Manager
/*
SI is the ScriptingInterpreter instance which is working with the api.GM GraphManager instance.
*/
var SI *ecal.ScriptingInterpreter
/*
GS is the GraphStorage instance which should be used by the REST API.
*/
var GS graphstorage.Storage
/*
DD is the DistributedStorage instance which should be used by the REST API.
(Only available if clustering is enabled.)
*/
var DD *cluster.DistributedStorage
/*
DDLog is a ringbuffer containing cluster related logs.
(Only available if clustering is enabled.)
*/
var DDLog *datautil.RingBuffer
/*
Map of all registered endpoint handlers.
*/
var registered = map[string]RestEndpointInst{}
/*
HandleFunc to use for registering handlers.
Should be of type: func(pattern string, handler func(http.ResponseWriter, *http.Request))
*/
var HandleFunc = http.HandleFunc
/*
RegisterRestEndpoints registers all given REST endpoint handlers.
*/
func RegisterRestEndpoints(endpointInsts map[string]RestEndpointInst) {
for url, endpointInst := range endpointInsts {
registered[url] = endpointInst
HandleFunc(url, func() func(w http.ResponseWriter, r *http.Request) {
var handlerURL = url
var handlerInst = endpointInst
return func(w http.ResponseWriter, r *http.Request) {
// Create a new handler instance
handler := handlerInst()
// Handle request in appropriate method
res := strings.TrimSpace(r.URL.Path[len(handlerURL):])
if len(res) > 0 && res[len(res)-1] == '/' {
res = res[:len(res)-1]
}
var resources []string
if res != "" {
resources = strings.Split(res, "/")
}
switch r.Method {
case "GET":
handler.HandleGET(w, r, resources)
case "POST":
handler.HandlePOST(w, r, resources)
case "PUT":
handler.HandlePUT(w, r, resources)
case "DELETE":
handler.HandleDELETE(w, r, resources)
default:
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
}
}())
}
}
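/*
Example (sketch): registering the general endpoints and serving them with
the net/http default mux - by default HandleFunc is http.HandleFunc. The
address is illustrative only:

RegisterRestEndpoints(GeneralEndpointMap)
log.Fatal(http.ListenAndServe("localhost:9090", nil))
*/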
/*
DefaultEndpointHandler represents the default endpoint handler.
*/
type DefaultEndpointHandler struct {
}
/*
HandleGET is a method stub returning an error.
*/
func (de *DefaultEndpointHandler) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
/*
HandlePOST is a method stub returning an error.
*/
func (de *DefaultEndpointHandler) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
/*
HandlePUT is a method stub returning an error.
*/
func (de *DefaultEndpointHandler) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
/*
HandleDELETE is a method stub returning an error.
*/
func (de *DefaultEndpointHandler) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package api
import (
"encoding/json"
"net/http"
)
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (a *aboutEndpoint) SwaggerDefs(s map[string]interface{}) {
// Add query paths
s["paths"].(map[string]interface{})["/about"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information about the REST API provider.",
"description": "Returns available API versions, product name and product version.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "About info object",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"api_versions": map[string]interface{}{
"description": "List of available API versions.",
"type": "array",
"items": map[string]interface{}{
"description": "Available API version.",
"type": "string",
},
},
"product": map[string]interface{}{
"description": "Product name of the REST API provider.",
"type": "string",
},
"version": map[string]interface{}{
"description": "Version of the REST API provider.",
"type": "string",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
EndpointSwagger is the swagger endpoint URL (rooted). Handles swagger.json/
*/
const EndpointSwagger = APIRoot + "/swagger.json/"
/*
SwaggerEndpointInst creates a new endpoint handler.
*/
func SwaggerEndpointInst() RestEndpointHandler {
return &swaggerEndpoint{}
}
/*
Handler object for swagger operations.
*/
type swaggerEndpoint struct {
*DefaultEndpointHandler
}
/*
HandleGET returns the swagger definition of the REST API.
*/
func (a *swaggerEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
// Add general sections
data := map[string]interface{}{
"swagger": "2.0",
"host": APIHost,
"schemes": APISchemes,
"basePath": APIRoot,
"produces": []string{"application/json"},
"paths": map[string]interface{}{},
"definitions": map[string]interface{}{},
}
// Go through all registered components and let them add their definitions
a.SwaggerDefs(data)
for _, inst := range registered {
inst().SwaggerDefs(data)
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (a *swaggerEndpoint) SwaggerDefs(s map[string]interface{}) {
// Add general application information
s["info"] = map[string]interface{}{
"title": "EliasDB API",
"description": "Query and modify the EliasDB datastore.",
"version": APIVersion,
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package v1 contains EliasDB REST API Version 1.
Binary Blob control endpoint
/blob
The blob endpoint can be used to store and retrieve binary data to/from automatically
allocated storage locations.
A new binary blob can be stored by sending a POST request. The body should
be the binary data to store. The response should have the following structure:
{
id : <ID of the stored binary blob>
}
/blob/<id>
GET requests can be used to retrieve a binary blob with a specific ID. Binary blobs
can be updated by sending a PUT request and removed by sending a DELETE request.
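For illustration (partition and ID are examples only), storing a blob in
partition main and reading it back:
POST /db/v1/blob/main with the raw binary data as body
returns: { id : 42 }
GET /db/v1/blob/main/42
returns: the stored binary data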
Cluster control endpoint
/cluster
The cluster endpoint returns cluster state specific information. A GET request
returns the cluster state info as a key-value map:
{
<stateinfo key> : <info value>,
...
}
/cluster/join
An instance can join an existing cluster by sending a PUT request to the join
endpoint. The body should have the following datastructure:
{
name : <Name of an existing cluster member>,
netaddr : <Network address of an existing cluster member>
}
/cluster/eject
A cluster member can eject another cluster member or itself by sending a PUT
request to the eject endpoint. The body should have the following datastructure:
{
name : <Name of the cluster member to eject>,
}
/cluster/ping
An instance can ping another instance (provided the secret is correct). Cluster
membership is not required for this command. The body should have the following datastructure:
{
name : <Name of an existing instance>,
netaddr : <Network address of an existing instance>
}
/cluster/memberinfos
The memberinfos endpoint returns the static member info of every known cluster
member. If a member is not reachable its info contains a single key-value pair with
the key error and an error message as value. A GET request returns the member
info of every member as a key-value map:
{
<memberinfo key> : <memberinfo value>,
...
}
/cluster/log
Returns the latest cluster related log messages. A DELETE call will clear
the current log.
EQL parser endpoint
/eql
The EQL endpoint provides direct access to the EQL parser. It can be used
to parse a given EQL query into an Abstract Syntax Tree or pretty print a
given Abstract Syntax Tree into an EQL query.
A query can be parsed into an Abstract Syntax Tree by sending a POST request. The
body should have the following format:
{
query : <Query to parse>
}
Returns a JSON structure or an error message.
{
ast : <AST of the given query>
}
An Abstract Syntax Tree can be pretty printed into a query by sending a POST request.
The body should have the following format:
{
ast : <AST to pretty print>
}
Returns a JSON structure or an error message.
{
query : <Pretty printed query>
}
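For illustration (the query is an example only), parsing a simple query:
POST /db/v1/eql with body: { query : "get Person" }
returns: { ast : <AST of the query> }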
Graph request endpoint
/graph
The graph endpoint is the main entry point to send and request graph data.
Data can be sent by using POST and PUT requests. POST will store
data in the datastore and always overwrite any existing data. PUT requests on
nodes will only update the given attributes. PUT requests on edges are handled
equally to POST requests. Data can be deleted using DELETE requests. The data
structure for DELETE requests requires only the key and kind attributes.
A PUT, POST or DELETE request should be sent to one of the following
endpoints:
/graph/<partition>
A graph with the following datastructure:
{
nodes : [ { <attr> : <value> }, ... ],
edges : [ { <attr> : <value> }, ... ]
}
/graph/<partition>/n
A list of nodes:
[ { <attr> : <value> }, ... ]
/graph/<partition>/e
A list of edges:
[ { <attr> : <value> }, ... ]
GET requests can be used to query a single node or a series of nodes. The endpoints
support the limit and offset parameters for lists:
limit - How many list items to return
offset - Offset in the dataset (0 to <total count>-1)
The total number of entries is returned in the X-Total-Count header when
a list is returned.
/graph/<partition>/n/<node kind>/[node key]/[traversal spec]
/graph/<partition>/e/<edge kind>/<edge key>
The return data is a list of objects unless a specific node / edge or a traversal
from a specific node is requested. Each object in the list models a node or edge.
[{
key : <value>,
...
}]
If a specific object is requested then the return data is a single object.
{
key : <value>,
...
}
Traversals return two lists containing traversed nodes and edges. The traversal
endpoint does NOT support limit and offset parameters. Also the X-Total-Count
header is not set.
[
[ <traversed nodes> ], [ <traversed edges> ]
]
Index query endpoint
/index
The index query endpoint should be used to run index search queries against
partitions. Index queries look for words or phrases on all nodes of a given
node kind.
A phrase query finds all nodes/edges where an attribute contains a
certain phrase. A request url which runs a new phrase search should be of the
following form:
/index/<partition>/n/<node kind>?phrase=<phrase>&attr=<attribute>
/index/<partition>/e/<edge kind>?phrase=<phrase>&attr=<attribute>
The return data is a list of node keys:
[ <node key1>, <node key2>, ... ]
A word query finds all nodes/edges where an attribute contains a certain word.
A request url which runs a new word search should be of the following form:
/index/<partition>/n/<node kind>?word=<word>&attr=<attribute>
/index/<partition>/e/<edge kind>?word=<word>&attr=<attribute>
The return data is a map which maps node key to a list of word positions:
{
key : [ <pos1>, <pos2>, ... ],
...
}
A value search finds all nodes/edges where an attribute has a certain value.
A request url which runs a new value search should be of the following form:
/index/<partition>/n/<node kind>?value=<value>&attr=<attribute>
/index/<partition>/e/<edge kind>?value=<value>&attr=<attribute>
The return data is a list of node keys:
[ <node key1>, <node key2>, ... ]
Find query endpoint
/find
The find query endpoint is a simplified index query which looks up nodes
in all partitions which do not start with a _ character. It either searches
for a word / phrase or an exact value on all available attributes.
A text query finds all nodes where an attribute contains a certain word or
phrase; a value query finds all nodes where an attribute has an exact value.
A request url should be of one of the following forms:
/find?text=<word or phrase value>
/find?value=<exact value>
The return data is a map of partitions to node kinds to a list of nodes:
{
<partition> : {
<kind> : [ { node1 }, { node2 }, ... ]
...
}
...
}
GraphQL request endpoint
/graphql
/graphql-query
The GraphQL endpoints execute GraphQL queries on EliasDB's datastore. The
query endpoint supports only read-queries (i.e. no mutations). EliasDB supports
only executable definitions and introspection (i.e. no type system validation).
General database information endpoint
/info
The info endpoint returns general database information such as known
node kinds, known attributes, etc.
The return data is a key-value map:
{
<info name> : <info value>,
...
}
/info/kind/<kind>
The node kind info endpoint returns general information about a known node or
edge kind such as known attributes or known edges.
Query endpoint
/query
The query endpoint should be used to run EQL search queries against partitions.
The return value is always a list (even if there is only a single entry).
A query result gets an ID and is stored in a cache. The ID is returned in the
X-Cache-Id header. Subsequent requests for the same result can use the ID
instead of a query.
The endpoint supports the optional limit, offset and groups parameter:
limit - How many list items to return
offset - Offset in the dataset
groups - If set then group information is included in the result
(depending on the result size this can be an expensive call)
The total number of entries in the result is returned in the X-Total-Count header.
A request url which runs a new query should be of the following form:
/query/<partition>?q=<query>
/query/<partition>?rid=<result id>
The return data is a result object:
{
header : {
labels : All column labels of the search result.
format : All column format definitions of the search result.
data : The data which is displayed in each column of the search result.
(e.g. 1:n:name - Name of starting nodes,
3:e:key - Key of edge traversed in the second traversal)
primary_kind : The primary kind of the search result.
},
rows : [ [ <col0>, <col1>, ... ] ],
sources : [ [ <src col0>, <src col1>, ... ] ],
selections : [ <row selected> ],
total_selections : <number of total selections>,
groups : [ [ <groups of row0> ], [ <groups of row1> ] ... ]
}
Query result endpoint
/queryresult
The query result endpoint is used to run operations on query results.
The quickfilter endpoint (GET) is used to determine the most frequently used
values in a particular result column.
/queryresult/<rid>/quickfilter/<column>?limit=<max result items>
The optional limit parameter can be used to limit the result items. The return
data is a simple object:
{
values : [ <value1>, ... ],
frequencies : [ <frequency1>, ... ]
}
/queryresult/<rid>/select
The select endpoint (GET) returns the (primary) nodes which are currently
selected. The primary node of each row is usually the node from which
the query started, when constructing the row of the result (unless the
primary keyword was used). The return data is a simple object:
{
keys : [ <key of selected node1>, ... ],
kinds : [ <kind of selected node1>, ... ]
}
/queryresult/<rid>/select/<row>
The select endpoint with the row parameter (PUT) is used to select
single or multiple rows of a query result. The row parameter can either
be a positive number or 'all', 'none' or 'invert'. Returns the new
number of total selections.
/queryresult/<rid>/groupselected
The groupselected endpoint returns the groups which contain the selected
(primary) nodes based on the currently selected rows. The primary node
of each row is usually the node from which the query started, when
constructing the row of the result (unless the primary keyword was used).
The return data is a simple object:
{
groups : [ <group1>, ... ],
keys : [ [ <keys of selected nodes in group1> ], ... ],
kinds : [ [ <kinds of selected nodes in group1> ], ... ]
}
The state can be set by sending it to the endpoint via a POST request.
/queryresult/<rid>/groupselected/<name>
The groupselected endpoint with a group name adds (PUT) or removes (DELETE) all
selected nodes to/from the given (existing) group.
/queryresult/<rid>/csv
The csv endpoint returns the search result as a CSV string.
*/
package v1
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strconv"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/storage"
)
/*
StorageSuffixBlob is the suffix for binary blob storage
*/
const StorageSuffixBlob = ".blob"
/*
EndpointBlob is the blob endpoint URL (rooted). Handles everything under blob/...
*/
const EndpointBlob = api.APIRoot + APIv1 + "/blob/"
/*
BlobEndpointInst creates a new endpoint handler.
*/
func BlobEndpointInst() api.RestEndpointHandler {
return &blobEndpoint{}
}
/*
Handler object for blob operations.
*/
type blobEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles REST calls to retrieve binary data.
*/
func (be *blobEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var res interface{}
var ret []byte
// Check parameters
if !checkResources(w, resources, 2, 2, "Need a partition and a specific data ID") {
return
}
loc, err := strconv.ParseUint(resources[1], 10, 64)
if err != nil {
http.Error(w, fmt.Sprint("Could not decode data ID: ", err.Error()),
http.StatusBadRequest)
return
}
sm := api.GS.StorageManager(resources[0]+StorageSuffixBlob, false)
if sm != nil {
res, err = sm.FetchCached(loc)
if sme, ok := err.(*storage.ManagerError); ok && sme.Type == storage.ErrNotInCache {
err = sm.Fetch(loc, &ret)
} else if err == nil && res != nil {
ret = res.([]byte)
}
}
// Write data
w.Header().Set("content-type", "application/octet-stream")
w.Write(ret)
}
/*
HandlePOST handles a REST call to store new binary data.
*/
func (be *blobEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
var buf bytes.Buffer
// Check parameters
if !checkResources(w, resources, 1, 1, "Need a partition") {
return
}
sm := api.GS.StorageManager(resources[0]+StorageSuffixBlob, true)
// Use a memory buffer to read the sent data
buf.ReadFrom(r.Body)
loc, err := sm.Insert(buf.Bytes())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
sm.Flush()
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(map[string]interface{}{
"id": loc,
})
}
/*
HandlePUT handles a REST call to update existing binary data.
*/
func (be *blobEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
var buf bytes.Buffer
// Check parameters
if !checkResources(w, resources, 2, 2, "Need a partition and a specific data ID") {
return
}
loc, err := strconv.ParseUint(resources[1], 10, 64)
if err != nil {
http.Error(w, fmt.Sprint("Could not decode data ID: ", err.Error()), http.StatusBadRequest)
return
}
sm := api.GS.StorageManager(resources[0]+StorageSuffixBlob, false)
if sm != nil {
// Use a memory buffer to read the sent data
buf.ReadFrom(r.Body)
if err := sm.Update(loc, buf.Bytes()); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
sm.Flush()
}
}
/*
HandleDELETE handles a REST call to remove existing binary data.
*/
func (be *blobEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 2, 2, "Need a partition and a specific data ID") {
return
}
loc, err := strconv.ParseUint(resources[1], 10, 64)
if err != nil {
http.Error(w, fmt.Sprint("Could not decode data ID: ", err.Error()), http.StatusBadRequest)
return
}
sm := api.GS.StorageManager(resources[0]+StorageSuffixBlob, false)
if sm != nil {
if err := sm.Free(loc); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
sm.Flush()
}
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (be *blobEndpoint) SwaggerDefs(s map[string]interface{}) {
idParams := []map[string]interface{}{
{
"name": "id",
"in": "path",
"description": "ID of the binary blob.",
"required": true,
"type": "string",
},
}
partitionParams := []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to select.",
"required": true,
"type": "string",
},
}
binaryData := []map[string]interface{}{
{
"name": "data",
"in": "body",
"description": "The data to store.",
"required": true,
"schema": map[string]interface{}{
"description": "A blob of binary data.",
},
},
}
s["paths"].(map[string]interface{})["/v1/blob/{partition}"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "Create a binary blob of data.",
"description": "The blob endpoint can be used to store binary data. Its location will be automatically allocated.",
"consumes": []string{
"application/octet-stream",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(binaryData, partitionParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"id": map[string]interface{}{
"description": "The data ID which can be used to lookup the data.",
"type": "number",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/blob/{partition}/{id}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Retrieve a binary blob of data.",
"description": "The blob endpoint can be used to retrieve binary data from a specific location.",
"produces": []string{
"text/plain",
"application/octet-stream",
},
"parameters": append(idParams, partitionParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The requested binary blob.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"put": map[string]interface{}{
"summary": "Update a binary blob of data.",
"description": "The blob endpoint can be used to update binary data at a specific location.",
"produces": []string{
"text/plain",
},
"parameters": append(idParams, partitionParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"delete": map[string]interface{}{
"summary": "Remove a binary blob of data.",
"description": "The blob endpoint can be used to remove binary data from a specific location.",
"produces": []string{
"text/plain",
},
"parameters": append(idParams, partitionParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"devt.de/krotik/eliasdb/api"
)
/*
EndpointClusterQuery is the cluster endpoint URL (rooted). Handles everything under cluster/...
*/
const EndpointClusterQuery = api.APIRoot + APIv1 + "/cluster/"
/*
ClusterEndpointInst creates a new endpoint handler.
*/
func ClusterEndpointInst() api.RestEndpointHandler {
return &clusterEndpoint{}
}
/*
Handler object for cluster queries.
*/
type clusterEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles a cluster query REST call.
*/
func (ce *clusterEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var data interface{}
// Check clustering is enabled
if api.DD == nil || api.DDLog == nil {
http.Error(w, "Clustering is not enabled on this instance", http.StatusServiceUnavailable)
return
}
if len(resources) == 1 && resources[0] == "log" {
// Cluster logs are requested
data = api.DDLog.StringSlice()
} else if len(resources) == 1 && resources[0] == "memberinfos" {
// Cluster member infos are requested
data = api.DD.MemberManager.MemberInfoCluster()
} else {
// By default the cluster state is returned
data = api.DD.MemberManager.StateInfo().Map()
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
}
/*
HandlePUT handles a REST call to join/eject/ping members of the cluster.
*/
func (ce *clusterEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 1, 1, "Need a command: join, eject or ping") {
return
}
// Check clustering is enabled
if api.DD == nil || api.DDLog == nil {
http.Error(w, "Clustering is not enabled on this instance", http.StatusServiceUnavailable)
return
}
dec := json.NewDecoder(r.Body)
args := make(map[string]string)
if err := dec.Decode(&args); err != nil {
http.Error(w, "Could not decode arguments: "+err.Error(), http.StatusBadRequest)
return
}
// Function to check arguments
getArg := func(arg string) (string, bool) {
v, ok := args[arg]
if !ok {
http.Error(w, fmt.Sprintf("Required argument %v missing in body arguments", arg), http.StatusBadRequest)
}
return v, ok
}
if resources[0] == "join" {
// Get required args
name, ok := getArg("name")
if ok {
rpc, ok := getArg("netaddr")
if ok {
err := api.DD.MemberManager.JoinCluster(name, rpc)
if err != nil {
http.Error(w, "Could not join the cluster: "+err.Error(), http.StatusForbidden)
}
}
}
} else if resources[0] == "eject" {
// Get required args
name, ok := getArg("name")
if ok {
err := api.DD.MemberManager.EjectMember(name)
if err != nil {
http.Error(w, "Could not eject "+name+" from cluster: "+err.Error(), http.StatusForbidden)
}
}
} else if resources[0] == "ping" {
// Get required args
name, ok := getArg("name")
if ok {
rpc, ok := getArg("netaddr")
if ok {
res, err := api.DD.MemberManager.Client.SendPing(name, rpc)
if err != nil {
http.Error(w, "Ping returned an error: "+err.Error(), http.StatusForbidden)
} else {
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(res)
}
}
}
} else {
http.Error(w, "Unknown command: "+resources[0], http.StatusBadRequest)
}
}
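/*
Illustrative sketch (assumed host/port and member details): asking a running
member to join a cluster. The body keys mirror the getArg lookups above;
eject only needs "name", ping needs "name" and "netaddr" like join.

    body, _ := json.Marshal(map[string]string{
        "name":    "member2",        // member to contact
        "netaddr": "localhost:9030", // its network address
    })
    req, _ := http.NewRequest("PUT",
        "http://localhost:9090"+EndpointClusterQuery+"join", bytes.NewReader(body))
    http.DefaultClient.Do(req)
*/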
/*
HandleDELETE handles a cluster delete REST call.
*/
func (ce *clusterEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
// Check clustering is enabled
if api.DD == nil || api.DDLog == nil {
http.Error(w, "Clustering is not enabled on this instance", http.StatusServiceUnavailable)
return
}
if len(resources) == 1 && resources[0] == "log" {
// Cluster log should be reset
api.DDLog.Reset()
return
}
http.Error(w, "Request had no effect", http.StatusBadRequest)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ce *clusterEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/cluster"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return cluster specific information.",
"description": "The cluster endpoint returns the cluster state info which contains cluster members and their state.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A key-value map.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/cluster/{command}"] = map[string]interface{}{
"put": map[string]interface{}{
"summary": "Commands can be given to the cluster by using PUT requests.",
"description": "The cluster can be controlled via this command endpoint on any member.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "command",
"in": "path",
"description": "Valid commands are: ping, join and eject.",
"required": true,
"type": "string",
},
{
"name": "args",
"in": "body",
"description": "Arguments for a command",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"name": map[string]interface{}{
"description": "Name of a cluster member (ping/join=member to contact, eject=member to eject).",
"type": "string",
},
"netaddr": map[string]interface{}{
"description": "Network address of a member e.g. localhost:9030 (ping/join=member address to contact)",
"type": "string",
},
},
},
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Only the ping command returns its result. All other positive responses are empty.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/cluster/memberinfos"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return static member info of every known cluster member.",
"description": "The memberinfos returns the static member info of every known cluster member. If a member is not reachable its info contains a single key-value pair with the key error and an error message as value.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A map of memberinfos (keys are member names).",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/cluster/log"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return latest cluster related log messages.",
"description": "The cluster log endpoint returns the latest cluster related log messages from a memory ring buffer.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A list of log messages.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"delete": map[string]interface{}{
"summary": "Reset the cluster log.",
"description": "A delete call to the log endpoint resets the cluster related log and clears the ring buffer in memory.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Cluster related log was reset.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
"devt.de/krotik/common/cryptutil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/ecal/engine"
"devt.de/krotik/ecal/scope"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/ecal"
"github.com/gorilla/websocket"
)
/*
EndpointECALSock is the ECAL endpoint URL (rooted) for websocket operations. Handles everything under sock/...
*/
const EndpointECALSock = api.APIRoot + "/sock/"
/*
sockUpgrader can upgrade normal requests to websocket communications
*/
var sockUpgrader = websocket.Upgrader{
Subprotocols: []string{"ecal-sock"},
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
var sockCallbackError error
/*
ECALSockEndpointInst creates a new endpoint handler.
*/
func ECALSockEndpointInst() api.RestEndpointHandler {
return &ecalSockEndpoint{}
}
/*
Handler object for ECAL websocket operations.
*/
type ecalSockEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles ECAL websocket operations.
*/
func (e *ecalSockEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
if api.SI != nil {
var body []byte
// Upgrade the incoming connection to a websocket
// If the upgrade fails then the client gets an HTTP error response.
conn, err := sockUpgrader.Upgrade(w, r, nil)
if err != nil {
// We give details here on what went wrong
w.Write([]byte(err.Error()))
return
}
commID := fmt.Sprintf("%x", cryptutil.GenerateUUID())
wc := ecal.NewWebsocketConnection(commID, conn)
wc.Init()
if body, err = ioutil.ReadAll(r.Body); err == nil {
var data interface{}
json.Unmarshal(body, &data)
query := map[interface{}]interface{}{}
for k, v := range r.URL.Query() {
values := make([]interface{}, 0)
for _, val := range v {
values = append(values, val)
}
query[k] = values
}
header := map[interface{}]interface{}{}
for k, v := range r.Header {
header[k] = scope.ConvertJSONToECALObject(v)
}
proc := api.SI.Interpreter.RuntimeProvider.Processor
event := engine.NewEvent("WebSocketRequest", []string{"db", "web", "sock"},
map[interface{}]interface{}{
"commID": commID,
"path": strings.Join(resources, "/"),
"pathList": resources,
"bodyString": string(body),
"bodyJSON": scope.ConvertJSONToECALObject(data),
"query": query,
"method": r.Method,
"header": header,
})
// Add event that the websocket has been registered
if _, err = proc.AddEventAndWait(event, nil); err == nil {
api.SI.RegisterECALSock(wc)
defer func() {
api.SI.DeregisterECALSock(wc)
}()
for {
var fatal bool
var data map[string]interface{}
// Read websocket message
if data, fatal, err = wc.ReadData(); err != nil {
wc.WriteData(map[string]interface{}{
"error": err.Error(),
})
if fatal {
break
}
continue
}
if val, ok := data["close"]; ok && stringutil.IsTrueValue(fmt.Sprint(val)) {
wc.Close("")
break
}
event = engine.NewEvent("WebSocketRequest", []string{"db", "web", "sock", "data"},
map[interface{}]interface{}{
"commID": commID,
"path": strings.Join(resources, "/"),
"pathList": resources,
"query": query,
"method": r.Method,
"header": header,
"data": scope.ConvertJSONToECALObject(data),
})
_, err = proc.AddEvent(event, nil)
errorutil.AssertOk(err)
}
}
}
if err != nil {
wc.Close(err.Error())
api.SI.Interpreter.RuntimeProvider.Logger.LogDebug(err)
}
return
}
http.Error(w, "Resource was not found", http.StatusNotFound)
}
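/*
Sketch of the wire protocol, derived from the read loop above (JSON frames
over the websocket; payload values are assumptions):

    client -> {"close": true}    ends the session
    client -> {"foo": "bar"}     any other object triggers a WebSocketRequest
                                 data event for the ECAL backend
    server -> {"error": "..."}   sent if a frame cannot be read
*/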
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (e *ecalSockEndpoint) SwaggerDefs(s map[string]interface{}) {
// No swagger definitions for this endpoint as it only handles websocket requests
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strconv"
"strings"
"devt.de/krotik/ecal/engine"
"devt.de/krotik/ecal/scope"
"devt.de/krotik/ecal/util"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/ecal/dbfunc"
)
/*
EndpointECALInternal is the ECAL endpoint URL (rooted) for internal operations. Handles everything under ecal/...
*/
const EndpointECALInternal = api.APIRoot + "/ecal/"
/*
EndpointECALPublic is the ECAL endpoint URL (rooted) for public API operations. Handles everything under api/...
*/
const EndpointECALPublic = api.APIRoot + "/api/"
/*
ECALEndpointInst creates a new endpoint handler.
*/
func ECALEndpointInst() api.RestEndpointHandler {
return &ecalEndpoint{}
}
/*
Handler object for ecal operations.
*/
type ecalEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles a GET request.
*/
func (ee *ecalEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
ee.forwardRequest(w, r, resources)
}
/*
HandlePOST handles a POST request.
*/
func (ee *ecalEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
ee.forwardRequest(w, r, resources)
}
/*
HandlePUT handles a PUT request.
*/
func (ee *ecalEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
ee.forwardRequest(w, r, resources)
}
/*
HandleDELETE handles a DELETE request.
*/
func (ee *ecalEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
ee.forwardRequest(w, r, resources)
}
func (ee *ecalEndpoint) forwardRequest(w http.ResponseWriter, r *http.Request, resources []string) {
if api.SI != nil {
// Make sure the request we are handling comes from a known path for ECAL
isPublic := strings.HasPrefix(r.URL.Path, EndpointECALPublic)
isInternal := strings.HasPrefix(r.URL.Path, EndpointECALInternal)
if isPublic || isInternal {
var eventKind []string
body, err := ioutil.ReadAll(r.Body)
if err == nil {
if isPublic {
eventKind = []string{"db", "web", "api"}
} else {
eventKind = []string{"db", "web", "ecal"}
}
var data interface{}
json.Unmarshal(body, &data)
query := map[interface{}]interface{}{}
for k, v := range r.URL.Query() {
values := make([]interface{}, 0)
for _, val := range v {
values = append(values, val)
}
query[k] = values
}
header := map[interface{}]interface{}{}
for k, v := range r.Header {
header[k] = scope.ConvertJSONToECALObject(v)
}
proc := api.SI.Interpreter.RuntimeProvider.Processor
event := engine.NewEvent(fmt.Sprintf("WebRequest"), eventKind,
map[interface{}]interface{}{
"path": strings.Join(resources, "/"),
"pathList": resources,
"bodyString": string(body),
"bodyJSON": scope.ConvertJSONToECALObject(data),
"query": query,
"method": r.Method,
"header": header,
})
var m engine.Monitor
if m, err = proc.AddEventAndWait(event, nil); err == nil {
if m != nil {
var headers map[interface{}]interface{}
status := 0
var body []byte
for _, e := range m.(*engine.RootMonitor).AllErrors() {
if len(e.ErrorMap) > 0 {
for _, e := range e.ErrorMap {
if re, ok := e.(*util.RuntimeErrorWithDetail); ok && re.Type == dbfunc.ErrWebEventHandled {
res := re.Data.(map[interface{}]interface{})
if status, err = strconv.Atoi(fmt.Sprint(res["status"])); err == nil {
headers, _ = res["header"].(map[interface{}]interface{})
body, err = json.Marshal(scope.ConvertECALToJSONObject(res["body"]))
}
} else {
err = e
}
break
}
break
}
}
if status != 0 {
for k, v := range headers {
w.Header().Set(fmt.Sprint(k), fmt.Sprint(v))
}
w.WriteHeader(status)
fmt.Fprintln(w, string(body))
return
}
}
}
}
if err != nil {
api.SI.Interpreter.RuntimeProvider.Logger.LogError(err)
}
}
}
http.Error(w, "Resource was not found", http.StatusNotFound)
}
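/*
Note on the response contract, derived from the handler above: an ECAL script
signals that it handled a web request by raising an ErrWebEventHandled runtime
error whose detail data is a map. Only the keys below are inspected; the body
is serialized to JSON before it is written to the client.

    {
        "status": 200,                     // HTTP status code (required)
        "header": {"Content-Type": "..."}, // optional response headers
        "body":   { ... }                  // response payload
    }
*/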
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ee *ecalEndpoint) SwaggerDefs(s map[string]interface{}) {
desc := map[string]interface{}{
"summary": "Forward web requests to the ECAL backend.",
"description": "The ecal endpoint forwards web requests to the ECAL backend.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A result object generated by ECAL scripts.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
}
s["paths"].(map[string]interface{})["/ecal"] = map[string]interface{}{
"get": desc,
"post": desc,
"put": desc,
"delete": desc,
}
s["paths"].(map[string]interface{})["/api"] = map[string]interface{}{
"get": desc,
"post": desc,
"put": desc,
"delete": desc,
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/eql"
"devt.de/krotik/eliasdb/eql/parser"
)
/*
EndpointEql is the eql endpoint URL (rooted). Handles everything under eql/...
*/
const EndpointEql = api.APIRoot + APIv1 + "/eql/"
/*
EqlEndpointInst creates a new endpoint handler.
*/
func EqlEndpointInst() api.RestEndpointHandler {
return &eqlEndpoint{}
}
/*
Handler object for eql operations.
*/
type eqlEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandlePOST handles REST calls to transform EQL queries.
*/
func (e *eqlEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
dec := json.NewDecoder(r.Body)
data := make(map[string]interface{})
if err := dec.Decode(&data); err != nil {
http.Error(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
// Handle query and ast requests
query, ok1 := data["query"]
ast, ok2 := data["ast"]
if ok1 || ok2 {
res := make(map[string]interface{})
if ok1 {
resast, err := eql.ParseQuery("request", fmt.Sprint(query))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
res["ast"] = resast.Plain()
}
if ok2 {
astmap, ok := ast.(map[string]interface{})
if !ok {
http.Error(w, "Plain AST object expected as 'ast' value", http.StatusBadRequest)
return
}
// Try to create a proper AST from plain AST
astnode, err := parser.ASTFromPlain(astmap)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Now pretty print the AST
ppres, err := parser.PrettyPrint(astnode)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
res["query"] = ppres
}
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
return
}
http.Error(w, "Need either a query or an ast parameter", http.StatusBadRequest)
}
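/*
Illustrative sketch (assumed host/port; the query text is a made-up example):
parsing an EQL query into a plain AST. Sending {"ast": ...} instead pretty
prints a previously returned AST back into a query string.

    body, _ := json.Marshal(map[string]interface{}{
        "query": "get Person",
    })
    resp, _ := http.Post("http://localhost:9090"+EndpointEql,
        "application/json", bytes.NewReader(body))
    // Response body: {"ast": { ...plain AST of the query... }}
*/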
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (e *eqlEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/eql"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "EQL parser and pretty printer endpoint.",
"description": "The eql endpoint should be used to parse a given EQL query into an Abstract Syntax Tree or pretty print a given Abstract Syntax Tree into an EQL query.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "data",
"in": "body",
"description": "Query or AST which should be converted.",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"query": map[string]interface{}{
"description": "Query which should be parsed.",
"type": "string",
},
"ast": map[string]interface{}{
"description": "AST which should be pretty printed.",
"type": "object",
},
},
},
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"ast": map[string]interface{}{
"description": "The resulting AST if a query was parsed.",
"type": "object",
},
"query": map[string]interface{}{
"description": "The pretty printed query if an AST was given.",
"type": "string",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
EndpointFindQuery is the find endpoint URL (rooted). Handles everything under find/...
*/
const EndpointFindQuery = api.APIRoot + APIv1 + "/find/"
/*
FindEndpointInst creates a new endpoint handler.
*/
func FindEndpointInst() api.RestEndpointHandler {
return &findEndpoint{}
}
/*
Handler object for find queries.
*/
type findEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles a search query REST call.
*/
func (ie *findEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var err error
ret := make(map[string]map[string][]interface{})
// Check what is queried
text := r.URL.Query().Get("text")
value := r.URL.Query().Get("value")
if text == "" && value == "" {
http.Error(w, "Query string for text (word or phrase) or value (exact match) is required", http.StatusBadRequest)
return
}
lookup := stringutil.IsTrueValue(r.URL.Query().Get("lookup"))
part := r.URL.Query().Get("part")
parts := api.GM.Partitions()
kinds := api.GM.NodeKinds()
if part != "" && stringutil.IndexOf(part, parts) == -1 {
err = fmt.Errorf("Partition %s does not exist", part)
}
if err == nil {
// Go through all partitions
for _, p := range parts {
if strings.HasPrefix(p, "_") || part != "" && part != p {
// Ignore partitions which start with an _ character or which
// do not match the requested partition.
continue
}
partitionData := make(map[string][]interface{})
ret[p] = partitionData
// Go through all known node kinds
for _, k := range kinds {
var iq graph.IndexQuery
var nodes []interface{}
nodeMap := make(map[string]interface{})
// NodeIndexQuery may return nil, nil (no iterator and no error) if the
// node kind does not exist in a partition
if iq, err = api.GM.NodeIndexQuery(p, k); err == nil && iq != nil {
// Go through all known attributes of the node kind
for _, attr := range api.GM.NodeAttrs(k) {
var keys []string
// Run the lookup on all attributes
if text != "" {
keys, err = iq.LookupPhrase(attr, text)
} else {
keys, err = iq.LookupValue(attr, value)
}
// Lookup all nodes
for _, key := range keys {
var node data.Node
if _, ok := nodeMap[key]; !ok && err == nil {
if lookup {
if node, err = api.GM.FetchNode(p, key, k); node != nil {
nodeMap[key] = node.Data()
}
} else {
nodeMap[key] = map[string]interface{}{
data.NodeKey: key,
data.NodeKind: k,
}
}
}
}
}
}
for _, n := range nodeMap {
nodes = append(nodes, n)
}
if nodes != nil {
partitionData[k] = nodes
}
}
}
}
// Check if there was an error
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
e := json.NewEncoder(w)
e.Encode(ret)
}
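/*
Illustrative sketch (assumed host/port and index content): a phrase search
across all partitions with full node lookup. The response is the nested
partition -> kind -> nodes map built by the handler above.

    resp, _ := http.Get("http://localhost:9090" + EndpointFindQuery +
        "?text=John&lookup=true")
    // Response body e.g.:
    // {"main": {"Person": [{"key": "1", "kind": "Person", ...}]}}
*/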
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ie *findEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/find"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Run index searches on the EliasDB datastore.",
"description": "The find endpoint should be used to run simple index searches for either a value or a phrase.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "text",
"in": "query",
"description": "A word or phrase to search for.",
"required": false,
"type": "string",
},
{
"name": "value",
"in": "query",
"description": "A node value to search for.",
"required": false,
"type": "string",
},
{
"name": "lookup",
"in": "query",
"description": "Flag if a complete node lookup should be done (otherwise only key and kind are returned).",
"required": false,
"type": "boolean",
},
{
"name": "part",
"in": "query",
"description": "Limit the search to a partition (without the option all partitions are searched).",
"required": false,
"type": "string",
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "An object of search results.",
"schema": map[string]interface{}{
"type": "object",
"description": "Object of results per partition.",
"properties": map[string]interface{}{
"partition": map[string]interface{}{
"type": "object",
"description": "Object of results per kind.",
"properties": map[string]interface{}{
"kind": map[string]interface{}{
"description": "List of found nodes.",
"type": "array",
"items": map[string]interface{}{
"description": "Found node.",
"type": "object",
},
},
},
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strconv"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
EndpointGraph is the graph endpoint URL (rooted). Handles everything under graph/...
*/
const EndpointGraph = api.APIRoot + APIv1 + "/graph/"
/*
GraphEndpointInst creates a new endpoint handler.
*/
func GraphEndpointInst() api.RestEndpointHandler {
return &graphEndpoint{}
}
/*
Handler object for graph operations.
*/
type graphEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles REST calls to retrieve data from the graph database.
*/
func (ge *graphEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 3, 5, "Need a partition, entity type (n or e) and a kind; optional key and traversal spec") {
return
}
if resources[1] != "n" && resources[1] != "e" {
http.Error(w, "Entity type must be n (nodes) or e (edges)", http.StatusBadRequest)
return
}
if len(resources) == 3 {
// Iterate over a list of nodes
if resources[1] == "n" {
// Get limit parameter; -1 if not set
limit, ok := queryParamPosNum(w, r, "limit")
if !ok {
return
}
// Get offset parameter; -1 if not set
offset, ok := queryParamPosNum(w, r, "offset")
if !ok {
return
}
it, err := api.GM.NodeKeyIterator(resources[0], resources[2])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if it == nil {
http.Error(w, "Unknown partition or node kind", http.StatusBadRequest)
return
}
i := 0
if offset != -1 {
for i = 0; i < offset; i++ {
if !it.HasNext() {
http.Error(w, "Offset exceeds available nodes", http.StatusInternalServerError)
return
}
if it.Next(); it.LastError != nil {
http.Error(w, it.LastError.Error(), http.StatusInternalServerError)
return
}
}
} else {
offset = 0
}
var data []interface{}
if limit == -1 {
data = make([]interface{}, 0)
} else {
data = make([]interface{}, 0, limit)
}
for i = offset; it.HasNext(); i++ {
// Break out if the limit was reached
if limit != -1 && i > offset+limit-1 {
break
}
key := it.Next()
if it.LastError != nil {
http.Error(w, it.LastError.Error(), http.StatusInternalServerError)
return
}
node, err := api.GM.FetchNode(resources[0], key, resources[2])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data = append(data, node.Data())
}
// Set total count header
w.Header().Add(HTTPHeaderTotalCount, strconv.FormatUint(api.GM.NodeCount(resources[2]), 10))
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
} else {
http.Error(w, "Entity type must be n (nodes) when requesting all items", http.StatusBadRequest)
return
}
} else if len(resources) == 4 {
// Fetch a specific node or relationship
var data map[string]interface{}
if resources[1] == "n" {
node, err := api.GM.FetchNode(resources[0], resources[3], resources[2])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if node == nil {
http.Error(w, "Unknown partition or node kind", http.StatusBadRequest)
return
}
data = node.Data()
} else {
edge, err := api.GM.FetchEdge(resources[0], resources[3], resources[2])
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if edge == nil {
http.Error(w, "Unknown partition or edge kind", http.StatusBadRequest)
return
}
data = edge.Data()
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
} else {
if resources[1] == "n" {
node, err := api.GM.FetchNodePart(resources[0], resources[3], resources[2], []string{"key", "kind"})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if node == nil {
http.Error(w, "Unknown partition or node kind", http.StatusBadRequest)
return
}
nodes, edges, err := api.GM.TraverseMulti(resources[0], resources[3],
resources[2], resources[4], true)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := make([][]map[string]interface{}, 2)
dataNodes := make([]map[string]interface{}, 0, len(nodes))
dataEdges := make([]map[string]interface{}, 0, len(edges))
if nodes != nil && edges != nil {
for i, n := range nodes {
e := edges[i]
dataNodes = append(dataNodes, n.Data())
dataEdges = append(dataEdges, e.Data())
}
}
data[0] = dataNodes
data[1] = dataEdges
// Sort the result
sort.Stable(&traversalResultComparator{data})
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
} else {
http.Error(w, "Entity type must be n (nodes) when requesting traversal results", http.StatusBadRequest)
return
}
}
}
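/*
Illustrative sketch (assumed host/port, partition "main" and kind "Person"):
paging through nodes and reading the total count header set by the handler
above.

    resp, _ := http.Get("http://localhost:9090" + EndpointGraph +
        "main/n/Person?limit=10&offset=20")
    fmt.Println(resp.Header.Get(HTTPHeaderTotalCount)) // total number of nodes
*/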
/*
HandlePUT handles a REST call to insert new elements into the graph or update
existing elements. Nodes are updated if they already exist. Edges are replaced
if they already exist.
*/
func (ge *graphEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
ge.handleGraphRequest(w, r, resources,
func(trans graph.Trans, part string, node data.Node) error {
return trans.UpdateNode(part, node)
},
func(trans graph.Trans, part string, edge data.Edge) error {
return trans.StoreEdge(part, edge)
})
}
/*
HandlePOST handles a REST call to insert new elements into the graph or update
existing elements. Nodes and edges are replaced if they already exist.
*/
func (ge *graphEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
ge.handleGraphRequest(w, r, resources,
func(trans graph.Trans, part string, node data.Node) error {
return trans.StoreNode(part, node)
},
func(trans graph.Trans, part string, edge data.Edge) error {
return trans.StoreEdge(part, edge)
})
}
/*
HandleDELETE handles a REST call to delete elements from the graph.
*/
func (ge *graphEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
ge.handleGraphRequest(w, r, resources,
func(trans graph.Trans, part string, node data.Node) error {
return trans.RemoveNode(part, node.Key(), node.Kind())
},
func(trans graph.Trans, part string, edge data.Edge) error {
return trans.RemoveEdge(part, edge.Key(), edge.Kind())
})
}
/*
handleGraphRequest handles a graph query REST call.
*/
func (ge *graphEndpoint) handleGraphRequest(w http.ResponseWriter, r *http.Request, resources []string,
transFuncNode func(trans graph.Trans, part string, node data.Node) error,
transFuncEdge func(trans graph.Trans, part string, edge data.Edge) error) {
var nDataList []map[string]interface{}
var eDataList []map[string]interface{}
// Check parameters
if !checkResources(w, resources, 1, 2, "Need a partition; optional entity type (n or e)") {
return
}
dec := json.NewDecoder(r.Body)
if len(resources) == 1 {
// No explicit type given - expecting a graph
gdata := make(map[string][]map[string]interface{})
if err := dec.Decode(&gdata); err != nil {
http.Error(w, "Could not decode request body as object with list of nodes and/or edges: "+err.Error(), http.StatusBadRequest)
return
}
nDataList = gdata["nodes"]
eDataList = gdata["edges"]
} else if resources[1] == "n" {
nDataList = make([]map[string]interface{}, 1)
if err := dec.Decode(&nDataList); err != nil {
http.Error(w, "Could not decode request body as list of nodes: "+err.Error(), http.StatusBadRequest)
return
}
} else if resources[1] == "e" {
eDataList = make([]map[string]interface{}, 1)
if err := dec.Decode(&eDataList); err != nil {
http.Error(w, "Could not decode request body as list of edges: "+err.Error(), http.StatusBadRequest)
return
}
}
// Create a transaction
trans := graph.NewGraphTrans(api.GM)
if nDataList != nil {
// Store nodes in transaction
for _, ndata := range nDataList {
node := data.NewGraphNodeFromMap(ndata)
if err := transFuncNode(trans, resources[0], node); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
}
if eDataList != nil {
// Store edges in transaction
for _, edata := range eDataList {
edge := data.NewGraphEdgeFromNode(data.NewGraphNodeFromMap(edata))
if err := transFuncEdge(trans, resources[0], edge); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
}
}
// Commit transaction
if err := trans.Commit(); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
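/*
Illustrative sketch (assumed host/port, partition "main" and node attributes):
storing a small graph in one request. With an explicit entity type in the URL
(e.g. .../main/n) the body would be just the list of nodes instead.

    body, _ := json.Marshal(map[string][]map[string]interface{}{
        "nodes": {{"key": "1", "kind": "Person", "name": "John"}},
    })
    http.Post("http://localhost:9090"+EndpointGraph+"main",
        "application/json", bytes.NewReader(body))
*/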
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ge *graphEndpoint) SwaggerDefs(s map[string]interface{}) {
partitionParams := []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to select.",
"required": true,
"type": "string",
},
}
entityParams := []map[string]interface{}{
{
"name": "entity_type",
"in": "path",
"description": "Datastore entity type which should selected. " +
"Either n for nodes or e for edges.",
"required": true,
"type": "string",
},
}
defaultParams := []map[string]interface{}{
{
"name": "kind",
"in": "path",
"description": "Node or edge kind to be queried.",
"required": true,
"type": "string",
},
}
defaultParams = append(defaultParams, partitionParams...)
defaultParams = append(defaultParams, entityParams...)
optionalQueryParams := []map[string]interface{}{
{
"name": "limit",
"in": "query",
"description": "How many list items to return.",
"required": false,
"type": "number",
"format": "integer",
},
{
"name": "offset",
"in": "query",
"description": "Offset in the dataset.",
"required": false,
"type": "number",
"format": "integer",
},
}
keyParam := []map[string]interface{}{
{
"name": "key",
"in": "path",
"description": "Node or edge key to be queried.",
"required": true,
"type": "string",
},
}
travParam := []map[string]interface{}{
{
"name": "traversal_spec",
"in": "path",
"description": "Traversal to be followed from a single node.",
"required": true,
"type": "string",
},
}
graphPost := []map[string]interface{}{
{
"name": "entities",
"in": "body",
"description": "Nodes and Edges which should be stored",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"nodes": map[string]interface{}{
"description": "List of nodes to be inserted / updated.",
"type": "array",
"items": map[string]interface{}{
"description": "Node to be inserted / updated.",
"type": "object",
},
},
"edges": map[string]interface{}{
"description": "List of edges to be inserted / updated.",
"type": "array",
"items": map[string]interface{}{
"description": "Edge to be inserted / updated.",
"type": "object",
},
},
},
},
},
}
entitiesPost := []map[string]interface{}{
{
"name": "entities",
"in": "body",
"description": "Nodes or Edges which should be stored",
"required": true,
"schema": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{
"description": "Node or edge to be inserted / updated.",
"type": "object",
},
},
},
}
defaultError := map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
}
// Add endpoint to insert a graph with nodes and edges
s["paths"].(map[string]interface{})["/v1/graph/{partition}"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "Data can be send by using POST requests.",
"description": "A whole graph can be send. " +
"POST will store data in the datastore and always overwrite any existing data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(partitionParams, graphPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
"put": map[string]interface{}{
"summary": "Data can be send by using PUT requests.",
"description": "A whole graph can be send. " +
"PUT will store data in the datastore and update existing data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(partitionParams, graphPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
"delete": map[string]interface{}{
"summary": "Data can be send by using DELETE requests.",
"description": "A whole graph can be send. " +
"DELETE will delete data in the datastore - only key and kind are required.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(partitionParams, graphPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
}
// Add endpoint to insert nodes / edges
s["paths"].(map[string]interface{})["/v1/graph/{partition}/{entity_type}"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "Data can be send by using POST requests.",
"description": "A list of nodes / edges can be send. " +
"POST will store data in the datastore and always overwrite any existing data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(append(partitionParams, entityParams...), entitiesPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
"put": map[string]interface{}{
"summary": "Data can be send by using PUT requests.",
"description": "A list of nodes / edges can be send. " +
"PUT will store data in the datastore and update existing data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(append(partitionParams, entityParams...), entitiesPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
"delete": map[string]interface{}{
"summary": "Data can be send by using DELETE requests.",
"description": "A list of nodes / edges can be send. " +
"DELETE will delete data in the datastore - only key and kind are required.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(append(partitionParams, entityParams...), entitiesPost...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "No data is returned when data is created.",
},
"default": defaultError,
},
},
}
// Add endpoint to query nodes for a specific node kind
s["paths"].(map[string]interface{})["/v1/graph/{partition}/{entity_type}/{kind}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "The graph endpoint is the main entry point to request data.",
"description": "GET requests can be used to query a series of nodes. " +
"The X-Total-Count header contains the total number of nodes which were found.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(defaultParams, optionalQueryParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The return data is a list of objects",
"schema": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{
"type": "object",
},
},
},
"default": defaultError,
},
},
}
// Add endpoint to query/create a specific node
s["paths"].(map[string]interface{})["/v1/graph/{partition}/{entity_type}/{kind}/{key}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "The graph endpoint is the main entry point to request data.",
"description": "GET requests can be used to query a single node.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(append(defaultParams, keyParam...), optionalQueryParams...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The return data is a single object",
"schema": map[string]interface{}{
"type": "object",
},
},
"default": defaultError,
},
},
}
// Add endpoint to traverse from a single node
s["paths"].(map[string]interface{})["/v1/graph/{partition}/{entity_type}/{kind}/{key}/{traversal_spec}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "The graph endpoint is the main entry point to request data.",
"description": "GET requests can be used to query a single node and then traverse to its neighbours.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(append(defaultParams, keyParam...), travParam...),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The return data are two lists containing traversed nodes and edges. " +
"The traversal endpoint does NOT support limit and offset parameters. " +
"Also the X-Total-Count header is not set.",
"schema": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{
"type": "array",
"items": map[string]interface{}{
"type": "object",
},
},
},
},
"default": defaultError,
},
},
}
}
// Comparator object to sort traversal results
type traversalResultComparator struct {
Data [][]map[string]interface{} // Data to sort
}
func (c traversalResultComparator) Len() int {
return len(c.Data[0])
}
func (c traversalResultComparator) Less(i, j int) bool {
c1 := c.Data[0][i]
c2 := c.Data[0][j]
return fmt.Sprintf("%v", c1[data.NodeKey]) < fmt.Sprintf("%v", c2[data.NodeKey])
}
func (c traversalResultComparator) Swap(i, j int) {
c.Data[0][i], c.Data[0][j] = c.Data[0][j], c.Data[0][i]
c.Data[1][i], c.Data[1][j] = c.Data[1][j], c.Data[1][i]
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"net/http"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graphql"
)
/*
EndpointGraphQLQuery is a query-only GraphQL endpoint URL (rooted). Handles
everything under graphql-query/...
*/
const EndpointGraphQLQuery = api.APIRoot + APIv1 + "/graphql-query/"
/*
GraphQLQueryEndpointInst creates a new endpoint handler.
*/
func GraphQLQueryEndpointInst() api.RestEndpointHandler {
return &graphQLQueryEndpoint{}
}
/*
Handler object for GraphQL operations.
*/
type graphQLQueryEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles GraphQL queries.
*/
func (e *graphQLQueryEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
gqlquery := map[string]interface{}{
"variables": nil,
"operationName": nil,
}
partition := r.URL.Query().Get("partition")
if partition == "" && len(resources) > 0 {
partition = resources[0]
}
if partition == "" {
http.Error(w, "Need a partition", http.StatusBadRequest)
return
}
query := r.URL.Query().Get("query")
if query == "" {
http.Error(w, "Need a query parameter", http.StatusBadRequest)
return
}
gqlquery["query"] = query
if operationName := r.URL.Query().Get("operationName"); operationName != "" {
gqlquery["operationName"] = operationName
}
if variables := r.URL.Query().Get("variables"); variables != "" {
varData := make(map[string]interface{})
if err := json.Unmarshal([]byte(variables), &varData); err != nil {
http.Error(w, "Could not decode variables: "+err.Error(), http.StatusBadRequest)
return
}
gqlquery["variables"] = varData
}
res, err := graphql.RunQuery(stringutil.CreateDisplayString(partition)+" query",
partition, gqlquery, api.GM, nil, true)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
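/*
Illustrative sketch (assumed host/port, partition "main" and node kind
"Person"): running a read-only query via URL parameters. operationName and
variables are optional and parsed as shown above.

    q := url.QueryEscape("{ Person { key, name } }")
    resp, _ := http.Get("http://localhost:9090" + EndpointGraphQLQuery +
        "main?query=" + q)
*/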
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (e *graphQLQueryEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/graphql-query/{partition}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "GraphQL interface which only executes non-modifying queries.",
"description": "The GraphQL interface can be used to query data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to query.",
"required": true,
"type": "string",
},
{
"name": "operationName",
"in": "query",
"description": "GraphQL query operation name.",
"required": false,
"type": "string",
},
{
"name": "query",
"in": "query",
"description": "GraphQL query.",
"required": true,
"type": "string",
},
{
"name": "variables",
"in": "query",
"description": "GraphQL query variable values.",
"required": false,
"type": "string",
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
"github.com/gorilla/websocket"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graphql"
)
/*
EndpointGraphQLSubscriptions is the GraphQL endpoint URL for subscriptions (rooted). Handles websockets under graphql-subscriptions/
*/
const EndpointGraphQLSubscriptions = api.APIRoot + APIv1 + "/graphql-subscriptions/"
/*
upgrader can upgrade normal requests to websocket communications
*/
var upgrader = websocket.Upgrader{
Subprotocols: []string{"graphql-subscriptions"},
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
var subscriptionCallbackError error
/*
GraphQLSubscriptionsEndpointInst creates a new endpoint handler.
*/
func GraphQLSubscriptionsEndpointInst() api.RestEndpointHandler {
return &graphQLSubscriptionsEndpoint{}
}
/*
Handler object for GraphQL operations.
*/
type graphQLSubscriptionsEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles GraphQL subscription queries.
*/
func (e *graphQLSubscriptionsEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
// Upgrade the incoming connection to a websocket
// If the upgrade fails then the client gets an HTTP error response.
conn, err := upgrader.Upgrade(w, r, nil)
// Websocket connections support one concurrent reader and one concurrent writer.
// See: https://godoc.org/github.com/gorilla/websocket#hdr-Concurrency
connRMutex := &sync.Mutex{}
connWMutex := &sync.Mutex{}
if err != nil {
// We give details here on what went wrong
w.Write([]byte(err.Error()))
return
}
subID := ""
// Ensure we have a partition to query
partition := r.URL.Query().Get("partition")
if partition == "" && len(resources) > 0 {
partition = resources[0]
}
if partition == "" {
connWMutex.Lock()
e.WriteError(conn, subID, "Need a 'partition' in path or as url parameter", true)
connWMutex.Unlock()
return
}
connWMutex.Lock()
conn.WriteMessage(websocket.TextMessage, []byte(`{"type":"init_success","payload":{}}`))
connWMutex.Unlock()
// Create the callback handler for the subscription
callbackHandler := &subscriptionCallbackHandler{
finished: false,
publish: func(data map[string]interface{}, err error) {
var res []byte
// Error injection for unit testing
if err == nil {
err = subscriptionCallbackError
}
// This is called if data in the datastore changes
if err == nil {
res, err = json.Marshal(map[string]interface{}{
"id": subID,
"type": "subscription_data",
"payload": data,
})
}
if err != nil {
connWMutex.Lock()
e.WriteError(conn, subID, err.Error(), true)
connWMutex.Unlock()
return
}
connWMutex.Lock()
conn.WriteMessage(websocket.TextMessage, res)
connWMutex.Unlock()
},
}
for {
// Read websocket message
connRMutex.Lock()
_, msg, err := conn.ReadMessage()
connRMutex.Unlock()
if err != nil {
// Unregister the callback handler
callbackHandler.finished = true
// If the client is still listening write the error message
// This is a NOP if the client hung up
connWMutex.Lock()
e.WriteError(conn, subID, err.Error(), true)
connWMutex.Unlock()
return
}
data := make(map[string]interface{})
if err := json.Unmarshal(msg, &data); err != nil {
connWMutex.Lock()
e.WriteError(conn, subID, err.Error(), false)
connWMutex.Unlock()
continue
}
// Check we got a message with a type
if msgType, ok := data["type"]; ok {
// Check if the user wants to start a new subscription
if _, ok := data["query"]; msgType == "subscription_start" && ok {
var res []byte
subID = fmt.Sprint(data["id"])
if _, ok := data["variables"]; !ok {
data["variables"] = nil
}
if _, ok := data["operationName"]; !ok {
data["operationName"] = nil
}
resData, err := graphql.RunQuery(stringutil.CreateDisplayString(partition)+" query",
partition, data, api.GM, callbackHandler, false)
if err == nil {
res, err = json.Marshal(map[string]interface{}{
"id": subID,
"type": "subscription_data",
"payload": resData,
})
}
if err != nil {
connWMutex.Lock()
e.WriteError(conn, subID, err.Error(), false)
connWMutex.Unlock()
continue
}
connWMutex.Lock()
conn.WriteMessage(websocket.TextMessage, []byte(
fmt.Sprintf(`{"id":"%s","type":"subscription_success","payload":{}}`, subID)))
conn.WriteMessage(websocket.TextMessage, res)
connWMutex.Unlock()
}
}
}
}
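/*
Sketch of the message exchange, derived from the handler above (JSON frames
over the websocket; the subscription query is an assumption):

    server -> {"type":"init_success","payload":{}}
    client -> {"id":"1","type":"subscription_start","query":"subscription { Person { key } }"}
    server -> {"id":"1","type":"subscription_success","payload":{}}
    server -> {"id":"1","type":"subscription_data","payload":{...}}   repeated on changes
    server -> {"id":"1","type":"subscription_fail","payload":{"errors":[...]}} on errors
*/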
/*
WriteError writes an error message to the websocket.
*/
func (e *graphQLSubscriptionsEndpoint) WriteError(conn *websocket.Conn,
subID string, msg string, close bool) {
// Write the error as a cleartext message
data, _ := json.Marshal(map[string]interface{}{
"id": subID,
"type": "subscription_fail",
"payload": map[string]interface{}{
"errors": []string{msg},
},
})
conn.WriteMessage(websocket.TextMessage, data)
if close {
// Write error as closing control message
conn.WriteControl(websocket.CloseMessage,
websocket.FormatCloseMessage(
websocket.CloseUnsupportedData, msg), time.Now().Add(10*time.Second))
conn.Close()
}
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (e *graphQLSubscriptionsEndpoint) SwaggerDefs(s map[string]interface{}) {
// No swagger definitions for this endpoint as it only handles websocket requests
}
// Callback Handler
// ================
/*
subscriptionCallbackHandler pushes new events to a subscription client via a websocket.
*/
type subscriptionCallbackHandler struct {
finished bool
publish func(data map[string]interface{}, err error)
}
func (ch *subscriptionCallbackHandler) Publish(data map[string]interface{}, err error) {
ch.publish(data, err)
}
func (ch *subscriptionCallbackHandler) IsFinished() bool {
return ch.finished
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"devt.de/krotik/common/lang/graphql/parser"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graphql"
)
/*
EndpointGraphQL is the GraphQL endpoint URL (rooted). Handles everything under graphql/...
*/
const EndpointGraphQL = api.APIRoot + APIv1 + "/graphql/"
/*
GraphQLEndpointInst creates a new endpoint handler.
*/
func GraphQLEndpointInst() api.RestEndpointHandler {
return &graphQLEndpoint{}
}
/*
Handler object for GraphQL operations.
*/
type graphQLEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandlePOST handles GraphQL queries.
*/
func (e *graphQLEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
var err error
var res map[string]interface{}
dec := json.NewDecoder(r.Body)
data := make(map[string]interface{})
if err := dec.Decode(&data); err != nil {
http.Error(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
toAST, ok1 := data["query-to-ast"]
toQuery, ok2 := data["ast-to-query"]
if ok1 || ok2 {
res := make(map[string]interface{})
if ok1 {
resast, err := parser.Parse("request", fmt.Sprint(toAST))
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
res["result-ast"] = resast.Plain()
}
if ok2 {
astmap, ok := toQuery.(map[string]interface{})
if !ok {
http.Error(w, "Plain AST object expected as 'ast-to-query' value", http.StatusBadRequest)
return
}
// Try to create a proper AST from plain AST
astnode, err := parser.ASTFromPlain(astmap)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Now pretty print the AST
ppres, err := parser.PrettyPrint(astnode)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
res["result-query"] = ppres
}
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
return
} else {
partData, ok := data["partition"]
if !ok && len(resources) > 0 {
partData = resources[0]
ok = true
}
if !ok || partData == "" {
http.Error(w, "Need a partition", http.StatusBadRequest)
return
}
part := fmt.Sprint(partData)
if _, ok := data["variables"]; !ok {
data["variables"] = nil
}
if _, ok := data["operationName"]; !ok {
data["operationName"] = nil
}
res, err = graphql.RunQuery(stringutil.CreateDisplayString(part)+" query",
part, data, api.GM, nil, false)
}
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
w.Header().Set("content-type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(res)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (e *graphQLEndpoint) SwaggerDefs(s map[string]interface{}) {
graphqlRequestParam := map[string]interface{}{
"name": "graphql_request",
"in": "body",
"description": "GraphQL request",
"required": true,
"schema": map[string]interface{}{
"$ref": "#/definitions/GraphQLRequest",
},
}
s["paths"].(map[string]interface{})["/v1/graphql/{partition}"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "GraphQL interface.",
"description": "The GraphQL interface can be used to query and modify data.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to query.",
"required": true,
"type": "string",
},
graphqlRequestParam,
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["definitions"].(map[string]interface{})["GraphQLRequest"] = map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"operationName": map[string]interface{}{
"description": "GraphQL query operation name.",
"type": "string",
},
"query": map[string]interface{}{
"description": "GraphQL query.",
"type": "string",
},
"variables": map[string]interface{}{
"description": "GraphQL query variable values.",
"type": "object",
},
},
}
s["paths"].(map[string]interface{})["/v1/graphql"] = map[string]interface{}{
"post": map[string]interface{}{
"summary": "GraphQL parser and pretty printer endpoint.",
"description": "The GraphQL endpoint without specifying a partition should be used to parse a given GraphQL query into an Abstract Syntax Tree or pretty print a given Abstract Syntax Tree into a GraphQL query.",
"consumes": []string{
"application/json",
},
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "data",
"in": "body",
"description": "Query or AST which should be converted.",
"required": true,
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"query-to-ast": map[string]interface{}{
"description": "Query which should be parsed.",
"type": "string",
},
"ast-to-query": map[string]interface{}{
"description": "AST which should be pretty printed.",
"type": "object",
},
},
},
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "The operation was successful.",
"schema": map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"result-ast": map[string]interface{}{
"description": "The resulting AST if a query was parsed.",
"type": "object",
},
"result-query": map[string]interface{}{
"description": "The pretty printed query if an AST was given.",
"type": "string",
},
},
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
}
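/*
Example (not part of EliasDB): a sketch of calling the endpoint above with
net/http. Host and API root are assumptions. The first request mirrors the
GraphQLRequest definition (query, variables, operationName); the second uses
the partition-less parser endpoint with the query-to-ast key handled by
HandlePOST.
*/
package main

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    post := func(url, body string) {
        res, err := http.Post(url, "application/json",
            bytes.NewBufferString(body))
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()
        data, _ := ioutil.ReadAll(res.Body)
        fmt.Println(string(data))
    }

    // Run a GraphQL query against partition main
    post("http://localhost:9090/db/v1/graphql/main",
        `{"query":"{ Person { key, name } }"}`)

    // Parse a query into a plain AST
    post("http://localhost:9090/db/v1/graphql",
        `{"query-to-ast":"{ Person { key } }"}`)
}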
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"net/http"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/graph"
)
/*
EndpointIndexQuery is the index endpoint URL (rooted). Handles everything under index/...
*/
const EndpointIndexQuery = api.APIRoot + APIv1 + "/index/"
/*
IndexEndpointInst creates a new endpoint handler.
*/
func IndexEndpointInst() api.RestEndpointHandler {
return &indexEndpoint{}
}
/*
Handler object for search queries.
*/
type indexEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles a search query REST call.
*/
func (ie *indexEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var err error
// Check parameters
if !checkResources(w, resources, 3, 3, "Need a partition, entity type (n or e) and a kind") {
return
}
if resources[1] != "n" && resources[1] != "e" {
http.Error(w, "Entity type must be n (nodes) or e (edges)", http.StatusBadRequest)
return
}
// Check what is queried
attr := r.URL.Query().Get("attr")
if attr == "" {
http.Error(w, "Query string for attr (attribute) is required", http.StatusBadRequest)
return
}
phrase := r.URL.Query().Get("phrase")
word := r.URL.Query().Get("word")
value := r.URL.Query().Get("value")
// Get the index query object
var iq graph.IndexQuery
if resources[1] == "n" {
iq, err = api.GM.NodeIndexQuery(resources[0], resources[2])
} else {
iq, err = api.GM.EdgeIndexQuery(resources[0], resources[2])
}
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
} else if iq == nil {
http.Error(w, "Unknown partition or node kind", http.StatusBadRequest)
return
}
// Do the lookup
var data interface{}
switch {
case phrase != "":
data, err = iq.LookupPhrase(attr, phrase)
if len(data.([]string)) == 0 {
data = []string{}
}
case word != "":
data, err = iq.LookupWord(attr, word)
if len(data.(map[string][]uint64)) == 0 {
data = map[string][]uint64{}
}
case value != "":
data, err = iq.LookupValue(attr, value)
if len(data.([]string)) == 0 {
data = []string{}
}
default:
http.Error(w, "Query string for either phrase, word or value is required", http.StatusBadRequest)
return
}
// Check if there was an error
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ie *indexEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/index/{partition}/{entity_type}/{kind}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Run index searches on the EliasDB datastore.",
"description": "The index endpoint should be used to run index searches for either a word, phrase or a whole value. All queries must specify a kind and an node/edge attribute.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to query.",
"required": true,
"type": "string",
},
{
"name": "entity_type",
"in": "path",
"description": "Datastore entity type which should selected. " +
"Either n for nodes or e for edges.",
"required": true,
"type": "string",
},
{
"name": "kind",
"in": "path",
"description": "Node or edge kind to be queried.",
"required": true,
"type": "string",
},
{
"name": "attr",
"in": "query",
"description": "Attribute which should contain the word, phrase or value.",
"required": true,
"type": "string",
},
{
"name": "word",
"in": "query",
"description": "Word to search for in word queries.",
"required": false,
"type": "string",
},
{
"name": "phrase",
"in": "query",
"description": "Phrase to search for in phrase queries.",
"required": false,
"type": "string",
},
{
"name": "value",
"in": "query",
"description": "Value (node/edge attribute value) to search for in value queries.",
"required": false,
"type": "string",
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A list of keys or when doing a word search a map with node/edge key to word positions.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
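/*
Example (not part of EliasDB): querying the index endpoint above with
net/http. Host, API root and data are assumptions. The path follows
/v1/index/{partition}/{entity_type}/{kind} and exactly one of the phrase,
word or value parameters must be given together with attr.
*/
package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    // Find all Person nodes whose name attribute contains the word John -
    // a word lookup returns a map of node keys to word positions
    res, err := http.Get(
        "http://localhost:9090/db/v1/index/main/n/Person?attr=name&word=John")
    if err != nil {
        log.Fatal(err)
    }
    defer res.Body.Close()

    data, _ := ioutil.ReadAll(res.Body)
    fmt.Println(string(data))
}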
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"devt.de/krotik/eliasdb/api"
)
/*
EndpointInfoQuery is the info endpoint URL (rooted). Handles everything under info/...
*/
const EndpointInfoQuery = api.APIRoot + APIv1 + "/info/"
/*
InfoEndpointInst creates a new endpoint handler.
*/
func InfoEndpointInst() api.RestEndpointHandler {
return &infoEndpoint{}
}
/*
Handler object for info queries.
*/
type infoEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles an info query REST call.
*/
func (ie *infoEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
data := make(map[string]interface{})
if len(resources) > 0 {
if resources[0] == "kind" {
// Kind info is requested
if len(resources) == 1 {
http.Error(w, "Missing node kind", http.StatusBadRequest)
return
}
na := api.GM.NodeAttrs(resources[1])
ea := api.GM.EdgeAttrs(resources[1])
if len(na) == 0 && len(ea) == 0 {
http.Error(w, fmt.Sprint("Unknown node kind ", resources[1]), http.StatusBadRequest)
return
}
data["node_attrs"] = na
data["node_edges"] = api.GM.NodeEdges(resources[1])
data["edge_attrs"] = ea
}
} else {
// Get general information
data["partitions"] = api.GM.Partitions()
nks := api.GM.NodeKinds()
data["node_kinds"] = nks
ncs := make(map[string]uint64)
for _, nk := range nks {
ncs[nk] = api.GM.NodeCount(nk)
}
data["node_counts"] = ncs
eks := api.GM.EdgeKinds()
data["edge_kinds"] = eks
ecs := make(map[string]uint64)
for _, ek := range eks {
ecs[ek] = api.GM.EdgeCount(ek)
}
data["edge_counts"] = ecs
}
// Write data
w.Header().Set("content-type", "application/json; charset=utf-8")
ret := json.NewEncoder(w)
ret.Encode(data)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (ie *infoEndpoint) SwaggerDefs(s map[string]interface{}) {
s["paths"].(map[string]interface{})["/v1/info"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return general datastore information.",
"description": "The info endpoint returns general database information such as known node kinds, known attributes, etc.",
"produces": []string{
"text/plain",
"application/json",
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A key-value map.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/info/kind/{kind}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return information on a given node or edge kind.",
"description": "The info kind endpoint returns information on a given node kind such as known attributes and edges.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "kind",
"in": "path",
"description": "Node or edge kind to be queried.",
"required": true,
"type": "string",
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A key-value map.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
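/*
Example (not part of EliasDB): querying the info endpoints above with
net/http. Host, API root and the Person kind are assumptions. /v1/info
returns general datastore information; /v1/info/kind/{kind} returns the
attributes and edges of a single kind.
*/
package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    for _, url := range []string{
        "http://localhost:9090/db/v1/info",
        "http://localhost:9090/db/v1/info/kind/Person",
    } {
        res, err := http.Get(url)
        if err != nil {
            log.Fatal(err)
        }
        data, _ := ioutil.ReadAll(res.Body)
        res.Body.Close()
        fmt.Println(string(data))
    }
}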
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/eql"
"devt.de/krotik/eliasdb/graph/data"
)
/*
ResultCacheMaxSize is the maximum size for the result cache
*/
var ResultCacheMaxSize uint64
/*
ResultCacheMaxAge is the maximum age a result cache entry can have in seconds
*/
var ResultCacheMaxAge int64
/*
ResultCache is a cache for result sets (by default no expiry and no limit)
*/
var ResultCache *datautil.MapCache
/*
idCount is an ID counter for results
*/
var idCount = time.Now().Unix()
/*
EndpointQuery is the query endpoint URL (rooted). Handles everything under query/...
*/
const EndpointQuery = api.APIRoot + APIv1 + "/query/"
/*
QueryEndpointInst creates a new endpoint handler.
*/
func QueryEndpointInst() api.RestEndpointHandler {
// Init the result cache if necessary
if ResultCache == nil {
ResultCache = datautil.NewMapCache(ResultCacheMaxSize, ResultCacheMaxAge)
}
return &queryEndpoint{}
}
/*
Handler object for search queries.
*/
type queryEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles a search query REST call.
*/
func (eq *queryEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
var err error
// Check parameters
if !checkResources(w, resources, 1, 1, "Need a partition") {
return
}
// Get partition
part := resources[0]
// Get limit parameter; -1 if not set
limit, ok := queryParamPosNum(w, r, "limit")
if !ok {
return
}
// Get offset parameter; -1 if not set
offset, ok := queryParamPosNum(w, r, "offset")
if !ok {
return
}
// Get groups parameter
gs := r.URL.Query().Get("groups")
showGroups := gs != ""
// See if a result ID was given
resID := r.URL.Query().Get("rid")
if resID != "" {
res, ok := ResultCache.Get(resID)
if !ok {
http.Error(w, "Unknown result ID (rid parameter)", http.StatusBadRequest)
return
}
err = eq.writeResultData(w, res.(*APISearchResult), part, resID, offset, limit, showGroups)
} else {
var res eql.SearchResult
// Run the query
query := r.URL.Query().Get("q")
if query == "" {
http.Error(w, "Missing query (q parameter)", http.StatusBadRequest)
return
}
res, err = eql.RunQuery(stringutil.CreateDisplayString(part)+" query",
part, query, api.GM)
if err == nil {
sres := &APISearchResult{res, nil}
// Make sure the result has a primary node column
_, err = sres.GetPrimaryNodeColumn()
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Store the result in the cache
resID = genID()
ResultCache.Put(resID, sres)
err = eq.writeResultData(w, sres, part, resID, offset, limit, showGroups)
}
}
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
/*
writeResultData writes result data for the client.
*/
func (eq *queryEndpoint) writeResultData(w http.ResponseWriter, res *APISearchResult,
part string, resID string, offset int, limit int, showGroups bool) error {
var err error
// Write out the data
header := res.Header()
ret := json.NewEncoder(w)
resdata := make(map[string]interface{})
// Count total selections
sels := res.Selections()
totalSels := 0
for _, s := range sels {
if s {
totalSels++
}
}
resdata["total_selections"] = totalSels
rows := res.Rows()
srcs := res.RowSources()
if limit == -1 && offset == -1 {
resdata["rows"] = rows
resdata["sources"] = srcs
resdata["selections"] = sels
} else {
if offset > 0 {
if offset >= len(rows) {
return fmt.Errorf("Offset exceeds available rows")
}
rows = rows[offset:]
srcs = srcs[offset:]
sels = sels[offset:]
}
if limit != -1 && limit < len(rows) {
rows = rows[:limit]
srcs = srcs[:limit]
sels = sels[:limit]
}
resdata["rows"] = rows
resdata["sources"] = srcs
resdata["selections"] = sels
}
// Write out result header
resdataHeader := make(map[string]interface{})
resdata["header"] = resdataHeader
resdataHeader["labels"] = header.Labels()
resdataHeader["format"] = header.Format()
resdataHeader["data"] = header.Data()
pk := header.PrimaryKind()
resdataHeader["primary_kind"] = pk
if showGroups {
groupList := make([][]string, 0, len(srcs))
if len(srcs) > 0 {
var col int
// Get column for primary kind
col, err = res.GetPrimaryNodeColumn()
// Lookup groups for nodes
for _, s := range resdata["sources"].([][]string) {
if err == nil {
var nodes []data.Node
groups := make([]string, 0, 3)
key := strings.Split(s[col], ":")[2]
nodes, _, err = api.GM.TraverseMulti(part, key, pk,
":::"+eql.GroupNodeKind, false)
if err == nil {
for _, n := range nodes {
groups = append(groups, n.Key())
}
}
groupList = append(groupList, groups)
}
}
}
resdata["groups"] = groupList
}
if err == nil {
// Set response header values
w.Header().Add(HTTPHeaderTotalCount, fmt.Sprint(res.RowCount()))
w.Header().Add(HTTPHeaderCacheID, resID)
w.Header().Set("content-type", "application/json; charset=utf-8")
ret.Encode(resdata)
}
return err
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (eq *queryEndpoint) SwaggerDefs(s map[string]interface{}) {
// Add query paths
s["paths"].(map[string]interface{})["/v1/query/{partition}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Run EQL queries to query the EliasDB datastore.",
"description": "The query endpoint should be used to run EQL search " +
"queries against partitions. The return value is always a list " +
"(even if there is only a single entry). A query result gets an " +
"ID and is stored in a cache. The ID is returned in the X-Cache-Id " +
"header. Subsequent requests for the same result can use the ID instead of a query.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": []map[string]interface{}{
{
"name": "partition",
"in": "path",
"description": "Partition to query.",
"required": true,
"type": "string",
},
{
"name": "q",
"in": "query",
"description": "URL encoded query to execute.",
"required": false,
"type": "string",
},
{
"name": "rid",
"in": "query",
"description": "Result ID to retrieve from the result cache.",
"required": false,
"type": "number",
"format": "integer",
},
{
"name": "limit",
"in": "query",
"description": "How many list items to return.",
"required": false,
"type": "number",
"format": "integer",
},
{
"name": "offset",
"in": "query",
"description": "Offset in the dataset.",
"required": false,
"type": "number",
"format": "integer",
},
{
"name": "groups",
"in": "query",
"description": "Include group information in the result if set to any value.",
"required": false,
"type": "number",
"format": "integer",
},
},
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A query result",
"schema": map[string]interface{}{
"$ref": "#/definitions/QueryResult",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add QueryResult to definitions
s["definitions"].(map[string]interface{})["QueryResult"] = map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"header": map[string]interface{}{
"description": "Header for the query result.",
"type": "object",
"properties": map[string]interface{}{
"labels": map[string]interface{}{
"description": "All column labels of the search result.",
"type": "array",
"items": map[string]interface{}{
"description": "Column label.",
"type": "string",
},
},
"format": map[string]interface{}{
"description": "All column format definitions of the search result.",
"type": "array",
"items": map[string]interface{}{
"description": "Column format as specified in the show format (e.g. text).",
"type": "string",
},
},
"data": map[string]interface{}{
"description": "The data which is displayed in each column of the search result.",
"type": "array",
"items": map[string]interface{}{
"description": "Data source for the column (e.g. 1:n:name - Name of starting nodes, 3:e:key - Key of edge traversed in the second traversal).",
"type": "string",
},
},
},
},
"rows": map[string]interface{}{
"description": "Rows of the query result.",
"type": "array",
"items": map[string]interface{}{
"description": "Columns of a row of the query result.",
"type": "array",
"items": map[string]interface{}{
"description": "A single cell of the query result (string, integer or null).",
"type": "object",
},
},
},
"sources": map[string]interface{}{
"description": "Data sources of the query result.",
"type": "array",
"items": map[string]interface{}{
"description": "Columns of a row of the query result.",
"type": "array",
"items": map[string]interface{}{
"description": "Data source of a single cell of the query result.",
"type": "string",
},
},
},
"groups": map[string]interface{}{
"description": "Group names for each row.",
"type": "array",
"items": map[string]interface{}{
"description": " Groups of the primary kind node.",
"type": "array",
"items": map[string]interface{}{
"description": "Group name.",
"type": "string",
},
},
},
"selections": map[string]interface{}{
"description": "List of row selections.",
"type": "array",
"items": map[string]interface{}{
"description": "Row selection.",
"type": "boolean",
},
},
"total_selections": map[string]interface{}{
"description": "Number of total selections.",
"type": "number",
"format": "integer",
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
}
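/*
Example (not part of EliasDB): a sketch of the result cache flow described
above. The first request runs an EQL query and the X-Cache-Id response
header (see HTTPHeaderCacheID) is then used to page through the cached
result via the rid parameter. Host, API root and the query are assumptions.
*/
package main

import (
    "fmt"
    "log"
    "net/http"
)

func main() {
    base := "http://localhost:9090/db/v1/query/main"

    // Run the query - the result is stored in the server-side result cache
    res, err := http.Get(base + "?q=get%20Person")
    if err != nil {
        log.Fatal(err)
    }
    res.Body.Close()

    rid := res.Header.Get("X-Cache-Id")
    fmt.Println("Total rows:", res.Header.Get("X-Total-Count"))

    // Fetch the second page of the cached result via its result ID
    res, err = http.Get(base + "?rid=" + rid + "&offset=10&limit=10")
    if err != nil {
        log.Fatal(err)
    }
    res.Body.Close()
}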
/*
genID generates a unique ID.
*/
func genID() string {
idCount++
return fmt.Sprint(idCount)
}
/*
APISearchResult is a search result maintained by the API. It embeds a normal
eql.SearchResult and keeps track of row selections.
*/
type APISearchResult struct {
eql.SearchResult // Normal eql search result
selections []bool // Selections of the result
}
/*
GetPrimaryNodeColumn determines the first primary node column.
*/
func (r *APISearchResult) GetPrimaryNodeColumn() (int, error) {
var err error
pk := r.Header().PrimaryKind()
col := -1
rs := r.RowSources()
if len(rs) > 0 {
for i, scol := range rs[0] {
scolParts := strings.Split(scol, ":")
if len(scolParts) > 1 && pk == scolParts[1] {
col = i
}
}
}
if col == -1 {
err = fmt.Errorf("Could not determine key of primary node - query needs a primary expression")
}
return col, err
}
/*
Selections returns all current selections.
*/
func (r *APISearchResult) Selections() []bool {
r.refreshSelection()
return r.selections
}
/*
SetSelection sets a new selection.
*/
func (r *APISearchResult) SetSelection(line int, selection bool) {
r.refreshSelection()
if line < len(r.selections) {
r.selections[line] = selection
}
}
/*
AllSelection selects all rows.
*/
func (r *APISearchResult) AllSelection() {
r.refreshSelection()
for i := 0; i < len(r.selections); i++ {
r.selections[i] = true
}
}
/*
NoneSelection deselects all rows.
*/
func (r *APISearchResult) NoneSelection() {
r.refreshSelection()
for i := 0; i < len(r.selections); i++ {
r.selections[i] = false
}
}
/*
InvertSelection inverts the current selection.
*/
func (r *APISearchResult) InvertSelection() {
r.refreshSelection()
for i := 0; i < len(r.selections); i++ {
r.selections[i] = !r.selections[i]
}
}
/*
refreshSelection reallocates the selection array if necessary.
*/
func (r *APISearchResult) refreshSelection() {
l := r.SearchResult.RowCount()
if len(r.selections) != l {
origSelections := r.selections
// The selections array differs from the row count - resize it and
// carry over the existing selections
r.selections = make([]bool, l)
for i, s := range origSelections {
if i < l {
r.selections[i] = s
}
}
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"encoding/json"
"fmt"
"net/http"
"sort"
"strconv"
"strings"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/eql"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
EndpointQueryResult is the query result endpoint URL (rooted). Handles everything under queryresult/...
*/
const EndpointQueryResult = api.APIRoot + APIv1 + "/queryresult/"
/*
QueryResultEndpointInst creates a new endpoint handler.
*/
func QueryResultEndpointInst() api.RestEndpointHandler {
return &queryResultEndpoint{}
}
/*
Handler object for query result operations.
*/
type queryResultEndpoint struct {
*api.DefaultEndpointHandler
}
/*
HandleGET handles info requests on query results.
*/
func (qre *queryResultEndpoint) HandleGET(w http.ResponseWriter, r *http.Request, resources []string) {
qre.handleRequest("get", w, r, resources)
}
/*
HandlePUT handles state changing operations on query results.
*/
func (qre *queryResultEndpoint) HandlePUT(w http.ResponseWriter, r *http.Request, resources []string) {
qre.handleRequest("put", w, r, resources)
}
/*
HandlePOST handles state changing operations on query results.
*/
func (qre *queryResultEndpoint) HandlePOST(w http.ResponseWriter, r *http.Request, resources []string) {
qre.handleRequest("post", w, r, resources)
}
/*
HandleDELETE handles state changing operations on query results.
*/
func (qre *queryResultEndpoint) HandleDELETE(w http.ResponseWriter, r *http.Request, resources []string) {
qre.handleRequest("delete", w, r, resources)
}
func (qre *queryResultEndpoint) handleRequest(requestType string, w http.ResponseWriter, r *http.Request, resources []string) {
// Check parameters
if !checkResources(w, resources, 2, 3, "Need a result ID and an operation") {
return
}
// Limit is either not set (then -1) or a positive value
limit, ok := queryParamPosNum(w, r, "limit")
if !ok {
return
}
resID := resources[0]
op := resources[1]
res, ok := ResultCache.Get(resID)
if !ok {
http.Error(w, "Unknown query result", http.StatusBadRequest)
return
}
sres := res.(*APISearchResult)
if op == "csv" {
if requestType != "get" {
http.Error(w, "Csv can only handle GET requests",
http.StatusBadRequest)
return
}
w.Header().Set("content-type", "text/plain; charset=utf-8")
w.Write([]byte(sres.CSV()))
return
} else if op == "quickfilter" {
qre.quickFilter(requestType, w, resources, sres, limit)
return
} else if op == "select" {
qre.selectRows(requestType, w, resources, sres)
return
} else if op == "groupselected" {
qre.groupSelected(requestType, w, r, resources, sres)
return
}
http.Error(w, fmt.Sprintf("Unknown operation: %v", op), http.StatusBadRequest)
}
/*
groupSelected implements adding/removing all selected nodes to/from a given group.
*/
func (qre *queryResultEndpoint) groupSelected(requestType string, w http.ResponseWriter, r *http.Request,
resources []string, sres *APISearchResult) {
var col int
var err error
addNodeToGroup := func(trans graph.Trans, part, groupName, key, kind string) error {
// Add to group
edge := data.NewGraphEdge()
edge.SetAttr("key", stringutil.MD5HexString(fmt.Sprintf("%s#%s#%s", key, kind, groupName)))
edge.SetAttr("kind", "Containment")
edge.SetAttr(data.EdgeEnd1Key, groupName)
edge.SetAttr(data.EdgeEnd1Kind, eql.GroupNodeKind)
edge.SetAttr(data.EdgeEnd1Role, "Container")
edge.SetAttr(data.EdgeEnd1Cascading, false)
edge.SetAttr(data.EdgeEnd2Key, key)
edge.SetAttr(data.EdgeEnd2Kind, kind)
edge.SetAttr(data.EdgeEnd2Role, "ContainedItem")
edge.SetAttr(data.EdgeEnd2Cascading, false)
return trans.StoreEdge(part, edge)
}
removeNodeFromGroup := func(trans graph.Trans, part, groupName, key, kind string) error {
var nodes []data.Node
var edges []data.Edge
nodes, edges, err = api.GM.TraverseMulti(part, key, kind, ":::"+eql.GroupNodeKind, false)
if err == nil {
for i, n := range nodes {
if n.Key() == groupName {
errorutil.AssertOk(trans.RemoveEdge(part, edges[i].Key(), edges[i].Kind()))
}
}
}
return err
}
trans := graph.NewGraphTrans(api.GM)
part := sres.Header().Partition()
selections := sres.Selections()
if col, err = sres.GetPrimaryNodeColumn(); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if len(resources) == 3 {
if requestType != "put" && requestType != "delete" {
http.Error(w, "Groupselected for a specific group can only handle PUT and DELETE requests",
http.StatusBadRequest)
return
}
groupName := resources[2]
for i, srcs := range sres.RowSources() {
src := strings.Split(srcs[col], ":")
kind := src[1]
key := src[2]
if selections[i] {
// Add to or remove from the group
if requestType == "put" {
errorutil.AssertOk(addNodeToGroup(trans, part, groupName, key, kind))
} else if requestType == "delete" {
errorutil.AssertOk(removeNodeFromGroup(trans, part, groupName, key, kind))
}
}
}
} else {
if requestType != "get" && requestType != "post" {
http.Error(w, "Groupselected can only handle GET and POST requests",
http.StatusBadRequest)
return
}
if requestType == "post" {
var reqGroups []interface{}
var reqKeys, reqKinds []interface{}
// Apply the given state
gdata := make(map[string]interface{})
// Parse the data
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&gdata); err != nil {
http.Error(w, "Could not decode request body as object with lists of groups, keys and kinds: "+err.Error(),
http.StatusBadRequest)
return
}
reqGroupsVal, ok1 := gdata["groups"]
reqKeysVal, ok2 := gdata["keys"]
reqKindsVal, ok3 := gdata["kinds"]
if ok1 && ok2 && ok3 {
reqGroups, ok1 = reqGroupsVal.([]interface{})
reqKeys, ok2 = reqKeysVal.([]interface{})
reqKinds, ok3 = reqKindsVal.([]interface{})
}
if !ok1 || !ok2 || !ok3 {
http.Error(w, "Wrong data structures in request body - expecting an object with lists of groups, keys and kinds.",
http.StatusBadRequest)
return
}
// Remove groups from all selected nodes
trans2 := graph.NewGraphTrans(api.GM)
for i, srcs := range sres.RowSources() {
src := strings.Split(srcs[col], ":")
kind := src[1]
key := src[2]
if selections[i] {
var nodes []data.Node
nodes, _, err = api.GM.TraverseMulti(part, key, kind, ":::"+eql.GroupNodeKind, false)
if err == nil {
for _, n := range nodes {
errorutil.AssertOk(removeNodeFromGroup(trans2, part, n.Key(), key, kind)) // There should be no errors at this point
}
}
}
if err != nil {
break
}
}
if err == nil {
err = trans2.Commit()
if err == nil {
for i, g := range reqGroups {
reqKindsArr := reqKinds[i].([]interface{})
for j, k := range reqKeys[i].([]interface{}) {
errorutil.AssertOk(addNodeToGroup(trans, part,
fmt.Sprint(g), fmt.Sprint(k), fmt.Sprint(reqKindsArr[j]))) // There should be no errors at this point
}
}
}
}
}
}
if err == nil {
if err = trans.Commit(); err == nil {
var sstate map[string]interface{}
if sstate, err = qre.groupSelectionState(sres, part, col, selections); err == nil {
qre.dataWriter(w).Encode(sstate)
}
}
}
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
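/*
Example (not part of EliasDB): the request body expected by the POST variant
of groupselected above - parallel lists where keys[i] and kinds[i] describe
the selected member nodes of groups[i]. Host, API root, result ID and data
are assumptions.
*/
package main

import (
    "bytes"
    "encoding/json"
    "log"
    "net/http"
)

func main() {
    // Group selection state: nodes 123 and 456 (both Person) are members
    // of the group engineers
    state, _ := json.Marshal(map[string]interface{}{
        "groups": []string{"engineers"},
        "keys":   [][]string{{"123", "456"}},
        "kinds":  [][]string{{"Person", "Person"}},
    })

    res, err := http.Post(
        "http://localhost:9090/db/v1/queryresult/42/groupselected",
        "application/json", bytes.NewBuffer(state))
    if err != nil {
        log.Fatal(err)
    }
    res.Body.Close()
}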
/*
groupSelectionState returns the current group selection state of a given query result.
*/
func (qre *queryResultEndpoint) groupSelectionState(sres *APISearchResult, part string, primaryNodeCol int, selections []bool) (map[string]interface{}, error) {
var ret map[string]interface{}
var err error
// Get groups for all selected nodes
retGroups := []string{}
retKeys := [][]string{}
retKinds := [][]string{}
memberKeys := make(map[string][]string)
memberKinds := make(map[string][]string)
for i, srcs := range sres.RowSources() {
src := strings.Split(srcs[primaryNodeCol], ":")
kind := src[1]
key := src[2]
if selections[i] {
var nodes []data.Node
nodes, _, err = api.GM.TraverseMulti(part, key, kind, ":::"+eql.GroupNodeKind, false)
if err == nil {
for _, n := range nodes {
nkeys, ok := memberKeys[n.Key()]
nkinds := memberKinds[n.Key()]
if !ok {
nkeys = make([]string, 0)
nkinds = make([]string, 0)
}
memberKeys[n.Key()] = append(nkeys, key)
memberKinds[n.Key()] = append(nkinds, kind)
}
}
}
if err != nil {
break
}
}
memberKeysList := make([]string, 0, len(memberKeys))
for g := range memberKeys {
memberKeysList = append(memberKeysList, g)
}
sort.Strings(memberKeysList)
for _, g := range memberKeysList {
retGroups = append(retGroups, g)
retKeys = append(retKeys, memberKeys[g])
retKinds = append(retKinds, memberKinds[g])
}
if err == nil {
ret = map[string]interface{}{
"groups": retGroups,
"keys": retKeys,
"kinds": retKinds,
}
}
return ret, err
}
/*
selectRows implements the row selection functionality.
*/
func (qre *queryResultEndpoint) selectRows(requestType string, w http.ResponseWriter,
resources []string, sres *APISearchResult) {
if requestType != "put" && requestType != "get" {
http.Error(w, "Select can only handle GET and PUT requests", http.StatusBadRequest)
return
}
if requestType == "get" {
var col int
var err error
var keys, kinds []string
// Just return the current selections
if col, err = sres.GetPrimaryNodeColumn(); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
sels := sres.Selections()
for i, srcs := range sres.RowSources() {
if sels[i] {
src := strings.Split(srcs[col], ":")
keys = append(keys, src[2])
kinds = append(kinds, src[1])
}
}
qre.dataWriter(w).Encode(map[string][]string{
"keys": keys,
"kinds": kinds,
})
return
} else if len(resources) < 3 {
http.Error(w,
"Need a selection ('all', 'none', 'invert' or row number)",
http.StatusBadRequest)
return
}
if resources[2] == "all" {
sres.AllSelection()
} else if resources[2] == "none" {
sres.NoneSelection()
} else if resources[2] == "invert" {
sres.InvertSelection()
} else {
i, err := strconv.ParseInt(resources[2], 10, 64)
row := int(i)
selections := sres.Selections()
if err != nil || row < 0 || row >= len(selections) {
http.Error(w, "Invalid selection row number", http.StatusBadRequest)
return
}
sres.SetSelection(row, !selections[row])
}
// Count total selections
totalSels := 0
for _, s := range sres.Selections() {
if s {
totalSels++
}
}
qre.dataWriter(w).Encode(map[string]int{
"total_selections": totalSels,
})
}
/*
quickFilter implements the quickfilter functionality.
*/
func (qre *queryResultEndpoint) quickFilter(requestType string, w http.ResponseWriter,
resources []string, sres *APISearchResult, limit int) {
if requestType != "get" {
http.Error(w, "Quickfilter can only handle GET requests", http.StatusBadRequest)
return
} else if len(resources) < 3 {
http.Error(w, "Need a query result column to filter", http.StatusBadRequest)
return
}
i, err := strconv.ParseInt(resources[2], 10, 64)
index := int(i)
if err != nil || index < 0 || index >= len(sres.Header().Labels()) {
http.Error(w, "Invalid query result column", http.StatusBadRequest)
return
}
// Go through the column in question and collect the data
counts := make(map[string]uint64)
for _, row := range sres.Rows() {
val := fmt.Sprint(row[index])
counts[val]++
}
values := make([]string, 0, len(counts))
frequencies := make([]uint64, 0, len(counts))
for val, freq := range counts {
values = append(values, val)
frequencies = append(frequencies, freq)
}
sort.Stable(&countComparator{values, frequencies})
if limit != -1 && len(values) > limit {
values = values[:limit]
frequencies = frequencies[:limit]
}
qre.dataWriter(w).Encode(map[string]interface{}{
"values": values,
"frequencies": frequencies,
})
}
/*
dataWriter returns an object to write result data.
*/
func (qre *queryResultEndpoint) dataWriter(w http.ResponseWriter) *json.Encoder {
w.Header().Set("content-type", "application/json; charset=utf-8")
return json.NewEncoder(w)
}
/*
SwaggerDefs is used to describe the endpoint in swagger.
*/
func (qre *queryResultEndpoint) SwaggerDefs(s map[string]interface{}) {
required := []map[string]interface{}{
{
"name": "rid",
"in": "path",
"description": "Result ID of a query result.",
"required": true,
"type": "string",
},
}
column := map[string]interface{}{
"name": "column",
"in": "path",
"description": "Column of the query result.",
"required": true,
"type": "string",
}
row := map[string]interface{}{
"name": "row",
"in": "path",
"description": "Row number of the query result or 'all', 'none' or 'invert'.",
"required": true,
"type": "string",
}
groupName := map[string]interface{}{
"name": "group_name",
"in": "path",
"description": "Name of an existing group.",
"required": true,
"type": "string",
}
limit := map[string]interface{}{
"name": "limit",
"in": "query",
"description": "Limit the maximum number of result items.",
"required": false,
"type": "string",
}
selectionStateParam := map[string]interface{}{
"name": "selection_state",
"in": "body",
"description": "Group seletion state of a query result",
"required": true,
"schema": map[string]interface{}{
"$ref": "#/definitions/GroupSelectionState",
},
}
selectionStateGroups := map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"groups": map[string]interface{}{
"description": "List of group names which include one or more selected nodes.",
"type": "array",
"items": map[string]interface{}{
"description": "Group name.",
"type": "string",
},
},
"keys": map[string]interface{}{
"description": "Lists of selected node keys which are part of the groups in the 'groups' list.",
"type": "array",
"items": map[string]interface{}{
"description": "List of node keys.",
"type": "array",
"items": map[string]interface{}{
"description": "Node key.",
"type": "string",
},
},
},
"kinds": map[string]interface{}{
"description": "Lists of selected node kinds which are part of the groups in the 'groups' list.",
"type": "array",
"items": map[string]interface{}{
"description": "List of node kinds.",
"type": "array",
"items": map[string]interface{}{
"description": "Node kind.",
"type": "string",
},
},
},
},
}
selectionState := map[string]interface{}{
"type": "object",
"properties": map[string]interface{}{
"keys": map[string]interface{}{
"description": "Lists of selected node keys.",
"type": "array",
"items": map[string]interface{}{
"description": "Node key.",
"type": "string",
},
},
"kinds": map[string]interface{}{
"description": "Kinds of all selected nodes.",
"type": "array",
"items": map[string]interface{}{
"description": "Node kind.",
"type": "string",
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/csv"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return the search result in CSV format.",
"description": "The csv endpoint is used to generate a CSV string from the search result.",
"produces": []string{
"text/plain",
},
"parameters": append(required),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "A CSV string.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/quickfilter/{column}"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return quickfilter information on a given result column.",
"description": "The quickfilter endpoint is used to determine the 10 most frequent used values in a particular result column.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required, column, limit),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "An object containing values and frequencies.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/select"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Return the (primary) nodes which are currently selected.",
"description": "The select endpoint is used to query all selected nodes of a given query result.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": required,
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current total selections.",
"schema": map[string]interface{}{
"$ref": "#/definitions/SelectionState",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/select/{row}"] = map[string]interface{}{
"put": map[string]interface{}{
"summary": "Selects one or more rows of a given query result.",
"description": "The select endpoint is used to select one or more rows of a given query result.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required, row),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current total selections.",
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/groupselected/{group_name}"] = map[string]interface{}{
"put": map[string]interface{}{
"summary": "Add all selected nodes (primary node of each row) to the given group.",
"description": "The groupselected endpoint is used to add all selected nodes (primary node of each row) to the given (existing) group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required, groupName),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current group selection state after the operation.",
"schema": map[string]interface{}{
"$ref": "#/definitions/GroupSelectionState",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"delete": map[string]interface{}{
"summary": "Remove all selected nodes (primary node of each row) from the given group.",
"description": "The groupselected endpoint is used to remove all selected nodes (primary node of each row) from the given (existing) group.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required, groupName),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current group selection state after the operation.",
"schema": map[string]interface{}{
"$ref": "#/definitions/GroupSelectionState",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
s["paths"].(map[string]interface{})["/v1/queryresult/{rid}/groupselected"] = map[string]interface{}{
"get": map[string]interface{}{
"summary": "Get the current group selection state.",
"description": "Returns the current selections state which contains all selected nodes which are in groups.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current group selection state.",
"schema": map[string]interface{}{
"$ref": "#/definitions/GroupSelectionState",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
"post": map[string]interface{}{
"summary": "Set a new group selection state.",
"description": "Sets the groups in the given selection state.",
"produces": []string{
"text/plain",
"application/json",
},
"parameters": append(required, selectionStateParam),
"responses": map[string]interface{}{
"200": map[string]interface{}{
"description": "Current group selection state after the operation.",
"schema": map[string]interface{}{
"$ref": "#/definitions/GroupSelectionState",
},
},
"default": map[string]interface{}{
"description": "Error response",
"schema": map[string]interface{}{
"$ref": "#/definitions/Error",
},
},
},
},
}
// Add generic error object to definition
s["definitions"].(map[string]interface{})["Error"] = map[string]interface{}{
"description": "A human readable error mesage.",
"type": "string",
}
// Add selection states to definition
s["definitions"].(map[string]interface{})["SelectionState"] = selectionState
s["definitions"].(map[string]interface{})["GroupSelectionState"] = selectionStateGroups
}
/*
countComparator is a comparator object used for sorting the counts
*/
type countComparator struct {
Values []string
Frequencies []uint64
}
func (c countComparator) Len() int {
return len(c.Values)
}
func (c countComparator) Less(i, j int) bool {
if c.Frequencies[i] == c.Frequencies[j] {
return c.Values[i] < c.Values[j]
}
return c.Frequencies[i] > c.Frequencies[j]
}
func (c countComparator) Swap(i, j int) {
c.Values[i], c.Values[j] = c.Values[j], c.Values[i]
c.Frequencies[i], c.Frequencies[j] = c.Frequencies[j], c.Frequencies[i]
}
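/*
Example (not part of EliasDB): demonstrates the ordering produced by
countComparator above - highest frequency first, ties broken by value in
ascending order. The type is replicated here since it is unexported.
*/
package main

import (
    "fmt"
    "sort"
)

type countComparator struct {
    Values      []string
    Frequencies []uint64
}

func (c countComparator) Len() int { return len(c.Values) }

func (c countComparator) Less(i, j int) bool {
    if c.Frequencies[i] == c.Frequencies[j] {
        return c.Values[i] < c.Values[j]
    }
    return c.Frequencies[i] > c.Frequencies[j]
}

func (c countComparator) Swap(i, j int) {
    c.Values[i], c.Values[j] = c.Values[j], c.Values[i]
    c.Frequencies[i], c.Frequencies[j] = c.Frequencies[j], c.Frequencies[i]
}

func main() {
    values := []string{"b", "a", "c"}
    frequencies := []uint64{2, 2, 5}

    sort.Stable(&countComparator{values, frequencies})

    fmt.Println(values, frequencies) // Output: [c a b] [5 2 2]
}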
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package v1
import (
"net/http"
"strconv"
"strings"
"devt.de/krotik/eliasdb/api"
)
/*
APIv1 is the directory for version 1 of the API
*/
const APIv1 = "/v1"
/*
HTTPHeaderTotalCount is a special header value containing the total count of objects.
*/
const HTTPHeaderTotalCount = "X-Total-Count"
/*
HTTPHeaderCacheID is a special header value containing a cache ID for a quick follow up query.
*/
const HTTPHeaderCacheID = "X-Cache-Id"
/*
V1EndpointMap is a map of urls to endpoints for version 1 of the API
*/
var V1EndpointMap = map[string]api.RestEndpointInst{
EndpointBlob: BlobEndpointInst,
EndpointClusterQuery: ClusterEndpointInst,
EndpointEql: EqlEndpointInst,
EndpointGraph: GraphEndpointInst,
EndpointGraphQL: GraphQLEndpointInst,
EndpointGraphQLQuery: GraphQLQueryEndpointInst,
EndpointGraphQLSubscriptions: GraphQLSubscriptionsEndpointInst,
EndpointIndexQuery: IndexEndpointInst,
EndpointFindQuery: FindEndpointInst,
EndpointInfoQuery: InfoEndpointInst,
EndpointQuery: QueryEndpointInst,
EndpointQueryResult: QueryResultEndpointInst,
EndpointECALInternal: ECALEndpointInst,
EndpointECALSock: ECALSockEndpointInst,
}
/*
V1PublicEndpointMap is a map of urls to public endpoints for version 1 of the API
*/
var V1PublicEndpointMap = map[string]api.RestEndpointInst{
EndpointECALPublic: ECALEndpointInst,
}
// Helper functions
// ================
/*
checkResources checks the given resources of a request.
*/
func checkResources(w http.ResponseWriter, resources []string, requiredMin int, requiredMax int, errorMsg string) bool {
if len(resources) < requiredMin {
http.Error(w, errorMsg, http.StatusBadRequest)
return false
} else if len(resources) > requiredMax {
http.Error(w, "Invalid resource specification: "+strings.Join(resources[1:], "/"), http.StatusBadRequest)
return false
}
return true
}
/*
queryParamPosNum extracts a positive number from a query parameter. Returns -1 and true
if the parameter was not given.
*/
func queryParamPosNum(w http.ResponseWriter, r *http.Request, param string) (int, bool) {
val := r.URL.Query().Get(param)
if val == "" {
return -1, true
}
num, err := strconv.Atoi(val)
if err != nil || num < 0 {
http.Error(w, "Invalid parameter value: "+param+" should be a positive integer number", http.StatusBadRequest)
return -1, false
}
return num, true
}
package cluster
import (
"bytes"
"fmt"
"time"
)
/*
msmap is a map of all known memory-only memberStorages.
*/
var msmap = make(map[*DistributedStorage]*memberStorage)
/*
ClearMSMap clears the current map of known memory-only memberStorages.
*/
func ClearMSMap() {
msmap = make(map[*DistributedStorage]*memberStorage)
}
/*
DumpMemoryClusterLayout returns the current storage layout in a memory-only cluster
for a given storage manager (e.g. mainPerson.nodes for Person nodes of partition main).
*/
func DumpMemoryClusterLayout(smname string) string {
buf := new(bytes.Buffer)
for _, ms := range msmap {
buf.WriteString(fmt.Sprintf("MemoryStorage: %s\n", ms.gs.Name()))
buf.WriteString(ms.dump(smname))
}
return buf.String()
}
/*
WaitForTransfer waits for the data transfer to happen.
*/
func WaitForTransfer() {
for _, ms := range msmap {
ms.transferWorker()
for ms.transferRunning {
time.Sleep(time.Millisecond)
}
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package cluster contains EliasDB's clustering code.
The clustering code provides an abstraction layer to EliasDB's graphstorage.Storage.
This means the actual storage of a cluster can be entirely memory based or use
any other backend as long as it satisfies the graphstorage.Storage interface.
DistributedStorage wraps a graphstorage.Storage and has a manager.MemberManager
object.
Members are identified by a unique name. Calling Start() on manager.MemberManager
registers and starts the RPC server for the member. Cluster internal RPC requests
are served by manager.Server. It is a singleton object which routes RPC calls
to registered MemberManagers - this architecture makes it easy to unit test
the cluster code. The manager.MemberManager has a manager.Client object which
can be used to send messages to the cluster.
The integrity of the cluster is protected by a shared secret (string) among
all members of the cluster. A new member can only join and communicate with
the cluster if it has the secret string. The secret string is never transferred
directly over the network - it is only used for generating a member specific
token which can be verified by all other members.
The clustering code was inspired by Amazon DynamoDB
http://www.allthingsdistributed.com/2012/01/amazon-dynamodb.html
*/
package cluster
import (
"fmt"
"math"
"sync"
"devt.de/krotik/common/datautil"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/graph/graphstorage"
"devt.de/krotik/eliasdb/storage"
)
/*
DistributedStorageError is an error related to the distributed storage. This
error is returned when the data distribution fails, for example when too many
cluster members have failed.
*/
type DistributedStorageError struct {
err error // Wrapped error
}
/*
newError creates a new DistributedStorageError.
*/
func newError(err error) error {
return &DistributedStorageError{err}
}
/*
Error returns a string representation of a DistributedStorageError.
*/
func (dse *DistributedStorageError) Error() string {
return fmt.Sprint("Storage disabled: ", dse.err.Error())
}
/*
DistributedStorage data structure
*/
type DistributedStorage struct {
MemberManager *manager.MemberManager // Manager object
distributionTableLock *sync.Mutex // Mutex to access the distribution table
distributionTable *DistributionTable // Distribution table for the cluster - may be nil
distributionTableError error // Error detail if the storage is disabled
localName string // Name of the local graph storage
localDRHandler func(interface{}, *interface{}) error // Local data request handler
localFlushHandler func() error // Handler to flush the local storage
localCloseHandler func() error // Handler to close the local storage
mainDB map[string]string // Local main copy (only set when requested)
mainDBError error // Last error when main db was requested
}
/*
NewDistributedStorage creates a new cluster graph storage. The distributed storage
wraps around a local graphstorage.Storage. The configuration of the distributed
storage consists of two parts: A normal config map which defines static information
like RPC port, secret string, etc. and a StateInfo object which is used for dynamic
information like cluster members, member status, etc. An empty StateInfo means
that the cluster has only one member.
*/
func NewDistributedStorage(gs graphstorage.Storage, config map[string]interface{},
stateInfo manager.StateInfo) (*DistributedStorage, error) {
ds, ms, err := newDistributedAndMemberStorage(gs, config, stateInfo)
if _, ok := gs.(*graphstorage.MemoryGraphStorage); ok {
msmap[ds] = ms // Keep track of memory storages for debugging
}
return ds, err
}
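/*
Example (not part of EliasDB): a minimal sketch of creating and starting a
single-member, memory-only cluster storage. NewMemoryGraphStorage and
NewMemStateInfo are assumed here as the in-memory implementations used by
EliasDB's tests; an empty state info means a cluster with only one member.
*/
package main

import (
    "log"

    "devt.de/krotik/eliasdb/cluster"
    "devt.de/krotik/eliasdb/cluster/manager"
    "devt.de/krotik/eliasdb/graph/graphstorage"
)

func main() {
    gs := graphstorage.NewMemoryGraphStorage("member1storage")

    ds, err := cluster.NewDistributedStorage(gs, map[string]interface{}{
        manager.ConfigRPC:           "localhost:9021",
        manager.ConfigMemberName:    "member1",
        manager.ConfigClusterSecret: "secret123",
    }, manager.NewMemStateInfo())
    if err != nil {
        log.Fatal(err)
    }

    // Start the member manager and its RPC server; Close shuts down the
    // member manager and the wrapped local storage
    if err := ds.Start(); err != nil {
        log.Fatal(err)
    }
    defer ds.Close()
}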
/*
DSRetNew is the return value on successful creating a distributed storage
(used for testing)
*/
var DSRetNew error
/*
newDistributedAndMemberStorage creates a new cluster graph storage but also returns a
reference to the internal memberStorage object.
*/
func newDistributedAndMemberStorage(gs graphstorage.Storage, config map[string]interface{},
stateInfo manager.StateInfo) (*DistributedStorage, *memberStorage, error) {
var repFac int
// Merge given configuration with default configuration
clusterconfig := datautil.MergeMaps(manager.DefaultConfig, config)
// Make 100% sure there is a secret string
if clusterconfig[manager.ConfigClusterSecret] == "" {
clusterconfig[manager.ConfigClusterSecret] = manager.DefaultConfig[manager.ConfigClusterSecret]
}
// Set replication factor
if f, ok := stateInfo.Get(manager.StateInfoREPFAC); !ok {
repFac = int(math.Max(clusterconfig[manager.ConfigReplicationFactor].(float64), 1))
stateInfo.Put(manager.StateInfoREPFAC, repFac)
stateInfo.Flush()
} else {
repFac = f.(int)
}
// Create member objects - these calls will initialise this member's state info
mm := manager.NewMemberManager(clusterconfig[manager.ConfigRPC].(string),
clusterconfig[manager.ConfigMemberName].(string),
clusterconfig[manager.ConfigClusterSecret].(string), stateInfo)
dt, err := NewDistributionTable(mm.Members(), repFac)
if err != nil {
mm.LogInfo("Storage disabled:", err)
}
ds := &DistributedStorage{mm, &sync.Mutex{}, dt, err, gs.Name(), nil, nil, nil, nil, nil}
// Create MemberStorage instance which is not exposed - the object will
// only be used by the RPC server and called during start and stop. It is
// the only instance which has access to the wrapped storage.GraphStorage.
memberStorage, err := newMemberStorage(ds, gs)
if err != nil {
return nil, nil, err
}
// Register handler function for RPC calls and for closing the local storage
mm.SetHandleDataRequest(memberStorage.handleDataRequest)
ds.localDRHandler = memberStorage.handleDataRequest
ds.localFlushHandler = memberStorage.gs.FlushAll
ds.localCloseHandler = memberStorage.gs.Close
// Set update handler
ds.MemberManager.SetEventHandler(func() {
// Handler for state info updates (this handler is called once the state
// info object has been updated from the current state)
si := mm.StateInfo()
rfo, ok := si.Get(manager.StateInfoREPFAC)
members, ok2 := si.Get(manager.StateInfoMEMBERS)
if ok && ok2 {
// Assert the type only after the presence check to avoid a potential
// panic on a nil interface value
rf := rfo.(int)
distTable, distTableErr := ds.DistributionTable()
numMembers := len(members.([]string)) / 2
numFailedPeers := len(mm.Client.FailedPeers())
// Check if the cluster is operational
if distTableErr == nil && rf > 0 && numFailedPeers > rf-1 {
// Cluster is not operational
if distTable != nil {
err := fmt.Errorf("Too many members failed (total: %v, failed: %v, replication: %v)",
numMembers, numFailedPeers, rf)
mm.LogInfo("Storage disabled:", err.Error())
ds.SetDistributionTableError(err)
}
return
}
// Check if the replication factor has changed or the amount of members
if distTable == nil ||
numMembers != len(distTable.Members()) ||
rf != distTable.repFac {
// Try to renew the distribution table
if dt, err := NewDistributionTable(mm.Members(), rf); err == nil {
ds.SetDistributionTable(dt)
}
}
}
}, memberStorage.transferWorker)
return ds, memberStorage, DSRetNew
}
/*
Start starts the distributed storage.
*/
func (ds *DistributedStorage) Start() error {
return ds.MemberManager.Start()
}
/*
Close closes the distributed storage.
*/
func (ds *DistributedStorage) Close() error {
ds.MemberManager.Shutdown()
return ds.localCloseHandler()
}
/*
IsOperational returns whether this distributed storage is operational
*/
func (ds *DistributedStorage) IsOperational() bool {
ds.distributionTableLock.Lock()
defer ds.distributionTableLock.Unlock()
return ds.distributionTableError == nil && ds.distributionTable != nil
}
/*
DistributionTable returns the current distribution table or an error if the
storage is not available.
*/
func (ds *DistributedStorage) DistributionTable() (*DistributionTable, error) {
ds.distributionTableLock.Lock()
defer ds.distributionTableLock.Unlock()
return ds.distributionTable, ds.distributionTableError
}
/*
SetDistributionTable sets the distribution table and clears any error.
*/
func (ds *DistributedStorage) SetDistributionTable(dt *DistributionTable) {
ds.distributionTableLock.Lock()
defer ds.distributionTableLock.Unlock()
ds.distributionTable = dt
ds.distributionTableError = nil
}
/*
SetDistributionTableError records a distribution table related error. This
clears the current distribution table.
*/
func (ds *DistributedStorage) SetDistributionTableError(err error) {
ds.distributionTableLock.Lock()
defer ds.distributionTableLock.Unlock()
ds.distributionTable = nil
ds.distributionTableError = newError(err)
}
/*
sendDataRequest is used to send data requests into the cluster.
*/
func (ds *DistributedStorage) sendDataRequest(member string, request *DataRequest) (interface{}, error) {
// Check if the request should be handled locally
if member == ds.MemberManager.Name() {
// Make sure to copy the request value for local insert or update requests.
// This is necessary since the serialization buffers are pooled and reused.
// Locally the values are just passed around by reference.
if request.RequestType == RTInsert || request.RequestType == RTUpdate {
var val []byte
datautil.CopyObject(request.Value, &val)
request.Value = val
}
var response interface{}
err := ds.localDRHandler(request, &response)
return response, err
}
return ds.MemberManager.Client.SendDataRequest(member, request)
}
/*
Name returns the name of the cluster DistributedStorage instance.
*/
func (ds *DistributedStorage) Name() string {
return ds.MemberManager.Name()
}
/*
LocalName returns the name of the wrapped local storage instance.
*/
func (ds *DistributedStorage) LocalName() string {
return ds.localName
}
/*
ReplicationFactor returns the replication factor of this cluster member. A
value of 0 means the cluster is currently not operational.
*/
func (ds *DistributedStorage) ReplicationFactor() int {
// Do not do anything if the cluster is not operational
distTable, distTableErr := ds.DistributionTable()
if distTableErr != nil {
return 0
}
return distTable.repFac
}
/*
MainDB returns the main database. The main database is a quick
lookup map for meta data which is always kept in memory.
*/
func (ds *DistributedStorage) MainDB() map[string]string {
ret := make(map[string]string)
// Clear the current mainDB cache
ds.mainDB = nil
// Do not do anything if the cluster is not operational
distTable, distTableErr := ds.DistributionTable()
if distTableErr != nil {
ds.mainDBError = distTableErr
return ret
}
// Main db requests always go to member 1
member := distTable.Members()[0]
request := &DataRequest{RTGetMain, nil, nil, false}
mainDB, err := ds.sendDataRequest(member, request)
if err != nil {
// Cycle through all replicating members if there was an error.
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, rmember := range distTable.Replicas(member) {
mainDB, err = ds.sendDataRequest(rmember, request)
if err == nil {
break
}
}
}
ds.mainDBError = err
if mainDB != nil {
ds.mainDB = mainDB.(map[string]string)
ret = ds.mainDB
}
// If we failed to get the main db then any subsequent flush will fail.
return ret
}
/*
RollbackMain rolls back the main database.
*/
func (ds *DistributedStorage) RollbackMain() error {
// Nothing to do here - the main db will be updated next time it is requested
ds.mainDB = nil
ds.mainDBError = nil
return nil
}
/*
FlushMain writes the main database to the storage.
*/
func (ds *DistributedStorage) FlushMain() error {
// Make sure there is no error
distTable, distTableErr := ds.DistributionTable()
if ds.mainDBError != nil {
return ds.mainDBError
} else if distTableErr != nil {
return distTableErr
}
// Main db requests always go to member 1
member := distTable.Members()[0]
request := &DataRequest{RTSetMain, nil, ds.mainDB, false}
_, err := ds.sendDataRequest(member, request)
if err != nil {
// Cycle through all replicating members if there was an error.
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, rmember := range distTable.Replicas(member) {
_, err = ds.sendDataRequest(rmember, request)
if err == nil {
break
}
}
}
return err
}
/*
FlushAll writes all pending local changes to the storage.
*/
func (ds *DistributedStorage) FlushAll() error {
return ds.localFlushHandler()
}
/*
StorageManager gets a storage manager with a certain name. A non-existing
StorageManager is not created automatically if the create flag is set to false.
*/
func (ds *DistributedStorage) StorageManager(smname string, create bool) storage.Manager {
// Make sure there is no error
distTable, distTableErr := ds.DistributionTable()
if ds.mainDBError != nil {
return nil
} else if distTableErr != nil {
return nil
}
if !create {
// Make sure the storage manager exists if it should not be created.
// Try to get its 1st root value. If nil is returned then the storage
// manager does not exist.
// Root ids always go to member 1 as well as the first insert request for data.
member := distTable.Members()[0]
request := &DataRequest{RTGetRoot, map[DataRequestArg]interface{}{
RPStoreName: smname,
RPRoot: 1,
}, nil, false}
res, err := ds.sendDataRequest(member, request)
if res == nil && err == nil {
return nil
}
}
return &DistributedStorageManager{smname, 0, ds, nil}
}
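/*
An illustrative sketch of using the cluster storage ("main.nodes" is an
assumed storage manager name): Insert routes the object to a member chosen
round-robin, Fetch locates the primary member for the returned location and
falls back to its replicas:

	sm := ds.StorageManager("main.nodes", true)

	loc, err := sm.Insert("some object")
	if err == nil {
		var res string
		err = sm.Fetch(loc, &res) // res == "some object"
	}
*/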
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"bytes"
"encoding/gob"
"fmt"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/storage"
)
/*
DistributedStorageManager is a storage.Manager which sends requests to the
distributed storage.
*/
type DistributedStorageManager struct {
name string // Name of the storage manager
rrc int // Round robin counter
ds *DistributedStorage // Distributed storage which created the instance
rootError error // Last error when root values were handled
}
/*
Name returns the name of the StorageManager instance.
*/
func (dsm *DistributedStorageManager) Name() string {
return fmt.Sprint("DistributedStorageManager: ", dsm.name)
}
/*
Root returns a root value.
*/
func (dsm *DistributedStorageManager) Root(root int) uint64 {
var ret uint64
// Do not do anything if the cluster is not operational
distTable, distTableErr := dsm.ds.DistributionTable()
if distTableErr != nil {
return 0
}
// Root ids always go to member 1
member := distTable.Members()[0]
request := &DataRequest{RTGetRoot, map[DataRequestArg]interface{}{
RPStoreName: dsm.name,
RPRoot: root,
}, nil, false}
res, err := dsm.ds.sendDataRequest(member, request)
if err != nil {
// Cycle through all replicating members if there was an error.
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, rmember := range distTable.Replicas(member) {
res, err = dsm.ds.sendDataRequest(rmember, request)
if err == nil {
break
}
}
}
dsm.rootError = err
if res != nil {
ret = toUInt64(res)
}
return ret
}
/*
SetRoot writes a root value.
*/
func (dsm *DistributedStorageManager) SetRoot(root int, val uint64) {
// Do not do anything if the cluster is not operational
distTable, distTableErr := dsm.ds.DistributionTable()
if distTableErr != nil {
return
}
// Root ids always go to member 1
member := distTable.Members()[0]
request := &DataRequest{RTSetRoot, map[DataRequestArg]interface{}{
RPStoreName: dsm.name,
RPRoot: root,
}, val, false}
_, err := dsm.ds.sendDataRequest(member, request)
if err != nil {
// Cycle through all replicating members if there was an error.
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, rmember := range distTable.Replicas(member) {
_, err = dsm.ds.sendDataRequest(rmember, request)
if err == nil {
break
}
}
}
dsm.rootError = err
}
/*
Insert inserts an object and returns its storage location.
*/
func (dsm *DistributedStorageManager) Insert(o interface{}) (uint64, error) {
return dsm.insertOrUpdate(true, 0, o)
}
/*
Update updates a storage location.
*/
func (dsm *DistributedStorageManager) Update(loc uint64, o interface{}) error {
_, err := dsm.insertOrUpdate(false, loc, o)
return err
}
/*
insertOrUpdate stores an object and returns its storage location and any error.
*/
func (dsm *DistributedStorageManager) insertOrUpdate(insert bool, loc uint64, o interface{}) (uint64, error) {
var member string
var replicatingMembers []string
var rtype RequestType
var ret uint64
// Do not do anything if the cluster is not operational
distTable, distTableErr := dsm.ds.DistributionTable()
if distTableErr != nil {
return 0, distTableErr
}
// Choose the instance this request should be routed to
if insert {
members := distTable.Members()
member = members[(dsm.rrc)%len(members)]
rtype = RTInsert
} else {
member, replicatingMembers = distTable.LocationHome(loc)
rtype = RTUpdate
}
// Serialize the object into a gob bytes stream
bb := storage.BufferPool.Get().(*bytes.Buffer)
defer func() {
bb.Reset()
storage.BufferPool.Put(bb)
}()
errorutil.AssertOk(gob.NewEncoder(bb).Encode(o))
request := &DataRequest{rtype, map[DataRequestArg]interface{}{
RPStoreName: dsm.name,
RPLoc: loc,
}, bb.Bytes(), false}
cloc, err := dsm.ds.sendDataRequest(member, request)
if err == nil {
return toUInt64(cloc), err
}
// An error has occurred - we need to use another member
if rtype == RTInsert {
// Cycle through all members and see which one accepts first
members := distTable.Members()
lenMembers := len(members)
for i := 1; i < lenMembers; i++ {
member = members[(dsm.rrc+i)%lenMembers]
cloc, nerr := dsm.ds.sendDataRequest(member, request)
if nerr == nil {
ret = toUInt64(cloc)
err = nil
break
}
}
} else {
// Cycle through all replicating members and see which one accepts first
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, member := range replicatingMembers {
cloc, nerr := dsm.ds.sendDataRequest(member, request)
if nerr == nil {
ret = toUInt64(cloc)
err = nil
break
}
}
}
return ret, err
}
/*
Free frees a storage location.
*/
func (dsm *DistributedStorageManager) Free(loc uint64) error {
// Do not do anything if the cluster is not operational
distTable, distTableErr := dsm.ds.DistributionTable()
if distTableErr != nil {
return distTableErr
}
// Choose the instance this request should be routed to
member, replicatingMembers := distTable.LocationHome(loc)
request := &DataRequest{RTFree, map[DataRequestArg]interface{}{
RPStoreName: dsm.name,
RPLoc: loc,
}, nil, false}
_, err := dsm.ds.sendDataRequest(member, request)
if err != nil {
// Cycle through all replicating members and see which one accepts first
// (as long as the cluster is considered operational there must be a
// replicating member available to accept the request)
for _, member := range replicatingMembers {
_, nerr := dsm.ds.sendDataRequest(member, request)
if nerr == nil {
err = nil
break
}
}
}
return err
}
/*
Exists checks if an object exists in a given storage location.
*/
func (dsm *DistributedStorageManager) Exists(loc uint64) (bool, error) {
var ret bool
err := dsm.lookupData(loc, &ret, false)
return ret, err
}
/*
Fetch fetches an object from a given storage location and writes it to
a given data container.
*/
func (dsm *DistributedStorageManager) Fetch(loc uint64, o interface{}) error {
return dsm.lookupData(loc, o, true)
}
/*
lookupData fetches or checks for an object in a given storage location.
*/
func (dsm *DistributedStorageManager) lookupData(loc uint64, o interface{}, fetch bool) error {
var rt RequestType
// Do not do anything if the cluster is not operational
distTable, distTableErr := dsm.ds.DistributionTable()
if distTableErr != nil {
return distTableErr
}
// Choose the instance this request should be routed to
primaryMember, secondaryMembers := distTable.LocationHome(loc)
if fetch {
rt = RTFetch
} else {
rt = RTExists
}
request := &DataRequest{rt, map[DataRequestArg]interface{}{
RPStoreName: dsm.name,
RPLoc: loc,
}, nil, false}
res, err := dsm.ds.sendDataRequest(primaryMember, request)
if err != nil || (!fetch && !res.(bool)) {
// Try secondary members if the primary member failed or the data didn't exist there
var serr error
for _, member := range secondaryMembers {
res, serr = dsm.ds.sendDataRequest(member, request)
if serr == nil {
err = nil
break
}
}
}
if err == nil {
if !fetch {
*o.(*bool) = res.(bool)
} else {
gob.NewDecoder(bytes.NewReader(res.([]byte))).Decode(o)
}
}
return err
}
/*
FetchCached is not implemented for a DistributedStorageManager. Only defined to satisfy
the StorageManager interface.
*/
func (dsm *DistributedStorageManager) FetchCached(loc uint64) (interface{}, error) {
return nil, storage.NewStorageManagerError(storage.ErrNotInCache, "", dsm.Name())
}
/*
Flush is not implemented for a DistributedStorageManager. All changes are immediately
written to disk in a cluster.
*/
func (dsm *DistributedStorageManager) Flush() error {
_, distTableErr := dsm.ds.DistributionTable()
// Do not do anything if the cluster is not operational
if distTableErr != nil {
return distTableErr
}
// Increase the round robin counter - things which belong together should be
// stored on the same members
dsm.rrc++
return nil
}
/*
Rollback is not implemented for a DistributedStorageManager. All changes are immediately
written to disk in a cluster.
*/
func (dsm *DistributedStorageManager) Rollback() error {
return nil
}
/*
Close is not implemented for a DistributedStorageManager. Only the local storage must
be closed which is done when the DistributedStorage is shut down.
*/
func (dsm *DistributedStorageManager) Close() error {
if _, distTableErr := dsm.ds.DistributionTable(); distTableErr != nil {
return distTableErr
}
return dsm.rootError
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"bytes"
"errors"
"fmt"
"math"
)
/*
defaultDistributionRange is the default range of possible addresses for any cluster.
Depending on the cluster size each member is in charge of a certain part of this range.
*/
var defaultDistributionRange = uint64(math.MaxUint64)
/*
DistributionTable is used to locate data in a cluster. The table contains
all cluster members and can identify replication members for given data locations.
*/
type DistributionTable struct {
members []string // All known cluster members
memberRange uint64 // Range for a single member
distrange []uint64 // Distribution range among members
mranges map[string]uint64 // Map member ranges
replicas map[string][]string // Map of replicas (which members are replicas of a member)
replicates map[string][]string // Map of replicates (what is replicated on a member)
repFac int // Replication factor of the cluster
space uint64 // Address space which is distributed in the cluster
}
/*
NewDistributionTable creates a new distribution table.
*/
func NewDistributionTable(members []string, repFac int) (*DistributionTable, error) {
return createDistributionTable(members, repFac, defaultDistributionRange)
}
/*
createDistributionTable creates a new distribution table.
*/
func createDistributionTable(members []string, repFac int, space uint64) (*DistributionTable, error) {
var distrange []uint64
replicas := make(map[string][]string)
replicates := make(map[string][]string)
mranges := make(map[string]uint64)
// Check for bogus values
if repFac < 1 {
return nil, errors.New("Replication factor must be > 0")
} else if repFac > len(members) {
return nil, fmt.Errorf("Not enough members (%v) for given replication factor: %v",
len(members), repFac)
}
// Do the range calculations
memberRange := space / uint64(len(members))
for i := 0; i < len(members); i++ {
mrange := uint64(i) * memberRange
mranges[members[i]] = mrange
distrange = append(distrange, mrange)
var replicasList []string
for j := 1; j < repFac; j++ {
replicasList = append(replicasList, members[(i+j)%len(members)])
}
replicas[members[i]] = replicasList
replicates[members[i]] = make([]string, 0, repFac)
}
for m, r := range replicas {
for _, rm := range r {
replicates[rm] = append(replicates[rm], m)
}
}
return &DistributionTable{members, memberRange, distrange, mranges, replicas,
replicates, repFac, space}, nil
}
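/*
For example, a table for an assumed three member cluster with replication
factor 2 splits the default address space into thirds and replicates each
member's data on the next member in the list:

	dt, _ := NewDistributionTable([]string{"m1", "m2", "m3"}, 2)

	fmt.Println(dt.Replicas("m1"))    // [m2]
	fmt.Println(dt.MemberRange("m1")) // 0 6148914691236517204
	fmt.Println(dt.MemberRange("m3")) // 12297829382473034410 18446744073709551615
*/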
/*
Members returns all known cluster members.
*/
func (dd *DistributionTable) Members() []string {
return dd.members
}
/*
Replicas returns all replicas for a given member.
*/
func (dd *DistributionTable) Replicas(name string) []string {
return dd.replicas[name]
}
/*
MemberRange returns the location range of a given member.
*/
func (dd *DistributionTable) MemberRange(name string) (uint64, uint64) {
mrange := dd.mranges[name]
if name == dd.members[len(dd.members)-1] {
return mrange, dd.space
}
return mrange, mrange + dd.memberRange - 1
}
/*
ReplicationRange returns the location range which is replicated by a given member.
*/
func (dd *DistributionTable) ReplicationRange(name string) (uint64, uint64) {
var start, end uint64
start = defaultDistributionRange
for _, r := range dd.replicates[name] {
rstart, rend := dd.MemberRange(r)
if rstart < start {
start = rstart
}
if rend > end {
end = rend
}
}
return start, end
}
/*
LocationHome returns the member which is in charge of a given location and all its replicas.
*/
func (dd *DistributionTable) LocationHome(loc uint64) (string, []string) {
var member string
for i, r := range dd.distrange {
if loc < r {
member = dd.members[i-1]
return member, dd.replicas[member]
}
}
member = dd.members[len(dd.members)-1]
return member, dd.replicas[member]
}
/*
OtherReplicationMembers returns all members of a replication group (identified
by a given location) minus a given member.
*/
func (dd *DistributionTable) OtherReplicationMembers(loc uint64, name string) []string {
var ret []string
primary, replicas := dd.LocationHome(loc)
if name == primary {
ret = replicas
} else {
ret = append(ret, primary)
for _, rep := range replicas {
if rep != name {
ret = append(ret, rep)
}
}
}
return ret
}
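/*
Continuing the assumed three member example above, a location in the first
third of the address space is homed on m1 and replicated on m2:

	primary, replicas := dt.LocationHome(42)       // "m1", [m2]
	others := dt.OtherReplicationMembers(42, "m2") // [m1]
*/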
/*
String returns a string representation of this distribution table.
*/
func (dd *DistributionTable) String() string {
var ret bytes.Buffer
ret.WriteString("Location ranges:\n")
for _, member := range dd.members {
f, t := dd.MemberRange(member)
ret.WriteString(fmt.Sprintf("%v: %v -> %v\n", member, f, t))
}
ret.WriteString(fmt.Sprintf("Replicas (factor=%v) :\n", dd.repFac))
for _, member := range dd.members {
ret.WriteString(fmt.Sprintf("%v: %v\n", member, dd.replicas[member]))
}
return ret.String()
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package manager
import (
"encoding/gob"
"fmt"
"net"
"net/rpc"
"sort"
"strings"
"sync"
"time"
"devt.de/krotik/common/datautil"
)
func init() {
// Make sure we can use the relevant types in a gob operation
gob.Register(&MemberToken{})
}
/*
Known cluster locks
*/
const (
ClusterLockUpdateStateInfo = "ClusterLockUpdateStateInfo"
)
/*
DialTimeout is the dial timeout for RPC connections
*/
var DialTimeout = 10 * time.Second
/*
MemberToken is used to authenticate a member in the cluster
*/
type MemberToken struct {
MemberName string
MemberAuth string
}
/*
Client is the client for the RPC cluster API of a cluster member.
*/
type Client struct {
token *MemberToken // Token to be sent to other members for authentication
rpc string // This client's rpc network interface (may be empty in case of pure clients)
peers map[string]string // Map of member names to their rpc network interface
conns map[string]*rpc.Client // Map of member names to network connections
failed map[string]string // Map of (temporary) failed members
maplock *sync.RWMutex // Lock for maps
clusterLocks *datautil.MapCache // Cluster locks and which member holds them
}
/*
MemberErrors map for simulated member errors (only used for testing)
*/
var MemberErrors map[string]error
/*
MemberErrorExceptions map to exclude members from simulated member errors (only used for testing)
*/
var MemberErrorExceptions map[string][]string
// General cluster client API
// ==========================
/*
IsFailed checks if the given member is in the failed state.
*/
func (mc *Client) IsFailed(name string) bool {
mc.maplock.Lock()
defer mc.maplock.Unlock()
_, ok := mc.failed[name]
return ok
}
/*
FailedTotal returns the total number of failed members.
*/
func (mc *Client) FailedTotal() int {
mc.maplock.Lock()
defer mc.maplock.Unlock()
return len(mc.failed)
}
/*
FailedPeers returns a list of failed members.
*/
func (mc *Client) FailedPeers() []string {
var ret []string
mc.maplock.Lock()
defer mc.maplock.Unlock()
for p := range mc.failed {
ret = append(ret, p)
}
sort.Strings(ret)
return ret
}
/*
FailedPeerErrors returns the same list as FailedPeers but with error messages.
*/
func (mc *Client) FailedPeerErrors() []string {
var ret []string
peers := mc.FailedPeers()
// Read the error messages under the map lock to avoid a race with SendRequest
mc.maplock.Lock()
defer mc.maplock.Unlock()
for _, p := range peers {
e := mc.failed[p]
ret = append(ret, fmt.Sprintf("%v (%v)", p, e))
}
return ret
}
/*
OperationalPeers returns all operational peers and an error if too many cluster members
have failed.
*/
func (mc *Client) OperationalPeers() ([]string, error) {
var err error
var peers []string
mc.maplock.Lock()
defer mc.maplock.Unlock()
for peer := range mc.peers {
if _, ok := mc.failed[peer]; !ok {
peers = append(peers, peer)
}
}
if len(mc.peers) > 0 && len(peers) == 0 {
err = &Error{ErrClusterState, "No peer cluster member is reachable"}
} else {
sort.Strings(peers)
}
return peers, err
}
/*
SendRequest sends a request to another cluster member. Unreachable members
get an entry in the failed map and the returned error is of type ErrMemberComm.
All other errors should be considered serious.
*/
func (mc *Client) SendRequest(member string, remoteCall RPCFunction,
args map[RequestArgument]interface{}) (interface{}, error) {
var err error
// Function to categorize errors
handleError := func(err error) error {
if _, ok := err.(net.Error); ok {
// We got a network error and the communication with a member
// is interrupted - add the member to the failing members list
mc.maplock.Lock()
// Set failure state
mc.failed[member] = err.Error()
// Remove the connection
delete(mc.conns, member)
mc.maplock.Unlock()
return &Error{ErrMemberComm, err.Error()}
}
// Do not wrap a cluster member error in another cluster member error
if strings.HasPrefix(err.Error(), "ClusterError: "+ErrMemberError.Error()) {
return err
}
return &Error{ErrMemberError, err.Error()}
}
mc.maplock.Lock()
laddr, ok := mc.peers[member]
mc.maplock.Unlock()
if ok {
// Get network connection to the member
mc.maplock.Lock()
conn, ok := mc.conns[member]
mc.maplock.Unlock()
if !ok {
c, err := net.DialTimeout("tcp", laddr, DialTimeout)
if err != nil {
LogDebug(mc.token.MemberName, ": ",
fmt.Sprintf("- %v.%v (laddr=%v err=%v)", member, remoteCall, laddr, err))
return nil, handleError(err)
}
conn = rpc.NewClient(c)
mc.maplock.Lock()
mc.conns[member] = conn
mc.maplock.Unlock()
}
// Assemble the request
request := map[RequestArgument]interface{}{
RequestTARGET: member,
RequestTOKEN: mc.token,
}
if args != nil {
for k, v := range args {
request[k] = v
}
}
var response interface{}
LogDebug(mc.token.MemberName, ": ",
fmt.Sprintf("> %v.%v (laddr=%v)", member, remoteCall, laddr))
if err = MemberErrors[member]; err == nil || isErrorExcepted(mc.token.MemberName, member) {
err = conn.Call("Server."+string(remoteCall), request, &response)
}
LogDebug(mc.token.MemberName, ": ",
fmt.Sprintf("< %v.%v (err=%v)", member, remoteCall, err))
if err != nil {
return nil, handleError(err)
}
return response, nil
}
return nil, &Error{ErrUnknownPeer, member}
}
/*
SendPing sends a ping to a member and returns the result. The second argument
is only needed if the target member is not a known peer and should be an empty
string in all other cases.
*/
func (mc *Client) SendPing(member string, rpc string) ([]string, error) {
if _, ok := mc.peers[member]; rpc != "" && !ok {
// Add the member temporarily
mc.peers[member] = rpc
defer func() {
mc.maplock.Lock()
delete(mc.peers, member)
delete(mc.conns, member)
delete(mc.failed, member)
mc.maplock.Unlock()
}()
}
res, err := mc.SendRequest(member, RPCPing, nil)
if res != nil {
return res.([]string), err
}
return nil, err
}
// Cluster membership functions
// ============================
/*
SendJoinCluster sends a request to a cluster member to join the caller to the cluster.
Pure clients cannot use this function as this call requires the Client.rpc field to be set.
*/
func (mc *Client) SendJoinCluster(targetMember string, targetMemberRPC string) (map[string]interface{}, error) {
// Check we are on a cluster member - pure clients will fail here
if mc.rpc == "" {
return nil, &Error{ErrClusterConfig, "Cannot add member without RPC interface"}
}
// Ensure the new member is in the peers map
mc.maplock.Lock()
mc.peers[targetMember] = targetMemberRPC
mc.maplock.Unlock()
// Join the cluster
res, err := mc.SendRequest(targetMember, RPCJoinCluster, map[RequestArgument]interface{}{
RequestMEMBERNAME: mc.token.MemberName,
RequestMEMBERRPC: mc.rpc,
})
if res != nil && err == nil {
return bytesToMap(res.([]byte)), err
}
mc.maplock.Lock()
delete(mc.peers, targetMember)
delete(mc.conns, targetMember)
delete(mc.failed, targetMember)
mc.maplock.Unlock()
return nil, err
}
/*
SendEjectMember sends a request to eject a member from the cluster.
*/
func (mc *Client) SendEjectMember(member string, memberToEject string) error {
_, err := mc.SendRequest(member, RPCEjectMember, map[RequestArgument]interface{}{
RequestMEMBERNAME: memberToEject,
})
return err
}
// Cluster-wide locking
// ====================
/*
SendAcquireClusterLock tries to acquire a named lock on all members of the cluster.
It fails if the lock is already acquired or if not enough cluster members can be
reached.
*/
func (mc *Client) SendAcquireClusterLock(lockName string) error {
// Get operational peers (operational cluster is NOT required - up to the calling
// function to decide if the cluster should be operational)
peers, _ := mc.OperationalPeers()
// Try to acquire the lock on all members
var takenLocks []string
for _, peer := range peers {
_, err := mc.SendRequest(peer,
RPCAcquireLock, map[RequestArgument]interface{}{
RequestLOCK: lockName,
})
if err != nil && err.(*Error).Type == ErrMemberComm {
// If we can't communicate with a member just continue and
// don't take the lock - the member is now in the failed list
// and subsequent calls to operational peers should determine
// if the cluster is functional or not
continue
} else if err != nil {
// If there was a serious error try to release all taken locks
for _, lockPeer := range takenLocks {
mc.SendRequest(lockPeer,
RPCReleaseLock, map[RequestArgument]interface{}{
RequestLOCK: lockName,
})
}
return err
} else {
takenLocks = append(takenLocks, peer)
}
}
// Now take the lock on this member
mc.maplock.Lock()
mc.clusterLocks.Put(lockName, mc.token.MemberName)
mc.maplock.Unlock()
return nil
}
/*
SendReleaseClusterLock tries to release a named lock on all members of the cluster.
It is not an error if a lock is not taken (or has expired) on this member or any other
target member.
*/
func (mc *Client) SendReleaseClusterLock(lockName string) error {
// Get operational peers (operational cluster is NOT required - up to the calling
// function to decide if the cluster should be operational)
peers, _ := mc.OperationalPeers()
// Try to release the lock on all members
for _, peer := range peers {
_, err := mc.SendRequest(peer,
RPCReleaseLock, map[RequestArgument]interface{}{
RequestLOCK: lockName,
})
if err != nil && err.(*Error).Type != ErrMemberComm {
return err
}
}
// Now release the lock on this member
mc.maplock.Lock()
mc.clusterLocks.Remove(lockName)
mc.maplock.Unlock()
return nil
}
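/*
The two calls are typically paired, e.g. guarding a state info update with
the known cluster lock from above (sketch):

	if err := mc.SendAcquireClusterLock(ClusterLockUpdateStateInfo); err != nil {
		return err
	}

	// ... update the shared state ...

	return mc.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
*/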
// StateInfo functions
// ===================
/*
SendStateInfoRequest requests the state info of a member and returns it.
*/
func (mc *Client) SendStateInfoRequest(member string) (map[string]interface{}, error) {
res, err := mc.SendRequest(member, RPCSIRequest, nil)
if res != nil {
return bytesToMap(res.([]byte)), err
}
return nil, err
}
// Data request functions
// ======================
/*
SendDataRequest sends a data request to a member and returns its response.
*/
func (mc *Client) SendDataRequest(member string, reqdata interface{}) (interface{}, error) {
return mc.SendRequest(member, RPCDataRequest, map[RequestArgument]interface{}{
RequestDATA: reqdata,
})
}
// Static member info functions
// ============================
/*
SendMemberInfoRequest requests the static member info of a member and returns it.
*/
func (mc *Client) SendMemberInfoRequest(member string) (map[string]interface{}, error) {
res, err := mc.SendRequest(member, RPCMIRequest, nil)
if res != nil {
return bytesToMap(res.([]byte)), err
}
return nil, err
}
// Helper functions
// ================
/*
isErrorExcepted checks if a given route should be excepted from simulated errors (only used for testing)
*/
func isErrorExcepted(source string, target string) bool {
if exceptions, ok := MemberErrorExceptions[source]; ok {
for _, exception := range exceptions {
if exception == target {
return true
}
}
}
return false
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package manager contains the management code for EliasDB's clustering feature.
The management code deals with cluster building, general communication between cluster
members, verification of communicating peers and monitoring of members.
The cluster structure is a pure peer-to-peer design with no single point of failure. All
members of the cluster share a versioned cluster state which is persisted. Members have
to be added or removed from the cluster manually. Each member also has a member info object
which the application using the cluster can use to store additional member related
information.
Temporary failures are detected automatically. Every member of the cluster monitors the
state of all its peers by sending ping requests to them on a regular schedule.
*/
package manager
import (
"bytes"
"encoding/gob"
"fmt"
"sync"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/fileutil"
"devt.de/krotik/eliasdb/storage"
)
// Cluster config
// ==============
/*
ConfigRPC is the RPC network interface for the local cluster manager
*/
const ConfigRPC = "ClusterMemberRPC"
/*
ConfigMemberName is the name of the cluster member
*/
const ConfigMemberName = "ClusterMemberName"
/*
ConfigClusterSecret is the secret which authorizes a cluster member
(the secret must never be sent directly over the network)
*/
const ConfigClusterSecret = "ClusterSecret"
/*
ConfigReplicationFactor is the number of times a given datum must be stored
redundantly. The cluster can suffer n-1 member losses before it becomes
non-operational. The value is set once in the configuration and afterwards
becomes part of the global cluster state info (once it is there the config
value is ignored).
*/
const ConfigReplicationFactor = "ReplicationFactor"
/*
DefaultConfig is the default configuration
*/
var DefaultConfig = map[string]interface{}{
ConfigRPC: "127.0.0.1:9030",
ConfigMemberName: "member1",
ConfigClusterSecret: "secret123",
ConfigReplicationFactor: 1.0,
}
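/*
A caller usually overrides only some of these values and merges the rest in
from DefaultConfig - a sketch for an assumed second member:

	config := datautil.MergeMaps(DefaultConfig, map[string]interface{}{
		ConfigMemberName: "member2",
		ConfigRPC:        "127.0.0.1:9031",
	})
*/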
// Cluster state info
// ==================
/*
Known StateInfo entries
*/
const (
StateInfoTS = "ts" // Timestamp of state info
StateInfoTSOLD = "tsold" // Previous timestamp of state info
StateInfoMEMBERS = "members" // List of known cluster members
StateInfoFAILED = "failed" // List of failed peers
StateInfoREPFAC = "replication" // Replication factor of the cluster
)
/*
Known MemberInfo entries
*/
const (
MemberInfoError = "error" // Error message if a member was not reachable
MemberInfoTermURL = "termurl" // URL to the cluster terminal of the member
)
/*
StateInfo models a state object which stores cluster related data. This
information is exchanged between cluster members. It is not expected that
the info changes frequently.
*/
type StateInfo interface {
/*
Put stores some data in the state info.
*/
Put(key string, value interface{})
/*
Get retrieves some data from the state info.
*/
Get(key string) (interface{}, bool)
/*
Map returns the state info as a map.
*/
Map() map[string]interface{}
/*
Flush persists the state info.
*/
Flush() error
}
/*
DefaultStateInfo is the default state info which uses a file to persist its data.
*/
type DefaultStateInfo struct {
*datautil.PersistentMap
datalock *sync.RWMutex
}
/*
NewDefaultStateInfo creates a new DefaultStateInfo.
*/
func NewDefaultStateInfo(filename string) (StateInfo, error) {
var pm *datautil.PersistentMap
var err error
if res, _ := fileutil.PathExists(filename); !res {
pm, err = datautil.NewPersistentMap(filename)
if err != nil {
return nil, &Error{ErrClusterConfig,
fmt.Sprintf("Cannot create state info file %v: %v",
filename, err.Error())}
}
} else {
pm, err = datautil.LoadPersistentMap(filename)
if err != nil {
return nil, &Error{ErrClusterConfig,
fmt.Sprintf("Cannot load state info file %v: %v",
filename, err.Error())}
}
}
return &DefaultStateInfo{pm, &sync.RWMutex{}}, nil
}
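/*
Usage sketch ("cluster.state" is an assumed file name): create or load the
persisted state info and store a value:

	si, err := NewDefaultStateInfo("cluster.state")
	if err == nil {
		si.Put("mykey", "myvalue")
		err = si.Flush() // Persist the change to disk
	}
*/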
/*
Map returns the state info as a map.
*/
func (dsi *DefaultStateInfo) Map() map[string]interface{} {
var ret map[string]interface{}
datautil.CopyObject(dsi.Data, &ret)
return ret
}
/*
Get retrieves some data from the state info.
*/
func (dsi *DefaultStateInfo) Get(key string) (interface{}, bool) {
dsi.datalock.RLock()
defer dsi.datalock.RUnlock()
v, ok := dsi.Data[key]
return v, ok
}
/*
Put stores some data in the state info.
*/
func (dsi *DefaultStateInfo) Put(key string, value interface{}) {
dsi.datalock.Lock()
defer dsi.datalock.Unlock()
dsi.Data[key] = value
}
/*
Flush persists the state info.
*/
func (dsi *DefaultStateInfo) Flush() error {
if err := dsi.PersistentMap.Flush(); err != nil {
return &Error{ErrClusterConfig,
fmt.Sprintf("Cannot persist state info: %v",
err.Error())}
}
return nil
}
/*
MsiRetFlush is nil or the error which should be returned by a Flush call (used for testing)
*/
var MsiRetFlush error
/*
MemStateInfo is a state info object which does not persist its data.
*/
type MemStateInfo struct {
data map[string]interface{}
datalock *sync.RWMutex
}
/*
NewMemStateInfo creates a new MemStateInfo.
*/
func NewMemStateInfo() StateInfo {
return &MemStateInfo{make(map[string]interface{}), &sync.RWMutex{}}
}
/*
Map returns the state info as a map.
*/
func (msi *MemStateInfo) Map() map[string]interface{} {
var ret map[string]interface{}
datautil.CopyObject(msi.data, &ret)
return ret
}
/*
Get retrieves some data from the state info.
*/
func (msi *MemStateInfo) Get(key string) (interface{}, bool) {
msi.datalock.RLock()
defer msi.datalock.RUnlock()
v, ok := msi.data[key]
return v, ok
}
/*
Put stores some data in the state info.
*/
func (msi *MemStateInfo) Put(key string, value interface{}) {
msi.datalock.Lock()
defer msi.datalock.Unlock()
msi.data[key] = value
}
/*
Flush does not persist anything as MemStateInfo only keeps data in memory.
*/
func (msi *MemStateInfo) Flush() error {
return MsiRetFlush
}
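/*
MemStateInfo is handy for tests and purely in-memory clusters, e.g.:

	si := NewMemStateInfo()
	si.Put("mykey", "myvalue")
	errorutil.AssertOk(si.Flush()) // No-op unless MsiRetFlush is set
*/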
// Helper functions to properly serialize maps
// ===========================================
/*
mapToBytes converts a given map to bytes. This method panics on errors.
*/
func mapToBytes(m map[string]interface{}) []byte {
bb := storage.BufferPool.Get().(*bytes.Buffer)
defer func() {
bb.Reset()
storage.BufferPool.Put(bb)
}()
errorutil.AssertOk(gob.NewEncoder(bb).Encode(m))
return bb.Bytes()
}
/*
bytesToMap tries to convert a given byte array into a map. This method panics on errors.
*/
func bytesToMap(b []byte) map[string]interface{} {
var ret map[string]interface{}
errorutil.AssertOk(gob.NewDecoder(bytes.NewReader(b)).Decode(&ret))
return ret
}
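/*
The two helpers are inverses of each other; a round trip yields a deep copy
of the original map:

	m := map[string]interface{}{"answer": "42"}
	m2 := bytesToMap(mapToBytes(m)) // m2 is a deep copy of m
*/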
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package manager
import (
"errors"
"fmt"
"log"
)
// Logging
// =======
/*
Logger is a function which processes log messages from the cluster
*/
type Logger func(v ...interface{})
/*
LogInfo is called if an info message is logged in the cluster code
*/
var LogInfo = Logger(log.Print)
/*
LogDebug is called if a debug message is logged in the cluster code
(by default disabled)
*/
var LogDebug = Logger(LogNull)
/*
LogNull is a discarding logger to be used for disabling loggers
*/
var LogNull = func(v ...interface{}) {
}
// Errors
// ======
/*
Error is a cluster related error
*/
type Error struct {
Type error // Error type (to be used for equal checks)
Detail string // Details of this error
}
/*
Error returns a human-readable string representation of this error.
*/
func (ge *Error) Error() string {
if ge.Detail != "" {
return fmt.Sprintf("ClusterError: %v (%v)", ge.Type, ge.Detail)
}
return fmt.Sprintf("ClusterError: %v", ge.Type)
}
/*
Cluster related error types
*/
var (
ErrMemberComm = errors.New("Network error")
ErrMemberError = errors.New("Member error")
ErrClusterConfig = errors.New("Cluster configuration error")
ErrClusterState = errors.New("Cluster state error")
ErrUnknownPeer = errors.New("Unknown peer member")
ErrUnknownTarget = errors.New("Unknown target member")
ErrInvalidToken = errors.New("Invalid member token")
ErrNotMember = errors.New("Client is not a cluster member")
ErrLockTaken = errors.New("Requested lock is already taken")
ErrLockNotOwned = errors.New("Requested lock not owned")
)
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package manager
import (
"fmt"
"strconv"
"strings"
)
/*
runHousekeeping flag to switch off automatic start of housekeeping
*/
var runHousekeeping = true
/*
FreqHousekeeping is the frequency of running housekeeping tasks (ms)
*/
var FreqHousekeeping float64 = 1000
/*
logHousekeeping flag to write a log message every time the housekeeping task is running
*/
var logHousekeeping = false
/*
HousekeepingWorker is the background thread which handles various tasks to provide
"eventual" consistency for the cluster.
*/
func (mm *MemberManager) HousekeepingWorker() {
mm.housekeepingLock.Lock()
defer mm.housekeepingLock.Unlock()
if mm.StopHousekeeping {
return
} else if logHousekeeping {
LogDebug(mm.name, "(HK): Running housekeeping task")
}
// Special function which ensures that the given member is removed from the
// failed list.
removeFromFailedState := func(peer string) {
mm.Client.maplock.Lock()
defer mm.Client.maplock.Unlock()
if _, ok := mm.Client.failed[peer]; ok {
// Remove a member from the failed state list and send an update
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Removing %v from list of failed members", peer))
delete(mm.Client.failed, peer)
}
}
// Housekeeping will try to talk to all peers
resolveConflict := false // Flag to resolve a state conflict at the end of a cycle.
for peer := range mm.Client.peers {
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Housekeeping talking to: %v", peer))
// Send a ping to the member
res, err := mm.Client.SendPing(peer, "")
if err != nil {
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Error pinging %v - %v", peer, err))
continue
} else if len(res) == 1 {
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Member %v says this instance is not part of the cluster", peer))
mm.Client.maplock.Lock()
mm.Client.failed[peer] = ErrNotMember.Error()
mm.Client.maplock.Unlock()
continue
}
// Check timestamp on the result and see where this member is:
peerTsMember := res[1]
peerTsTS, _ := strconv.ParseInt(res[2], 10, 64)
peerTsOldMember := res[3]
peerTsOldTS, _ := strconv.ParseInt(res[4], 10, 64)
simmTS, _ := mm.stateInfo.Get(StateInfoTS)
mmTS := simmTS.([]string)
simmOldTS, _ := mm.stateInfo.Get(StateInfoTSOLD)
mmOldTS := simmOldTS.([]string)
mmTsMember := mmTS[0]
mmTsTS, _ := strconv.ParseInt(mmTS[1], 10, 64)
mmTsOldMember := mmOldTS[0]
mmTsOldTS, _ := strconv.ParseInt(mmOldTS[1], 10, 64)
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("TS Me : Curr:%v:%v - Old:%v:%v", mmTsMember, mmTsTS, mmTsOldMember, mmTsOldTS))
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("TS Peer: Curr:%v:%v - Old:%v:%v", peerTsMember, peerTsTS, peerTsOldMember, peerTsOldTS))
if peerTsTS > mmTsTS || peerTsMember != mmTsMember {
// Peer has a newer version
if peerTsMember == mmTsMember && peerTsOldMember == mmTsMember && peerTsOldTS == mmTsTS {
// Peer has the next state info version - update the local state info
sf, err := mm.Client.SendStateInfoRequest(peer)
if err == nil {
LogDebug(mm.name, ": Updating state info of member")
mm.applyStateInfo(sf)
}
} else {
// Peer has a different version - potential conflict; send a
// state update at the end of the cycle
if sf, err := mm.Client.SendStateInfoRequest(peer); err == nil {
LogDebug(mm.name, ": Merging members in state infos")
// Add any newly known cluster members
mm.applyStateInfoPeers(sf, false)
resolveConflict = true
}
}
// Remove the member from the failed state list if it is on there
removeFromFailedState(peer)
} else if peerTsTS == mmTsTS && peerTsMember == mmTsMember {
// Peer is up-to-date - check if it is in a failed state list
removeFromFailedState(peer)
}
// We do nothing with members using an outdated cluster state
// they should update eventually through their own housekeeping
}
// Check if there is a new failed members list
sfFailed, _ := mm.stateInfo.Get(StateInfoFAILED)
if len(sfFailed.([]string))/2 != len(mm.Client.failed) || resolveConflict {
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Updating other members with current failed members list: %v",
strings.Join(mm.Client.FailedPeerErrors(), ", ")))
if err := mm.UpdateClusterStateInfo(); err != nil {
// Just update local state info if we could not update the peers
LogDebug(mm.name, "(HK): ",
fmt.Sprintf("Could not update cluster state: %v", err.Error()))
mm.updateStateInfo(true)
}
}
// Notify others that housekeeping has finished
mm.notifyHouseKeeping()
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package manager
import (
"crypto/sha512"
"fmt"
"math/rand"
"net"
"net/rpc"
"sort"
"strconv"
"sync"
"time"
"devt.de/krotik/common/datautil"
)
/*
MemberManager is the management object for a cluster member.
This is the main object of the clustering code - it contains the main API.
A member registers itself to the RPC server which is the global
Server (server) object. Each cluster member needs to have a unique name.
Communication between members is secured by using a secret string which
is never exchanged over the network and a hash generated token which
identifies a member.
Each MemberManager object contains a Client object which can be used to
communicate with other cluster members. This object should be used by pure
clients - code which should communicate with the cluster without running an
actual member.
*/
type MemberManager struct {
name string // Name of the cluster member
secret string // Cluster secret
stateInfo StateInfo // StateInfo object which can persist runtime configuration
memberInfo map[string]interface{} // Static info about this member
housekeeping bool // Housekeeping thread running
housekeepingLock *sync.Mutex // Lock for housekeeping (prevent housekeeping from running)
StopHousekeeping bool // Flag to temporarily stop housekeeping
handleDataRequest func(interface{}, *interface{}) error // Handler for cluster data requests
notifyStateUpdate func() // Handler which is called when the state info is updated
notifyHouseKeeping func() // Handler which is called each time the housekeeping thread has finished
Client *Client // RPC client object
listener net.Listener // RPC server listener
wg sync.WaitGroup // RPC server Waitgroup for listener shutdown
}
/*
NewMemberManager creates a new MemberManager object.
*/
func NewMemberManager(rpcInterface string, name string, secret string, stateInfo StateInfo) *MemberManager {
// Generate member token
token := &MemberToken{name, fmt.Sprintf("%X", sha512.Sum512_224([]byte(name+secret)))}
// By default a client can hold a lock for up to 30 seconds before it is cleared.
mm := &MemberManager{name, secret, stateInfo, make(map[string]interface{}),
false, &sync.Mutex{}, false, func(interface{}, *interface{}) error { return nil }, func() {}, func() {},
&Client{token, rpcInterface, make(map[string]string), make(map[string]*rpc.Client),
make(map[string]string), &sync.RWMutex{}, datautil.NewMapCache(0, 30)},
nil, sync.WaitGroup{}}
// Check if given state info should be initialized or applied
if _, ok := stateInfo.Get(StateInfoTS); !ok {
mm.updateStateInfo(true)
} else {
mm.applyStateInfo(stateInfo.Map())
}
return mm
}
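/*
A sketch of building a small cluster (names, addresses and secret are
assumptions for the example):

	si1, _ := NewDefaultStateInfo("member1.state")
	mm1 := NewMemberManager("127.0.0.1:9030", "member1", "secret123", si1)
	errorutil.AssertOk(mm1.Start())

	si2, _ := NewDefaultStateInfo("member2.state")
	mm2 := NewMemberManager("127.0.0.1:9031", "member2", "secret123", si2)
	errorutil.AssertOk(mm2.Start())

	// member2 joins the cluster via the already running member1
	errorutil.AssertOk(mm2.JoinCluster("member1", "127.0.0.1:9030"))
*/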
// General cluster member API
// ==========================
/*
Start starts the manager process for this cluster member.
*/
func (mm *MemberManager) Start() error {
mm.LogInfo("Starting member manager ", mm.name, " rpc server on: ", mm.Client.rpc)
l, err := net.Listen("tcp", mm.Client.rpc)
if err != nil {
return err
}
go func() {
rpc.Accept(l)
mm.wg.Done()
mm.LogInfo("Connection closed: ", mm.Client.rpc)
}()
mm.listener = l
server.managers[mm.name] = mm
if runHousekeeping {
s1 := rand.NewSource(time.Now().UnixNano())
r1 := rand.New(s1)
// Start housekeeping thread which will check for configuration changes
mm.housekeeping = true
go func() {
for mm.housekeeping {
mm.HousekeepingWorker()
time.Sleep(time.Duration(FreqHousekeeping*(1+r1.Float64())) * time.Millisecond)
}
mm.wg.Done()
}()
}
return nil
}
/*
Shutdown shuts the member manager rpc server for this cluster member down.
*/
func (mm *MemberManager) Shutdown() error {
// Stop housekeeping
if mm.housekeeping {
mm.wg.Add(1)
mm.housekeeping = false
mm.wg.Wait()
mm.LogInfo("Housekeeping stopped")
}
// Close socket
if mm.listener != nil {
mm.LogInfo("Shutdown rpc server on: ", mm.Client.rpc)
mm.wg.Add(1)
mm.listener.Close()
mm.listener = nil
mm.wg.Wait()
} else {
LogDebug("Member manager ", mm.name, " already shut down")
}
return nil
}
/*
LogInfo logs a member related message at info level.
*/
func (mm *MemberManager) LogInfo(v ...interface{}) {
LogInfo(mm.name, ": ", fmt.Sprint(v...))
}
/*
Name returns the member name.
*/
func (mm *MemberManager) Name() string {
return mm.name
}
/*
NetAddr returns the network address of the member.
*/
func (mm *MemberManager) NetAddr() string {
return mm.Client.rpc
}
/*
Members returns a list of all cluster members.
*/
func (mm *MemberManager) Members() []string {
var ret []string
siMembers, _ := mm.stateInfo.Get(StateInfoMEMBERS)
members := siMembers.([]string)
for i := 0; i < len(members); i += 2 {
ret = append(ret, members[i])
}
sort.Strings(ret)
return ret
}
/*
StateInfo returns the current state info.
*/
func (mm *MemberManager) StateInfo() StateInfo {
return mm.stateInfo
}
/*
MemberInfo returns the current static member info. Clients may modify the
returned map. Member info can be used to store additional information
on every member (e.g. a member specific URL).
*/
func (mm *MemberManager) MemberInfo() map[string]interface{} {
return mm.memberInfo
}
/*
SetEventHandler sets event handler functions which are called when the state info
is updated or when housekeeping has been done.
*/
func (mm *MemberManager) SetEventHandler(notifyStateUpdate func(), notifyHouseKeeping func()) {
mm.notifyStateUpdate = notifyStateUpdate
mm.notifyHouseKeeping = notifyHouseKeeping
}
/*
SetHandleDataRequest sets the data request handler.
*/
func (mm *MemberManager) SetHandleDataRequest(handleDataRequest func(interface{}, *interface{}) error) {
mm.handleDataRequest = handleDataRequest
}
/*
MemberInfoCluster returns the current static member info for every known
cluster member. This calls every member in the cluster.
*/
func (mm *MemberManager) MemberInfoCluster() map[string]map[string]interface{} {
clusterMemberInfo := make(map[string]map[string]interface{})
clusterMemberInfo[mm.name] = mm.MemberInfo()
for p := range mm.Client.peers {
mi, err := mm.Client.SendMemberInfoRequest(p)
if err != nil {
clusterMemberInfo[p] = map[string]interface{}{MemberInfoError: err.Error()}
} else {
clusterMemberInfo[p] = mi
}
}
return clusterMemberInfo
}
// Cluster membership functions
// ============================
/*
JoinCluster lets this member try to join an existing cluster. The secret must
be correct otherwise the member will be rejected.
*/
func (mm *MemberManager) JoinCluster(newMemberName string, newMemberRPC string) error {
// Housekeeping should not be running while joining a cluster
mm.housekeepingLock.Lock()
defer mm.housekeepingLock.Unlock()
res, err := mm.Client.SendJoinCluster(newMemberName, newMemberRPC)
if err == nil {
// Update the state info of this member if the join was successful
mm.applyStateInfo(res)
}
return err
}
/*
JoinNewMember joins a new member to the current cluster. It is assumed that
the new member's token has already been verified.
*/
func (mm *MemberManager) JoinNewMember(newMemberName string, newMemberRPC string) error {
// Acquire cluster lock for updating the state info
if err := mm.Client.SendAcquireClusterLock(ClusterLockUpdateStateInfo); err != nil {
return err
}
// Get operational peers (operational cluster is NOT required - other members should
// update eventually)
peers, _ := mm.Client.OperationalPeers()
mm.LogInfo("Adding member ", newMemberName, " with rpc ", newMemberRPC, " to the cluster")
// Add member to local state info
if err := mm.addMember(newMemberName, newMemberRPC, nil); err != nil {
// Try to release the cluster lock if something went wrong at this point
mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
return err
}
// Add member to all other cluster members (ignore failures - failed members
// should be updated eventually by the housekeeping worker)
for _, p := range peers {
mm.Client.SendRequest(p, RPCAddMember, map[RequestArgument]interface{}{
RequestMEMBERNAME: newMemberName,
RequestMEMBERRPC: newMemberRPC,
RequestSTATEINFOMAP: mapToBytes(mm.stateInfo.Map()),
})
}
// Release cluster lock for updating the state info
return mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
}
/*
EjectMember ejects a member from the current cluster. Trying to remove a non-existent
member has no effect.
*/
func (mm *MemberManager) EjectMember(memberToEject string) error {
var err error
// Get operational peers (operational cluster is NOT required - other members should
// update eventually)
peers, _ := mm.Client.OperationalPeers()
// Check if the given member name is valid - it must be a peer or this member
if memberToEjectRPC, ok := mm.Client.peers[memberToEject]; ok {
// Acquire cluster lock for updating the state info
if err := mm.Client.SendAcquireClusterLock(ClusterLockUpdateStateInfo); err != nil {
return err
}
mm.LogInfo("Ejecting member ", memberToEject, " from the cluster")
mm.Client.maplock.Lock()
delete(mm.Client.peers, memberToEject)
delete(mm.Client.conns, memberToEject)
delete(mm.Client.failed, memberToEject)
mm.Client.maplock.Unlock()
if err := mm.updateStateInfo(true); err != nil {
// Put the member to eject back into the peers map
mm.Client.peers[memberToEject] = memberToEjectRPC
// Try to release the cluster lock if something went wrong at this point
mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
return err
}
// Send the state info to all other cluster members (ignore failures - failed members
// should be updated eventually by the housekeeping worker)
for _, k := range peers {
mm.Client.SendRequest(k, RPCUpdateStateInfo, map[RequestArgument]interface{}{
RequestSTATEINFOMAP: mapToBytes(mm.stateInfo.Map()),
})
}
// Release cluster lock for updating the state info
err = mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
} else if mm.name == memberToEject {
// If we should eject ourselves then forward the request
mm.LogInfo("Ejecting this member from the cluster")
if len(peers) > 0 {
if err := mm.Client.SendEjectMember(peers[0], mm.name); err != nil {
return err
}
}
// Clear peer maps and update the cluster state
mm.Client.maplock.Lock()
mm.Client.peers = make(map[string]string)
mm.Client.conns = make(map[string]*rpc.Client)
mm.Client.failed = make(map[string]string)
mm.Client.maplock.Unlock()
err = mm.updateStateInfo(true)
}
return err
}
// StateInfo functions
// ===================
/*
UpdateClusterStateInfo updates the member's state info and sends it to all members in
the cluster.
*/
func (mm *MemberManager) UpdateClusterStateInfo() error {
// Get operational peers - fail if the cluster is not operational
peers, err := mm.Client.OperationalPeers()
if err != nil {
return err
}
// Acquire cluster lock for updating the state info
if err := mm.Client.SendAcquireClusterLock(ClusterLockUpdateStateInfo); err != nil {
return err
}
mm.LogInfo("Updating cluster state info")
if err := mm.updateStateInfo(true); err != nil {
// Try to release the cluster lock if something went wrong at this point
mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
return err
}
// Send the state info to all other cluster members (ignore failures - failed members
// should be updated eventually by the housekeeping worker)
for _, k := range peers {
mm.Client.SendRequest(k, RPCUpdateStateInfo, map[RequestArgument]interface{}{
RequestSTATEINFOMAP: mapToBytes(mm.stateInfo.Map()),
})
}
// Release cluster lock for updating the state info
return mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)
}
// Helper functions
// ================
/*
addMember adds a new member to the local state info.
*/
func (mm *MemberManager) addMember(newMemberName string, newMemberRPC string,
newStateInfo map[string]interface{}) error {
// Check if member exists already
if _, ok := mm.Client.peers[newMemberName]; ok {
return &Error{ErrClusterConfig,
fmt.Sprintf("Cannot add member %v as a member with the same name exists already",
newMemberName)}
}
// Add new peer to peer map - member.Client.conns will be updated on the
// first connection
mm.Client.maplock.Lock()
mm.Client.peers[newMemberName] = newMemberRPC
mm.Client.maplock.Unlock()
// Store the new state or just update the state
if newStateInfo != nil {
return mm.applyStateInfo(newStateInfo)
}
return mm.updateStateInfo(true)
}
/*
updateStateInfo updates the StateInfo from the current runtime state.
Only updates the timestamp if newTS is true.
*/
func (mm *MemberManager) updateStateInfo(newTS bool) error {
sortMapKeys := func(m map[string]string) []string {
var ks []string
for k := range m {
ks = append(ks, k)
}
sort.Strings(ks)
return ks
}
// Populate members entry
members := make([]string, 0, len(mm.Client.peers)*2)
// Add this member to the state info
members = append(members, mm.name)
members = append(members, mm.Client.rpc)
// Add other known members to the state info
mm.Client.maplock.Lock()
for _, name := range sortMapKeys(mm.Client.peers) {
rpc := mm.Client.peers[name]
members = append(members, name)
members = append(members, rpc)
}
mm.stateInfo.Put(StateInfoMEMBERS, members)
failed := make([]string, 0, len(mm.Client.failed)*2)
// Add all known failed members to the state info
for _, name := range sortMapKeys(mm.Client.failed) {
errstr := mm.Client.failed[name]
failed = append(failed, name)
failed = append(failed, errstr)
}
mm.Client.maplock.Unlock()
mm.stateInfo.Put(StateInfoFAILED, failed)
// Check for replication factor entry - don't touch if it is set
if _, ok := mm.stateInfo.Get(StateInfoREPFAC); !ok {
mm.stateInfo.Put(StateInfoREPFAC, 1)
}
if newTS {
// Populate old timestamp and timestamp
newOldTS, ok := mm.stateInfo.Get(StateInfoTS)
if !ok {
newOldTS = []string{"", "0"}
}
mm.stateInfo.Put(StateInfoTSOLD, newOldTS)
v, _ := strconv.ParseInt(newOldTS.([]string)[1], 10, 64)
mm.stateInfo.Put(StateInfoTS, []string{mm.name, fmt.Sprint(v + 1)})
}
err := mm.stateInfo.Flush()
if err == nil {
// Notify others of the state update
mm.notifyStateUpdate()
}
return err
}
/*
applyStateInfo sets the runtime state from the given StateInfo map.
*/
func (mm *MemberManager) applyStateInfo(stateInfoMap map[string]interface{}) error {
// Set peers entry
mm.applyStateInfoPeers(stateInfoMap, true)
// Set failed entry
mm.Client.maplock.Lock()
mm.Client.failed = make(map[string]string)
siFailed := stateInfoMap[StateInfoFAILED]
failed := siFailed.([]string)
for i := 0; i < len(failed); i += 2 {
mm.Client.failed[failed[i]] = failed[i+1]
}
mm.Client.maplock.Unlock()
// Set given replication factor entry
mm.stateInfo.Put(StateInfoREPFAC, stateInfoMap[StateInfoREPFAC])
// Set given timestamp
mm.stateInfo.Put(StateInfoTS, stateInfoMap[StateInfoTS])
mm.stateInfo.Put(StateInfoTSOLD, stateInfoMap[StateInfoTSOLD])
// Set state info
return mm.updateStateInfo(false)
}
/*
applyStateInfoPeers sets the peer related runtime state from the given StateInfo map.
*/
func (mm *MemberManager) applyStateInfoPeers(stateInfoMap map[string]interface{}, replaceExisting bool) {
// Set peers entry
if replaceExisting {
mm.Client.maplock.Lock()
mm.Client.peers = make(map[string]string)
mm.Client.maplock.Unlock()
}
siMembers := stateInfoMap[StateInfoMEMBERS]
members := siMembers.([]string)
for i := 0; i < len(members); i += 2 {
// Do not add this member as peer
if members[i] != mm.name {
mm.Client.maplock.Lock()
mm.Client.peers[members[i]] = members[i+1]
mm.Client.maplock.Unlock()
}
}
}
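// Example (illustrative sketch): the shape of a state info map as built by
// updateStateInfo and consumed by applyStateInfo. Member and failed lists are
// flattened name/value pairs; all concrete values below are hypothetical:
//
//	stateInfoMap := map[string]interface{}{
//		StateInfoMEMBERS: []string{"member1", "localhost:9030", "member2", "localhost:9031"},
//		StateInfoFAILED:  []string{"member3", "connection refused"},
//		StateInfoREPFAC:  1,
//		StateInfoTS:      []string{"member1", "5"}, // member which wrote the last update + counter
//		StateInfoTSOLD:   []string{"member1", "4"},
//	}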
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package manager
import (
"crypto/sha512"
"fmt"
"net/rpc"
"devt.de/krotik/common/errorutil"
)
func init() {
// Create singleton Server instance.
server = &Server{make(map[string]*MemberManager)}
// Register the cluster API as RPC server
errorutil.AssertOk(rpc.Register(server))
}
/*
RPCFunction is used to identify the called function in a RPC call
*/
type RPCFunction string
/*
List of all possible RPC functions. The list includes all RPC callable functions
in this file.
*/
const (
// General functions
RPCPing RPCFunction = "Ping"
RPCSIRequest = "StateInfoRequest"
RPCMIRequest = "MemberInfoRequest"
// Cluster-wide locking
RPCAcquireLock = "AcquireLock"
RPCReleaseLock = "ReleaseLock"
// Cluster member management
RPCJoinCluster = "JoinCluster"
RPCAddMember = "AddMember"
RPCEjectMember = "EjectMember"
// StateInfo functions
RPCUpdateStateInfo = "UpdateStateInfo"
// Data request functions
RPCDataRequest = "DataRequest"
)
/*
RequestArgument is used to identify arguments in a RPC call
*/
type RequestArgument int
/*
List of all possible arguments in an RPC request. There are usually no checks that
return an error if a required argument is missing. The RPC API is internal
and might change without backwards compatibility.
*/
const (
// General arguments
RequestTARGET RequestArgument = iota // Required argument which identifies the target cluster member
RequestTOKEN // Client token which is used for authorization checks
RequestLOCK // Lock name which a member requests to take
RequestMEMBERNAME // Name for a member
RequestMEMBERRPC // RPC address and port for a member
RequestSTATEINFOMAP // StateInfo object as a map
RequestDATA // Data request object
)
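// Example (hedged sketch): a request map for a Ping call as a client member
// might assemble it - the target name, token variable and rpcClient
// (*rpc.Client) are hypothetical:
//
//	request := map[RequestArgument]interface{}{
//		RequestTARGET: "member2",
//		RequestTOKEN:  token, // *MemberToken of the calling member
//	}
//	var response interface{}
//	err := rpcClient.Call("Server.Ping", request, &response)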
/*
server is the Server instance which serves rpc calls
*/
var server *Server
/*
Server is the RPC exposed cluster API of a cluster member. Server
is a singleton and will route incoming (authenticated) requests to registered
MemberManagers. The calling member is referred to as source member and the called
member is referred to as target member.
*/
type Server struct {
managers map[string]*MemberManager // Map of local cluster members
}
// General functions
// =================
/*
Ping answers with a Pong if the given client token was verified and the local
cluster member exists.
*/
func (ms *Server) Ping(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, false)
if err != nil {
return err
}
// Send a simple response
res := []string{"Pong"}
// Check if request is from a cluster member - only reveal timestamps
// to members
token := request[RequestTOKEN].(*MemberToken)
if _, ok := manager.Client.peers[token.MemberName]; ok {
ts, _ := manager.stateInfo.Get(StateInfoTS)
res = append(res, ts.([]string)...)
tsold, _ := manager.stateInfo.Get(StateInfoTSOLD)
res = append(res, tsold.([]string)...)
}
*response = res
return nil
}
/*
StateInfoRequest answers with the member's state info.
*/
func (ms *Server) StateInfoRequest(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, false)
if err != nil {
return err
}
*response = mapToBytes(manager.stateInfo.Map())
return nil
}
/*
MemberInfoRequest answers with the member's static info.
*/
func (ms *Server) MemberInfoRequest(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, false)
if err != nil {
return err
}
*response = mapToBytes(manager.memberInfo)
return nil
}
// Cluster membership functions
// ============================
/*
JoinCluster is used by a new member if it wants to join the cluster.
*/
func (ms *Server) JoinCluster(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, false)
if err != nil {
return err
}
newMemberName := request[RequestMEMBERNAME].(string)
newMemberRPC := request[RequestMEMBERRPC].(string)
err = manager.JoinNewMember(newMemberName, newMemberRPC)
if err == nil {
// Return updated state info if there was no error
*response = mapToBytes(manager.stateInfo.Map())
}
return err
}
/*
AddMember adds a new member on the target member.
*/
func (ms *Server) AddMember(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
// Acquire lock to modify client map
newMemberName := request[RequestMEMBERNAME].(string)
newMemberRPC := request[RequestMEMBERRPC].(string)
newStateInfo := bytesToMap(request[RequestSTATEINFOMAP].([]byte))
return manager.addMember(newMemberName, newMemberRPC, newStateInfo)
}
/*
EjectMember can be called by a cluster member to eject itself or another cluster member.
*/
func (ms *Server) EjectMember(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
memberToEject := request[RequestMEMBERNAME].(string)
return manager.EjectMember(memberToEject)
}
// Cluster-wide locking
// ====================
/*
AcquireLock tries to acquire a named lock for the source member on the
target member. It fails if the lock is already acquired by a different member.
The lock can only be held for a limited amount of time.
*/
func (ms *Server) AcquireLock(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
// Acquire lock to modify lock map
manager.Client.maplock.Lock()
defer manager.Client.maplock.Unlock()
requestedLock := request[RequestLOCK].(string)
sourceMember := request[RequestTOKEN].(*MemberToken).MemberName
// Get the lock owner
lockOwner, ok := manager.Client.clusterLocks.Get(requestedLock)
if ok && lockOwner != sourceMember {
// If there is already an owner return an error which mentions the owner
return &Error{ErrLockTaken, lockOwner.(string)}
}
// If there is no owner set the source client as the new owner
manager.Client.clusterLocks.Put(requestedLock, sourceMember)
*response = sourceMember
return nil
}
/*
ReleaseLock releases a lock. Only the member which holds the lock can release it.
*/
func (ms *Server) ReleaseLock(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
// Acquire lock to modify lock map
manager.Client.maplock.Lock()
defer manager.Client.maplock.Unlock()
requestedLock := request[RequestLOCK].(string)
sourceMember := request[RequestTOKEN].(*MemberToken).MemberName
// Get the lock owner
lockOwner, ok := manager.Client.clusterLocks.Get(requestedLock)
if ok {
if lockOwner == sourceMember {
// Release lock
manager.Client.clusterLocks.Remove(requestedLock)
} else {
// Lock is owned by someone else
return &Error{ErrLockNotOwned, fmt.Sprintf("Owned by %v not by %v",
lockOwner, sourceMember)}
}
}
// Operation on a non-existing lock is a NOP
return nil
}
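// Example (illustrative): the acquire/release protocol as used by
// MemberManager.UpdateClusterStateInfo - the lock is taken cluster-wide,
// the guarded operation runs, then the lock is released again:
//
//	if err := mm.Client.SendAcquireClusterLock(ClusterLockUpdateStateInfo); err != nil {
//		return err
//	}
//	// ... perform the guarded operation ...
//	return mm.Client.SendReleaseClusterLock(ClusterLockUpdateStateInfo)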
// StateInfo functions
// ===================
/*
UpdateStateInfo updates the state info of the target member.
*/
func (ms *Server) UpdateStateInfo(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
newStateInfo := bytesToMap(request[RequestSTATEINFOMAP].([]byte))
return manager.applyStateInfo(newStateInfo)
}
// Data request functions
// ======================
/*
DataRequest handles a data request.
*/
func (ms *Server) DataRequest(request map[RequestArgument]interface{},
response *interface{}) error {
// Verify the given token and retrieve the target member
manager, err := ms.checkToken(request, true)
if err != nil {
return err
}
// Handle the data request
reqdata := request[RequestDATA]
return manager.handleDataRequest(reqdata, response)
}
// Helper functions
// ================
/*
checkToken checks the member token in a given request.
*/
func (ms *Server) checkToken(request map[RequestArgument]interface{},
checkClusterMembership bool) (*MemberManager, error) {
// Get the target member
target := request[RequestTARGET].(string)
token := request[RequestTOKEN].(*MemberToken)
if manager, ok := ms.managers[target]; ok {
// Generate expected auth from given requesting member name in token and secret of target
expectedAuth := fmt.Sprintf("%X", sha512.Sum512_224([]byte(token.MemberName+manager.secret)))
if token.MemberAuth == expectedAuth {
if checkClusterMembership {
// Check if the requesting client is actually a member of the cluster
manager.Client.maplock.Lock()
_, ok := manager.Client.peers[token.MemberName]
manager.Client.maplock.Unlock()
if !ok {
return nil, ErrNotMember
}
}
return manager, nil
}
return nil, ErrInvalidToken
}
return nil, ErrUnknownTarget
}
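// Example (illustrative sketch): how a calling member computes the MemberAuth
// value which checkToken verifies - the member name and secret are hypothetical:
//
//	secret := "cluster secret"
//	token := &MemberToken{
//		MemberName: "member1",
//		MemberAuth: fmt.Sprintf("%X", sha512.Sum512_224([]byte("member1"+secret))),
//	}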
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"encoding/gob"
"errors"
"fmt"
"sync"
"time"
"devt.de/krotik/common/timeutil"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/hash"
"devt.de/krotik/eliasdb/storage"
)
func init() {
// Make sure we can use the relevant types in a gob operation
gob.Register(&translationRec{})
gob.Register(&transferRec{})
}
/*
rootIDTranslationTree is the root id for the translation map
*/
const rootIDTranslationTree = 2
/*
rootIDTransferTree is the root id for the transfer map
*/
const rootIDTransferTree = 3
/*
transPrefix is the prefix for translation entries (cluster location -> local location)
*/
const transPrefix = "t"
/*
newlocPrefix is the prefix for new location counters
*/
const newlocPrefix = "n"
/*
translationRec is a translation record which stores a local storage location with a
version number.
*/
type translationRec struct {
Loc uint64 // Local storage location
Ver uint64 // Version of the local stored data
}
/*
transferRec is a transfer record which stores a data transfer request.
*/
type transferRec struct {
Members []string // Target members
Request *DataRequest // Data request
}
/*
memberAddressTable is used by a memberStorage to manage cluster locations and their link to
local locations.
*/
type memberAddressTable struct {
ds *DistributedStorage // Related distribution storage
sm storage.Manager // Storage manager which stores this translation table
translation *hash.HTree // Tree which stores the translation table (cluster location -> physical location)
transfer *hash.HTree // Tree which stores the transfer table
newlocCounters map[string]uint64 // Cached counter values to create new cluster locations
newlocCounterLock *sync.Mutex // Lock for cached counter values
}
/*
newMemberAddressTable creates a new member address table.
*/
func newMemberAddressTable(ds *DistributedStorage, sm storage.Manager) (*memberAddressTable, error) {
var err error
var translation, transfer *hash.HTree
var ret *memberAddressTable
translation, err = getHtree(rootIDTranslationTree, sm)
if err == nil {
transfer, err = getHtree(rootIDTransferTree, sm)
if err == nil {
err = sm.Flush()
if err == nil {
ret = &memberAddressTable{ds, sm, translation, transfer, make(map[string]uint64), &sync.Mutex{}}
}
}
}
return ret, err
}
/*
NewClusterLoc returns a new cluster location for a given storage manager.
*/
func (mat *memberAddressTable) NewClusterLoc(dsname string) (uint64, error) {
var ret uint64
var err error
var dsm *DistributedStorageManager
// Check member is operational
distTable, distTableErr := mat.checkState()
if distTableErr != nil {
return 0, distTableErr
}
// Get the location range which is allowed
rangeStart, rangeStop := distTable.MemberRange(mat.ds.MemberManager.Name())
// Get counter
newLocCounter, _, _ := mat.newlocCounter(dsname)
// Check that newLocCounter is sensible
if newLocCounter < rangeStart {
newLocCounter = rangeStart
}
// Get a StorageManager instance if required
if newLocCounter == rangeStart {
dsm = mat.ds.StorageManager(dsname, true).(*DistributedStorageManager)
}
locExists := func(dsname string, candidate uint64) (bool, error) {
// We might be a new member - check with other members if we are at the start
// of our range
if newLocCounter == rangeStart {
ok, err := dsm.Exists(candidate)
if err != nil || ok {
return err == nil && ok, err
}
}
return mat.translation.Exists(transKey(dsname, candidate))
}
candidate := newLocCounter
ok, err := locExists(dsname, candidate)
if err == nil {
if ok {
// Candidate exists - search for a better one
var i uint64
for i = rangeStart; i <= rangeStop; i++ {
ok, err = locExists(dsname, i)
if err == nil && !ok && i != 0 {
ret = i
goto SearchResult
} else if err != nil {
goto SearchResult
}
}
err = errors.New("Could not find any free storage location on this member")
SearchResult:
} else {
// Candidate does not exist - it is a new location
ret = candidate
}
}
// At this point we either have an error or a valid location in ret
if err == nil {
newLocCounter = ret + 1
if newLocCounter > rangeStop {
// Reset the range counter - next time we test if there is anything
// left in this range
newLocCounter = 1
}
mat.setNewlocCounter(dsname, newLocCounter)
mat.sm.Flush()
}
return ret, err
}
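// Worked example (assumed numbers): if this member's range is [1000, 1999]
// and the cached counter for "graph" is 1005, location 1005 is proposed. If
// 1005 already has a translation, the range is scanned from 1000 upwards for
// the first free location. After a successful allocation the counter moves
// to the allocated location + 1; once it passes 1999 it is reset so that the
// next allocation re-scans the range from its start.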
/*
AddTransferRequest adds a data transfer request which can be picked up by the transferWorker.
*/
func (mat *memberAddressTable) AddTransferRequest(targetMembers []string, request *DataRequest) {
// Get a unique key for the transfer request
key := timeutil.MakeTimestamp()
ex, err := mat.transfer.Exists([]byte(key))
for ex && err == nil {
key = timeutil.MakeTimestamp()
time.Sleep(time.Millisecond)
ex, err = mat.transfer.Exists([]byte(key))
}
// Store the transfer request
if err == nil {
_, err := mat.transfer.Put([]byte(key), &transferRec{targetMembers, request})
if err == nil {
mat.sm.Flush()
}
}
if request != nil {
ts, _ := timeutil.TimestampString(string(key), "UTC")
manager.LogDebug(mat.ds.Name(), "(Store): ",
fmt.Sprintf("Added transfer request %v (Error: %v) to %v from %v",
request.RequestType, err, targetMembers, ts))
}
}
/*
TransClusterLoc translates a cluster location to a local location. Returns the translated
location, a flag indicating whether the location was found, and any lookup error.
*/
func (mat *memberAddressTable) TransClusterLoc(dsname string, clusterLoc uint64) (*translationRec, bool, error) {
v, err := mat.translation.Get(transKey(dsname, clusterLoc))
if v == nil {
return nil, false, err
}
return v.(*translationRec), true, err
}
/*
SetTransClusterLoc adds a translation from a cluster location to a local location. Returns the
previously stored translated location, a flag indicating whether it was found, and any error.
*/
func (mat *memberAddressTable) SetTransClusterLoc(dsname string, clusterLoc uint64,
localLoc uint64, localVer uint64) (*translationRec, bool, error) {
v, err := mat.translation.Put(transKey(dsname, clusterLoc), &translationRec{localLoc, localVer})
if err == nil {
mat.sm.Flush()
}
if v == nil {
return nil, false, err
}
return v.(*translationRec), true, err
}
/*
RemoveTransClusterLoc removes a translation of a cluster location. Returns the
previously stored translated location, a flag indicating whether it was found, and any error.
*/
func (mat *memberAddressTable) RemoveTransClusterLoc(dsname string, clusterLoc uint64) (*translationRec, bool, error) {
v, err := mat.translation.Remove(transKey(dsname, clusterLoc))
if err == nil {
mat.sm.Flush()
}
if v == nil {
return nil, false, err
}
return v.(*translationRec), true, err
}
/*
checkState checks the state of the cluster member. It returns an error if the
member is not operational.
*/
func (mat *memberAddressTable) checkState() (*DistributionTable, error) {
distTable, distTableErr := mat.ds.DistributionTable()
if distTableErr != nil {
return nil, fmt.Errorf("Storage is currently disabled on member: %v (%v)",
mat.ds.MemberManager.Name(), distTableErr)
}
return distTable, nil
}
// Helper functions
// ================
/*
newlocCounter returns the location counter for a given storage manager. Returns the
counter value, a flag indicating whether the counter was found, and any lookup error.
*/
func (mat *memberAddressTable) newlocCounter(dsname string) (uint64, bool, error) {
// Try to get the counter from the cache
mat.newlocCounterLock.Lock()
cv, ok := mat.newlocCounters[dsname]
mat.newlocCounterLock.Unlock()
if ok {
return cv, true, nil
}
// Lookup the counter
v, err := mat.translation.Get(newlocCounterKey(dsname))
if v == nil {
return 1, false, err
}
ret := toUInt64(v)
// Store counter in the cache
mat.newlocCounterLock.Lock()
mat.newlocCounters[dsname] = ret
mat.newlocCounterLock.Unlock()
return ret, true, err
}
/*
setNewlocCounter sets a location counter for a given storage manager.
*/
func (mat *memberAddressTable) setNewlocCounter(dsname string, counter uint64) error {
// Store counter in the cache and HTree
mat.newlocCounterLock.Lock()
mat.newlocCounters[dsname] = counter
mat.newlocCounterLock.Unlock()
_, err := mat.translation.Put(newlocCounterKey(dsname), counter)
return err
}
/*
newlocCounterKey returns the counter key for a given storage manager.
*/
func newlocCounterKey(dsname string) []byte {
return []byte(fmt.Sprint(newlocPrefix, dsname))
}
/*
transKey returns the translation map lookup key for a given cluster location and storage manager.
*/
func transKey(dsname string, loc uint64) []byte {
return []byte(fmt.Sprint(transPrefix, dsname, "#", loc))
}
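// Example key layouts (derived from the prefixes above; the store name
// "graph" is hypothetical):
//
//	transKey("graph", 42)     // -> []byte("tgraph#42")
//	newlocCounterKey("graph") // -> []byte("ngraph")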
/*
getHtree returns a HTree from a given storage.Manager with a given root ID.
*/
func getHtree(rootID int, sm storage.Manager) (*hash.HTree, error) {
var htree *hash.HTree
var err error
loc := sm.Root(rootID)
if loc == 0 {
// Create a new HTree and store its location
htree, err = hash.NewHTree(sm)
if err == nil {
// Make sure the new root id is persisted
sm.SetRoot(rootID, htree.Location())
}
} else {
// Load existing HTree
htree, err = hash.LoadHTree(sm, loc)
}
return htree, err
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"strings"
"sync"
"devt.de/krotik/common/sortutil"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/graph/graphstorage"
"devt.de/krotik/eliasdb/hash"
"devt.de/krotik/eliasdb/storage"
)
/*
ClusterStoragePrefix is the prefix for cluster related storage managers
*/
const ClusterStoragePrefix = "cs_"
/*
LocalStoragePrefix is the prefix for local storage managers
*/
const LocalStoragePrefix = "ls_"
/*
memberStorage models the local storage of a cluster member. This data structure
is the only thing which has access to the wrapped graphstorage.Storage.
*/
type memberStorage struct {
ds *DistributedStorage // Distributed storage which created this member storage
gs graphstorage.Storage // Wrapped graphstorage.Storage
at *memberAddressTable // Address table (cluster location -> local location)
transferLock *sync.Mutex // Lock for the transfer task
transferRunning bool // Flag to indicate that the transfer task is running
rebalanceLock *sync.Mutex // Lock for the rebalance task
rebalanceRunning bool // Flag to indicate that the rebalance task is running
rebalanceCounter int
}
/*
newMemberStorage creates a new memberStorage instance.
*/
func newMemberStorage(ds *DistributedStorage, gs graphstorage.Storage) (*memberStorage, error) {
sm := gs.StorageManager("cluster_translation", true)
at, err := newMemberAddressTable(ds, sm)
if err != nil {
return nil, err
}
return &memberStorage{ds, gs, at, &sync.Mutex{}, false, &sync.Mutex{}, false, 0}, nil
}
/*
handleDataRequest deals with RPC requests. It is the only function which is
called by the RPC server of the member manager.
*/
func (ms *memberStorage) handleDataRequest(request interface{}, response *interface{}) error {
var err error
// Make sure a request can be served
distTable, distTableErr := ms.at.checkState()
if distTableErr != nil {
return distTableErr
}
dr := request.(*DataRequest)
switch dr.RequestType {
case RTGetMain:
*response = ms.gs.MainDB()
case RTSetMain:
err = ms.handleSetMainRequest(distTable, dr, response)
case RTSetRoot:
err = ms.handleSetRootRequest(distTable, dr, response)
case RTGetRoot:
err = ms.handleGetRootRequest(distTable, dr, response)
case RTInsert:
err = ms.handleInsertRequest(distTable, dr, response)
case RTUpdate:
err = ms.handleUpdateRequest(distTable, dr, response)
case RTFree:
err = ms.handleFreeRequest(distTable, dr, response)
case RTExists:
err = ms.handleFetchRequest(distTable, dr, response, false)
case RTFetch:
err = ms.handleFetchRequest(distTable, dr, response, true)
case RTRebalance:
err = ms.handleRebalanceRequest(distTable, dr, response)
default:
err = fmt.Errorf("Unknown request type")
}
manager.LogDebug(ms.ds.MemberManager.Name(), fmt.Sprintf("(Store): Handled: %v %s (Transfer: %v, Error: %v)",
dr.RequestType, dr.Args, dr.Transfer, err))
return err
}
/*
handleSetMainRequest sets the mainDB on the local storage manager.
*/
func (ms *memberStorage) handleSetMainRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
mainDB := ms.gs.MainDB()
newMainDB := request.Value.(map[string]string)
// Update keys and values
for k, v := range newMainDB {
mainDB[k] = v
}
// Check if things should be deleted
var toRemove []string
for k := range mainDB {
if _, ok := newMainDB[k]; !ok {
toRemove = append(toRemove, k)
}
}
for _, k := range toRemove {
delete(mainDB, k)
}
err := ms.gs.FlushMain()
if !request.Transfer {
ms.at.AddTransferRequest(distTable.OtherReplicationMembers(0, ms.ds.MemberManager.Name()),
&DataRequest{RTSetMain, nil, request.Value, true})
}
return err
}
/*
handleGetRootRequest retrieves a root value from a local storage manager.
*/
func (ms *memberStorage) handleGetRootRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
dsname := request.Args[RPStoreName].(string)
root := request.Args[RPRoot].(int)
sm := ms.dataStorage(dsname, false)
if sm != nil {
*response = sm.Root(root)
}
return nil
}
/*
handleSetRootRequest sets a new root value in a local storage manager.
*/
func (ms *memberStorage) handleSetRootRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
dsname := request.Args[RPStoreName].(string)
root := request.Args[RPRoot].(int)
sm := ms.dataStorage(dsname, true)
sm.SetRoot(root, toUInt64(request.Value))
if !request.Transfer {
ms.at.AddTransferRequest(distTable.OtherReplicationMembers(0, ms.ds.MemberManager.Name()),
&DataRequest{RTSetRoot, request.Args, request.Value, true})
}
return sm.Flush()
}
/*
handleInsertRequest inserts an object and returns its cluster storage location.
Distribution procedure:
Client -> Cluster Member Request Receiver
Cluster Member Request Receiver -> Cluster Member Primary Storage (chosen round-robin / available)
Cluster Member Primary Storage writes into its Transfer Table
Cluster Member Primary Storage (Transfer worker) -> Replicating Cluster Members
*/
func (ms *memberStorage) handleInsertRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
var err error
var cloc uint64
dsname := request.Args[RPStoreName].(string)
*response = 0
sm := ms.dataStorage(dsname, true)
if !request.Transfer {
// First get a new cluster location (on this member)
cloc, err = ms.at.NewClusterLoc(dsname)
} else {
// If this is a transfer request we know already the cluster location
cloc = toUInt64(request.Args[RPLoc])
}
if err == nil {
var loc uint64
// Insert into the local storage
loc, err = sm.Insert(request.Value)
if err == nil {
// Add a translation
_, _, err = ms.at.SetTransClusterLoc(dsname, cloc, loc, 1)
if err == nil {
if !request.Transfer {
// Add transfer request for replication
// At this point the operation has succeeded. We still need to
// replicate the change to all the replicating members but
// any errors happening during this shall not fail this operation.
// The next rebalancing will then synchronize all members again.
ms.at.AddTransferRequest(distTable.Replicas(ms.ds.MemberManager.Name()),
&DataRequest{RTInsert, map[DataRequestArg]interface{}{
RPStoreName: dsname,
RPLoc: cloc,
}, request.Value, true})
}
*response = cloc
}
}
}
return err
}
/*
handleUpdateRequest updates an object and returns its cluster storage location.
There is a chance of producing inconsistencies if members fail in a particular
sequence. It is assumed that these will be dealt with in the next rebalance.
Distribution procedure:
Client -> Cluster Member Request Receiver
Cluster Member Request Receiver -> Cluster Member Primary Storage or Replicating Cluster Member
Storing Cluster Member does the update and writes into its transfer table
Storing Cluster Member (Transfer worker) -> Replicating / Primary Cluster Members
*/
func (ms *memberStorage) handleUpdateRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
var err error
var newVersion uint64
dsname := request.Args[RPStoreName].(string)
cloc := toUInt64(request.Args[RPLoc])
*response = 0
// Get the translation
transRec, ok, err := ms.at.TransClusterLoc(dsname, cloc)
if ok {
sm := ms.dataStorage(dsname, false)
if sm != nil {
// Update the local storage
if !request.Transfer {
err = sm.Update(transRec.Loc, request.Value)
newVersion = transRec.Ver + 1
} else {
newVersion = toUInt64(request.Args[RPVer])
if newVersion >= transRec.Ver {
err = sm.Update(transRec.Loc, request.Value)
} else {
// Outdated update requests are simply ignored
err = fmt.Errorf("Received outdated update request (%v - Location: %v)",
ms.ds.MemberManager.Name(), cloc)
manager.LogDebug(ms.ds.MemberManager.Name(), err.Error())
// Need to return no error so the transfer worker on the
// other side removes its entry
err = nil
}
}
if err == nil {
// Increase the version of the translation record
_, _, err = ms.at.SetTransClusterLoc(dsname, cloc, transRec.Loc, newVersion)
if err == nil {
if !request.Transfer {
// Add transfer request for replication
// At this point the operation has succeeded. We still need to
// replicate the change to all the replicating members but
// any errors happening during this shall not fail this operation.
// The next rebalancing will then synchronize all members again.
ms.at.AddTransferRequest(distTable.OtherReplicationMembers(cloc, ms.ds.MemberManager.Name()),
&DataRequest{RTUpdate, map[DataRequestArg]interface{}{
RPStoreName: dsname,
RPLoc: cloc,
RPVer: newVersion,
}, request.Value, true})
}
*response = cloc
return nil
}
}
}
}
if err == nil {
err = fmt.Errorf("Cluster slot not found (%v - Location: %v)",
ms.ds.MemberManager.Name(), cloc)
}
return err
}
/*
handleFreeRequest removes an object.
Distribution procedure:
Client -> Cluster Member Request Receiver
Cluster Member Request Receiver -> Cluster Member Primary Storage or Replicating Cluster Member
Storing Cluster Member does the free and writes into its transfer table
Storing Cluster Member (Transfer worker) -> Replicating / Primary Cluster Members
*/
func (ms *memberStorage) handleFreeRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
var err error
dsname := request.Args[RPStoreName].(string)
cloc := toUInt64(request.Args[RPLoc])
// Get the translation
transRec, ok, err := ms.at.TransClusterLoc(dsname, cloc)
if ok {
sm := ms.dataStorage(dsname, false)
if sm != nil {
// Remove the translation
_, _, err = ms.at.RemoveTransClusterLoc(dsname, cloc)
if err == nil {
// Remove from the local storage
err = sm.Free(transRec.Loc)
if !request.Transfer {
// Add transfer request for replication
// At this point the operation has succeeded. We still need to
// replicate the change to all the replicating members but
// any errors happening during this shall not fail this operation.
// The next rebalancing will then synchronize all members again.
ms.at.AddTransferRequest(distTable.OtherReplicationMembers(cloc, ms.ds.MemberManager.Name()),
&DataRequest{RTFree, map[DataRequestArg]interface{}{
RPStoreName: dsname,
RPLoc: cloc,
}, nil, true})
}
return err
}
}
}
if err == nil {
err = fmt.Errorf("Cluster slot not found (%v - Location: %v)", ms.ds.MemberManager.Name(), cloc)
}
return err
}
/*
handleFetchRequest retrieves an object or, if the fetch flag is false, only checks its existence.
*/
func (ms *memberStorage) handleFetchRequest(distTable *DistributionTable,
request *DataRequest, response *interface{}, fetch bool) error {
var err error
dsname := request.Args[RPStoreName].(string)
cloc := toUInt64(request.Args[RPLoc])
// Get the translation
transRec, ok, err := ms.at.TransClusterLoc(dsname, cloc)
if ok {
// Check if the data should be retrieved
if !fetch {
*response = true
return nil
}
sm := ms.dataStorage(dsname, false)
if sm != nil {
var res []byte
err = sm.Fetch(transRec.Loc, &res)
if err == nil {
*response = res
return nil
}
}
} else if !fetch {
*response = false
return err
}
if err == nil {
err = fmt.Errorf("Cluster slot not found (%v - Location: %v)", ms.ds.MemberManager.Name(), cloc)
}
return err
}
/*
handleRebalanceRequest processes rebalance requests.
*/
func (ms *memberStorage) handleRebalanceRequest(distTable *DistributionTable, request *DataRequest, response *interface{}) error {
var err error
var tr *translationRec
var found bool
var res interface{}
var lloc uint64
handleError := func(err error) {
if err != nil {
manager.LogDebug(ms.ds.MemberManager.Name(), fmt.Sprintf("(Store): Error during rebalancing request handling: %v", err))
}
}
// Get the location ranges for this member and locations which are replicated on this member.
storeRangeStart, storeRangeStop := distTable.MemberRange(ms.ds.MemberManager.Name())
repRangeStart, repRangeStop := distTable.ReplicationRange(ms.ds.MemberManager.Name())
// Get the request data
rsource := request.Args[RPSrc].(string)
smnames := request.Args[RPStoreName]
locs := request.Args[RPLoc]
vers := request.Args[RPVer]
for i, cloc := range locs.([]uint64) {
// Check if there was an error from the previous iteration
handleError(err)
smname := smnames.([]string)[i]
ver := vers.([]uint64)[i]
// Do not proceed if there is an error or if the location is out of
// range of responsibility
notInStoreRange := cloc < storeRangeStart || cloc > storeRangeStop
notInRepRange := cloc < repRangeStart || cloc > repRangeStop
// Check if the location exists in the local storage
tr, found, err = ms.at.TransClusterLoc(smname, cloc)
if err != nil || (notInStoreRange && notInRepRange) {
// Skip the location if there was an error or if this member
// is not relevant for the location in question (either as primary
// storage member or as replica)
continue
}
if found {
// Check if the version is newer and update the local record if it is
if tr.Ver < ver {
// Local record exists and needs to be updated
sm := ms.dataStorage(smname, false)
// Fetch the data from the remote machine
res, err = ms.ds.sendDataRequest(rsource, &DataRequest{RTFetch, map[DataRequestArg]interface{}{
RPStoreName: smname,
RPLoc: cloc,
}, nil, false})
if err == nil {
// Update the local storage
if err = sm.Update(tr.Loc, res); err == nil {
// Update the translation
_, _, err = ms.at.SetTransClusterLoc(smname, cloc, tr.Loc, ver)
manager.LogDebug(ms.ds.MemberManager.Name(),
fmt.Sprintf("(Store): Rebalance updated %v location: %v", smname, cloc))
}
}
}
} else {
// The data on the remote system should be inserted into the local
// datastore.
sm := ms.dataStorage(smname, true)
// Fetch the data from the remote machine
res, err = ms.ds.sendDataRequest(rsource, &DataRequest{RTFetch, map[DataRequestArg]interface{}{
RPStoreName: smname,
RPLoc: cloc,
}, nil, false})
if err == nil {
// Insert into the local storage
lloc, err = sm.Insert(res)
if err == nil {
// Add a translation
_, _, err = ms.at.SetTransClusterLoc(smname, cloc, lloc, ver)
manager.LogDebug(ms.ds.MemberManager.Name(),
fmt.Sprintf("(Store): Rebalance inserted %v location: %v", smname, cloc))
}
}
}
if err == nil {
// Check whether the sender should still hold the data
sourceSRangeStart, sourceSRangeStop := distTable.MemberRange(rsource)
sourceRRangeStart, sourceRRangeStop := distTable.ReplicationRange(rsource)
notInSourceSRange := cloc < sourceSRangeStart || cloc > sourceSRangeStop
notInSourceRRange := cloc < sourceRRangeStart || cloc > sourceRRangeStop
if notInSourceSRange && notInSourceRRange {
manager.LogDebug(ms.ds.MemberManager.Name(),
fmt.Sprintf("(Store): Rebalance removes %v location: %v from member %v",
smname, cloc, rsource))
_, err = ms.ds.sendDataRequest(rsource, &DataRequest{RTFree, map[DataRequestArg]interface{}{
RPStoreName: smname,
RPLoc: cloc,
}, nil, true})
}
}
}
handleError(err)
return nil
}
/*
dataStorage returns a storage.StorageManager which will only store byte slices.
*/
func (ms *memberStorage) dataStorage(dsname string, create bool) storage.Manager {
return ms.gs.StorageManager(LocalStoragePrefix+dsname, create)
}
/*
dump dumps the contents of a particular member storage manager as escaped strings.
(Works only for MemoryStorageManagers.)
*/
func (ms *memberStorage) dump(smname string) string {
var res string
printTransferTable := func(buf *bytes.Buffer) {
// Go through the transfer table and see if there is anything
it := hash.NewHTreeIterator(ms.at.transfer)
for it.HasNext() {
_, val := it.Next()
if val != nil {
tr := val.(*transferRec)
args, _ := json.Marshal(tr.Request.Args)
vals, ok := tr.Request.Value.([]byte)
if !ok {
vals, _ = json.Marshal(tr.Request.Value)
}
buf.WriteString(fmt.Sprintf("transfer: %v - %v %v %q\n",
tr.Members, tr.Request.RequestType, string(args), vals))
}
}
}
if smname == "" {
// Dump the contents of the MainDB if no name is given
buf := new(bytes.Buffer)
buf.WriteString(fmt.Sprintf("%v MemberStorageManager MainDB\n",
ms.ds.MemberManager.Name()))
var keys []string
for k := range ms.gs.MainDB() {
keys = append(keys, k)
}
sort.Strings(keys)
// Output local storage content with mapped cluster locations
for _, k := range keys {
v := ms.gs.MainDB()[k]
buf.WriteString(fmt.Sprintf("%v - %q\n", k, v))
}
printTransferTable(buf)
return buf.String()
}
sm := ms.dataStorage(smname, false)
if sm != nil {
// Make sure the storage manager is a MemoryStorageManager
if msm, ok := sm.(*storage.MemoryStorageManager); ok {
// Get all stored cluster locations
locmap := make(map[uint64]string)
it := hash.NewHTreeIterator(ms.at.translation)
for it.HasNext() {
k, v := it.Next()
key := string(k)
if strings.HasPrefix(key, fmt.Sprint(transPrefix, smname, "#")) {
key = string(key[len(fmt.Sprint(transPrefix, smname, "#")):])
locmap[v.(*translationRec).Loc] = fmt.Sprintf("%v (v:%v)",
key, v.(*translationRec).Ver)
}
}
buf := new(bytes.Buffer)
buf.WriteString(fmt.Sprintf("%v MemberStorageManager %v\n",
ms.ds.MemberManager.Name(), msm.Name()))
buf.WriteString("Roots: ")
// Go through root values
for i := 0; i < 10; i++ {
rootVal := msm.Root(i)
buf.WriteString(fmt.Sprintf("%v=%v ", i, rootVal))
}
buf.WriteString("\n")
var keys []uint64
for k := range msm.Data {
keys = append(keys, k)
}
sortutil.UInt64s(keys)
// Output local storage content with mapped cluster locations
for _, k := range keys {
v := msm.Data[k]
caddr := locmap[k]
buf.WriteString(fmt.Sprintf("cloc: %v - lloc: %v - %q\n",
caddr, k, v))
}
printTransferTable(buf)
res = buf.String()
}
}
return res
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"fmt"
"strconv"
"strings"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/hash"
)
/*
MaxSizeRebalanceLists is the maximum size for rebalancing lists within one rebalance request.
*/
const MaxSizeRebalanceLists = 100
/*
runRebalanceWorker flag to switch off automatic rebalancing
*/
var runRebalanceWorker = true
/*
logRebalanceWorker flag to write a log message every time automatic rebalancing is running
*/
var logRebalanceWorker = false
/*
rebalanceHousekeepingInterval defines how often housekeeping needs to run before
a rebalance task is run.
*/
var rebalanceHousekeepingInterval = 180
/*
rebalanceWorker is the background thread which handles automatic rebalancing
when the configuration of the cluster changes or to autocorrect certain errors.
*/
func (ms *memberStorage) rebalanceWorker(forceRun bool) {
// Make sure only one rebalance task is running at a time and that
// subsequent requests are not queued up
ms.rebalanceLock.Lock()
if !runRebalanceWorker || ms.rebalanceRunning {
ms.rebalanceLock.Unlock()
return
}
// Make sure rebalancing only runs every rebalanceHousekeepingInterval
if !forceRun && ms.rebalanceCounter > 0 {
ms.rebalanceCounter--
ms.rebalanceLock.Unlock()
return
}
ms.rebalanceCounter = rebalanceHousekeepingInterval
ms.rebalanceRunning = true
ms.rebalanceLock.Unlock()
defer func() {
ms.rebalanceLock.Lock()
ms.rebalanceRunning = false
ms.rebalanceLock.Unlock()
}()
if logRebalanceWorker {
manager.LogDebug(ms.ds.Name(), "(RB): Running rebalance worker task")
}
distTable, err := ms.ds.DistributionTable()
if err != nil {
manager.LogDebug(ms.ds.Name(), "(RB): Cannot rebalance not operational cluster: ",
err.Error())
return
}
// Go through all maintained data and collect storage name, location and version
it := hash.NewHTreeIterator(ms.at.translation)
for it.HasNext() {
chunks := MaxSizeRebalanceLists
maintLocs := make([]uint64, 0, MaxSizeRebalanceLists)
maintVers := make([]uint64, 0, MaxSizeRebalanceLists)
maintMgmts := make([]string, 0, MaxSizeRebalanceLists)
for it.HasNext() && chunks > 0 {
chunks--
key, val := it.Next()
if tr, ok := val.(*translationRec); ok {
smname := strings.Split(string(key[len(transPrefix):]), "#")[0]
cloc, _ := strconv.ParseUint(string(key[len(fmt.Sprint(transPrefix, smname, "#")):]), 10, 64)
maintMgmts = append(maintMgmts, smname)
maintLocs = append(maintLocs, cloc)
maintVers = append(maintVers, tr.Ver)
}
}
// Send info about maintained data to all relevant members
receiverMap := make(map[string]string)
for _, cloc := range maintLocs {
primary, replicas := distTable.LocationHome(cloc)
members := make([]string, 0, len(replicas)+1)
members = append(members, primary)
members = append(members, replicas...)
for _, member := range members {
_, ok := receiverMap[member]
if member == ms.ds.MemberManager.Name() || ok {
continue
}
receiverMap[member] = ""
request := &DataRequest{RTRebalance, map[DataRequestArg]interface{}{
RPStoreName: maintMgmts,
RPLoc: maintLocs,
RPVer: maintVers,
RPSrc: ms.ds.MemberManager.Name(),
}, nil, false}
_, err := ms.ds.sendDataRequest(member, request)
if err != nil {
manager.LogDebug(ms.ds.Name(), "(RB): ",
fmt.Sprintf("Member %v Error: %v", member, err))
}
}
}
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import "encoding/gob"
func init() {
// Make sure we can use the relevant types in a gob operation
gob.Register(&DataRequest{})
gob.Register(make(map[string]string))
}
/*
RequestType is the type of a request
*/
type RequestType string
/*
List of all possible request types
*/
const (
// Main DB
RTGetMain RequestType = "GetMain"
RTSetMain = "SetMain"
// Roots
RTGetRoot = "GetRoot"
RTSetRoot = "SetRoot"
// Insert data
RTInsert = "Insert"
// Update data
RTUpdate = "Update"
// Free data
RTFree = "Free"
// Check for data
RTExists = "Exists"
// Retrieve data
RTFetch = "Fetch"
// Rebalance data
RTRebalance = "Rebalance"
)
/*
DataRequestArg is a data request argument
*/
type DataRequestArg string
/*
List of all possible data request parameters.
*/
const (
RPStoreName DataRequestArg = "StoreName" // Name of the store
RPLoc = "Loc" // Location of data
RPVer = "Ver" // Version of data
RPRoot = "Root" // Root id
RPSrc = "Src" // Request source member
)
/*
DataRequest data structure
*/
type DataRequest struct {
RequestType RequestType // Type of request
Args map[DataRequestArg]interface{} // Request arguments
Value interface{} // Request value
Transfer bool // Flag for data transfer request
}
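// Example (hedged sketch): a DataRequest as handleInsertRequest would receive
// it for a non-transfer insert - the store name and payload are hypothetical:
//
//	request := &DataRequest{
//		RequestType: RTInsert,
//		Args: map[DataRequestArg]interface{}{
//			RPStoreName: "graph",
//		},
//		Value:    []byte("some data"),
//		Transfer: false, // replication requests set this to true and carry RPLoc
//	}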
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"fmt"
"devt.de/krotik/common/timeutil"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/hash"
)
/*
runTransferWorker flag to switch off transfer record processing
*/
var runTransferWorker = true
/*
logTransferWorker flag to write a log message every time the transfer worker task is running
*/
var logTransferWorker = false
/*
transferWorker is the background thread which handles various tasks to provide
"eventual" consistency for the cluster storage.
*/
func (ms *memberStorage) transferWorker() {
// Make sure only one transfer task is running at a time and that
// subsequent requests are not queued up
ms.transferLock.Lock()
if !runTransferWorker || ms.transferRunning {
ms.transferLock.Unlock()
return
}
ms.transferRunning = true
ms.transferLock.Unlock()
defer func() {
ms.transferLock.Lock()
ms.transferRunning = false
ms.transferLock.Unlock()
}()
if logTransferWorker {
manager.LogDebug(ms.ds.Name(), "(TR): Running transfer worker task")
}
// Go through the transfer table and try to process the tasks
var processed [][]byte
it := hash.NewHTreeIterator(ms.at.transfer)
for it.HasNext() {
key, val := it.Next()
if val != nil {
tr := val.(*transferRec)
ts, _ := timeutil.TimestampString(string(key), "UTC")
manager.LogDebug(ms.ds.Name(), "(TR): ",
fmt.Sprintf("Processing transfer request %v for %v from %v",
tr.Request.RequestType, tr.Members, ts))
// Send the request to all members
var failedMembers []string
for _, member := range tr.Members {
if _, err := ms.ds.sendDataRequest(member, tr.Request); err != nil {
manager.LogDebug(ms.ds.Name(), "(TR): ",
fmt.Sprintf("Member %v Error: %v", member, err))
failedMembers = append(failedMembers, member)
}
}
// Update or remove the transfer record
if len(failedMembers) == 0 {
processed = append(processed, key)
} else if len(failedMembers) < len(tr.Members) {
tr.Members = failedMembers
ms.at.transfer.Put(key, tr)
}
}
}
// Remove all processed transfer requests
for _, key := range processed {
ms.at.transfer.Remove(key)
}
// Flush the local storage
ms.gs.FlushAll()
// Trigger the rebalancing task - the task will only execute if it is time
go ms.rebalanceWorker(false)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package cluster
import (
"fmt"
"strconv"
"devt.de/krotik/common/errorutil"
)
/*
toUInt64 safely converts an interface{} to a uint64.
*/
func toUInt64(v interface{}) uint64 {
if vu, ok := v.(uint64); ok {
return vu
}
cloc, err := strconv.ParseInt(fmt.Sprint(v), 10, 64)
errorutil.AssertOk(err)
return uint64(cloc)
}
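// Example usage (sketch):
//
//	toUInt64(uint64(42)) // 42 - passed through directly
//	toUInt64("42")       // 42 - parsed from its string representation
//	toUInt64(float64(7)) // 7  - fmt.Sprint(float64(7)) yields "7"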
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package config
import (
"fmt"
"path"
"strconv"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/fileutil"
)
// Global variables
// ================
/*
ProductVersion is the current version of EliasDB
*/
const ProductVersion = "1.4.0"
/*
DefaultConfigFile is the default config file which will be used to configure EliasDB
*/
var DefaultConfigFile = "eliasdb.config.json"
/*
Known configuration options for EliasDB
*/
const (
MemoryOnlyStorage = "MemoryOnlyStorage"
LocationDatastore = "LocationDatastore"
LocationHTTPS = "LocationHTTPS"
LocationWebFolder = "LocationWebFolder"
LocationUserDB = "LocationUserDB"
LocationAccessDB = "LocationAccessDB"
HTTPSCertificate = "HTTPSCertificate"
HTTPSKey = "HTTPSKey"
LockFile = "LockFile"
HTTPSHost = "HTTPSHost"
HTTPSPort = "HTTPSPort"
CookieMaxAgeSeconds = "CookieMaxAgeSeconds"
EnableReadOnly = "EnableReadOnly"
EnableECALScripts = "EnableECALScripts"
EnableECALDebugServer = "EnableECALDebugServer"
EnableWebFolder = "EnableWebFolder"
EnableAccessControl = "EnableAccessControl"
EnableWebTerminal = "EnableWebTerminal"
EnableCluster = "EnableCluster"
EnableClusterTerminal = "EnableClusterTerminal"
ResultCacheMaxSize = "ResultCacheMaxSize"
ResultCacheMaxAgeSeconds = "ResultCacheMaxAgeSeconds"
ClusterStateInfoFile = "ClusterStateInfoFile"
ClusterConfigFile = "ClusterConfigFile"
ClusterLogHistory = "ClusterLogHistory"
ECALScriptFolder = "ECALScriptFolder"
ECALWorkerCount = "ECALWorkerCount"
ECALEntryScript = "ECALEntryScript"
ECALLogLevel = "ECALLogLevel"
ECALLogFile = "ECALLogFile"
ECALDebugServerHost = "ECALDebugServerHost"
ECALDebugServerPort = "ECALDebugServerPort"
)
/*
DefaultConfig is the default configuration
*/
var DefaultConfig = map[string]interface{}{
MemoryOnlyStorage: false,
EnableReadOnly: false,
EnableECALScripts: false,
EnableECALDebugServer: false,
EnableWebFolder: true,
EnableAccessControl: false,
EnableWebTerminal: true,
EnableCluster: false,
EnableClusterTerminal: false,
LocationDatastore: "db",
LocationHTTPS: "ssl",
LocationWebFolder: "web",
LocationUserDB: "users.db",
LocationAccessDB: "access.db",
HTTPSHost: "127.0.0.1",
HTTPSPort: "9090",
CookieMaxAgeSeconds: "86400",
HTTPSCertificate: "cert.pem",
HTTPSKey: "key.pem",
LockFile: "eliasdb.lck",
ResultCacheMaxSize: 0,
ResultCacheMaxAgeSeconds: 0,
ClusterStateInfoFile: "cluster.stateinfo",
ClusterConfigFile: "cluster.config.json",
ClusterLogHistory: 100.0,
ECALScriptFolder: "scripts",
ECALWorkerCount: 10,
ECALEntryScript: "main.ecal",
ECALLogLevel: "info",
ECALLogFile: "",
ECALDebugServerHost: "127.0.0.1",
ECALDebugServerPort: "33274",
}
/*
Config is the actual config which is used
*/
var Config map[string]interface{}
/*
LoadConfigFile loads a given config file. If the config file does not exist it is
created with the default options.
*/
func LoadConfigFile(configfile string) error {
var err error
Config, err = fileutil.LoadConfig(configfile, DefaultConfig)
return err
}
/*
LoadDefaultConfig loads the default configuration.
*/
func LoadDefaultConfig() {
data := make(map[string]interface{})
for k, v := range DefaultConfig {
data[k] = v
}
Config = data
}
// Helper functions
// ================
/*
Str reads a config value as a string value.
*/
func Str(key string) string {
return fmt.Sprint(Config[key])
}
/*
Int reads a config value as an int value.
*/
func Int(key string) int64 {
ret, err := strconv.ParseInt(fmt.Sprint(Config[key]), 10, 64)
errorutil.AssertTrue(err == nil,
fmt.Sprintf("Could not parse config key %v: %v", key, err))
return ret
}
/*
Bool reads a config value as a boolean value.
*/
func Bool(key string) bool {
ret, err := strconv.ParseBool(fmt.Sprint(Config[key]))
errorutil.AssertTrue(err == nil,
fmt.Sprintf("Could not parse config key %v: %v", key, err))
return ret
}
/*
WebPath returns a path relative to the web directory.
*/
func WebPath(parts ...string) string {
return path.Join("web", path.Join(parts...))
}
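// Example (sketch using the defaults defined above, as seen from outside
// this package):
//
//	config.LoadDefaultConfig()
//	port := config.Str(config.HTTPSPort)           // "9090"
//	workers := config.Int(config.ECALWorkerCount)  // 10
//	clustered := config.Bool(config.EnableCluster) // false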
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package console
import (
"bytes"
"fmt"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/api/ac"
)
// Command: ver
// ============
/*
CommandVer is a command name.
*/
const CommandVer = "ver"
/*
CmdVer displays server version information.
*/
type CmdVer struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdVer) Name() string {
return CommandVer
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdVer) ShortDescription() string {
return "Displays server version information."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdVer) LongDescription() string {
return "Displays server version information."
}
/*
Run executes the command.
*/
func (c *CmdVer) Run(args []string, capi CommandConsoleAPI) error {
fmt.Fprintln(capi.Out(), fmt.Sprintf("Connected to: %v", capi.URL()))
res, err := capi.Req(api.EndpointAbout, "GET", nil)
if err == nil {
data := res.(map[string]interface{})
fmt.Fprintln(capi.Out(), fmt.Sprintf("%v %v (REST versions: %v)",
data["product"], data["version"], data["api_versions"]))
}
return err
}
// Command: whoami
// ===============
/*
CommandWhoAmI is a command name.
*/
const CommandWhoAmI = "whoami"
/*
CmdWhoAmI returns the current login status.
*/
type CmdWhoAmI struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdWhoAmI) Name() string {
return CommandWhoAmI
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdWhoAmI) ShortDescription() string {
return "Returns the current login status."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdWhoAmI) LongDescription() string {
return "Returns the current login status."
}
/*
Run executes the command.
*/
func (c *CmdWhoAmI) Run(args []string, capi CommandConsoleAPI) error {
res, err := capi.Req(ac.EndpointWhoAmI, "GET", nil)
if err == nil {
var out string
o := res.(map[string]interface{})
if o["logged_in"].(bool) {
out = fmt.Sprintf("%s", o["username"])
} else {
out = "Nobody - not logged in"
}
fmt.Fprintln(capi.Out(), out)
}
return err
}
// Command: export
// ===============
/*
CommandExport is a command name.
*/
const CommandExport = "export"
/*
CmdExport exports the data which is currently in the export buffer.
*/
type CmdExport struct {
exportFunc func([]string, *bytes.Buffer) error
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdExport) Name() string {
return CommandExport
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdExport) ShortDescription() string {
return "Exports the last output."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdExport) LongDescription() string {
return "Exports the data which is currently in the export buffer. The export " +
"buffer is filled with the previous command output in a machine readable form."
}
/*
Run executes the command.
*/
func (c *CmdExport) Run(args []string, capi CommandConsoleAPI) error {
return c.exportFunc(args, capi.ExportBuffer())
}
// Command: login
// ==============
/*
CommandLogin is a command name.
*/
const CommandLogin = "login"
/*
CmdLogin placeholder for the login command.
*/
type CmdLogin struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdLogin) Name() string {
return CommandLogin
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdLogin) ShortDescription() string {
return "Log in as a user."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdLogin) LongDescription() string {
return "Log in as a user."
}
/*
Run executes the command.
*/
func (c *CmdLogin) Run(args []string, capi CommandConsoleAPI) error {
return nil // Functionality is implemented in the command processor
}
// Command: logout
// ===============
/*
CommandLogout is a command name.
*/
const CommandLogout = "logout"
/*
CmdLogout placeholder for the logout command.
*/
type CmdLogout struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdLogout) Name() string {
return CommandLogout
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdLogout) ShortDescription() string {
return "Log out the current user."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdLogout) LongDescription() string {
return "Log out the current user."
}
/*
Run executes the command.
*/
func (c *CmdLogout) Run(args []string, capi CommandConsoleAPI) error {
return nil // Functionality is implemented in the command processor
}
// Command: help
// =============
/*
CommandHelp is a command name.
*/
const CommandHelp = "help"
/*
CmdHelp displays descriptions of other commands.
*/
type CmdHelp struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdHelp) Name() string {
return CommandHelp
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdHelp) ShortDescription() string {
return "Display descriptions for all available commands."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdHelp) LongDescription() string {
return "Display descriptions for all available commands."
}
/*
Run executes the command.
*/
func (c *CmdHelp) Run(args []string, capi CommandConsoleAPI) error {
cmds := capi.Commands()
if len(args) > 0 {
name := args[0]
for _, cmd := range cmds {
if cmd.Name() == name {
capi.ExportBuffer().WriteString(cmd.LongDescription())
fmt.Fprintln(capi.Out(), cmd.LongDescription())
return nil
}
}
return fmt.Errorf("Unknown command: %s", name)
}
var tab []string
tab = append(tab, "Command")
tab = append(tab, "Description")
for _, cmd := range cmds {
tab = append(tab, cmd.Name())
tab = append(tab, cmd.ShortDescription())
}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(tab, 2))
fmt.Fprint(capi.Out(), stringutil.PrintStringTable(tab, 2))
return nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package console
import (
"fmt"
"net/url"
"sort"
"strings"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api/v1"
)
// Command: info
// =============
/*
CommandInfo is a command name.
*/
const CommandInfo = "info"
/*
CmdInfo returns general database information.
*/
type CmdInfo struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdInfo) Name() string {
return CommandInfo
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdInfo) ShortDescription() string {
return "Returns general database information."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdInfo) LongDescription() string {
return "Returns general database information such as known node kinds, known attributes, etc ..."
}
/*
Run executes the command.
*/
func (c *CmdInfo) Run(args []string, capi CommandConsoleAPI) error {
res, err := capi.Req(v1.EndpointInfoQuery, "GET", nil)
if err == nil {
var data = res.(map[string]interface{})
var keys, tab []string
tab = append(tab, "Kind")
tab = append(tab, "Count")
counts := data["node_counts"].(map[string]interface{})
for k := range counts {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
c := counts[k]
tab = append(tab, k)
tab = append(tab, fmt.Sprintf("%10v", c))
}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(tab, 2))
fmt.Fprint(capi.Out(), stringutil.PrintGraphicStringTable(tab, 2, 1,
stringutil.SingleLineTable))
}
return err
}
// Command: part
// =============
/*
CommandPart is a command name.
*/
const CommandPart = "part"
/*
CmdPart displays or sets the current partition.
*/
type CmdPart struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdPart) Name() string {
return CommandPart
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdPart) ShortDescription() string {
return "Displays or sets the current partition."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdPart) LongDescription() string {
return "Displays or sets the current partition."
}
/*
Run executes the command.
*/
func (c *CmdPart) Run(args []string, capi CommandConsoleAPI) error {
if len(args) == 0 {
fmt.Fprintln(capi.Out(), capi.Partition())
} else {
capi.SetPartition(args[0])
fmt.Fprintln(capi.Out(),
fmt.Sprintf("Current partition is: %s", args[0]))
}
return nil
}
// Command: find
// =============
/*
CommandFind is a command name.
*/
const CommandFind = "find"
/*
CmdFind does a full-text search of the database.
*/
type CmdFind struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdFind) Name() string {
return CommandFind
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdFind) ShortDescription() string {
return "Do a full-text search of the database."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdFind) LongDescription() string {
return "Do a full-text search of the database."
}
/*
Run executes the command.
*/
func (c *CmdFind) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a search phrase")
}
phrase := url.QueryEscape(strings.Join(args, " "))
res, err := capi.Req(fmt.Sprintf("%s?lookup=1&text=%s", v1.EndpointFindQuery, phrase), "GET", nil)
if err == nil {
partitions := res.(map[string]interface{})
for _, p := range stringutil.MapKeys(partitions) {
kinds := partitions[p].(map[string]interface{})
for _, k := range stringutil.MapKeys(kinds) {
nodes := kinds[k].([]interface{})
// Construct table header
header := []string{"Partition", p, "Kind", k}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(header, 2))
fmt.Fprint(capi.Out(), stringutil.PrintStringTable(header, 2))
// Construct table
node := nodes[0].(map[string]interface{})
attrs := stringutil.MapKeys(node)
var tab []string
tab = append(tab, attrs...)
for _, n := range nodes {
node := n.(map[string]interface{})
for _, attr := range attrs {
tab = append(tab, fmt.Sprint(node[attr]))
}
}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(tab, len(attrs)))
fmt.Fprint(capi.Out(), stringutil.PrintGraphicStringTable(tab, len(attrs), 1,
stringutil.SingleLineTable))
fmt.Fprintln(capi.Out(), "")
}
}
}
return nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package console
import (
"encoding/json"
"fmt"
"sort"
"strings"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api/ac"
)
// Command: users
// ==============
/*
CommandUsers is a command name.
*/
const CommandUsers = "users"
/*
CmdUsers returns a list of all users.
*/
type CmdUsers struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdUsers) Name() string {
return CommandUsers
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdUsers) ShortDescription() string {
return "Returns a list of all users."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdUsers) LongDescription() string {
return "Returns a table of all users and their groups."
}
/*
Run executes the command.
*/
func (c *CmdUsers) Run(args []string, capi CommandConsoleAPI) error {
res, err := capi.Req(ac.EndpointUser+"u/", "GET", nil)
if err == nil {
var data = res.([]interface{})
var tab []string
tab = append(tab, "Username")
tab = append(tab, "Groups")
for _, d := range data {
u := d.(map[string]interface{})
tab = append(tab, fmt.Sprint(u["username"]))
var groups []string
for _, g := range u["groups"].([]interface{}) {
groups = append(groups, fmt.Sprint(g))
}
tab = append(tab, strings.Join(groups, "/"))
}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(tab, 2))
fmt.Fprint(capi.Out(), stringutil.PrintGraphicStringTable(tab, 2, 1,
stringutil.SingleLineTable))
}
return err
}
// Command: groups
// ===============
/*
CommandGroups is a command name.
*/
const CommandGroups = "groups"
/*
CmdGroups returns a list of all groups and their permissions.
*/
type CmdGroups struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdGroups) Name() string {
return CommandGroups
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdGroups) ShortDescription() string {
return "Returns a list of all groups and their permissions."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdGroups) LongDescription() string {
return "Returns a list of all groups and their permissions."
}
/*
Run executes the command.
*/
func (c *CmdGroups) Run(args []string, capi CommandConsoleAPI) error {
res, err := capi.Req(ac.EndpointUser+"g/", "GET", nil)
if err == nil {
var data = res.(map[string]interface{})
var groups []string
var tab []string
tab = append(tab, "Group")
tab = append(tab, "Path")
tab = append(tab, "Permissions")
for g := range data {
groups = append(groups, g)
}
sort.Strings(groups)
for _, g := range groups {
var paths []string
perms := data[g].(map[string]interface{})
for p := range perms {
paths = append(paths, p)
}
sort.Strings(paths)
if len(paths) > 0 {
for i, p := range paths {
if i == 0 {
tab = append(tab, g)
} else {
tab = append(tab, "")
}
tab = append(tab, p)
tab = append(tab, fmt.Sprint(perms[p]))
}
} else {
tab = append(tab, g)
tab = append(tab, "")
tab = append(tab, "")
}
}
capi.ExportBuffer().WriteString(stringutil.PrintCSVTable(tab, 3))
fmt.Fprint(capi.Out(), stringutil.PrintGraphicStringTable(tab, 3, 1,
stringutil.SingleLineTable))
}
return err
}
// Command: useradd
// ================
/*
CommandUseradd is a command name.
*/
const CommandUseradd = "useradd"
/*
CmdUseradd adds a user.
*/
type CmdUseradd struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdUseradd) Name() string {
return CommandUseradd
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdUseradd) ShortDescription() string {
return "Adds a user to the system."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdUseradd) LongDescription() string {
return "Adds a user to the system."
}
/*
Run executes the command.
*/
func (c *CmdUseradd) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a username")
}
user := args[0]
pass := capi.AskPassword()
data, err := json.Marshal(map[string]interface{}{
"password": pass,
"user_data": map[string]interface{}{},
"group_list": []string{},
})
if err == nil {
_, err = capi.Req(ac.EndpointUser+"u/"+user, "POST", data)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("User %s was created", user))
}
}
return err
}
// Command: newpass
// ================
/*
CommandNewpass is a command name.
*/
const CommandNewpass = "newpass"
/*
CmdNewpass changes the password of a user.
*/
type CmdNewpass struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdNewpass) Name() string {
return CommandNewpass
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdNewpass) ShortDescription() string {
return "Changes the password of a user."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdNewpass) LongDescription() string {
return "Changes the password of a user."
}
/*
Run executes the command.
*/
func (c *CmdNewpass) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a username")
}
user := args[0]
pass := capi.AskPassword()
data, err := json.Marshal(map[string]interface{}{
"password": pass,
})
if err == nil {
_, err = capi.Req(ac.EndpointUser+"u/"+user, "PUT", data)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("Password for user %s was changed", user))
}
}
return err
}
// Command: joingroup
// ==================
/*
CommandJoingroup is a command name.
*/
const CommandJoingroup = "joingroup"
/*
CmdJoingroup joins a user to a group.
*/
type CmdJoingroup struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdJoingroup) Name() string {
return CommandJoingroup
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdJoingroup) ShortDescription() string {
return "Joins a user to a group."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdJoingroup) LongDescription() string {
return "Joins a user to a group."
}
/*
Run executes the command.
*/
func (c *CmdJoingroup) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 2 {
return fmt.Errorf("Please specify a username and a group")
}
user := args[0]
group := args[1]
res, err := capi.Req(ac.EndpointUser+"u/"+user, "GET", nil)
if err == nil {
groups := res.(map[string]interface{})["groups"].([]interface{})
for _, g := range groups {
if g == group {
err = fmt.Errorf("User %s is already member of group %s", user, group)
break
}
}
if err == nil {
var data []byte
data, err = json.Marshal(map[string]interface{}{
"group_list": append(groups, group),
})
if err == nil {
_, err = capi.Req(ac.EndpointUser+"u/"+user, "PUT", data)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("User %s has joined group %s", user, group))
}
}
}
}
return err
}
// Command: leavegroup
// ===================
/*
CommandLeavegroup is a command name.
*/
const CommandLeavegroup = "leavegroup"
/*
CmdLeavegroup removes a user from a group.
*/
type CmdLeavegroup struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdLeavegroup) Name() string {
return CommandLeavegroup
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdLeavegroup) ShortDescription() string {
return "Removes a user from a group."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdLeavegroup) LongDescription() string {
return "Removes a user from a group."
}
/*
Run executes the command.
*/
func (c *CmdLeavegroup) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 2 {
return fmt.Errorf("Please specify a username and a group")
}
user := args[0]
group := args[1]
res, err := capi.Req(ac.EndpointUser+"u/"+user, "GET", nil)
if err == nil {
var newgroups []interface{}
groups := res.(map[string]interface{})["groups"].([]interface{})
for i, g := range groups {
if g == group {
newgroups = append(groups[:i], groups[i+1:]...)
break
}
}
if newgroups != nil {
var data []byte
data, err = json.Marshal(map[string]interface{}{
"group_list": newgroups,
})
if err == nil {
_, err = capi.Req(ac.EndpointUser+"u/"+user, "PUT", data)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("User %s has left group %s", user, group))
}
}
} else {
err = fmt.Errorf("User %s is not in group %s", user, group)
}
}
return err
}
// Command: userdel
// ================
/*
CommandUserdel is a command name.
*/
const CommandUserdel = "userdel"
/*
CmdUserdel deletes a user.
*/
type CmdUserdel struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdUserdel) Name() string {
return CommandUserdel
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdUserdel) ShortDescription() string {
return "Removes a user from the system."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdUserdel) LongDescription() string {
return "Removes a user from the system."
}
/*
Run executes the command.
*/
func (c *CmdUserdel) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a username")
}
user := args[0]
_, err := capi.Req(ac.EndpointUser+"u/"+user, "DELETE", nil)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("User %s was deleted", user))
}
return err
}
// Command: groupadd
// =================
/*
CommandGroupadd is a command name.
*/
const CommandGroupadd = "groupadd"
/*
CmdGroupadd adds a new group.
*/
type CmdGroupadd struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdGroupadd) Name() string {
return CommandGroupadd
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdGroupadd) ShortDescription() string {
return "Adds a group to the system."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdGroupadd) LongDescription() string {
return "Adds a group to the system."
}
/*
Run executes the command.
*/
func (c *CmdGroupadd) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a groupname")
}
group := args[0]
_, err := capi.Req(ac.EndpointUser+"g/"+group, "POST", nil)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("Group %s was created", group))
}
return err
}
// Command: groupdel
// =================
/*
CommandGroupdel is a command name.
*/
const CommandGroupdel = "groupdel"
/*
CmdGroupdel deletes a group.
*/
type CmdGroupdel struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdGroupdel) Name() string {
return CommandGroupdel
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdGroupdel) ShortDescription() string {
return "Removes a group from the system."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdGroupdel) LongDescription() string {
return "Removes a group from the system."
}
/*
Run executes the command.
*/
func (c *CmdGroupdel) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 1 {
return fmt.Errorf("Please specify a groupname")
}
group := args[0]
_, err := capi.Req(ac.EndpointUser+"g/"+group, "DELETE", nil)
if err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("Group %s was deleted", group))
}
return err
}
// Command: grantperm
// ==================
/*
CommandGrantperm is a command name.
*/
const CommandGrantperm = "grantperm"
/*
CmdGrantperm grants a new permission to a group.
*/
type CmdGrantperm struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdGrantperm) Name() string {
return CommandGrantperm
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdGrantperm) ShortDescription() string {
return "Grants a new permission to a group."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdGrantperm) LongDescription() string {
return "Grants a new permission to a group. Specify first the permission " +
"in CRUD format (Create, Read, Update or Delete), then a resource path and " +
"then a group name."
}
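/*
Example console usage (illustrative values - the resource path and the
group name are assumptions):

    grantperm CRUD /db/main/n/Person public
*/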
/*
Run executes the command.
*/
func (c *CmdGrantperm) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 3 {
return fmt.Errorf("Please specify a permission, a resource path and a groupname")
}
perm := args[0]
path := args[1]
group := args[2]
res, err := capi.Req(ac.EndpointUser+"g/"+group, "GET", nil)
if err == nil {
var data []byte
perms := res.(map[string]interface{})
// Merge in new permission
perms[path] = perm
if data, err = json.Marshal(perms); err == nil {
if _, err = capi.Req(ac.EndpointUser+"g/"+group, "PUT", data); err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("Permission %s on %s was granted to %s", perm, path, group))
}
}
}
return err
}
// Command: revokeperm
// ===================
/*
CommandRevokeperm is a command name.
*/
const CommandRevokeperm = "revokeperm"
/*
CmdRevokeperm revokes a group's permissions on a resource.
*/
type CmdRevokeperm struct {
}
/*
Name returns the command name (as it should be typed)
*/
func (c *CmdRevokeperm) Name() string {
return CommandRevokeperm
}
/*
ShortDescription returns a short description of the command (single line)
*/
func (c *CmdRevokeperm) ShortDescription() string {
return "Revokes permissions to a resource for a group."
}
/*
LongDescription returns an extensive description of the command (can be multiple lines)
*/
func (c *CmdRevokeperm) LongDescription() string {
return "Revokes permissions to a resource for a group."
}
/*
Run executes the command.
*/
func (c *CmdRevokeperm) Run(args []string, capi CommandConsoleAPI) error {
if len(args) < 2 {
return fmt.Errorf("Please specify a resource path and a groupname")
}
path := args[0]
group := args[1]
res, err := capi.Req(ac.EndpointUser+"g/"+group, "GET", nil)
if err == nil {
var data []byte
perms := res.(map[string]interface{})
// Remove the permission
delete(perms, path)
if data, err = json.Marshal(perms); err == nil {
if _, err = capi.Req(ac.EndpointUser+"g/"+group, "PUT", data); err == nil {
fmt.Fprintln(capi.Out(), fmt.Sprintf("All permissions on %s were revoked for %s", path, group))
}
}
}
return err
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package console contains the console command processor for EliasDB.
*/
package console
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"sort"
"strings"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/api/ac"
"devt.de/krotik/eliasdb/config"
)
/*
NewConsole creates a new Console object which can parse and execute given
commands from the given Reader and outputs the result to the Writer. It
optionally exports data with the given export function via the export command.
Export is disabled if no export function is defined.
*/
func NewConsole(url string, out io.Writer, getCredentials func() (string, string),
getPassword func() string, exportFunc func([]string, *bytes.Buffer) error) CommandConsole {
cmdMap := make(map[string]Command)
cmdMap[CommandHelp] = &CmdHelp{}
cmdMap[CommandVer] = &CmdVer{}
// Adding commands specific to access control
if config.Bool(config.EnableAccessControl) {
cmdMap[CommandLogin] = &CmdLogin{}
cmdMap[CommandLogout] = &CmdLogout{}
cmdMap[CommandWhoAmI] = &CmdWhoAmI{}
cmdMap[CommandUsers] = &CmdUsers{}
cmdMap[CommandGroups] = &CmdGroups{}
cmdMap[CommandUseradd] = &CmdUseradd{}
cmdMap[CommandGroupadd] = &CmdGroupadd{}
cmdMap[CommandUserdel] = &CmdUserdel{}
cmdMap[CommandGroupdel] = &CmdGroupdel{}
cmdMap[CommandNewpass] = &CmdNewpass{}
cmdMap[CommandJoingroup] = &CmdJoingroup{}
cmdMap[CommandLeavegroup] = &CmdLeavegroup{}
cmdMap[CommandGrantperm] = &CmdGrantperm{}
cmdMap[CommandRevokeperm] = &CmdRevokeperm{}
}
cmdMap[CommandInfo] = &CmdInfo{}
cmdMap[CommandPart] = &CmdPart{}
cmdMap[CommandFind] = &CmdFind{}
// Add export if we got an export function
if exportFunc != nil {
cmdMap[CommandExport] = &CmdExport{exportFunc}
}
c := &EliasDBConsole{url, "main", out, bytes.NewBuffer(nil), nil,
nil, false, cmdMap, getCredentials, getPassword}
c.childConsoles = []CommandConsole{&EQLConsole{c}, &GraphQLConsole{c}}
return c
}
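/*
A minimal usage sketch (illustrative - the URL and the credential callbacks
are assumptions; passing nil for exportFunc disables the export command):

    console := NewConsole("http://localhost:9090", os.Stdout,
        func() (string, string) { return "elias", "elias" },
        func() string { return "" },
        nil)

    handled, err := console.Run("help")
*/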
/*
CommandConsole is the main interface for command processors.
*/
type CommandConsole interface {
/*
Run executes one or more commands. It returns an error if the command
had an unexpected result and a flag if the command was handled.
*/
Run(cmd string) (bool, error)
/*
Commands returns a sorted list of all available commands.
*/
Commands() []Command
}
/*
CommandConsoleAPI is the console interface which commands can use to communicate with the server.
*/
type CommandConsoleAPI interface {
CommandConsole
/*
Authenticate authenticates the user if necessary.
*/
Authenticate(force bool)
/*
URL returns the current connection URL.
*/
URL() string
/*
Partition returns the current partition.
*/
Partition() string
/*
SetPartition sets the current partition.
*/
SetPartition(string)
/*
AskPassword asks the user for a password.
*/
AskPassword() string
/*
Req is a convenience function to send common requests.
*/
Req(endpoint string, method string, content []byte) (interface{}, error)
/*
SendRequest sends a request to the connected server. The calling code of the
function can specify the contentType (e.g. application/json), the method
(e.g. GET), the content (for POST, PUT and DELETE requests) and a request
modifier function which can be used to modify the request object before the
request to the server is made.
*/
SendRequest(endpoint string, contentType string, method string,
content []byte, reqMod func(*http.Request)) (string, *http.Response, error)
/*
Out returns a writer which can be used to write to the console.
*/
Out() io.Writer
/*
ExportBuffer returns a buffer which can be used to write exportable data.
*/
ExportBuffer() *bytes.Buffer
}
/*
CommError is a communication error from the ConsoleAPI.
*/
type CommError struct {
err error // Nice error message
Resp *http.Response // Error response from the REST API
}
/*
Error returns a textual representation of this error.
*/
func (c *CommError) Error() string {
return c.err.Error()
}
/*
Command describes an available command.
*/
type Command interface {
/*
Name returns the command name (as it should be typed).
*/
Name() string
/*
ShortDescription returns a short description of the command (single line).
*/
ShortDescription() string
/*
LongDescription returns an extensive description of the command (can be multiple lines).
*/
LongDescription() string
/*
Run executes the command.
*/
Run(args []string, capi CommandConsoleAPI) error
}
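/*
A custom command implements the Command interface and is registered in the
console's CommandMap. The following sketch is illustrative and not part of
the shipped command set; it uses the console API to query the about endpoint
(api.EndpointAbout, assuming the api package is imported) and print the
result:

    type CmdPing struct {
    }

    func (c *CmdPing) Name() string             { return "ping" }
    func (c *CmdPing) ShortDescription() string { return "Checks the server connection." }
    func (c *CmdPing) LongDescription() string  { return "Checks the server connection." }

    func (c *CmdPing) Run(args []string, capi CommandConsoleAPI) error {
        res, err := capi.Req(api.EndpointAbout, "GET", nil)
        if err == nil {
            fmt.Fprintln(capi.Out(), res)
        }
        return err
    }
*/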
// EliasDB Console
// ===============
/*
EliasDBConsole implements the basic console functionality like login and version.
*/
type EliasDBConsole struct {
url string // Current server url (e.g. http://localhost:9090)
part string // Current partition
out io.Writer // Output for this console
export *bytes.Buffer // Export buffer
childConsoles []CommandConsole // List of child consoles
authCookie *http.Cookie // User token
credsAsked bool // Flag if the credentials have been asked
CommandMap map[string]Command // Map of registered commands
GetCredentials func() (string, string) // Ask the user for credentials
GetPassword func() string // Ask the user for a password
}
/*
URL returns the current connected server URL.
*/
func (c *EliasDBConsole) URL() string {
return c.url
}
/*
Out returns a writer which can be used to write to the console.
*/
func (c *EliasDBConsole) Out() io.Writer {
return c.out
}
/*
Partition returns the current partition.
*/
func (c *EliasDBConsole) Partition() string {
return c.part
}
/*
SetPartition sets the current partition.
*/
func (c *EliasDBConsole) SetPartition(part string) {
c.part = part
}
/*
AskPassword asks the user for a password.
*/
func (c *EliasDBConsole) AskPassword() string {
return c.GetPassword()
}
/*
ExportBuffer returns a buffer which can be used to write exportable data.
*/
func (c *EliasDBConsole) ExportBuffer() *bytes.Buffer {
return c.export
}
/*
Run executes one or more commands. It returns an error if the command
had an unexpected result and a flag if the command was handled.
*/
func (c *EliasDBConsole) Run(cmd string) (bool, error) {
// First split a line with multiple commands
cmds := strings.Split(cmd, ";")
for _, cmd := range cmds {
// Run the command and return if there is an error
if ok, err := c.RunCommand(cmd); err != nil {
// Return if there was an unexpected error
return false, err
} else if !ok {
// Try child consoles
for _, c := range c.childConsoles {
if ok, err := c.Run(cmd); err != nil || ok {
return ok, err
}
}
return false, fmt.Errorf("Unknown command")
}
}
// Everything was handled
return true, nil
}
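/*
Multiple commands can be chained in a single line using ";" as a separator,
e.g. (illustrative):

    console.Run("part main; info")
*/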
/*
RunCommand executes a single command. It returns an error for unexpected results and
a flag if the command was handled.
*/
func (c *EliasDBConsole) RunCommand(cmdString string) (bool, error) {
cmdSplit := strings.Fields(cmdString)
if len(cmdSplit) > 0 {
cmd := cmdSplit[0]
args := cmdSplit[1:]
// Reset the export buffer if we are not exporting
if cmd != CommandExport {
c.export.Reset()
}
if config.Bool(config.EnableAccessControl) {
// Extra commands when access control is enabled
if cmd == "logout" {
// Special command "logout" to remove the current auth token
c.authCookie = nil
fmt.Fprintln(c.out, "Current user logged out.")
} else if cmd != "ver" && cmd != "whoami" && cmd != "help" &&
cmd != "?" && cmd != "export" {
// Do not authenticate when running local commands.
// Authenticate the user - this is a NOP if the user is already
// authenticated, unless the command is "login", in which case the
// user is reauthenticated.
c.Authenticate(cmd == "login")
}
}
if cmdObj, ok := c.CommandMap[cmd]; ok {
return true, cmdObj.Run(args, c)
} else if cmd == "?" {
return true, c.CommandMap["help"].Run(args, c)
}
}
return false, nil
}
/*
Commands returns a sorted list of all available commands.
*/
func (c *EliasDBConsole) Commands() []Command {
var res []Command
for _, c := range c.CommandMap {
res = append(res, c)
}
sort.Slice(res, func(i, j int) bool {
return res[i].Name() < res[j].Name()
})
return res
}
/*
Authenticate authenticates the user if necessary.
*/
func (c *EliasDBConsole) Authenticate(force bool) {
// Only do the authentication if we haven't asked yet or it is
// explicitly desired
if !c.credsAsked || force {
c.credsAsked = false
for !c.credsAsked {
// Ask for credentials
user, pass := c.GetCredentials()
if user == "" {
// User doesn't want to authenticate - do nothing
fmt.Fprintln(c.out, "Skipping authentication")
c.credsAsked = true
return
}
content, err := json.Marshal(map[string]interface{}{
"user": user,
"pass": pass,
})
errorutil.AssertOk(err) // JSON marshal should never fail
res, resp, err := c.SendRequest(ac.EndpointLogin, "application/json", "POST", content, nil)
if err == nil {
if resp.StatusCode == http.StatusOK && len(resp.Cookies()) > 0 {
fmt.Fprintln(c.out, "Login as user", user)
c.authCookie = resp.Cookies()[0]
c.credsAsked = true
return
}
}
fmt.Fprintln(c.out, fmt.Sprintf("Login failed for user %s: %s (error=%v)", user, res, err))
}
}
}
/*
Req is a convenience function to send common requests.
*/
func (c *EliasDBConsole) Req(endpoint string, method string, content []byte) (interface{}, error) {
var res interface{}
bodyStr, resp, err := c.SendRequest(endpoint, "application/json", method, content,
func(r *http.Request) {})
if err == nil {
// Try json decoding
if jerr := json.Unmarshal([]byte(bodyStr), &res); jerr != nil {
res = bodyStr
// Check if we got an error back
if resp.StatusCode != http.StatusOK {
return nil, &CommError{
fmt.Errorf("%s request to %s failed: %s", method, endpoint, bodyStr),
resp,
}
}
}
}
return res, err
}
/*
SendRequest sends a request to the connected server. The calling code of the
function can specify the contentType (e.g. application/json), the method
(e.g. GET), the content (for POST, PUT and DELETE requests) and a request
modifier function which can be used to modify the request object before the
request to the server is made.
*/
func (c *EliasDBConsole) SendRequest(endpoint string, contentType string, method string,
content []byte, reqMod func(*http.Request)) (string, *http.Response, error) {
var bodyStr string
var req *http.Request
var resp *http.Response
var err error
if content != nil {
req, err = http.NewRequest(method, c.url+endpoint, bytes.NewBuffer(content))
} else {
req, err = http.NewRequest(method, c.url+endpoint, nil)
}
if err == nil {
req.Header.Set("Content-Type", contentType)
// Set auth cookie
if c.authCookie != nil {
req.AddCookie(c.authCookie)
}
if reqMod != nil {
reqMod(req)
}
// Console client does not verify the server's SSL certificate
tlsConfig := &tls.Config{
InsecureSkipVerify: true,
}
transport := &http.Transport{TLSClientConfig: tlsConfig}
client := &http.Client{
Transport: transport,
}
resp, err = client.Do(req)
if err == nil {
defer resp.Body.Close()
body, _ := ioutil.ReadAll(resp.Body)
bodyStr = strings.Trim(string(body), " \n")
}
}
// Just return the body
return bodyStr, resp, err
}
// Util functions
// ==============
/*
cmdStartsWithKeyword checks if a given command line starts with a given list
of keywords.
*/
func cmdStartsWithKeyword(cmd string, keywords []string) bool {
ss := strings.Fields(strings.ToLower(cmd))
if len(ss) > 0 {
firstCmd := ss[0]
for _, k := range keywords {
if k == firstCmd || strings.HasPrefix(firstCmd, k) {
return true
}
}
}
return false
}
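/*
Matching is case-insensitive and a keyword may also be a prefix of the
first word, e.g. (illustrative):

    cmdStartsWithKeyword("GET Person", []string{"get"}) // true
    cmdStartsWithKeyword("parts", []string{"part"})     // true (prefix match)
*/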
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package console
import (
"fmt"
"net/url"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/api/v1"
)
// EQL Console
// ===========
/*
EQLConsole runs EQL queries.
*/
type EQLConsole struct {
parent CommandConsoleAPI // Parent console API
}
/*
eqlConsoleKeywords are all keywords which this console can process.
*/
var eqlConsoleKeywords = []string{"part", "get", "lookup"}
/*
Run executes one or more commands. It returns an error if the command
had an unexpected result and a flag if the command was handled.
*/
func (c *EQLConsole) Run(cmd string) (bool, error) {
if !cmdStartsWithKeyword(cmd, eqlConsoleKeywords) {
return false, nil
}
// Escape query so it can be used in a request
q := url.QueryEscape(cmd)
resObj, err := c.parent.Req(
fmt.Sprintf("%s%s?q=%s", v1.EndpointQuery, c.parent.Partition(), q), "GET", nil)
if err == nil && resObj != nil {
res := resObj.(map[string]interface{})
var out []string
header := res["header"].(map[string]interface{})
labels := header["labels"].([]interface{})
data := header["data"].([]interface{})
rows := res["rows"].([]interface{})
for _, l := range labels {
out = append(out, fmt.Sprint(l))
}
for _, d := range data {
out = append(out, fmt.Sprint(d))
}
for _, r := range rows {
for _, c := range r.([]interface{}) {
out = append(out, fmt.Sprint(c))
}
}
c.parent.ExportBuffer().WriteString(stringutil.PrintCSVTable(out, len(labels)))
fmt.Fprint(c.parent.Out(), stringutil.PrintGraphicStringTable(out, len(labels), 2, stringutil.SingleLineTable))
}
return true, err
}
/*
Commands returns an empty list. The command line is interpreted as an EQL query.
*/
func (c *EQLConsole) Commands() []Command {
return nil
}
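/*
EQL queries are typed directly into the console, e.g. (illustrative -
assuming a node kind Person):

    get Person where name = 'Alice'
*/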
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package console
import (
"encoding/json"
"fmt"
"devt.de/krotik/common/errorutil"
v1 "devt.de/krotik/eliasdb/api/v1"
)
// GraphQL Console
// ===============
/*
GraphQLConsole runs GraphQL queries.
*/
type GraphQLConsole struct {
parent CommandConsoleAPI // Parent console API
}
/*
graphQLConsoleKeywords are all keywords which this console can process.
*/
var graphQLConsoleKeywords = []string{"{", "query", "mutation"}
/*
Run executes one or more commands. It returns an error if the command
had an unexpected result and a flag if the command was handled.
*/
func (c *GraphQLConsole) Run(cmd string) (bool, error) {
if !cmdStartsWithKeyword(cmd, graphQLConsoleKeywords) {
return false, nil
}
q, err := json.Marshal(map[string]interface{}{
"operationName": nil,
"variables": nil,
"query": cmd,
})
errorutil.AssertOk(err)
resObj, err := c.parent.Req(
fmt.Sprintf("%s%s", v1.EndpointGraphQL, c.parent.Partition()), "POST", q)
if err == nil && resObj != nil {
actualResultBytes, _ := json.MarshalIndent(resObj, "", " ")
out := string(actualResultBytes)
c.parent.ExportBuffer().WriteString(out)
fmt.Fprint(c.parent.Out(), out)
}
return true, err
}
/*
Commands returns an empty list. The command line is interpreted as a GraphQL query.
*/
func (c *GraphQLConsole) Commands() []Command {
return nil
}
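/*
GraphQL queries are typed directly into the console, e.g. (illustrative -
assuming a node kind Person):

    { Person { key, name } }
*/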
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package dbfunc contains EliasDB-specific functions for the event condition action language (ECAL).
*/
package dbfunc
import (
"fmt"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
StoreEdgeFunc inserts or updates an edge in EliasDB.
*/
type StoreEdgeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *StoreEdgeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 2 && arglen != 3 {
err = fmt.Errorf("Function requires 2 or 3 parameters: partition, edge" +
" map and optionally a transaction")
}
if err == nil {
var trans graph.Trans
part := fmt.Sprint(args[0])
edgeMap, ok := args[1].(map[interface{}]interface{})
// Check parameters
if !ok {
err = fmt.Errorf("Second parameter must be a map")
}
if err == nil && len(args) > 2 {
if trans, ok = args[2].(graph.Trans); !ok {
err = fmt.Errorf("Third parameter must be a transaction")
}
}
// Build up the edge to store
edge := data.NewGraphEdgeFromNode(NewGraphNodeFromECALMap(edgeMap))
// Store the edge
if err == nil {
if trans != nil {
err = trans.StoreEdge(part, edge)
} else {
err = f.GM.StoreEdge(part, edge)
}
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *StoreEdgeFunc) DocString() (string, error) {
return "Inserts or updates an edge in EliasDB.", nil
}
/*
RemoveEdgeFunc removes an edge in EliasDB.
*/
type RemoveEdgeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *RemoveEdgeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 3 && arglen != 4 {
err = fmt.Errorf("Function requires 3 or 4 parameters: partition, edge key," +
" edge kind and optionally a transaction")
}
if err == nil {
var trans graph.Trans
part := fmt.Sprint(args[0])
key := fmt.Sprint(args[1])
kind := fmt.Sprint(args[2])
// Check parameters
if len(args) > 3 {
var ok bool
if trans, ok = args[3].(graph.Trans); !ok {
err = fmt.Errorf("Fourth parameter must be a transaction")
}
}
// Remove the edge
if err == nil {
if trans != nil {
err = trans.RemoveEdge(part, key, kind)
} else {
_, err = f.GM.RemoveEdge(part, key, kind)
}
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *RemoveEdgeFunc) DocString() (string, error) {
return "Removes an edge in EliasDB.", nil
}
/*
FetchEdgeFunc fetches an edge in EliasDB.
*/
type FetchEdgeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *FetchEdgeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var res interface{}
var err error
if arglen := len(args); arglen != 3 {
err = fmt.Errorf("Function requires 3 parameters: partition, edge key and" +
" edge kind")
}
if err == nil {
var node data.Node
part := fmt.Sprint(args[0])
key := fmt.Sprint(args[1])
kind := fmt.Sprint(args[2])
conv := func(m map[string]interface{}) map[interface{}]interface{} {
c := make(map[interface{}]interface{})
for k, v := range m {
c[k] = v
}
return c
}
// Fetch the edge
if node, err = f.GM.FetchEdge(part, key, kind); node != nil {
res = conv(node.Data())
}
}
return res, err
}
/*
DocString returns a descriptive string.
*/
func (f *FetchEdgeFunc) DocString() (string, error) {
return "Fetches an edge in EliasDB.", nil
}
/*
TraverseFunc traverses an edge in EliasDB.
*/
type TraverseFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *TraverseFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var res interface{}
var err error
if arglen := len(args); arglen != 4 {
err = fmt.Errorf("Function requires 4 parameters: partition, node key," +
" node kind and a traversal spec")
}
if err == nil {
var nodes []data.Node
var edges []data.Edge
part := fmt.Sprint(args[0])
key := fmt.Sprint(args[1])
kind := fmt.Sprint(args[2])
spec := fmt.Sprint(args[3])
conv := func(m map[string]interface{}) map[interface{}]interface{} {
c := make(map[interface{}]interface{})
for k, v := range m {
c[k] = v
}
return c
}
// Do the traversal
if nodes, edges, err = f.GM.TraverseMulti(part, key, kind, spec, true); err == nil {
resNodes := make([]interface{}, len(nodes))
for i, n := range nodes {
resNodes[i] = conv(n.Data())
}
resEdges := make([]interface{}, len(edges))
for i, e := range edges {
resEdges[i] = conv(e.Data())
}
res = []interface{}{resNodes, resEdges}
}
}
return res, err
}
/*
DocString returns a descriptive string.
*/
func (f *TraverseFunc) DocString() (string, error) {
return "Traverses an edge in EliasDB from a given node.", nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package dbfunc
import (
"fmt"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/eliasdb/eql"
"devt.de/krotik/eliasdb/graph"
)
/*
QueryFunc runs an EQL query.
*/
type QueryFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *QueryFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
var cols, rows []interface{}
if arglen := len(args); arglen != 2 {
err = fmt.Errorf("Function requires 2 parameters: partition and a query string")
}
if err == nil {
var res eql.SearchResult
part := fmt.Sprint(args[0])
query := fmt.Sprint(args[1])
res, err = eql.RunQuery("db.query", part, query, f.GM)
if err != nil {
return nil, err
}
// Convert result to an ECAL data structure
labels := res.Header().Labels()
cols = make([]interface{}, len(labels))
for i, v := range labels {
cols[i] = v
}
rrows := res.Rows()
rows = make([]interface{}, len(rrows))
for i, v := range rrows {
rows[i] = v
}
}
return map[interface{}]interface{}{
"cols": cols,
"rows": rows,
}, err
}
/*
DocString returns a descriptive string.
*/
func (f *QueryFunc) DocString() (string, error) {
return "Run an EQL query.", nil
}
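/*
A sketch of the equivalent direct Go call (assuming a populated
graph.Manager gm; the query string is an example):

    res, err := eql.RunQuery("db.query", "main", "get Person", gm)
    if err == nil {
        fmt.Println(res.Header().Labels())
        fmt.Println(res.Rows())
    }
*/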
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package dbfunc
import (
"fmt"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/ecal/scope"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graphql"
)
/*
GraphQLFunc runs a GraphQL query.
*/
type GraphQLFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *GraphQLFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
var ret interface{}
if arglen := len(args); arglen < 2 {
err = fmt.Errorf("Function requires at least 2 parameters: partition and query with optionally a map of variables and an operation name")
}
if err == nil {
var res, varMap map[string]interface{}
part := fmt.Sprint(args[0])
query := fmt.Sprint(args[1])
opname := ""
if err == nil && len(args) > 2 {
varECALMap, ok := args[2].(map[interface{}]interface{})
if !ok {
err = fmt.Errorf("Third parameter must be a map")
} else {
varMap = make(map[string]interface{})
for k, v := range varECALMap {
varMap[fmt.Sprint(k)] = v
}
}
}
if err == nil && len(args) > 3 {
opname = fmt.Sprint(args[3])
}
if err == nil {
res, err = graphql.RunQuery("db.query", part, map[string]interface{}{
"operationName": opname,
"query": query,
"variables": varMap,
}, f.GM, nil, false)
if err == nil {
ret = scope.ConvertJSONToECALObject(res)
}
}
}
return ret, err
}
/*
DocString returns a descriptive string.
*/
func (f *GraphQLFunc) DocString() (string, error) {
return "Run a GraphQL query.", nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package dbfunc
import (
"fmt"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
StoreNodeFunc inserts or updates a node in EliasDB.
*/
type StoreNodeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *StoreNodeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 2 && arglen != 3 {
err = fmt.Errorf("Function requires 2 or 3 parameters: partition, node" +
" map and optionally a transaction")
}
if err == nil {
var trans graph.Trans
part := fmt.Sprint(args[0])
nodeMap, ok := args[1].(map[interface{}]interface{})
// Check parameters
if !ok {
err = fmt.Errorf("Second parameter must be a map")
}
if err == nil && len(args) > 2 {
if trans, ok = args[2].(graph.Trans); !ok {
err = fmt.Errorf("Third parameter must be a transaction")
}
}
// Store the node
if err == nil {
node := NewGraphNodeFromECALMap(nodeMap)
if trans != nil {
err = trans.StoreNode(part, node)
} else {
err = f.GM.StoreNode(part, node)
}
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *StoreNodeFunc) DocString() (string, error) {
return "Inserts a node in EliasDB.", nil
}
/*
UpdateNodeFunc updates a node in EliasDB (only the given attributes of the node are updated).
*/
type UpdateNodeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *UpdateNodeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 2 && arglen != 3 {
err = fmt.Errorf("Function requires 2 or 3 parameters: partition, node" +
" map and optionally a transaction")
}
if err == nil {
var trans graph.Trans
part := fmt.Sprint(args[0])
nodeMap, ok := args[1].(map[interface{}]interface{})
// Check parameters
if !ok {
err = fmt.Errorf("Second parameter must be a map")
}
if err == nil && len(args) > 2 {
if trans, ok = args[2].(graph.Trans); !ok {
err = fmt.Errorf("Third parameter must be a transaction")
}
}
// Update the node
if err == nil {
node := NewGraphNodeFromECALMap(nodeMap)
if trans != nil {
err = trans.UpdateNode(part, node)
} else {
err = f.GM.UpdateNode(part, node)
}
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *UpdateNodeFunc) DocString() (string, error) {
return "Updates a node in EliasDB (only update the given values of the node).", nil
}
/*
RemoveNodeFunc removes a node in EliasDB.
*/
type RemoveNodeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *RemoveNodeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 3 && arglen != 4 {
err = fmt.Errorf("Function requires 3 or 4 parameters: partition, node key" +
" node kind and optionally a transaction")
}
if err == nil {
var trans graph.Trans
part := fmt.Sprint(args[0])
key := fmt.Sprint(args[1])
kind := fmt.Sprint(args[2])
// Check parameters
if len(args) > 3 {
var ok bool
if trans, ok = args[3].(graph.Trans); !ok {
err = fmt.Errorf("Fourth parameter must be a transaction")
}
}
// Remove the node
if err == nil {
if trans != nil {
err = trans.RemoveNode(part, key, kind)
} else {
_, err = f.GM.RemoveNode(part, key, kind)
}
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *RemoveNodeFunc) DocString() (string, error) {
return "Removes a node in EliasDB.", nil
}
/*
FetchNodeFunc fetches a node in EliasDB.
*/
type FetchNodeFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *FetchNodeFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var res interface{}
var err error
if arglen := len(args); arglen != 3 {
err = fmt.Errorf("Function requires 3 parameters: partition, node key" +
" node kind")
}
if err == nil {
var node data.Node
part := fmt.Sprint(args[0])
key := fmt.Sprint(args[1])
kind := fmt.Sprint(args[2])
conv := func(m map[string]interface{}) map[interface{}]interface{} {
c := make(map[interface{}]interface{})
for k, v := range m {
c[k] = v
}
return c
}
// Fetch the node
if node, err = f.GM.FetchNode(part, key, kind); node != nil {
res = conv(node.Data())
}
}
return res, err
}
/*
DocString returns a descriptive string.
*/
func (f *FetchNodeFunc) DocString() (string, error) {
return "Fetches a node in EliasDB.", nil
}
// Helper functions
// ================
/*
NewGraphNodeFromECALMap creates a new Node instance from a given map.
*/
func NewGraphNodeFromECALMap(d map[interface{}]interface{}) data.Node {
node := data.NewGraphNode()
for k, v := range d {
node.SetAttr(fmt.Sprint(k), v)
}
return node
}
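/*
For example (illustrative), the following map becomes a node with key "a",
kind "Person" and one further attribute:

    node := NewGraphNodeFromECALMap(map[interface{}]interface{}{
        "key":  "a",
        "kind": "Person",
        "name": "Alice",
    })
*/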
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package dbfunc
import (
"fmt"
"strconv"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/eliasdb/graph"
)
/*
NewTransFunc creates a new transaction for EliasDB.
*/
type NewTransFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *NewTransFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if len(args) != 0 {
err = fmt.Errorf("Function does not require any parameters")
}
return graph.NewConcurrentGraphTrans(f.GM), err
}
/*
DocString returns a descriptive string.
*/
func (f *NewTransFunc) DocString() (string, error) {
return "Creates a new transaction for EliasDB.", nil
}
/*
NewRollingTransFunc creates a new rolling transaction for EliasDB.
A rolling transaction commits after n entries.
*/
type NewRollingTransFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *NewRollingTransFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
var trans graph.Trans
if arglen := len(args); arglen != 1 {
err = fmt.Errorf(
"Function requires the rolling threshold (number of operations before rolling)")
}
if err == nil {
var i int
if i, err = strconv.Atoi(fmt.Sprint(args[0])); err != nil {
err = fmt.Errorf("Rolling threshold must be a number not: %v", args[0])
} else {
trans = graph.NewRollingTrans(graph.NewConcurrentGraphTrans(f.GM),
i, f.GM, graph.NewConcurrentGraphTrans)
}
}
return trans, err
}
/*
DocString returns a descriptive string.
*/
func (f *NewRollingTransFunc) DocString() (string, error) {
return "Creates a new rolling transaction for EliasDB. A rolling transaction commits after n entries.", nil
}
/*
CommitTransFunc commits an existing transaction for EliasDB.
*/
type CommitTransFunc struct {
GM *graph.Manager
}
/*
Run executes the ECAL function.
*/
func (f *CommitTransFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
var err error
if arglen := len(args); arglen != 1 {
err = fmt.Errorf(
"Function requires the transaction to commit as parameter")
}
if err == nil {
trans, ok := args[0].(graph.Trans)
// Check parameters
if !ok {
err = fmt.Errorf("Parameter must be a transaction")
} else {
err = trans.Commit()
}
}
return nil, err
}
/*
DocString returns a descriptive string.
*/
func (f *CommitTransFunc) DocString() (string, error) {
return "Commits an existing transaction for EliasDB.", nil
}
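/*
The transaction values handled by these functions are ordinary graph.Trans
objects. A minimal sketch of the equivalent flow in Go (assuming a
graph.Manager gm and a node to store):

    trans := graph.NewConcurrentGraphTrans(gm)

    if err := trans.StoreNode("main", node); err == nil {
        err = trans.Commit()
    }
*/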
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package dbfunc
import (
"fmt"
"devt.de/krotik/ecal/interpreter"
"devt.de/krotik/ecal/parser"
"devt.de/krotik/ecal/util"
"devt.de/krotik/eliasdb/graph"
)
/*
RaiseGraphEventHandledFunc returns the special graph.ErrEventHandled error which a sink,
handling graph events, can return to notify the GraphManager that no further
action is necessary.
*/
type RaiseGraphEventHandledFunc struct {
}
/*
Run executes the ECAL function.
*/
func (f *RaiseGraphEventHandledFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
return nil, graph.ErrEventHandled
}
/*
DocString returns a descriptive string.
*/
func (f *RaiseGraphEventHandledFunc) DocString() (string, error) {
return "When handling a graph event, notify the GraphManager of EliasDB that no further action is necessary.", nil
}
/*
ErrWebEventHandled is a special error to signal that a web request was handled.
*/
var ErrWebEventHandled = fmt.Errorf("Web event handled")
/*
RaiseWebEventHandledFunc returns a special error which a sink can return to notify
the web API that a web request was handled.
*/
type RaiseWebEventHandledFunc struct {
}
/*
Run executes the ECAL function.
*/
func (f *RaiseWebEventHandledFunc) Run(instanceID string, vs parser.Scope, is map[string]interface{}, tid uint64, args []interface{}) (interface{}, error) {
if arglen := len(args); arglen != 1 {
return nil, fmt.Errorf("Function requires 1 parameter: request response object")
}
res := args[0]
resMap, ok := res.(map[interface{}]interface{})
if !ok {
return nil, fmt.Errorf("Request response object should be a map")
}
if _, ok := resMap["status"]; !ok {
resMap["status"] = 200
}
if _, ok := resMap["headers"]; !ok {
resMap["header"] = map[interface{}]interface{}{
"Content-Type": "application/json; charset=utf-8",
"X-Content-Type-Options": "nosniff",
}
}
if _, ok := resMap["body"]; !ok {
resMap["body"] = map[interface{}]interface{}{}
}
erp := is["erp"].(*interpreter.ECALRuntimeProvider)
node := is["astnode"].(*parser.ASTNode)
return nil, &util.RuntimeErrorWithDetail{
RuntimeError: erp.NewRuntimeError(ErrWebEventHandled, "", node).(*util.RuntimeError),
Environment: vs,
Data: res,
}
}
/*
DocString returns a descriptive string.
*/
func (f *RaiseWebEventHandledFunc) DocString() (string, error) {
return "When handling a web event, notify the web API of EliasDB that the web request was handled.", nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package ecal contains the main API for the event condition action language (ECAL).
*/
package ecal
import (
"fmt"
"strings"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/ecal/engine"
"devt.de/krotik/ecal/scope"
"devt.de/krotik/ecal/util"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
EventMapping maps EliasDB event types to EliasDB-specific event kinds in ECAL.
*/
var EventMapping = map[int]string{
/*
EventNodeCreated is thrown when a node was created.
Parameters: partition of created node, created node
*/
graph.EventNodeCreated: "db.node.created",
/*
EventNodeUpdated is thrown when a node was updated.
Parameters: partition of updated node, updated node, old node
*/
graph.EventNodeUpdated: "db.node.updated",
/*
EventNodeDeleted is thrown when a node was deleted.
Parameters: partition of deleted node, deleted node
*/
graph.EventNodeDeleted: "db.node.deleted",
/*
EventEdgeCreated is thrown when an edge was created.
Parameters: partition of created edge, created edge
*/
graph.EventEdgeCreated: "db.edge.created",
/*
EventEdgeUpdated is thrown when an edge was updated.
Parameters: partition of updated edge, updated edge, old edge
*/
graph.EventEdgeUpdated: "db.edge.updated",
/*
EventEdgeDeleted is thrown when an edge was deleted.
Parameters: partition of deleted edge, deleted edge
*/
graph.EventEdgeDeleted: "db.edge.deleted",
/*
EventNodeStore is thrown before a node is stored (always overwriting existing values).
Parameters: partition of node to store, node to store
*/
graph.EventNodeStore: "db.node.store",
/*
EventNodeUpdate is thrown before a node is updated.
Parameters: partition of node to update, node to update
*/
graph.EventNodeUpdate: "db.node.update",
/*
EventNodeDelete is thrown before a node is deleted.
Parameters: partition of node to delete, key of node to delete, kind of node to delete
*/
graph.EventNodeDelete: "db.node.delete",
/*
EventEdgeStore is thrown before an edge is stored (always overwriting existing values).
Parameters: partition of stored edge, stored edge
*/
graph.EventEdgeStore: "db.edge.store",
/*
EventEdgeDelete is thrown before an edge is deleted.
Parameters: partition of deleted edge, deleted edge
*/
graph.EventEdgeDelete: "db.edge.delete",
}
/*
EventBridge is a rule for a graph manager to forward all graph events to ECAL.
*/
type EventBridge struct {
Processor engine.Processor
Logger util.Logger
}
/*
Name returns the name of the rule.
*/
func (eb *EventBridge) Name() string {
return "ecal.eventbridge"
}
/*
Handles returns a list of events which are handled by this rule.
*/
func (eb *EventBridge) Handles() []int {
return []int{
graph.EventNodeCreated,
graph.EventNodeUpdated,
graph.EventNodeDeleted,
graph.EventEdgeCreated,
graph.EventEdgeUpdated,
graph.EventEdgeDeleted,
graph.EventNodeStore,
graph.EventNodeUpdate,
graph.EventNodeDelete,
graph.EventEdgeStore,
graph.EventEdgeDelete,
}
}
/*
Handle handles an event.
*/
func (eb *EventBridge) Handle(gm *graph.Manager, trans graph.Trans, event int, ed ...interface{}) error {
var err error
if name, ok := EventMapping[event]; ok {
eventName := fmt.Sprintf("EliasDB: %v", name)
eventKind := strings.Split(name, ".")
// Construct an event which can be used to check if any rule will trigger.
// This is to avoid the relatively costly state construction below for events
// which would not trigger any rules.
triggerCheckEvent := engine.NewEvent(eventName, eventKind, nil)
if !eb.Processor.IsTriggering(triggerCheckEvent) {
return nil
}
// Build up state
state := map[interface{}]interface{}{
"part": fmt.Sprint(ed[0]),
"trans": trans,
}
// Include the right arguments into the state
switch event {
case graph.EventNodeCreated, graph.EventNodeUpdate, graph.EventNodeDeleted, graph.EventNodeStore:
state["node"] = scope.ConvertJSONToECALObject(ed[1].(data.Node).Data())
case graph.EventNodeUpdated:
state["node"] = scope.ConvertJSONToECALObject(ed[1].(data.Node).Data())
state["old_node"] = scope.ConvertJSONToECALObject(ed[2].(data.Node).Data())
case graph.EventEdgeCreated, graph.EventEdgeDeleted, graph.EventEdgeStore:
state["edge"] = scope.ConvertJSONToECALObject(ed[1].(data.Edge).Data())
case graph.EventEdgeUpdated:
state["edge"] = scope.ConvertJSONToECALObject(ed[1].(data.Edge).Data())
state["old_edge"] = scope.ConvertJSONToECALObject(ed[2].(data.Edge).Data())
case graph.EventNodeDelete, graph.EventEdgeDelete:
state["key"] = fmt.Sprint(ed[1])
state["kind"] = fmt.Sprint(ed[2])
}
// Try to inject the event
event := engine.NewEvent(eventName, eventKind, state)
var m engine.Monitor
m, err = eb.Processor.AddEventAndWait(event, nil)
if err == nil {
// If there was no direct error adding the event then check if an error was
// raised in a sink
if errs := m.(*engine.RootMonitor).AllErrors(); len(errs) > 0 {
var errList []error
for _, e := range errs {
addError := true
for _, se := range e.ErrorMap {
// Check if the sink returned a special graph.ErrEventHandled error
if re, ok := se.(*util.RuntimeErrorWithDetail); ok && re.Detail == graph.ErrEventHandled.Error() {
addError = false
}
}
if addError {
errList = append(errList, e)
}
}
if len(errList) > 0 {
err = &errorutil.CompositeError{Errors: errList}
} else {
err = graph.ErrEventHandled
}
}
}
if err != nil {
eb.Logger.LogDebug(fmt.Sprintf("EliasDB event %v was handled by ECAL and returned: %v", name, err))
}
}
return err
}
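/*
Example (minimal sketch): wiring the bridge into a graph.Manager. This mirrors
what ScriptingInterpreter.Run does; gm, proc and logger are assumed to come from
an initialized graph manager and ECAL runtime provider:

    gm.SetGraphRule(&EventBridge{
        Processor: proc,
        Logger:    logger,
    })
*/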
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package ecal
import (
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/fileutil"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/ecal/cli/tool"
ecalconfig "devt.de/krotik/ecal/config"
"devt.de/krotik/ecal/engine"
"devt.de/krotik/ecal/scope"
"devt.de/krotik/ecal/stdlib"
"devt.de/krotik/ecal/util"
"devt.de/krotik/eliasdb/config"
"devt.de/krotik/eliasdb/ecal/dbfunc"
"devt.de/krotik/eliasdb/graph"
)
/*
ScriptingInterpreter models an ECAL script interpreter instance.
*/
type ScriptingInterpreter struct {
GM *graph.Manager // GraphManager for the interpreter
Interpreter *tool.CLIInterpreter // ECAL Interpreter object
Dir string // Root dir for interpreter
EntryFile string // Entry file for the program
LogLevel string // Log level string (Debug, Info, Error)
LogFile string // Logfile (blank for stdout)
RunDebugServer bool // Run a debug server
DebugServerHost string // Debug server host
DebugServerPort string // Debug server port
WebsocketConnections *datautil.MapCache
}
/*
NewScriptingInterpreter returns a new ECAL scripting interpreter.
*/
func NewScriptingInterpreter(scriptFolder string, gm *graph.Manager) *ScriptingInterpreter {
return &ScriptingInterpreter{
GM: gm,
Dir: scriptFolder,
EntryFile: filepath.Join(scriptFolder, config.Str(config.ECALEntryScript)),
LogLevel: config.Str(config.ECALLogLevel),
LogFile: config.Str(config.ECALLogFile),
RunDebugServer: config.Bool(config.EnableECALDebugServer),
DebugServerHost: config.Str(config.ECALDebugServerHost),
DebugServerPort: config.Str(config.ECALDebugServerPort),
WebsocketConnections: datautil.NewMapCache(5000, 0),
}
}
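/*
Example (minimal sketch, assuming an in-memory storage from the
devt.de/krotik/eliasdb/graph/graphstorage package): creating and running a
scripting interpreter for a script folder:

    gm := graph.NewGraphManager(graphstorage.NewMemoryGraphStorage("example"))
    si := NewScriptingInterpreter("scripts", gm)
    if err := si.Run(); err != nil {
        fmt.Println("ECAL error:", err)
    }
*/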
/*
dummyEntryFile is a small valid ECAL script which does not do anything. It is
used as the default entry file if no entry file exists.
*/
const dummyEntryFile = `0 # Write your ECAL code here
`
/*
Run runs the ECAL scripting interpreter.
After this function completes:
- EntryScript in config and all related scripts in the interpreter root dir have been executed
- ECAL Interpreter object is fully initialized
- A debug server might be running which can reload the entry script
- ECAL's event processor has been started
- GraphManager events are being forwarded to ECAL
*/
func (si *ScriptingInterpreter) Run() error {
var err error
// Ensure we have a dummy entry point
if ok, _ := fileutil.PathExists(si.EntryFile); !ok {
err = ioutil.WriteFile(si.EntryFile, []byte(dummyEntryFile), 0600)
}
if err == nil {
i := tool.NewCLIInterpreter()
si.Interpreter = i
// Set worker count in ecal config
ecalconfig.Config[ecalconfig.WorkerCount] = config.Config[config.ECALWorkerCount]
i.Dir = &si.Dir
i.LogFile = &si.LogFile
i.LogLevel = &si.LogLevel
i.EntryFile = si.EntryFile
i.LoadPlugins = true
i.CreateRuntimeProvider("eliasdb-runtime")
// Adding functions
AddEliasDBStdlibFunctions(si.GM)
// Adding rules
sockRule := &engine.Rule{
Name: "EliasDB-websocket-communication-rule", // Name
Desc: "Handles a websocket communication", // Description
KindMatch: []string{"db.web.sock.msg"}, // Kind match
ScopeMatch: []string{},
StateMatch: nil,
Priority: 0,
SuppressionList: nil,
Action: si.HandleECALSockEvent,
}
si.Interpreter.CustomRules = append(si.Interpreter.CustomRules, sockRule)
if err == nil {
if si.RunDebugServer {
di := tool.NewCLIDebugInterpreter(i)
addr := fmt.Sprintf("%v:%v", si.DebugServerHost, si.DebugServerPort)
di.DebugServerAddr = &addr
di.RunDebugServer = &si.RunDebugServer
falseFlag := false
di.EchoDebugServer = &falseFlag
di.Interactive = &falseFlag
di.BreakOnStart = &falseFlag
di.BreakOnError = &falseFlag
err = di.Interpret()
} else {
err = i.Interpret(false)
}
// EliasDB graph events are now forwarded to ECAL via the eventbridge.
si.GM.SetGraphRule(&EventBridge{
Processor: i.RuntimeProvider.Processor,
Logger: i.RuntimeProvider.Logger,
})
}
}
// Include a traceback if possible
if ss, ok := err.(util.TraceableRuntimeError); ok {
err = fmt.Errorf("%v\n %v", err.Error(), strings.Join(ss.GetTraceString(), "\n "))
}
return err
}
/*
RegisterECALSock registers a websocket which should be connected to ECAL events.
*/
func (si *ScriptingInterpreter) RegisterECALSock(conn *WebsocketConnection) {
si.WebsocketConnections.Put(conn.CommID, conn)
}
/*
DeregisterECALSock removes a registered websocket.
*/
func (si *ScriptingInterpreter) DeregisterECALSock(conn *WebsocketConnection) {
si.WebsocketConnections.Remove(conn.CommID)
}
/*
HandleECALSockEvent handles websocket events from the ECAL interpreter (db.web.sock.msg events).
*/
func (si *ScriptingInterpreter) HandleECALSockEvent(p engine.Processor, m engine.Monitor, e *engine.Event, tid uint64) error {
state := e.State()
payload := scope.ConvertECALToJSONObject(state["payload"])
shouldClose := stringutil.IsTrueValue(fmt.Sprint(state["close"]))
id := "null"
if commID, ok := state["commID"]; ok {
id = fmt.Sprint(commID)
}
err := fmt.Errorf("Could not send data to unknown websocket - commID: %v", id)
if conn, ok := si.WebsocketConnections.Get(id); ok {
err = nil
wconn := conn.(*WebsocketConnection)
wconn.WriteData(map[string]interface{}{
"commID": id,
"payload": payload,
"close": shouldClose,
})
if shouldClose {
wconn.Close("")
}
}
return err
}
/*
AddEliasDBStdlibFunctions adds EliasDB related ECAL stdlib functions.
*/
func AddEliasDBStdlibFunctions(gm *graph.Manager) {
stdlib.AddStdlibPkg("db", "EliasDB related functions")
stdlib.AddStdlibFunc("db", "storeNode", &dbfunc.StoreNodeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "updateNode", &dbfunc.UpdateNodeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "removeNode", &dbfunc.RemoveNodeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "fetchNode", &dbfunc.FetchNodeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "storeEdge", &dbfunc.StoreEdgeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "removeEdge", &dbfunc.RemoveEdgeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "fetchEdge", &dbfunc.FetchEdgeFunc{GM: gm})
stdlib.AddStdlibFunc("db", "traverse", &dbfunc.TraverseFunc{GM: gm})
stdlib.AddStdlibFunc("db", "newTrans", &dbfunc.NewTransFunc{GM: gm})
stdlib.AddStdlibFunc("db", "newRollingTrans", &dbfunc.NewRollingTransFunc{GM: gm})
stdlib.AddStdlibFunc("db", "commit", &dbfunc.CommitTransFunc{GM: gm})
stdlib.AddStdlibFunc("db", "query", &dbfunc.QueryFunc{GM: gm})
stdlib.AddStdlibFunc("db", "graphQL", &dbfunc.GraphQLFunc{GM: gm})
stdlib.AddStdlibFunc("db", "raiseGraphEventHandled", &dbfunc.RaiseGraphEventHandledFunc{})
stdlib.AddStdlibFunc("db", "raiseWebEventHandled", &dbfunc.RaiseWebEventHandledFunc{})
}
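/*
Example (illustrative): making the db functions available to a manually
constructed ECAL interpreter instead of going through ScriptingInterpreter.Run
(gm is assumed to be a *graph.Manager):

    AddEliasDBStdlibFunctions(gm)
    // ECAL code which is interpreted afterwards can now call e.g.
    // db.fetchNode, db.query or db.graphQL.
*/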
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package ecal
import (
"encoding/json"
"sync"
"time"
"github.com/gorilla/websocket"
)
/*
WebsocketConnection models a single websocket connection.
Websocket connections support one concurrent reader and one concurrent writer.
See: https://godoc.org/github.com/gorilla/websocket#hdr-Concurrency
*/
type WebsocketConnection struct {
CommID string
Conn *websocket.Conn
RMutex *sync.Mutex
WMutex *sync.Mutex
}
/*
NewWebsocketConnection creates a new WebsocketConnection object.
*/
func NewWebsocketConnection(commID string, c *websocket.Conn) *WebsocketConnection {
return &WebsocketConnection{
CommID: commID,
Conn: c,
RMutex: &sync.Mutex{},
WMutex: &sync.Mutex{}}
}
/*
Init initializes the websocket connection.
*/
func (wc *WebsocketConnection) Init() {
wc.WMutex.Lock()
defer wc.WMutex.Unlock()
wc.Conn.WriteMessage(websocket.TextMessage, []byte(`{"type":"init_success","payload":{}}`))
}
/*
ReadData reads data from the websocket connection.
*/
func (wc *WebsocketConnection) ReadData() (map[string]interface{}, bool, error) {
var data map[string]interface{}
var fatal = true
wc.RMutex.Lock()
_, msg, err := wc.Conn.ReadMessage()
wc.RMutex.Unlock()
if err == nil {
fatal = false
err = json.Unmarshal(msg, &data)
}
return data, fatal, err
}
/*
WriteData writes data to the websocket.
*/
func (wc *WebsocketConnection) WriteData(data map[string]interface{}) {
wc.WMutex.Lock()
defer wc.WMutex.Unlock()
jsonData, _ := json.Marshal(map[string]interface{}{
"commID": wc.CommID,
"type": "data",
"payload": data,
})
wc.Conn.WriteMessage(websocket.TextMessage, jsonData)
}
/*
Close closes the websocket connection.
*/
func (wc *WebsocketConnection) Close(msg string) {
wc.Conn.WriteControl(websocket.CloseMessage,
websocket.FormatCloseMessage(
websocket.CloseNormalClosure, msg), time.Now().Add(10*time.Second))
wc.Conn.Close()
}
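/*
Example (minimal sketch): using WebsocketConnection in an HTTP handler with the
gorilla/websocket upgrader (the upgrader configuration and handler wiring are
assumptions):

    var upgrader = websocket.Upgrader{}

    func wsHandler(w http.ResponseWriter, r *http.Request) {
        c, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
            return
        }
        wc := NewWebsocketConnection("conn-1", c)
        wc.Init()
        if data, fatal, err := wc.ReadData(); err == nil {
            wc.WriteData(data) // Echo the received data back
        } else if fatal {
            wc.Close(err.Error())
        }
    }
*/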
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package interpreter contains the EQL interpreter.
*/
package interpreter
import (
"errors"
"fmt"
"strconv"
"strings"
"time"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph/data"
)
// Where related functions
// =======================
/*
FuncWhere represents a where related function.
*/
type FuncWhere func(astNode *parser.ASTNode, rtp *eqlRuntimeProvider,
node data.Node, edge data.Edge) (interface{}, error)
/*
Runtime map for where related functions
*/
var whereFunc = map[string]FuncWhere{
"count": whereCount,
"parseDate": whereParseDate,
}
/*
whereCount counts reachable nodes via a given traversal.
*/
func whereCount(astNode *parser.ASTNode, rtp *eqlRuntimeProvider,
node data.Node, edge data.Edge) (interface{}, error) {
// Check parameters
np := len(astNode.Children)
if np != 2 && np != 3 {
return nil, rtp.newRuntimeError(ErrInvalidConstruct,
"Count function requires 1 or 2 parameters: traversal spec, condition clause", astNode)
}
spec := astNode.Children[1].Token.Val
// Only need to retrieve full node values if there is a where clause
nodes, _, err := rtp.gm.TraverseMulti(rtp.part, node.Key(), node.Kind(), spec, np == 3)
if np == 3 {
var filteredNodes []data.Node
// If a where clause was given parse it and evaluate it
conditionString := astNode.Children[2].Token.Val
ast, err := parser.ParseWithRuntime("count condition", "get _ where "+conditionString, &GetRuntimeProvider{rtp})
if err != nil {
return nil, rtp.newRuntimeError(ErrInvalidConstruct,
fmt.Sprintf("Invalid condition clause in count function: %s", err), astNode)
}
cond := ast.Children[1] // This should always pick out just the where clause
errorutil.AssertOk(cond.Runtime.Validate()) // Validation should always succeed
for _, n := range nodes {
res, err := cond.Children[0].Runtime.(CondRuntime).CondEval(n, nil)
if err != nil {
return nil, rtp.newRuntimeError(ErrInvalidConstruct,
fmt.Sprintf("Invalid condition clause in count function: %s", err), astNode)
} else if b, ok := res.(bool); ok {
if b {
filteredNodes = append(filteredNodes, n)
}
} else {
return nil, rtp.newRuntimeError(ErrInvalidConstruct,
"Could not evaluate condition clause in count function", astNode)
}
}
nodes = filteredNodes
}
return len(nodes), err
}
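/*
Example (EQL syntax is illustrative): using count in a where clause to select
only nodes which can reach more than one node via the given traversal spec:

    get Person where @count(':::Friend') > 1
*/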
/*
whereParseDate converts a date string into a unix time value. An optional second
parameter can specify the date layout (default: RFC3339).
*/
func whereParseDate(astNode *parser.ASTNode, rtp *eqlRuntimeProvider,
node data.Node, edge data.Edge) (interface{}, error) {
var datestr interface{}
var t time.Time
var ret int64
var err error
// Define default layout
layout := time.RFC3339
// Check parameters
if len(astNode.Children) < 2 {
return nil, rtp.newRuntimeError(ErrInvalidConstruct,
"parseDate function requires 1 parameter: date string", astNode)
}
if len(astNode.Children) > 2 {
datestr, err = astNode.Children[2].Runtime.(CondRuntime).CondEval(node, edge)
layout = fmt.Sprint(datestr)
}
if err == nil {
// Convert the date string
datestr, err = astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err == nil {
t, err = time.Parse(layout, fmt.Sprint(datestr))
if err == nil {
ret = t.Unix()
}
}
}
return ret, err
}
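/*
Example (EQL syntax is illustrative): comparing a date attribute ts, stored as a
unix time value, against a fixed point in time. The optional second parameter
overrides the default RFC3339 layout:

    get Event where ts > @parseDate('2016-01-01T00:00:00Z')
    get Event where ts > @parseDate('01.01.2016', '02.01.2006')
*/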
// Show related functions
// ======================
/*
Runtime map for show related functions
*/
var showFunc = map[string]FuncShowInst{
"count": showCountInst,
"objget": showObjgetInst,
}
/*
FuncShow is the interface definition for show related functions
*/
type FuncShow interface {
/*
name returns the name of the function.
*/
name() string
/*
eval runs the function. Returns the result and a source for the result.
The source should be a concrete node/edge key and kind or a query and
should be returned in either of the following formats:
n:<key>:<kind> for a node
e:<key>:<kind> for an edge
q:<query> for a query
*/
eval(node data.Node, edge data.Edge) (interface{}, string, error)
}
/*
FuncShowInst creates a function object. Returns the function, which column data
should be queried and how the column should be named.
*/
type FuncShowInst func(astNode *parser.ASTNode, rtp *eqlRuntimeProvider) (FuncShow, string, string, error)
// Show Count
// ----------
/*
showCountInst creates a new showCount object.
*/
func showCountInst(astNode *parser.ASTNode, rtp *eqlRuntimeProvider) (FuncShow, string, string, error) {
var cond *parser.ASTNode
// Check parameters
np := len(astNode.Children)
if np != 3 && np != 4 {
return nil, "", "", errors.New("Count function requires 2 or 3 parameters: traversal step, traversal spec, condition clause")
}
pos := astNode.Children[1].Token.Val
spec := astNode.Children[2].Token.Val
if np == 4 {
// If a condition clause was given parse it
condString := astNode.Children[3].Token.Val
ast, err := parser.ParseWithRuntime("count condition", "get _ where "+condString, &GetRuntimeProvider{rtp})
if err != nil {
return nil, "", "", fmt.Errorf("Invalid condition clause in count function: %s", err)
}
cond = ast.Children[1] // This should always pick out just the condition clause
errorutil.AssertOk(cond.Runtime.Validate()) // Validation should always succeed
}
return &showCount{rtp, astNode, spec, cond}, pos + ":n:key", "Count", nil
}
/*
showCount calculates the number of reachable nodes via a given traversal spec.
*/
type showCount struct {
rtp *eqlRuntimeProvider
astNode *parser.ASTNode
spec string
condition *parser.ASTNode
}
/*
name returns the name of the function.
*/
func (sc *showCount) name() string {
return "count"
}
/*
eval counts reachable nodes via a given traversal.
*/
func (sc *showCount) eval(node data.Node, edge data.Edge) (interface{}, string, error) {
condString := ""
// Only need to retrieve full node values if there is a where clause
nodes, _, err := sc.rtp.gm.TraverseMulti(sc.rtp.part, node.Key(), node.Kind(), sc.spec, sc.condition != nil)
if err != nil {
return nil, "", err
}
if sc.condition != nil {
var filteredNodes []data.Node
// If there is a condition clause filter the result
condString, _ = parser.PrettyPrint(sc.condition)
for _, n := range nodes {
res, err := sc.condition.Children[0].Runtime.(CondRuntime).CondEval(n, nil)
if err != nil {
return nil, "", err
} else if b, ok := res.(bool); ok {
if b {
filteredNodes = append(filteredNodes, n)
}
} else {
return nil, "", sc.rtp.newRuntimeError(ErrInvalidConstruct,
"Could not evaluate condition clause in count function", sc.astNode)
}
}
nodes = filteredNodes
}
srcQuery := fmt.Sprintf("q:lookup %s %s traverse %s %s end show 2:n:%s, 2:n:%s, 2:n:%s",
node.Kind(), strconv.Quote(node.Key()), sc.spec, condString, data.NodeKey, data.NodeKind, data.NodeName)
return len(nodes), srcQuery, nil
}
// Show Objget
// -----------
/*
showObjgetInst creates a new showObjget object.
*/
func showObjgetInst(astNode *parser.ASTNode, rtp *eqlRuntimeProvider) (FuncShow, string, string, error) {
// Check parameters
if len(astNode.Children) != 4 {
return nil, "", "",
fmt.Errorf("Objget function requires 3 parameters: traversal step, attribute name, path to value")
}
pos := astNode.Children[1].Token.Val
attr := astNode.Children[2].Token.Val
path := astNode.Children[3].Token.Val
return &showObjget{rtp, attr, strings.Split(path, ".")}, pos + ":n:" + attr,
rtp.ni.AttributeDisplayString("", attr) + "." + path, nil
}
/*
showObjget reaches into an object and extracts a value.
*/
type showObjget struct {
rtp *eqlRuntimeProvider
attr string
path []string
}
/*
name returns the name of the function.
*/
func (so *showObjget) name() string {
return "objget"
}
/*
eval reaches into an object and extracts a value.
*/
func (so *showObjget) eval(node data.Node, edge data.Edge) (interface{}, string, error) {
val := node.Attr(so.attr)
if valMap, ok := val.(map[string]interface{}); ok {
val, _ = datautil.GetNestedValue(valMap, so.path)
}
return val, "n:" + node.Kind() + ":" + node.Key(), nil
}
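/*
Example (EQL syntax is illustrative): extracting a nested value from an object
attribute in a show clause. This would display the value of
data["address"]["city"] for each Person node:

    get Person show key, @objget(1, data, address.city)
*/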
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph"
)
// Runtime provider for GET queries
// ================================
/*
Instance function for GET query components
*/
type getInst func(*GetRuntimeProvider, *parser.ASTNode) parser.Runtime
/*
Runtime map for GET query specific components
*/
var getProviderMap = map[string]getInst{
parser.NodeGET: getRuntimeInst,
}
/*
GetRuntimeProvider data structure
*/
type GetRuntimeProvider struct {
*eqlRuntimeProvider
}
/*
NewGetRuntimeProvider creates a new GetRuntimeProvider object. This provider
can interpret GET queries.
*/
func NewGetRuntimeProvider(name string, part string, gm *graph.Manager, ni NodeInfo) *GetRuntimeProvider {
return &GetRuntimeProvider{&eqlRuntimeProvider{name, part, gm, ni, "", false, nil, "",
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}}
}
/*
Runtime returns a runtime component for a given ASTNode.
*/
func (rtp *GetRuntimeProvider) Runtime(node *parser.ASTNode) parser.Runtime {
if pinst, ok := generalProviderMap[node.Name]; ok {
return pinst(rtp.eqlRuntimeProvider, node)
} else if pinst, ok := getProviderMap[node.Name]; ok {
return pinst(rtp, node)
}
return invalidRuntimeInst(rtp.eqlRuntimeProvider, node)
}
// GET Runtime
// ===========
type getRuntime struct {
rtp *GetRuntimeProvider
node *parser.ASTNode
}
func getRuntimeInst(rtp *GetRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &getRuntime{rtp, node}
}
/*
Validate and reset this runtime component and all its child components.
*/
func (rt *getRuntime) Validate() error {
// First child is always the first node kind to query
// (validation of this value was done during lexing)
startKind := rt.node.Children[0].Token.Val
initErr := rt.rtp.init(startKind, rt.node.Children[1:])
if rt.rtp.groupScope == "" {
// Start keys can be provided by a simple node key iterator
startKeyIterator, err := rt.rtp.gm.NodeKeyIterator(rt.rtp.part, startKind)
if err != nil {
return err
} else if startKeyIterator == nil {
return rt.rtp.newRuntimeError(ErrUnknownNodeKind, startKind, rt.node.Children[0])
}
rt.rtp.nextStartKey = func() (string, error) {
nextKey := startKeyIterator.Next()
if startKeyIterator.LastError != nil {
return "", startKeyIterator.LastError
}
return nextKey, nil
}
} else {
// Try to lookup group node
nodes, _, err := rt.rtp.gm.TraverseMulti(rt.rtp.part, rt.rtp.groupScope,
GroupNodeKind, ":::"+startKind, false)
if err != nil {
return err
}
nodePtr := len(nodes)
// Iterate over all traversed nodes
rt.rtp.nextStartKey = func() (string, error) {
nodePtr--
if nodePtr >= 0 {
return nodes[nodePtr].Key(), nil
}
return "", nil
}
}
return initErr
}
/*
Eval evaluates this runtime component.
*/
func (rt *getRuntime) Eval() (interface{}, error) {
// First validate the query and reset the runtime provider datastructures
if rt.rtp.specs == nil || !allowMultiEval {
if err := rt.Validate(); err != nil {
return nil, err
}
}
return rt.gatherResult(rt.node)
}
func (rt *getRuntime) gatherResult(topNode *parser.ASTNode) (interface{}, error) {
// Generate query
query, err := parser.PrettyPrint(topNode)
// Create result object
res := newSearchResult(rt.rtp.eqlRuntimeProvider, query)
if err == nil {
var more bool
// Go through all rows
more, err = rt.rtp.next()
for more && err == nil {
// Add row to the result
if err := res.addRow(rt.rtp.rowNode, rt.rtp.rowEdge); err != nil {
return nil, err
}
// Move on to the next row
more, err = rt.rtp.next()
}
// Finish the result
res.finish()
}
return res, err
}
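/*
Example (minimal sketch): interpreting a GET query directly through the runtime
provider. This mirrors what the eql package does when running a query; gm is
assumed to be a *graph.Manager:

    rtp := NewGetRuntimeProvider("myquery", "main", gm, NewDefaultNodeInfo(gm))
    ast, err := parser.ParseWithRuntime("myquery", "get Person", rtp)
    if err == nil {
        var res interface{}
        res, err = ast.Runtime.Eval() // res is a *SearchResult on success
        _ = res
    }
*/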
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"devt.de/krotik/common/datautil"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph/data"
)
// Not Implemented Runtime
// =======================
/*
Special runtime for not implemented constructs.
*/
type invalidRuntime struct {
rtp *eqlRuntimeProvider
node *parser.ASTNode
}
/*
invalidRuntimeInst returns a new runtime component instance.
*/
func invalidRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &invalidRuntime{rtp, node}
}
/*
Validate this node and all its child nodes.
*/
func (rt *invalidRuntime) Validate() error {
return rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.node.Name, rt.node)
}
/*
Eval evaluates this runtime component.
*/
func (rt *invalidRuntime) Eval() (interface{}, error) {
return nil, rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.node.Name, rt.node)
}
/*
CondEval evaluates the value as a condition component.
*/
func (rt *invalidRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return nil, rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.node.Name, rt.node)
}
// Value Runtime
// =============
/*
Runtime for values
*/
type valueRuntime struct {
rtp *eqlRuntimeProvider
node *parser.ASTNode
isNodeAttrValue bool
isEdgeAttrValue bool
nestedValuePath []string
condVal string
}
/*
valueRuntimeInst returns a new runtime component instance.
*/
func valueRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &valueRuntime{rtp, node, false, false, nil, ""}
}
/*
Validate this node and all its child nodes.
*/
func (rt *valueRuntime) Validate() error {
return nil
}
/*
Eval evaluates this runtime component.
*/
func (rt *valueRuntime) Eval() (interface{}, error) {
return rt.node.Token.Val, nil
}
/*
CondEval evaluates the value as a condition component.
*/
func (rt *valueRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
// Check known constants
if rt.node.Token.ID == parser.TokenAT {
// Try to lookup a function
funcName := rt.node.Children[0].Token.Val
funcInst, ok := whereFunc[funcName]
if !ok {
return nil, rt.rtp.newRuntimeError(ErrInvalidConstruct,
"Unknown function: "+funcName, rt.node)
}
// Execute the function and return its result value
return funcInst(rt.node, rt.rtp, node, edge)
} else if rt.node.Token.ID == parser.TokenTRUE {
return true, nil
} else if rt.node.Token.ID == parser.TokenFALSE {
return false, nil
} else if rt.node.Token.ID == parser.TokenNULL {
return nil, nil
} else if rt.node.Name == parser.NodeLIST {
// Collect items of a list
var list []interface{}
for _, item := range rt.node.Children {
val, _ := item.Runtime.(CondRuntime).CondEval(node, edge)
list = append(list, val)
}
return list, nil
}
// Check if this is describing a node or edge value
var valRet interface{}
if rt.isNodeAttrValue {
// Check for nested values
if rt.nestedValuePath != nil {
if valMap, ok := node.Attr(rt.nestedValuePath[0]).(map[string]interface{}); ok {
valRet, _ = datautil.GetNestedValue(valMap, rt.nestedValuePath[1:])
}
} else {
valRet = node.Attr(rt.condVal)
}
return valRet, nil
} else if rt.isEdgeAttrValue {
if edge == nil {
return nil, rt.rtp.newRuntimeError(ErrInvalidWhere,
"No edge data available at this level", rt.node)
}
return edge.Attr(rt.condVal), nil
}
// Must be a constant value
return rt.condVal, nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph"
)
// Runtime provider for LOOKUP queries
// ===================================
/*
Instance function for LOOKUP query components
*/
type lookupInst func(*LookupRuntimeProvider, *parser.ASTNode) parser.Runtime
/*
Runtime map for LOOKUP query specific components
*/
var lookupProviderMap = map[string]lookupInst{
parser.NodeLOOKUP: lookupRuntimeInst,
}
/*
LookupRuntimeProvider data structure
*/
type LookupRuntimeProvider struct {
*eqlRuntimeProvider
}
/*
NewLookupRuntimeProvider creates a new LookupRuntimeProvider object. This provider
can interpret LOOKUP queries.
*/
func NewLookupRuntimeProvider(name string, part string, gm *graph.Manager, ni NodeInfo) *LookupRuntimeProvider {
return &LookupRuntimeProvider{&eqlRuntimeProvider{name, part, gm, ni, "", false, nil, "",
nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}}
}
/*
Runtime returns a runtime component for a given ASTNode.
*/
func (rtp *LookupRuntimeProvider) Runtime(node *parser.ASTNode) parser.Runtime {
if pinst, ok := generalProviderMap[node.Name]; ok {
return pinst(rtp.eqlRuntimeProvider, node)
} else if pinst, ok := lookupProviderMap[node.Name]; ok {
return pinst(rtp, node)
}
return invalidRuntimeInst(rtp.eqlRuntimeProvider, node)
}
// LOOKUP Runtime
// ==============
type lookupRuntime struct {
*getRuntime
rtp *LookupRuntimeProvider
node *parser.ASTNode
}
func lookupRuntimeInst(rtp *LookupRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &lookupRuntime{&getRuntime{&GetRuntimeProvider{rtp.eqlRuntimeProvider}, node}, rtp, node}
}
/*
Validate and reset this runtime component and all its child components.
*/
func (rt *lookupRuntime) Validate() error {
// First child is always the first node kind to query
// (validation of this value was done during lexing)
startKind := rt.node.Children[0].Token.Val
// Check how many keys were given
var keys []string
// Assume initially that only keys were given
initIndex := len(rt.node.Children) - 1
for i, child := range rt.node.Children[1:] {
if child.Token.ID != parser.TokenVALUE {
// We have found the first child which is not a key
initIndex = i
break
} else {
// Collect all given keys
keys = append(keys, child.Token.Val)
}
}
// Initialise the runtime provider
initErr := rt.rtp.init(startKind, rt.node.Children[initIndex+1:])
if rt.rtp.groupScope == "" {
nodePtr := len(keys)
if nodePtr > 0 {
// Iterate over all traversed nodes
rt.rtp.nextStartKey = func() (string, error) {
nodePtr--
if nodePtr >= 0 {
return keys[nodePtr], nil
}
return "", nil
}
}
} else {
// Build a map of keys
keyMap := make(map[string]string)
for _, key := range keys {
keyMap[key] = ""
}
// Try to lookup group node
nodes, _, err := rt.rtp.gm.TraverseMulti(rt.rtp.part, rt.rtp.groupScope,
GroupNodeKind, ":::"+startKind, false)
if err != nil {
return err
}
nodePtr := len(nodes)
// Iterate over all traversed nodes
rt.rtp.nextStartKey = func() (string, error) {
nodePtr--
if nodePtr >= 0 {
nodeKey := nodes[nodePtr].Key()
if _, ok := keyMap[nodeKey]; ok {
return nodeKey, nil
}
return rt.rtp.nextStartKey()
}
return "", nil
}
}
return initErr
}
/*
Eval evaluates this runtime component.
*/
func (rt *lookupRuntime) Eval() (interface{}, error) {
if err := rt.Validate(); err != nil {
return nil, err
}
return rt.getRuntime.gatherResult(rt.node)
}
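/*
Example (EQL syntax is illustrative): a LOOKUP query starts from the given node
keys instead of iterating over all nodes of a kind:

    lookup Person "p1", "p2" traverse :::Friend end
*/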
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"sort"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
NodeInfo interface. NodeInfo objects are used by the EQL interpreter to format
search results.
*/
type NodeInfo interface {
/*
SummaryAttributes returns the attributes which should be shown
in a list view for a given node kind.
*/
SummaryAttributes(kind string) []string
/*
Return the display string for a given attribute.
*/
AttributeDisplayString(kind string, attr string) string
/*
Check if a given string can be a valid node attribute.
*/
IsValidAttr(attr string) bool
}
/*
defaultNodeInfo data structure
*/
type defaultNodeInfo struct {
gm *graph.Manager
}
/*
NewDefaultNodeInfo creates a new default NodeInfo instance. The default NodeInfo
provides the most generic rendering information to the interpreter.
*/
func NewDefaultNodeInfo(gm *graph.Manager) NodeInfo {
return &defaultNodeInfo{gm}
}
/*
SummaryAttributes returns the attributes which should be shown
in a list view for a given node kind.
*/
func (ni *defaultNodeInfo) SummaryAttributes(kind string) []string {
if kind == "" {
return []string{data.NodeKey, data.NodeKind, data.NodeName}
}
attrs := ni.gm.NodeAttrs(kind)
ret := make([]string, 0, len(attrs))
for _, attr := range attrs {
if attr == data.NodeKey || attr == data.NodeKind {
continue
}
ret = append(ret, attr)
}
sort.Strings(ret)
// Prepend the key attribute
ret = append([]string{data.NodeKey}, ret...)
return ret
}
/*
Return the display string for a given attribute.
*/
func (ni *defaultNodeInfo) AttributeDisplayString(kind string, attr string) string {
if (attr == data.NodeKey || attr == data.NodeKind || attr == data.NodeName) && kind != "" {
return stringutil.CreateDisplayString(kind) + " " +
stringutil.CreateDisplayString(attr)
}
return stringutil.CreateDisplayString(attr)
}
/*
Check if a given string can be a valid node attribute.
*/
func (ni *defaultNodeInfo) IsValidAttr(attr string) bool {
return ni.gm.IsValidAttr(attr)
}
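/*
Example (minimal sketch): a custom NodeInfo which always renders the same
summary columns regardless of the node kind:

    type fixedNodeInfo struct {
        gm *graph.Manager
    }

    func (ni *fixedNodeInfo) SummaryAttributes(kind string) []string {
        return []string{data.NodeKey, data.NodeName}
    }

    func (ni *fixedNodeInfo) AttributeDisplayString(kind string, attr string) string {
        return stringutil.CreateDisplayString(attr)
    }

    func (ni *fixedNodeInfo) IsValidAttr(attr string) bool {
        return ni.gm.IsValidAttr(attr)
    }
*/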
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"fmt"
"strconv"
"strings"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
allowMultiEval allows multiple calls to eval of runtime components without
resetting state (used for testing)
*/
var allowMultiEval = false
// Special flags which can be set by with statements
type withFlags struct {
ordering []byte // Result ordering
orderingCol []int // Columns which should be ordered
notnullCol []int // Columns which must not be null
uniqueCol []int // Columns which will only contain unique values
uniqueColCnt []bool // Flag if unique values should be counted
}
const (
withOrderingAscending = 0x1
withOrderingDescending = 0x2
)
/*
GroupNodeKind is a special group node kind
*/
const GroupNodeKind = "group"
// General runtime provider
// ========================
/*
eqlRuntimeProvider defines the main interpreter
datastructure and all functions for general evaluation.
*/
type eqlRuntimeProvider struct {
name string // Name to identify the input
part string // Graph partition to query
gm *graph.Manager // GraphManager to operate on
ni NodeInfo // NodeInfo to use for formatting
groupScope string // Group scope for query
allowNilTraversal bool // Flag if empty traversals should be included in the result
withFlags *withFlags // Special flags which can be set by with statements
primaryKind string // Primary node kind
nextStartKey func() (string, error) // Function to get the next start key
traversals []*parser.ASTNode // Array of all top level query traversals
where *parser.ASTNode // First where clause
show *parser.ASTNode // Show clause node
specs []string // Flat list of traversals of this query
attrsNodes []map[string]string // Attributes for nodes to query on each traversal
attrsEdges []map[string]string // Attributes for edges to query on each traversal
rowNode []data.Node // Current row of nodes which is evaluated
rowEdge []data.Edge // Current row of edges which is evaluated
colLabels []string // Labels for columns
colFormat []string // Format for columns
colData []string // Data for columns
colFunc []FuncShow // Function to transform column value
_attrsNodesFetch [][]string // Internal copy of attrsNodes better suited for fetchPart calls
_attrsEdgesFetch [][]string // Internal copy of attrsEdges better suited for fetchPart calls
}
/*
Initialise and validate data structures.
*/
func (p *eqlRuntimeProvider) init(startKind string,
rootChildren []*parser.ASTNode) error {
// By default we don't include empty traversals in the result
p.allowNilTraversal = false
// Clear any with flags
p.withFlags = &withFlags{make([]byte, 0), make([]int, 0), make([]int, 0),
make([]int, 0), make([]bool, 0)}
// Reinitialise datastructures
p.groupScope = ""
p.traversals = make([]*parser.ASTNode, 0)
p.where = nil
p.show = nil
p.specs = make([]string, 0)
p.attrsNodes = make([]map[string]string, 0)
p.attrsEdges = make([]map[string]string, 0)
p.rowNode = nil
p.rowEdge = nil
p._attrsNodesFetch = nil
p._attrsEdgesFetch = nil
p.colLabels = make([]string, 0)
p.colFormat = make([]string, 0)
p.colData = make([]string, 0)
p.colFunc = make([]FuncShow, 0)
p.primaryKind = ""
p.specs = append(p.specs, startKind)
p.attrsNodes = append(p.attrsNodes, make(map[string]string))
p.attrsEdges = append(p.attrsEdges, make(map[string]string))
// With clause is interpreted straight after finishing the columns
var withChild *parser.ASTNode
// Go through the children, check if they are valid and initialise them
for _, child := range rootChildren {
if child.Name == parser.NodeWHERE {
// Check if the show clause or some traversals are already populated
if p.show != nil || len(p.traversals) > 0 {
return p.newRuntimeError(ErrInvalidConstruct,
"condition must be before show clause and traversals", child)
}
// Reset state of where and store it
if err := child.Runtime.Validate(); err != nil {
return err
}
p.where = child
} else if child.Name == parser.NodeTRAVERSE {
// Check if show clause or where clause is already populated
if p.show != nil {
return p.newRuntimeError(ErrInvalidConstruct,
"traversals must be before show clause", child)
}
// Reset state of traversal and add it to the traversal list
if err := child.Runtime.Validate(); err != nil {
return err
}
p.traversals = append(p.traversals, child)
} else if child.Name == parser.NodeSHOW {
p.show = child
} else if child.Name == parser.NodeFROM {
// Set the group state
p.groupScope = child.Children[0].Children[0].Token.Val
} else if child.Name == parser.NodePRIMARY {
pk := child.Children[0].Token.Val
for _, nk := range p.gm.NodeKinds() {
if nk == pk {
p.primaryKind = pk
}
}
if p.primaryKind == "" {
return p.newRuntimeError(ErrUnknownNodeKind, pk, child.Children[0])
}
} else if child.Name == parser.NodeWITH {
withChild = child
} else {
return p.newRuntimeError(ErrInvalidConstruct, child.Name, child)
}
}
// Populate column related attributes
nodeKindPos, edgeKindPos, err := p.initCols()
if err != nil {
return err
}
// Interpret with clause straight after populating the columns
if withChild != nil {
if err := p.initWithFlags(withChild, nodeKindPos, edgeKindPos); err != nil {
return err
}
}
if p.primaryKind == "" {
p.primaryKind = startKind
}
return nil
}
/*
initWithFlags populates the withFlags datastructure. It is assumed that the
columns have been populated before calling this function.
*/
func (p *eqlRuntimeProvider) initWithFlags(withNode *parser.ASTNode,
nodeKindPos map[string][]int, edgeKindPos map[string][]int) error {
// Helper function to find a specified column
findColumn := func(colData string, node *parser.ASTNode) (int, error) {
col := -1
colDataSplit := strings.SplitN(colData, ":", 3)
switch len(colDataSplit) {
case 1:
// Find the first column which displays the given attribute
for i, cd := range p.colData {
cds := strings.SplitN(cd, ":", 3)
if cds[2] == colDataSplit[0] {
col = i
}
}
case 2:
// Search for first kind / attribute occurrence
kind := colDataSplit[0]
attr := colDataSplit[1]
searchColData := func(pos int, t string) {
cstr := fmt.Sprint(pos+1, ":", t, ":", attr)
for i, c := range p.colData {
if c == cstr {
col = i
}
}
}
if poslist, ok := nodeKindPos[kind]; ok {
searchColData(poslist[0], "n")
} else if poslist, ok := edgeKindPos[kind]; ok {
searchColData(poslist[0], "e")
} else {
return -1, p.newRuntimeError(ErrInvalidConstruct,
"Cannot determine column for with term: "+colData, node)
}
case 3:
// Search for exact specification
for i, c := range p.colData {
if c == colData {
col = i
}
}
}
if col == -1 {
return -1, p.newRuntimeError(ErrInvalidConstruct,
"Cannot determine column for with term: "+colData, node)
}
return col, nil
}
// Go through all children and initialise the withFlags data structure
for _, child := range withNode.Children {
if child.Name == parser.NodeNULLTRAVERSAL && child.Children[0].Name == parser.NodeTRUE {
p.allowNilTraversal = true
} else if child.Name == parser.NodeFILTERING {
for _, child := range child.Children {
if child.Name == parser.NodeISNOTNULL || child.Name == parser.NodeUNIQUE || child.Name == parser.NodeUNIQUECOUNT {
c, err := findColumn(child.Children[0].Token.Val, child)
if err != nil {
return err
}
if child.Name == parser.NodeISNOTNULL {
p.withFlags.notnullCol = append(p.withFlags.notnullCol, c)
} else if child.Name == parser.NodeUNIQUE {
p.withFlags.uniqueCol = append(p.withFlags.uniqueCol, c)
p.withFlags.uniqueColCnt = append(p.withFlags.uniqueColCnt, false)
} else if child.Name == parser.NodeUNIQUECOUNT {
p.withFlags.uniqueCol = append(p.withFlags.uniqueCol, c)
p.withFlags.uniqueColCnt = append(p.withFlags.uniqueColCnt, true)
}
} else {
return p.newRuntimeError(ErrInvalidConstruct, child.Token.Val, child)
}
}
} else if child.Name == parser.NodeORDERING {
for _, child := range child.Children {
if child.Name == parser.NodeASCENDING || child.Name == parser.NodeDESCENDING {
c, err := findColumn(child.Children[0].Token.Val, child)
if err != nil {
return err
}
if child.Name == parser.NodeASCENDING {
p.withFlags.ordering = append(p.withFlags.ordering, withOrderingAscending)
} else {
p.withFlags.ordering = append(p.withFlags.ordering, withOrderingDescending)
}
p.withFlags.orderingCol = append(p.withFlags.orderingCol, c)
} else {
return p.newRuntimeError(ErrInvalidConstruct, child.Token.Val, child)
}
}
} else {
return p.newRuntimeError(ErrInvalidConstruct, child.Token.Val, child)
}
}
return nil
}
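/*
Example (EQL syntax is illustrative): a with clause combining the flags which
initWithFlags understands - nulltraversal, filtering and ordering:

    get Person traverse :::Friend end
    show name, Friend:name
    with nulltraversal(true), filtering(unique name), ordering(ascending name)
*/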
/*
initCols populates the column related attributes. This function assumes that
specs is filled with all necessary traversals.
The following formats for a show term are allowed:
<step>:<type>:<attr> - Attribute from whatever is at the given traversal step (e.g. 2:n:name)
<kind>:<attr> - First matching kind in a row provides the attribute (e.g. Person:name)
<attr> - Show attribute from root node kind (e.g. name)
*/
func (p *eqlRuntimeProvider) initCols() (map[string][]int, map[string][]int, error) {
// Fill lookup maps for traversal kind positions
// Show term match by kind uses these
nodeKindPos := make(map[string][]int)
edgeKindPos := make(map[string][]int)
addPos := func(kmap map[string][]int, kind string, pos int) {
if l, ok := kmap[kind]; ok {
kmap[kind] = append(l, pos)
} else {
kmap[kind] = []int{pos}
}
}
for i, spec := range p.specs {
if i == 0 {
addPos(nodeKindPos, spec, i)
} else {
sspec := strings.Split(spec, ":")
if sspec[1] != "" {
addPos(edgeKindPos, sspec[1], i)
}
if sspec[3] != "" {
addPos(nodeKindPos, sspec[3], i)
}
}
}
// Fill up column lists
if p.show == nil || len(p.show.Children) == 0 {
// If no show clause is defined ask the NodeInfo to provide a summary list
for i, spec := range p.specs {
sspec := strings.Split(spec, ":")
kind := sspec[len(sspec)-1]
for _, attr := range p.ni.SummaryAttributes(kind) {
// Make sure the attribute is in attrsNodes
p.attrsNodes[i][attr] = ""
// Fill col attributes (we only show nodes)
p.colLabels = append(p.colLabels, p.ni.AttributeDisplayString(kind, attr))
p.colFormat = append(p.colFormat, "auto")
p.colData = append(p.colData, fmt.Sprintf("%v:n:%s", i+1, attr))
p.colFunc = append(p.colFunc, nil)
}
}
} else {
var err error
var attr, label, colData string
var pos int
var isNode bool
var colFunc FuncShow
// Go through the elements of the provided show clause
for _, col := range p.show.Children {
if col.Name != parser.NodeSHOWTERM {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct, col.Name, col)
}
// Reset label value
label = ""
colFunc = nil
// Create the correct colData value
if col.Token.ID == parser.TokenAT {
// We have a function - get the attribute which it operates on
funcName := col.Children[0].Children[0].Token.Val
funcInst, ok := showFunc[funcName]
if !ok {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
"Unknown function: "+funcName, col)
}
colFunc, colData, label, err = funcInst(col.Children[0], p)
if err != nil {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
err.Error(), col)
}
} else {
colData = col.Token.Val
}
colDataSplit := strings.SplitN(colData, ":", 3)
switch len(colDataSplit) {
case 1:
// Show attribute from root node kind
attr = colDataSplit[0]
pos = 0
isNode = true
colData = "1:n:" + attr
if label == "" {
label = p.ni.AttributeDisplayString(p.specs[0], attr)
}
case 2:
// First matching kind in a row provides the attribute
kind := colDataSplit[0]
if poslist, ok := nodeKindPos[kind]; ok {
attr = colDataSplit[1]
pos = poslist[0]
isNode = true
colData = fmt.Sprint(pos+1) + ":n:" + attr
} else if poslist, ok := edgeKindPos[kind]; ok {
attr = colDataSplit[1]
pos = poslist[0]
isNode = false
colData = fmt.Sprint(pos+1) + ":e:" + attr
} else {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
"Cannot determine data position for kind: "+kind, col)
}
if label == "" {
label = p.ni.AttributeDisplayString(kind, attr)
}
case 3:
// Attribute from whatever is at the given traversal step
attr = colDataSplit[2]
pos, err = strconv.Atoi(colDataSplit[0])
if err != nil {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
"Invalid data index: "+colData+" ("+err.Error()+")", col)
} else if pos < 1 {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
"Invalid data index: "+colData+" (index must be greater than 0)", col)
}
pos--
if colDataSplit[1] == "n" {
isNode = true
} else if colDataSplit[1] == "e" {
isNode = false
} else {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct,
"Invalid data source '"+colDataSplit[1]+"' (either n - Node or e - Edge)", col)
}
if label == "" {
label = p.ni.AttributeDisplayString("", attr)
}
}
if pos >= len(p.attrsNodes) {
return nil, nil, p.newRuntimeError(ErrInvalidColData,
fmt.Sprintf("Data index out of range: %v", pos+1), col)
}
// Determine label and format
colLabel := label
colFormat := "auto"
for _, t := range col.Children {
if t.Name == parser.NodeAS {
colLabel = t.Children[0].Token.Val
} else if t.Name == parser.NodeFORMAT {
colFormat = t.Children[0].Token.Val
} else if t.Name != parser.NodeFUNC {
return nil, nil, p.newRuntimeError(ErrInvalidConstruct, t.Name, t)
}
}
// Fill col attributes
p.colLabels = append(p.colLabels, colLabel)
p.colFormat = append(p.colFormat, colFormat)
p.colData = append(p.colData, colData)
p.colFunc = append(p.colFunc, colFunc)
// Populate attrsNodes and attrsEdges
if isNode {
p.attrsNodes[pos][attr] = ""
} else {
p.attrsEdges[pos][attr] = ""
}
}
}
return nodeKindPos, edgeKindPos, nil
}
/*
next advances to the next query row. Returns false if no more rows are available.
It is assumed that all traversal specs and query attrs have been filled.
*/
func (p *eqlRuntimeProvider) next() (bool, error) {
// Create fetch lists if it is the first next() call
if p._attrsNodesFetch == nil {
makeFetchList := func(attrs []map[string]string, isEdge bool) [][]string {
var fetchlist [][]string
for _, attrs := range attrs {
var attrsFetch []string
for attr := range attrs {
// Condition needs to be different for nodes and edges
if !isEdge && attr != "" && attr != data.NodeKey && attr != data.NodeKind {
attrsFetch = append(attrsFetch, attr)
} else if attr != "" && attr != data.NodeKey && attr != data.NodeKind &&
attr != data.EdgeEnd1Key && attr != data.EdgeEnd1Kind &&
attr != data.EdgeEnd1Role && attr != data.EdgeEnd1Cascading &&
attr != data.EdgeEnd2Key && attr != data.EdgeEnd2Kind &&
attr != data.EdgeEnd2Role && attr != data.EdgeEnd2Cascading {
attrsFetch = append(attrsFetch, attr)
}
}
fetchlist = append(fetchlist, attrsFetch)
}
return fetchlist
}
p._attrsNodesFetch = makeFetchList(p.attrsNodes, false)
p._attrsEdgesFetch = makeFetchList(p.attrsEdges, true)
}
// Make sure we have the row and rowEdge arrays
if p.rowNode == nil {
p.rowNode = make([]data.Node, 0)
p.rowEdge = make([]data.Edge, 0)
}
// Check if a traversal can handle the call
for _, child := range p.traversals {
childRuntime := child.Runtime.(*traversalRuntime)
if childRuntime.hasMoreNodes() {
_, err := childRuntime.Eval()
return err == nil, err
}
}
// Get next root node
startKey, err := p.nextStartKey()
if err != nil || startKey == "" {
return false, err
}
// Fetch node - always require the key attribute
// to make sure we get a node back if it exists
node, err := p.gm.FetchNodePart(p.part, startKey, p.specs[0],
append(p._attrsNodesFetch[0], "key"))
if err != nil || node == nil {
return false, err
}
// Decide if this node should be added
addNode := true
if p.where != nil {
res, err := p.where.Runtime.(CondRuntime).CondEval(node, nil)
if err != nil {
return false, err
}
addNode = res.(bool)
}
if addNode {
// Add node and the first traversal
if len(p.rowNode) == 0 {
p.rowNode = append(p.rowNode, node)
p.rowEdge = append(p.rowEdge, nil)
} else {
// Clear out the row
for i := range p.rowNode {
p.rowNode[i] = nil
p.rowEdge[i] = nil
}
// Fill in the first node
p.rowNode[0] = node
p.rowEdge[0] = nil
}
// Give the new source to the children and let them evaluate
for _, child := range p.traversals {
childRuntime := child.Runtime.(*traversalRuntime)
if err := childRuntime.newSource(node); err == ErrEmptyTraversal {
// If an empty traversal error comes back advance until
// there is an element or the end
p.rowNode[0] = nil
p.rowEdge[0] = nil
return p.next()
} else if err != nil {
return false, err
}
}
} else {
// Recursively call next until there is a condition-matching node or
// there are no more start keys available
return p.next()
}
return true, nil
}
/*
Instance function for general components
*/
type generalInst func(*eqlRuntimeProvider, *parser.ASTNode) parser.Runtime
/*
Runtime map for general components
*/
var generalProviderMap = map[string]generalInst{
parser.NodeEOF: invalidRuntimeInst,
parser.NodeVALUE: valueRuntimeInst,
parser.NodeTRUE: valueRuntimeInst,
parser.NodeFALSE: valueRuntimeInst,
parser.NodeNULL: valueRuntimeInst,
parser.NodeLIST: valueRuntimeInst,
parser.NodeFUNC: valueRuntimeInst,
parser.NodeTRAVERSE: traversalRuntimeInst,
parser.NodeWHERE: whereRuntimeInst,
// Condition components
// ====================
parser.NodeEQ: equalRuntimeInst,
parser.NodeNEQ: notEqualRuntimeInst,
parser.NodeLT: lessThanRuntimeInst,
parser.NodeLEQ: lessThanEqualsRuntimeInst,
parser.NodeGT: greaterThanRuntimeInst,
parser.NodeGEQ: greaterThanEqualsRuntimeInst,
parser.NodeNOT: notRuntimeInst,
parser.NodeAND: andRuntimeInst,
parser.NodeOR: orRuntimeInst,
// Simple arithmetic expressions
parser.NodePLUS: plusRuntimeInst,
parser.NodeMINUS: minusRuntimeInst,
parser.NodeTIMES: timesRuntimeInst,
parser.NodeDIV: divRuntimeInst,
parser.NodeMODINT: modIntRuntimeInst,
parser.NodeDIVINT: divIntRuntimeInst,
// List operations
parser.NodeIN: inRuntimeInst,
parser.NodeNOTIN: notInRuntimeInst,
// String operations
parser.NodeLIKE: likeRuntimeInst,
parser.NodeCONTAINS: containsRuntimeInst,
parser.NodeCONTAINSNOT: containsNotRuntimeInst,
parser.NodeBEGINSWITH: beginsWithRuntimeInst,
parser.NodeENDSWITH: endsWithRuntimeInst,
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"errors"
"fmt"
"devt.de/krotik/eliasdb/eql/parser"
)
/*
newRuntimeError creates a new RuntimeError object.
*/
func (rt *eqlRuntimeProvider) newRuntimeError(t error, d string, node *parser.ASTNode) error {
return &RuntimeError{rt.name, t, d, node, node.Token.Lline, node.Token.Lpos}
}
/*
RuntimeError is a runtime related error
*/
type RuntimeError struct {
Source string // Name of the source which was given to the parser
Type error // Error type (to be used for equal checks)
Detail string // Details of this error
Node *parser.ASTNode // AST Node where the error occurred
Line int // Line of the error
Pos int // Position of the error
}
/*
Error returns a human-readable string representation of this error.
*/
func (re *RuntimeError) Error() string {
ret := fmt.Sprintf("EQL error in %s: %v (%v)", re.Source, re.Type, re.Detail)
if re.Line != 0 {
return fmt.Sprintf("%s (Line:%d Pos:%d)", ret, re.Line, re.Pos)
}
return ret
}
/*
Runtime related error types
*/
var (
ErrNotARegex = errors.New("Value of operand is not a valid regex")
ErrNotANumber = errors.New("Value of operand is not a number")
ErrNotAList = errors.New("Value of operand is not a list")
ErrInvalidConstruct = errors.New("Invalid construct")
ErrUnknownNodeKind = errors.New("Unknown node kind")
ErrInvalidSpec = errors.New("Invalid traversal spec")
ErrInvalidWhere = errors.New("Invalid where clause")
ErrInvalidColData = errors.New("Invalid column data spec")
ErrEmptyTraversal = errors.New("Empty traversal")
)
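/*
Example (minimal sketch): distinguishing runtime error types after evaluating a
query; err is assumed to be the error returned by an Eval call:

    if re, ok := err.(*RuntimeError); ok && re.Type == ErrUnknownNodeKind {
        // The query referenced a node kind which is not in the datastore
    }
*/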
/*
ResultError is a result related error (e.g. wrong defined show clause)
*/
type ResultError struct {
Source string // Name of the source which was given to the parser
Type error // Error type (to be used for equal checks)
Detail string // Details of this error
}
/*
Error returns a human-readable string representation of this error.
*/
func (re *ResultError) Error() string {
return fmt.Sprintf("EQL result error in %s: %v (%v)", re.Source, re.Type, re.Detail)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"bytes"
"encoding/csv"
"fmt"
"sort"
"strconv"
"strings"
"devt.de/krotik/eliasdb/graph/data"
)
/*
SearchHeader is the header of a search result.
*/
type SearchHeader struct {
ResPrimaryKind string // Primary node kind
ResPartition string // Partition of result
ColLabels []string // Labels for columns
ColFormat []string // Format for columns
ColData []string // Data which should be displayed in the columns
}
/*
Partition returns the partition of a search result.
*/
func (sh *SearchHeader) Partition() string {
return sh.ResPartition
}
/*
PrimaryKind returns the primary kind of a search result.
*/
func (sh *SearchHeader) PrimaryKind() string {
return sh.ResPrimaryKind
}
/*
Labels returns all column labels of a search result.
*/
func (sh *SearchHeader) Labels() []string {
return sh.ColLabels
}
/*
Format returns all column format definitions of a search result.
*/
func (sh *SearchHeader) Format() []string {
return sh.ColFormat
}
/*
Data returns the data which is displayed in each column of a search result.
(e.g. 1:n:name - Name of starting nodes,
3:e:key - Key of edge traversed in the second traversal)
*/
func (sh *SearchHeader) Data() []string {
return sh.ColData
}
/*
SearchResult data structure. A search result represents the result of an EQL query.
*/
type SearchResult struct {
name string // Name to identify the result
query string // Query which produced the search result
withFlags *withFlags // With flags which should be applied to the result
SearchHeader // Embedded search header
colFunc []FuncShow // Function which transforms the data
Source [][]string // Special string holding the data source (node / edge) for each column
Data [][]interface{} // Data which is held by this search result
}
/*
newSearchResult creates a new search result object.
*/
func newSearchResult(rtp *eqlRuntimeProvider, query string) *SearchResult {
cdl := make([]string, 0, len(rtp.colData))
for i, cd := range rtp.colData {
if rtp.colFunc[i] != nil {
colDataSpec := strings.SplitN(cd, ":", 2)
cdl = append(cdl, colDataSpec[0]+":func:"+rtp.colFunc[i].name()+"()")
} else {
cdl = append(cdl, cd)
}
}
return &SearchResult{rtp.name, query, rtp.withFlags, SearchHeader{rtp.primaryKind, rtp.part, rtp.colLabels, rtp.colFormat,
cdl}, rtp.colFunc, make([][]string, 0), make([][]interface{}, 0)}
}
/*
addRow adds a row to the result.
*/
func (sr *SearchResult) addRow(rowNodes []data.Node, rowEdges []data.Edge) error {
var pos int
var isNode bool
var err error
src := make([]string, 0, len(sr.ColData))
row := make([]interface{}, 0, len(sr.ColData))
addNil := func() {
src = append(src, "")
row = append(row, nil)
}
addNode := func(n data.Node, attr string) {
if n == nil {
addNil()
return
}
src = append(src, "n:"+n.Kind()+":"+n.Key())
row = append(row, n.Attr(attr))
}
addEdge := func(e data.Edge, attr string) {
if e == nil {
addNil()
return
}
row = append(row, e.Attr(attr))
src = append(src, "e:"+e.Kind()+":"+e.Key())
}
// Pick only the data which is needed for the result
for i, colData := range sr.ColData {
attr := ""
// Row data should be picked from the node
colDataSpec := strings.SplitN(colData, ":", 3)
if len(colDataSpec) != 3 {
return &ResultError{sr.name, ErrInvalidColData, "Column data spec must have 3 items: " + colData}
}
posstring := colDataSpec[0]
if colDataSpec[1] == "func" {
pos, _ = strconv.Atoi(posstring)
} else {
if colDataSpec[1] == "n" {
isNode = true
} else if colDataSpec[1] == "e" {
isNode = false
} else {
return &ResultError{sr.name, ErrInvalidColData, "Invalid data source '" + colDataSpec[1] + "' (either n - Node or e - Edge)"}
}
attr = colDataSpec[2]
pos, err = strconv.Atoi(posstring)
if err != nil || pos < 1 {
return &ResultError{sr.name, ErrInvalidColData, "Invalid data index: " + colData}
}
}
// Make pos an index
pos--
// Check if the row data should come from a function transformation
// or from a node itself
if cf := sr.colFunc[i]; cf != nil {
fres, fsrc, err := sr.colFunc[i].eval(rowNodes[pos], rowEdges[pos])
if err != nil {
return err
}
row = append(row, fres)
src = append(src, fsrc)
} else {
if isNode {
addNode(rowNodes[pos], attr)
} else {
addEdge(rowEdges[pos], attr)
}
}
}
sr.Source = append(sr.Source, src)
sr.Data = append(sr.Data, row)
return nil
}
/*
finish is called once all rows have been added.
*/
func (sr *SearchResult) finish() {
// Apply filtering
if len(sr.withFlags.notnullCol) > 0 || len(sr.withFlags.uniqueCol) > 0 {
uniqueMaps := make([]map[string]int, len(sr.withFlags.uniqueCol))
for i := range uniqueMaps {
uniqueMaps[i] = make(map[string]int)
}
// Iterate backwards so the current element can be removed if necessary
for i := len(sr.Data) - 1; i >= 0; i-- {
row := sr.Data[i]
cont := false
// Apply not null
for _, nn := range sr.withFlags.notnullCol {
if row[nn] == nil {
sr.Data = append(sr.Data[:i], sr.Data[i+1:]...)
cont = true
break
}
}
if cont {
continue
}
// Apply unique
for j, u := range sr.withFlags.uniqueCol {
if _, ok := uniqueMaps[j][fmt.Sprint(row[u])]; ok {
uniqueMaps[j][fmt.Sprint(row[u])]++
sr.Data = append(sr.Data[:i], sr.Data[i+1:]...)
break
} else {
uniqueMaps[j][fmt.Sprint(row[u])] = 1
}
}
}
// Add unique counts if necessary
for j, uc := range sr.withFlags.uniqueColCnt {
u := sr.withFlags.uniqueCol[j]
if uc {
for _, row := range sr.Data {
row[u] = fmt.Sprintf("%v (%d)", row[u], uniqueMaps[j][fmt.Sprint(row[u])])
}
}
}
}
// Apply ordering
for i, ordering := range sr.withFlags.ordering {
sort.Stable(&SearchResultRowComparator{ordering == withOrderingAscending,
sr.withFlags.orderingCol[i], sr.Data, sr.Source})
}
}
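/*
finish deletes rows while looping downwards so that removing index i cannot
disturb indices which are still to be visited. A minimal sketch (not part of
the original source) of the append-based delete idiom used above:
*/
func exampleDeleteRow(rows [][]interface{}, i int) [][]interface{} {
// Shift rows[i+1:] one slot to the left, dropping rows[i]
return append(rows[:i], rows[i+1:]...)
}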
/*
Header returns all column headers.
*/
func (sr *SearchResult) Header() *SearchHeader {
return &sr.SearchHeader
}
/*
Query returns the query which produced this result.
*/
func (sr *SearchResult) Query() string {
return sr.query
}
/*
RowCount returns the number of rows of the result.
*/
func (sr *SearchResult) RowCount() int {
return len(sr.Data)
}
/*
Row returns a row of the result.
*/
func (sr *SearchResult) Row(line int) []interface{} {
return sr.Data[line]
}
/*
Rows returns all rows.
*/
func (sr *SearchResult) Rows() [][]interface{} {
return sr.Data
}
/*
RowSource returns the sources of a result row.
Format is either: <n/e>:<kind>:<key> or q:<query>
*/
func (sr *SearchResult) RowSource(line int) []string {
return sr.Source[line]
}
/*
RowSources returns the sources of a result.
*/
func (sr *SearchResult) RowSources() [][]string {
return sr.Source
}
/*
String returns a string representation of this search result.
*/
func (sr *SearchResult) String() string {
var buf bytes.Buffer
buf.WriteString("Labels: ")
buf.WriteString(strings.Join(sr.ColLabels, ", "))
buf.WriteString("\n")
buf.WriteString("Format: ")
buf.WriteString(strings.Join(sr.ColFormat, ", "))
buf.WriteString("\n")
buf.WriteString("Data: ")
buf.WriteString(strings.Join(sr.ColData, ", "))
buf.WriteString("\n")
// Render the table
for _, row := range sr.Data {
for i, col := range row {
if col != nil {
buf.WriteString(fmt.Sprint(col))
} else {
buf.WriteString("<not set>")
}
if i < len(row)-1 {
buf.WriteString(", ")
}
}
buf.WriteString("\n")
}
return buf.String()
}
/*
CSV returns this search result as comma-separated strings.
*/
func (sr *SearchResult) CSV() string {
var buf bytes.Buffer
labels := sr.Header().ColLabels
strData := make([][]string, len(sr.Data)+1)
// Prepare string data
strData[0] = make([]string, len(labels))
for i, s := range labels {
strData[0][i] = s
}
for i, row := range sr.Data {
strData[i+1] = make([]string, len(row))
for j, s := range row {
strData[i+1][j] = fmt.Sprint(s)
}
}
// Write CSV data into buffer
w := csv.NewWriter(&buf)
w.WriteAll(strData)
return buf.String()
}
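/*
Illustrative sketch (not part of the original source): building a minimal
SearchResult by hand and rendering it as CSV. Only the fields which CSV
actually reads (ColLabels and Data) are populated.
*/
func exampleSearchResultCSV() string {
sr := &SearchResult{
SearchHeader: SearchHeader{ColLabels: []string{"Name", "Age"}},
Data: [][]interface{}{{"Hans", 42}, {"Petra", 38}},
}
// Returns "Name,Age\nHans,42\nPetra,38\n"
return sr.CSV()
}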
// Util functions
// ==============
/*
SearchResultRowComparator is a comparator object used for sorting the result
*/
type SearchResultRowComparator struct {
Ascending bool // Sort should be ascending
Column int // Column to sort
Data [][]interface{} // Data to sort
Source [][]string // Source entries which follow the data
}
func (c SearchResultRowComparator) Len() int {
return len(c.Data)
}
func (c SearchResultRowComparator) Less(i, j int) bool {
c1 := c.Data[i][c.Column]
c2 := c.Data[j][c.Column]
num1, err := strconv.ParseFloat(fmt.Sprint(c1), 64)
if err == nil {
num2, err := strconv.ParseFloat(fmt.Sprint(c2), 64)
if err == nil {
if c.Ascending {
return num1 < num2
}
return num1 > num2
}
}
if c.Ascending {
return fmt.Sprintf("%v", c1) < fmt.Sprintf("%v", c2)
}
return fmt.Sprintf("%v", c1) > fmt.Sprintf("%v", c2)
}
func (c SearchResultRowComparator) Swap(i, j int) {
c.Data[i], c.Data[j] = c.Data[j], c.Data[i]
c.Source[i], c.Source[j] = c.Source[j], c.Source[i]
}
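/*
Illustrative sketch (not part of the original source): sorting result rows
directly with the comparator. Values which parse as numbers are compared
numerically, everything else falls back to lexicographic string ordering.
*/
func exampleSortRows() [][]interface{} {
data := [][]interface{}{{"10"}, {"2"}, {"abc"}}
src := [][]string{{"a"}, {"b"}, {"c"}}
sort.Stable(&SearchResultRowComparator{true, 0, data, src})
// data is now [["2"] ["10"] ["abc"]] - "2" sorts before "10" numerically
return data
}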
// Testing functions
// =================
type rowSort SearchResult
func (s rowSort) Len() int {
return len(s.Data)
}
func (s rowSort) Swap(i, j int) {
s.Data[i], s.Data[j] = s.Data[j], s.Data[i]
s.Source[i], s.Source[j] = s.Source[j], s.Source[i]
}
func (s rowSort) Less(i, j int) bool {
keyString := func(data []interface{}) string {
var ret bytes.Buffer
for _, d := range data {
ret.WriteString(fmt.Sprintf("%v", d))
}
return ret.String()
}
return keyString(s.Data[i]) < keyString(s.Data[j])
}
/*
StableSort sorts the rows of the result in a stable 100% reproducible way.
*/
func (sr *SearchResult) StableSort() {
sort.Stable(rowSort(*sr))
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"strings"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph/data"
)
/*
traversalRuntime is the runtime for traversals.
*/
type traversalRuntime struct {
rtp *eqlRuntimeProvider
node *parser.ASTNode
where *parser.ASTNode // Traversal where clause
sourceNode data.Node // Source node for traversal - should be injected by the parent
spec string // Spec for this traversal
specIndex int // Index of this traversal in the traversals array
nodes []data.Node // Nodes of the last traversal result
edges []data.Edge // Edges of the last traversal result
curptr int // Pointer to the next node in the last traversal result
}
/*
traversalRuntimeInst returns a new runtime component instance.
*/
func traversalRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &traversalRuntime{rtp, node, nil, nil, "", -1, nil, nil, 0}
}
/*
Validate this node and all its child nodes.
*/
func (rt *traversalRuntime) Validate() error {
spec := rt.node.Children[0].Token.Val
rt.specIndex = -1
// Check traversal spec
sspec := strings.Split(spec, ":")
if len(sspec) != 4 {
return rt.rtp.newRuntimeError(ErrInvalidSpec, spec, rt.node)
}
rt.spec = spec
rt.specIndex = len(rt.rtp.specs)
rt.where = nil
rt.rtp.specs = append(rt.rtp.specs, spec)
rt.rtp.attrsNodes = append(rt.rtp.attrsNodes, make(map[string]string))
rt.rtp.attrsEdges = append(rt.rtp.attrsEdges, make(map[string]string))
// Go through all deeper traversals
for _, child := range rt.node.Children[1:] {
if child.Name == parser.NodeTRAVERSE {
if err := child.Runtime.Validate(); err != nil {
return err
}
} else if child.Name == parser.NodeWHERE {
whereRuntime := child.Runtime.(*whereRuntime)
whereRuntime.specIndex = rt.specIndex
// Reset state of where and store it
if err := whereRuntime.Validate(); err != nil {
return err
}
rt.where = child
} else {
return rt.rtp.newRuntimeError(ErrInvalidConstruct, child.Name, child)
}
}
return nil
}
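/*
Illustrative sketch (not part of the original source) of the shape check
above: a traversal spec must have exactly four colon-separated components.
The spec ":::", for example, leaves all components unconstrained.
*/
func exampleIsWellFormedSpec(spec string) bool {
return len(strings.Split(spec, ":")) == 4
}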
/*
hasMoreNodes returns true if this traversal runtime component can produce more
nodes. If the result is false then a new source node is required.
*/
func (rt *traversalRuntime) hasMoreNodes() bool {
for _, child := range rt.node.Children[1:] {
if child.Name == parser.NodeTRAVERSE {
childRuntime := child.Runtime.(*traversalRuntime)
if childRuntime.hasMoreNodes() {
return true
}
}
}
return rt.curptr < len(rt.nodes)
}
/*
newSource assigns a new source node to this traversal component and
traverses it.
*/
func (rt *traversalRuntime) newSource(node data.Node) error {
var nodes []data.Node
var edges []data.Edge
rt.sourceNode = node
// Do the actual traversal if we got a node
if node != nil {
var err error
// Do a simple traversal without getting any node data first
nodes, edges, err = rt.rtp.gm.TraverseMulti(rt.rtp.part, rt.sourceNode.Key(),
rt.sourceNode.Kind(), rt.spec, false)
if err != nil {
return err
}
// Now get the attributes which are required
for _, node := range nodes {
attrs := rt.rtp._attrsNodesFetch[rt.specIndex]
if len(attrs) > 0 {
n, err := rt.rtp.gm.FetchNodePart(rt.rtp.part, node.Key(), node.Kind(), attrs)
if err != nil {
return err
} else if n != nil {
for _, attr := range attrs {
node.SetAttr(attr, n.Attr(attr))
}
}
}
}
for _, edge := range edges {
attrs := rt.rtp._attrsEdgesFetch[rt.specIndex]
if len(attrs) > 0 {
e, err := rt.rtp.gm.FetchEdgePart(rt.rtp.part, edge.Key(), edge.Kind(), attrs)
if err != nil {
return err
} else if e != nil {
for _, attr := range attrs {
edge.SetAttr(attr, e.Attr(attr))
}
}
}
}
}
// Apply where clause
if rt.where != nil {
fNodes := make([]data.Node, 0, len(nodes))
fEdges := make([]data.Edge, 0, len(edges))
for i, node := range nodes {
edge := edges[i]
res, err := rt.where.Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return err
}
if res.(bool) {
fNodes = append(fNodes, node)
fEdges = append(fEdges, edge)
}
}
nodes = fNodes
edges = fEdges
}
rt.nodes = nodes
rt.edges = edges
rt.curptr = 0
// Check if there are no nodes to display and return an error if
// empty traversals are not allowed
if len(rt.nodes) == 0 && !rt.rtp.allowNilTraversal {
return ErrEmptyTraversal
}
// Evaluate the new source
_, err := rt.Eval()
return err
}
/*
Eval evaluates this runtime component.
*/
func (rt *traversalRuntime) Eval() (interface{}, error) {
// Check if a child can handle the call
for _, child := range rt.node.Children[1:] {
if child.Name == parser.NodeTRAVERSE {
childRuntime := child.Runtime.(*traversalRuntime)
if childRuntime.hasMoreNodes() {
return childRuntime.Eval()
}
}
}
// Get the next node and fill the row entry in the provider
var rowNode data.Node
var rowEdge data.Edge
if rt.curptr < len(rt.nodes) {
// Get a new node from our node list if possible
rowNode = rt.nodes[rt.curptr]
rowEdge = rt.edges[rt.curptr]
rt.curptr++
}
if len(rt.rtp.rowNode) == rt.specIndex {
rt.rtp.rowNode = append(rt.rtp.rowNode, rowNode)
rt.rtp.rowEdge = append(rt.rtp.rowEdge, rowEdge)
} else {
rt.rtp.rowNode[rt.specIndex] = rowNode
rt.rtp.rowEdge[rt.specIndex] = rowEdge
}
// Give the new source to the children and let them evaluate
for _, child := range rt.node.Children[1:] {
if child.Name == parser.NodeTRAVERSE {
childRuntime := child.Runtime.(*traversalRuntime)
if err := childRuntime.newSource(rt.rtp.rowNode[rt.specIndex]); err != nil {
return nil, err
}
}
}
return nil, nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"fmt"
"regexp"
"strconv"
"strings"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph/data"
)
/*
CondRuntime is a component of a condition which can be evaluated
with a node and an edge.
*/
type CondRuntime interface {
/*
CondEval evaluates this condition runtime element.
*/
CondEval(node data.Node, edge data.Edge) (interface{}, error)
}
/*
Abstract runtime for condition components
*/
type whereItemRuntime struct {
rtp *eqlRuntimeProvider
astNode *parser.ASTNode
}
/*
Validate this node and all its child nodes.
*/
func (rt *whereItemRuntime) Validate() error {
return rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.astNode.Name, rt.astNode)
}
/*
Eval evaluates this condition component.
*/
func (rt *whereItemRuntime) Eval() (interface{}, error) {
return nil, rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.astNode.Name, rt.astNode)
}
/*
valOp executes an operation on two abstract values.
*/
func (rt *whereItemRuntime) valOp(node data.Node, edge data.Edge, op func(interface{}, interface{}) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
return op(res1, res2), nil
}
/*
stringOp executes an operation on two strings.
*/
func (rt *whereItemRuntime) stringOp(node data.Node, edge data.Edge, op func(string, string) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
return op(fmt.Sprint(res1), fmt.Sprint(res2)), nil
}
/*
regexOp executes an operation on a string and a regex.
*/
func (rt *whereItemRuntime) regexOp(node data.Node, edge data.Edge, op func(string, *regexp.Regexp) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
// Try to compile the regex
res2String := fmt.Sprint(res2)
regexp, err := regexp.Compile(res2String)
if err != nil {
return nil, rt.rtp.newRuntimeError(ErrNotARegex,
fmt.Sprintf("%#v - %s", res2String, err.Error()), rt.astNode.Children[1])
}
return op(fmt.Sprint(res1), regexp), nil
}
/*
numOp executes an operation on two number values.
*/
func (rt *whereItemRuntime) numOp(node data.Node, edge data.Edge, op func(float64, float64) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
errDetail := func(tokenVal string, opVal string) string {
if tokenVal == opVal {
return opVal
}
return tokenVal + "=" + opVal
}
// Parse the values to numbers
res1Str := fmt.Sprint(res1)
res1Num, err := strconv.ParseFloat(res1Str, 64)
if err != nil {
return nil, rt.rtp.newRuntimeError(ErrNotANumber, errDetail(rt.astNode.Children[0].Token.Val, res1Str), rt.astNode.Children[0])
}
res2Str := fmt.Sprint(res2)
res2Num, err := strconv.ParseFloat(res2Str, 64)
if err != nil {
return nil, rt.rtp.newRuntimeError(ErrNotANumber, errDetail(rt.astNode.Children[1].Token.Val, res2Str), rt.astNode.Children[1])
}
return op(res1Num, res2Num), nil
}
/*
listOp executes a list operation on a single value and a list.
*/
func (rt *whereItemRuntime) listOp(node data.Node, edge data.Edge, op func(interface{}, []interface{}) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
errDetail := func(tokenVal string, opVal string) string {
if tokenVal == opVal {
return opVal
}
return tokenVal + "=" + opVal
}
// Parse right value to a list
res2List, ok := res2.([]interface{})
if !ok {
return nil, rt.rtp.newRuntimeError(ErrNotAList, errDetail(rt.astNode.Children[1].Token.Val, fmt.Sprint(res2)), rt.astNode.Children[1])
}
return op(res1, res2List), nil
}
/*
boolOp executes an operation on two boolean values. Can optionally try a
short circuit operation.
*/
func (rt *whereItemRuntime) boolOp(node data.Node, edge data.Edge, op func(bool, bool) interface{},
scop func(bool) interface{}) (interface{}, error) {
res1, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
if len(rt.astNode.Children) == 1 {
// Special case for "not" operation
return op(toBool(res1), false), nil
}
// Try short circuit
res1bool := toBool(res1)
if scop != nil {
if ret := scop(res1bool); ret != nil {
return ret, nil
}
}
res2, err := rt.astNode.Children[1].Runtime.(CondRuntime).CondEval(node, edge)
if err != nil {
return nil, err
}
return op(res1bool, toBool(res2)), nil
}
/*
toBool is a helper function to turn any value into a boolean.
*/
func toBool(res interface{}) bool {
switch res := res.(type) {
default:
return res != nil
case bool:
return res
case float64:
return res > 0
case string:
// Try to convert the string into a number
num, err := strconv.ParseFloat(res, 64)
if err == nil {
return num > 0
}
return res != ""
}
}
func equals(res1 interface{}, res2 interface{}) bool {
// Try to convert the string into a number
num1, err := strconv.ParseFloat(fmt.Sprint(res1), 64)
if err == nil {
num2, err := strconv.ParseFloat(fmt.Sprint(res2), 64)
if err == nil {
return num1 == num2
}
}
return fmt.Sprintf("%v", res1) == fmt.Sprintf("%v", res2)
}
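/*
Illustrative sketch (not part of the original source) of the coercion rules
above: strings which parse as numbers are treated numerically, all other
values fall back to string comparison or emptiness checks.
*/
func exampleCoercion() (bool, bool, bool) {
b1 := toBool("1")        // true - parses as a number greater than 0
b2 := toBool("false")    // true - a non-empty, non-numeric string
eq := equals("42", 42.0) // true - both sides parse to the number 42
return b1, b2, eq
}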
// Where runtime
// =============
/*
Runtime for where
*/
type whereRuntime struct {
rtp *eqlRuntimeProvider
astNode *parser.ASTNode
specIndex int // Index of this traversal in the traversals array
}
/*
whereRuntimeInst returns a new runtime component instance.
*/
func whereRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &whereRuntime{rtp, node, 0}
}
/*
Validate this node and all its child nodes.
*/
func (rt *whereRuntime) Validate() error {
var visitChildren func(astNode *parser.ASTNode) error
visitChildren = func(astNode *parser.ASTNode) error {
// Determine which values should be interpreted as node attributes
if astNode.Name == parser.NodeVALUE {
val := astNode.Token.Val
lcval := strings.ToLower(val)
valRuntime, ok := astNode.Runtime.(*valueRuntime)
if !ok {
return astNode.Runtime.Validate()
}
if strings.HasPrefix(lcval, "eattr:") {
valRuntime.condVal = val[6:]
valRuntime.isNodeAttrValue = false
valRuntime.isEdgeAttrValue = true
} else if strings.HasPrefix(lcval, "attr:") {
valRuntime.condVal = val[5:]
valRuntime.isNodeAttrValue = true
valRuntime.isEdgeAttrValue = false
} else if strings.HasPrefix(lcval, "val:") {
valRuntime.condVal = val[4:]
valRuntime.isNodeAttrValue = false
valRuntime.isEdgeAttrValue = false
} else {
valRuntime.condVal = val
valRuntime.isNodeAttrValue = rt.rtp.ni.IsValidAttr(val)
valRuntime.isEdgeAttrValue = false
// Check if we have a nested value
if strings.Contains(val, ".") {
nestedValuePath := strings.Split(val, ".")
if rt.rtp.ni.IsValidAttr(nestedValuePath[0]) {
valRuntime.condVal = nestedValuePath[0]
valRuntime.nestedValuePath = nestedValuePath
valRuntime.isNodeAttrValue = true
}
}
}
// Make sure attributes are queried
if valRuntime.isNodeAttrValue {
rt.rtp.attrsNodes[rt.specIndex][valRuntime.condVal] = ""
} else if valRuntime.isEdgeAttrValue {
rt.rtp.attrsEdges[rt.specIndex][valRuntime.condVal] = ""
}
}
for _, child := range astNode.Children {
if err := visitChildren(child); err != nil {
return err
}
}
return nil
}
return visitChildren(rt.astNode)
}
/*
Eval evaluates the where clause. A where clause must be evaluated via
CondEval with a node and an edge; calling Eval directly returns an invalid
construct error.
*/
func (rt *whereRuntime) Eval() (interface{}, error) {
return nil, rt.rtp.newRuntimeError(ErrInvalidConstruct, rt.astNode.Name, rt.astNode)
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *whereRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
res, err := rt.astNode.Children[0].Runtime.(CondRuntime).CondEval(node, edge)
return toBool(res), err
}
// Where related runtimes
// ======================
/*
Equal runtime
*/
type equalRuntime struct {
*whereItemRuntime
}
/*
equalRuntimeInst returns a new runtime component instance.
*/
func equalRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &equalRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *equalRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return equals(res1, res2) })
}
/*
Not equal runtime
*/
type notEqualRuntime struct {
*whereItemRuntime
}
/*
notEqualRuntimeInst returns a new runtime component instance.
*/
func notEqualRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &notEqualRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *notEqualRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return !equals(res1, res2) })
}
/*
Less than runtime
*/
type lessThanRuntime struct {
*whereItemRuntime
}
/*
lessThanRuntimeInst returns a new runtime component instance.
*/
func lessThanRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &lessThanRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *lessThanRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
ret, err := rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 < res2 })
if err != nil {
// Do a simple string ordering
ret, err = rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return fmt.Sprint(res1) < fmt.Sprint(res2) })
}
return ret, err
}
/*
Less than equals runtime
*/
type lessThanEqualsRuntime struct {
*whereItemRuntime
}
/*
lessThanEqualsRuntimeInst returns a new runtime component instance.
*/
func lessThanEqualsRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &lessThanEqualsRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *lessThanEqualsRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
ret, err := rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 <= res2 })
if err != nil {
// Do a simple string ordering
ret, err = rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return fmt.Sprint(res1) <= fmt.Sprint(res2) })
}
return ret, err
}
/*
Greater than runtime
*/
type greaterThanRuntime struct {
*whereItemRuntime
}
/*
greaterThanRuntimeInst returns a new runtime component instance.
*/
func greaterThanRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &greaterThanRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *greaterThanRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
ret, err := rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 > res2 })
if err != nil {
// Do a simple string ordering
ret, err = rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return fmt.Sprint(res1) > fmt.Sprint(res2) })
}
return ret, err
}
/*
Greater than equals runtime
*/
type greaterThanEqualsRuntime struct {
*whereItemRuntime
}
/*
greaterThanEqualsRuntimeInst returns a new runtime component instance.
*/
func greaterThanEqualsRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &greaterThanEqualsRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *greaterThanEqualsRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
ret, err := rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 >= res2 })
if err != nil {
// Do a simple string ordering
ret, err = rt.valOp(node, edge, func(res1 interface{}, res2 interface{}) interface{} { return fmt.Sprint(res1) >= fmt.Sprint(res2) })
}
return ret, err
}
/*
And runtime
*/
type andRuntime struct {
*whereItemRuntime
}
/*
andRuntimeInst returns a new runtime component instance.
*/
func andRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &andRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *andRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.boolOp(node, edge, func(res1 bool, res2 bool) interface{} { return res1 && res2 },
func(res1 bool) interface{} {
if !res1 {
return false
}
return nil
})
}
/*
Or runtime
*/
type orRuntime struct {
*whereItemRuntime
}
/*
orRuntimeInst returns a new runtime component instance.
*/
func orRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &orRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *orRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.boolOp(node, edge, func(res1 bool, res2 bool) interface{} { return res1 || res2 },
func(res1 bool) interface{} {
if res1 {
return true
}
return nil
})
}
/*
Not runtime
*/
type notRuntime struct {
*whereItemRuntime
}
/*
notRuntimeInst returns a new runtime component instance.
*/
func notRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &notRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *notRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.boolOp(node, edge, func(res1 bool, res2 bool) interface{} { return !res1 }, nil)
}
/*
Plus runtime
*/
type plusRuntime struct {
*whereItemRuntime
}
/*
plusRuntimeInst returns a new runtime component instance.
*/
func plusRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &plusRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *plusRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 + res2 })
}
/*
Minus runtime
*/
type minusRuntime struct {
*whereItemRuntime
}
/*
minusRuntimeInst returns a new runtime component instance.
*/
func minusRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &minusRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *minusRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 - res2 })
}
/*
Times runtime
*/
type timesRuntime struct {
*whereItemRuntime
}
/*
timesRuntimeInst returns a new runtime component instance.
*/
func timesRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &timesRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *timesRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 * res2 })
}
/*
Div runtime
*/
type divRuntime struct {
*whereItemRuntime
}
/*
divRuntimeInst returns a new runtime component instance.
*/
func divRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &divRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *divRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return res1 / res2 })
}
/*
ModInt runtime
*/
type modIntRuntime struct {
*whereItemRuntime
}
/*
modIntRuntimeInst returns a new runtime component instance.
*/
func modIntRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &modIntRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *modIntRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return int(res1) % int(res2) })
}
/*
DivInt runtime
*/
type divIntRuntime struct {
*whereItemRuntime
}
/*
divIntRuntimeInst returns a new runtime component instance.
*/
func divIntRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &divIntRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *divIntRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.numOp(node, edge, func(res1 float64, res2 float64) interface{} { return int(res1) / int(res2) })
}
/*
In runtime
*/
type inRuntime struct {
*whereItemRuntime
}
/*
inRuntimeInst returns a new runtime component instance.
*/
func inRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &inRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *inRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.listOp(node, edge, func(res1 interface{}, res2 []interface{}) interface{} {
for _, item := range res2 {
if equals(res1, item) {
return true
}
}
return false
})
}
/*
Not in runtime
*/
type notInRuntime struct {
*whereItemRuntime
}
/*
notInRuntimeInst returns a new runtime component instance.
*/
func notInRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &notInRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *notInRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.listOp(node, edge, func(res1 interface{}, res2 []interface{}) interface{} {
for _, item := range res2 {
if equals(res1, item) {
return false
}
}
return true
})
}
/*
Like runtime
*/
type likeRuntime struct {
compiledRegex *regexp.Regexp // Quick lookup of the compiled regex if it is a constant
*whereItemRuntime
}
/*
likeRuntimeInst returns a new runtime component instance.
*/
func likeRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &likeRuntime{nil, &whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *likeRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
// Check for constant regexp
if valRT, ok := rt.astNode.Children[1].Runtime.(*valueRuntime); ok {
if !valRT.isNodeAttrValue && !valRT.isEdgeAttrValue {
// Given regex is a constant and only needs to be compiled once
val, _ := valRT.CondEval(node, edge)
valStr := fmt.Sprint(val)
regexp, err := regexp.Compile(valStr)
if err != nil {
return nil, rt.rtp.newRuntimeError(ErrNotARegex,
fmt.Sprintf("%#v - %s", valStr, err.Error()), rt.astNode.Children[1])
}
rt.compiledRegex = regexp
}
}
if rt.compiledRegex == nil {
return rt.regexOp(node, edge, func(res1 string, res2 *regexp.Regexp) interface{} { return res2.MatchString(res1) })
}
return rt.stringOp(node, edge, func(res1 string, res2 string) interface{} { return rt.compiledRegex.MatchString(res1) })
}
/*
Contains runtime
*/
type containsRuntime struct {
*whereItemRuntime
}
/*
containsRuntimeInst returns a new runtime component instance.
*/
func containsRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &containsRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *containsRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.stringOp(node, edge, func(res1 string, res2 string) interface{} { return strings.Contains(res1, res2) })
}
/*
Contains not runtime
*/
type containsNotRuntime struct {
*whereItemRuntime
}
/*
containsNotRuntimeInst returns a new runtime component instance.
*/
func containsNotRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &containsNotRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *containsNotRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.stringOp(node, edge, func(res1 string, res2 string) interface{} { return !strings.Contains(res1, res2) })
}
/*
Begins with runtime
*/
type beginsWithRuntime struct {
*whereItemRuntime
}
/*
beginsWithRuntimeInst returns a new runtime component instance.
*/
func beginsWithRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &beginsWithRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *beginsWithRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.stringOp(node, edge, func(res1 string, res2 string) interface{} { return strings.HasPrefix(res1, res2) })
}
/*
Ends with runtime
*/
type endsWithRuntime struct {
*whereItemRuntime
}
/*
endsWithRuntimeInst returns a new runtime component instance.
*/
func endsWithRuntimeInst(rtp *eqlRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &endsWithRuntime{&whereItemRuntime{rtp, node}}
}
/*
CondEval evaluates this condition runtime element.
*/
func (rt *endsWithRuntime) CondEval(node data.Node, edge data.Edge) (interface{}, error) {
return rt.stringOp(node, edge, func(res1 string, res2 string) interface{} { return strings.HasSuffix(res1, res2) })
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package parser
import (
"fmt"
"strconv"
"strings"
"unicode"
"unicode/utf8"
"devt.de/krotik/common/stringutil"
)
/*
LexToken represents a token which is returned by the lexer.
*/
type LexToken struct {
ID LexTokenID // Token kind
Pos int // Starting position (in runes)
Val string // Token value
Lline int // Line in the input where this token appears
Lpos int // Position in the input line where this token appears
}
/*
PosString returns the position of this token in the original input as a string.
*/
func (t LexToken) PosString() string {
return fmt.Sprintf("Line %v, Pos %v", t.Lline, t.Lpos)
}
/*
String returns a string representation of a token.
*/
func (t LexToken) String() string {
switch {
case t.ID == TokenEOF:
return "EOF"
case t.ID == TokenError:
return fmt.Sprintf("Error: %s (%s)", t.Val, t.PosString())
case t.ID > TOKENodeSYMBOLS && t.ID < TOKENodeKEYWORDS:
return fmt.Sprintf("%s", strings.ToUpper(t.Val))
case t.ID > TOKENodeKEYWORDS:
return fmt.Sprintf("<%s>", strings.ToUpper(t.Val))
case len(t.Val) > 10:
// Special case for very long values
return fmt.Sprintf("%.10q...", t.Val)
}
return fmt.Sprintf("%q", t.Val)
}
/*
Map of keywords - these require spaces between them
*/
var keywordMap = map[string]LexTokenID{
"get": TokenGET,
"lookup": TokenLOOKUP,
"from": TokenFROM,
"group": TokenGROUP,
"with": TokenWITH,
"filtering": TokenFILTERING,
"ordering": TokenORDERING,
"nulltraversal": TokenNULLTRAVERSAL,
"where": TokenWHERE,
"traverse": TokenTRAVERSE,
"end": TokenEND,
"primary": TokenPRIMARY,
"show": TokenSHOW,
"as": TokenAS,
"format": TokenFORMAT,
"and": TokenAND,
"or": TokenOR,
"like": TokenLIKE,
"in": TokenIN,
"contains": TokenCONTAINS,
"beginswith": TokenBEGINSWITH,
"endswith": TokenENDSWITH,
"containsnot": TokenCONTAINSNOT,
"not": TokenNOT,
"notin": TokenNOTIN,
"false": TokenFALSE,
"true": TokenTRUE,
"unique": TokenUNIQUE,
"uniquecount": TokenUNIQUECOUNT,
"null": TokenNULL,
"isnotnull": TokenISNOTNULL,
"ascending": TokenASCENDING,
"descending": TokenDESCENDING,
}
/*
Special symbols which will always be unique - these will separate unquoted strings
*/
var symbolMap = map[string]LexTokenID{
"@": TokenAT,
">=": TokenGEQ,
"<=": TokenLEQ,
"!=": TokenNEQ,
"=": TokenEQ,
">": TokenGT,
"<": TokenLT,
"(": TokenLPAREN,
")": TokenRPAREN,
"[": TokenLBRACK,
"]": TokenRBRACK,
",": TokenCOMMA,
"+": TokenPLUS,
"-": TokenMINUS,
"*": TokenTIMES,
"/": TokenDIV,
"//": TokenDIVINT,
"%": TokenMODINT,
}
// Lexer
// =====
/*
RuneEOF is a special rune which represents the end of the input
*/
const RuneEOF = -1
/*
Function which represents the current state of the lexer and returns the next state
*/
type lexFunc func(*lexer) lexFunc
/*
Lexer data structure
*/
type lexer struct {
name string // Name to identify the input
input string // Input string of the lexer
pos int // Current rune pointer
line int // Current line pointer
lastnl int // Last newline position
width int // Width of last rune
start int // Start position of the current read token
scope LexTokenID // Current scope
tokens chan LexToken // Channel for lexer output
}
/*
FirstWord returns the first word of a given input.
*/
func FirstWord(input string) string {
var word string
l := &lexer{"", input, 0, 0, 0, 0, 0, -1, nil}
if skipWhiteSpace(l) {
l.startNew()
lexTextBlock(l, false)
word = input[l.start:l.pos]
}
return word
}
/*
Lex lexes a given input. Returns a channel which contains tokens.
*/
func Lex(name string, input string) chan LexToken {
l := &lexer{name, input, 0, 0, 0, 0, 0, -1, make(chan LexToken)}
go l.run()
return l.tokens
}
/*
LexToList lexes a given input. Returns a list of tokens.
*/
func LexToList(name string, input string) []LexToken {
var tokens []LexToken
for t := range Lex(name, input) {
tokens = append(tokens, t)
}
return tokens
}
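/*
Illustrative usage sketch (not part of the original source): lexing a small
query and printing each token with its position.
*/
func exampleLexQuery() {
for _, t := range LexToList("test", "get Person where name = 'Hans'") {
fmt.Println(t.PosString(), t.String())
}
}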
/*
Main loop of the lexer.
*/
func (l *lexer) run() {
if skipWhiteSpace(l) {
for state := lexToken; state != nil; {
state = state(l)
if !skipWhiteSpace(l) {
break
}
}
}
close(l.tokens)
}
/*
next returns the next rune in the input and advances the current rune pointer
if the peek flag is not set. If the peek flag is set then the rune pointer
is not advanced.
*/
func (l *lexer) next(peek bool) rune {
// Check if we reached the end
if int(l.pos) >= len(l.input) {
return RuneEOF
}
// Decode the next rune
r, w := utf8.DecodeRuneInString(l.input[l.pos:])
if !peek {
l.width = w
l.pos += l.width
}
return r
}
/*
backup sets the pointer one rune back. Can only be called once per next call.
*/
func (l *lexer) backup() {
if l.width == -1 {
panic("Can only backup once per next call")
}
l.pos -= l.width
l.width = -1
}
/*
startNew starts a new token.
*/
func (l *lexer) startNew() {
l.start = l.pos
}
/*
emitToken passes a token back to the client.
*/
func (l *lexer) emitToken(t LexTokenID) {
if t == TokenEOF {
l.emitTokenAndValue(t, "")
return
}
if l.tokens != nil {
l.tokens <- LexToken{t, l.start, l.input[l.start:l.pos],
l.line + 1, l.start - l.lastnl + 1}
}
}
/*
emitTokenAndValue passes a token with a given value back to the client.
*/
func (l *lexer) emitTokenAndValue(t LexTokenID, val string) {
if l.tokens != nil {
l.tokens <- LexToken{t, l.start, val, l.line + 1, l.start - l.lastnl + 1}
}
}
/*
emitError passes an error token back to the client.
*/
func (l *lexer) emitError(msg string) {
if l.tokens != nil {
l.tokens <- LexToken{TokenError, l.start, msg, l.line + 1, l.start - l.lastnl + 1}
}
}
// State functions
// ===============
/*
lexToken is the main entry function for the lexer.
*/
func lexToken(l *lexer) lexFunc {
// Check if we got a quoted value or a comment
n1 := l.next(false)
n2 := l.next(true)
l.backup()
if n1 == '#' {
return skipRestOfLine
}
if (n1 == '"' || n1 == '\'') || (n1 == 'r' && (n2 == '"' || n2 == '\'')) {
return lexValue
}
// Lex a block of text and emit any found tokens
l.startNew()
lexTextBlock(l, true)
// Try to lookup the keyword or an unquoted value
keywordCandidate := strings.ToLower(l.input[l.start:l.pos])
token, ok := keywordMap[keywordCandidate]
if !ok {
token, ok = symbolMap[keywordCandidate]
}
if ok {
// Special start token was found
l.emitToken(token)
switch token {
case TokenGET:
l.scope = token
return lexNodeKind
case TokenLOOKUP:
l.scope = token
return lexNodeKind
}
} else {
// An unknown token was found - it must be an unquoted value
// emit and continue
l.emitToken(TokenVALUE)
}
return lexToken
}
/*
skipRestOfLine skips all characters until the next newline character.
*/
func skipRestOfLine(l *lexer) lexFunc {
r := l.next(false)
for r != '\n' && r != RuneEOF {
r = l.next(false)
}
if r == RuneEOF {
return nil
}
return lexToken
}
/*
lexNodeKind lexes a node kind string.
*/
func lexNodeKind(l *lexer) lexFunc {
l.startNew()
lexTextBlock(l, false)
nodeKindCandidate := strings.ToLower(l.input[l.start:l.pos])
if !stringutil.IsAlphaNumeric(nodeKindCandidate) {
l.emitError("Invalid node kind " + fmt.Sprintf("'%v'", nodeKindCandidate) +
" - can only contain [a-zA-Z0-9_]")
return nil
}
l.emitToken(TokenNODEKIND)
if l.scope == TokenGET {
return lexToken
}
// In a lookup scope more values are following
return lexValue
}
/*
lexValue lexes a value which can describe names, values, regexes, etc ...
Values can be declared in different ways:
' ... ' or " ... "
Characters are parsed between quotes (escape sequences are interpreted)
r' ... ' or r" ... "
Characters are parsed verbatim between the quotes (no escape sequences are interpreted)
*/
func lexValue(l *lexer) lexFunc {
var endToken rune
l.startNew()
allowEscapes := false
r := l.next(false)
// Check if we have a raw quoted string
if q := l.next(true); r == 'r' && (q == '"' || q == '\'') {
endToken = q
l.next(false)
} else if r == '"' || r == '\'' {
allowEscapes = true
endToken = r
} else {
l.emitError("Value expected")
return nil
}
r = l.next(false)
rprev := ' '
lLine := l.line
lLastnl := l.lastnl
for (!allowEscapes && r != endToken) ||
(allowEscapes && (r != endToken || rprev == '\\')) {
if r == '\n' {
lLine++
lLastnl = l.pos
}
rprev = r
r = l.next(false)
if r == RuneEOF {
l.emitError("Unexpected end while reading value")
return nil
}
}
if allowEscapes {
val := l.input[l.start+1 : l.pos-1]
// Interpret escape sequences right away
if endToken == '\'' {
// Escape double quotes in a single quoted string
val = strings.Replace(val, "\"", "\\\"", -1)
}
s, err := strconv.Unquote("\"" + val + "\"")
if err != nil {
l.emitError(err.Error() + " while parsing escape sequences")
return nil
}
l.emitTokenAndValue(TokenVALUE, s)
} else {
l.emitTokenAndValue(TokenVALUE, l.input[l.start+2:l.pos-1])
}
// Set newline
l.line = lLine
l.lastnl = lLastnl
return lexToken
}
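/*
Illustrative sketch (not part of the original source) of the two quoting
forms handled above: escape sequences are interpreted in plain quoted
strings but kept verbatim in raw (r-prefixed) strings.
*/
func exampleValueQuoting() []LexToken {
// The first value contains a real tab character after unquoting,
// the second keeps the backslash and the 't' as two characters.
return LexToList("test", `lookup k "a\tb" r"a\tb"`)
}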
// Helper functions
// ================
/*
skipWhiteSpace skips any number of whitespace characters. Returns false if the parser
reaches EOF while skipping whitespaces.
*/
func skipWhiteSpace(l *lexer) bool {
r := l.next(false)
for unicode.IsSpace(r) || unicode.IsControl(r) || r == RuneEOF {
if r == '\n' {
l.line++
l.lastnl = l.pos
}
r = l.next(false)
if r == RuneEOF {
l.emitToken(TokenEOF)
return false
}
}
l.backup()
return true
}
/*
lexTextBlock lexes a block of text without whitespace. Optionally interprets
all one or two character symbol tokens within the block.
*/
func lexTextBlock(l *lexer, interpretToken bool) {
r := l.next(false)
if interpretToken {
// Check if we start with a known symbol
nr := l.next(true)
if _, ok := symbolMap[strings.ToLower(string(r)+string(nr))]; ok {
l.next(false)
return
}
if _, ok := symbolMap[strings.ToLower(string(r))]; ok {
return
}
}
for !unicode.IsSpace(r) && !unicode.IsControl(r) && r != RuneEOF {
if interpretToken {
// Check if we find a token in the block
if _, ok := symbolMap[strings.ToLower(string(r))]; ok {
l.backup()
return
}
nr := l.next(true)
if _, ok := symbolMap[strings.ToLower(string(r)+string(nr))]; ok {
l.backup()
return
}
}
r = l.next(false)
}
if r != RuneEOF {
l.backup()
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package parser
import (
"bytes"
"fmt"
"devt.de/krotik/common/stringutil"
)
// AST Nodes
// =========
/*
ASTNode models a node in the AST
*/
type ASTNode struct {
Name string // Name of the node
Token *LexToken // Lexer token of this ASTNode
Children []*ASTNode // Child nodes
Runtime Runtime // Runtime component for this ASTNode
binding int // Binding power of this node
nullDenotation func(p *parser, self *ASTNode) (*ASTNode, error) // Configure token as beginning node
leftDenotation func(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) // Configure token as left node
}
/*
ASTFromPlain creates an AST from a plain AST.
A plain AST is a nested map structure like this:
{
name : <name of node>
value : <value of node>
children : [ <child nodes> ]
}
*/
func ASTFromPlain(plainAST map[string]interface{}) (*ASTNode, error) {
var astChildren []*ASTNode
name, ok := plainAST["name"]
if !ok {
return nil, fmt.Errorf("Found plain ast node without a name: %v", plainAST)
}
value, ok := plainAST["value"]
if !ok {
return nil, fmt.Errorf("Found plain ast node without a value: %v", plainAST)
}
// Create children
if children, ok := plainAST["children"]; ok {
if ic, ok := children.([]interface{}); ok {
// Do a list conversion if necessary - this is needed when the plain AST
// was produced by parsing JSON into map[string]interface{} structures
childrenList := make([]map[string]interface{}, len(ic))
for i := range ic {
childrenList[i] = ic[i].(map[string]interface{})
}
children = childrenList
}
for _, child := range children.([]map[string]interface{}) {
astChild, err := ASTFromPlain(child)
if err != nil {
return nil, err
}
astChildren = append(astChildren, astChild)
}
}
return &ASTNode{fmt.Sprint(name), &LexToken{TokenGeneral, 0,
fmt.Sprint(value), 0, 0}, astChildren, nil, 0, nil, nil}, nil
}
/*
Create a new instance of this ASTNode which is connected to a concrete lexer token.
*/
func (n *ASTNode) instance(p *parser, t *LexToken) *ASTNode {
ret := &ASTNode{n.Name, t, make([]*ASTNode, 0, 2), nil, n.binding, n.nullDenotation, n.leftDenotation}
if p.rp != nil {
ret.Runtime = p.rp.Runtime(ret)
}
return ret
}
/*
Plain returns this ASTNode and all its children as plain AST. A plain AST
only contains map objects, lists and primitive types which can be serialized
with JSON.
*/
func (n *ASTNode) Plain() map[string]interface{} {
ret := make(map[string]interface{})
ret["name"] = n.Name
lenChildren := len(n.Children)
if lenChildren > 0 {
children := make([]map[string]interface{}, lenChildren)
for i, child := range n.Children {
children[i] = child.Plain()
}
ret["children"] = children
}
// The value is what the lexer found in the source
ret["value"] = n.Token.Val
return ret
}
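/*
Illustrative sketch (not part of the original source): serializing an AST to
its plain form and reconstructing it. The reconstructed AST carries no
runtime components since ASTFromPlain is called without a parser context.
*/
func examplePlainRoundtrip() (*ASTNode, error) {
ast, err := Parse("test", "get Person")
if err != nil {
return nil, err
}
return ASTFromPlain(ast.Plain())
}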
/*
String returns a string representation of this token.
*/
func (n *ASTNode) String() string {
var buf bytes.Buffer
n.levelString(0, &buf)
return buf.String()
}
/*
levelString function to recursively print the tree.
*/
func (n *ASTNode) levelString(indent int, buf *bytes.Buffer) {
// Print current level
buf.WriteString(stringutil.GenerateRollingString(" ", indent*2))
if n.Name == NodeVALUE || (n.Name == NodeSHOWTERM && n.Token.Val != "@") {
buf.WriteString(fmt.Sprintf(n.Name+": %v", n.Token))
} else {
buf.WriteString(n.Name)
}
buf.WriteString("\n")
// Print children
for _, child := range n.Children {
child.levelString(indent+1, buf)
}
}
/*
Map of AST nodes corresponding to lexer tokens
*/
var astNodeMap map[LexTokenID]*ASTNode
/*
TokenSHOWTERM is an extra token which is generated by the parser
to group show terms
*/
const TokenSHOWTERM = LexTokenID(-1)
func init() {
astNodeMap = map[LexTokenID]*ASTNode{
TokenEOF: {NodeEOF, nil, nil, nil, 0, ndTerm, nil},
TokenVALUE: {NodeVALUE, nil, nil, nil, 0, ndTerm, nil},
TokenNODEKIND: {NodeVALUE, nil, nil, nil, 0, ndTerm, nil},
TokenTRUE: {NodeTRUE, nil, nil, nil, 0, ndTerm, nil},
TokenFALSE: {NodeFALSE, nil, nil, nil, 0, ndTerm, nil},
TokenNULL: {NodeNULL, nil, nil, nil, 0, ndTerm, nil},
TokenAT: {NodeFUNC, nil, nil, nil, 0, ndFunc, nil},
TokenORDERING: {NodeORDERING, nil, nil, nil, 0, ndWithFunc, nil},
TokenFILTERING: {NodeFILTERING, nil, nil, nil, 0, ndWithFunc, nil},
TokenNULLTRAVERSAL: {NodeNULLTRAVERSAL, nil, nil, nil, 0, ndWithFunc, nil},
// Special tokens - always handled in a denotation function
TokenCOMMA: {NodeCOMMA, nil, nil, nil, 0, nil, nil},
TokenGROUP: {NodeGROUP, nil, nil, nil, 0, nil, nil},
TokenEND: {NodeEND, nil, nil, nil, 0, nil, nil},
TokenAS: {NodeAS, nil, nil, nil, 0, nil, nil},
TokenFORMAT: {NodeFORMAT, nil, nil, nil, 0, nil, nil},
// Keywords
TokenGET: {NodeGET, nil, nil, nil, 0, ndGet, nil},
TokenLOOKUP: {NodeLOOKUP, nil, nil, nil, 0, ndLookup, nil},
TokenFROM: {NodeFROM, nil, nil, nil, 0, ndFrom, nil},
TokenWHERE: {NodeWHERE, nil, nil, nil, 0, ndPrefix, nil},
TokenUNIQUE: {NodeUNIQUE, nil, nil, nil, 0, ndPrefix, nil},
TokenUNIQUECOUNT: {NodeUNIQUECOUNT, nil, nil, nil, 0, ndPrefix, nil},
TokenISNOTNULL: {NodeISNOTNULL, nil, nil, nil, 0, ndPrefix, nil},
TokenASCENDING: {NodeASCENDING, nil, nil, nil, 0, ndPrefix, nil},
TokenDESCENDING: {NodeDESCENDING, nil, nil, nil, 0, ndPrefix, nil},
TokenTRAVERSE: {NodeTRAVERSE, nil, nil, nil, 0, ndTraverse, nil},
TokenPRIMARY: {NodePRIMARY, nil, nil, nil, 0, ndPrefix, nil},
TokenSHOW: {NodeSHOW, nil, nil, nil, 0, ndShow, nil},
TokenSHOWTERM: {NodeSHOWTERM, nil, nil, nil, 0, ndShow, nil},
TokenWITH: {NodeWITH, nil, nil, nil, 0, ndWith, nil},
TokenLIST: {NodeLIST, nil, nil, nil, 0, nil, nil},
// Boolean operations
TokenNOT: {NodeNOT, nil, nil, nil, 20, ndPrefix, nil},
TokenOR: {NodeOR, nil, nil, nil, 30, nil, ldInfix},
TokenAND: {NodeAND, nil, nil, nil, 40, nil, ldInfix},
TokenGEQ: {NodeGEQ, nil, nil, nil, 60, nil, ldInfix},
TokenLEQ: {NodeLEQ, nil, nil, nil, 60, nil, ldInfix},
TokenNEQ: {NodeNEQ, nil, nil, nil, 60, nil, ldInfix},
TokenEQ: {NodeEQ, nil, nil, nil, 60, nil, ldInfix},
TokenGT: {NodeGT, nil, nil, nil, 60, nil, ldInfix},
TokenLT: {NodeLT, nil, nil, nil, 60, nil, ldInfix},
TokenLIKE: {NodeLIKE, nil, nil, nil, 60, nil, ldInfix},
TokenIN: {NodeIN, nil, nil, nil, 60, nil, ldInfix},
TokenCONTAINS: {NodeCONTAINS, nil, nil, nil, 60, nil, ldInfix},
TokenBEGINSWITH: {NodeBEGINSWITH, nil, nil, nil, 60, nil, ldInfix},
TokenENDSWITH: {NodeENDSWITH, nil, nil, nil, 60, nil, ldInfix},
TokenCONTAINSNOT: {NodeCONTAINSNOT, nil, nil, nil, 60, nil, ldInfix},
TokenNOTIN: {NodeNOTIN, nil, nil, nil, 60, nil, ldInfix},
// Simple arithmetic expressions
TokenPLUS: {NodePLUS, nil, nil, nil, 110, ndPrefix, ldInfix},
TokenMINUS: {NodeMINUS, nil, nil, nil, 110, ndPrefix, ldInfix},
TokenTIMES: {NodeTIMES, nil, nil, nil, 120, nil, ldInfix},
TokenDIV: {NodeDIV, nil, nil, nil, 120, nil, ldInfix},
TokenMODINT: {NodeMODINT, nil, nil, nil, 120, nil, ldInfix},
TokenDIVINT: {NodeDIVINT, nil, nil, nil, 120, nil, ldInfix},
// Brackets
TokenLPAREN: {NodeLPAREN, nil, nil, nil, 150, ndInner, nil},
TokenRPAREN: {NodeRPAREN, nil, nil, nil, 0, nil, nil},
TokenLBRACK: {NodeLBRACK, nil, nil, nil, 150, ndList, nil},
TokenRBRACK: {NodeRBRACK, nil, nil, nil, 0, nil, nil},
}
}
// Parser
// ======
/*
Parser data structure
*/
type parser struct {
name string // Name to identify the input
node *ASTNode // Current ast node
tokens chan LexToken // Channel which contains lex tokens
rp RuntimeProvider // Runtime provider which creates runtime components
}
/*
Parse parses a given input string and returns an AST.
*/
func Parse(name string, input string) (*ASTNode, error) {
return ParseWithRuntime(name, input, nil)
}
/*
ParseWithRuntime parses a given input string and returns an AST decorated with
runtime components.
*/
func ParseWithRuntime(name string, input string, rp RuntimeProvider) (*ASTNode, error) {
p := &parser{name, nil, Lex(name, input), rp}
node, err := p.next()
if err != nil {
return nil, err
}
p.node = node
return p.run(0)
}
/*
run models the main parser function.
*/
func (p *parser) run(rightBinding int) (*ASTNode, error) {
var err error
n := p.node
p.node, err = p.next()
if err != nil {
return nil, err
}
// Start with the null denotation of this statement / expression
if n.nullDenotation == nil {
return nil, p.newParserError(ErrImpossibleNullDenotation,
n.Token.String(), *n.Token)
}
left, err := n.nullDenotation(p, n)
if err != nil {
return nil, err
}
// Collect left denotations as long as the left binding power is greater
// than the initial right one
for rightBinding < p.node.binding {
var nleft *ASTNode
n = p.node
p.node, err = p.next()
if err != nil {
return nil, err
}
if n.leftDenotation == nil {
return nil, p.newParserError(ErrImpossibleLeftDenotation,
n.Token.String(), *n.Token)
}
// Get the next left denotation
nleft, err = n.leftDenotation(p, n, left)
left = nleft
if err != nil {
return nil, err
}
}
return left, nil
}
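/*
Illustrative sketch (not part of the original source): the binding powers in
astNodeMap drive the loop above. In the query below, * (120) binds tighter
than + (110), which binds tighter than > (60), so the condition nests as
(a + (b * 2)) > 3.
*/
func examplePrecedence() (string, error) {
ast, err := Parse("test", "get Person where a + b * 2 > 3")
if err != nil {
return "", err
}
return ast.String(), nil
}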
/*
next retrieves the next lexer token.
*/
func (p *parser) next() (*ASTNode, error) {
token, more := <-p.tokens
if !more {
// Unexpected end of input - the associated token is an empty error token
return nil, p.newParserError(ErrUnexpectedEnd, "", token)
} else if token.ID == TokenError {
// There was a lexer error - wrap it in a parser error
return nil, p.newParserError(ErrLexicalError, token.Val, token)
} else if node, ok := astNodeMap[token.ID]; ok {
return node.instance(p, &token), nil
}
return nil, p.newParserError(ErrUnknownToken, fmt.Sprintf("id:%v (%v)", token.ID, token), token)
}
// Standard null denotation functions
// ==================================
/*
ndTerm is used for terminals.
*/
func ndTerm(p *parser, self *ASTNode) (*ASTNode, error) {
return self, nil
}
/*
ndInner returns the inner expression of an enclosed block and discards the
block token. This method is used for brackets.
*/
func ndInner(p *parser, self *ASTNode) (*ASTNode, error) {
// Get the inner expression
exp, err := p.run(0)
if err != nil {
return nil, err
}
// We return here the inner expression - discarding the bracket tokens
return exp, skipToken(p, TokenRPAREN)
}
/*
ndPrefix is used for prefix operators.
*/
func ndPrefix(p *parser, self *ASTNode) (*ASTNode, error) {
// Make sure a prefix will only prefix the next item
val, err := p.run(self.binding + 20)
if err != nil {
return nil, err
}
self.Children = append(self.Children, val)
return self, nil
}
// Null denotation functions for specific expressions
// ==================================================
/*
ndGet is used to parse get expressions.
*/
func ndGet(p *parser, self *ASTNode) (*ASTNode, error) {
// Must specify a node kind
if err := acceptChild(p, self, TokenNODEKIND); err != nil {
return nil, err
}
// Parse the rest and add it as children
for p.node.Token.ID != TokenEOF {
exp, err := p.run(0)
if err != nil {
return nil, err
}
self.Children = append(self.Children, exp)
}
return self, nil
}
/*
ndLookup is used to parse lookup expressions.
*/
func ndLookup(p *parser, self *ASTNode) (*ASTNode, error) {
// Must specify a node kind
if err := acceptChild(p, self, TokenNODEKIND); err != nil {
return nil, err
}
// Must have at least one node key
if err := acceptChild(p, self, TokenVALUE); err != nil {
return nil, err
}
// Read all commas and accept further values as additional node keys
for skipToken(p, TokenCOMMA) == nil {
if err := acceptChild(p, self, TokenVALUE); err != nil {
return nil, err
}
}
// Parse the rest and add it as children
for p.node.Token.ID != TokenEOF {
exp, err := p.run(0)
if err != nil {
return nil, err
}
self.Children = append(self.Children, exp)
}
return self, nil
}
/*
ndFrom is used to parse from group ... expressions.
*/
func ndFrom(p *parser, self *ASTNode) (*ASTNode, error) {
// Must be followed by a group keyword
if err := acceptChild(p, self, TokenGROUP); err != nil {
return nil, err
}
// Must have a group name
return self, acceptChild(p, self.Children[0], TokenVALUE)
}
/*
ndTraverse is used to parse traverse expressions.
*/
func ndTraverse(p *parser, self *ASTNode) (*ASTNode, error) {
// Must be followed by traversal spec
if err := acceptChild(p, self, TokenVALUE); err != nil {
return nil, err
}
// Parse the rest and add it as children - must end with "end" if
// further clauses are given
for p.node.Token.ID != TokenEOF && p.node.Token.ID != TokenEND {
exp, err := p.run(0)
if err != nil {
return nil, err
}
self.Children = append(self.Children, exp)
}
if p.node.Token.ID == TokenEND {
skipToken(p, TokenEND)
}
return self, nil
}
/*
ndFunc is used to parse functions.
*/
func ndFunc(p *parser, self *ASTNode) (*ASTNode, error) {
// Must specify a name
if err := acceptChild(p, self, TokenVALUE); err != nil {
return nil, err
}
// Must have an opening bracket
if err := skipToken(p, TokenLPAREN); err != nil {
return nil, err
}
// Read in the first attribute
if p.node.Token.ID == TokenVALUE {
// Next call cannot fail since we just checked for it. Value is optional.
acceptChild(p, self, TokenVALUE)
// Read all commas and accept further values as parameters until the end
for skipToken(p, TokenCOMMA) == nil {
if err := acceptChild(p, self, TokenVALUE); err != nil {
return nil, err
}
}
}
// Must have a closing bracket
return self, skipToken(p, TokenRPAREN)
}
/*
ndShow is used to parse a show clause.
*/
func ndShow(p *parser, self *ASTNode) (*ASTNode, error) {
acceptShowTerm := func() error {
st := astNodeMap[TokenSHOWTERM].instance(p, p.node.Token)
if p.node.Token.ID == TokenAT {
// Parse a function
exp, err := p.run(0)
if err != nil {
return err
}
st.Children = append(st.Children, exp)
} else {
// Skip the value token from which we just created an AST node
skipToken(p, TokenVALUE)
}
// Parse an "as" definition if given
if p.node.Token.ID == TokenAS {
current := p.node
acceptChild(p, st, TokenAS)
if err := acceptChild(p, current, TokenVALUE); err != nil {
return err
}
}
// Parse a "format" definition if given
if p.node.Token.ID == TokenFORMAT {
current := p.node
acceptChild(p, st, TokenFORMAT)
if err := acceptChild(p, current, TokenVALUE); err != nil {
return err
}
}
self.Children = append(self.Children, st)
return nil
}
// Read in the first node attribute
if p.node.Token.ID == TokenVALUE || p.node.Token.ID == TokenAT {
if err := acceptShowTerm(); err != nil {
return nil, err
}
// Read further show entries
for skipToken(p, TokenCOMMA) == nil {
if err := acceptShowTerm(); err != nil {
return nil, err
}
}
}
return self, nil
}
/*
ndWith is used to parse a with clause.
*/
func ndWith(p *parser, self *ASTNode) (*ASTNode, error) {
// Parse the rest and add it as children
for p.node.Token.ID != TokenEOF {
exp, err := p.run(0)
if err != nil {
return nil, err
}
self.Children = append(self.Children, exp)
if p.node.Token.ID == TokenCOMMA {
skipToken(p, TokenCOMMA)
}
}
return self, nil
}
/*
ndWithFunc is used to parse directives in with clauses.
*/
func ndWithFunc(p *parser, self *ASTNode) (*ASTNode, error) {
// Must have an opening bracket
if err := skipToken(p, TokenLPAREN); err != nil {
return nil, err
}
for p.node.Token.ID != TokenRPAREN {
// Parse all the expressions inside the directives
exp, err := p.run(0)
if err != nil {
return nil, err
}
self.Children = append(self.Children, exp)
if p.node.Token.ID == TokenCOMMA {
skipToken(p, TokenCOMMA)
}
}
// Must have a closing bracket
return self, skipToken(p, TokenRPAREN)
}
/*
ndList is used to collect elements of a list.
*/
func ndList(p *parser, self *ASTNode) (*ASTNode, error) {
// Create a list token
st := astNodeMap[TokenLIST].instance(p, self.Token)
// Get the inner expression
for p.node.Token.ID != TokenRBRACK {
// Parse all the expressions inside the list
exp, err := p.run(0)
if err != nil {
return nil, err
}
st.Children = append(st.Children, exp)
if p.node.Token.ID == TokenCOMMA {
skipToken(p, TokenCOMMA)
}
}
// Must have a closing bracket
return st, skipToken(p, TokenRBRACK)
}
// Standard left denotation functions
// ==================================
/*
ldInfix is used for infix operators.
*/
func ldInfix(p *parser, self *ASTNode, left *ASTNode) (*ASTNode, error) {
right, err := p.run(self.binding)
if err != nil {
return nil, err
}
self.Children = append(self.Children, left)
self.Children = append(self.Children, right)
return self, nil
}
// Helper functions
// ================
/*
skipToken skips over a given token.
*/
func skipToken(p *parser, ids ...LexTokenID) error {
var err error
canSkip := func(id LexTokenID) bool {
for _, i := range ids {
if i == id {
return true
}
}
return false
}
if !canSkip(p.node.Token.ID) {
if p.node.Token.ID == TokenEOF {
return p.newParserError(ErrUnexpectedEnd, "", *p.node.Token)
}
return p.newParserError(ErrUnexpectedToken, p.node.Token.Val, *p.node.Token)
}
// This should never return an error unless we skip over EOF or complex tokens
// like values
p.node, err = p.next()
return err
}
/*
acceptChild accepts the current token as a child.
*/
func acceptChild(p *parser, self *ASTNode, id LexTokenID) error {
var err error
current := p.node
p.node, err = p.next()
if err != nil {
return err
}
if current.Token.ID == id {
self.Children = append(self.Children, current)
return nil
}
return p.newParserError(ErrUnexpectedToken, current.Token.Val, *current.Token)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package parser
import (
"errors"
"fmt"
)
/*
newParserError creates a new ParserError object.
*/
func (p *parser) newParserError(t error, d string, token LexToken) error {
return &Error{p.name, t, d, token.Lline, token.Lpos}
}
/*
Error models a parser related error
*/
type Error struct {
Source string // Name of the source which was given to the parser
Type error // Error type (to be used for equal checks)
Detail string // Details of this error
Line int // Line of the error
Pos int // Position of the error
}
/*
Error returns a human-readable string representation of this error.
*/
func (pe *Error) Error() string {
var ret string
if pe.Detail != "" {
ret = fmt.Sprintf("Parse error in %s: %v (%v)", pe.Source, pe.Type, pe.Detail)
} else {
ret = fmt.Sprintf("Parse error in %s: %v", pe.Source, pe.Type)
}
if pe.Line != 0 {
return fmt.Sprintf("%s (Line:%d Pos:%d)", ret, pe.Line, pe.Pos)
}
return ret
}
/*
Parser related error types
*/
var (
ErrUnexpectedEnd = errors.New("Unexpected end")
ErrLexicalError = errors.New("Lexical error")
ErrUnknownToken = errors.New("Unknown term")
ErrImpossibleNullDenotation = errors.New("Term cannot start an expression")
ErrImpossibleLeftDenotation = errors.New("Term can only start an expression")
ErrUnexpectedToken = errors.New("Unexpected term")
)
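/*
Usage sketch (editorial addition, not part of the original source): since Type
holds one of the sentinel error values above, callers can distinguish parser
failures by comparing against it directly. A minimal sketch, assuming err was
returned by a parse run:

	if pe, ok := err.(*Error); ok && pe.Type == ErrUnexpectedEnd {
		// The input ended in the middle of an expression
		fmt.Println("incomplete query at line", pe.Line, "pos", pe.Pos)
	}
*/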
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package parser
import (
"bytes"
"fmt"
"regexp"
"strings"
"text/template"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/stringutil"
)
/*
Map of pretty printer templates for AST nodes
There is special treatment for NodeVALUE, NodeGET, NodeLOOKUP, NodeTRAVERSE,
NodeFUNC, NodeSHOW, NodeSHOWTERM, NodeORDERING, NodeFILTERING, NodeWITH,
NodeLPAREN, NodeRPAREN, NodeLBRACK and NodeRBRACK.
*/
var prettyPrinterMap = map[string]*template.Template{
NodeTRUE: template.Must(template.New(NodeTRUE).Parse("true")),
NodeFALSE: template.Must(template.New(NodeFALSE).Parse("false")),
NodeNULL: template.Must(template.New(NodeNULL).Parse("null")),
NodeNULLTRAVERSAL + "_1": template.Must(template.New(NodeNULLTRAVERSAL).Parse("nulltraversal({{.c1}})")),
// Special tokens - always handled in a denotation function
NodeGROUP + "_1": template.Must(template.New(NodeGROUP).Parse("group {{.c1}}")),
NodeEND: template.Must(template.New(NodeEND).Parse("end")),
NodeAS + "_1": template.Must(template.New(NodeAS).Parse("as {{.c1}}")),
NodeFORMAT + "_1": template.Must(template.New(NodeFORMAT).Parse("format {{.c1}}")),
// Keywords
NodeFROM + "_1": template.Must(template.New(NodeFROM).Parse("from {{.c1}}")),
NodeWHERE + "_1": template.Must(template.New(NodeWHERE).Parse("where {{.c1}}")),
NodeUNIQUE + "_1": template.Must(template.New(NodeUNIQUE).Parse("unique {{.c1}}")),
NodeUNIQUECOUNT + "_1": template.Must(template.New(NodeUNIQUECOUNT).Parse("uniquecount {{.c1}}")),
NodeISNOTNULL + "_1": template.Must(template.New(NodeISNOTNULL).Parse("isnotnull {{.c1}}")),
NodeASCENDING + "_1": template.Must(template.New(NodeASCENDING).Parse("ascending {{.c1}}")),
NodeDESCENDING + "_1": template.Must(template.New(NodeDESCENDING).Parse("descending {{.c1}}")),
NodePRIMARY + "_1": template.Must(template.New(NodePRIMARY).Parse("primary {{.c1}}")),
NodeLIST: template.Must(template.New(NodeLIST).Parse("list")),
// Boolean operations
NodeNOT + "_1": template.Must(template.New(NodeNOT).Parse("not {{.c1}}")),
NodeGEQ + "_2": template.Must(template.New(NodeGEQ).Parse("{{.c1}} >= {{.c2}}")),
NodeLEQ + "_2": template.Must(template.New(NodeLEQ).Parse("{{.c1}} <= {{.c2}}")),
NodeNEQ + "_2": template.Must(template.New(NodeNEQ).Parse("{{.c1}} != {{.c2}}")),
NodeEQ + "_2": template.Must(template.New(NodeEQ).Parse("{{.c1}} = {{.c2}}")),
NodeGT + "_2": template.Must(template.New(NodeGT).Parse("{{.c1}} > {{.c2}}")),
NodeLT + "_2": template.Must(template.New(NodeLT).Parse("{{.c1}} < {{.c2}}")),
// List operations
NodeIN + "_2": template.Must(template.New(NodeIN).Parse("{{.c1}} in {{.c2}}")),
NodeNOTIN + "_2": template.Must(template.New(NodeNOTIN).Parse("{{.c1}} notin {{.c2}}")),
// String operations
NodeLIKE + "_2": template.Must(template.New(NodeLIKE).Parse("{{.c1}} like {{.c2}}")),
NodeCONTAINS + "_2": template.Must(template.New(NodeCONTAINS).Parse("{{.c1}} contains {{.c2}}")),
NodeBEGINSWITH + "_2": template.Must(template.New(NodeBEGINSWITH).Parse("{{.c1}} beginswith {{.c2}}")),
NodeENDSWITH + "_2": template.Must(template.New(NodeENDSWITH).Parse("{{.c1}} endswith {{.c2}}")),
NodeCONTAINSNOT + "_2": template.Must(template.New(NodeCONTAINSNOT).Parse("{{.c1}} containsnot {{.c2}}")),
// Simple arithmetic expressions
NodePLUS + "_2": template.Must(template.New(NodePLUS).Parse("{{.c1}} + {{.c2}}")),
NodeMINUS + "_1": template.Must(template.New(NodeMINUS).Parse("-{{.c1}}")),
NodeMINUS + "_2": template.Must(template.New(NodeMINUS).Parse("{{.c1}} - {{.c2}}")),
NodeTIMES + "_2": template.Must(template.New(NodeTIMES).Parse("{{.c1}} * {{.c2}}")),
NodeDIV + "_2": template.Must(template.New(NodeDIV).Parse("{{.c1}} / {{.c2}}")),
NodeMODINT + "_2": template.Must(template.New(NodeMODINT).Parse("{{.c1}} % {{.c2}}")),
NodeDIVINT + "_2": template.Must(template.New(NodeDIVINT).Parse("{{.c1}} // {{.c2}}")),
}
/*
Map of nodes where the precedence might have changed because of parentheses
*/
var bracketPrecedenceMap = map[string]bool{
NodePLUS: true,
NodeMINUS: true,
NodeAND: true,
NodeOR: true,
}
/*
PrettyPrint produces a pretty printed EQL query from a given AST.
*/
func PrettyPrint(ast *ASTNode) (string, error) {
var visit func(ast *ASTNode, level int) (string, error)
quoteValue := func(val string, allowNonQuotation bool) string {
if val == "" {
return `""`
}
isNumber, _ := regexp.MatchString("^[0-9][0-9\\.e-+]*$", val)
isInlineString, _ := regexp.MatchString("^[a-zA-Z0-9_:.]*$", val)
if allowNonQuotation && (isNumber || isInlineString) {
return val
} else if strings.ContainsRune(val, '"') {
if strings.ContainsRune(val, '\'') {
val = strings.Replace(val, "\"", "\\\"", -1)
} else {
return fmt.Sprintf("'%v'", val)
}
}
return fmt.Sprintf("\"%v\"", val)
}
visit = func(ast *ASTNode, level int) (string, error) {
// Handle special cases which don't have children
if ast.Name == NodeVALUE || (ast.Name == NodeSHOWTERM && len(ast.Children) == 0) {
return quoteValue(ast.Token.Val, true), nil
}
var children map[string]string
var tempKey = ast.Name
var buf bytes.Buffer
// First pretty print children
if len(ast.Children) > 0 {
children = make(map[string]string)
for i, child := range ast.Children {
res, err := visit(child, level+1)
if err != nil {
return "", err
}
if _, ok := bracketPrecedenceMap[child.Name]; ok && ast.binding > child.binding {
res = fmt.Sprintf("(%v)", res)
}
children[fmt.Sprint("c", i+1)] = res
}
tempKey += fmt.Sprint("_", len(children))
}
// Handle special cases requiring children
if ast.Name == NodeLIST {
buf.WriteString("[")
if children != nil {
i := 1
for ; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i)])
buf.WriteString(", ")
}
buf.WriteString(children[fmt.Sprint("c", i)])
}
buf.WriteString("]")
return buf.String(), nil
} else if ast.Name == NodeLOOKUP {
buf.WriteString("lookup ")
buf.WriteString(children["c1"])
if len(children) > 1 {
buf.WriteString(" ")
}
i := 1
for ; i < len(children) && ast.Children[i].Name == NodeVALUE; i++ {
buf.WriteString(quoteValue(ast.Children[i].Token.Val, false))
if i < len(children)-1 && ast.Children[i+1].Name == NodeVALUE {
buf.WriteString(", ")
}
}
if i < len(children) {
buf.WriteString(" ")
}
for ; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 && ast.Children[i+1].Name != NodeSHOW {
buf.WriteString(" ")
}
}
return buf.String(), nil
} else if ast.Name == NodeGET {
buf.WriteString("get ")
buf.WriteString(children["c1"])
if len(children) > 1 {
buf.WriteString(" ")
}
for i := 1; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 && ast.Children[i+1].Name != NodeSHOW {
buf.WriteString(" ")
}
}
return buf.String(), nil
} else if ast.Name == NodeTRAVERSE {
buf.WriteString("\n")
buf.WriteString(stringutil.GenerateRollingString(" ", level*2))
buf.WriteString("traverse ")
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(" ")
}
}
buf.WriteString("\n")
buf.WriteString(stringutil.GenerateRollingString(" ", level*2))
buf.WriteString("end")
return buf.String(), nil
} else if ast.Name == NodeFUNC {
buf.WriteString("@")
buf.WriteString(children["c1"])
buf.WriteString("(")
for i := 1; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(", ")
}
}
buf.WriteString(")")
return buf.String(), nil
} else if ast.Name == NodeSHOW {
buf.WriteString("\nshow\n ")
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(",\n ")
}
}
return buf.String(), nil
} else if ast.Name == NodeSHOWTERM {
if ast.Token.Val != "" && ast.Token.Val != "@" {
buf.WriteString(quoteValue(ast.Token.Val, true))
buf.WriteString(" ")
}
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(" ")
}
}
return buf.String(), nil
} else if ast.Name == NodeORDERING {
buf.WriteString("ordering")
buf.WriteString("(")
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(", ")
}
}
buf.WriteString(")")
return buf.String(), nil
} else if ast.Name == NodeFILTERING {
buf.WriteString("filtering")
buf.WriteString("(")
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(", ")
}
}
buf.WriteString(")")
return buf.String(), nil
} else if ast.Name == NodeWITH {
buf.WriteString("\nwith\n")
for i := 0; i < len(children); i++ {
buf.WriteString(" ")
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(",\n")
}
}
return buf.String(), nil
} else if ast.Name == NodeAND || ast.Name == NodeOR {
for i := 0; i < len(children); i++ {
buf.WriteString(children[fmt.Sprint("c", i+1)])
if i < len(children)-1 {
buf.WriteString(" ")
buf.WriteString(strings.ToLower(ast.Token.Val))
buf.WriteString(" ")
}
}
return buf.String(), nil
}
// Retrieve the template
temp, ok := prettyPrinterMap[tempKey]
if !ok {
return "", fmt.Errorf("Could not find template for %v (tempkey: %v)",
ast.Name, tempKey)
}
// Use the children as parameters for template
errorutil.AssertOk(temp.Execute(&buf, children))
return buf.String(), nil
}
return visit(ast, 0)
}
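/*
Usage sketch (editorial addition, not part of the original source): round-tripping
a query through Parse and PrettyPrint. The query string is an arbitrary example;
PrettyPrint returns a normalized form of the query with standard spacing,
quoting and clause placement.

	ast, err := Parse("test", `get Person where name = "Marvin" show name, age`)
	if err == nil {
		pretty, _ := PrettyPrint(ast)
		fmt.Println(pretty) // Prints the normalized query text
	}
*/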
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package eql contains the main API for EQL.
Example EQL query:
GET Person where name = "Marvin"
*/
package eql
import (
"strings"
"devt.de/krotik/eliasdb/eql/interpreter"
"devt.de/krotik/eliasdb/eql/parser"
"devt.de/krotik/eliasdb/graph"
)
/*
GroupNodeKind is a special node kind representing groups
*/
const GroupNodeKind = interpreter.GroupNodeKind
/*
RunQuery runs a search query against a given graph database.
*/
func RunQuery(name string, part string, query string, gm *graph.Manager) (SearchResult, error) {
return RunQueryWithNodeInfo(name, part, query, gm, interpreter.NewDefaultNodeInfo(gm))
}
/*
RunQueryWithNodeInfo runs a search query against a given graph database, using
a given NodeInfo object to retrieve rendering information.
*/
func RunQueryWithNodeInfo(name string, part string, query string, gm *graph.Manager, ni interpreter.NodeInfo) (SearchResult, error) {
var rtp parser.RuntimeProvider
word := strings.ToLower(parser.FirstWord(query))
if word == "get" {
rtp = interpreter.NewGetRuntimeProvider(name, part, gm, ni)
} else if word == "lookup" {
rtp = interpreter.NewLookupRuntimeProvider(name, part, gm, ni)
} else {
return nil, &interpreter.RuntimeError{
Source: name,
Type: interpreter.ErrInvalidConstruct,
Detail: "Unknown query type: " + word,
Node: nil,
Line: 1,
Pos: 1,
}
}
ast, err := parser.ParseWithRuntime(name, query, rtp)
if err != nil {
return nil, err
}
res, err := ast.Runtime.Eval()
if err != nil {
return nil, err
}
return &queryResult{res.(*interpreter.SearchResult)}, nil
}
/*
ParseQuery parses a search query and returns its Abstract Syntax Tree.
*/
func ParseQuery(name string, query string) (*parser.ASTNode, error) {
ast, err := parser.Parse(name, query)
if err != nil {
return nil, err
}
return ast, nil
}
/*
queryResult datastructure to hide implementation details.
*/
type queryResult struct {
*interpreter.SearchResult
}
/*
Header returns a data structure describing the result header.
*/
func (qr *queryResult) Header() SearchResultHeader {
return qr.SearchResult.Header()
}
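/*
Usage sketch (editorial addition, not part of the original source): running an
EQL query against a fresh in-memory graph. NewMemoryGraphStorage is assumed to
be the in-memory storage constructor of the graphstorage package; the partition
name "main" is an arbitrary choice.

	mgs := graphstorage.NewMemoryGraphStorage("example")
	gm := graph.NewGraphManager(mgs)
	res, err := RunQuery("myquery", "main", `get Person where name = "Marvin"`, gm)
	if err == nil {
		fmt.Println(res.Header().Data()) // Column labels of the result
		fmt.Println(res.RowCount())      // Number of result rows
	}
*/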
package eql
import (
"fmt"
"sort"
"strings"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/eql/parser"
)
/*
RefineQueryByResultRows tries to construct a query which will only
show certain rows of a given query result. Might fail if no primary
node per row can be identified or if the "primary" keyword is used.
*/
func RefineQueryByResultRows(res SearchResult, rows []int) (string, error) {
var col = -1
var ret = ""
// Search for column which gives the root node key and kind
err := fmt.Errorf("Could not determine root nodes")
rowCount := res.RowCount()
if rowCount > 0 {
for i, d := range res.Header().Data() {
if ds := strings.Split(d, ":"); ds[0] == "1" && ds[1] == "n" {
col = i
err = nil
}
}
}
if err == nil {
var ast *parser.ASTNode
// Get the AST
if ast, err = ParseQuery("Refine query", res.Query()); err == nil {
var qtail []*parser.ASTNode
// Get the children of the AST which do not produce the root nodes
for i, c := range ast.Children {
if c.Name != parser.NodeVALUE {
qtail = ast.Children[i:]
break
}
}
// Now collect the keys which should be the new root nodes
keys := make([]string, 0, len(rows))
kind := ""
for _, r := range rows {
if r < rowCount {
src := strings.Split(res.RowSource(r)[col], ":")
keys = append(keys, src[2])
if kind == "" {
kind = src[1]
}
}
}
sort.Strings(keys)
err = fmt.Errorf("Could not find requested row%v", stringutil.Plural(len(rows)))
if len(keys) > 0 {
// Assemble the query
ast, _ = ParseQuery("", fmt.Sprintf("lookup %v '%v'", kind, strings.Join(keys, "', '")))
ast.Children = append(ast.Children, qtail...)
ret, err = parser.PrettyPrint(ast)
}
}
}
return ret, err
}
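/*
Usage sketch (editorial addition, not part of the original source): narrowing an
existing result to selected rows. res and gm are assumed to be the SearchResult
and Manager from the sketch above; the row indices are arbitrary.

	refined, err := RefineQueryByResultRows(res, []int{0, 2})
	if err == nil {
		// refined is a lookup query which reproduces only the requested rows
		res2, _ := RunQuery("refined", "main", refined, gm)
		_ = res2
	}
*/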
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package data contains classes and functions to handle graph data.
Nodes
Nodes are items stored in the graph. The graphNode object is the minimal
implementation of the Node interface and represents a simple node. Setting a
nil value to an attribute is equivalent to removing the attribute. An attribute
value can be any object which can be serialized by gob.
Edges
Edges are items stored in the graph. Edges connect nodes. The graphEdge object
is the minimal implementation of the Edge interface and represents a simple edge.
Setting a nil value to an attribute is equivalent to removing the attribute. An
attribute value can be any object which can be serialized by gob.
*/
package data
import "fmt"
/*
Edge models edges in the graph
*/
type Edge interface {
Node
/*
End1Key returns the key of the first end of this edge.
*/
End1Key() string
/*
End1Kind returns the kind of the first end of this edge.
*/
End1Kind() string
/*
End1Role returns the role of the first end of this edge.
*/
End1Role() string
/*
End1IsCascading is a flag to indicate that delete operations from this
end are cascaded to the other end.
*/
End1IsCascading() bool
/*
End1IsCascadingLast is a flag to indicate that cascading delete
operations are only executed if this is the last/only edge of
this kind to the other end. The flag is ignored if End1IsCascading is
false.
*/
End1IsCascadingLast() bool
/*
End2Key returns the key of the second end of this edge.
*/
End2Key() string
/*
End2Kind returns the kind of the second end of this edge.
*/
End2Kind() string
/*
End2Role returns the role of the second end of this edge.
*/
End2Role() string
/*
End2IsCascading is a flag to indicate that delete operations from this
end are cascaded to the other end.
*/
End2IsCascading() bool
/*
End2IsCascadingLast is a flag to indicate that cascading delete
operations are only executed if this is the last/only edge of
this kind to the other end. The flag is ignored if End2IsCascading is
false.
*/
End2IsCascadingLast() bool
/*
Spec returns the spec for this edge from the view of a specified endpoint.
A spec is always of the form: <End Role>:<Kind>:<End Role>:<Other node kind>
*/
Spec(key string) string
/*
OtherEndKey returns the key of the endpoint which is on the other side
from the given key.
*/
OtherEndKey(key string) string
/*
OtherEndKind returns the kind of the endpoint which is on the other side
from the given key.
*/
OtherEndKind(key string) string
}
/*
EdgeEnd1Key is the key of the first end
*/
const EdgeEnd1Key = "end1key"
/*
EdgeEnd1Kind is the kind of the first end
*/
const EdgeEnd1Kind = "end1kind"
/*
EdgeEnd1Role is the role of the first end
*/
const EdgeEnd1Role = "end1role"
/*
EdgeEnd1Cascading is the flag to cascade delete operations from the first end
*/
const EdgeEnd1Cascading = "end1cascading"
/*
EdgeEnd1CascadingLast is a flag to indicate that cascading delete
operations are only executed on the last/only edge of
a kind
*/
const EdgeEnd1CascadingLast = "end1cascadinglast"
/*
EdgeEnd2Key is the key of the second end
*/
const EdgeEnd2Key = "end2key"
/*
EdgeEnd2Kind is the kind of the second end
*/
const EdgeEnd2Kind = "end2kind"
/*
EdgeEnd2Role is the role of the second end
*/
const EdgeEnd2Role = "end2role"
/*
EdgeEnd2Cascading is the flag to cascade delete operations from the second end
*/
const EdgeEnd2Cascading = "end2cascading"
/*
EdgeEnd2CascadingLast is a flag to indicate that cascading delete
operations are only executed on the last/only edge of
a kind
*/
const EdgeEnd2CascadingLast = "end2cascadinglast"
/*
graphEdge data structure.
*/
type graphEdge struct {
*graphNode
}
/*
NewGraphEdge creates a new Edge instance.
*/
func NewGraphEdge() Edge {
return &graphEdge{&graphNode{make(map[string]interface{})}}
}
/*
NewGraphEdgeFromNode creates a new Edge instance.
*/
func NewGraphEdgeFromNode(node Node) Edge {
if node == nil {
return nil
}
return &graphEdge{&graphNode{node.Data()}}
}
/*
End1Key returns the key of the first end of this edge.
*/
func (ge *graphEdge) End1Key() string {
return ge.stringAttr(EdgeEnd1Key)
}
/*
End1Kind returns the kind of the first end of this edge.
*/
func (ge *graphEdge) End1Kind() string {
return ge.stringAttr(EdgeEnd1Kind)
}
/*
End1Role returns the role of the first end of this edge.
*/
func (ge *graphEdge) End1Role() string {
return ge.stringAttr(EdgeEnd1Role)
}
/*
End1IsCascading is a flag to indicate that delete operations from this
end are cascaded to the other end.
*/
func (ge *graphEdge) End1IsCascading() bool {
return ge.Attr(EdgeEnd1Cascading).(bool)
}
/*
End1IsCascadingLast is a flag to indicate that cascading delete
operations are only executed if this is the last/only edge of
this kind to the other end. The flag is ignored if End1IsCascading is
false.
*/
func (ge *graphEdge) End1IsCascadingLast() bool {
a := ge.Attr(EdgeEnd1CascadingLast)
return a != nil && a.(bool)
}
/*
End2Key returns the key of the second end of this edge.
*/
func (ge *graphEdge) End2Key() string {
return ge.stringAttr(EdgeEnd2Key)
}
/*
End2Kind returns the kind of the second end of this edge.
*/
func (ge *graphEdge) End2Kind() string {
return ge.stringAttr(EdgeEnd2Kind)
}
/*
End2Role returns the role of the second end of this edge.
*/
func (ge *graphEdge) End2Role() string {
return ge.stringAttr(EdgeEnd2Role)
}
/*
End2IsCascading is a flag to indicate that delete operations from this
end are cascaded to the other end.
*/
func (ge *graphEdge) End2IsCascading() bool {
return ge.Attr(EdgeEnd2Cascading).(bool)
}
/*
End2IsCascadingLast is a flag to indicate that cascading delete
operations are only executed if this is the last/only edge of
this kind to the other end. The flag is ignored if End2IsCascading is
false.
*/
func (ge *graphEdge) End2IsCascadingLast() bool {
a := ge.Attr(EdgeEnd2CascadingLast)
return a != nil && a.(bool)
}
/*
Spec returns the spec for this edge from the view of a specified endpoint.
A spec is always of the form: <End Role>:<Kind>:<End Role>:<Other node kind>
*/
func (ge *graphEdge) Spec(key string) string {
if key == ge.End1Key() {
return fmt.Sprintf("%s:%s:%s:%s", ge.End1Role(), ge.Kind(), ge.End2Role(), ge.End2Kind())
} else if key == ge.End2Key() {
return fmt.Sprintf("%s:%s:%s:%s", ge.End2Role(), ge.Kind(), ge.End1Role(), ge.End1Kind())
}
return ""
}
/*
OtherEndKey returns the key of the endpoint which is on the other side
from the given key.
*/
func (ge *graphEdge) OtherEndKey(key string) string {
if key == ge.End1Key() {
return ge.End2Key()
} else if key == ge.End2Key() {
return ge.End1Key()
}
return ""
}
/*
OtherEndKind returns the kind of the endpoint which is on the other side
from the given key.
*/
func (ge *graphEdge) OtherEndKind(key string) string {
if key == ge.End1Key() {
return ge.End2Kind()
} else if key == ge.End2Key() {
return ge.End1Kind()
}
return ""
}
/*
IndexMap returns a representation of this edge as a string map which
can be used to provide a full-text search.
*/
func (ge *graphEdge) IndexMap() map[string]string {
return createIndexMap(ge.graphNode, func(attr string) bool {
return attr == NodeKey || attr == NodeKind || attr == EdgeEnd1Key ||
attr == EdgeEnd1Kind || attr == EdgeEnd1Role ||
attr == EdgeEnd1Cascading || attr == EdgeEnd1CascadingLast ||
attr == EdgeEnd2Key || attr == EdgeEnd2Kind || attr == EdgeEnd2Role ||
attr == EdgeEnd2Cascading || attr == EdgeEnd2CascadingLast
})
}
/*
String returns a string representation of this edge.
*/
func (ge *graphEdge) String() string {
return dataToString("GraphEdge", ge.graphNode)
}
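/*
Usage sketch (editorial addition, not part of the original source): building an
edge by hand with the attribute constants above. The keys, kinds and roles are
arbitrary example values.

	edge := NewGraphEdge()
	edge.SetAttr(NodeKey, "e1")
	edge.SetAttr(NodeKind, "Friend")
	edge.SetAttr(EdgeEnd1Key, "a")
	edge.SetAttr(EdgeEnd1Kind, "Person")
	edge.SetAttr(EdgeEnd1Role, "friend")
	edge.SetAttr(EdgeEnd1Cascading, false)
	edge.SetAttr(EdgeEnd2Key, "b")
	edge.SetAttr(EdgeEnd2Kind, "Person")
	edge.SetAttr(EdgeEnd2Role, "friend")
	edge.SetAttr(EdgeEnd2Cascading, false)
	fmt.Println(edge.Spec("a")) // friend:Friend:friend:Person
*/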
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package data
import (
"bytes"
"encoding/json"
"fmt"
"sort"
"strconv"
)
/*
Node models nodes in the graph
*/
type Node interface {
/*
Key returns a potentially non human-readable unique key for this node.
*/
Key() string
/*
Name returns a human-readable name for this node.
*/
Name() string
/*
Kind returns a human-readable kind for this node.
*/
Kind() string
/*
Data returns the node data of this node.
*/
Data() map[string]interface{}
/*
Attr returns an attribute of this node.
*/
Attr(attr string) interface{}
/*
SetAttr sets an attribute of this node. Setting a nil
value removes the attribute.
*/
SetAttr(attr string, val interface{})
/*
IndexMap returns a representation of this node as a string map which
can be used to provide a full-text search.
*/
IndexMap() map[string]string
/*
String returns a string representation of this node.
*/
String() string
}
/*
NodeKey is the key attribute for a node
*/
const NodeKey = "key"
/*
NodeName is the name attribute for a node
*/
const NodeName = "name"
/*
NodeKind is the kind attribute for a node
*/
const NodeKind = "kind"
/*
CopyNode returns a shallow copy of a given node.
*/
func CopyNode(node Node) Node {
ret := NewGraphNode()
for k, v := range node.Data() {
ret.SetAttr(k, v)
}
return ret
}
/*
graphNode data structure.
*/
type graphNode struct {
data map[string]interface{} // Data which is held by this node
}
/*
NewGraphNode creates a new Node instance.
*/
func NewGraphNode() Node {
return &graphNode{make(map[string]interface{})}
}
/*
NewGraphNodeFromMap creates a new Node instance from the given data map.
*/
func NewGraphNodeFromMap(data map[string]interface{}) Node {
return &graphNode{data}
}
/*
Key returns a potentially non human-readable unique key for this node.
*/
func (gn *graphNode) Key() string {
return gn.stringAttr(NodeKey)
}
/*
Kind returns a human-readable kind for this node.
*/
func (gn *graphNode) Kind() string {
return gn.stringAttr(NodeKind)
}
/*
Data returns the node data of this node.
*/
func (gn *graphNode) Data() map[string]interface{} {
return gn.data
}
/*
Name returns a human-readable name for this node.
*/
func (gn *graphNode) Name() string {
return gn.stringAttr(NodeName)
}
/*
Attr returns an attribute of this node.
*/
func (gn *graphNode) Attr(attr string) interface{} {
return gn.data[attr]
}
/*
SetAttr sets an attribute of this node. Setting a nil
value removes the attribute.
*/
func (gn *graphNode) SetAttr(attr string, val interface{}) {
if val != nil {
gn.data[attr] = val
} else {
delete(gn.data, attr)
}
}
/*
stringAttr returns the value of an attribute as a string, or an
empty string if the value can't be represented as a string.
*/
func (gn *graphNode) stringAttr(attr string) string {
val, found := gn.data[attr]
if st, ok := val.(string); found && ok {
return st
} else if st, ok := val.(fmt.Stringer); found && ok {
return st.String()
} else if found {
return fmt.Sprintf("%v", val)
}
return ""
}
/*
IndexMap returns a representation of this node as a string map which
can be used to provide a full-text search.
*/
func (gn *graphNode) IndexMap() map[string]string {
return createIndexMap(gn, func(attr string) bool {
return attr == NodeKey || attr == NodeKind
})
}
/*
createIndexMap creates a representation of a node as a string map. A filter
function can be specified to filter out specific attributes.
*/
func createIndexMap(gn *graphNode, attFilter func(attr string) bool) map[string]string {
var addMap func(prefix string, data map[string]interface{})
ret := make(map[string]string)
addMap = func(prefix string, data map[string]interface{}) {
for key, val := range data {
attr := prefix + key
// Ignore attributes which are uninteresting for a full-text search
if attFilter(attr) {
continue
}
// Detect nested structures and recurse into them
if valmap, ok := val.(map[string]interface{}); ok {
addMap(prefix+key+".", valmap)
}
// Check the type of val and render it accordingly - ignore byte slices
if st, ok := val.(string); ok {
// Value is actually a string - no change needed
ret[attr] = st
} else if st, ok := val.(fmt.Stringer); ok {
// Value has a proper string representation - use that
ret[attr] = st.String()
} else if _, ok := val.([]byte); !ok {
// For all other cases (except ignored byte slices) first try a
// JSON representation
jsonBytes, err := json.Marshal(val)
jsonString := string(jsonBytes)
if err == nil && jsonString != "{}" {
ret[attr] = jsonString
} else {
// Otherwise do best effort printing
ret[attr] = fmt.Sprintf("%v", val)
}
}
}
}
addMap("", gn.data)
return ret
}
/*
String returns a string representation of this node.
*/
func (gn *graphNode) String() string {
return dataToString("GraphNode", gn)
}
/*
dataToString returns a string representation of a data item.
*/
func dataToString(dataType string, gn *graphNode) string {
var buf bytes.Buffer
attrlist := make([]string, 0, len(gn.data))
maxlen := 0
for attr := range gn.data {
attrlist = append(attrlist, attr)
if alen := len(attr); alen > maxlen {
maxlen = alen
}
}
sort.StringSlice(attrlist).Sort()
buf.WriteString(dataType + ":\n")
buf.WriteString(fmt.Sprintf(" %"+
strconv.Itoa(maxlen)+"v : %v\n", "key", gn.Key()))
buf.WriteString(fmt.Sprintf(" %"+
strconv.Itoa(maxlen)+"v : %v\n", "kind", gn.Kind()))
for _, attr := range attrlist {
if attr == NodeKey || attr == NodeKind {
continue
}
buf.WriteString(fmt.Sprintf(" %"+
strconv.Itoa(maxlen)+"v : %v\n", attr, gn.data[attr]))
}
return buf.String()
}
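/*
Usage sketch (editorial addition, not part of the original source): creating a
node and inspecting its full-text index representation. The attribute values
are arbitrary example values; key and kind are filtered out of the index map.

	node := NewGraphNode()
	node.SetAttr(NodeKey, "123")
	node.SetAttr(NodeKind, "Person")
	node.SetAttr(NodeName, "Marvin")
	node.SetAttr("age", 42)
	fmt.Println(node.IndexMap()) // map[age:42 name:Marvin]
*/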
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package data
import (
"sort"
"devt.de/krotik/common/datautil"
)
/*
NodeCompare compares node attributes.
*/
func NodeCompare(node1 Node, node2 Node, attrs []string) bool {
if attrs == nil {
if len(node1.Data()) != len(node2.Data()) {
return false
}
attrs = make([]string, 0, len(node1.Data()))
for attr := range node1.Data() {
attrs = append(attrs, attr)
}
}
for _, attr := range attrs {
if node1.Attr(attr) != node2.Attr(attr) {
return false
}
}
return true
}
/*
NodeClone clones a node.
*/
func NodeClone(node Node) Node {
var data map[string]interface{}
datautil.CopyObject(node.Data(), &data)
return &graphNode{data}
}
/*
NodeMerge merges two nodes into a third node. The node values are copied
by reference.
*/
func NodeMerge(node1 Node, node2 Node) Node {
data := make(map[string]interface{})
for k, v := range node1.Data() {
data[k] = v
}
for k, v := range node2.Data() {
data[k] = v
}
return &graphNode{data}
}
/*
NodeSort sorts a list of nodes.
*/
func NodeSort(list []Node) {
sort.Sort(NodeSlice(list))
}
/*
NodeSlice attaches the methods of sort.Interface to []Node, sorting in
increasing order by key and kind.
*/
type NodeSlice []Node
/*
Len belongs to the sort.Interface.
*/
func (p NodeSlice) Len() int { return len(p) }
/*
Less belongs to the sort.Interface.
*/
func (p NodeSlice) Less(i, j int) bool {
in := p[i]
jn := p[j]
if in.Kind() != jn.Kind() {
return in.Kind() < jn.Kind()
}
return in.Key() < jn.Key()
}
/*
Swap belongs to the sort.Interface.
*/
func (p NodeSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
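/*
Usage sketch (editorial addition, not part of the original source): merging and
sorting nodes. node1 and node2 are assumed to be Node instances created as in
the sketch above.

	merged := NodeMerge(node1, node2) // Attributes of node2 win on conflict
	list := []Node{merged, node1, node2}
	NodeSort(list) // Sorted by kind first, then by key
*/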
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"fmt"
"sort"
"strconv"
"sync"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/graphstorage"
"devt.de/krotik/eliasdb/graph/util"
)
/*
Manager data structure
*/
type Manager struct {
gs graphstorage.Storage // Graph storage of this graph manager
gr *graphRulesManager // Manager for graph rules
nm *util.NamesManager // Manager object which manages name encodings
mapCache map[string]map[string]string // Cache which caches maps stored in the main database
mutex *sync.RWMutex // Mutex to protect atomic graph operations
storageMutex *sync.Mutex // Special mutex for storage object access
}
/*
NewGraphManager returns a new GraphManager instance.
*/
func NewGraphManager(gs graphstorage.Storage) *Manager {
gm := createGraphManager(gs)
gm.SetGraphRule(&SystemRuleDeleteNodeEdges{})
gm.SetGraphRule(&SystemRuleUpdateNodeStats{})
return gm
}
/*
createGraphManager creates a new GraphManager instance.
*/
func createGraphManager(gs graphstorage.Storage) *Manager {
mdb := gs.MainDB()
// Check version
if version, ok := mdb[MainDBVersion]; !ok {
mdb[MainDBVersion] = strconv.Itoa(VERSION)
gs.FlushMain()
} else {
if v, _ := strconv.Atoi(version); v > VERSION {
panic(fmt.Sprintf("Cannot open graph storage of version: %v - "+
"max supported version: %v", version, VERSION))
} else if v < VERSION {
// Update the version if it is older
mdb[MainDBVersion] = strconv.Itoa(VERSION)
gs.FlushMain()
}
}
gm := &Manager{gs, &graphRulesManager{nil, make(map[string]Rule),
make(map[int]map[string]Rule)}, util.NewNamesManager(mdb),
make(map[string]map[string]string), &sync.RWMutex{}, &sync.Mutex{}}
gm.gr.gm = gm
return gm
}
/*
Name returns the name of this graph manager.
*/
func (gm *Manager) Name() string {
return fmt.Sprint("Graph ", gm.gs.Name())
}
/*
SetGraphRule sets a GraphRule.
*/
func (gm *Manager) SetGraphRule(rule Rule) {
gm.gr.SetGraphRule(rule)
}
/*
GraphRules returns a list of all available graph rules.
*/
func (gm *Manager) GraphRules() []string {
return gm.gr.GraphRules()
}
/*
NodeIndexQuery returns an object to query the full text search index for nodes.
*/
func (gm *Manager) NodeIndexQuery(part string, kind string) (IndexQuery, error) {
iht, err := gm.getNodeIndexHTree(part, kind, false)
if err != nil || iht == nil {
return nil, err
}
return util.NewIndexManager(iht), nil
}
/*
EdgeIndexQuery returns an object to query the full text search index for edges.
*/
func (gm *Manager) EdgeIndexQuery(part string, kind string) (IndexQuery, error) {
iht, err := gm.getEdgeIndexHTree(part, kind, false)
if err != nil || iht == nil {
return nil, err
}
return util.NewIndexManager(iht), nil
}
/*
Partitions returns all existing partitions.
*/
func (gm *Manager) Partitions() []string {
return gm.mainStringList(MainDBParts)
}
/*
NodeKinds returns all possible node kinds.
*/
func (gm *Manager) NodeKinds() []string {
return gm.mainStringList(MainDBNodeKinds)
}
/*
EdgeKinds returns all possible edge kinds.
*/
func (gm *Manager) EdgeKinds() []string {
return gm.mainStringList(MainDBEdgeKinds)
}
/*
NodeAttrs returns all possible node attributes for a given node kind.
*/
func (gm *Manager) NodeAttrs(kind string) []string {
return gm.mainStringList(MainDBNodeAttrs + kind)
}
/*
NodeEdges returns all possible node edge specs for a given node kind.
*/
func (gm *Manager) NodeEdges(kind string) []string {
return gm.mainStringList(MainDBNodeEdges + kind)
}
/*
EdgeAttrs returns all possible edge attributes for a given edge kind.
*/
func (gm *Manager) EdgeAttrs(kind string) []string {
return gm.mainStringList(MainDBEdgeAttrs + kind)
}
/*
mainStringList returns a list stored in the MainDB.
*/
func (gm *Manager) mainStringList(name string) []string {
items := gm.getMainDBMap(name)
var ret []string
if items != nil {
for item := range items {
ret = append(ret, item)
}
}
sort.StringSlice(ret).Sort()
return ret
}
/*
IsValidAttr checks if a given string can be a valid node attribute.
*/
func (gm *Manager) IsValidAttr(attr string) bool {
return gm.nm.Encode32(attr, false) != "" ||
attr == data.NodeKey || attr == data.NodeKind ||
attr == data.EdgeEnd1Key || attr == data.EdgeEnd1Kind ||
attr == data.EdgeEnd1Role || attr == data.EdgeEnd1Cascading ||
attr == data.EdgeEnd2Key || attr == data.EdgeEnd2Kind ||
attr == data.EdgeEnd2Role || attr == data.EdgeEnd2Cascading
}
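/*
Usage sketch (editorial addition, not part of the original source): creating a
graph manager on top of an in-memory storage. NewMemoryGraphStorage is assumed
to be the in-memory storage constructor of the graphstorage package.

	gm := NewGraphManager(graphstorage.NewMemoryGraphStorage("example"))
	fmt.Println(gm.Name())       // e.g. "Graph example"
	fmt.Println(gm.Partitions()) // Empty for a brand new storage
*/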
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"encoding/binary"
"encoding/gob"
"fmt"
"sort"
"strings"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/util"
"devt.de/krotik/eliasdb/hash"
)
/*
edgeTargetInfo is an internal structure which stores edge information
*/
type edgeTargetInfo struct {
CascadeToTarget bool // Flag if delete operations should be cascaded to the target
CascadeLastToTarget bool // Flag if delete operations should be cascaded to the target after the last edge was deleted
CascadeFromTarget bool // Flag if delete operations should be cascaded from the target
CascadeLastFromTarget bool // Flag if delete operations should be cascaded from the target after the last edge was deleted
TargetNodeKey string // Key of the target node
TargetNodeKind string // Kind of the target node
}
func init() {
// Make sure we can use the relevant types in a gob operation
gob.Register(make(map[string]string))
gob.Register(make(map[string]*edgeTargetInfo))
gob.Register(&edgeTargetInfo{})
}
/*
EdgeCount returns the edge count for a given edge kind.
*/
func (gm *Manager) EdgeCount(kind string) uint64 {
if val, ok := gm.gs.MainDB()[MainDBEdgeCount+kind]; ok {
return binary.LittleEndian.Uint64([]byte(val))
}
return 0
}
/*
FetchNodeEdgeSpecs returns all possible edge specs for a certain node.
*/
func (gm *Manager) FetchNodeEdgeSpecs(part string, key string, kind string) ([]string, error) {
_, tree, err := gm.getNodeStorageHTree(part, kind, false)
if err != nil || tree == nil {
return nil, err
}
// Take reader lock
gm.mutex.RLock()
defer gm.mutex.RUnlock()
specsNodeKey := PrefixNSSpecs + key
obj, err := tree.Get([]byte(specsNodeKey))
if err != nil {
return nil, &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
} else if obj == nil {
return nil, nil
}
specsNodeMap := obj.(map[string]string)
specsNode := make([]string, 0, len(specsNodeMap))
for spec := range specsNodeMap {
role1 := gm.nm.Decode16(spec[:2])
relKind := gm.nm.Decode16(spec[2:4])
role2 := gm.nm.Decode16(spec[4:6])
end2Kind := gm.nm.Decode16(spec[6:])
specsNode = append(specsNode,
role1+":"+relKind+":"+role2+":"+end2Kind)
}
// Ensure the output is deterministic
sort.StringSlice(specsNode).Sort()
return specsNode, nil
}
/*
TraverseMulti traverses from a given node to other nodes following a given
partial edge spec. Since the edge spec can be partial it is possible to
traverse multiple edge kinds. A spec with the value ":::" would follow
all relationships. The last parameter allData specifies if all data
should be retrieved for the connected nodes and edges. If set to false only
the minimal set of attributes will be populated.
*/
func (gm *Manager) TraverseMulti(part string, key string, kind string,
spec string, allData bool) ([]data.Node, []data.Edge, error) {
sspec := strings.Split(spec, ":")
if len(sspec) != 4 {
return nil, nil, &util.GraphError{Type: util.ErrInvalidData, Detail: "Invalid spec: " + spec}
} else if IsFullSpec(spec) {
return gm.Traverse(part, key, kind, spec, allData)
}
// Get all specs for the given node
specs, err := gm.FetchNodeEdgeSpecs(part, key, kind)
if err != nil || specs == nil {
return nil, nil, err
}
matchSpec := func(spec string) bool {
mspec := strings.Split(spec, ":")
// Check spec components
if (sspec[0] != "" && mspec[0] != sspec[0]) ||
(sspec[1] != "" && mspec[1] != sspec[1]) ||
(sspec[2] != "" && mspec[2] != sspec[2]) ||
(sspec[3] != "" && mspec[3] != sspec[3]) {
return false
}
return true
}
// Match specs and collect the results
var nodes []data.Node
var edges []data.Edge
for _, rspec := range specs {
if spec == ":::" || matchSpec(rspec) {
sn, se, err := gm.Traverse(part, key, kind, rspec, allData)
if err != nil {
return nil, nil, err
}
nodes = append(nodes, sn...)
edges = append(edges, se...)
}
}
return nodes, edges, nil
}
/*
Traverse traverses from a given node to other nodes following a given edge spec.
The last parameter allData specifies if all data should be retrieved for
the connected nodes and edges. If set to false only the minimal set of
attributes will be populated.
*/
func (gm *Manager) Traverse(part string, key string, kind string,
spec string, allData bool) ([]data.Node, []data.Edge, error) {
_, tree, err := gm.getNodeStorageHTree(part, kind, false)
if err != nil || tree == nil {
return nil, nil, err
}
// Take reader lock
gm.mutex.RLock()
defer gm.mutex.RUnlock()
sspec := strings.Split(spec, ":")
if len(sspec) != 4 {
return nil, nil, &util.GraphError{Type: util.ErrInvalidData, Detail: "Invalid spec: " + spec}
} else if !IsFullSpec(spec) {
return nil, nil, &util.GraphError{Type: util.ErrInvalidData, Detail: "Invalid spec: " + spec +
" - spec needs to be fully specified for direct traversal"}
}
encspec := gm.nm.Encode16(sspec[0], false) + gm.nm.Encode16(sspec[1], false) +
gm.nm.Encode16(sspec[2], false) + gm.nm.Encode16(sspec[3], false)
edgeInfoKey := PrefixNSEdge + key + encspec
// Lookup the target map containing edgeTargetInfo objects
obj, err := tree.Get([]byte(edgeInfoKey))
if err != nil || obj == nil {
return nil, nil, err
}
targetMap := obj.(map[string]*edgeTargetInfo)
nodes := make([]data.Node, 0, len(targetMap))
edges := make([]data.Edge, 0, len(targetMap))
if !allData {
// Populate nodes and edges with the minimal set of attributes
// no further lookups required
for k, v := range targetMap {
edge := data.NewGraphEdge()
edge.SetAttr(data.NodeKey, k)
edge.SetAttr(data.NodeKind, sspec[1])
edge.SetAttr(data.EdgeEnd1Key, key)
edge.SetAttr(data.EdgeEnd1Kind, kind)
edge.SetAttr(data.EdgeEnd1Role, sspec[0])
edge.SetAttr(data.EdgeEnd1Cascading, v.CascadeToTarget)
edge.SetAttr(data.EdgeEnd1CascadingLast, v.CascadeLastToTarget)
edge.SetAttr(data.EdgeEnd2Key, v.TargetNodeKey)
edge.SetAttr(data.EdgeEnd2Kind, v.TargetNodeKind)
edge.SetAttr(data.EdgeEnd2Role, sspec[2])
edge.SetAttr(data.EdgeEnd2Cascading, v.CascadeFromTarget)
edge.SetAttr(data.EdgeEnd2CascadingLast, v.CascadeLastFromTarget)
edges = append(edges, edge)
node := data.NewGraphNode()
node.SetAttr(data.NodeKey, v.TargetNodeKey)
node.SetAttr(data.NodeKind, v.TargetNodeKind)
nodes = append(nodes, node)
}
} else {
// Get the HTrees which stores the edges
edgeht, err := gm.getEdgeStorageHTree(part, sspec[1], false)
if err != nil || edgeht == nil {
return nil, nil, err
}
for k, v := range targetMap {
// Read the edge from the datastore
edgenode, err := gm.readNode(k, sspec[1], nil, edgeht, edgeht)
if err != nil || edgenode == nil {
return nil, nil, err
}
edge := data.NewGraphEdgeFromNode(edgenode)
// Exchange ends if necessary
if edge.End2Key() == key && edge.End2Kind() == kind {
swap := func(attr1 string, attr2 string) {
tmp := edge.Attr(attr1)
edge.SetAttr(attr1, edge.Attr(attr2))
edge.SetAttr(attr2, tmp)
}
swap(data.EdgeEnd1Key, data.EdgeEnd2Key)
swap(data.EdgeEnd1Kind, data.EdgeEnd2Kind)
swap(data.EdgeEnd1Role, data.EdgeEnd2Role)
swap(data.EdgeEnd1Cascading, data.EdgeEnd2Cascading)
}
edges = append(edges, edge)
// Get the HTrees which stores the node
attht, valht, err := gm.getNodeStorageHTree(part, v.TargetNodeKind, false)
if err != nil || attht == nil || valht == nil {
return nil, nil, err
}
node, err := gm.readNode(v.TargetNodeKey, v.TargetNodeKind, nil, attht, valht)
if err != nil {
return nil, nil, err
}
nodes = append(nodes, node)
}
}
return nodes, edges, nil
}
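/*
Usage sketch (editorial addition, not part of the original source): traversing
with a full and a partial spec. The partition, key and kind values are
arbitrary example values; as described above, ":::" follows all relationships.

	// Follow only friend relationships to Person nodes (full spec)
	nodes, edges, err := gm.Traverse("main", "a", "Person", "friend:Friend:friend:Person", true)
	// Follow any relationship regardless of role and kind (partial spec)
	nodes, edges, err = gm.TraverseMulti("main", "a", "Person", ":::", false)
	_, _, _ = nodes, edges, err
*/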
/*
FetchEdge fetches a single edge from a partition of the graph.
*/
func (gm *Manager) FetchEdge(part string, key string, kind string) (data.Edge, error) {
return gm.FetchEdgePart(part, key, kind, nil)
}
/*
FetchEdgePart fetches part of a single edge from a partition of the graph.
*/
func (gm *Manager) FetchEdgePart(part string, key string, kind string,
attrs []string) (data.Edge, error) {
// Get the HTrees which stores the edge
edgeht, err := gm.getEdgeStorageHTree(part, kind, true)
if err != nil || edgeht == nil {
return nil, err
}
// Take reader lock
gm.mutex.RLock()
defer gm.mutex.RUnlock()
// Read the edge from the datastore
node, err := gm.readNode(key, kind, attrs, edgeht, edgeht)
return data.NewGraphEdgeFromNode(node), err
}
/*
StoreEdge stores a single edge in a partition of the graph. This function
overwrites any existing edge.
*/
func (gm *Manager) StoreEdge(part string, edge data.Edge) error {
trans := newInternalGraphTrans(gm)
trans.subtrans = true
err := gm.gr.graphEvent(trans, EventEdgeStore, part, edge)
if err != nil {
if err == ErrEventHandled {
err = nil
}
return err
}
if err = trans.Commit(); err == nil {
// Check if the edge can be stored
if err := gm.checkEdge(edge); err != nil {
return err
}
// Get the HTrees which stores the edges and the edge index
iht, err := gm.getEdgeIndexHTree(part, edge.Kind(), true)
if err != nil {
return err
}
edgeht, err := gm.getEdgeStorageHTree(part, edge.Kind(), true)
if err != nil {
return err
}
// Get the HTrees which stores the edge endpoints and make sure the endpoints
// do exist
end1nodeht, end1ht, err := gm.getNodeStorageHTree(part, edge.End1Kind(), false)
if err != nil {
return err
} else if end1ht == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: "Can't store edge to non-existing node kind: " + edge.End1Kind(),
}
} else if end1, err := end1nodeht.Get([]byte(PrefixNSAttrs + edge.End1Key())); err != nil || end1 == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Can't find edge endpoint: %s (%s)", edge.End1Key(), edge.End1Kind()),
}
}
end2nodeht, end2ht, err := gm.getNodeStorageHTree(part, edge.End2Kind(), false)
if err != nil {
return err
} else if end2ht == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: "Can't store edge to non-existing node kind: " + edge.End2Kind(),
}
} else if end2, err := end2nodeht.Get([]byte(PrefixNSAttrs + edge.End2Key())); err != nil || end2 == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Can't find edge endpoint: %s (%s)", edge.End2Key(), edge.End2Kind()),
}
}
// Take writer lock
gm.mutex.Lock()
defer gm.mutex.Unlock()
// Write edge to the datastore
oldedge, err := gm.writeEdge(edge, edgeht, end1ht, end2ht)
if err != nil {
return err
}
// Increase edge count if the edge was inserted and write the changes
// to the index.
if oldedge == nil {
// Increase edge count
currentCount := gm.EdgeCount(edge.Kind())
if err := gm.writeEdgeCount(edge.Kind(), currentCount+1, true); err != nil {
return err
}
// Write edge data to the index
if iht != nil {
if err := util.NewIndexManager(iht).Index(edge.Key(), edge.IndexMap()); err != nil {
// The edge was written at this point and the model is
// consistent - only the index is missing entries
return err
}
}
} else if iht != nil {
err := util.NewIndexManager(iht).Reindex(edge.Key(), edge.IndexMap(),
oldedge.IndexMap())
if err != nil {
// The edge was written at this point and the model is
// consistent - only the index is missing entries
return err
}
}
defer func() {
// Flush changes - errors only reported on the actual node storage flush
gm.gs.FlushMain()
gm.flushEdgeIndex(part, edge.Kind())
gm.flushNodeStorage(part, edge.End1Kind())
gm.flushNodeStorage(part, edge.End2Kind())
gm.flushEdgeStorage(part, edge.Kind())
}()
// Execute rules
trans := newInternalGraphTrans(gm)
trans.subtrans = true
var event int
if oldedge == nil {
event = EventEdgeCreated
} else {
event = EventEdgeUpdated
}
if err := gm.gr.graphEvent(trans, event, part, edge, oldedge); err != nil && err != ErrEventHandled {
return err
} else if err := trans.Commit(); err != nil {
return err
}
}
return err
}
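/*
Usage sketch (editorial addition, not part of the original source): storing two
nodes and connecting them. StoreNode is assumed to be the node storage
counterpart of StoreEdge on the Manager; node1, node2 and edge are assumed to
be built as in the data package sketches above.

	gm.StoreNode("main", node1)
	gm.StoreNode("main", node2)
	if err := gm.StoreEdge("main", edge); err != nil {
		// Both endpoints must exist before the edge can be stored
		fmt.Println(err)
	}
*/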
/*
writeEdge writes a given edge to the datastore. It is assumed that the caller
holds the writer lock before calling this function and that, after the function
returns, the changes are flushed to the storage. The caller must also ensure
that the endpoints of the edge do exist. Returns the old edge if an
update occurred.
*/
func (gm *Manager) writeEdge(edge data.Edge, edgeTree *hash.HTree,
end1Tree *hash.HTree, end2Tree *hash.HTree) (data.Edge, error) {
// Create lookup keys
spec1 := gm.nm.Encode16(edge.End1Role(), true) + gm.nm.Encode16(edge.Kind(), true) +
gm.nm.Encode16(edge.End2Role(), true) + gm.nm.Encode16(edge.End2Kind(), true)
spec2 := gm.nm.Encode16(edge.End2Role(), true) + gm.nm.Encode16(edge.Kind(), true) +
gm.nm.Encode16(edge.End1Role(), true) + gm.nm.Encode16(edge.End1Kind(), true)
specsNode1Key := PrefixNSSpecs + edge.End1Key()
edgeInfo1Key := PrefixNSEdge + edge.End1Key() + spec1
specsNode2Key := PrefixNSSpecs + edge.End2Key()
edgeInfo2Key := PrefixNSEdge + edge.End2Key() + spec2
// Function to insert a new spec into a specs map
updateSpecMap := func(key string, spec string, tree *hash.HTree) error {
var specsNode map[string]string
obj, err := tree.Get([]byte(key))
if err != nil {
return err
} else if obj == nil {
specsNode = make(map[string]string)
} else {
specsNode = obj.(map[string]string)
}
specsNode[spec] = ""
if _, err = tree.Put([]byte(key), specsNode); err != nil {
return err
}
return nil
}
// Function to update the edgeTargetInfo entry
updateTargetInfo := func(key string, endkey string, endkind string,
cascadeToTarget bool, cascadeLastToTarget bool, cascadeFromTarget bool, cascadeLastFromTarget bool, tree *hash.HTree) error {
var targetMap map[string]*edgeTargetInfo
obj, err := tree.Get([]byte(key))
if err != nil {
return err
} else if obj == nil {
targetMap = make(map[string]*edgeTargetInfo)
} else {
targetMap = obj.(map[string]*edgeTargetInfo)
}
// Update the target info
targetMap[edge.Key()] = &edgeTargetInfo{cascadeToTarget, cascadeLastToTarget,
cascadeFromTarget, cascadeLastFromTarget, endkey, endkind}
if _, err = tree.Put([]byte(key), targetMap); err != nil {
return err
}
return nil
}
// Write node data for edge - if the data is incorrect we write the old
// data back later. It is assumed that most of the time the data is correct
// so we can avoid an extra read lookup
var oldedge data.Edge
if oldedgenode, err := gm.writeNode(edge, false, edgeTree, edgeTree, edgeAttributeFilter); err != nil {
return nil, err
} else if oldedgenode != nil {
oldedge = data.NewGraphEdgeFromNode(oldedgenode)
// Do a sanity check that the endpoints were not updated.
if !data.NodeCompare(oldedge, edge, []string{data.EdgeEnd1Key,
data.EdgeEnd1Kind, data.EdgeEnd1Role, data.EdgeEnd2Key,
data.EdgeEnd2Kind, data.EdgeEnd2Role}) {
// If the check fails then write back the old data and return -
// no error checking is done when writing back
gm.writeNode(oldedge, false, edgeTree, edgeTree, edgeAttributeFilter)
return nil, &util.GraphError{
Type: util.ErrInvalidData,
Detail: "Cannot update endpoints or spec of existing edge: " + edge.Key(),
}
}
return oldedge, nil
}
// Create / update specs map on the nodes
if err := updateSpecMap(specsNode1Key, spec1, end1Tree); err != nil {
return nil, err
}
if err := updateSpecMap(specsNode2Key, spec2, end2Tree); err != nil {
return nil, err
}
// Create / update the edgeInfo entries
if err := updateTargetInfo(edgeInfo1Key, edge.End2Key(), edge.End2Kind(),
edge.End1IsCascading(), edge.End1IsCascadingLast(), edge.End2IsCascading(),
edge.End2IsCascadingLast(), end1Tree); err != nil {
return nil, err
}
if err := updateTargetInfo(edgeInfo2Key, edge.End1Key(), edge.End1Kind(),
edge.End2IsCascading(), edge.End2IsCascadingLast(),
edge.End1IsCascading(), edge.End1IsCascadingLast(), end2Tree); err != nil {
return nil, err
}
return nil, nil
}
/*
RemoveEdge removes a single edge from a partition of the graph.
*/
func (gm *Manager) RemoveEdge(part string, key string, kind string) (data.Edge, error) {
var err error
trans := newInternalGraphTrans(gm)
trans.subtrans = true
if err = gm.gr.graphEvent(trans, EventEdgeDelete, part, key, kind); err != nil {
if err == ErrEventHandled {
err = nil
}
return nil, err
}
err = trans.Commit()
if err == nil {
// Get the HTrees which stores the edges and the edge index
iht, err := gm.getEdgeIndexHTree(part, kind, true)
if err != nil {
return nil, err
}
edgeht, err := gm.getEdgeStorageHTree(part, kind, true)
if err != nil {
return nil, err
}
// Take writer lock
gm.mutex.Lock()
defer gm.mutex.Unlock()
// Delete the node from the datastore
node, err := gm.deleteNode(key, kind, edgeht, edgeht)
edge := data.NewGraphEdgeFromNode(node)
if err != nil {
return edge, err
}
if node != nil {
// Get the HTrees which stores the edge endpoints
_, end1ht, err := gm.getNodeStorageHTree(part, edge.End1Kind(), false)
if err != nil {
return edge, err
}
_, end2ht, err := gm.getNodeStorageHTree(part, edge.End2Kind(), false)
if err != nil {
return edge, err
}
// Delete edge info from node storage
if err := gm.deleteEdge(edge, end1ht, end2ht); err != nil {
return edge, err
}
if iht != nil {
err := util.NewIndexManager(iht).Deindex(key, edge.IndexMap())
if err != nil {
return edge, err
}
}
// Decrease edge count
currentCount := gm.EdgeCount(edge.Kind())
if err := gm.writeEdgeCount(edge.Kind(), currentCount-1, true); err != nil {
return edge, err
}
defer func() {
// Flush changes - errors only reported on the actual node storage flush
gm.gs.FlushMain()
gm.flushEdgeIndex(part, edge.Kind())
gm.flushNodeStorage(part, edge.End1Kind())
gm.flushNodeStorage(part, edge.End2Kind())
gm.flushEdgeStorage(part, edge.Kind())
}()
// Execute rules
trans := newInternalGraphTrans(gm)
trans.subtrans = true
if err := gm.gr.graphEvent(trans, EventEdgeDeleted, part, edge); err != nil && err != ErrEventHandled {
return edge, err
} else if err := trans.Commit(); err != nil {
return edge, err
}
return edge, nil
}
}
return nil, err
}
/*
deleteEdge deletes edge information from the given node storages
*/
func (gm *Manager) deleteEdge(edge data.Edge, end1Tree *hash.HTree, end2Tree *hash.HTree) error {
// Create lookup keys
spec1 := gm.nm.Encode16(edge.End1Role(), true) + gm.nm.Encode16(edge.Kind(), true) +
gm.nm.Encode16(edge.End2Role(), true) + gm.nm.Encode16(edge.End2Kind(), true)
spec2 := gm.nm.Encode16(edge.End2Role(), true) + gm.nm.Encode16(edge.Kind(), true) +
gm.nm.Encode16(edge.End1Role(), true) + gm.nm.Encode16(edge.End1Kind(), true)
specsNode1Key := PrefixNSSpecs + edge.End1Key()
edgeInfo1Key := PrefixNSEdge + edge.End1Key() + spec1
specsNode2Key := PrefixNSSpecs + edge.End2Key()
edgeInfo2Key := PrefixNSEdge + edge.End2Key() + spec2
// Function to delete a spec from a specs map
updateSpecMap := func(key string, spec string, tree *hash.HTree) error {
var specsNode map[string]string
obj, err := tree.Get([]byte(key))
if err != nil {
return &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
} else if obj == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Expected spec entry is missing: %v", key),
}
} else {
specsNode = obj.(map[string]string)
}
delete(specsNode, spec)
if len(specsNode) == 0 {
if _, err = tree.Remove([]byte(key)); err != nil {
return err
}
} else if _, err = tree.Put([]byte(key), specsNode); err != nil {
return err
}
return nil
}
// Function to delete the edgeTargetInfo entry
updateTargetInfo := func(key string, tree *hash.HTree) (bool, error) {
var targetMap map[string]*edgeTargetInfo
obj, err := tree.Get([]byte(key))
if err != nil {
return false, &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
} else if obj == nil {
return false, &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Expected edgeTargetInfo entry is missing: %v", key),
}
} else {
targetMap = obj.(map[string]*edgeTargetInfo)
}
delete(targetMap, edge.Key())
if len(targetMap) == 0 {
if _, err = tree.Remove([]byte(key)); err != nil {
return false, err
}
return true, nil
} else if _, err = tree.Put([]byte(key), targetMap); err != nil {
return false, err
}
return false, nil
}
// Remove the edgeInfo entries
end1TargetInfoRemoved, err := updateTargetInfo(edgeInfo1Key, end1Tree)
if err != nil {
return err
}
end2TargetInfoRemoved, err := updateTargetInfo(edgeInfo2Key, end2Tree)
if err != nil {
return err
}
// Remove specs map on the nodes if the target info structure was removed
if end1TargetInfoRemoved {
if err := updateSpecMap(specsNode1Key, spec1, end1Tree); err != nil {
return err
}
}
if end2TargetInfoRemoved {
if err := updateSpecMap(specsNode2Key, spec2, end2Tree); err != nil {
return err
}
}
return nil
}
/*
edgeAttributeFilter is the default filter function to filter out system edge attributes.
*/
func edgeAttributeFilter(attr string) bool {
return attr == data.NodeKey || attr == data.NodeKind
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"encoding/binary"
"encoding/gob"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/util"
"devt.de/krotik/eliasdb/hash"
)
func init() {
// It is possible to store nested structures on nodes
gob.Register(make(map[string]interface{}))
}
/*
NodeCount returns the node count for a given node kind.
*/
func (gm *Manager) NodeCount(kind string) uint64 {
if val, ok := gm.gs.MainDB()[MainDBNodeCount+kind]; ok {
return binary.LittleEndian.Uint64([]byte(val))
}
return 0
}
/*
NodeKeyIterator iterates node keys of a certain kind.
*/
func (gm *Manager) NodeKeyIterator(part string, kind string) (*NodeKeyIterator, error) {
// Get the HTrees which store the node
tree, _, err := gm.getNodeStorageHTree(part, kind, false)
if err != nil || tree == nil {
return nil, err
}
it := hash.NewHTreeIterator(tree)
if it.LastError != nil {
return nil, &util.GraphError{
Type: util.ErrReading,
Detail: it.LastError.Error(),
}
}
return &NodeKeyIterator{gm, it, nil}, nil
}
/*
FetchNode fetches a single node from a partition of the graph.
*/
func (gm *Manager) FetchNode(part string, key string, kind string) (data.Node, error) {
return gm.FetchNodePart(part, key, kind, nil)
}
/*
FetchNodePart fetches part of a single node from a partition of the graph.
*/
func (gm *Manager) FetchNodePart(part string, key string, kind string,
attrs []string) (data.Node, error) {
// Get the HTrees which store the node
attht, valht, err := gm.getNodeStorageHTree(part, kind, false)
if err != nil || attht == nil || valht == nil {
return nil, err
}
// Take reader lock
gm.mutex.RLock()
defer gm.mutex.RUnlock()
// Read the node from the datastore
return gm.readNode(key, kind, attrs, attht, valht)
}
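/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance with a
stored "Person" node of key "123" in partition "main"):

    // Fetch the node with all of its attributes
    node, err := gm.FetchNode("main", "123", "Person")

    // Fetch only the name attribute - key and kind are always populated
    partial, err := gm.FetchNodePart("main", "123", "Person", []string{"name"})
*/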
/*
readNode reads a given node from the datastore.
*/
func (gm *Manager) readNode(key string, kind string, attrs []string,
attrTree *hash.HTree, valTree *hash.HTree) (data.Node, error) {
keyAttrs := PrefixNSAttrs + key
keyAttrPrefix := PrefixNSAttr + key
// Check if the node exists
attrList, err := attrTree.Get([]byte(keyAttrs))
if err != nil {
return nil, &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
} else if attrList == nil {
return nil, nil
}
var node data.Node
tryPopulateNode := func(encattr string, attr string) error {
// Try to lookup the attribute
val, err := valTree.Get([]byte(keyAttrPrefix + encattr))
if err != nil {
return &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
}
if val != nil {
if node == nil {
node = data.NewGraphNode()
}
node.SetAttr(attr, val)
}
return nil
}
if len(attrs) == 0 {
// Always create a node if we fetch all attributes
node = data.NewGraphNode()
// Lookup all attributes
for _, encattr := range attrList.([]string) {
attr := gm.nm.Decode32(encattr)
if err := tryPopulateNode(encattr, attr); err != nil {
return nil, err
}
}
} else {
// Lookup the given attributes - it is assumed that most of the time the
// queried attributes do exist
for _, attr := range attrs {
if (attr == data.NodeKey || attr == data.NodeKind) && node == nil {
// Create node - we might only query for node key or node kind
node = data.NewGraphNode()
continue
}
// Only try to populate the attribute if it can be decoded
if encattr := gm.nm.Encode32(attr, false); encattr != "" {
if err := tryPopulateNode(encattr, attr); err != nil {
return nil, err
}
}
}
}
// Set key and kind attributes
if node != nil {
node.SetAttr(data.NodeKey, key)
node.SetAttr(data.NodeKind, kind)
}
return node, nil
}
/*
StoreNode stores a single node in a partition of the graph. This function
overwrites any existing node.
*/
func (gm *Manager) StoreNode(part string, node data.Node) error {
trans := newInternalGraphTrans(gm)
trans.subtrans = true
err := gm.gr.graphEvent(trans, EventNodeStore, part, node)
if err != nil {
if err == ErrEventHandled {
err = nil
}
return err
}
if err = trans.Commit(); err == nil {
err = gm.storeOrUpdateNode(part, node, false)
}
return err
}
/*
UpdateNode updates a single node in a partition of the graph. This function will
only update the given values of the node.
*/
func (gm *Manager) UpdateNode(part string, node data.Node) error {
trans := newInternalGraphTrans(gm)
trans.subtrans = true
err := gm.gr.graphEvent(trans, EventNodeUpdate, part, node)
if err != nil {
if err == ErrEventHandled {
err = nil
}
return err
}
if err = trans.Commit(); err == nil {
err = gm.storeOrUpdateNode(part, node, true)
}
return err
}
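/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance and the
attribute names and values are illustrative only):

    node := data.NewGraphNode()
    node.SetAttr("key", "123")
    node.SetAttr("kind", "Person")
    node.SetAttr("name", "Fred")

    // StoreNode overwrites an existing node - attributes which are not
    // part of the given node are removed
    err := gm.StoreNode("main", node)

    update := data.NewGraphNode()
    update.SetAttr("key", "123")
    update.SetAttr("kind", "Person")
    update.SetAttr("age", 42)

    // UpdateNode only touches the given attributes - "name" is kept
    err = gm.UpdateNode("main", update)
*/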
/*
storeOrUpdateNode stores or updates a single node in a partition of the graph.
*/
func (gm *Manager) storeOrUpdateNode(part string, node data.Node, onlyUpdate bool) error {
// Check if the node can be stored
if err := gm.checkNode(node); err != nil {
return err
}
// Get the HTrees which store the node index and the node
iht, err := gm.getNodeIndexHTree(part, node.Kind(), true)
if err != nil {
return err
}
attht, valht, err := gm.getNodeStorageHTree(part, node.Kind(), true)
if err != nil || attht == nil || valht == nil {
return err
}
// Take writer lock
gm.mutex.Lock()
defer gm.mutex.Unlock()
// Write the node to the datastore
oldnode, err := gm.writeNode(node, onlyUpdate, attht, valht, nodeAttributeFilter)
if err != nil {
return err
}
// Increase node count if the node was inserted and write the changes
// to the index.
if oldnode == nil {
currentCount := gm.NodeCount(node.Kind())
if err := gm.writeNodeCount(node.Kind(), currentCount+1, true); err != nil {
return err
}
if iht != nil {
err := util.NewIndexManager(iht).Index(node.Key(), node.IndexMap())
if err != nil {
// The node was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
} else if iht != nil {
err := util.NewIndexManager(iht).Reindex(node.Key(), node.IndexMap(),
oldnode.IndexMap())
if err != nil {
// The node was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
defer func() {
// Flush changes
gm.gs.FlushMain()
gm.flushNodeIndex(part, node.Kind())
gm.flushNodeStorage(part, node.Kind())
}()
// Execute rules
trans := newInternalGraphTrans(gm)
trans.subtrans = true
var event int
if oldnode == nil {
event = EventNodeCreated
} else {
event = EventNodeUpdated
}
if err := gm.gr.graphEvent(trans, event, part, node, oldnode); err != nil && err != ErrEventHandled {
return err
} else if err := trans.Commit(); err != nil {
return err
}
return nil
}
/*
writeNode writes a given node in full or part to the datastore. It is assumed
that the caller holds the writer lock before calling this function and that,
after the function returns, the changes are flushed to the storage. Returns
the old node if an update occurred. An attribute filter can be specified to skip
specific attributes.
*/
func (gm *Manager) writeNode(node data.Node, onlyUpdate bool, attrTree *hash.HTree,
valTree *hash.HTree, attFilter func(attr string) bool) (data.Node, error) {
keyAttrs := PrefixNSAttrs + node.Key()
keyAttrPrefix := PrefixNSAttr + node.Key()
var oldnode data.Node
var attrListOld interface{}
var err error
// Store the node attributes
attrList := make([]string, 0, len(node.IndexMap()))
attrMap := make(map[string]string)
for attr, val := range node.Data() {
// Ignore filtered attributes
if attFilter(attr) {
continue
}
encattr := gm.nm.Encode32(attr, true)
// Build up a lookup map to identify which attribute exist
attrMap[encattr] = ""
// Build up new attributes list
attrList = append(attrList, encattr)
// Store the value in the datastore
oldval, err := valTree.Put([]byte(keyAttrPrefix+encattr), val)
if err != nil {
return nil, &util.GraphError{Type: util.ErrWriting, Detail: err.Error()}
}
// Build up old node
if oldval != nil {
if oldnode == nil {
oldnode = data.NewGraphNode()
}
oldnode.SetAttr(attr, oldval)
}
}
if onlyUpdate {
// If we only do an update, write the attribute list only if we added
// new attributes
attrListOld, err = attrTree.Get([]byte(keyAttrs))
if err != nil {
return nil, &util.GraphError{Type: util.ErrReading, Detail: err.Error()}
}
if attrListOld != nil {
// Fill up the attrMap with the existing attributes
for _, encattr := range attrListOld.([]string) {
attrMap[encattr] = ""
}
// Now check if we need to write the attribute list
if len(attrListOld.([]string)) != len(attrMap) {
// Store the new node attributes
attrList = make([]string, 0, len(attrMap))
for encattr := range attrMap {
attrList = append(attrList, encattr)
}
attrListOld, err = attrTree.Put([]byte(keyAttrs), attrList)
}
} else {
// We are actually doing an insert - just write the attribute list
_, err = attrTree.Put([]byte(keyAttrs), attrList)
}
} else {
// Store the new node attributes
attrListOld, err = attrTree.Put([]byte(keyAttrs), attrList)
}
if err != nil {
// Do not try cleanup in case we updated a node - we would do more
// harm than good.
return nil, &util.GraphError{Type: util.ErrWriting, Detail: err.Error()}
}
// Remove deleted keys
if attrListOld != nil {
// Create the old node if none of the new attributes were on the old node
if oldnode == nil {
oldnode = data.NewGraphNode()
}
oldnode.SetAttr(data.NodeKey, node.Key())
oldnode.SetAttr(data.NodeKind, node.Kind())
for _, encattrold := range attrListOld.([]string) {
if _, ok := attrMap[encattrold]; !ok {
oldval, err := valTree.Remove([]byte(keyAttrPrefix + encattrold))
if err != nil {
return nil, &util.GraphError{Type: util.ErrWriting, Detail: err.Error()}
}
oldnode.SetAttr(gm.nm.Decode32(encattrold), oldval)
}
}
return oldnode, nil
}
return nil, nil
}
/*
RemoveNode removes a single node from a partition of the graph.
*/
func (gm *Manager) RemoveNode(part string, key string, kind string) (data.Node, error) {
var err error
trans := newInternalGraphTrans(gm)
trans.subtrans = true
if err = gm.gr.graphEvent(trans, EventNodeDelete, part, key, kind); err != nil {
if err == ErrEventHandled {
err = nil
}
return nil, err
}
err = trans.Commit()
if err == nil {
// Get the HTrees which store the node index and the node
iht, err := gm.getNodeIndexHTree(part, kind, false)
if err != nil {
return nil, err
}
attTree, valTree, err := gm.getNodeStorageHTree(part, kind, false)
if err != nil || attTree == nil || valTree == nil {
return nil, err
}
// Take writer lock
gm.mutex.Lock()
defer gm.mutex.Unlock()
// Delete the node from the datastore
node, err := gm.deleteNode(key, kind, attTree, valTree)
if err != nil {
return node, err
}
// Update the index
if node != nil {
if iht != nil {
err := util.NewIndexManager(iht).Deindex(key, node.IndexMap())
if err != nil {
return node, err
}
}
// Decrease the node count
currentCount := gm.NodeCount(kind)
if err := gm.writeNodeCount(kind, currentCount-1, true); err != nil {
return node, err
}
defer func() {
// Flush changes
gm.gs.FlushMain()
gm.flushNodeIndex(part, kind)
gm.flushNodeStorage(part, kind)
}()
// Execute rules
trans := newInternalGraphTrans(gm)
trans.subtrans = true
if err := gm.gr.graphEvent(trans, EventNodeDeleted, part, node); err != nil && err != ErrEventHandled {
return node, err
} else if err := trans.Commit(); err != nil {
return node, err
}
return node, nil
}
}
return nil, err
}
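/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance):

    // removed is the deleted node or nil if no node was found
    removed, err := gm.RemoveNode("main", "123", "Person")
*/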
/*
deleteNode deletes a given node from the datastore. It is assumed that the caller
holds the writer lock before calling this function and that, after the function
returns, the changes are flushed to the storage. Returns the deleted node.
*/
func (gm *Manager) deleteNode(key string, kind string, attrTree *hash.HTree,
valTree *hash.HTree) (data.Node, error) {
keyAttrs := PrefixNSAttrs + key
keyAttrPrefix := PrefixNSAttr + key
// Remove the attribute list entry
attrList, err := attrTree.Remove([]byte(keyAttrs))
if err != nil {
return nil, &util.GraphError{Type: util.ErrWriting, Detail: err.Error()}
} else if attrList == nil {
return nil, nil
}
// Create the node object which is returned
node := data.NewGraphNode()
node.SetAttr(data.NodeKey, key)
node.SetAttr(data.NodeKind, kind)
// Remove node attributes
for _, encattr := range attrList.([]string) {
attr := gm.nm.Decode32(encattr)
// Try to remove the attribute
val, err := valTree.Remove([]byte(keyAttrPrefix + encattr))
if err != nil {
return node, &util.GraphError{Type: util.ErrWriting, Detail: err.Error()}
}
node.SetAttr(attr, val)
}
return node, nil
}
/*
nodeAttributeFilter is the default filter function to filter out system node attributes.
*/
func nodeAttributeFilter(attr string) bool {
return attr == data.NodeKey || attr == data.NodeKind
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package graphstorage contains data structures which model storage objects for graph data.
There are two main storage objects: DiskGraphStorage which provides disk storage
and MemoryGraphStorage which provides memory-only storage.
*/
package graphstorage
import (
"fmt"
"os"
"strings"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/fileutil"
"devt.de/krotik/eliasdb/graph/util"
"devt.de/krotik/eliasdb/storage"
)
/*
FilenameNameDB is the filename for the name storage file
*/
var FilenameNameDB = "names.pm"
/*
DiskGraphStorage data structure
*/
type DiskGraphStorage struct {
name string // Name of the graph storage
readonly bool // Flag for readonly mode
mainDB *datautil.PersistentStringMap // Database storing names
storagemanagers map[string]storage.Manager // Map of StorageManagers
}
/*
NewDiskGraphStorage creates a new DiskGraphStorage instance.
*/
func NewDiskGraphStorage(name string, readonly bool) (Storage, error) {
dgs := &DiskGraphStorage{name, readonly, nil, make(map[string]storage.Manager)}
// Load the graph storage if the storage directory already exists; if not, create it
if res, _ := fileutil.PathExists(name); !res {
if err := os.Mkdir(name, 0770); err != nil {
return nil, &util.GraphError{Type: util.ErrOpening, Detail: err.Error()}
}
// Create the graph storage files
mainDB, err := datautil.NewPersistentStringMap(name + "/" + FilenameNameDB)
if err != nil {
return nil, &util.GraphError{Type: util.ErrOpening, Detail: err.Error()}
}
dgs.mainDB = mainDB
} else {
// Load graph storage files
mainDB, err := datautil.LoadPersistentStringMap(name + "/" + FilenameNameDB)
if err != nil {
return nil, &util.GraphError{Type: util.ErrOpening, Detail: err.Error()}
}
dgs.mainDB = mainDB
}
return dgs, nil
}
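/*
Example usage (a minimal sketch): create or open a disk storage in the
directory "db" and make sure it is closed on shutdown. The storage would
typically be handed to a graph Manager.

    dgs, err := graphstorage.NewDiskGraphStorage("db", false)
    if err == nil {
        defer dgs.Close()
        // ... use dgs e.g. as the backend for a graph Manager ...
    }
*/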
/*
Name returns the name of the DiskGraphStorage instance.
*/
func (dgs *DiskGraphStorage) Name() string {
return dgs.name
}
/*
MainDB returns the main database.
*/
func (dgs *DiskGraphStorage) MainDB() map[string]string {
return dgs.mainDB.Data
}
/*
RollbackMain rolls back the main database.
*/
func (dgs *DiskGraphStorage) RollbackMain() error {
// Fail operation when readonly
if dgs.readonly {
return &util.GraphError{Type: util.ErrReadOnly, Detail: "Cannot rollback main db"}
}
mainDB, err := datautil.LoadPersistentStringMap(dgs.name + "/" + FilenameNameDB)
if err != nil {
return &util.GraphError{Type: util.ErrOpening, Detail: err.Error()}
}
dgs.mainDB = mainDB
return nil
}
/*
FlushMain writes the main database to the storage.
*/
func (dgs *DiskGraphStorage) FlushMain() error {
// Fail operation when readonly
if dgs.readonly {
return &util.GraphError{Type: util.ErrReadOnly, Detail: "Cannot flush main db"}
}
if err := dgs.mainDB.Flush(); err != nil {
return &util.GraphError{Type: util.ErrFlushing, Detail: err.Error()}
}
return nil
}
/*
StorageManager gets a storage manager with a certain name. A non-existing
StorageManager is created automatically if the create flag is set to true.
*/
func (dgs *DiskGraphStorage) StorageManager(smname string, create bool) storage.Manager {
sm, ok := dgs.storagemanagers[smname]
filename := dgs.name + "/" + smname
// Create the storage manager object if creation was requested or if the
// database already exists
if !ok && (create || storage.DataFileExist(filename)) {
dsm := storage.NewDiskStorageManager(filename, dgs.readonly, false, false, false)
sm = storage.NewCachedDiskStorageManager(dsm, 100000)
dgs.storagemanagers[smname] = sm
}
return sm
}
/*
FlushAll writes all pending changes to the storage.
*/
func (dgs *DiskGraphStorage) FlushAll() error {
if dgs.readonly {
return nil
}
var errors []string
err := dgs.mainDB.Flush()
if err != nil {
errors = append(errors, err.Error())
}
for _, sm := range dgs.storagemanagers {
err := sm.Flush()
if err != nil {
errors = append(errors, err.Error())
}
}
if len(errors) > 0 {
details := fmt.Sprint(dgs.name, ": ", strings.Join(errors, "; "))
return &util.GraphError{Type: util.ErrFlushing, Detail: details}
}
return nil
}
/*
Close closes the storage.
*/
func (dgs *DiskGraphStorage) Close() error {
var errors []string
err := dgs.mainDB.Flush()
if err != nil {
errors = append(errors, err.Error())
}
for _, sm := range dgs.storagemanagers {
err := sm.Close()
if err != nil {
errors = append(errors, err.Error())
}
}
if len(errors) > 0 {
details := fmt.Sprint(dgs.name, ": ", strings.Join(errors, "; "))
return &util.GraphError{Type: util.ErrClosing, Detail: details}
}
return nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graphstorage
import "devt.de/krotik/eliasdb/storage"
/*
MgsRetClose is the return value on successful close
*/
var MgsRetClose error
/*
MgsRetFlushAll is the return value on successful flush all
*/
var MgsRetFlushAll error
/*
MgsRetFlushMain is the return value on successful flush
*/
var MgsRetFlushMain error
/*
MgsRetRollbackMain is the return value on successful rollback
*/
var MgsRetRollbackMain error
/*
MemoryGraphStorage data structure
*/
type MemoryGraphStorage struct {
name string // Name of the graph storage
mainDB map[string]string // Database storing names
storagemanagers map[string]storage.Manager // Map of StorageManagers
}
/*
NewMemoryGraphStorage creates a new MemoryGraphStorage instance.
*/
func NewMemoryGraphStorage(name string) Storage {
return &MemoryGraphStorage{name, make(map[string]string),
make(map[string]storage.Manager)}
}
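/*
Example usage (a minimal sketch): memory-only storage is mainly useful for
tests since it needs no cleanup and its operations always succeed unless the
Mgs* return values above are set:

    mgs := graphstorage.NewMemoryGraphStorage("test")
    sm := mgs.StorageManager("mystorage", true)
    _ = sm // use the storage manager ...
*/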
/*
Name returns the name of the MemoryGraphStorage instance.
*/
func (mgs *MemoryGraphStorage) Name() string {
return mgs.name
}
/*
MainDB returns the main database.
*/
func (mgs *MemoryGraphStorage) MainDB() map[string]string {
return mgs.mainDB
}
/*
RollbackMain rolls back the main database.
*/
func (mgs *MemoryGraphStorage) RollbackMain() error {
return MgsRetRollbackMain
}
/*
FlushMain writes the main database to the storage.
*/
func (mgs *MemoryGraphStorage) FlushMain() error {
return MgsRetFlushMain
}
/*
StorageManager gets a storage manager with a certain name. A non-existing
StorageManager is created automatically if the create flag is set to true.
*/
func (mgs *MemoryGraphStorage) StorageManager(smname string, create bool) storage.Manager {
sm, ok := mgs.storagemanagers[smname]
if !ok && create {
sm = storage.NewMemoryStorageManager(mgs.name + "/" + smname)
mgs.storagemanagers[smname] = sm
}
return sm
}
/*
FlushAll writes all pending changes to the storage.
*/
func (mgs *MemoryGraphStorage) FlushAll() error {
return MgsRetFlushAll
}
/*
Close closes the storage.
*/
func (mgs *MemoryGraphStorage) Close() error {
return MgsRetClose
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"bytes"
"encoding/binary"
"encoding/gob"
"fmt"
"strings"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/util"
"devt.de/krotik/eliasdb/hash"
"devt.de/krotik/eliasdb/storage"
)
// Helper functions for GraphManager
// =================================
/*
checkPartitionName checks if a given partition name is valid.
*/
func (gm *Manager) checkPartitionName(part string) error {
if !stringutil.IsAlphaNumeric(part) {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Partition name %v is not alphanumeric - can only contain [a-zA-Z0-9_]", part),
}
}
return nil
}
/*
checkNode checks if a given node can be written to the datastore.
*/
func (gm *Manager) checkNode(node data.Node) error {
return gm.checkItemGeneral(node, "Node")
}
/*
checkItemGeneral checks the general properties of a given graph item.
*/
func (gm *Manager) checkItemGeneral(node data.Node, name string) error {
if node.Key() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: name + " is missing a key value"}
}
if node.Kind() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: name + " is missing a kind value"}
}
if !stringutil.IsAlphaNumeric(node.Kind()) {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("%v kind %v is not alphanumeric - can only contain [a-zA-Z0-9_]", name, node.Kind()),
}
}
for attr := range node.Data() {
if attr == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: name + " contains empty string attribute name"}
}
}
return nil
}
/*
checkEdge checks if a given edge can be written to the datastore.
*/
func (gm *Manager) checkEdge(edge data.Edge) error {
if err := gm.checkItemGeneral(edge, "Edge"); err != nil {
return err
}
if edge.End1Key() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a key value for end1"}
}
if edge.End1Kind() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a kind value for end1"}
}
if edge.End1Role() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a role value for end1"}
} else if !stringutil.IsAlphaNumeric(edge.End1Role()) {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Edge role %v is not alphanumeric - can only contain [a-zA-Z0-9_]", edge.End1Role()),
}
}
if _, ok := edge.Attr(data.EdgeEnd1Cascading).(bool); !ok {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a cascading value for end1"}
}
if edge.End2Key() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a key value for end2"}
}
if edge.End2Kind() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a kind value for end2"}
}
if edge.End2Role() == "" {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a role value for end2"}
} else if !stringutil.IsAlphaNumeric(edge.End2Role()) {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Edge role %v is not alphanumeric - can only contain [a-zA-Z0-9_]", edge.End2Role()),
}
}
if _, ok := edge.Attr(data.EdgeEnd2Cascading).(bool); !ok {
return &util.GraphError{Type: util.ErrInvalidData, Detail: "Edge is missing a cascading value for end2"}
}
return nil
}
/*
writeNodeCount writes a new node count for a specific kind to the datastore.
*/
func (gm *Manager) writeNodeCount(kind string, count uint64, flush bool) error {
numstr := make([]byte, 8)
binary.LittleEndian.PutUint64(numstr, count)
gm.gs.MainDB()[MainDBNodeCount+kind] = string(numstr)
if flush {
return gm.gs.FlushMain()
}
return nil
}
/*
writeEdgeCount writes a new edge count for a specific kind to the datastore.
*/
func (gm *Manager) writeEdgeCount(kind string, count uint64, flush bool) error {
numstr := make([]byte, 8)
binary.LittleEndian.PutUint64(numstr, count)
gm.gs.MainDB()[MainDBEdgeCount+kind] = string(numstr)
if flush {
return gm.gs.FlushMain()
}
return nil
}
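/*
The counts are stored as 8 byte little-endian strings in the MainDB. A small
round-trip sketch (stdlib only, for illustration):

    numstr := make([]byte, 8)
    binary.LittleEndian.PutUint64(numstr, 42)
    stored := string(numstr)                            // value as kept in the MainDB
    count := binary.LittleEndian.Uint64([]byte(stored)) // count == 42
*/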
/*
getNodeStorageHTree gets two HTree instances which can be used to store nodes.
This function ensures that dependent entries in other data structures exist.
*/
func (gm *Manager) getNodeStorageHTree(part string, kind string,
create bool) (*hash.HTree, *hash.HTree, error) {
gm.storageMutex.Lock()
defer gm.storageMutex.Unlock()
// Check if the partition name is valid
if err := gm.checkPartitionName(part); err != nil {
return nil, nil, err
}
// Check if the node kind is valid
if !stringutil.IsAlphaNumeric(kind) {
return nil, nil, &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Node kind %v is not alphanumeric - can only contain [a-zA-Z0-9_]", kind),
}
}
// Make sure all required lookup maps are there
if gm.getMainDBMap(MainDBNodeKinds) == nil {
gm.storeMainDBMap(MainDBNodeKinds, make(map[string]string))
}
if gm.getMainDBMap(MainDBParts) == nil {
gm.storeMainDBMap(MainDBParts, make(map[string]string))
}
if gm.getMainDBMap(MainDBNodeAttrs+kind) == nil {
gm.storeMainDBMap(MainDBNodeAttrs+kind, make(map[string]string))
}
if gm.getMainDBMap(MainDBNodeEdges+kind) == nil {
gm.storeMainDBMap(MainDBNodeEdges+kind, make(map[string]string))
}
if _, ok := gm.gs.MainDB()[MainDBNodeCount+kind]; !ok {
gm.gs.MainDB()[MainDBNodeCount+kind] = string(make([]byte, 8))
}
// Return the actual storage
gs := gm.gs.StorageManager(part+kind+StorageSuffixNodes, create)
if gs == nil {
return nil, nil, nil
}
attrTree, err := gm.getHTree(gs, RootIDNodeHTree)
if err != nil {
return nil, nil, err
}
valTree, err := gm.getHTree(gs, RootIDNodeHTreeSecond)
if err != nil {
return nil, nil, err
}
return attrTree, valTree, nil
}
/*
getEdgeStorageHTree gets a HTree which can be used to store edges. This function
ensures that dependent entries in other data structures exist.
*/
func (gm *Manager) getEdgeStorageHTree(part string, kind string, create bool) (*hash.HTree, error) {
gm.storageMutex.Lock()
defer gm.storageMutex.Unlock()
// Check if the partition name is valid
if err := gm.checkPartitionName(part); err != nil {
return nil, err
}
// Check if the edge kind is valid
if !stringutil.IsAlphaNumeric(kind) {
return nil, &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Edge kind %v is not alphanumeric - can only contain [a-zA-Z0-9_]", kind),
}
}
// Make sure all required lookup maps are there
if gm.getMainDBMap(MainDBEdgeKinds) == nil {
gm.storeMainDBMap(MainDBEdgeKinds, make(map[string]string))
}
if gm.getMainDBMap(MainDBEdgeAttrs+kind) == nil {
gm.storeMainDBMap(MainDBEdgeAttrs+kind, make(map[string]string))
}
if _, ok := gm.gs.MainDB()[MainDBEdgeCount+kind]; !ok {
gm.gs.MainDB()[MainDBEdgeCount+kind] = string(make([]byte, 8))
}
// Return the actual storage
gs := gm.gs.StorageManager(part+kind+StorageSuffixEdges, create)
if gs == nil {
return nil, nil
}
return gm.getHTree(gs, RootIDNodeHTree)
}
/*
getNodeIndexHTree gets a HTree which can be used to index nodes.
*/
func (gm *Manager) getNodeIndexHTree(part string, kind string, create bool) (*hash.HTree, error) {
return gm.getIndexHTree(part, kind, create, "Node", StorageSuffixNodesIndex)
}
/*
getEdgeIndexHTree gets a HTree which can be used to index edges.
*/
func (gm *Manager) getEdgeIndexHTree(part string, kind string, create bool) (*hash.HTree, error) {
return gm.getIndexHTree(part, kind, create, "Edge", StorageSuffixEdgesIndex)
}
/*
getIndexHTree gets a HTree which can be used to index items.
*/
func (gm *Manager) getIndexHTree(part string, kind string, create bool, name string, suffix string) (*hash.HTree, error) {
gm.storageMutex.Lock()
defer gm.storageMutex.Unlock()
// Check if the partition name is valid
if err := gm.checkPartitionName(part); err != nil {
return nil, err
}
// Check if the kind is valid
if !stringutil.IsAlphaNumeric(kind) {
return nil, &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("%v kind %v is not alphanumeric - can only contain [a-zA-Z0-9_]", name, kind),
}
}
gs := gm.gs.StorageManager(part+kind+suffix, create)
if gs == nil {
return nil, nil
}
return gm.getHTree(gs, RootIDNodeHTree)
}
/*
flushNodeStorage flushes a node storage.
*/
func (gm *Manager) flushNodeStorage(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixNodes, false); sm != nil {
if err := sm.Flush(); err != nil {
return &util.GraphError{Type: util.ErrFlushing, Detail: err.Error()}
}
}
return nil
}
/*
flushNodeIndex flushes a node index.
*/
func (gm *Manager) flushNodeIndex(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixNodesIndex, false); sm != nil {
if err := sm.Flush(); err != nil {
return &util.GraphError{Type: util.ErrFlushing, Detail: err.Error()}
}
}
return nil
}
/*
flushEdgeStorage flushes an edge storage.
*/
func (gm *Manager) flushEdgeStorage(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixEdges, false); sm != nil {
if err := sm.Flush(); err != nil {
return &util.GraphError{Type: util.ErrFlushing, Detail: err.Error()}
}
}
return nil
}
/*
flushEdgeIndex flushes an edge index.
*/
func (gm *Manager) flushEdgeIndex(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixEdgesIndex, false); sm != nil {
if err := sm.Flush(); err != nil {
return &util.GraphError{Type: util.ErrFlushing, Detail: err.Error()}
}
}
return nil
}
/*
rollbackNodeStorage rolls back a node storage.
*/
func (gm *Manager) rollbackNodeStorage(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixNodes, false); sm != nil {
if err := sm.Rollback(); err != nil {
return &util.GraphError{Type: util.ErrRollback, Detail: err.Error()}
}
}
return nil
}
/*
rollbackNodeIndex rolls back a node index.
*/
func (gm *Manager) rollbackNodeIndex(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixNodesIndex, false); sm != nil {
if err := sm.Rollback(); err != nil {
return &util.GraphError{Type: util.ErrRollback, Detail: err.Error()}
}
}
return nil
}
/*
rollbackEdgeStorage rolls back an edge storage.
*/
func (gm *Manager) rollbackEdgeStorage(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixEdges, false); sm != nil {
if err := sm.Rollback(); err != nil {
return &util.GraphError{Type: util.ErrRollback, Detail: err.Error()}
}
}
return nil
}
/*
rollbackEdgeIndex rolls back an edge index.
*/
func (gm *Manager) rollbackEdgeIndex(part string, kind string) error {
if sm := gm.gs.StorageManager(part+kind+StorageSuffixEdgesIndex, false); sm != nil {
if err := sm.Rollback(); err != nil {
return &util.GraphError{Type: util.ErrRollback, Detail: err.Error()}
}
}
return nil
}
/*
getHTree creates or loads a HTree from a given StorageManager. HTrees are not cached
since the creation shouldn't have too much overhead.
*/
func (gm *Manager) getHTree(sm storage.Manager, slot int) (*hash.HTree, error) {
var htree *hash.HTree
var err error
loc := sm.Root(slot)
if loc == 0 {
// Create a new HTree and store its location
htree, err = hash.NewHTree(sm)
if err != nil {
err = &util.GraphError{Type: util.ErrAccessComponent, Detail: err.Error()}
} else {
sm.SetRoot(slot, htree.Location())
}
} else {
// Load existing HTree
htree, err = hash.LoadHTree(sm, loc)
if err != nil {
err = &util.GraphError{Type: util.ErrAccessComponent, Detail: err.Error()}
}
}
return htree, err
}
/*
getMainDBMap gets a map from the main database.
*/
func (gm *Manager) getMainDBMap(key string) map[string]string {
// First try to cache
mapval, ok := gm.mapCache[key]
if ok {
return mapval
}
// Lookup map and decode it
val, ok := gm.gs.MainDB()[key]
if ok {
mapval = stringToMap(val)
gm.mapCache[key] = mapval
}
return mapval
}
/*
storeMainDBMap stores a map in the main database. The map is stored as a gob byte slice.
Once it has been decoded it is cached for read operations.
*/
func (gm *Manager) storeMainDBMap(key string, mapval map[string]string) {
gm.mapCache[key] = mapval
gm.gs.MainDB()[key] = mapToString(mapval)
}
// Static helper functions
// =======================
/*
IsFullSpec determines whether a given spec is fully specified
(i.e. all four spec components are non-empty)
*/
func IsFullSpec(spec string) bool {
sspec := strings.Split(spec, ":")
if len(sspec) != 4 || sspec[0] == "" || sspec[1] == "" || sspec[2] == "" || sspec[3] == "" {
return false
}
return true
}
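/*
Illustrative values (spec format: end1 role, edge kind, end2 role, end2 kind):

    IsFullSpec("friend:Knows:friend:Person") // true
    IsFullSpec("friend:Knows:friend:")       // false - empty component
    IsFullSpec("friend:Knows:friend")        // false - only three components
*/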
/*
mapToString turns a map of strings into a single string.
*/
func mapToString(stringmap map[string]string) string {
bb := &bytes.Buffer{}
gob.NewEncoder(bb).Encode(stringmap)
return string(bb.Bytes())
}
/*
stringToMap turns a string into a map of strings.
*/
func stringToMap(mapString string) map[string]string {
var stringmap map[string]string
if err := gob.NewDecoder(bytes.NewBufferString(mapString)).Decode(&stringmap); err != nil {
panic(fmt.Sprint("Cannot decode:", mapString, err))
}
return stringmap
}
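/*
Round-trip sketch (for illustration): stringToMap panics on malformed input,
so it should only be fed strings which were produced by mapToString:

    s := mapToString(map[string]string{"a": "1", "b": "2"})
    m := stringToMap(s) // m["a"] == "1" && m["b"] == "2"
*/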
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"bytes"
"encoding/json"
"fmt"
"io"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/graph/data"
)
/*
ExportPartition dumps the contents of a partition to an io.Writer in JSON format:
{
nodes : [ { <attr> : <value> }, ... ]
edges : [ { <attr> : <value> }, ... ]
}
*/
func ExportPartition(out io.Writer, part string, gm *Manager) error {
// Use a map to deduplicate found edge keys
edgeKeys := make(map[string]string)
writeData := func(data map[string]interface{}) {
nk := 0
for k, v := range data {
// JSON encode value - values which cannot be JSON encoded become null
jv, err := json.Marshal(v)
if err != nil {
jv = []byte("null")
}
// Write out the node attributes
fmt.Fprintf(out, " \"%s\" : %s", k, jv)
if nk < len(data)-1 {
fmt.Fprint(out, ",")
}
fmt.Fprint(out, "\n")
nk++
}
}
// Iterate over all available node kinds
fmt.Fprint(out, `{
"nodes" : [
`)
// Loop over all available kinds and build iterators if nodes
// exist in the given partition
var iters []*NodeKeyIterator
var kinds []string
for _, k := range gm.NodeKinds() {
it, err := gm.NodeKeyIterator(part, k)
if err != nil {
return err
}
if it != nil {
iters = append(iters, it)
kinds = append(kinds, k)
}
}
for ik, it := range iters {
// Iterate over all node keys
for i := 0; it.HasNext(); i++ {
key := it.Next()
if it.LastError != nil {
return it.LastError
}
node, err := gm.FetchNode(part, key, kinds[ik])
if err != nil {
return err
}
// Fetch all connected relationships and store their key and kind
_, edges, err := gm.TraverseMulti(part, key, kinds[ik], ":::", false)
if err != nil {
return err
}
for _, edge := range edges {
edgeKeys[edge.Kind()+edge.Key()] = edge.Kind()
}
// Write out JSON object
fmt.Fprint(out, " {\n")
writeData(node.Data())
if it.HasNext() || ik < len(iters)-1 {
fmt.Fprint(out, " },\n")
} else {
fmt.Fprint(out, " }\n")
}
}
}
fmt.Fprint(out, ` ],
"edges" : [
`)
// Iterate over all available edge kinds
ie := 0
for key, kind := range edgeKeys {
key = key[len(kind):]
edge, err := gm.FetchEdge(part, key, kind)
if err != nil {
return err
}
// Write out JSON object
fmt.Fprint(out, " {\n")
writeData(edge.Data())
if ie < len(edgeKeys)-1 {
fmt.Fprint(out, " },\n")
} else {
fmt.Fprint(out, " }\n")
}
ie++
}
fmt.Fprint(out, ` ]
}`)
return nil
}
/*
SortDump sorts a string result which was produced by ExportPartition.
Do not use this for very large results. Panics if the input data is not valid.
*/
func SortDump(in string) string {
var nodes []data.Node
var edges []data.Node
dec := json.NewDecoder(bytes.NewBufferString(in))
gdata := make(map[string][]map[string]interface{})
errorutil.AssertOk(dec.Decode(&gdata))
nDataList := gdata["nodes"]
for _, n := range nDataList {
nodes = append(nodes, data.NewGraphNodeFromMap(n))
}
data.NodeSort(nodes)
for i, n := range nodes {
nDataList[i] = n.Data()
}
eDataList := gdata["edges"]
for _, n := range eDataList {
edges = append(edges, data.NewGraphNodeFromMap(n))
}
data.NodeSort(edges)
for i, e := range edges {
eDataList[i] = e.Data()
}
res, err := json.MarshalIndent(map[string]interface{}{
"nodes": nDataList,
"edges": eDataList,
}, "", " ")
errorutil.AssertOk(err)
return string(res)
}
/*
ImportPartition imports the JSON contents of an io.Reader into a given partition.
The following format is expected:
{
nodes : [ { <attr> : <value> }, ... ]
edges : [ { <attr> : <value> }, ... ]
}
*/
func ImportPartition(in io.Reader, part string, gm *Manager) error {
dec := json.NewDecoder(in)
gdata := make(map[string][]map[string]interface{})
if err := dec.Decode(&gdata); err != nil {
return fmt.Errorf("Could not decode file content as object with list of nodes and edges: %s", err.Error())
}
nDataList := gdata["nodes"]
eDataList := gdata["edges"]
// Create a transaction
trans := NewGraphTrans(gm)
// Store nodes in transaction
for _, ndata := range nDataList {
node := data.NewGraphNodeFromMap(ndata)
if err := trans.StoreNode(part, node); err != nil {
return err
}
}
// Store edges in transaction
for _, edata := range eDataList {
edge := data.NewGraphEdgeFromNode(data.NewGraphNodeFromMap(edata))
if err := trans.StoreEdge(part, edge); err != nil {
return err
}
}
// Commit transaction
return trans.Commit()
}
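/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance):
copy the contents of partition "main" into partition "backup" by piping
ExportPartition into ImportPartition:

    var buf bytes.Buffer
    if err := ExportPartition(&buf, "main", gm); err == nil {
        err = ImportPartition(&buf, "backup", gm)
    }
*/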
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"devt.de/krotik/eliasdb/graph/util"
"devt.de/krotik/eliasdb/hash"
)
/*
NodeKeyIterator can be used to iterate node keys of a certain node kind.
*/
type NodeKeyIterator struct {
gm *Manager // GraphManager which created the iterator
it *hash.HTreeIterator // Internal HTree iterator
LastError error // Last encountered error
}
/*
Next returns the next node key. Sets the LastError attribute if an error occurs.
*/
func (it *NodeKeyIterator) Next() string {
// Take reader lock
it.gm.mutex.RLock()
defer it.gm.mutex.RUnlock()
k, _ := it.it.Next()
if it.it.LastError != nil {
it.LastError = &util.GraphError{Type: util.ErrReading, Detail: it.it.LastError.Error()}
return ""
} else if len(k) == 0 {
return ""
}
return string(k[len(PrefixNSAttrs):])
}
/*
HasNext returns whether there is a next node key.
*/
func (it *NodeKeyIterator) HasNext() bool {
return it.it.HasNext()
}
/*
Error returns the last encountered error.
*/
func (it *NodeKeyIterator) Error() error {
return it.LastError
}
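/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance).
Errors during iteration surface through LastError, not through Next's
return value:

    it, err := gm.NodeKeyIterator("main", "Person")
    for err == nil && it != nil && it.HasNext() {
        key := it.Next()
        if it.LastError != nil {
            break
        }
        _ = key // process the key ...
    }
*/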
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"sort"
"strings"
"sync"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/util"
)
/*
graphRulesManager data structure
*/
type graphRulesManager struct {
gm *Manager // GraphManager which provides events
rules map[string]Rule // Map of graph rules
eventMap map[int]map[string]Rule // Map of events to graph rules
}
/*
Rule models a graph rule.
*/
type Rule interface {
/*
Name returns the name of the rule.
*/
Name() string
/*
Handles returns a list of events which are handled by this rule.
*/
Handles() []int
/*
Handle handles an event. The function should write all changes to the
given transaction.
*/
Handle(gm *Manager, trans Trans, event int, data ...interface{}) error
}
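/*
A custom rule is a plain struct implementing this interface - a minimal
sketch (the rule name and behaviour are illustrative only):

    type logNodeCreations struct{}

    func (r *logNodeCreations) Name() string   { return "example.lognodecreations" }
    func (r *logNodeCreations) Handles() []int { return []int{EventNodeCreated} }

    func (r *logNodeCreations) Handle(gm *Manager, trans Trans, event int, ed ...interface{}) error {
        part := ed[0].(string)
        node := ed[1].(data.Node)
        fmt.Println("Node created in", part, "-", node.Key())
        return nil
    }
*/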
/*
graphEvent main event handler which receives all graph related events.
*/
func (gr *graphRulesManager) graphEvent(trans Trans, event int, data ...interface{}) error {
var result error
var errors []string
rules, ok := gr.eventMap[event]
handled := false // Flag to return a special handled error if no other error occurred
if ok {
for _, rule := range rules {
// Create a GraphManager clone which can be used for queries only
gmclone := gr.cloneGraphManager()
gmclone.mutex.RLock()
defer gmclone.mutex.RUnlock()
// Handle the event
err := rule.Handle(gmclone, trans, event, data...)
if err != nil {
if err == ErrEventHandled {
handled = true
} else {
if errors == nil {
errors = make([]string, 0)
}
errors = append(errors, err.Error())
}
}
}
}
if errors != nil {
return &util.GraphError{Type: util.ErrRule, Detail: strings.Join(errors, ";")}
}
if handled {
result = ErrEventHandled
}
return result
}
/*
cloneGraphManager clones a given graph manager and inserts a new RWMutex.
*/
func (gr *graphRulesManager) cloneGraphManager() *Manager {
return &Manager{gr.gm.gs, gr, gr.gm.nm, gr.gm.mapCache, &sync.RWMutex{}, &sync.Mutex{}}
}
/*
SetGraphRule sets a GraphRule.
*/
func (gr *graphRulesManager) SetGraphRule(rule Rule) {
gr.rules[rule.Name()] = rule
for _, handledEvent := range rule.Handles() {
rules, ok := gr.eventMap[handledEvent]
if !ok {
rules = make(map[string]Rule)
gr.eventMap[handledEvent] = rules
}
rules[rule.Name()] = rule
}
}
/*
GraphRules returns a list of all available graph rules.
*/
func (gr *graphRulesManager) GraphRules() []string {
ret := make([]string, 0, len(gr.rules))
for rule := range gr.rules {
ret = append(ret, rule)
}
sort.StringSlice(ret).Sort()
return ret
}
// System rule SystemRuleDeleteNodeEdges
// =====================================
/*
SystemRuleDeleteNodeEdges is a system rule to delete all edges when a node is
deleted. It also deletes the node at the other end if the cascading flag is set on the edge.
*/
type SystemRuleDeleteNodeEdges struct {
}
/*
Name returns the name of the rule.
*/
func (r *SystemRuleDeleteNodeEdges) Name() string {
return "system.deletenodeedges"
}
/*
Handles returns a list of events which are handled by this rule.
*/
func (r *SystemRuleDeleteNodeEdges) Handles() []int {
return []int{EventNodeDeleted}
}
/*
Handle handles an event.
*/
func (r *SystemRuleDeleteNodeEdges) Handle(gm *Manager, trans Trans, event int, ed ...interface{}) error {
part := ed[0].(string)
node := ed[1].(data.Node)
// Get all connected nodes and relationships
nnodes, edges, err := gm.TraverseMulti(part, node.Key(), node.Kind(), ":::", false)
if err != nil {
return err
}
edgeRemovalCount := make(map[string]int) // Count of cascading last edges which are removed
// Nodes which need to be checked if the last edge of a certain kind has been removed
var nodeRemovalCheckNodes []data.Node
var nodeRemovalCheckSpecs []string
for i, edge := range edges {
// Remove the edge in any case
trans.RemoveEdge(part, edge.Key(), edge.Kind())
// Remove the node on the other side if the edge is cascading on this end
if edge.End1IsCascading() {
if edge.End1IsCascadingLast() {
// Only remove the node at the other end if all edges of this kind
// have been removed from that node after this operation
// Get edge spec from other side
nodeOtherSide := nnodes[i]
specOtherSide := edge.Spec(nodeOtherSide.Key())
if c, ok := edgeRemovalCount[specOtherSide]; ok {
edgeRemovalCount[specOtherSide] = c + 1
} else {
edgeRemovalCount[specOtherSide] = 1
}
nodeRemovalCheckSpecs = append(nodeRemovalCheckSpecs, specOtherSide)
nodeRemovalCheckNodes = append(nodeRemovalCheckNodes, nodeOtherSide)
} else {
// No error handling at this point since only a wrong partition
// name can cause an issue and this would have failed before
trans.RemoveNode(part, nnodes[i].Key(), nnodes[i].Kind())
}
}
}
// Check cascading last edges
for i, node := range nodeRemovalCheckNodes {
specToCheck := nodeRemovalCheckSpecs[i]
removalCount := edgeRemovalCount[specToCheck]
if err == nil {
_, edges, err = gm.TraverseMulti(part, node.Key(), node.Kind(), specToCheck, false)
if len(edges)-removalCount == 0 {
trans.RemoveNode(part, node.Key(), node.Kind())
}
}
}
return err
}
// System rule SystemRuleUpdateNodeStats
// =====================================
/*
SystemRuleUpdateNodeStats is a system rule to update info entries such as
known node or edge kinds in the MainDB.
*/
type SystemRuleUpdateNodeStats struct {
}
/*
Name returns the name of the rule.
*/
func (r *SystemRuleUpdateNodeStats) Name() string {
return "system.updatenodestats"
}
/*
Handles returns a list of events which are handled by this rule.
*/
func (r *SystemRuleUpdateNodeStats) Handles() []int {
return []int{EventNodeCreated, EventNodeUpdated,
EventEdgeCreated, EventEdgeUpdated}
}
/*
Handle handles an event.
*/
func (r *SystemRuleUpdateNodeStats) Handle(gm *Manager, trans Trans, event int, ed ...interface{}) error {
attrMap := MainDBNodeAttrs
if event == EventEdgeCreated {
edge := ed[1].(data.Edge)
updateNodeRels := func(key string, kind string) {
spec := edge.Spec(key)
specs := gm.getMainDBMap(MainDBNodeEdges + kind)
if specs != nil {
if _, ok := specs[spec]; !ok {
specs[spec] = ""
gm.storeMainDBMap(MainDBNodeEdges+kind, specs)
}
}
}
// Update stored relationships for both ends
updateNodeRels(edge.End1Key(), edge.End1Kind())
updateNodeRels(edge.End2Key(), edge.End2Kind())
attrMap = MainDBEdgeAttrs
}
node := ed[1].(data.Node)
kind := node.Kind()
// Check if a new partition or kind was used
if event == EventNodeCreated || event == EventEdgeCreated {
part := ed[0].(string)
updateMainDB := func(entry string, val string) {
vals := gm.getMainDBMap(entry)
if _, ok := vals[val]; !ok {
vals[val] = ""
gm.storeMainDBMap(entry, vals)
}
}
updateMainDB(MainDBParts, part)
if event == EventNodeCreated {
updateMainDB(MainDBNodeKinds, kind)
} else {
updateMainDB(MainDBEdgeKinds, kind)
}
}
storeAttrs := false
attrs := gm.getMainDBMap(attrMap + kind)
if attrs != nil {
// Update stored node attributes
for attr := range node.Data() {
if _, ok := attrs[attr]; !ok {
attrs[attr] = ""
storeAttrs = true
}
}
// Store attribute map if something was changed
if storeAttrs {
gm.storeMainDBMap(attrMap+kind, attrs)
}
}
return nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package graph
import (
"fmt"
"strings"
"sync"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/graph/data"
"devt.de/krotik/eliasdb/graph/util"
)
/*
Trans is a transaction object which should be used to group node and edge operations.
*/
type Trans interface {
/*
ID returns a unique transaction ID.
*/
ID() string
/*
String returns a string representation of this transaction.
*/
String() string
/*
Counts returns the transaction size in terms of objects. Returned values
are nodes to store, edges to store, nodes to remove and edges to remove.
*/
Counts() (int, int, int, int)
/*
IsEmpty returns whether this transaction is empty.
*/
IsEmpty() bool
/*
Commit writes the transaction to the graph database. An automatic rollback is done if
any non-fatal error occurs. Failed transactions cannot be committed again.
Serious write errors which may corrupt the database will cause a panic.
*/
Commit() error
/*
StoreNode stores a single node in a partition of the graph. This function
overwrites any existing node.
*/
StoreNode(part string, node data.Node) error
/*
UpdateNode updates a single node in a partition of the graph. This function will
only update the given values of the node.
*/
UpdateNode(part string, node data.Node) error
/*
RemoveNode removes a single node from a partition of the graph.
*/
RemoveNode(part string, nkey string, nkind string) error
/*
StoreEdge stores a single edge in a partition of the graph. This function
overwrites any existing edge.
*/
StoreEdge(part string, edge data.Edge) error
/*
RemoveEdge removes a single edge from a partition of the graph.
*/
RemoveEdge(part string, ekey string, ekind string) error
}
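/*
Typical usage (a minimal sketch; gm is assumed to be a Manager instance):
group several operations and write them in one go:

    trans := NewGraphTrans(gm)
    node := data.NewGraphNode()
    node.SetAttr("key", "1")
    node.SetAttr("kind", "Person")
    if err := trans.StoreNode("main", node); err == nil {
        err = trans.Commit()
    }
*/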
/*
NewGraphTrans creates a new graph transaction. This object is not thread safe
and should only be used for non-concurrent use cases; use NewConcurrentGraphTrans
for concurrent use cases.
*/
func NewGraphTrans(gm *Manager) Trans {
return newInternalGraphTrans(gm)
}
/*
NewConcurrentGraphTrans creates a new thread-safe graph transaction.
*/
func NewConcurrentGraphTrans(gm *Manager) Trans {
return &concurrentTrans{NewGraphTrans(gm), &sync.RWMutex{}}
}
/*
NewRollingTrans wraps an existing transaction into a rolling transaction.
Rolling transactions can be used for VERY large datasets and will commit
themselves after n operations. Rolling transactions are always thread-safe.
*/
func NewRollingTrans(t Trans, n int, gm *Manager, newTrans func(*Manager) Trans) Trans {
idCounterLock.Lock()
defer idCounterLock.Unlock()
idCounter++
// Smallest commit threshold is 1
if n < 1 {
n = 1
}
return &rollingTrans{
id: fmt.Sprint(idCounter),
gm: gm,
currentTrans: t,
newTransFunc: newTrans,
transErrors: errorutil.NewCompositeError(),
opThreshold: n,
opCount: 0,
inFlightCount: 0,
wg: &sync.WaitGroup{},
countNodeIns: 0,
countNodeRem: 0,
countEdgeIns: 0,
countEdgeRem: 0,
transLock: &sync.RWMutex{},
}
}
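/*
Example usage (a minimal sketch; gm is assumed to be a Manager instance):
a rolling transaction which commits itself after every 1000 operations:

    trans := NewRollingTrans(NewConcurrentGraphTrans(gm), 1000, gm, NewConcurrentGraphTrans)
    // ... many StoreNode / StoreEdge calls ...
    err := trans.Commit() // commit the remaining operations
*/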
/*
newInternalGraphTrans is used for internal transactions. The returned object
contains extra fields which are only for internal use.
*/
func newInternalGraphTrans(gm *Manager) *baseTrans {
idCounterLock.Lock()
defer idCounterLock.Unlock()
idCounter++
return &baseTrans{fmt.Sprint(idCounter), gm, false, make(map[string]data.Node), make(map[string]data.Node),
make(map[string]data.Edge), make(map[string]data.Edge)}
}
/*
idCounter is a simple counter for ids
*/
var idCounter uint64
var idCounterLock = &sync.Mutex{}
/*
baseTrans is the main data structure for a graph transaction
*/
type baseTrans struct {
id string // Unique transaction ID - not used by EliasDB
gm *Manager // Graph manager which created this transaction
subtrans bool // Flag if the transaction is a subtransaction
storeNodes map[string]data.Node // Nodes which should be stored
removeNodes map[string]data.Node // Nodes which should be removed
storeEdges map[string]data.Edge // Edges which should be stored
removeEdges map[string]data.Edge // Edges which should be removed
}
/*
ID returns a unique transaction ID.
*/
func (gt *baseTrans) ID() string {
return gt.id
}
/*
IsEmpty returns whether this transaction is empty.
*/
func (gt *baseTrans) IsEmpty() bool {
sn, se, rn, re := gt.Counts()
return sn == 0 && se == 0 && rn == 0 && re == 0
}
/*
Counts returns the transaction size in terms of objects. Returned values
are nodes to store, edges to store, nodes to remove and edges to remove.
*/
func (gt *baseTrans) Counts() (int, int, int, int) {
return len(gt.storeNodes), len(gt.storeEdges), len(gt.removeNodes), len(gt.removeEdges)
}
/*
String returns a string representation of this transaction.
*/
func (gt *baseTrans) String() string {
sn, se, rn, re := gt.Counts()
return fmt.Sprintf("Transaction %v - Nodes: I:%v R:%v - Edges: I:%v R:%v",
gt.id, sn, rn, se, re)
}
/*
Commit writes the transaction to the graph database. An automatic rollback is done if
any non-fatal error occurs. Failed transactions cannot be committed again.
Serious write errors which may corrupt the database will cause a panic.
*/
func (gt *baseTrans) Commit() error {
// Take writer lock if we are not in a subtransaction
if !gt.subtrans {
gt.gm.mutex.Lock()
defer gt.gm.mutex.Unlock()
}
// Return if there is nothing to do
if gt.IsEmpty() {
return nil
}
doRollback := func(nodePartsAndKinds map[string]string,
edgePartsAndKinds map[string]string) {
// Rollback main database
gt.gm.gs.RollbackMain()
// Rollback node storages
for kkey := range nodePartsAndKinds {
partAndKind := strings.Split(kkey, "#")
gt.gm.rollbackNodeIndex(partAndKind[0], partAndKind[1])
gt.gm.rollbackNodeStorage(partAndKind[0], partAndKind[1])
}
gt.storeNodes = make(map[string]data.Node)
gt.removeNodes = make(map[string]data.Node)
// Rollback edge storages
if edgePartsAndKinds != nil {
for kkey := range edgePartsAndKinds {
partAndKind := strings.Split(kkey, "#")
gt.gm.rollbackEdgeIndex(partAndKind[0], partAndKind[1])
gt.gm.rollbackEdgeStorage(partAndKind[0], partAndKind[1])
}
}
gt.storeEdges = make(map[string]data.Edge)
gt.removeEdges = make(map[string]data.Edge)
}
// Write nodes and edges until everything has been written
nodePartsAndKinds := make(map[string]string)
edgePartsAndKinds := make(map[string]string)
for !gt.IsEmpty() {
// Write the nodes first
if err := gt.commitNodes(nodePartsAndKinds, edgePartsAndKinds); err != nil {
doRollback(nodePartsAndKinds, nil)
return err
}
// After the nodes, write the edges
if err := gt.commitEdges(nodePartsAndKinds, edgePartsAndKinds); err != nil {
doRollback(nodePartsAndKinds, edgePartsAndKinds)
return err
}
}
// Flush changes - panic instead of error reporting since the database
// may be inconsistent
panicIfError := func(err error) {
if err != nil {
panic("Fatal GraphError:" + err.Error())
}
}
panicIfError(gt.gm.gs.FlushMain())
for kkey := range nodePartsAndKinds {
partAndKind := strings.Split(kkey, "#")
panicIfError(gt.gm.flushNodeIndex(partAndKind[0], partAndKind[1]))
panicIfError(gt.gm.flushNodeStorage(partAndKind[0], partAndKind[1]))
}
for kkey := range edgePartsAndKinds {
partAndKind := strings.Split(kkey, "#")
panicIfError(gt.gm.flushEdgeIndex(partAndKind[0], partAndKind[1]))
panicIfError(gt.gm.flushEdgeStorage(partAndKind[0], partAndKind[1]))
}
return nil
}
/*
commitNodes tries to commit all transaction nodes.
*/
func (gt *baseTrans) commitNodes(nodePartsAndKinds map[string]string, edgePartsAndKinds map[string]string) error {
// First insert nodes
for tkey, node := range gt.storeNodes {
// Get partition and kind
partAndKind := strings.Split(tkey, "#")
nodePartsAndKinds[partAndKind[0]+"#"+partAndKind[1]] = ""
part := partAndKind[0]
// Get the HTrees which store the node index and the node
iht, err := gt.gm.getNodeIndexHTree(part, node.Kind(), true)
if err != nil {
return err
}
attht, valht, err := gt.gm.getNodeStorageHTree(part, node.Kind(), true)
if err != nil {
return err
}
// Write the node to the datastore
oldnode, err := gt.gm.writeNode(node, false, attht, valht, nodeAttributeFilter)
if err != nil {
return err
}
// Increase node count if the node was inserted and write the changes
// to the index.
if oldnode == nil {
currentCount := gt.gm.NodeCount(node.Kind())
gt.gm.writeNodeCount(node.Kind(), currentCount+1, false)
if iht != nil {
err := util.NewIndexManager(iht).Index(node.Key(), node.IndexMap())
if err != nil {
// The node was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
} else if iht != nil {
err := util.NewIndexManager(iht).Reindex(node.Key(), node.IndexMap(),
oldnode.IndexMap())
if err != nil {
// The node was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
// Execute rules
var event int
if oldnode == nil {
event = EventNodeCreated
} else {
event = EventNodeUpdated
}
if err := gt.gm.gr.graphEvent(gt, event, part, node, oldnode); err != nil {
return err
}
delete(gt.storeNodes, tkey)
}
// Then remove nodes
for tkey, node := range gt.removeNodes {
// Get partition and kind
partAndKind := strings.Split(tkey, "#")
nodePartsAndKinds[partAndKind[0]+"#"+partAndKind[1]] = ""
part := partAndKind[0]
// Get the HTrees which store the node index and the node
iht, err := gt.gm.getNodeIndexHTree(part, node.Kind(), false)
if err != nil {
return err
}
attTree, valTree, err := gt.gm.getNodeStorageHTree(part, node.Kind(), false)
if err != nil {
return err
}
if attTree == nil || valTree == nil {
// Kind does not exist - continue
delete(gt.removeNodes, tkey)
continue
}
// Delete the node from the datastore
oldnode, err := gt.gm.deleteNode(node.Key(), node.Kind(), attTree, valTree)
if err != nil {
return err
}
// Update the index
if oldnode != nil {
if iht != nil {
err := util.NewIndexManager(iht).Deindex(node.Key(), oldnode.IndexMap())
if err != nil {
return err
}
}
// Decrease the node count
currentCount := gt.gm.NodeCount(node.Kind())
gt.gm.writeNodeCount(node.Kind(), currentCount-1, false)
// Execute rules
if err := gt.gm.gr.graphEvent(gt, EventNodeDeleted, part, oldnode); err != nil {
return err
}
}
delete(gt.removeNodes, tkey)
}
return nil
}
/*
commitEdges tries to commit all transaction edges.
*/
func (gt *baseTrans) commitEdges(nodePartsAndKinds map[string]string, edgePartsAndKinds map[string]string) error {
// First insert edges
for tkey, edge := range gt.storeEdges {
// Get partition and kind
partAndKind := strings.Split(tkey, "#")
edgePartsAndKinds[partAndKind[0]+"#"+partAndKind[1]] = ""
nodePartsAndKinds[partAndKind[0]+"#"+edge.End1Kind()] = ""
nodePartsAndKinds[partAndKind[0]+"#"+edge.End2Kind()] = ""
part := partAndKind[0]
// Get the HTrees which store the edges and the edge index
iht, err := gt.gm.getEdgeIndexHTree(part, edge.Kind(), true)
if err != nil {
return err
}
edgeht, err := gt.gm.getEdgeStorageHTree(part, edge.Kind(), true)
if err != nil {
return err
}
// Get the HTrees which store the edge endpoints and make sure the endpoints
// exist
end1nodeht, end1ht, err := gt.gm.getNodeStorageHTree(part, edge.End1Kind(), false)
if err != nil {
return err
} else if end1ht == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Can't store edge to non-existing node kind: %v", edge.End1Kind()),
}
} else if end1, err := end1nodeht.Get([]byte(PrefixNSAttrs + edge.End1Key())); err != nil || end1 == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Can't find edge endpoint: %s (%s)", edge.End1Key(), edge.End1Kind()),
}
}
end2nodeht, end2ht, err := gt.gm.getNodeStorageHTree(part, edge.End2Kind(), false)
if err != nil {
return err
} else if end2ht == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: "Can't store edge to non-existing node kind: " + edge.End2Kind()}
} else if end2, err := end2nodeht.Get([]byte(PrefixNSAttrs + edge.End2Key())); err != nil || end2 == nil {
return &util.GraphError{
Type: util.ErrInvalidData,
Detail: fmt.Sprintf("Can't find edge endpoint: %s (%s)", edge.End2Key(), edge.End2Kind()),
}
}
// Write edge to the datastore
oldedge, err := gt.gm.writeEdge(edge, edgeht, end1ht, end2ht)
if err != nil {
return err
}
// Increase edge count if the edge was inserted and write the changes
// to the index.
if oldedge == nil {
// Increase edge count
currentCount := gt.gm.EdgeCount(edge.Kind())
gt.gm.writeEdgeCount(edge.Kind(), currentCount+1, false)
// Write edge data to the index
if iht != nil {
if err := util.NewIndexManager(iht).Index(edge.Key(), edge.IndexMap()); err != nil {
// The edge was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
} else if iht != nil {
err := util.NewIndexManager(iht).Reindex(edge.Key(), edge.IndexMap(),
oldedge.IndexMap())
if err != nil {
// The edge was written at this point and the model is
// consistent; only the index is missing entries
return err
}
}
// Execute rules
var event int
if oldedge == nil {
event = EventEdgeCreated
} else {
event = EventEdgeUpdated
}
if err := gt.gm.gr.graphEvent(gt, event, part, edge, oldedge); err != nil {
return err
}
delete(gt.storeEdges, tkey)
}
// Then remove edges
for tkey, edge := range gt.removeEdges {
// Get partition and kind
partAndKind := strings.Split(tkey, "#")
edgePartsAndKinds[partAndKind[0]+"#"+partAndKind[1]] = ""
nodePartsAndKinds[partAndKind[0]+"#"+edge.End1Kind()] = ""
nodePartsAndKinds[partAndKind[0]+"#"+edge.End2Kind()] = ""
part := partAndKind[0]
// Get the HTrees which store the edges and the edge index
iht, err := gt.gm.getEdgeIndexHTree(part, edge.Kind(), true)
if err != nil {
return err
}
edgeht, err := gt.gm.getEdgeStorageHTree(part, edge.Kind(), true)
if err != nil {
return err
}
// Delete the edge (which is stored like a node) from the datastore
node, err := gt.gm.deleteNode(edge.Key(), edge.Kind(), edgeht, edgeht)
oldedge := data.NewGraphEdgeFromNode(node)
if err != nil {
return err
}
if node != nil {
// Get the HTrees which stores the edge endpoints
_, end1ht, err := gt.gm.getNodeStorageHTree(part, oldedge.End1Kind(), false)
if err != nil {
return err
}
_, end2ht, err := gt.gm.getNodeStorageHTree(part, oldedge.End2Kind(), false)
if err != nil {
return err
}
// Delete edge info from node storage
if err := gt.gm.deleteEdge(oldedge, end1ht, end2ht); err != nil {
return err
}
if iht != nil {
err := util.NewIndexManager(iht).Deindex(edge.Key(), oldedge.IndexMap())
if err != nil {
return err
}
}
// Decrease edge count
currentCount := gt.gm.EdgeCount(oldedge.Kind())
gt.gm.writeEdgeCount(oldedge.Kind(), currentCount-1, false)
// Execute rules
if err := gt.gm.gr.graphEvent(gt, EventEdgeDeleted, part, oldedge); err != nil {
return err
}
}
delete(gt.removeEdges, tkey)
}
return nil
}
/*
StoreNode stores a single node in a partition of the graph. This function will
overwrite any existing node.
*/
func (gt *baseTrans) StoreNode(part string, node data.Node) error {
if err := gt.gm.checkPartitionName(part); err != nil {
return err
} else if err := gt.gm.checkNode(node); err != nil {
return err
}
key := gt.createKey(part, node.Key(), node.Kind())
if _, ok := gt.removeNodes[key]; ok {
delete(gt.removeNodes, key)
}
gt.storeNodes[key] = node
return nil
}
/*
UpdateNode updates a single node in a partition of the graph. This function will
only update the given values of the node.
*/
func (gt *baseTrans) UpdateNode(part string, node data.Node) error {
if err := gt.gm.checkPartitionName(part); err != nil {
return err
} else if err := gt.gm.checkNode(node); err != nil {
return err
}
key := gt.createKey(part, node.Key(), node.Kind())
if _, ok := gt.removeNodes[key]; ok {
delete(gt.removeNodes, key)
} else if storeNode, ok := gt.storeNodes[key]; ok {
node = data.NodeMerge(storeNode, node)
} else {
// Check the actual database if the node exists
storeNode, err := gt.gm.FetchNode(part, node.Key(), node.Kind())
if err != nil {
return err
} else if storeNode != nil {
node = data.NodeMerge(storeNode, node)
}
}
gt.storeNodes[key] = node
return nil
}
/*
RemoveNode removes a single node from a partition of the graph.
*/
func (gt *baseTrans) RemoveNode(part string, nkey string, nkind string) error {
if err := gt.gm.checkPartitionName(part); err != nil {
return err
}
key := gt.createKey(part, nkey, nkind)
if _, ok := gt.storeNodes[key]; ok {
delete(gt.storeNodes, key)
}
node := data.NewGraphNode()
node.SetAttr(data.NodeKey, nkey)
node.SetAttr(data.NodeKind, nkind)
gt.removeNodes[key] = node
return nil
}
/*
StoreEdge stores a single edge in a partition of the graph. This function will
overwrite any existing edge.
*/
func (gt *baseTrans) StoreEdge(part string, edge data.Edge) error {
if err := gt.gm.checkPartitionName(part); err != nil {
return err
} else if err := gt.gm.checkEdge(edge); err != nil {
return err
}
key := gt.createKey(part, edge.Key(), edge.Kind())
if _, ok := gt.removeEdges[key]; ok {
delete(gt.removeEdges, key)
}
gt.storeEdges[key] = edge
return nil
}
/*
RemoveEdge removes a single edge from a partition of the graph.
*/
func (gt *baseTrans) RemoveEdge(part string, ekey string, ekind string) error {
if err := gt.gm.checkPartitionName(part); err != nil {
return err
}
key := gt.createKey(part, ekey, ekind)
if _, ok := gt.storeEdges[key]; ok {
delete(gt.storeEdges, key)
}
edge := data.NewGraphEdge()
edge.SetAttr(data.NodeKey, ekey)
edge.SetAttr(data.NodeKind, ekind)
gt.removeEdges[key] = edge
return nil
}
/*
createKey creates a key for the transaction storage.
*/
func (gt *baseTrans) createKey(part string, key string, kind string) string {
return part + "#" + kind + "#" + key
}
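/*
Illustrative usage sketch (not part of the original source): a transaction
queues operations and only touches the datastore on Commit. The constructor
name NewGraphTrans is an assumption - only the Trans methods shown above are
confirmed by this file.
*/
func exampleTransUsage(gm *Manager) error {
	trans := NewGraphTrans(gm) // Assumed constructor returning a Trans

	node := data.NewGraphNode()
	node.SetAttr(data.NodeKey, "node1")
	node.SetAttr(data.NodeKind, "mykind")

	// Queue the node - nothing is written to the datastore yet
	if err := trans.StoreNode("main", node); err != nil {
		return err
	}

	// Commit writes all queued operations; non-fatal errors trigger
	// an automatic rollback
	return trans.Commit()
}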
/*
concurrentTrans is a lock-wrapper around baseTrans which allows concurrent use.
*/
type concurrentTrans struct {
Trans
transLock *sync.RWMutex
}
/*
ID returns a unique transaction ID.
*/
func (gt *concurrentTrans) ID() string {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
return gt.Trans.ID()
}
/*
String returns a string representation of this transaction.
*/
func (gt *concurrentTrans) String() string {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
return gt.Trans.String()
}
/*
Counts returns the transaction size in terms of objects. Returned values
are nodes to store, edges to store, nodes to remove and edges to remove.
*/
func (gt *concurrentTrans) Counts() (int, int, int, int) {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
return gt.Trans.Counts()
}
/*
IsEmpty returns whether this transaction is empty.
*/
func (gt *concurrentTrans) IsEmpty() bool {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
return gt.Trans.IsEmpty()
}
/*
Commit writes the transaction to the graph database. An automatic rollback is done if
any non-fatal error occurs. Failed transactions cannot be committed again.
Serious write errors which may corrupt the database will cause a panic.
*/
func (gt *concurrentTrans) Commit() error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.Commit()
}
/*
StoreNode stores a single node in a partition of the graph. This function will
overwrite any existing node.
*/
func (gt *concurrentTrans) StoreNode(part string, node data.Node) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.StoreNode(part, node)
}
/*
UpdateNode updates a single node in a partition of the graph. This function will
only update the given values of the node.
*/
func (gt *concurrentTrans) UpdateNode(part string, node data.Node) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.UpdateNode(part, node)
}
/*
RemoveNode removes a single node from a partition of the graph.
*/
func (gt *concurrentTrans) RemoveNode(part string, nkey string, nkind string) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.RemoveNode(part, nkey, nkind)
}
/*
StoreEdge stores a single edge in a partition of the graph. This function will
overwrite any existing edge.
*/
func (gt *concurrentTrans) StoreEdge(part string, edge data.Edge) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.StoreEdge(part, edge)
}
/*
RemoveEdge removes a single edge from a partition of the graph.
*/
func (gt *concurrentTrans) RemoveEdge(part string, ekey string, ekind string) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
return gt.Trans.RemoveEdge(part, ekey, ekind)
}
/*
rollingTrans is a rolling transaction which will commit itself after
n operations.
*/
type rollingTrans struct {
id string // ID of this transaction
gm *Manager // Graph manager which created this transaction
currentTrans Trans // Current transaction which is build up
newTransFunc func(*Manager) Trans // Function to create a new transaction
transErrors *errorutil.CompositeError // Collected transaction errors
opThreshold int // Operation threshold
opCount int // Operation count
inFlightCount int // Previous transactions which are still committing
wg *sync.WaitGroup // WaitGroup which releases once all in-flight transactions have finished
countNodeIns int // Count for inserted nodes
countNodeRem int // Count for removed nodes
countEdgeIns int // Count for inserted edges
countEdgeRem int // Count for removed edges
transLock *sync.RWMutex // Lock for this transaction
}
/*
ID returns a unique transaction ID.
*/
func (gt *rollingTrans) ID() string {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
return gt.id
}
/*
IsEmpty returns whether this transaction is empty.
*/
func (gt *rollingTrans) IsEmpty() bool {
sn, se, rn, re := gt.Counts()
return sn == 0 && se == 0 && rn == 0 && re == 0
}
/*
Counts returns the transaction size in terms of objects. Returned values
are nodes to store, edges to store, nodes to remove and edges to remove.
*/
func (gt *rollingTrans) Counts() (int, int, int, int) {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
// Count current trans
ns, es, nr, er := gt.currentTrans.Counts()
return ns + gt.countNodeIns, es + gt.countEdgeIns,
nr + gt.countNodeRem, er + gt.countEdgeRem
}
/*
String returns a string representation of this transaction.
*/
func (gt *rollingTrans) String() string {
gt.transLock.RLock()
defer gt.transLock.RUnlock()
ns, es, nr, er := gt.currentTrans.Counts()
return fmt.Sprintf("Rolling transaction %v - Nodes: I:%v R:%v - "+
"Edges: I:%v R:%v - Threshold: %v - In-flight: %v",
gt.id, ns+gt.countNodeIns, nr+gt.countNodeRem, es+gt.countEdgeIns,
er+gt.countEdgeRem, gt.opThreshold, gt.inFlightCount)
}
/*
Commit writes the remaining operations of this rolling transaction to
the graph database.
*/
func (gt *rollingTrans) Commit() error {
// Commit current transaction
gt.transLock.Lock()
if err := gt.currentTrans.Commit(); err != nil {
gt.transErrors.Add(err)
}
gt.transLock.Unlock()
// Wait for other transactions
gt.wg.Wait()
// Return any errors
if gt.transErrors.HasErrors() {
return gt.transErrors
}
return nil
}
/*
checkNewSubTrans checks if a new sub-transaction should be started.
*/
func (gt *rollingTrans) checkNewSubTrans() {
if gt.opCount++; gt.opCount >= gt.opThreshold {
// Reset the op counter
gt.opCount = 0
// Start a new transaction and add the counts to the overall counts
cTrans := gt.currentTrans
gt.currentTrans = gt.newTransFunc(gt.gm)
ns, es, nr, er := cTrans.Counts()
gt.countNodeIns += ns
gt.countNodeRem += nr
gt.countEdgeIns += es
gt.countEdgeRem += er
// Start a goroutine which commits the current transaction
gt.wg.Add(1) // Add to WaitGroup so we can wait for all in-flight transactions
gt.inFlightCount++ // Count the new in-flight transaction
go func() {
defer gt.wg.Done()
err := cTrans.Commit()
gt.transLock.Lock()
if err != nil {
// Store errors
gt.transErrors.Add(err)
}
// Reduce the counts (do this even if there were errors)
gt.countNodeIns -= ns
gt.countNodeRem -= nr
gt.countEdgeIns -= es
gt.countEdgeRem -= er
gt.inFlightCount--
gt.transLock.Unlock()
}()
}
}
/*
StoreNode stores a single node in a partition of the graph. This function will
overwrite any existing node.
*/
func (gt *rollingTrans) StoreNode(part string, node data.Node) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
err := gt.currentTrans.StoreNode(part, node)
if err == nil {
gt.checkNewSubTrans()
}
return err
}
/*
UpdateNode updates a single node in a partition of the graph. This function will
only update the given values of the node.
*/
func (gt *rollingTrans) UpdateNode(part string, node data.Node) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
err := gt.currentTrans.UpdateNode(part, node)
if err == nil {
gt.checkNewSubTrans()
}
return err
}
/*
RemoveNode removes a single node from a partition of the graph.
*/
func (gt *rollingTrans) RemoveNode(part string, nkey string, nkind string) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
err := gt.currentTrans.RemoveNode(part, nkey, nkind)
if err == nil {
gt.checkNewSubTrans()
}
return err
}
/*
StoreEdge stores a single edge in a partition of the graph. This function will
overwrite any existing edge.
*/
func (gt *rollingTrans) StoreEdge(part string, edge data.Edge) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
err := gt.currentTrans.StoreEdge(part, edge)
if err == nil {
gt.checkNewSubTrans()
}
return err
}
/*
RemoveEdge removes a single edge from a partition of the graph.
*/
func (gt *rollingTrans) RemoveEdge(part string, ekey string, ekind string) error {
gt.transLock.Lock()
defer gt.transLock.Unlock()
err := gt.currentTrans.RemoveEdge(part, ekey, ekind)
if err == nil {
gt.checkNewSubTrans()
}
return err
}
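/*
Illustrative usage sketch (not part of the original source): a rolling
transaction commits a sub-transaction in the background after every
opThreshold operations, which keeps memory usage flat during bulk imports.
NewRollingTrans and NewGraphTrans and their signatures are assumptions.
*/
func exampleRollingTransUsage(gm *Manager) error {
	// Assumed constructor: commit a sub-transaction every 1000 operations
	trans := NewRollingTrans(NewGraphTrans(gm), 1000, gm, NewGraphTrans)

	for i := 0; i < 100000; i++ {
		node := data.NewGraphNode()
		node.SetAttr(data.NodeKey, fmt.Sprint(i))
		node.SetAttr(data.NodeKind, "bulk")

		if err := trans.StoreNode("main", node); err != nil {
			return err
		}
	}

	// Commit the remainder and wait for all in-flight sub-transactions
	return trans.Commit()
}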
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package util contains utility classes for the graph storage.
GraphError
Models a graph related error. Low-level errors should be wrapped in a GraphError
before they are returned to a client.
IndexManager
Manages the full text search index. The index supports simple word searches as
well as phrase searches.
The index is basically a key-value lookup which manages two types of entries:
Each node attribute value is split up into words. Each word gets an entry:
PrefixAttrWord + attr num + word (string) -> ids + pos
(provides word and phrase lookup)
Each node attribute value is also converted into an MD5 sum which makes attribute
value lookups very efficient:
PrefixAttrHash + attr num + hash (md5) -> ids
(provides exact match lookup)
NamesManager
Manages names of kinds, roles and attributes. Each stored name gets either a 16
or 32 bit (little endian) number assigned. The manager provides functions to lookup
either the names or their numbers.
*/
package util
import (
"errors"
"fmt"
)
/*
GraphError is a graph related error
*/
type GraphError struct {
Type error // Error type (to be used for equality checks)
Detail string // Details of this error
}
/*
Error returns a human-readable string representation of this error.
*/
func (ge *GraphError) Error() string {
if ge.Detail != "" {
return fmt.Sprintf("GraphError: %v (%v)", ge.Type, ge.Detail)
}
return fmt.Sprintf("GraphError: %v", ge.Type)
}
/*
Graph storage related error types
*/
var (
ErrOpening = errors.New("Failed to open graph storage")
ErrFlushing = errors.New("Failed to flush changes")
ErrRollback = errors.New("Failed to rollback changes")
ErrClosing = errors.New("Failed to close graph storage")
ErrAccessComponent = errors.New("Failed to access graph storage component")
ErrReadOnly = errors.New("Failed write to readonly storage")
)
/*
Graph related error types
*/
var (
ErrInvalidData = errors.New("Invalid data")
ErrIndexError = errors.New("Index error")
ErrReading = errors.New("Could not read graph information")
ErrWriting = errors.New("Could not write graph information")
ErrRule = errors.New("Graph rule error")
)
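/*
Illustrative sketch (not part of the original source): the Type field of
GraphError is meant for equality checks against the error variables above,
e.g. to distinguish a read failure from an index problem.
*/
func isReadError(err error) bool {
	if ge, ok := err.(*GraphError); ok {
		return ge.Type == ErrReading
	}
	return false
}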
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package util
import (
"bytes"
"crypto/md5"
"encoding/gob"
"fmt"
"math"
"sort"
"strings"
"unicode"
"devt.de/krotik/common/bitutil"
"devt.de/krotik/common/sortutil"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/hash"
)
/*
MaxKeysetSize is the maximum number of keys for a single word lookup.
*/
const MaxKeysetSize = 1000
/*
CaseSensitiveWordIndex is a flag to indicate if the index should be case sensitive.
*/
var CaseSensitiveWordIndex = false
/*
PrefixAttrWord is the prefix used for word index entries
*/
const PrefixAttrWord = "\x01"
/*
PrefixAttrHash is the prefix used for hashes of attribute values
*/
const PrefixAttrHash = "\x02"
/*
IndexManager data structure
*/
type IndexManager struct {
htree *hash.HTree // Persistent HTree which stores this index
}
/*
indexEntry data structure
*/
type indexEntry struct {
WordPos map[string]string // Node id to word position array
}
func init() {
// Make sure we can use indexEntry in a gob operation
gob.Register(&indexEntry{})
}
/*
NewIndexManager creates a new index manager instance.
*/
func NewIndexManager(htree *hash.HTree) *IndexManager {
return &IndexManager{htree}
}
/*
Index indexes (inserts) a given object.
*/
func (im *IndexManager) Index(key string, obj map[string]string) error {
return im.updateIndex(key, obj, nil)
}
/*
Reindex reindexes (updates) a given object.
*/
func (im *IndexManager) Reindex(key string, newObj map[string]string,
oldObj map[string]string) error {
return im.updateIndex(key, newObj, oldObj)
}
/*
Deindex deindexes (removes) a given object.
*/
func (im *IndexManager) Deindex(key string, obj map[string]string) error {
return im.updateIndex(key, nil, obj)
}
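/*
Illustrative usage sketch (not part of the original source): indexing an
object and looking it up by word, phrase and value. It assumes an HTree
backed by the in-memory storage manager of the eliasdb/storage package
(storage.NewMemoryStorageManager and hash.NewHTree are assumptions; the
storage import is not part of this file).
*/
func exampleIndexManagerUsage() {
	sm := storage.NewMemoryStorageManager("example") // Assumed helper
	ht, _ := hash.NewHTree(sm)                       // Assumed constructor

	im := NewIndexManager(ht)

	// Index a node attribute value (word positions start at 1)
	_ = im.Index("node1", map[string]string{"text": "the quick brown fox"})

	words, _ := im.LookupWord("text", "quick")                // map[node1:[2]]
	keys, _ := im.LookupPhrase("text", "quick brown")         // [node1]
	vals, _ := im.LookupValue("text", "the quick brown fox")  // [node1]

	fmt.Println(words, keys, vals)
}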
/*
LookupPhrase finds all nodes where an attribute contains a certain phrase. This
call returns a list of node keys which contain the phrase at least once.
*/
func (im *IndexManager) LookupPhrase(attr, phrase string) ([]string, error) {
// Chop up the phrase into words
phraseWords := strings.FieldsFunc(phrase, func(r rune) bool {
return !stringutil.IsAlphaNumeric(string(r)) && (unicode.IsSpace(r) || unicode.IsControl(r) || unicode.IsPunct(r))
})
// Lookup every phrase word
results := make([]map[string][]uint64, len(phraseWords))
for i, phraseWord := range phraseWords {
res, err := im.LookupWord(attr, phraseWord)
if err != nil {
return nil, &GraphError{ErrIndexError, err.Error()}
}
results[i] = res
}
if len(results) == 0 || len(results[0]) == 0 {
return nil, nil
}
ret := make([]string, 0, len(results[0]))
// Go through all found nodes and try to find a path
path := make([]uint64, 0, len(phraseWords))
for key := range results[0] {
path = path[:0]
foundWords := im.findPhrasePath(key, 0, path, phraseWords, results)
if foundWords == len(phraseWords) {
// Add key to results if a path was found
ret = append(ret, key)
}
}
// Guarantee a stable result
sort.StringSlice(ret).Sort()
return ret, nil
}
/*
findPhrasePath tries to find a phrase in a given set of lookup results.
*/
func (im *IndexManager) findPhrasePath(key string, index int, path []uint64,
phraseWords []string, results []map[string][]uint64) int {
// Get the results for this word index
result := results[index]
// Check if there is a result for the given key
if posArr, ok := result[key]; ok {
// Check if any of the positions is at the right place
if index > 0 {
// Check with previous result
for _, pos := range posArr {
// Check if the position array contains the expected next word position
if pos == path[index-1]+1 {
path = append(path, pos)
break
}
// Abort if the expected position cannot be there
if pos > path[index-1] {
return len(path)
}
}
// Do the next iteration if a position was found and
// there are more words in the phrase to match
if len(path) == index+1 && index < len(phraseWords)-1 {
return im.findPhrasePath(key, index+1, path, phraseWords, results)
}
return index + 1
}
// Try every position as start position in the first iteration
for _, pos := range posArr {
path = path[:0]
path = append(path, pos)
// Test if the phrase only contained one word
if len(phraseWords) == 1 {
return 1
}
// Find the rest
ret := im.findPhrasePath(key, 1, path, phraseWords, results)
if ret == len(phraseWords) {
return ret
}
}
}
return len(path)
}
/*
LookupWord finds all nodes where an attribute contains a certain word. This call returns
a map which maps node key to a list of word positions.
*/
func (im *IndexManager) LookupWord(attr, word string) (map[string][]uint64, error) {
var s string
if CaseSensitiveWordIndex {
s = word
} else {
s = strings.ToLower(word)
}
entry, err := im.htree.Get([]byte(PrefixAttrWord + attr + s))
if err != nil {
return nil, &GraphError{ErrIndexError, err.Error()}
} else if entry == nil {
return nil, nil
}
ret := make(map[string][]uint64)
for k, l := range entry.(*indexEntry).WordPos {
ret[k] = bitutil.UnpackList(l)
}
return ret, nil
}
/*
LookupValue finds all nodes where an attribute has a certain value. This call
returns a list of node keys.
*/
func (im *IndexManager) LookupValue(attr, value string) ([]string, error) {
var entry *indexEntry
var sum [16]byte
if CaseSensitiveWordIndex {
sum = md5.Sum([]byte(value))
} else {
sum = md5.Sum([]byte(strings.ToLower(value)))
}
indexkey := []byte(PrefixAttrHash + attr + string(sum[:16]))
// Retrieve index entry
obj, err := im.htree.Get(indexkey)
if err != nil {
return nil, &GraphError{ErrIndexError, err.Error()}
}
if obj == nil {
return nil, nil
}
entry = obj.(*indexEntry)
ret := make([]string, 0, len(entry.WordPos))
for key := range entry.WordPos {
ret = append(ret, key)
}
sort.StringSlice(ret).Sort()
return ret, nil
}
/*
Count returns the number of found nodes for a given word in a given attribute.
*/
func (im *IndexManager) Count(attr, word string) (int, error) {
var s string
if CaseSensitiveWordIndex {
s = word
} else {
s = strings.ToLower(word)
}
entry, err := im.htree.Get([]byte(PrefixAttrWord + attr + s))
if err != nil {
return 0, &GraphError{ErrIndexError, err.Error()}
} else if entry == nil {
return 0, nil
}
return len(entry.(*indexEntry).WordPos), nil
}
/*
updateIndex updates the index for a specific object. Depending on the
new and old arguments being set, a given object is either indexed/added
(only new is set), deindexed/removed (only old is set) or reindexed/updated
(new and old are set).
*/
func (im *IndexManager) updateIndex(key string, newObj map[string]string,
oldObj map[string]string) error {
attrMap := make(map[string][]byte)
if newObj != nil && oldObj == nil {
// Insert case
for attr := range newObj {
attrMap[attr] = nil
}
} else if newObj == nil && oldObj != nil {
// Remove case
for attr := range oldObj {
attrMap[attr] = nil
}
} else {
// Update case
for attr := range newObj {
attrMap[attr] = nil
}
for attr := range oldObj {
attrMap[attr] = nil
}
}
emptyws := newWordSet(1)
for attr := range attrMap {
var newwords, toadd, oldwords, toremove *wordSet
newval, newok := newObj[attr]
oldval, oldok := oldObj[attr]
// Calculate which words to add or remove
newwords = emptyws
oldwords = emptyws
if newok {
newwords = extractWords(newval)
}
// At this point we have only words to add
toadd = newwords
toremove = emptyws
if oldok {
oldwords = extractWords(oldval)
if !oldwords.Empty() && !newwords.Empty() {
// Here a diff is necessary
toadd = copyWordSet(newwords)
toadd.RemoveAll(oldwords)
toremove = oldwords
toremove.RemoveAll(newwords)
} else {
// Either no new words or no old words
toremove = oldwords
}
}
// Add and remove index entries
for w, p := range toremove.set {
if err := im.removeIndexEntry(key, attr, w, p); err != nil {
return &GraphError{ErrIndexError, err.Error()}
}
}
for w, p := range toadd.set {
if err := im.addIndexEntry(key, attr, w, p); err != nil {
return &GraphError{ErrIndexError, err.Error()}
}
}
// Update hash lookup
if newok && oldok {
// Update hash entry
if err := im.removeIndexHashEntry(key, attr, oldval); err != nil {
return &GraphError{ErrIndexError, err.Error()}
} else if err := im.addIndexHashEntry(key, attr, newval); err != nil {
return &GraphError{ErrIndexError, err.Error()}
}
} else if newok && !oldok {
// Insert hash entry
if err := im.addIndexHashEntry(key, attr, newval); err != nil {
return &GraphError{ErrIndexError, err.Error()}
}
} else if oldok {
// Delete old hash entry
if err := im.removeIndexHashEntry(key, attr, oldval); err != nil {
return &GraphError{ErrIndexError, err.Error()}
}
}
}
return nil
}
/*
addIndexHashEntry adds a hash entry to the index. A hash entry stores a whole
value as an MD5 sum.
*/
func (im *IndexManager) addIndexHashEntry(key string, attr string, value string) error {
var entry *indexEntry
var sum [16]byte
if CaseSensitiveWordIndex {
sum = md5.Sum([]byte(value))
} else {
sum = md5.Sum([]byte(strings.ToLower(value)))
}
indexkey := []byte(PrefixAttrHash + attr + string(sum[:16]))
// Retrieve index entry
obj, err := im.htree.Get(indexkey)
if err != nil {
return err
}
if obj == nil {
entry = &indexEntry{make(map[string]string)}
} else {
entry = obj.(*indexEntry)
}
entry.WordPos[key] = ""
_, err = im.htree.Put(indexkey, entry)
return err
}
/*
removeIndexHashEntry removes a hash entry from the index. A hash entry stores a whole
value as an MD5 sum.
*/
func (im *IndexManager) removeIndexHashEntry(key string, attr string, value string) error {
var entry *indexEntry
var sum [16]byte
if CaseSensitiveWordIndex {
sum = md5.Sum([]byte(value))
} else {
sum = md5.Sum([]byte(strings.ToLower(value)))
}
indexkey := []byte(PrefixAttrHash + attr + string(sum[:16]))
// Retrieve index entry
obj, err := im.htree.Get(indexkey)
if err != nil {
return err
}
if obj == nil {
return nil
}
entry = obj.(*indexEntry)
delete(entry.WordPos, key)
if len(entry.WordPos) == 0 {
im.htree.Remove(indexkey)
} else {
im.htree.Put(indexkey, entry)
}
return err
}
/*
removeIndexEntry removes an entry from the index.
*/
func (im *IndexManager) removeIndexEntry(key string, attr string, word string, pos []uint64) error {
var entry *indexEntry
indexkey := []byte(PrefixAttrWord + attr + word)
// Retrieve index entry
obj, err := im.htree.Get(indexkey)
if err != nil {
return err
}
if obj == nil {
return nil
}
entry = obj.(*indexEntry)
// Remove given pos from existing pos information
if keyentry, ok := entry.WordPos[key]; ok {
keyentrylist := bitutil.UnpackList(keyentry)
res := make([]uint64, 0, len(keyentrylist))
remLookup := make(map[uint64]bool)
for _, item := range pos {
remLookup[item] = true
}
for _, item := range keyentrylist {
if _, ok := remLookup[item]; !ok {
res = append(res, item)
}
}
if len(res) == 0 {
delete(entry.WordPos, key)
} else {
entry.WordPos[key] = bitutil.PackList(res, res[len(res)-1])
}
}
if len(entry.WordPos) == 0 {
_, err = im.htree.Remove(indexkey)
} else {
_, err = im.htree.Put(indexkey, entry)
}
return err
}
/*
addIndexEntry adds an entry to the index.
*/
func (im *IndexManager) addIndexEntry(key string, attr string, word string, pos []uint64) error {
var entry *indexEntry
indexkey := []byte(PrefixAttrWord + attr + word)
// Retrieve or create index entry
obj, err := im.htree.Get(indexkey)
if err != nil {
return err
}
if obj == nil {
entry = &indexEntry{make(map[string]string)}
} else {
entry = obj.(*indexEntry)
}
// Create position string
if len(pos) == 0 {
panic("Trying to add index entry without position information")
}
// Mix in given pos with existing pos information
if keyentry, ok := entry.WordPos[key]; ok {
pos = append(bitutil.UnpackList(keyentry), pos...)
sortutil.UInt64s(pos)
pos = removeDuplicates(pos)
}
// Rely on the fact that position arrays are ordered in ascending order
maxpos := pos[len(pos)-1]
// Fill the entry and store it
entry.WordPos[key] = bitutil.PackList(pos, maxpos)
_, err = im.htree.Put(indexkey, entry)
return err
}
/*
removeDuplicates removes all duplicates from a given sorted list.
*/
func removeDuplicates(list []uint64) []uint64 {
if len(list) == 0 {
return list
}
res := make([]uint64, 1, len(list))
res[0] = list[0]
last := list[0]
for _, item := range list[1:] {
if item != last {
res = append(res, item)
last = item
}
}
return res
}
/*
String returns a string representation of this index manager.
*/
func (im *IndexManager) String() string {
var buf bytes.Buffer
buf.WriteString(fmt.Sprintf("IndexManager: %v\n", im.htree.Location()))
it := hash.NewHTreeIterator(im.htree)
for it.HasNext() {
key, value := it.Next()
posmap := make(map[string][]uint64)
for k, v := range value.(*indexEntry).WordPos {
posmap[k] = bitutil.UnpackList(v)
}
buf.WriteString(fmt.Sprintf(" %v%q %v\n", key[0], string(key[1:]), posmap))
}
return buf.String()
}
/*
extractWords extracts all words from a given string and returns a wordSet which contains
all words and their positions.
*/
func extractWords(s string) *wordSet {
var text string
if CaseSensitiveWordIndex {
text = s
} else {
text = strings.ToLower(s)
}
initArrCap := int(math.Ceil(float64(len(text)) * 0.01))
if initArrCap < 4 {
initArrCap = 4
}
ws := newWordSet(initArrCap)
var pos uint64
wstart := -1
for i, r := range text {
if !stringutil.IsAlphaNumeric(string(r)) && (unicode.IsSpace(r) || unicode.IsControl(r) || unicode.IsPunct(r)) {
if wstart >= 0 {
ws.Add(text[wstart:i], pos+1)
pos++
wstart = -1
}
} else if wstart == -1 {
wstart = i
}
}
if wstart >= 0 {
ws.Add(text[wstart:], pos+1)
}
return ws
}
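/*
Illustrative sketch (not part of the original source): with the default
case-insensitive setting extractWords lowercases the input, splits it on
whitespace and punctuation and records 1-based word positions.
*/
func exampleExtractWords() {
	ws := extractWords("Hello, world! Hello")

	fmt.Println(ws.Pos("hello")) // [1 3]
	fmt.Println(ws.Pos("world")) // [2]
}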
/*
wordSet is an internal data structure for sets of words and their positions.
*/
type wordSet struct {
set map[string][]uint64 // Map which holds the data
initArrCap int // Initial capacity for the position array
}
/*
newWordSet creates a new word set.
*/
func newWordSet(initArrCap int) *wordSet {
return &wordSet{make(map[string][]uint64), initArrCap}
}
/*
copyWordSet creates a new word set from a present one.
*/
func copyWordSet(ws *wordSet) *wordSet {
ret := &wordSet{make(map[string][]uint64), ws.initArrCap}
ret.AddAll(ws)
return ret
}
/*
Add adds a word to the word set. Returns true if the word was added and false
if an existing entry was updated.
*/
func (ws *wordSet) Add(s string, pos uint64) bool {
v, ok := ws.set[s]
if !ok {
ws.set[s] = make([]uint64, 1, ws.initArrCap)
ws.set[s][0] = pos
} else {
// Make sure the largest entry is always last
l := len(ws.set[s])
if ws.set[s][l-1] < pos {
ws.set[s] = append(v, pos)
} else {
// Make sure there is no double entry
for _, ex := range v {
if ex == pos {
return !ok
}
}
ws.set[s] = append(v, pos)
sortutil.UInt64s(ws.set[s])
}
}
return !ok
}
/*
AddAll adds all words from another word set to this word set.
*/
func (ws *wordSet) AddAll(ws2 *wordSet) {
for s, val := range ws2.set {
for _, v := range val {
ws.Add(s, v)
}
}
}
/*
Empty checks if this word set is empty.
*/
func (ws *wordSet) Empty() bool {
return len(ws.set) == 0
}
/*
Has checks if this word set has a certain word.
*/
func (ws *wordSet) Has(s string) bool {
_, ok := ws.set[s]
return ok
}
/*
Pos returns the positions of a certain word.
*/
func (ws *wordSet) Pos(s string) []uint64 {
if pos, ok := ws.set[s]; ok {
return pos
}
return nil
}
/*
Remove removes a word from the word set.
*/
func (ws *wordSet) Remove(s string, pos uint64) {
if posArr, ok := ws.set[s]; ok {
// Look for the position
for i, p := range posArr {
if p == pos {
posArr := append(posArr[:i], posArr[i+1:]...)
ws.set[s] = posArr
break
}
}
// Remove the word if no more positions are left
if len(ws.set[s]) == 0 {
delete(ws.set, s)
}
}
}
/*
RemoveAll removes all words from another word set from this word set.
*/
func (ws *wordSet) RemoveAll(ws2 *wordSet) {
for s, posArr2 := range ws2.set {
if posArr, ok := ws.set[s]; ok {
j := 0
for i := 0; i < len(posArr2); i++ {
for ; j < len(posArr); j++ {
if posArr[j] == posArr2[i] {
// If a matching entry was found remove it
posArr = append(posArr[:j], posArr[j+1:]...)
ws.set[s] = posArr
break
} else if posArr[j] > posArr2[i] {
// Skip over if a position is not in the current posArr
break
}
}
}
}
// Remove the word if no more positions are left
if len(ws.set[s]) == 0 {
delete(ws.set, s)
}
}
}
/*
String returns a string representation of this word set.
*/
func (ws *wordSet) String() string {
var buf bytes.Buffer
c := make([]string, 0, len(ws.set))
for s := range ws.set {
c = append(c, s)
}
sort.StringSlice(c).Sort()
buf.WriteString("WordSet:\n")
for _, k := range c {
buf.WriteString(fmt.Sprintf(" %v %v\n", k, ws.set[k]))
}
return buf.String()
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package util
import "encoding/binary"
/*
PrefixCode is the prefix for entries storing codes
*/
const PrefixCode = "\x00"
/*
PrefixName is the prefix for entries storing names
*/
const PrefixName = "\x01"
/*
PrefixCounter is the prefix for counter entries
*/
const PrefixCounter = "\x00"
/*
Prefix16Bit is the prefix for 16 bit kind related entries
*/
const Prefix16Bit = "\x01"
/*
Prefix32Bit is the prefix for attribute related entries
*/
const Prefix32Bit = "\x02"
/*
NamesManager data structure
*/
type NamesManager struct {
nameDB map[string]string // Database storing names
}
/*
NewNamesManager creates a new names manager instance.
*/
func NewNamesManager(nameDB map[string]string) *NamesManager {
return &NamesManager{nameDB}
}
/*
Encode32 encodes a given value as a 32 bit string. If the create flag
is set to false then a new entry will not be created if it does not exist.
*/
func (gs *NamesManager) Encode32(val string, create bool) string {
return gs.encode(Prefix32Bit, val, create)
}
/*
Decode32 decodes a given 32 bit string to a value.
*/
func (gs *NamesManager) Decode32(val string) string {
return gs.decode(Prefix32Bit, val)
}
/*
Encode16 encodes a given value as a 16 bit string. If the create flag
is set to false then a new entry will not be created if it does not exist.
*/
func (gs *NamesManager) Encode16(val string, create bool) string {
return gs.encode(Prefix16Bit, val, create)
}
/*
Decode16 decodes a given 16 bit string to a value.
*/
func (gs *NamesManager) Decode16(val string) string {
return gs.decode(Prefix16Bit, val)
}
/*
encode encodes a name to a code.
*/
func (gs *NamesManager) encode(prefix string, name string, create bool) string {
codekey := string(PrefixCode) + prefix + name
code, ok := gs.nameDB[codekey]
// If the code doesn't exist yet create it
if !ok && create {
if prefix == Prefix16Bit {
code = gs.newCode16()
} else {
code = gs.newCode32()
}
gs.nameDB[codekey] = code
namekey := string(PrefixName) + prefix + code
gs.nameDB[namekey] = name
}
return code
}
/*
decode decodes a name from a code.
*/
func (gs *NamesManager) decode(prefix string, code string) string {
namekey := string(PrefixName) + prefix + code
return gs.nameDB[namekey]
}
/*
newCode32 generates a new 32 bit number for the names map.
*/
func (gs *NamesManager) newCode32() (res string) {
var resnum uint32
// Calculate count entry
countAttr := string(PrefixCounter) + Prefix32Bit
// Calculate new code
val, ok := gs.nameDB[countAttr]
if !ok {
resnum = 1
} else {
resnum = binary.LittleEndian.Uint32([]byte(val))
resnum++
}
// Convert to a string
resStr := make([]byte, 4)
binary.LittleEndian.PutUint32(resStr, resnum)
res = string(resStr)
// Write back
gs.nameDB[countAttr] = res
return res
}
/*
newCode16 generates a new 16 bit number for the names map.
*/
func (gs *NamesManager) newCode16() (res string) {
var resnum uint16
// Calculate count entry
countAttr := string(PrefixCounter) + Prefix16Bit
// Calculate new code
val, ok := gs.nameDB[countAttr]
if !ok {
resnum = 1
} else {
resnum = binary.LittleEndian.Uint16([]byte(val))
resnum++
}
// Convert to a string
resStr := make([]byte, 2)
binary.LittleEndian.PutUint16(resStr, resnum)
res = string(resStr)
// Write back
gs.nameDB[countAttr] = res
return res
}
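/*
Illustrative usage sketch (not part of the original source): encoding a name
to a compact 16 bit code and decoding it again. The backing map would
normally be a persistent map; a plain map is used here for brevity (fmt
would also need to be imported).
*/
func exampleNamesManagerUsage() {
	nm := NewNamesManager(make(map[string]string))

	// Encode a kind name as a 2 byte code, creating it if it is unknown
	code := nm.Encode16("Person", true)

	fmt.Println(len(code))                            // 2
	fmt.Println(nm.Decode16(code))                    // Person
	fmt.Println(code == nm.Encode16("Person", false)) // true
}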
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package interpreter contains the GraphQL interpreter for EliasDB.
*/
package interpreter
import (
"fmt"
"sort"
"strconv"
"devt.de/krotik/common/lang/graphql/parser"
)
// Not Implemented Runtime
// =======================
/*
Special runtime for not implemented constructs.
*/
type invalidRuntime struct {
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
invalidRuntimeInst returns a new runtime component instance.
*/
func invalidRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &invalidRuntime{rtp, node}
}
/*
Validate this node and all its child nodes.
*/
func (rt *invalidRuntime) Validate() error {
return rt.rtp.newFatalRuntimeError(ErrInvalidConstruct, rt.node.Name, rt.node)
}
/*
Eval evaluates this runtime component.
*/
func (rt *invalidRuntime) Eval() (map[string]interface{}, error) {
return nil, rt.rtp.newFatalRuntimeError(ErrInvalidConstruct, rt.node.Name, rt.node)
}
// Value Runtime
// =============
/*
Special runtime for values.
*/
type valueRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
valueRuntimeInst returns a new runtime component instance.
*/
func valueRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &valueRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Value returns the calculated value of the expression.
*/
func (rt *valueRuntime) Value() interface{} {
if rt.node.Name == parser.NodeVariable {
val, ok := rt.rtp.VariableValues[rt.node.Token.Val]
if !ok {
rt.rtp.handleRuntimeError(fmt.Errorf(
"Variable %s was used but not declared", rt.node.Token.Val),
[]string{}, rt.node)
}
return val
} else if rt.node.Name == parser.NodeValue || rt.node.Name == parser.NodeDefaultValue {
val := rt.node.Token.Val
if rt.node.Token.ID == parser.TokenIntValue {
i, _ := strconv.ParseInt(val, 10, 64)
return i
} else if rt.node.Token.ID == parser.TokenFloatValue {
f, _ := strconv.ParseFloat(val, 64)
return f
} else if rt.node.Token.ID == parser.TokenStringValue {
return rt.node.Token.Val
} else if val == "true" {
return true
} else if val == "false" {
return false
} else if val == "null" {
return nil
}
} else if rt.node.Name == parser.NodeObjectValue {
res := make(map[string]interface{})
for _, c := range rt.node.Children {
res[c.Token.Val] = c.Children[0].Runtime.(*valueRuntime).Value()
}
return res
} else if rt.node.Name == parser.NodeListValue {
res := make([]interface{}, 0)
for _, c := range rt.node.Children {
res = append(res, c.Runtime.(*valueRuntime).Value())
}
return res
}
// Default (e.g. enum type)
return rt.node.Token.Val
}
// Data sorting
// ============
/*
dataSort sorts a list of maps.
*/
func dataSort(list []map[string]interface{}, attr string, ascending bool) {
sort.Sort(&DataSlice{list, attr, ascending})
}
/*
DataSlice attaches the methods of sort.Interface to []map[string]interface{},
sorting in ascending or descending order by a given attribute.
*/
type DataSlice struct {
data []map[string]interface{}
attr string
ascending bool
}
/*
Len belongs to the sort.Interface.
*/
func (d DataSlice) Len() int { return len(d.data) }
/*
Less belongs to the sort.Interface.
*/
func (d DataSlice) Less(i, j int) bool {
ia, ok1 := d.data[i][d.attr]
ja, ok2 := d.data[j][d.attr]
if ok1 && ok2 {
is := fmt.Sprint(ia)
js := fmt.Sprint(ja)
in, err1 := strconv.Atoi(is)
jn, err2 := strconv.Atoi(js)
if err1 == nil && err2 == nil {
if d.ascending {
return in < jn
}
return in > jn
}
if d.ascending {
return is < js
}
return is > js
}
return false
}
/*
Swap belongs to the sort.Interface.
*/
func (d DataSlice) Swap(i, j int) {
d.data[i], d.data[j] = d.data[j], d.data[i]
}
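/*
Illustrative sketch (not part of the original source): dataSort compares
numerically when both attribute values parse as integers and falls back to
string comparison otherwise.
*/
func exampleDataSort() {
	list := []map[string]interface{}{
		{"n": "10"}, {"n": "9"}, {"n": "1"},
	}

	dataSort(list, "n", true)

	// Numeric order: 1 9 10 (a plain string sort would yield 1 10 9)
	fmt.Println(list[0]["n"], list[1]["n"], list[2]["n"])
}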
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"fmt"
"strings"
"devt.de/krotik/common/lang/graphql/parser"
)
/*
ProcessIntrospection filters the full introspection down to the required fields.
*/
func (rt *selectionSetRuntime) ProcessIntrospection() map[string]interface{} {
return rt.FilterIntrospectionResponse(rt.ProcessFullIntrospection())
}
/*
ProcessFullIntrospection returns the full introspection with all known fields.
*/
func (rt *selectionSetRuntime) ProcessFullIntrospection() map[string]interface{} {
res := make(map[string]interface{})
fieldMap := rt.GetFields()
for symbol := range fieldMap {
// General types
if symbol == "queryType" {
res["queryType"] = map[string]interface{}{
"name": "Query",
}
if !rt.rtp.readOnly {
res["mutationType"] = map[string]interface{}{
"name": "Mutation",
}
} else {
res["mutationType"] = nil
}
res["subscriptionType"] = map[string]interface{}{
"name": "Subscription",
}
}
if symbol == "types" {
res["types"] = rt.GetTypesIntrospection()
}
if symbol == "directives" {
res["directives"] = rt.GetDirectivesIntrospection()
}
}
return res
}
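/*
FilterIntrospectionResponse filters a full introspection response so that
only the fields requested by the selection set are returned.
*/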
func (rt *selectionSetRuntime) FilterIntrospectionResponse(res map[string]interface{}) map[string]interface{} {
filteredRes := make(map[string]interface{})
fieldMap := rt.GetFields()
for symbol, field := range fieldMap {
reschild := res[symbol]
if srt := field.SelectionSetRuntime(); srt != nil {
// Check for list
if reschildList, ok := reschild.([]interface{}); ok {
filterResList := []interface{}{}
for _, reschild := range reschildList {
filterResList = append(filterResList, srt.FilterIntrospectionResponse(reschild.(map[string]interface{})))
}
filteredRes[symbol] = filterResList
} else if reschildMap, ok := reschild.(map[string]interface{}); ok {
filteredRes[symbol] = srt.FilterIntrospectionResponse(reschildMap)
} else {
filteredRes[symbol] = reschild
}
} else {
filteredRes[symbol] = reschild
}
}
return filteredRes
}
/*
GetTypesIntrospection returns the introspection for all available types.
*/
func (rt *selectionSetRuntime) GetTypesIntrospection() interface{} {
res := make([]interface{}, 0)
queryType := map[string]interface{}{
"kind": "OBJECT",
"name": "Query",
"description": "Entry point for single read queries.",
"fields": rt.GetFieldTypesIntrospection("Lookup", true),
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
}
res = append(res, queryType)
if !rt.rtp.readOnly {
mutationType := map[string]interface{}{
"kind": "OBJECT",
"name": "Mutation",
"description": "Entry point for writing queries.",
"fields": rt.GetFieldTypesIntrospection("Insert or modify", false),
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
}
res = append(res, mutationType)
}
subscriptionType := map[string]interface{}{
"kind": "OBJECT",
"name": "Subscription",
"description": "Entry point for subscriptions.",
"fields": rt.GetFieldTypesIntrospection("Subscribe to", true),
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
}
res = append(res, subscriptionType)
// Add EliasDB specific types
res = append(res, rt.GetEliasDBTypesIntrospection().([]interface{})...)
// Add all the default GraphQL types like __Schema, __Type, etc.
res = append(res, rt.GetStandardTypesIntrospection().([]interface{})...)
return res
}
/*
GetFieldTypesIntrospection returns the introspection for all available field types.
*/
func (rt *selectionSetRuntime) GetFieldTypesIntrospection(action string, lookupArgs bool) interface{} {
var args []interface{}
res := make([]interface{}, 0)
if lookupArgs {
args = []interface{}{
map[string]interface{}{
"name": "key",
"defaultValue": nil,
"description": "Lookup a particular node by key.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
map[string]interface{}{
"name": "matches",
"defaultValue": nil,
"description": "Lookup nodes matching this template.",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "storeNode",
"defaultValue": nil,
"description": "Store a node according to this template.",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "removeNode",
"defaultValue": nil,
"description": "Remove a node according to this template (only kind is needed).",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "storeEdge",
"defaultValue": nil,
"description": "Store an edge according to this template.",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "removeEdge",
"defaultValue": nil,
"description": "Remove an edge according to this template (only key and kind are needed).",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "ascending",
"defaultValue": nil,
"description": "Sort resulting data ascending using the values of the specified key.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
map[string]interface{}{
"name": "descending",
"defaultValue": nil,
"description": "Sort resulting data descending using the values of the specified key.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
map[string]interface{}{
"name": "from",
"defaultValue": nil,
"description": "Retrieve data after the first n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
map[string]interface{}{
"name": "items",
"defaultValue": nil,
"description": "Retrieve n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
map[string]interface{}{
"name": "last",
"defaultValue": nil,
"description": "Only return last n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
}
} else {
args = []interface{}{}
}
for _, kind := range rt.rtp.gm.NodeKinds() {
res = append(res, map[string]interface{}{
"name": kind,
"description": fmt.Sprintf("%s %s nodes in the datastore.", action, kind),
"args": args,
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": fmt.Sprintf("%sNode", strings.Title(kind)),
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
})
}
return res
}
/*
GetEliasDBTypesIntrospection returns EliasDB types.
*/
func (rt *selectionSetRuntime) GetEliasDBTypesIntrospection() interface{} {
res := make([]interface{}, 0)
for _, kind := range rt.rtp.gm.NodeKinds() {
fields := make([]interface{}, 0)
for _, attr := range rt.rtp.gm.NodeAttrs(kind) {
fields = append(fields, map[string]interface{}{
"name": attr,
"description": fmt.Sprintf("The %s attribute of a %s node.", attr, kind),
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
})
}
for _, edge := range rt.rtp.gm.NodeEdges(kind) {
edgeName := strings.Replace(edge, ":", "_", -1)
edgeTargetKind := strings.Split(edge, ":")[3]
fields = append(fields, map[string]interface{}{
"name": edgeName,
"description": fmt.Sprintf("The %s edge of a %s node to a %s node.", edge, kind, edgeTargetKind),
"args": []interface{}{
map[string]interface{}{
"name": "traverse",
"defaultValue": nil,
"description": fmt.Sprintf("Use %s to traverse from %s to %s.", edge, kind, edgeTargetKind),
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
},
map[string]interface{}{
"name": "matches",
"defaultValue": nil,
"description": "Lookup nodes matching this template.",
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "NodeTemplate",
"ofType": nil,
},
},
map[string]interface{}{
"name": "ascending",
"defaultValue": nil,
"description": "Sort resulting data ascending using the values of the specified key.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
map[string]interface{}{
"name": "descending",
"defaultValue": nil,
"description": "Sort resulting data descending using the values of the specified key.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
map[string]interface{}{
"name": "from",
"defaultValue": nil,
"description": "Retrieve data after the first n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
map[string]interface{}{
"name": "items",
"defaultValue": nil,
"description": "Retrieve n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
map[string]interface{}{
"name": "last",
"defaultValue": nil,
"description": "Only return last n entries.",
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"ofType": nil,
},
},
},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": fmt.Sprintf("%sNode", strings.Title(edgeTargetKind)),
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
})
}
res = append(res, map[string]interface{}{
"kind": "OBJECT",
"name": fmt.Sprintf("%sNode", strings.Title(kind)),
"description": fmt.Sprintf("Represents a %s node.", kind),
"fields": fields,
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
})
}
res = append(res, map[string]interface{}{
"kind": "INPUT_OBJECT",
"name": "NodeTemplate",
"description": "Template of a node. Fields of this object can either be regular expressions or direct matches. A `not_` prefix negates the condition (e.g. `not_key`).",
"fields": []interface{}{},
"inputFields": []interface{}{},
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
})
return res
}
/*
GetStandardTypesIntrospection returns the standard types.
*/
func (rt *selectionSetRuntime) GetStandardTypesIntrospection() interface{} {
res := make([]interface{}, 0)
// Schema type
res = append(res, map[string]interface{}{
"kind": "OBJECT",
"name": "__Schema",
"description": "A GraphQL Schema defines the capabilities of a GraphQL server. It exposes all available types and directives on the server, as well as the entry points for query, mutation, and subscription operations.",
"fields": []interface{}{
map[string]interface{}{
"name": "types",
"description": "A list of all types supported by this server.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "queryType",
"description": "The type that query operations will be rooted at.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "mutationType",
"description": "The type that mutation operations will be rooted at.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "subscriptionType",
"description": "The type that subscription operations will be rooted at.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "directives",
"description": "A list of all directives supported by this server.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Directive",
"ofType": nil,
},
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
})
// Type type
res = append(res, map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"description": "The fundamental unit of the GraphQL Schema.",
"fields": []interface{}{
map[string]interface{}{
"name": "kind",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "ENUM",
"name": "__TypeKind",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "name",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "description",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "fields",
"description": nil,
"args": []interface{}{
map[string]interface{}{
"name": "includeDeprecated",
"description": nil,
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
"defaultValue": "false",
},
},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Field",
"ofType": nil,
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "interfaces",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "possibleTypes",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "enumValues",
"description": nil,
"args": []interface{}{
map[string]interface{}{
"name": "includeDeprecated",
"description": nil,
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
"defaultValue": "false",
},
},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__EnumValue",
"ofType": nil,
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "inputFields",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__InputValue",
"ofType": nil,
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "ofType",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
})
// Default types
res = append(res, []interface{}{
map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"description": "The `String` scalar type represents textual data, represented as UTF-8 character sequences.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"description": "The `Boolean` scalar type represents `true` or `false`.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "SCALAR",
"name": "Float",
"description": "The `Float` scalar type represents signed double-precision fractional values.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "SCALAR",
"name": "Int",
"description": "The `Int` scalar type represents non-fractional signed whole numeric values.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "OBJECT",
"name": "__InputValue",
"description": "Arguments provided to Fields or Directives and the input fields of an InputObject are represented as Input Values which describe their type and optionally a default value.",
"fields": []interface{}{
map[string]interface{}{
"name": "name",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "description",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "type",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "defaultValue",
"description": "A GraphQL-formatted string representing the default value for this input value.",
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "OBJECT",
"name": "__EnumValue",
"description": "One possible value for a given Enum. Enum values are unique values, not a placeholder for a string or numeric value. Enum values are returned in a JSON response as strings.",
"fields": []interface{}{
map[string]interface{}{
"name": "name",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "description",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "isDeprecated",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "deprecationReason",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "ENUM",
"name": "__TypeKind",
"description": "An enum describing what kind of type a given `__Type` is.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": []interface{}{
map[string]interface{}{
"name": "SCALAR",
"description": "Indicates this type is a scalar.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "OBJECT",
"description": "Indicates this type is an object. `fields` and `interfaces` are valid fields.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INTERFACE",
"description": "Indicates this type is an interface. `fields` and `possibleTypes` are valid fields.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "UNION",
"description": "Indicates this type is a union. `possibleTypes` is a valid field.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "ENUM",
"description": "Indicates this type is an enum. `enumValues` is a valid field.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INPUT_OBJECT",
"description": "Indicates this type is an input object. `inputFields` is a valid field.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "LIST",
"description": "Indicates this type is a list. `ofType` is a valid field.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "NON_NULL",
"description": "Indicates this type is a non-null. `ofType` is a valid field.",
"isDeprecated": false,
"deprecationReason": nil,
},
},
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "OBJECT",
"name": "__Field",
"description": "Object and Interface types are described by a list of Fields, each of which has a name, potentially a list of arguments, and a return type.",
"fields": []interface{}{
map[string]interface{}{
"name": "name",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "description",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "args",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__InputValue",
"ofType": nil,
},
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "type",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__Type",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "isDeprecated",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "deprecationReason",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "OBJECT",
"name": "__Directive",
"description": "A Directive provides a way to describe alternate runtime execution and type validation behavior in a GraphQL document.",
"fields": []interface{}{
map[string]interface{}{
"name": "name",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "description",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "SCALAR",
"name": "String",
"ofType": nil,
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "locations",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "ENUM",
"name": "__DirectiveLocation",
"ofType": nil,
},
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "args",
"description": nil,
"args": []interface{}{},
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "LIST",
"name": nil,
"ofType": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "OBJECT",
"name": "__InputValue",
"ofType": nil,
},
},
},
},
"isDeprecated": false,
"deprecationReason": nil,
},
},
"inputFields": nil,
"interfaces": []interface{}{},
"enumValues": nil,
"possibleTypes": nil,
},
map[string]interface{}{
"kind": "ENUM",
"name": "__DirectiveLocation",
"description": "A Directive can be adjacent to many parts of the GraphQL language, a __DirectiveLocation describes one such possible adjacencies.",
"fields": nil,
"inputFields": nil,
"interfaces": nil,
"enumValues": []interface{}{
map[string]interface{}{
"name": "QUERY",
"description": "Location adjacent to a query operation.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "MUTATION",
"description": "Location adjacent to a mutation operation.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "SUBSCRIPTION",
"description": "Location adjacent to a subscription operation.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "FIELD",
"description": "Location adjacent to a field.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "FRAGMENT_DEFINITION",
"description": "Location adjacent to a fragment definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "FRAGMENT_SPREAD",
"description": "Location adjacent to a fragment spread.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INLINE_FRAGMENT",
"description": "Location adjacent to an inline fragment.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "SCHEMA",
"description": "Location adjacent to a schema definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "SCALAR",
"description": "Location adjacent to a scalar definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "OBJECT",
"description": "Location adjacent to an object type definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "FIELD_DEFINITION",
"description": "Location adjacent to a field definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "ARGUMENT_DEFINITION",
"description": "Location adjacent to an argument definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INTERFACE",
"description": "Location adjacent to an interface definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "UNION",
"description": "Location adjacent to a union definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "ENUM",
"description": "Location adjacent to an enum definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "ENUM_VALUE",
"description": "Location adjacent to an enum value definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INPUT_OBJECT",
"description": "Location adjacent to an input object type definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
map[string]interface{}{
"name": "INPUT_FIELD_DEFINITION",
"description": "Location adjacent to an input object field definition.",
"isDeprecated": false,
"deprecationReason": nil,
},
},
"possibleTypes": nil,
},
}...)
return res
}
/*
GetDirectivesIntrospection returns the introspection for all available directives.
*/
func (rt *selectionSetRuntime) GetDirectivesIntrospection() interface{} {
return []interface{}{
map[string]interface{}{
"name": "skip",
"description": "Directs the executor to skip this field or fragment when the `if` argument is true.",
"locations": []interface{}{
"FIELD",
"FRAGMENT_SPREAD",
"INLINE_FRAGMENT",
},
"args": []interface{}{
map[string]interface{}{
"name": "if",
"description": "Skipped when true.",
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
},
"defaultValue": nil,
},
},
},
map[string]interface{}{
"name": "include",
"description": "Directs the executor to include this field or fragment only when the `if` argument is true.",
"locations": []interface{}{
"FIELD",
"FRAGMENT_SPREAD",
"INLINE_FRAGMENT",
},
"args": []interface{}{
map[string]interface{}{
"name": "if",
"description": "Included when true.",
"type": map[string]interface{}{
"kind": "NON_NULL",
"name": nil,
"ofType": map[string]interface{}{
"kind": "SCALAR",
"name": "Boolean",
"ofType": nil,
},
},
"defaultValue": nil,
},
},
},
}
}
/*
GetFields returns all fields of this selection set.
*/
func (rt *selectionSetRuntime) GetFields() map[string]*fieldRuntime {
resMap := make(map[string]*fieldRuntime)
fieldList := append(rt.node.Children[:0:0], rt.node.Children...) // Copy into new slice
for i := 0; i < len(fieldList); i++ {
c := fieldList[i]
// Check for skip and include directive
if rt.skipField([]string{}, c) {
continue
}
if c.Name == parser.NodeField {
// Handle simple fields - we ignore aliases as they will not be honored
// when filtering the introspection data
field := c.Runtime.(*fieldRuntime)
resMap[field.Name()] = field
} else if c.Name == parser.NodeFragmentSpread || c.Name == parser.NodeInlineFragment {
var fd fragmentRuntime
if c.Name == parser.NodeFragmentSpread {
// Lookup fragment spreads
fd = rt.rtp.fragments[c.Token.Val]
} else {
// Construct inline fragments
fd = c.Runtime.(*inlineFragmentDefinitionRuntime)
}
ss := fd.SelectionSet()
fieldList = append(fieldList, ss.Children...)
}
}
return resMap
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"fmt"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/lang/graphql/parser"
"devt.de/krotik/eliasdb/graph"
)
// Runtime definition
// ==================
/*
Instance function for runtime components
*/
type runtimeInst func(*GraphQLRuntimeProvider, *parser.ASTNode) parser.Runtime
/*
Runtime map for runtime components
*/
var runtimeProviderMap = map[string]runtimeInst{
parser.NodeEOF: invalidRuntimeInst,
parser.NodeDocument: documentRuntimeInst,
parser.NodeExecutableDefinition: executableDefinitionRuntimeInst,
parser.NodeFragmentDefinition: fragmentDefinitionRuntimeInst,
parser.NodeInlineFragment: inlineFragmentDefinitionRuntimeInst,
parser.NodeOperationDefinition: operationDefinitionRuntimeInst,
parser.NodeSelectionSet: selectionSetRuntimeInst,
parser.NodeField: fieldRuntimeInst,
parser.NodeDirective: argumentExpressionRuntimeInst,
parser.NodeObjectValue: valueRuntimeInst,
parser.NodeValue: valueRuntimeInst,
parser.NodeDefaultValue: valueRuntimeInst,
parser.NodeEnumValue: valueRuntimeInst,
parser.NodeListValue: valueRuntimeInst,
parser.NodeVariable: valueRuntimeInst,
}
// General runtime provider
// ========================
/*
QueryType is a known GraphQL query type
*/
type QueryType string
/*
All known query types
*/
const (
QueryTypeQuery QueryType = "query"
QueryTypeMutation QueryType = "mutation"
QueryTypeSubscription QueryType = "subscription"
)
/*
GraphQLRuntimeProvider defines the main interpreter
datastructure and all functions for general evaluation.
*/
type GraphQLRuntimeProvider struct {
Name string // Name to identify the input
QueryType QueryType // Query type (query, mutation, subscription)
OperationName string // Name of operation to execute
VariableValues map[string]interface{} // Values of variables
ErrorKeys []string // List of error hashes (used for deduplication)
Errors []*RuntimeError // List of errors
ErrorPaths [][]string // List of error paths
part string // Graph partition to query
gm *graph.Manager // GraphManager to operate on
callbackHandler SubscriptionCallbackHandler // Subscription callback handler for updates
subscriptionHandler *subscriptionHandler // Subscription handler which forwards events to the callback object
readOnly bool // Flag if only read operations are allowed
operation *parser.ASTNode // Operation to execute
fragments map[string]*fragmentDefinitionRuntime // Fragment definitions
}
/*
NewGraphQLRuntimeProvider creates a new GraphQLRuntimeProvider object.
*/
func NewGraphQLRuntimeProvider(name string, part string, gm *graph.Manager,
op string, vars map[string]interface{}, callbackHandler SubscriptionCallbackHandler,
readOnly bool) *GraphQLRuntimeProvider {
return &GraphQLRuntimeProvider{name, "", op, vars, []string{}, []*RuntimeError{},
[][]string{}, part, gm, callbackHandler, nil, readOnly, nil,
make(map[string]*fragmentDefinitionRuntime)}
}
/*
CheckWritePermission checks if the current query is allowed to modify data.
Returns true if data can be modified.
*/
func (rtp *GraphQLRuntimeProvider) CheckWritePermission(path []string, node *parser.ASTNode) bool {
if rtp.readOnly {
rtp.handleRuntimeError(fmt.Errorf("Can only perform read operations"),
path, node)
return false
}
if rtp.QueryType != QueryTypeMutation {
rtp.handleRuntimeError(fmt.Errorf("Operation must be a mutation to modify data"),
path, node)
return false
}
return true
}
/*
Initialise data structures.
*/
func (rtp *GraphQLRuntimeProvider) init() error {
rtp.QueryType = ""
rtp.operation = nil
return nil
}
/*
Runtime returns a runtime component for a given ASTNode.
*/
func (rtp *GraphQLRuntimeProvider) Runtime(node *parser.ASTNode) parser.Runtime {
if pinst, ok := runtimeProviderMap[node.Name]; ok {
return pinst(rtp, node)
}
return invalidRuntimeInst(rtp, node)
}
/*
TraverseAST traverses the AST starting with a given root and executes a given
visitor function on each node.
*/
func (rtp *GraphQLRuntimeProvider) TraverseAST(root *parser.ASTNode,
visitor func(*parser.ASTNode)) {
visitor(root)
for _, child := range root.Children {
rtp.TraverseAST(child, visitor)
}
}
// Document Runtime
// ================
type documentRuntime struct {
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
documentRuntimeInst creates a new document runtime instance.
*/
func documentRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &documentRuntime{rtp, node}
}
/*
Validate and reset this runtime component and all its child components.
*/
func (rt *documentRuntime) Validate() error {
err := rt.rtp.init()
for _, c := range rt.node.Children {
if err == nil {
err = c.Runtime.Validate()
}
}
if rt.rtp.operation == nil {
// We didn't find an operation to execute
if rt.rtp.OperationName == "" {
err = rt.rtp.newFatalRuntimeError(ErrMissingOperation,
"No executable expression found", rt.node)
} else {
err = rt.rtp.newFatalRuntimeError(ErrMissingOperation,
fmt.Sprintf("Operation %s not found", rt.rtp.OperationName), rt.node)
}
}
if err == nil && rt.rtp.QueryType == "" {
rt.rtp.QueryType = QueryTypeQuery
}
if err == nil {
// Check variables - types are not checked
ort := rt.rtp.operation.Runtime.(*operationDefinitionRuntime)
declared, defaultValues, _ := ort.DeclaredVariables()
// Build up variable values
vals := rt.rtp.VariableValues
rt.rtp.VariableValues = make(map[string]interface{})
for _, name := range declared {
val, ok := vals[name]
if ok {
rt.rtp.VariableValues[name] = val
} else {
rt.rtp.VariableValues[name] = defaultValues[name]
}
}
}
if err == nil {
// Collect fragment definitions
rt.rtp.TraverseAST(rt.node, func(n *parser.ASTNode) {
if err == nil && n.Name == parser.NodeFragmentDefinition {
fr := n.Runtime.(*fragmentDefinitionRuntime)
if _, ok := rt.rtp.fragments[fr.Name()]; ok {
err = rt.rtp.newFatalRuntimeError(ErrAmbiguousDefinition,
fmt.Sprintf("Fragment %s defined multiple times",
fr.Name()), rt.node)
}
if err == nil {
rt.rtp.fragments[fr.Name()] = fr
}
}
})
if err == nil {
// Validate that all fragment spreads can be resolved
rt.rtp.TraverseAST(rt.node, func(n *parser.ASTNode) {
if err == nil && n.Name == parser.NodeFragmentSpread {
name := n.Token.Val
if _, ok := rt.rtp.fragments[name]; !ok {
err = rt.rtp.newFatalRuntimeError(ErrInvalidConstruct,
fmt.Sprintf("Fragment %s is not defined",
name), rt.node)
}
}
})
}
}
return err
}
/*
Eval evaluates this runtime component.
*/
func (rt *documentRuntime) Eval() (map[string]interface{}, error) {
var err error
// First validate the query and reset the runtime provider datastructures
if rt.rtp.QueryType == "" {
if err = rt.Validate(); err != nil {
return nil, err
}
}
// Validate must have found the query type and the operation to execute
errorutil.AssertTrue(rt.rtp.QueryType != "", "Unknown querytype")
errorutil.AssertTrue(rt.rtp.operation != nil, "Unknown operation")
if rt.rtp.QueryType == QueryTypeSubscription && rt.rtp.callbackHandler != nil {
rt.rtp.InitSubscription(rt)
}
return rt.rtp.operation.Runtime.Eval()
}
// ExecutableDefinition Runtime
// ============================
type executableDefinitionRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
executableDefinitionRuntimeInst creates a new executable definition runtime instance.
*/
func executableDefinitionRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &executableDefinitionRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Validate and reset this runtime component and all its child components.
*/
func (rt *executableDefinitionRuntime) Validate() error {
if rt.rtp.operation == nil {
// Choose an operation to execute
if rt.node.Children[0].Name == parser.NodeOperationDefinition {
if rt.rtp.OperationName == "" {
// No operation name defined - take the first available operation
rt.rtp.operation = rt.node.Children[0]
// Check the operation type
if rt.node.Children[0].Children[0].Name == parser.NodeOperationType {
if rt.node.Children[0].Children[0].Token.Val == "mutation" {
rt.rtp.QueryType = QueryTypeMutation
} else if rt.node.Children[0].Children[0].Token.Val == "subscription" {
rt.rtp.QueryType = QueryTypeSubscription
}
}
} else {
// If an operation name is defined we must not have a query shorthand
if rt.node.Children[0].Children[0].Name == parser.NodeOperationType {
name := rt.node.Children[0].Children[1].Token.Val
if rt.rtp.OperationName == name {
// We found the operation to execute
if rt.node.Children[0].Children[0].Name == parser.NodeOperationType {
// See what type it is
if rt.node.Children[0].Children[0].Token.Val == "mutation" {
rt.rtp.QueryType = QueryTypeMutation
} else if rt.node.Children[0].Children[0].Token.Val == "subscription" {
rt.rtp.QueryType = QueryTypeSubscription
}
}
rt.rtp.operation = rt.node.Children[0]
}
}
}
}
}
return nil
}
// OperationDefinition Runtime
// ============================
type operationDefinitionRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
operationDefinitionRuntimeInst creates a new operation definition runtime instance.
*/
func operationDefinitionRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &operationDefinitionRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Eval evaluates this runtime component.
*/
func (rt *operationDefinitionRuntime) Eval() (map[string]interface{}, error) {
res := make(map[string]interface{})
// Execute the selection set
data, err := rt.node.Children[len(rt.node.Children)-1].Runtime.Eval()
res["data"] = data
// Collect errors
resErr := make([]map[string]interface{}, 0)
for i, rterr := range rt.rtp.Errors {
resErr = append(resErr, map[string]interface{}{
"message": rterr.Detail,
"locations": []map[string]interface{}{
{
"line": rterr.Line,
"column": rterr.Pos,
},
},
"path": rt.rtp.ErrorPaths[i],
})
}
if len(resErr) > 0 {
// Only add errors if there are any (@spec 7.1.2)
res["errors"] = resErr
}
return res, err
}
/*
DeclaredVariables returns all declared variables as a list, their default
values (if defined) as a map and their types as a map.
*/
func (rt *operationDefinitionRuntime) DeclaredVariables() ([]string, map[string]interface{}, map[string]string) {
declared := make([]string, 0)
defValues := make(map[string]interface{})
types := make(map[string]string)
for _, c := range rt.node.Children {
if c.Name == parser.NodeVariableDefinitions {
for _, vardef := range c.Children {
name := vardef.Children[0].Token.Val
declared = append(declared, name)
if len(vardef.Children) > 2 {
defValues[name] = vardef.Children[2].Runtime.(*valueRuntime).Value()
}
types[name] = vardef.Children[1].Token.Val
}
}
}
return declared, defValues, types
}
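/*
For illustration (a hypothetical operation, not part of the original code):
given

	query getPerson($key: String = "1") { ... }

DeclaredVariables would return ["key"] as the declared list, {"key": "1"}
as the default values and {"key": "String"} as the types.
*/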
// FragmentDefinition Runtime
// ==========================
type fragmentDefinitionRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
fragmentDefinitionRuntimeInst creates a new fragment definition runtime instance.
*/
func fragmentDefinitionRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &fragmentDefinitionRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Name returns the name of the fragment definition.
*/
func (rt *fragmentDefinitionRuntime) Name() string {
return rt.node.Children[0].Token.Val
}
/*
TypeCondition returns the type condition of the fragment definition.
*/
func (rt *fragmentDefinitionRuntime) TypeCondition() string {
return rt.node.Children[1].Token.Val
}
/*
SelectionSet returns the selection set of the fragment definition.
*/
func (rt *fragmentDefinitionRuntime) SelectionSet() *parser.ASTNode {
return rt.node.Children[len(rt.node.Children)-1]
}
// InlineFragmentDefinition Runtime
// ================================
type inlineFragmentDefinitionRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
inlineFragmentDefinitionRuntimeInst creates a new inline fragment definition runtime instance.
*/
func inlineFragmentDefinitionRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &inlineFragmentDefinitionRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
TypeCondition returns the type condition of the inline fragment definition.
*/
func (rt *inlineFragmentDefinitionRuntime) TypeCondition() string {
return rt.node.Children[0].Token.Val
}
/*
SelectionSet returns the selection set of the inline fragment definition.
*/
func (rt *inlineFragmentDefinitionRuntime) SelectionSet() *parser.ASTNode {
return rt.node.Children[len(rt.node.Children)-1]
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"crypto/sha256"
"encoding/base64"
"errors"
"fmt"
"devt.de/krotik/common/lang/graphql/parser"
"devt.de/krotik/common/stringutil"
)
/*
handleRuntimeError handles any errors which happen at runtime.
*/
func (rtp *GraphQLRuntimeProvider) handleRuntimeError(err error, path []string, node *parser.ASTNode) {
if err != nil {
// Deduplicate errors (no point in showing the same error twice)
hasher := sha256.New()
hasher.Write([]byte(err.Error()))
hasher.Write([]byte(fmt.Sprint(path)))
hasher.Write([]byte(fmt.Sprint(node.Token.Lline)))
hasher.Write([]byte(fmt.Sprint(node.Token.Lpos)))
errorHash := base64.URLEncoding.EncodeToString(hasher.Sum(nil))
if stringutil.IndexOf(errorHash, rtp.ErrorKeys) == -1 {
rtp.Errors = append(rtp.Errors,
&RuntimeError{rtp.Name, ErrRuntimeError, err.Error(), node,
node.Token.Lline, node.Token.Lpos, false, rtp})
rtp.ErrorPaths = append(rtp.ErrorPaths, path)
rtp.ErrorKeys = append(rtp.ErrorKeys, errorHash)
}
}
}
/*
newFatalRuntimeError creates a new fatal RuntimeError object.
*/
func (rtp *GraphQLRuntimeProvider) newFatalRuntimeError(t error, d string, node *parser.ASTNode) error {
return &RuntimeError{rtp.Name, t, d, node, node.Token.Lline, node.Token.Lpos, true, rtp}
}
/*
RuntimeError is a runtime related error
*/
type RuntimeError struct {
Source string // Name of the source which was given to the parser
Type error // Error type (to be used for equal checks)
Detail string // Details of this error
Node *parser.ASTNode // AST Node where the error occurred
Line int // Line of the error
Pos int // Position of the error
IsFatal bool // Is a fatal error which should stop the whole operation
RuntimeProvider *GraphQLRuntimeProvider // Runtime provider which produced this error
}
/*
Error returns a human-readable string representation of this error.
*/
func (re *RuntimeError) Error() string {
op := re.RuntimeProvider.QueryType
if op == "" {
op = "operation"
}
fatal := ""
if re.IsFatal {
fatal = "Fatal "
}
ret := fmt.Sprintf("%sGraphQL %s error in %s: %v (%v)", fatal, op,
re.Source, re.Type, re.Detail)
if re.Line != 0 {
ret = fmt.Sprintf("%s (Line:%d Pos:%d)", ret, re.Line, re.Pos)
}
return ret
}
/*
Runtime related error types
*/
var (
ErrInvalidConstruct = errors.New("Invalid construct")
ErrAmbiguousDefinition = errors.New("Ambiguous definition")
ErrMissingOperation = errors.New("Missing operation")
ErrRuntimeError = errors.New("Runtime error")
)
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"fmt"
"regexp"
"strconv"
"strings"
"devt.de/krotik/common/lang/graphql/parser"
"devt.de/krotik/common/stringutil"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
// SelectionSet Runtime
// ====================
/*
Runtime for SelectionSets.
*/
type selectionSetRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
selectionSetRuntimeInst returns a new runtime component instance.
*/
func selectionSetRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &selectionSetRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Eval evaluates this runtime component.
*/
func (rt *selectionSetRuntime) Eval() (map[string]interface{}, error) {
var err error
// Build result data
res := make(map[string]interface{})
for _, c := range rt.node.Children {
// Lookup nodes
if c.Name == parser.NodeField {
field := c.Runtime.(*fieldRuntime)
if field.Name() == "__schema" {
// We have an introspection query - handle this one in a special way
res[field.Alias()] = field.SelectionSetRuntime().ProcessIntrospection()
} else if field.SelectionSetRuntime() != nil {
nodes := field.SelectionSetRuntime().ProcessNodes([]string{field.Alias()},
field.Name(), field.Arguments(), nil)
res[field.Alias()] = nodes
}
}
}
return res, err
}
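/*
Example query for illustration - selecting the special __schema field
triggers the introspection handling above:

	{
	  __schema {
	    types {
	      name
	    }
	  }
	}
*/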
/*
nodeIterator is an object which can iterate over nodes.
*/
type nodeIterator interface {
Next() (string, string)
HasNext() bool
Error() error
}
/*
nodeKeyIteratorWrapper wraps around a normal node key iterator.
*/
type nodeKeyIteratorWrapper struct {
kind string
*graph.NodeKeyIterator
}
func (ni *nodeKeyIteratorWrapper) Next() (string, string) {
return ni.NodeKeyIterator.Next(), ni.kind
}
/*
traversalIterator contains a traversal result.
*/
type traversalIterator struct {
index int
nodeList []data.Node
}
func (ti *traversalIterator) Next() (string, string) {
next := ti.nodeList[ti.index]
ti.index++
return next.Key(), next.Kind()
}
func (ti *traversalIterator) HasNext() bool {
return ti.index < len(ti.nodeList)
}
func (ti *traversalIterator) Error() error {
return nil
}
func (rt *selectionSetRuntime) checkArgs(path []string, args map[string]interface{}) {
knownArgs := []string{"key", "matches", "traverse", "storeNode",
"storeEdge", "removeNode", "removeEdge", "ascending", "descending",
"from", "items", "last"}
for arg := range args {
if stringutil.IndexOf(arg, knownArgs) == -1 {
rt.rtp.handleRuntimeError(fmt.Errorf("Unknown argument: %s", arg),
path, rt.node)
}
}
}
/*
ProcessNodes uses the selection set to look up / store nodes. The kind is not
set during a traversal.
*/
func (rt *selectionSetRuntime) ProcessNodes(path []string, kind string,
args map[string]interface{}, it nodeIterator) []map[string]interface{} {
var from, items, last int
var ascending, descending string
var err error
res := make([]map[string]interface{}, 0)
// Get only the attributes which were specified
attrs, aliasMap, traversalMap := rt.GetPlainFieldsAndAliases(path, kind)
addToRes := func(node data.Node) error {
var err error
r := make(map[string]interface{})
for alias, attr := range aliasMap {
if err == nil {
if traversal, ok := traversalMap[alias]; ok {
var nodes []data.Node
// Assign to the outer err here (no :=) so traversal errors are not
// silently dropped by shadowing
nodes, _, err = rt.rtp.gm.TraverseMulti(rt.rtp.part,
node.Key(), node.Kind(), traversal.spec, false)
if err == nil {
data.NodeSort(nodes)
r[alias] = traversal.selectionSetRuntime.ProcessNodes(
append(path, traversal.spec), "", traversal.args,
&traversalIterator{0, nodes})
}
} else {
r[alias] = node.Attr(attr)
}
}
}
if err == nil {
res = append(res, r)
}
return err
}
// Check arguments
rt.checkArgs(path, args)
if it == nil {
err = rt.handleMutationArgs(path, args, kind)
}
if err == nil {
ascending, descending, from, items, last, err = rt.handleOutputArgs(args)
if err == nil {
if key, ok := args["key"]; ok && it == nil {
var node data.Node
// Lookup a single node
if node, err = rt.rtp.FetchNode(rt.rtp.part, fmt.Sprint(key), kind); err == nil && node != nil {
addToRes(node)
}
} else {
matchesRegexMap := make(map[string]*regexp.Regexp)
matchAttrs := make([]string, 0)
// Handle matches expression
matches, matchesOk := args["matches"]
matchesMap, matchesMapOk := matches.(map[string]interface{})
if matchesOk {
if matchesMapOk {
for k, v := range matchesMap {
matchAttrs = append(matchAttrs, k)
if valueList, ok := v.([]interface{}); ok {
stringList := make([]string, 0, len(valueList))
// Shortcut for matching against multiple string values
for _, val := range valueList {
stringList = append(stringList, regexp.QuoteMeta(fmt.Sprint(val)))
}
v = fmt.Sprintf("^(%v)$", strings.Join(stringList, "|"))
}
if re, rerr := regexp.Compile(fmt.Sprint(v)); rerr == nil {
matchesRegexMap[k] = re
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Regex %s did not compile: %s", v, rerr.Error()),
path, rt.node)
}
}
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Matches expression is not a map"),
path, rt.node)
}
}
// Lookup a list of nodes
if it == nil {
var kit *graph.NodeKeyIterator
kit, err = rt.rtp.gm.NodeKeyIterator(rt.rtp.part, kind)
if kit != nil {
it = &nodeKeyIteratorWrapper{kind, kit}
}
}
if it != nil && err == nil {
for err == nil && it.HasNext() {
var node data.Node
if err = it.Error(); err == nil {
nkey, nkind := it.Next()
if kind == "" {
// If the kind is not fixed we need to reevaluate the attributes
// to query for every node
attrs, aliasMap, traversalMap = rt.GetPlainFieldsAndAliases(path, nkind)
}
if node, err = rt.rtp.FetchNodePart(rt.rtp.part, nkey,
nkind, append(attrs, matchAttrs...)); err == nil && node != nil {
if matchesOk && !rt.matchNode(node, matchesRegexMap) {
continue
}
err = addToRes(node)
}
}
}
}
}
// Check if the result should be sorted
if err == nil {
if _, aok := args["ascending"]; aok {
dataSort(res, ascending, true)
} else if _, dok := args["descending"]; dok {
dataSort(res, descending, false)
}
}
// Check if the result should be truncated
if last > 0 && last < len(res) {
res = res[len(res)-last:]
}
if from > 0 || items > 0 {
if from >= len(res) {
from = 0
}
if from+items > len(res) {
res = res[from:]
} else {
res = res[from : from+items]
}
}
}
}
rt.rtp.handleRuntimeError(err, path, rt.node)
return res
}
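/*
Example query for illustration - looking up a single node by key and
following a traversal via the "traverse" argument (without "traverse" the
field name is used as the target kind - see GetPlainFieldsAndAliases):

	{
	  Person(key : "1") {
	    name
	    friends(traverse : ":::Person") {
	      name
	    }
	  }
	}
*/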
/*
handleOutputArgs handles arguments which modify the output presentation.
*/
func (rt *selectionSetRuntime) handleOutputArgs(args map[string]interface{}) (string, string, int, int, int, error) {
var from, items, last int
var ascending, descending string
var err error
ascendingData, aok := args["ascending"]
descendingData, dok := args["descending"]
if aok && dok {
err = fmt.Errorf("Cannot specify ascending and descending sorting")
} else if aok {
ascending = fmt.Sprint(ascendingData)
} else if dok {
descending = fmt.Sprint(descendingData)
}
if err == nil {
if lastText, ok := args["last"]; ok {
last, err = strconv.Atoi(fmt.Sprint(lastText))
}
}
if err == nil {
if fromText, ok := args["from"]; ok {
from, err = strconv.Atoi(fmt.Sprint(fromText))
}
}
if err == nil {
if itemsText, ok := args["items"]; ok {
items, err = strconv.Atoi(fmt.Sprint(itemsText))
}
}
return ascending, descending, from, items, last, err
}
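/*
Example query for illustration - sorting and pagination arguments as handled
above ("from" and "items" select a window of the result, "last" truncates
from the end):

	{
	  Person(ascending : "name", from : 10, items : 5) {
	    name
	  }
	}
*/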
/*
handleMutationArgs handles node and edge insertion and removal.
*/
func (rt *selectionSetRuntime) handleMutationArgs(path []string, args map[string]interface{}, kind string) error {
var err error
if toStore, ok := args["storeNode"]; ok && rt.rtp.CheckWritePermission(path, rt.node) {
toStoreMap, ok := toStore.(map[string]interface{})
if ok {
// Handle mutations of nodes
node := data.NewGraphNodeFromMap(toStoreMap)
if node.Kind() == "" {
node.SetAttr("kind", kind)
}
err = rt.rtp.gm.StoreNode(rt.rtp.part, node)
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Object required for node attributes and values"),
path, rt.node)
}
}
if toRemove, ok := args["removeNode"]; ok && rt.rtp.CheckWritePermission(path, rt.node) {
toRemoveMap, ok := toRemove.(map[string]interface{})
if ok {
// Handle removal of nodes
node := data.NewGraphNodeFromMap(toRemoveMap)
if node.Kind() == "" {
node.SetAttr("kind", kind)
}
if node.Key() == "" {
var it *graph.NodeKeyIterator
if it, err = rt.rtp.gm.NodeKeyIterator(rt.rtp.part, node.Kind()); err == nil {
var keys []string
for it.HasNext() && err == nil {
keys = append(keys, it.Next())
err = it.Error()
}
if err == nil {
for _, key := range keys {
if err == nil {
_, err = rt.rtp.gm.RemoveNode(rt.rtp.part, key, node.Kind())
}
}
}
}
} else {
_, err = rt.rtp.gm.RemoveNode(rt.rtp.part, node.Key(), node.Kind())
}
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Object required for node key and kind"),
path, rt.node)
}
}
if toStore, ok := args["storeEdge"]; err == nil && ok && rt.rtp.CheckWritePermission(path, rt.node) {
toStoreMap, ok := toStore.(map[string]interface{})
if ok {
// Handle mutations of edges
node := data.NewGraphEdgeFromNode(data.NewGraphNodeFromMap(toStoreMap))
err = rt.rtp.gm.StoreEdge(rt.rtp.part, node)
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Object required for edge attributes and values"),
path, rt.node)
}
}
if toRemove, ok := args["removeEdge"]; err == nil && ok && rt.rtp.CheckWritePermission(path, rt.node) {
toRemoveMap, ok := toRemove.(map[string]interface{})
if ok {
// Handle mutations of edges
node := data.NewGraphEdgeFromNode(data.NewGraphNodeFromMap(toRemoveMap))
_, err = rt.rtp.gm.RemoveEdge(rt.rtp.part, node.Key(), node.Kind())
} else {
rt.rtp.handleRuntimeError(fmt.Errorf("Object required for edge key and kind"),
path, rt.node)
}
}
return err
}
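/*
Example mutation for illustration - "storeNode" inserts or updates a node
(its kind defaults to the field name if not given in the object) and
"removeNode" removes matching nodes (all nodes of the kind if no key is
given):

	mutation {
	  Person(storeNode : { key : "1", name : "Marvin" }) {
	    key
	    name
	  }
	}
*/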
/*
matchNode matches a given node against a given node template. Returns true if
the template matches, false otherwise.
*/
func (rt *selectionSetRuntime) matchNode(node data.Node, nodeTemplate map[string]*regexp.Regexp) bool {
nodeData := node.Data()
for k, v := range nodeTemplate {
// Check if the match query should be negated
negate := false
if strings.HasPrefix(k, "not_") {
k = k[4:]
negate = true
}
mapAttr, ok := nodeData[k]
if !ok {
return false // Attribute does not exist
}
if negate {
if v.MatchString(fmt.Sprint(mapAttr)) {
return false // Attribute is the same
}
} else {
if !v.MatchString(fmt.Sprint(mapAttr)) {
return false // Attribute is not the same
}
}
}
return true
}
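/*
Example query for illustration - values of "matches" are compiled as regular
expressions, a list is a shortcut for matching any of several exact values
and a "not_" prefix negates the match:

	{
	  Person(matches : { name : ["Marvin", "Zaphod"], not_role : "robot" }) {
	    name
	  }
	}
*/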
/*
traversal captures all required data for a traversal during node lookup.
*/
type traversal struct {
spec string
args map[string]interface{}
selectionSetRuntime *selectionSetRuntime
}
/*
fragmentRuntime is the common interface for all fragment runtimes.
*/
type fragmentRuntime interface {
TypeCondition() string
SelectionSet() *parser.ASTNode
}
/*
GetPlainFieldsAndAliases returns all fields as a list of node attributes, a map of
aliases to names and a map from aliases to traversals.
*/
func (rt *selectionSetRuntime) GetPlainFieldsAndAliases(path []string, kind string) (
[]string, map[string]string, map[string]*traversal) {
errMultiFields := make([]string, 0)
resList := []string{"key", "kind"}
resMap := make(map[string]string)
traversalMap := make(map[string]*traversal)
fieldList := append(rt.node.Children[:0:0], rt.node.Children...) // Copy into new slice
for i := 0; i < len(fieldList); i++ {
var lastChild *parser.ASTNode
c := fieldList[i]
if len(c.Children) > 0 {
lastChild = c.Children[len(c.Children)-1]
}
// Check for skip and include directive
if rt.skipField(path, c) {
continue
}
if c.Name == parser.NodeField {
// Handle simple fields
field := c.Runtime.(*fieldRuntime)
if _, ok := resMap[field.Alias()]; ok {
// Alias was used before
if stringutil.IndexOf(field.Alias(), errMultiFields) == -1 {
errMultiFields = append(errMultiFields, field.Alias())
}
continue
}
// Map alias to name and process the field
resMap[field.Alias()] = field.Name()
if lastChild.Name == parser.NodeSelectionSet {
args := field.Arguments()
// Handle traversals
if spec, ok := args["traverse"]; ok {
traversalMap[field.Alias()] = &traversal{
spec: fmt.Sprint(spec),
args: args,
selectionSetRuntime: field.SelectionSetRuntime(),
}
} else {
// Shortcut to take the name as the traversal kind
traversalMap[field.Alias()] = &traversal{
spec: fmt.Sprintf(":::%v", field.Name()),
args: args,
selectionSetRuntime: field.SelectionSetRuntime(),
}
}
} else if stringutil.IndexOf(field.Name(), resList) == -1 {
// Handle normal attribute lookup
resList = append(resList, field.Name())
}
} else if c.Name == parser.NodeFragmentSpread || c.Name == parser.NodeInlineFragment {
var fd fragmentRuntime
if c.Name == parser.NodeFragmentSpread {
// Lookup fragment spreads
fd = rt.rtp.fragments[c.Token.Val]
} else {
// Construct inline fragments
fd = c.Runtime.(*inlineFragmentDefinitionRuntime)
}
if fd.TypeCondition() != kind {
// Type condition was not met - just skip the fragment
continue
}
ss := fd.SelectionSet()
fieldList = append(fieldList, ss.Children...)
}
}
if len(errMultiFields) > 0 {
for _, name := range errMultiFields {
rt.rtp.handleRuntimeError(fmt.Errorf(
"Field identifier %s used multiple times", name),
path, rt.node)
}
}
return resList, resMap, traversalMap
}
/*
skipField checks if a given field has a skip or include directive and returns
true if the directive excludes the field.
*/
func (rt *selectionSetRuntime) skipField(path []string, node *parser.ASTNode) bool {
for _, c := range node.Children {
if c.Name == parser.NodeDirectives {
for _, directive := range c.Children {
rt := directive.Runtime.(*argumentExpressionRuntime)
name := rt.Name()
args := rt.Arguments()
if name == "skip" || name == "include" {
if cond, ok := args["if"]; ok {
if name == "skip" {
skip, _ := strconv.ParseBool(fmt.Sprint(cond))
return skip
}
include, _ := strconv.ParseBool(fmt.Sprint(cond))
return !include
}
rt.rtp.handleRuntimeError(fmt.Errorf(
"Directive %s is missing the 'if' argument", name), path, c)
}
}
}
}
return false
}
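/*
Example query for illustration - the standard skip and include directives as
evaluated above:

	query ($withFriends: Boolean!) {
	  Person {
	    name
	    friends @include(if : $withFriends) {
	      name
	    }
	  }
	}
*/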
// ArgumentExpression Runtime
// ==========================
/*
Runtime for expressions with arguments.
*/
type argumentExpressionRuntime struct {
*invalidRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
argumentExpressionRuntimeInst returns a new runtime component instance.
*/
func argumentExpressionRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &argumentExpressionRuntime{&invalidRuntime{rtp, node}, rtp, node}
}
/*
Name returns the name of this field.
*/
func (rt *argumentExpressionRuntime) Name() string {
if rt.node.Children[0].Name == parser.NodeAlias {
return rt.node.Children[1].Token.Val
}
return rt.node.Children[0].Token.Val
}
/*
Arguments returns all arguments of the field as a map.
*/
func (rt *argumentExpressionRuntime) Arguments() map[string]interface{} {
res := make(map[string]interface{})
for _, c := range rt.node.Children {
if c.Name == parser.NodeArguments {
for _, a := range c.Children {
res[a.Children[0].Token.Val] = a.Children[1].Runtime.(*valueRuntime).Value()
}
}
}
return res
}
// Field Runtime
// =============
/*
Runtime for Fields.
*/
type fieldRuntime struct {
*argumentExpressionRuntime
rtp *GraphQLRuntimeProvider
node *parser.ASTNode
}
/*
fieldRuntimeInst returns a new runtime component instance.
*/
func fieldRuntimeInst(rtp *GraphQLRuntimeProvider, node *parser.ASTNode) parser.Runtime {
return &fieldRuntime{&argumentExpressionRuntime{&invalidRuntime{rtp, node},
rtp, node}, rtp, node}
}
/*
Alias returns the alias of this field.
*/
func (rt *fieldRuntime) Alias() string {
return rt.node.Children[0].Token.Val
}
/*
SelectionSetRuntime returns the SelectionSet runtime of this field.
*/
func (rt *fieldRuntime) SelectionSetRuntime() *selectionSetRuntime {
res, _ := rt.node.Children[len(rt.node.Children)-1].Runtime.(*selectionSetRuntime)
return res
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package interpreter
import (
"encoding/json"
"fmt"
"strings"
"sync"
"devt.de/krotik/common/cryptutil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/data"
)
/*
SystemRuleGraphQLSubscriptionsName is the name of the graph manager rule which
deals with subscriptions.
*/
const SystemRuleGraphQLSubscriptionsName = "system.graphqlsubscriptions"
/*
SubscriptionCallbackHandler receives source stream events for a subscription.
*/
type SubscriptionCallbackHandler interface {
/*
Publish is called for every event in the source stream of a subscription.
This function should map the source stream event to a response stream event.
*/
Publish(map[string]interface{}, error)
/*
IsFinished should return true if this handler should no longer
receive events.
*/
IsFinished() bool
}
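/*
collectingCallbackHandler is a minimal example implementation of
SubscriptionCallbackHandler (an illustration only - not part of the original
code). It collects published responses until it is marked as finished.
*/
type collectingCallbackHandler struct {
	results  []map[string]interface{}
	finished bool
}

/*
Publish stores a response stream event.
*/
func (ch *collectingCallbackHandler) Publish(res map[string]interface{}, err error) {
	if err == nil {
		ch.results = append(ch.results, res)
	}
}

/*
IsFinished returns true once the handler should no longer receive events.
*/
func (ch *collectingCallbackHandler) IsFinished() bool {
	return ch.finished
}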
var ruleMap = make(map[string]*SystemRuleGraphQLSubscriptions)
/*
InitSubscription ensures that the current graph manager has a rule for
subscriptions to monitor data changes and forwards events to the subscription
callback handler.
*/
func (rtp *GraphQLRuntimeProvider) InitSubscription(rt *documentRuntime) {
var rule *SystemRuleGraphQLSubscriptions
if rt.rtp.subscriptionHandler != nil {
// We already have a handler - no need to create another
return
}
// Lookup or create rule
for _, r := range rtp.gm.GraphRules() {
if strings.HasPrefix(r, SystemRuleGraphQLSubscriptionsName) {
id := strings.Split(r, "-")[1]
rule = ruleMap[id]
errorutil.AssertTrue(rule != nil, "Previously created rule not found")
}
}
if rule == nil {
rule = &SystemRuleGraphQLSubscriptions{
fmt.Sprintf("%x", cryptutil.GenerateUUID()),
make(map[string]*subscriptionHandler),
&sync.RWMutex{},
}
rtp.gm.SetGraphRule(rule)
ruleMap[rule.ID] = rule
}
rtp.subscriptionHandler = &subscriptionHandler{
fmt.Sprintf("%x", cryptutil.GenerateUUID()),
rtp.part,
make(map[string]string),
&sync.RWMutex{},
rt,
"",
rtp.callbackHandler,
rule,
}
rule.AddHandler(rtp.subscriptionHandler)
}
/*
subscriptionHandler coordinates a subscription.
*/
type subscriptionHandler struct {
id string // Unique ID which identifies the handler
part string // Partition this handler is monitoring
monitoredKinds map[string]string // All kinds which are monitored (for updates)
monitoredKindsLock *sync.RWMutex // Lock for monitored kinds
rt *documentRuntime // GraphQL document which can be executed
lastResponse string // Last response which was given to the callback handler
callbackHandler SubscriptionCallbackHandler // Handler which consumes updates
rule *SystemRuleGraphQLSubscriptions // Rule which is providing events
}
/*
HandleEvent handles an event from a rule and forwards it to the callbackHandler
if appropriate.
*/
func (h *subscriptionHandler) HandleEvent(event int, part string, node data.Node) {
defer func() {
// Check if the subscription is still needed - the IsFinished call could
// for example be used to trigger a Done() call on a WaitGroup.
if h.callbackHandler.IsFinished() {
// Unsubscribe this handler - we are done
h.rule.RemoveHandler(h)
}
}()
// Only care if we are in the right partition
if part == h.part {
if event == graph.EventNodeUpdated {
// If a node is updated only proceed if its kind is monitored - guard
// the map access as EnsureMonitoredKind may write concurrently
h.monitoredKindsLock.RLock()
_, ok := h.monitoredKinds[node.Kind()]
h.monitoredKindsLock.RUnlock()
if !ok {
return
}
}
// Rerun the query
resData, err := h.rt.Eval()
// Stringify the result and see if it is different from the last response
resBytes, _ := json.MarshalIndent(resData, "", " ")
resString := string(resBytes)
if h.lastResponse != resString || err != nil {
// Finally send the new result
h.callbackHandler.Publish(resData, err)
h.lastResponse = resString
}
}
}
/*
EnsureMonitoredKind ensures that the given kind is monitored for updates.
*/
func (h *subscriptionHandler) EnsureMonitoredKind(kind string) {
h.monitoredKindsLock.RLock()
if _, ok := h.monitoredKinds[kind]; !ok {
h.monitoredKindsLock.RUnlock()
h.monitoredKindsLock.Lock()
defer h.monitoredKindsLock.Unlock()
h.monitoredKinds[kind] = ""
} else {
h.monitoredKindsLock.RUnlock()
}
}
/*
FetchNode intercepts a FetchNode call to the graph.Manager in order to subscribe
to node updates if necessary.
*/
func (rtp *GraphQLRuntimeProvider) FetchNode(part string, key string, kind string) (data.Node, error) {
return rtp.FetchNodePart(part, key, kind, nil)
}
/*
FetchNodePart intercepts a FetchNodePart call to the graph.Manager in order to subscribe
to node updates if necessary.
*/
func (rtp *GraphQLRuntimeProvider) FetchNodePart(part string, key string, kind string, attrs []string) (data.Node, error) {
if rtp.subscriptionHandler != nil {
go rtp.subscriptionHandler.EnsureMonitoredKind(kind)
}
return rtp.gm.FetchNodePart(part, key, kind, attrs)
}
/*
SystemRuleGraphQLSubscriptions is a system rule to propagate state changes in the
datastore to all relevant GraphQL subscriptions.
*/
type SystemRuleGraphQLSubscriptions struct {
ID string // Unique ID which identifies the rule
handlers map[string]*subscriptionHandler
handlersLock *sync.RWMutex
}
/*
Name returns the name of the rule.
*/
func (r *SystemRuleGraphQLSubscriptions) Name() string {
return fmt.Sprintf("%s-%s", SystemRuleGraphQLSubscriptionsName, r.ID)
}
/*
Handles returns a list of events which are handled by this rule.
*/
func (r *SystemRuleGraphQLSubscriptions) Handles() []int {
return []int{
graph.EventNodeCreated,
graph.EventNodeUpdated,
graph.EventNodeDeleted,
}
}
/*
Handle handles an event.
*/
func (r *SystemRuleGraphQLSubscriptions) Handle(gm *graph.Manager, trans graph.Trans, event int, ed ...interface{}) error {
part := ed[0].(string)
node := ed[1].(data.Node)
r.handlersLock.RLock()
defer r.handlersLock.RUnlock()
for _, handler := range r.handlers {
// Event is handled in a separate go routine
go handler.HandleEvent(event, part, node)
}
return nil
}
/*
AddHandler adds a new handler for rule events.
*/
func (r *SystemRuleGraphQLSubscriptions) AddHandler(handler *subscriptionHandler) {
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
r.handlers[handler.id] = handler
}
/*
RemoveHandler removes a handler from receiving further rule events.
*/
func (r *SystemRuleGraphQLSubscriptions) RemoveHandler(handler *subscriptionHandler) {
r.handlersLock.Lock()
defer r.handlersLock.Unlock()
delete(r.handlers, handler.id)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package graphql contains the main API for GraphQL.
Example GraphQL query:
{
Person @withValue(name : "Marvin") {
key
kind
name
}
}
*/
package graphql
import (
"fmt"
"devt.de/krotik/common/lang/graphql/parser"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graphql/interpreter"
)
/*
RunQuery runs a GraphQL query against a given graph database. The query parameter
needs to have the following fields:
operationName - Operation to execute (string)
query - Query document (string)
variables - Variables map (map[string]interface{})
Set the readOnly flag if the query should only be allowed to do read operations.
*/
func RunQuery(name string, part string, query map[string]interface{},
gm *graph.Manager, callbackHandler interpreter.SubscriptionCallbackHandler,
readOnly bool) (map[string]interface{}, error) {
var ok bool
var vars map[string]interface{}
// Make sure all info is present on the query object
for _, op := range []string{"operationName", "query", "variables"} {
if _, ok := query[op]; !ok {
return nil, fmt.Errorf("Mandatory field '%s' missing from query object", op)
}
}
// Nil pointers become empty strings
if query["operationName"] == nil {
query["operationName"] = ""
}
if query["query"] == nil {
query["query"] = ""
}
if vars, ok = query["variables"].(map[string]interface{}); !ok {
vars = make(map[string]interface{})
}
// Create runtime provider
rtp := interpreter.NewGraphQLRuntimeProvider(name, part, gm,
fmt.Sprint(query["operationName"]), vars, callbackHandler, readOnly)
// Parse the query and annotate the AST with runtime components
ast, err := parser.ParseWithRuntime(name, fmt.Sprint(query["query"]), rtp)
if err == nil {
if err = ast.Runtime.Validate(); err == nil {
// Evaluate the query
return ast.Runtime.Eval()
}
}
return nil, err
}
/*
ParseQuery parses a GraphQL query and returns its Abstract Syntax Tree.
*/
func ParseQuery(name string, query string) (*parser.ASTNode, error) {
ast, err := parser.ParseWithRuntime(name, query, nil)
if err != nil {
return nil, err
}
return ast, nil
}
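/*
Example usage of RunQuery (a minimal sketch, assuming the in-memory graph
storage from devt.de/krotik/eliasdb/graph/graphstorage):

	mgs := graphstorage.NewMemoryGraphStorage("example")
	gm := graph.NewGraphManager(mgs)

	res, err := graphql.RunQuery("test", "main", map[string]interface{}{
		"operationName": nil,
		"query":         "{ Person { key, name } }",
		"variables":     nil,
	}, gm, nil, true)
*/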
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package hash provides a HTree implementation to provide key-value storage functionality
for a StorageManager.
The HTree provides a persistent hashtable, storing values in buckets on
pages as the tree grows. It is not possible to store nil values: storing a
nil value is equivalent to removing a key.
As the tree grows each tree level contains pages with links to underlying pages.
The last link is always to a bucket. The default tree has 4 levels each with
256 possible children. A hash code for the tree has 32 bits = 4 levels * 8 bits.
Hash buckets are on the lowest level of the tree and contain actual keys and
values. The object stores multiple keys and values if there are hash collisions.
In a sparsely populated tree buckets can also be found on the upper levels.
Iterator
Entries in the HTree can be iterated by using an HTreeIterator. The HTree may
change behind the iterator's back. The iterator will try to cope with best
effort and only report an error as a last resort.
Hash function
The HTree uses an implementation of Austin Appleby's MurmurHash3 (32bit) function
as hash function.
Reference implementation: http://code.google.com/p/smhasher/wiki/MurmurHash3
*/
package hash
import (
"fmt"
"sync"
"devt.de/krotik/eliasdb/storage"
)
/*
MaxTreeDepth is the maximum number of non-leaf levels in the tree (i.e. the complete tree has
a total of MaxTreeDepth+1 levels)
*/
const MaxTreeDepth = 3
/*
PageLevelBits is the number of significant bits per page level
*/
const PageLevelBits = 8
/*
MaxPageChildren is the maximum of children per page - (stored in PageLevelBits bits)
*/
const MaxPageChildren = 256
/*
MaxBucketElements is the maximum number of elements a bucket can contain before it
is converted into a page; leaf buckets are the exception and grow indefinitely
*/
const MaxBucketElements = 8
/*
HTree data structure
*/
type HTree struct {
Root *htreePage // Root page of the HTree
mutex *sync.Mutex // Mutex to protect tree operations
}
/*
htreeNode data structure - this object models the
HTree storage structure on disk
*/
type htreeNode struct {
tree *HTree // Reference to the HTree which owns this node (not persisted)
loc uint64 // Storage location of this page (not persisted)
sm storage.Manager // StorageManager instance which stores the tree data (not persisted)
Depth byte // Depth of this node
Children []uint64 // Storage locations of children (only used for pages)
Keys [][]byte // Stored keys (only used for buckets)
Values []interface{} // Stored values (only used for buckets)
BucketSize byte // Bucket size (only used for buckets)
}
/*
fetchNode fetches an HTree node from storage.
*/
func (n *htreeNode) fetchNode(loc uint64) (*htreeNode, error) {
var node *htreeNode
if obj, _ := n.sm.FetchCached(loc); obj == nil {
var res htreeNode
if err := n.sm.Fetch(loc, &res); err != nil {
return nil, err
}
node = &res
} else {
node = obj.(*htreeNode)
}
return node, nil
}
/*
NewHTree creates a new HTree.
*/
func NewHTree(sm storage.Manager) (*HTree, error) {
tree := &HTree{}
// Protect tree creation
cm := &sync.Mutex{}
cm.Lock()
defer cm.Unlock()
tree.Root = newHTreePage(tree, 0)
loc, err := sm.Insert(tree.Root.htreeNode)
if err != nil {
return nil, err
}
tree.Root.loc = loc
tree.Root.sm = sm
tree.mutex = &sync.Mutex{}
return tree, nil
}
/*
LoadHTree fetches an HTree from storage.
*/
func LoadHTree(sm storage.Manager, loc uint64) (*HTree, error) {
var tree *HTree
// Protect tree creation
cm := &sync.Mutex{}
cm.Lock()
defer cm.Unlock()
if obj, _ := sm.FetchCached(loc); obj == nil {
var res htreeNode
if err := sm.Fetch(loc, &res); err != nil {
return nil, err
}
tree = &HTree{&htreePage{&res}, nil}
} else {
tree = &HTree{&htreePage{obj.(*htreeNode)}, nil}
}
tree.Root.loc = loc
tree.Root.sm = sm
tree.mutex = &sync.Mutex{}
return tree, nil
}
/*
Location returns the HTree location on disk.
*/
func (t *HTree) Location() uint64 {
return t.Root.loc
}
/*
Get gets a value for a given key.
*/
func (t *HTree) Get(key []byte) (interface{}, error) {
t.mutex.Lock()
defer t.mutex.Unlock()
res, _, err := t.Root.Get(key)
return res, err
}
/*
GetValueAndLocation returns the value and the storage location for a given key.
*/
func (t *HTree) GetValueAndLocation(key []byte) (interface{}, uint64, error) {
t.mutex.Lock()
defer t.mutex.Unlock()
res, bucket, err := t.Root.Get(key)
if bucket != nil {
return res, bucket.loc, err
}
return res, 0, err
}
/*
Exists checks if an element exists.
*/
func (t *HTree) Exists(key []byte) (bool, error) {
t.mutex.Lock()
defer t.mutex.Unlock()
return t.Root.Exists(key)
}
/*
Put adds a new or updates an existing key / value pair.
*/
func (t *HTree) Put(key []byte, value interface{}) (interface{}, error) {
t.mutex.Lock()
defer t.mutex.Unlock()
return t.Root.Put(key, value)
}
/*
Remove removes a key / value pair.
*/
func (t *HTree) Remove(key []byte) (interface{}, error) {
t.mutex.Lock()
defer t.mutex.Unlock()
return t.Root.Remove(key)
}
/*
String returns a string representation of this tree.
*/
func (t *HTree) String() string {
t.mutex.Lock()
defer t.mutex.Unlock()
return fmt.Sprintf("HTree: %v (%v)\n%v", t.Root.sm.Name(), t.Root.loc, t.Root.String())
}
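/*
Example usage (a minimal sketch; assumes an initialized storage.Manager in "sm",
e.g. a MemoryStorageManager from the storage package):

	tree, err := NewHTree(sm)
	if err == nil {
		_, err = tree.Put([]byte("mykey"), "myvalue")
	}
	if err == nil {
		val, _ := tree.Get([]byte("mykey"))
		fmt.Println(val) // Output: myvalue
	}

The location returned by tree.Location() can be stored and later passed to
LoadHTree to retrieve the same tree from storage again.
*/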
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package hash
import (
"bytes"
"fmt"
"devt.de/krotik/common/stringutil"
)
/*
htreeBucket data structure
*/
type htreeBucket struct {
*htreeNode
}
/*
newHTreeBucket creates a new bucket for the HTree.
*/
func newHTreeBucket(tree *HTree, depth byte) *htreeBucket {
return &htreeBucket{&htreeNode{tree, 0, nil, depth, nil,
make([][]byte, MaxBucketElements),
make([]interface{}, MaxBucketElements), 0}}
}
/*
Size returns the size of this bucket.
*/
func (b *htreeBucket) Size() byte {
return b.BucketSize
}
/*
IsLeaf returns whether this bucket is a leaf node.
*/
func (b *htreeBucket) IsLeaf() bool {
return b.Depth == MaxTreeDepth+1
}
/*
HasRoom returns whether this bucket has room for more data.
*/
func (b *htreeBucket) HasRoom() bool {
if b.IsLeaf() {
return true
}
return b.BucketSize < MaxBucketElements
}
/*
Put adds a new or updates an existing key / value pair in the bucket.
*/
func (b *htreeBucket) Put(key []byte, value interface{}) interface{} {
if key == nil {
return nil
}
// Check if this is an update
for i, skey := range b.Keys {
if bytes.Equal(key, skey) {
old := b.Values[i]
b.Values[i] = value
return old
}
}
if !b.HasRoom() {
panic("Bucket has no more room")
}
if b.BucketSize >= MaxBucketElements {
b.Keys = append(b.Keys, key)
b.Values = append(b.Values, value)
b.BucketSize++
return nil
}
b.Keys[b.BucketSize] = key
b.Values[b.BucketSize] = value
b.BucketSize++
return nil
}
/*
Remove removes a key / value pair from the bucket.
*/
func (b *htreeBucket) Remove(key []byte) interface{} {
if key == nil || b.BucketSize == 0 {
return nil
}
// Look for the key
for i, skey := range b.Keys {
if bytes.Equal(key, skey) {
old := b.Values[i]
b.Keys[i] = b.Keys[b.BucketSize-1]
b.Values[i] = b.Values[b.BucketSize-1]
b.Keys[b.BucketSize-1] = nil
b.Values[b.BucketSize-1] = nil
b.BucketSize--
return old
}
}
return nil
}
/*
Get gets the value for a given key.
*/
func (b *htreeBucket) Get(key []byte) interface{} {
if key == nil || b.BucketSize == 0 {
return nil
}
// Look for the key
for i, skey := range b.Keys {
if bytes.Equal(key, skey) {
return b.Values[i]
}
}
return nil
}
/*
Exists checks if an element exists.
*/
func (b *htreeBucket) Exists(key []byte) bool {
if key == nil || b.BucketSize == 0 {
return false
}
// Look for the key
for _, skey := range b.Keys {
if bytes.Equal(key, skey) {
return true
}
}
return false
}
/*
String returns a string representation of this bucket.
*/
func (b *htreeBucket) String() string {
var j byte
buf := new(bytes.Buffer)
for j = 0; j < b.Depth; j++ {
buf.WriteString(" ")
}
buf.WriteString(fmt.Sprintf("HashBucket (%v element%s, depth: %v)\n",
b.Size(), stringutil.Plural(int(b.Size())), b.Depth))
for i, key := range b.Keys {
for j = 0; j < b.Depth; j++ {
buf.WriteString(" ")
}
buf.WriteString(fmt.Sprintf("%v - %v\n", key, b.Values[i]))
}
return buf.String()
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package hash
import (
"bytes"
"fmt"
)
/*
htreePage data structure
*/
type htreePage struct {
*htreeNode
}
/*
newHTreePage creates a new page for the HTree.
*/
func newHTreePage(tree *HTree, depth byte) *htreePage {
return &htreePage{&htreeNode{tree, 0, nil, depth, make([]uint64, MaxPageChildren), nil, nil, 0}}
}
/*
IsEmpty returns whether this page is empty.
*/
func (p *htreePage) IsEmpty() bool {
for _, child := range p.Children {
if child != 0 {
return false
}
}
return true
}
/*
Location returns the location of this HTree page.
*/
func (p *htreePage) Location() uint64 {
return p.loc
}
/*
Get gets a value for a given key.
*/
func (p *htreePage) Get(key []byte) (interface{}, *htreeBucket, error) {
hash := p.hashKey(key)
loc := p.Children[hash]
if loc != 0 {
node, err := p.fetchNode(loc)
if err != nil {
return nil, nil, err
}
if node.Children != nil {
// If another page was found delegate the request
page := &htreePage{node}
page.loc = loc
page.sm = p.sm
return page.Get(key)
}
// If a Bucket was found return the value
bucket := &htreeBucket{node}
bucket.loc = loc
bucket.sm = p.sm
return bucket.Get(key), bucket, nil
}
return nil, nil, nil
}
/*
Exists checks if an element exists.
*/
func (p *htreePage) Exists(key []byte) (bool, error) {
hash := p.hashKey(key)
loc := p.Children[hash]
if loc != 0 {
node, err := p.fetchNode(loc)
if err != nil {
return false, err
}
if node.Children != nil {
// If another page was found delegate the request
page := &htreePage{node}
page.loc = loc
page.sm = p.sm
return page.Exists(key)
}
// If a Bucket was found return the value
bucket := &htreeBucket{node}
return bucket.Exists(key), nil
}
return false, nil
}
/*
Put adds a new or updates an existing key / value pair.
*/
func (p *htreePage) Put(key []byte, value interface{}) (interface{}, error) {
// Putting a nil value will remove the element
if value == nil {
return p.Remove(key)
}
hash := p.hashKey(key)
loc := p.Children[hash]
if loc == 0 {
// If nothing exists yet for the hash code then create a new bucket
bucket := newHTreeBucket(p.tree, p.Depth+1)
existing := bucket.Put(key, value)
loc, err := p.sm.Insert(bucket.htreeNode)
if err != nil {
return nil, err
}
bucket.loc = loc
bucket.sm = p.sm
p.Children[hash] = loc
err = p.sm.Update(p.loc, p.htreeNode)
if err != nil {
return nil, err
}
return existing, nil
}
// Fetch the node at the hash position to determine whether it is a page or a bucket
node, err := p.fetchNode(loc)
if err != nil {
return nil, err
}
if node.Children != nil {
// If another page was found delegate the request
page := &htreePage{node}
page.loc = loc
page.sm = p.sm
return page.Put(key, value)
}
// If a bucket was found try to put the value on it if there is room
bucket := &htreeBucket{node}
bucket.loc = loc
bucket.sm = p.sm
if bucket.HasRoom() {
existing := bucket.Put(key, value)
return existing, p.sm.Update(bucket.loc, bucket.htreeNode)
}
// If the bucket is full create a new page one level deeper
if p.Depth == MaxTreeDepth {
panic("Max depth of HTree exceeded")
}
page := newHTreePage(p.tree, p.Depth+1)
ploc, err := p.sm.Insert(page.htreeNode)
if err != nil {
return nil, err
}
page.loc = ploc
page.sm = p.sm
p.Children[hash] = ploc
if err := p.sm.Update(p.loc, p.htreeNode); err != nil {
// Try to clean up
p.Children[hash] = loc
p.sm.Free(ploc)
return nil, err
}
// At this point the bucket has been removed from the list of children
// It is no longer part of the tree
// Try inserting all keys of the bucket into the newly created page
// and remove the bucket - no error checking here since the recovery
// steps would be too elaborate with little chance of success and
// might also damage the now intact tree
for i, key := range bucket.Keys {
page.Put(key, bucket.Values[i])
}
// Remove old bucket from file
p.sm.Free(bucket.loc)
// Finally insert key / value pair
return page.Put(key, value)
}
/*
Remove removes a key / value pair.
*/
func (p *htreePage) Remove(key []byte) (interface{}, error) {
hash := p.hashKey(key)
loc := p.Children[hash]
// Return if there is nothing to delete
if loc == 0 {
return nil, nil
}
node, err := p.fetchNode(loc)
if err != nil {
return nil, err
}
if node.Children != nil {
// If another page was found delegate the request
page := &htreePage{node}
page.loc = loc
page.sm = p.sm
ret, err := page.Remove(key)
if err != nil {
return ret, err
}
if page.IsEmpty() {
// Remove page if it is empty
p.Children[hash] = 0
if err := p.sm.Update(p.loc, p.htreeNode); err != nil {
return nil, err
}
return ret, p.sm.Free(loc)
}
return ret, nil
}
// If a bucket is found just remove the key / value pair
bucket := &htreeBucket{node}
bucket.loc = loc
bucket.sm = p.sm
ret := bucket.Remove(key)
// Either update or remove the bucket
if bucket.Size() > 0 {
return ret, p.sm.Update(bucket.loc, bucket.htreeNode)
}
p.Children[hash] = 0
if err := p.sm.Update(p.loc, p.htreeNode); err != nil {
return nil, err
}
return ret, p.sm.Free(loc)
}
/*
String returns a string representation of this page.
*/
func (p *htreePage) String() string {
var j byte
buf := new(bytes.Buffer)
for j = 0; j < p.Depth; j++ {
buf.WriteString(" ")
}
buf.WriteString(fmt.Sprintf("HashPage %v (depth: %v)\n", p.loc, p.Depth))
for hash, child := range p.Children {
if child != 0 {
for j = 0; j < p.Depth+1; j++ {
buf.WriteString(" ")
}
buf.WriteString(fmt.Sprintf("Hash %08X (loc: %v)\n", hash, child))
node, err := p.fetchNode(child)
if err != nil {
buf.WriteString(err.Error())
buf.WriteString("\n")
} else if node.Children != nil {
page := &htreePage{node}
page.loc = child
page.sm = p.sm
buf.WriteString(page.String())
} else {
bucket := &htreeBucket{node}
buf.WriteString(bucket.String())
}
}
}
return buf.String()
}
/*
hashKey calculates the hash code for a given key.
*/
func (p *htreePage) hashKey(key []byte) uint32 {
var hash, hashMask uint32
// Calculate mask depending on page depth (depth 0 keeps the most
// significant bits while depth MaxTreeDepth keeps the least significant bits)
hashMask = (MaxPageChildren - 1) << ((MaxTreeDepth - p.Depth) * PageLevelBits)
// Calculate hash and apply mask
hash, _ = MurMurHashData(key, 0, len(key)-1, 42)
hash = hash & hashMask
// Move the bytes to the least significant position
hash = hash >> ((MaxTreeDepth - p.Depth) * PageLevelBits)
return hash % MaxPageChildren
}
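/*
Worked example (illustrative only): with MaxTreeDepth = 3 and PageLevelBits = 8
a page at depth 0 computes

	hashMask = 0xFF << 24 // keeps the most significant byte
	hash     = (hash & hashMask) >> 24

so each deeper level consumes the next less significant byte of the 32 bit
hash value until a page at depth 3 uses the least significant byte.
*/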
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package hash
import (
"errors"
"fmt"
"devt.de/krotik/eliasdb/storage"
)
/*
ErrNoMoreItems is assigned to LastError when Next() is called and there are no
more items to iterate.
*/
var ErrNoMoreItems = errors.New("No more items to iterate")
/*
HTreeIterator data structure
*/
type HTreeIterator struct {
tree *HTree // Tree to iterate
nodePath []uint64 // Path in the tree we currently traversing
indices []int // List of the current indices in the current path
nextKey []byte // Next iterator key (overwritten by nextItem)
nextValue interface{} // Next iterator value
LastError error // Last encountered error
}
/*
NewHTreeIterator creates a new HTreeIterator.
*/
func NewHTreeIterator(tree *HTree) *HTreeIterator {
it := &HTreeIterator{tree, make([]uint64, 0), make([]int, 0), nil, nil, nil}
it.nodePath = append(it.nodePath, tree.Root.Location())
it.indices = append(it.indices, -1)
// Set the nextKey and nextValue properties
it.Next()
return it
}
/*
HasNext returns whether there is a next key / value pair.
*/
func (it *HTreeIterator) HasNext() bool {
return it.nextKey != nil
}
/*
Next returns the next key / value pair.
*/
func (it *HTreeIterator) Next() ([]byte, interface{}) {
key := it.nextKey
value := it.nextValue
if err := it.nextItem(); err != ErrNoMoreItems && err != nil {
it.LastError = err
// There was a serious error - terminate the iterator
it.nodePath = make([]uint64, 0)
it.indices = make([]int, 0)
it.nextKey = nil
it.nextValue = nil
}
return key, value
}
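/*
Example usage (a minimal sketch; assumes an initialized HTree in "tree"):

	it := NewHTreeIterator(tree)
	for it.HasNext() {
		key, value := it.Next()
		fmt.Println(key, value)
	}
	if it.LastError != nil {
		// The iterator encountered a serious error and terminated early
	}
*/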
/*
nextItem retrieves the next key / value pair for the iterator. The tree might
have changed significantly since the last call. Errors are handled on a
best-effort basis.
*/
func (it *HTreeIterator) nextItem() error {
// Check if there are more items available to iterate
if len(it.nodePath) == 0 {
it.nextKey = nil
it.nextValue = nil
return ErrNoMoreItems
}
// Get the current path element
loc := it.nodePath[len(it.nodePath)-1]
index := it.indices[len(it.indices)-1]
node, err := it.tree.Root.fetchNode(loc)
if err != nil {
if smr, ok := err.(*storage.ManagerError); ok && smr.Type == storage.ErrSlotNotFound {
// Something is wrong - the tree must have changed since the last
// nextItem call. Remove the path element and try again.
it.nodePath = it.nodePath[:len(it.nodePath)-1]
it.indices = it.indices[:len(it.indices)-1]
return it.nextItem()
}
// If it is another error there is something more serious - report it
return err
}
if node.Children != nil {
// If the current path element is a page get the next child and delegate
page := &htreePage{node}
page.loc = loc
page.sm = it.tree.Root.sm
nextChild := it.searchNextChild(page, index)
if nextChild != -1 {
// If we found another element then update the current index and delegate to it
it.indices[len(it.indices)-1] = nextChild
it.nodePath = append(it.nodePath, page.Children[nextChild])
it.indices = append(it.indices, -1)
return it.nextItem()
}
// If we finished this page remove it from the stack and continue
// with the parent
it.nodePath = it.nodePath[:len(it.nodePath)-1]
it.indices = it.indices[:len(it.indices)-1]
return it.nextItem()
}
// If the current path element is a bucket just iterate its elements and
// delegate to the parent once it is finished
bucket := &htreeBucket{node}
bucket.loc = loc
bucket.sm = it.tree.Root.sm
nextElement := it.searchNextElement(bucket, index)
if nextElement != -1 {
// If we found another element then update the current index and return it
it.indices[len(it.indices)-1] = nextElement
it.nextKey = bucket.Keys[nextElement]
it.nextValue = bucket.Values[nextElement]
return nil
}
// If we finished this bucket remove it from the stack and continue
// with the parent
it.nodePath = it.nodePath[:len(it.nodePath)-1]
it.indices = it.indices[:len(it.indices)-1]
return it.nextItem()
}
/*
searchNextChild searches for the index of the next available page child from a given index.
*/
func (it *HTreeIterator) searchNextChild(page *htreePage, current int) int {
for i := current + 1; i < MaxPageChildren; i++ {
child := page.Children[i]
if child != 0 {
return i
}
}
return -1
}
/*
searchNextElement searches for the index of the next available bucket element from a given index.
*/
func (it *HTreeIterator) searchNextElement(bucket *htreeBucket, current int) int {
next := current + 1
if next < int(bucket.BucketSize) {
return next
}
return -1
}
/*
String returns a string representation of this iterator.
*/
func (it *HTreeIterator) String() string {
return fmt.Sprintf("HTree Iterator (tree: %v)\n path: %v\n indices: %v\n next: %v / %v\n",
it.tree.Root.Location(), it.nodePath, it.indices, it.nextKey, it.nextValue)
}
/*
* Public Domain Software
*
* I (Matthias Ladkau) am the author of the source code in this file.
* I have placed the source code in this file in the public domain.
*
* For further information see: http://creativecommons.org/publicdomain/zero/1.0/
*/
package hash
import "fmt"
const (
c1 uint32 = 0xcc9e2d51
c2 uint32 = 0x1b873593
)
/*
MurMurHashData hashes a given array of bytes. This is an implementation
of Austin Appleby's MurmurHash3 (32bit) function.
Reference implementation: http://code.google.com/p/smhasher/wiki/MurmurHash3
*/
func MurMurHashData(data []byte, offset int, size int, seed int) (uint32, error) {
// Check parameters
if offset < 0 || size < 0 {
return 0, fmt.Errorf("Invalid data boundaries; offset: %v; size: %v",
offset, size)
}
h1 := uint32(seed)
end := offset + size
end -= end % 4
// Check length of available data
if len(data) <= end {
return 0, fmt.Errorf("Data out of bounds; set boundary: %v; data length: %v",
end, len(data))
}
for i := offset; i < end; i += 4 {
var k1 = uint32(data[i])
k1 |= uint32(data[i+1]) << 8
k1 |= uint32(data[i+2]) << 16
k1 |= uint32(data[i+3]) << 24
k1 *= c1
k1 = (k1 << 15) | (k1 >> 17) // ROTL32(k1,15);
k1 *= c2
h1 ^= k1
h1 = (h1 << 13) | (h1 >> 19) // ROTL32(h1,13);
h1 = h1*5 + 0xe6546b64
}
// Tail
var k1 uint32
switch size & 3 {
case 3:
k1 = uint32(data[end+2]) << 16
fallthrough
case 2:
k1 |= uint32(data[end+1]) << 8
fallthrough
case 1:
k1 |= uint32(data[end])
k1 *= c1
k1 = (k1 << 15) | (k1 >> 17) // ROTL32(k1,15);
k1 *= c2
h1 ^= k1
}
h1 ^= uint32(size)
h1 ^= h1 >> 16
h1 *= 0x85ebca6b
h1 ^= h1 >> 13
h1 *= 0xc2b2ae35
h1 ^= h1 >> 16
return h1, nil
}
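/*
Example usage (a minimal sketch; the data bytes are arbitrary):

	data := []byte{0xf6, 0x02, 0x03, 0x04}
	hash, err := MurMurHashData(data, 0, len(data)-1, 42)

This mirrors how the HTree code calls the function, i.e. with an offset of 0,
a size of len(data)-1 and a fixed seed of 42.
*/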
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package server contains the code for the EliasDB server.
*/
package server
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"devt.de/krotik/common/cryptutil"
"devt.de/krotik/common/datautil"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/fileutil"
"devt.de/krotik/common/httputil"
"devt.de/krotik/common/httputil/access"
"devt.de/krotik/common/httputil/auth"
"devt.de/krotik/common/httputil/user"
"devt.de/krotik/common/lockutil"
"devt.de/krotik/common/timeutil"
"devt.de/krotik/eliasdb/api"
"devt.de/krotik/eliasdb/api/ac"
v1 "devt.de/krotik/eliasdb/api/v1"
"devt.de/krotik/eliasdb/cluster"
"devt.de/krotik/eliasdb/cluster/manager"
"devt.de/krotik/eliasdb/config"
"devt.de/krotik/eliasdb/ecal"
"devt.de/krotik/eliasdb/graph"
"devt.de/krotik/eliasdb/graph/graphstorage"
)
/*
consolelogger is a custom logger type which allows log.Fatal calls to be tested
with unit tests. Overwrite the fatal and print variables below if the server
should not call os.Exit on a fatal error.
*/
type consolelogger func(v ...interface{})
var fatal = consolelogger(log.Fatal)
var print = consolelogger(log.Print)
/*
Base path for all files (used by unit tests)
*/
var basepath = ""
/*
UserDBPassphrase is the passphrase which will be used for the user db (only used if
access control is enabled)
*/
var UserDBPassphrase = ""
/*
StartServer runs the EliasDB server. The server uses config.Config for all its configuration
parameters.
*/
func StartServer() {
StartServerWithSingleOp(nil)
}
/*
StartServerWithSingleOp runs the EliasDB server. If the singleOperation function is
not nil then the server executes the function and exits if the function returns true.
*/
func StartServerWithSingleOp(singleOperation func(*graph.Manager) bool) {
var err error
var gs graphstorage.Storage
print(fmt.Sprintf("EliasDB %v", config.ProductVersion))
// Ensure we have a configuration - use the default configuration if nothing was set
if config.Config == nil {
config.LoadDefaultConfig()
}
// Create graph storage
if config.Bool(config.MemoryOnlyStorage) {
print("Starting memory only datastore")
gs = graphstorage.NewMemoryGraphStorage(config.MemoryOnlyStorage)
if config.Bool(config.EnableReadOnly) {
print("Ignoring EnableReadOnly setting")
}
} else {
loc := filepath.Join(basepath, config.Str(config.LocationDatastore))
readonly := config.Bool(config.EnableReadOnly)
if readonly {
print("Starting datastore (readonly) in ", loc)
} else {
print("Starting datastore in ", loc)
}
// Ensure path for database exists
ensurePath(loc)
gs, err = graphstorage.NewDiskGraphStorage(loc, readonly)
if err != nil {
fatal(err)
return
}
}
// Check if clustering is enabled
if config.Bool(config.EnableCluster) {
print("Reading cluster config")
cconfig, err := fileutil.LoadConfig(filepath.Join(basepath, config.Str(config.ClusterConfigFile)),
manager.DefaultConfig)
if err != nil {
fatal("Failed to load cluster config:", err)
return
}
print("Opening cluster state info")
si, err := manager.NewDefaultStateInfo(filepath.Join(basepath, config.Str(config.ClusterStateInfoFile)))
if err != nil {
fatal("Failed to load cluster state info:", err)
return
}
loghist := int(config.Int(config.ClusterLogHistory))
print(fmt.Sprintf("Starting cluster (log history: %v)", loghist))
ds, err := cluster.NewDistributedStorage(gs, cconfig, si)
if err != nil {
fatal("Failed to create distributed storage:", err)
return
}
gs = ds
// Make the distributed storage and the cluster log available for the REST API
api.DD = ds
api.DDLog = datautil.NewRingBuffer(loghist)
logFunc := func(v ...interface{}) {
api.DDLog.Log(timeutil.MakeTimestamp(), " ", fmt.Sprint(v...))
}
logPrintFunc := func(v ...interface{}) {
print("[Cluster] ", fmt.Sprint(v...))
api.DDLog.Log(timeutil.MakeTimestamp(), " ", fmt.Sprint(v...))
}
manager.LogDebug = logFunc
manager.LogInfo = logPrintFunc
// Kick off the cluster
ds.MemberManager.Start()
}
// Create GraphManager
print("Creating GraphManager instance")
api.GS = gs
api.GM = graph.NewGraphManager(gs)
defer func() {
print("Closing datastore")
if err := gs.Close(); err != nil {
fatal(err)
return
}
os.RemoveAll(filepath.Join(basepath, config.Str(config.LockFile)))
}()
// Create ScriptingInterpreter instance and run ECAL scripts
if config.Bool(config.EnableECALScripts) {
// Make sure the script directory exists
loc := filepath.Join(basepath, config.Str(config.ECALScriptFolder))
ensurePath(loc)
print("Loading ECAL scripts in ", loc)
api.SI = ecal.NewScriptingInterpreter(loc, api.GM)
if err := api.SI.Run(); err != nil {
fatal("Failed to start ECAL scripting interpreter:", err)
return
}
}
// Handle single operation - these are operations which work on the GraphManager
// and then exit.
if singleOperation != nil && singleOperation(api.GM) {
return
}
// Setting other API parameters
// Setup cookie expiry
cookieMaxAge := int(config.Int(config.CookieMaxAgeSeconds))
auth.CookieMaxLifetime = cookieMaxAge
user.CookieMaxLifetime = cookieMaxAge
user.UserSessionManager.Provider.(*user.MemorySessionProvider).SetExpiry(cookieMaxAge)
api.APIHost = config.Str(config.HTTPSHost) + ":" + config.Str(config.HTTPSPort)
v1.ResultCacheMaxSize = uint64(config.Int(config.ResultCacheMaxSize))
v1.ResultCacheMaxAge = config.Int(config.ResultCacheMaxAgeSeconds)
// Check if HTTPS key and certificate are in place
keyPath := filepath.Join(basepath, config.Str(config.LocationHTTPS), config.Str(config.HTTPSKey))
certPath := filepath.Join(basepath, config.Str(config.LocationHTTPS), config.Str(config.HTTPSCertificate))
keyExists, _ := fileutil.PathExists(keyPath)
certExists, _ := fileutil.PathExists(certPath)
if !keyExists || !certExists {
// Ensure path for ssl files exists
ensurePath(filepath.Join(basepath, config.Str(config.LocationHTTPS)))
print("Creating key (", config.Str(config.HTTPSKey), ") and certificate (",
config.Str(config.HTTPSCertificate), ") in: ", config.Str(config.LocationHTTPS))
// Generate a certificate and private key
err = cryptutil.GenCert(filepath.Join(basepath, config.Str(config.LocationHTTPS)),
config.Str(config.HTTPSCertificate), config.Str(config.HTTPSKey),
"localhost", "", 365*24*time.Hour, false, 4096, "")
if err != nil {
fatal("Failed to generate ssl key and certificate:", err)
return
}
}
// Register public REST endpoints - these will never be checked for authentication
api.RegisterRestEndpoints(api.GeneralEndpointMap)
api.RegisterRestEndpoints(v1.V1PublicEndpointMap)
// Setup access control
if config.Bool(config.EnableAccessControl) {
// Register REST endpoints for access control
api.RegisterRestEndpoints(ac.PublicAccessControlEndpointMap)
// Setup user database
ac.UserDB, err = datautil.NewEnforcedUserDB(filepath.Join(basepath, config.Str(config.LocationUserDB)),
UserDBPassphrase)
if err == nil {
var ok bool
// Setup access control - this will initialise the global ACL (access
// control lists) object
if ok, err = fileutil.PathExists(filepath.Join(basepath, config.Str(config.LocationAccessDB))); !ok && err == nil {
err = ioutil.WriteFile(filepath.Join(basepath, config.Str(config.LocationAccessDB)), ac.DefaultAccessDB, 0600)
}
if err == nil {
tab, cerr := access.NewPersistedACLTable(filepath.Join(basepath, config.Str(config.LocationAccessDB)), 3*time.Second)
// Assign to the outer err so a failure here is not silently
// ignored by the surrounding error handling
err = cerr
if err == nil {
ac.InitACLs(tab)
}
}
}
if err == nil {
// Make sure the initial accounts exist (circumventing the
// enforced password constraints by using the embedded UserDB directly)
if len(ac.UserDB.AllUsers()) == 0 {
ac.UserDB.UserDB.AddUserEntry("elias", "elias", nil)
ac.UserDB.UserDB.AddUserEntry("johndoe", "doe", nil)
}
// Setup the AuthHandler object which provides cookie based authentication
// for endpoints which are registered with its HandleFunc
ac.AuthHandler = auth.NewCookieAuthHandleFuncWrapper(http.HandleFunc)
// Connect the UserDB object to the AuthHandler - this provides authentication for users
ac.AuthHandler.SetAuthFunc(ac.UserDB.CheckUserPassword)
// Connect the ACL object to the AuthHandler - this provides authorization for users
ac.AuthHandler.SetAccessFunc(ac.ACL.CheckHTTPRequest)
// Make login page a "public" page i.e. a page which can be reached without
// authentication
ac.AuthHandler.AddPublicPage("/login.html",
httputil.SingleFileServer(filepath.Join(
config.Str(config.LocationWebFolder), "login.html"),
nil).ServeHTTP)
// Also make the fingerprint.json a public page
ac.AuthHandler.AddPublicPage("/fingerprint.json",
httputil.SingleFileServer(filepath.Join(
config.Str(config.LocationWebFolder), "fingerprint.json"),
nil).ServeHTTP)
// Adding special handlers which redirect to the login page
ac.AuthHandler.CallbackSessionExpired = ac.CallbackSessionExpired
ac.AuthHandler.CallbackUnauthorized = ac.CallbackUnauthorized
// Finally set the HandleFunc of the AuthHandler as the HandleFunc of the API
api.HandleFunc = ac.AuthHandler.HandleFunc
// After the api.HandleFunc has been set we can now register the management
// endpoints which should be subject to access control
api.RegisterRestEndpoints(ac.AccessManagementEndpointMap)
}
}
// Register EliasDB API endpoints - depending on if access control has been enabled
// these will require authentication and authorization for a given user
api.RegisterRestEndpoints(v1.V1EndpointMap)
// Register normal web server
if config.Bool(config.EnableWebFolder) {
webFolder := filepath.Join(basepath, config.Str(config.LocationWebFolder))
print("Ensuring web folder: ", webFolder)
ensurePath(webFolder)
fs := http.FileServer(http.Dir(webFolder))
api.HandleFunc("/", fs.ServeHTTP)
// Write login page if it is missing
if config.Bool(config.EnableAccessControl) {
loginFile := filepath.Join(webFolder, "login.html")
print("Ensuring login page: ", loginFile)
if res, _ := fileutil.PathExists(loginFile); !res {
errorutil.AssertOk(ioutil.WriteFile(loginFile, []byte(LoginSRC[1:]), 0644))
}
}
// Write terminal(s)
if config.Bool(config.EnableWebTerminal) {
ensurePath(filepath.Join(webFolder, api.APIRoot))
termFile := filepath.Join(webFolder, api.APIRoot, "term.html")
print("Ensuring web terminal: ", termFile)
if res, _ := fileutil.PathExists(termFile); !res {
errorutil.AssertOk(ioutil.WriteFile(termFile, []byte(TermSRC[1:]), 0644))
}
}
if config.Bool(config.EnableClusterTerminal) {
ensurePath(filepath.Join(webFolder, api.APIRoot))
termFile := filepath.Join(webFolder, api.APIRoot, "cluster.html")
if config.Bool(config.EnableCluster) {
// Add the url to the member info of the member manager
api.DD.MemberManager.MemberInfo()[manager.MemberInfoTermURL] =
fmt.Sprintf("https://%v:%v%v/%v", config.Str(config.HTTPSHost),
config.Str(config.HTTPSPort), api.APIRoot, "cluster.html")
}
print("Ensuring cluster terminal: ", termFile)
if res, _ := fileutil.PathExists(termFile); !res {
errorutil.AssertOk(ioutil.WriteFile(termFile, []byte(ClusterTermSRC[1:]), 0644))
}
}
}
// Start HTTPS server and enable REST API
hs := &httputil.HTTPServer{}
var wg sync.WaitGroup
wg.Add(1)
port := config.Str(config.HTTPSPort)
print("Starting HTTPS server on: ", api.APIHost)
go hs.RunHTTPSServer(basepath+config.Str(config.LocationHTTPS), config.Str(config.HTTPSCertificate),
config.Str(config.HTTPSKey), ":"+port, &wg)
// Wait until the server has started
wg.Wait()
// HTTPS Server has started
if hs.LastError != nil {
fatal(hs.LastError)
return
}
// Add to the wait group so we can wait for the shutdown
wg.Add(1)
// Read server certificate and write a fingerprint file
fpfile := filepath.Join(basepath, config.Str(config.LocationWebFolder), "fingerprint.json")
print("Writing fingerprint file: ", fpfile)
certs, _ := cryptutil.ReadX509CertsFromFile(certPath)
if len(certs) > 0 {
buf := bytes.Buffer{}
buf.WriteString("{\n")
buf.WriteString(fmt.Sprintf(` "md5" : "%s",`, cryptutil.Md5CertFingerprint(certs[0])))
buf.WriteString("\n")
buf.WriteString(fmt.Sprintf(` "sha1" : "%s",`, cryptutil.Sha1CertFingerprint(certs[0])))
buf.WriteString("\n")
buf.WriteString(fmt.Sprintf(` "sha256" : "%s"`, cryptutil.Sha256CertFingerprint(certs[0])))
buf.WriteString("\n")
buf.WriteString("}\n")
ioutil.WriteFile(fpfile, buf.Bytes(), 0644)
}
// Create a lockfile so the server can be shut down
lf := lockutil.NewLockFile(basepath+config.Str(config.LockFile), time.Duration(2)*time.Second)
lf.Start()
go func() {
// Check if the lockfile watcher is running and
// call shutdown once it has finished
for lf.WatcherRunning() {
time.Sleep(time.Duration(1) * time.Second)
}
print("Lockfile was modified")
hs.Shutdown()
}()
print("Waiting for shutdown")
wg.Wait()
print("Shutting down")
if config.Bool(config.EnableCluster) {
// Shutdown cluster
gs.(*cluster.DistributedStorage).MemberManager.Shutdown()
}
}
/*
ensurePath ensures that a given relative path exists.
*/
func ensurePath(path string) {
if res, _ := fileutil.PathExists(path); !res {
if err := os.Mkdir(path, 0770); err != nil {
fatal("Could not create directory:", err.Error())
return
}
}
}
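/*
Example usage (a minimal sketch; the server falls back to the default
configuration if none has been loaded):

	package main

	import "devt.de/krotik/eliasdb/server"

	func main() {
		server.StartServer()
	}
*/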
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package storage contains the low-level API for data storage. Data is stored
in slots. The interface defines methods to store, retrieve, update and delete
a given object to and from the disk. There are 3 main implementations:
DiskStorageManager
A disk storage manager handles the data storage on disk. It controls the actual
PhysicalSlotManager and LogicalSlotManager objects. It holds references to all
involved files and ensures exclusive access to them through a generated lock
file. The lockfile is checked and attempting to open another instance of the
DiskStorageManager on the same files will result in an error. The DiskStorageManager
is also responsible for marshalling given abstract objects into a binary form which
can be written to physical slots.
CachedDiskStorageManager
The CachedDiskStorageManager is a cache wrapper for the DiskStorageManager. Its
purpose is to intercept calls and to maintain a cache of stored objects. The cache
is limited in size by the number of total objects it references. Once the cache
is full it will forget the objects which have been requested the least.
MemoryStorageManager
A storage manager which keeps all its data in memory and provides several
error simulation facilities.
*/
package storage
import "sync"
/*
CachedDiskStorageManager data structure
*/
type CachedDiskStorageManager struct {
diskstoragemanager *DiskStorageManager // Wrapped instance of DiskStorageManager
mutex *sync.Mutex // Mutex to protect list and map operations
cache map[uint64]*cacheEntry // Map of stored cacheEntry objects
maxObjects int // Max number of objects which should be held in the cache
firstentry *cacheEntry // Pointer to first entry in cacheEntry linked list
lastentry *cacheEntry // Pointer to last entry in cacheEntry linked list
}
/*
cacheEntry data structure
*/
type cacheEntry struct {
location uint64 // Slot (logical) of the entry
object interface{} // Object of the entry
prev *cacheEntry // Pointer to previous entry in cacheEntry linked list
next *cacheEntry // Pointer to next entry in cacheEntry linked list
}
/*
Pool for cache entries
*/
var entryPool = &sync.Pool{New: func() interface{} { return &cacheEntry{} }}
/*
NewCachedDiskStorageManager creates a new cache wrapper for a DiskStorageManager.
*/
func NewCachedDiskStorageManager(diskstoragemanager *DiskStorageManager, maxObjects int) *CachedDiskStorageManager {
return &CachedDiskStorageManager{diskstoragemanager, &sync.Mutex{}, make(map[uint64]*cacheEntry),
maxObjects, nil, nil}
}
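/*
Example usage (a minimal sketch; "mystorage" is a hypothetical file prefix and
the cache is limited to 1000 objects):

	dsm := NewDiskStorageManager("mystorage", false, false, true, true)
	cdsm := NewCachedDiskStorageManager(dsm, 1000)

The cached manager can then be used in place of the wrapped DiskStorageManager
since it provides the same StorageManager interface.
*/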
/*
Name returns the name of the StorageManager instance.
*/
func (cdsm *CachedDiskStorageManager) Name() string {
return cdsm.diskstoragemanager.Name()
}
/*
Root returns a root value.
*/
func (cdsm *CachedDiskStorageManager) Root(root int) uint64 {
return cdsm.diskstoragemanager.Root(root)
}
/*
SetRoot writes a root value.
*/
func (cdsm *CachedDiskStorageManager) SetRoot(root int, val uint64) {
cdsm.diskstoragemanager.SetRoot(root, val)
}
/*
Insert inserts an object and returns its storage location.
*/
func (cdsm *CachedDiskStorageManager) Insert(o interface{}) (uint64, error) {
// Cannot cache inserts since the calling code needs a location
loc, err := cdsm.diskstoragemanager.Insert(o)
if loc != 0 && err == nil {
cdsm.mutex.Lock()
defer cdsm.mutex.Unlock()
cdsm.addToCache(loc, o)
}
return loc, err
}
/*
Update updates a storage location.
*/
func (cdsm *CachedDiskStorageManager) Update(loc uint64, o interface{}) error {
// Store the update in the cache
cdsm.mutex.Lock()
if entry, ok := cdsm.cache[loc]; !ok {
cdsm.addToCache(loc, o)
} else {
entry.object = o
cdsm.llTouchEntry(entry)
}
cdsm.mutex.Unlock()
return cdsm.diskstoragemanager.Update(loc, o)
}
/*
Free frees a storage location.
*/
func (cdsm *CachedDiskStorageManager) Free(loc uint64) error {
if ret := cdsm.diskstoragemanager.Free(loc); ret != nil {
return ret
}
cdsm.mutex.Lock()
defer cdsm.mutex.Unlock()
// Remove location entry from the cache
if entry, ok := cdsm.cache[loc]; ok {
delete(cdsm.cache, entry.location)
cdsm.llRemoveEntry(entry)
}
return nil
}
/*
Fetch fetches an object from a given storage location and writes it to
a given data container.
*/
func (cdsm *CachedDiskStorageManager) Fetch(loc uint64, o interface{}) error {
err := cdsm.diskstoragemanager.Fetch(loc, o)
if err != nil {
return err
}
cdsm.mutex.Lock()
defer cdsm.mutex.Unlock()
// Put the retrieved value into the cache
if entry, ok := cdsm.cache[loc]; !ok {
cdsm.addToCache(loc, o)
} else {
cdsm.llTouchEntry(entry)
}
return nil
}
/*
FetchCached fetches an object from a cache and returns its reference.
Returns a storage.ErrNotInCache error if the entry is not in the cache.
*/
func (cdsm *CachedDiskStorageManager) FetchCached(loc uint64) (interface{}, error) {
cdsm.mutex.Lock()
defer cdsm.mutex.Unlock()
if entry, ok := cdsm.cache[loc]; ok {
return entry.object, nil
}
return nil, NewStorageManagerError(ErrNotInCache, "", cdsm.Name())
}
/*
Rollback cancels all pending changes which have not yet been written to disk.
*/
func (cdsm *CachedDiskStorageManager) Rollback() error {
if cdsm.diskstoragemanager.transDisabled {
return nil
}
err := cdsm.diskstoragemanager.Rollback()
cdsm.mutex.Lock()
defer cdsm.mutex.Unlock()
// Cache is emptied in any case
cdsm.cache = make(map[uint64]*cacheEntry)
cdsm.firstentry = nil
cdsm.lastentry = nil
return err
}
/*
Close the StorageManager and write all pending changes to disk.
*/
func (cdsm *CachedDiskStorageManager) Close() error {
return cdsm.diskstoragemanager.Close()
}
/*
Flush writes all pending changes to disk.
*/
func (cdsm *CachedDiskStorageManager) Flush() error {
return cdsm.diskstoragemanager.Flush()
}
/*
addToCache adds an entry to the cache.
*/
func (cdsm *CachedDiskStorageManager) addToCache(loc uint64, o interface{}) {
var entry *cacheEntry
// Get an entry from the pool or recycle an entry from the cacheEntry
// linked list if the list is full
if len(cdsm.cache) >= cdsm.maxObjects {
entry = cdsm.removeOldestFromCache()
} else {
entry = entryPool.Get().(*cacheEntry)
}
// Fill the entry
entry.location = loc
entry.object = o
// Insert entry into the cacheEntry linked list (this will set the entries
// prev and next pointer)
cdsm.llAppendEntry(entry)
// Insert into the map of stored cacheEntry objects
cdsm.cache[loc] = entry
}
/*
removeOldestFromCache removes the oldest entry from the cache and returns it.
*/
func (cdsm *CachedDiskStorageManager) removeOldestFromCache() *cacheEntry {
entry := cdsm.firstentry
// If no entries were stored yet just return an entry from the pool
if entry == nil {
return entryPool.Get().(*cacheEntry)
}
// Remove entry from the cacheEntry linked list (this will set the entries
// prev and next pointer)
cdsm.llRemoveEntry(entry)
// Remove entry from the map of stored cacheEntry objects
delete(cdsm.cache, entry.location)
return entry
}
/*
llTouchEntry moves an entry to the last position of the cacheEntry linked list.
Calling llTouchEntry on all requested items ensures that the least recently
used entry is at the beginning of the list.
*/
func (cdsm *CachedDiskStorageManager) llTouchEntry(entry *cacheEntry) {
if cdsm.lastentry == entry {
return
}
cdsm.llRemoveEntry(entry)
cdsm.llAppendEntry(entry)
}
/*
llAppendEntry appends a cacheEntry to the end of the cacheEntry linked list.
*/
func (cdsm *CachedDiskStorageManager) llAppendEntry(entry *cacheEntry) {
if cdsm.firstentry == nil {
cdsm.firstentry = entry
cdsm.lastentry = entry
entry.prev = nil
} else {
cdsm.lastentry.next = entry
entry.prev = cdsm.lastentry
cdsm.lastentry = entry
}
entry.next = nil
}
/*
llRemoveEntry removes a cacheEntry from the cacheEntry linked list.
*/
func (cdsm *CachedDiskStorageManager) llRemoveEntry(entry *cacheEntry) {
if entry == cdsm.firstentry {
cdsm.firstentry = entry.next
}
if cdsm.lastentry == entry {
cdsm.lastentry = entry.prev
}
if entry.prev != nil {
entry.prev.next = entry.next
entry.prev = nil
}
if entry.next != nil {
entry.next.prev = entry.prev
entry.next = nil
}
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package storage
import (
"bytes"
"encoding/gob"
"errors"
"fmt"
"io"
"sync"
"time"
"devt.de/krotik/common/errorutil"
"devt.de/krotik/common/fileutil"
"devt.de/krotik/common/lockutil"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging"
"devt.de/krotik/eliasdb/storage/slotting"
"devt.de/krotik/eliasdb/storage/util"
)
/*
VERSION contains the version of the storage API
*/
const VERSION = 1
/*
FileSiffixLockfile is the file ending for lockfiles
*/
const FileSiffixLockfile = "lck"
/*
FileSuffixLogicalSlots is the file ending for a logical slot storage
*/
const FileSuffixLogicalSlots = "ix"
/*
FileSuffixLogicalFreeSlots is the file ending for a free logical slot storage
*/
const FileSuffixLogicalFreeSlots = "ixf"
/*
FileSuffixPhysicalSlots is the file ending for a physical slot storage
*/
const FileSuffixPhysicalSlots = "db"
/*
FileSuffixPhysicalFreeSlots is the file ending for a free physical slot storage
*/
const FileSuffixPhysicalFreeSlots = "dbf"
/*
BlockSizePhysicalSlots is the block size for a physical slot file. Physical slots
contain actual data so they need fairly large block sizes.
*/
const BlockSizePhysicalSlots = 1024 * 8
/*
BlockSizeLogicalSlots is the block size for a logical slot file. Logical slots
contain only pointers so they only need small blocks.
*/
const BlockSizeLogicalSlots = 1024 * 2
/*
BlockSizeFreeSlots is the block size for a free slot file. Files containing only
free slot pointers will always be small and only need tiny blocks.
*/
const BlockSizeFreeSlots = 1024
/*
ErrReadonly is returned when attempting a write operation on a readonly datastore.
*/
var ErrReadonly = errors.New("Storage is readonly")
/*
DiskStorageManager is a storage manager which can store any gob serializable datastructure.
*/
type DiskStorageManager struct {
*ByteDiskStorageManager
}
/*
NewDiskStorageManager creates a new disk storage manager with optional
transaction management. If the onlyAppend flag is set then the manager will
not attempt to reuse space once it was released after use. If the
transDisabled flag is set then the storage manager will not support
transactions.
*/
func NewDiskStorageManager(filename string, readonly bool, onlyAppend bool,
transDisabled bool, lockfileDisabled bool) *DiskStorageManager {
return &DiskStorageManager{NewByteDiskStorageManager(filename, readonly,
onlyAppend, transDisabled, lockfileDisabled)}
}
/*
Name returns the name of the StorageManager instance.
*/
func (dsm *DiskStorageManager) Name() string {
return fmt.Sprint("DiskStorageFile:", dsm.ByteDiskStorageManager.filename)
}
/*
Serialize serializes an object into a byte slice.
*/
func (dsm *DiskStorageManager) Serialize(o interface{}) ([]byte, error) {
// Request a buffer from the buffer pool
bb := BufferPool.Get().(*bytes.Buffer)
defer func() {
bb.Reset()
BufferPool.Put(bb)
}()
// Serialize the object into a gob bytes stream
err := gob.NewEncoder(bb).Encode(o)
if err != nil {
return nil, err
}
// Copy the bytes so the returned slice does not alias the pooled buffer
ret := make([]byte, bb.Len())
copy(ret, bb.Bytes())
return ret, nil
}
/*
Insert inserts an object and returns its storage location.
*/
func (dsm *DiskStorageManager) Insert(o interface{}) (uint64, error) {
b, err := dsm.Serialize(o)
if err != nil {
return 0, err
}
return dsm.ByteDiskStorageManager.Insert(b)
}
/*
Update updates a storage location.
*/
func (dsm *DiskStorageManager) Update(loc uint64, o interface{}) error {
b, err := dsm.Serialize(o)
if err != nil {
return err
}
return dsm.ByteDiskStorageManager.Update(loc, b)
}
/*
Fetch fetches an object from a given storage location and writes it to
a given data container.
*/
func (dsm *DiskStorageManager) Fetch(loc uint64, o interface{}) error {
// Request a buffer from the buffer pool
bb := BufferPool.Get().(*bytes.Buffer)
defer func() {
bb.Reset()
BufferPool.Put(bb)
}()
if err := dsm.ByteDiskStorageManager.Fetch(loc, bb); err != nil {
return err
}
// Deserialize the object from a gob bytes stream
return gob.NewDecoder(bb).Decode(o)
}
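/*
Example usage (a minimal sketch; "mystorage" is a hypothetical file prefix):

	dsm := NewDiskStorageManager("mystorage", false, false, true, true)

	loc, err := dsm.Insert("some object")
	if err == nil {
		var res string
		err = dsm.Fetch(loc, &res) // res now contains "some object"
	}

	dsm.Close()

Any gob serializable object can be stored and retrieved in this way.
*/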
/*
ByteDiskStorageManager is a disk storage manager which can only store byte slices.
*/
type ByteDiskStorageManager struct {
filename string // Filename for all managed files
readonly bool // Flag to make the storage readonly
onlyAppend bool // Flag for append-only mode
transDisabled bool // Flag if transactions are enabled
mutex *sync.Mutex // Mutex to protect actual file operations
physicalSlotsSf *file.StorageFile // StorageFile for physical slots
physicalSlotsPager *paging.PagedStorageFile // Pager for physical slots StorageFile
physicalFreeSlotsSf *file.StorageFile // StorageFile for free physical slots
physicalFreeSlotsPager *paging.PagedStorageFile // Pager for free physical slots StorageFile
physicalSlotManager *slotting.PhysicalSlotManager // Manager for physical slots
logicalSlotsSf *file.StorageFile // StorageFile for logical slots
logicalSlotsPager *paging.PagedStorageFile // Pager for logical slots StorageFile
logicalFreeSlotsSf *file.StorageFile // StorageFile for free logical slots
logicalFreeSlotsPager *paging.PagedStorageFile // Pager for free logical slots StorageFile
logicalSlotManager *slotting.LogicalSlotManager // Manager for logical slots
lockfile *lockutil.LockFile // Lockfile manager
}
/*
NewByteDiskStorageManager creates a new disk storage manager with optional
transaction management which can only store byte slices. If the onlyAppend
flag is set then the manager will not attempt to reuse space once it was
released after use. If the transDisabled flag is set then the storage
manager will not support transactions.
*/
func NewByteDiskStorageManager(filename string, readonly bool, onlyAppend bool,
transDisabled bool, lockfileDisabled bool) *ByteDiskStorageManager {
var lf *lockutil.LockFile
// Create a lockfile which is checked every 50 milliseconds
if !lockfileDisabled {
lf = lockutil.NewLockFile(fmt.Sprintf("%v.%v", filename, FileSiffixLockfile),
time.Duration(50)*time.Millisecond)
}
bdsm := &ByteDiskStorageManager{filename, readonly, onlyAppend, transDisabled, &sync.Mutex{}, nil, nil,
nil, nil, nil, nil, nil, nil, nil, nil, lf}
err := initByteDiskStorageManager(bdsm)
if err != nil {
panic(fmt.Sprintf("Could not initialize DiskStroageManager: %v", filename))
}
return bdsm
}
/*
DataFileExist checks if the main datastore file exists.
*/
func DataFileExist(filename string) bool {
ret, err := fileutil.PathExists(fmt.Sprintf("%v.%v.0", filename,
FileSuffixPhysicalSlots))
if err != nil {
return false
}
return ret
}
/*
Name returns the name of the StorageManager instance.
*/
func (bdsm *ByteDiskStorageManager) Name() string {
return fmt.Sprint("ByteDiskStorageFile:", bdsm.filename)
}
/*
Root returns a root value.
*/
func (bdsm *ByteDiskStorageManager) Root(root int) uint64 {
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
bdsm.checkFileOpen()
return bdsm.physicalSlotsPager.Header().Root(root)
}
/*
SetRoot writes a root value.
*/
func (bdsm *ByteDiskStorageManager) SetRoot(root int, val uint64) {
// When readonly this operation becomes a NOP
if bdsm.readonly {
return
}
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
bdsm.checkFileOpen()
bdsm.physicalSlotsPager.Header().SetRoot(root, val)
}
/*
Insert inserts an object and returns its storage location.
*/
func (bdsm *ByteDiskStorageManager) Insert(o interface{}) (uint64, error) {
bdsm.checkFileOpen()
// Fail operation if readonly
if bdsm.readonly {
return 0, ErrReadonly
}
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Store the data in a physical slot
b := o.([]byte)
ploc, err := bdsm.physicalSlotManager.Insert(b, 0, uint32(len(b)))
if err != nil {
return 0, err
}
// Get a logical slot for the physical slot
loc, err := bdsm.logicalSlotManager.Insert(ploc)
if err != nil {
return 0, err
}
return loc, nil
}
/*
Update updates a storage location.
*/
func (bdsm *ByteDiskStorageManager) Update(loc uint64, o interface{}) error {
bdsm.checkFileOpen()
// Fail operation if readonly
if bdsm.readonly {
return ErrReadonly
}
// Get the physical slot for the given logical slot
bdsm.mutex.Lock()
ploc, err := bdsm.logicalSlotManager.Fetch(loc)
bdsm.mutex.Unlock()
if err != nil {
return err
}
if ploc == 0 {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:",
util.LocationRecord(loc), util.LocationOffset(loc)), bdsm.Name())
}
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Update the physical record
b := o.([]byte)
newPloc, err := bdsm.physicalSlotManager.Update(ploc, b, 0, uint32(len(b)))
if err != nil {
return err
}
// Update the logical slot if the physical slot has changed
if newPloc != ploc {
return bdsm.logicalSlotManager.Update(loc, newPloc)
}
return nil
}
/*
Fetch fetches an object from a given storage location and writes it to
a given data container.
*/
func (bdsm *ByteDiskStorageManager) Fetch(loc uint64, o interface{}) error {
bdsm.checkFileOpen()
// Get the physical slot for the given logical slot
bdsm.mutex.Lock()
ploc, err := bdsm.logicalSlotManager.Fetch(loc)
bdsm.mutex.Unlock()
if err != nil {
return err
}
if ploc == 0 {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:",
util.LocationRecord(loc), util.LocationOffset(loc)), bdsm.Name())
}
// Request the stored bytes
bdsm.mutex.Lock()
if w, ok := o.(io.Writer); ok {
err = bdsm.physicalSlotManager.Fetch(ploc, w)
} else {
var b bytes.Buffer
err = bdsm.physicalSlotManager.Fetch(ploc, &b)
copy(o.([]byte), b.Bytes())
}
bdsm.mutex.Unlock()
return err
}
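/*
Example usage (a minimal sketch; assumes an initialized ByteDiskStorageManager
in "bdsm"):

	loc, err := bdsm.Insert([]byte("raw data"))
	if err == nil {
		var buf bytes.Buffer
		err = bdsm.Fetch(loc, &buf) // buf now holds the stored bytes
	}

Passing an io.Writer such as a bytes.Buffer avoids having to presize a byte
slice for the result.
*/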
/*
FetchCached is not implemented for a ByteDiskStorageManager.
Only defined to satisfy the StorageManager interface.
*/
func (bdsm *ByteDiskStorageManager) FetchCached(loc uint64) (interface{}, error) {
return nil, NewStorageManagerError(ErrNotInCache, "", bdsm.Name())
}
/*
Free frees a storage location.
*/
func (bdsm *ByteDiskStorageManager) Free(loc uint64) error {
bdsm.checkFileOpen()
// Fail operation if readonly
if bdsm.readonly {
return ErrReadonly
}
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Get the physical slot for the given logical slot
ploc, err := bdsm.logicalSlotManager.Fetch(loc)
if err != nil {
return err
}
if ploc == 0 {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:",
util.LocationRecord(loc), util.LocationOffset(loc)), bdsm.Name())
}
// First try to free the physical slot since that is where the data lives -
// if this fails we don't touch the logical slot
err = bdsm.physicalSlotManager.Free(ploc)
if err != nil {
return err
}
// This is very unlikely to fail - either way we can't do anything
// at this point since the physical slot has already gone away
return bdsm.logicalSlotManager.Free(loc)
}
/*
Flush writes all pending changes to disk.
*/
func (bdsm *ByteDiskStorageManager) Flush() error {
bdsm.checkFileOpen()
// When readonly this operation becomes a NOP
if bdsm.readonly {
return nil
}
ce := errorutil.NewCompositeError()
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Write pending changes
if err := bdsm.physicalSlotManager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalSlotManager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.physicalSlotsPager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.physicalFreeSlotsPager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalSlotsPager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalFreeSlotsPager.Flush(); err != nil {
ce.Add(err)
}
// Return errors if there were any
if ce.HasErrors() {
return ce
}
return nil
}
/*
Rollback cancels all pending changes which have not yet been written to disk.
*/
func (bdsm *ByteDiskStorageManager) Rollback() error {
// Rollback has no effect if transactions are disabled or when readonly
if bdsm.transDisabled || bdsm.readonly {
return nil
}
bdsm.checkFileOpen()
ce := errorutil.NewCompositeError()
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Write pending manager changes to transaction log
if err := bdsm.physicalSlotManager.Flush(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalSlotManager.Flush(); err != nil {
ce.Add(err)
}
// Rollback current transaction
if err := bdsm.physicalSlotsPager.Rollback(); err != nil {
ce.Add(err)
}
if err := bdsm.physicalFreeSlotsPager.Rollback(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalSlotsPager.Rollback(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalFreeSlotsPager.Rollback(); err != nil {
ce.Add(err)
}
// Return errors if there were any
if ce.HasErrors() {
return ce
}
return nil
}
/*
Close closes the StorageManager and writes all pending changes to disk.
*/
func (bdsm *ByteDiskStorageManager) Close() error {
bdsm.checkFileOpen()
ce := errorutil.NewCompositeError()
// Continue single threaded from here on
bdsm.mutex.Lock()
defer bdsm.mutex.Unlock()
// Try to close all files and collect any errors which are returned
if err := bdsm.physicalSlotsPager.Close(); err != nil {
ce.Add(err)
}
if err := bdsm.physicalFreeSlotsPager.Close(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalSlotsPager.Close(); err != nil {
ce.Add(err)
}
if err := bdsm.logicalFreeSlotsPager.Close(); err != nil {
ce.Add(err)
}
// Return errors if there were any
if ce.HasErrors() {
return ce
}
// Release all file related objects
bdsm.physicalSlotsSf = nil
bdsm.physicalSlotsPager = nil
bdsm.physicalFreeSlotsSf = nil
bdsm.physicalFreeSlotsPager = nil
bdsm.physicalSlotManager = nil
bdsm.logicalSlotsSf = nil
bdsm.logicalSlotsPager = nil
bdsm.logicalFreeSlotsSf = nil
bdsm.logicalFreeSlotsPager = nil
bdsm.logicalSlotManager = nil
if bdsm.lockfile != nil {
return bdsm.lockfile.Finish()
}
return nil
}
/*
checkFileOpen checks that the files on disk are still open.
*/
func (bdsm *ByteDiskStorageManager) checkFileOpen() {
if bdsm.physicalSlotsSf == nil {
panic(fmt.Sprint("Trying to access storage after it was closed: ", bdsm.filename))
}
if bdsm.lockfile != nil && !bdsm.lockfile.WatcherRunning() {
err := bdsm.lockfile.Finish()
panic(fmt.Sprint("Error while checking lockfile:", err))
}
}
/*
initByteDiskStorageManager initialises the file managers of a given ByteDiskStorageManager.
*/
func initByteDiskStorageManager(bdsm *ByteDiskStorageManager) error {
// Kick off the lockfile watcher
if bdsm.lockfile != nil {
if err := bdsm.lockfile.Start(); err != nil {
panic(fmt.Sprintf("Could not take ownership of lockfile %v: %v",
bdsm.filename, err))
}
}
// Try to open all files and collect all errors
ce := errorutil.NewCompositeError()
sf, pager, err := createFileAndPager(
fmt.Sprintf("%v.%v", bdsm.filename, FileSuffixPhysicalSlots),
BlockSizePhysicalSlots, bdsm)
if err != nil {
ce.Add(err)
}
bdsm.physicalSlotsSf = sf
bdsm.physicalSlotsPager = pager
sf, pager, err = createFileAndPager(
fmt.Sprintf("%v.%v", bdsm.filename, FileSuffixPhysicalFreeSlots),
BlockSizeFreeSlots, bdsm)
if err != nil {
ce.Add(err)
}
bdsm.physicalFreeSlotsSf = sf
bdsm.physicalFreeSlotsPager = pager
if !ce.HasErrors() {
bdsm.physicalSlotManager = slotting.NewPhysicalSlotManager(bdsm.physicalSlotsPager,
bdsm.physicalFreeSlotsPager, bdsm.onlyAppend)
}
sf, pager, err = createFileAndPager(
fmt.Sprintf("%v.%v", bdsm.filename, FileSuffixLogicalSlots),
BlockSizeLogicalSlots, bdsm)
if err != nil {
ce.Add(err)
}
bdsm.logicalSlotsSf = sf
bdsm.logicalSlotsPager = pager
sf, pager, err = createFileAndPager(
fmt.Sprintf("%v.%v", bdsm.filename, FileSuffixLogicalFreeSlots),
BlockSizeFreeSlots, bdsm)
if err != nil {
ce.Add(err)
}
bdsm.logicalFreeSlotsSf = sf
bdsm.logicalFreeSlotsPager = pager
if !ce.HasErrors() {
bdsm.logicalSlotManager = slotting.NewLogicalSlotManager(bdsm.logicalSlotsPager,
bdsm.logicalFreeSlotsPager)
}
// If there were any file related errors return at this point
if ce.HasErrors() {
// Release the lockfile if there were errors
if bdsm.lockfile != nil {
bdsm.lockfile.Finish()
}
return ce
}
// Check version
version := bdsm.Root(RootIDVersion)
if version > VERSION {
// Try to clean up
bdsm.Close()
panic(fmt.Sprint("Cannot open datastore ", bdsm.filename, " - version of disk files is "+
"newer than supported version. Supported version:", VERSION,
" Disk files version:", version))
}
if version != VERSION {
bdsm.SetRoot(RootIDVersion, VERSION)
}
return nil
}
/*
createFileAndPager creates a storagefile and a pager.
*/
func createFileAndPager(filename string, recordSize uint32,
bdsm *ByteDiskStorageManager) (*file.StorageFile, *paging.PagedStorageFile, error) {
sf, err := file.NewStorageFile(filename, recordSize, bdsm.transDisabled)
if err != nil {
return nil, nil, err
}
pager, err := paging.NewPagedStorageFile(sf)
return sf, pager, err
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package file deals with low level file storage and transaction management.
StorageFile
StorageFile models a logical storage file which stores fixed size records on
disk. Each record has a unique record id. On disk this logical storage file
might be split into several smaller files. StorageFiles can be reused after
they were closed if the transaction management has been disabled. This is
not the case otherwise.
Record
A record is a byte slice of a StorageFile. It is a wrapper data structure for
a byte array which provides read and write methods for several data types.
TransactionManager
TransactionManager provides an optional transaction management for StorageFile.
When used, each record which is released from use is added to an in-memory
transaction log. Once the client calls Flush() on the StorageFile, the
in-memory transaction is written to a transaction log on disk but kept in
memory. The in-memory transaction log is written to the actual StorageFile
once maxTrans is reached or the StorageFile is closed.
Should the process crash during a transaction, then the transaction log is
written to the StorageFile on the next startup using the recover() function.
*/
package file
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"devt.de/krotik/common/bitutil"
"devt.de/krotik/common/stringutil"
)
/*
Size constants for a record
*/
const (
SizeByte = 1
SizeUnsignedShort = 2
SizeShort = 2
SizeThreeByteInt = 3
SizeUnsignedInt = 4
SizeInt = 4
SizeSixByteLong = 6
SizeLong = 8
)
/*
Record data structure
*/
type Record struct {
id uint64 // 64-bit record id
data []byte // Slice of the whole data byte array
dirty bool // Dirty flag to indicate change
transCount int // Transaction counter
pageView interface{} // View on this record (this is not persisted)
}
/*
NewRecord creates a new Record and returns a pointer to it.
*/
func NewRecord(id uint64, data []byte) *Record {
return &Record{id, data, false, 0, nil}
}
/*
ID returns the id of a Record.
*/
func (r *Record) ID() uint64 {
return r.id
}
/*
SetID changes the id of a Record.
*/
func (r *Record) SetID(id uint64) error {
if r.InTransaction() {
return fmt.Errorf("Record id cannot be changed. Record "+
"is used in %d transaction%s.", r.transCount,
stringutil.Plural(r.transCount))
}
r.id = id
return nil
}
/*
Data returns the raw data of a Record.
*/
func (r *Record) Data() []byte {
return r.data
}
/*
Dirty returns the dirty flag of a Record.
*/
func (r *Record) Dirty() bool {
return r.dirty
}
/*
SetDirty sets the dirty flag of a Record.
*/
func (r *Record) SetDirty() {
r.dirty = true
}
/*
ClearDirty clears the dirty flag of a Record.
*/
func (r *Record) ClearDirty() {
r.dirty = false
}
/*
ClearData removes all stored data from a Record.
*/
func (r *Record) ClearData() {
var ccap, clen int
if r.data != nil {
ccap = cap(r.data)
clen = len(r.data)
} else {
clen = DefaultRecordSize
ccap = DefaultRecordSize
}
r.data = make([]byte, clen, ccap)
r.ClearDirty()
}
/*
InTransaction returns if the Record is used in a transaction.
*/
func (r *Record) InTransaction() bool {
return r.transCount != 0
}
/*
IncTransCount increments the transaction count which means the record is in the
log but not yet in the data file.
*/
func (r *Record) IncTransCount() {
r.transCount++
}
/*
DecTransCount decrements the transaction count which means the record has been
written to disk.
*/
func (r *Record) DecTransCount() {
r.transCount--
if r.transCount < 0 {
panic(fmt.Sprintf("Transaction count for record %v is below zero: %v",
r.id, r.transCount))
}
}
/*
PageView returns the view on this record. The view determines how the record
is being used.
*/
func (r *Record) PageView() interface{} {
return r.pageView
}
/*
SetPageView sets the view on this record.
*/
func (r *Record) SetPageView(view interface{}) {
r.pageView = view
}
/*
String returns a string representation of the Record.
*/
func (r *Record) String() string {
return fmt.Sprintf("Record: %v (dirty:%v transCount:%v len:%v cap:%v)\n%v",
r.id, r.dirty, r.transCount, len(r.data), cap(r.data), bitutil.HexDump(r.data))
}
// Read and Write functions
// ========================
/*
ReadSingleByte reads a byte from a Record.
*/
func (r *Record) ReadSingleByte(pos int) byte {
return r.data[pos]
}
/*
WriteSingleByte writes a byte to a Record.
*/
func (r *Record) WriteSingleByte(pos int, value byte) {
r.data[pos] = value
r.SetDirty()
}
/*
ReadUInt16 reads a 16-bit unsigned integer from a Record.
*/
func (r *Record) ReadUInt16(pos int) uint16 {
return (uint16(r.data[pos+0]) << 8) |
(uint16(r.data[pos+1]) << 0)
}
/*
WriteUInt16 writes a 16-bit unsigned integer to a Record.
*/
func (r *Record) WriteUInt16(pos int, value uint16) {
r.data[pos+0] = byte(value >> 8)
r.data[pos+1] = byte(value >> 0)
r.SetDirty()
}
/*
ReadInt16 reads a 16-bit signed integer from a Record.
*/
func (r *Record) ReadInt16(pos int) int16 {
return (int16(r.data[pos+0]) << 8) |
(int16(r.data[pos+1]) << 0)
}
/*
WriteInt16 writes a 16-bit signed integer to a Record.
*/
func (r *Record) WriteInt16(pos int, value int16) {
r.data[pos+0] = byte(value >> 8)
r.data[pos+1] = byte(value >> 0)
r.SetDirty()
}
/*
ReadUInt32 reads a 32-bit unsigned integer from a Record.
*/
func (r *Record) ReadUInt32(pos int) uint32 {
return (uint32(r.data[pos+0]) << 24) |
(uint32(r.data[pos+1]) << 16) |
(uint32(r.data[pos+2]) << 8) |
(uint32(r.data[pos+3]) << 0)
}
/*
WriteUInt32 writes a 32-bit unsigned integer to a Record.
*/
func (r *Record) WriteUInt32(pos int, value uint32) {
r.data[pos+0] = byte(value >> 24)
r.data[pos+1] = byte(value >> 16)
r.data[pos+2] = byte(value >> 8)
r.data[pos+3] = byte(value >> 0)
r.SetDirty()
}
/*
ReadInt32 reads a 32-bit signed integer from a Record.
*/
func (r *Record) ReadInt32(pos int) int32 {
return (int32(r.data[pos+0]) << 24) |
(int32(r.data[pos+1]) << 16) |
(int32(r.data[pos+2]) << 8) |
(int32(r.data[pos+3]) << 0)
}
/*
WriteInt32 writes a 32-bit signed integer to a Record.
*/
func (r *Record) WriteInt32(pos int, value int32) {
r.data[pos+0] = byte(value >> 24)
r.data[pos+1] = byte(value >> 16)
r.data[pos+2] = byte(value >> 8)
r.data[pos+3] = byte(value >> 0)
r.SetDirty()
}
/*
ReadUInt64 reads a 64-bit unsigned integer from a Record.
*/
func (r *Record) ReadUInt64(pos int) uint64 {
return (uint64(r.data[pos+0]) << 56) |
(uint64(r.data[pos+1]) << 48) |
(uint64(r.data[pos+2]) << 40) |
(uint64(r.data[pos+3]) << 32) |
(uint64(r.data[pos+4]) << 24) |
(uint64(r.data[pos+5]) << 16) |
(uint64(r.data[pos+6]) << 8) |
(uint64(r.data[pos+7]) << 0)
}
/*
WriteUInt64 writes a 64-bit unsigned integer to a Record.
*/
func (r *Record) WriteUInt64(pos int, value uint64) {
r.data[pos+0] = byte(value >> 56)
r.data[pos+1] = byte(value >> 48)
r.data[pos+2] = byte(value >> 40)
r.data[pos+3] = byte(value >> 32)
r.data[pos+4] = byte(value >> 24)
r.data[pos+5] = byte(value >> 16)
r.data[pos+6] = byte(value >> 8)
r.data[pos+7] = byte(value >> 0)
r.SetDirty()
}
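/*
exampleRecordReadWrite is an illustrative sketch (not part of the original
source) demonstrating the big-endian read and write helpers above. It only
uses the Record API defined in this file.
*/
func exampleRecordReadWrite() {
r := NewRecord(1, make([]byte, DefaultRecordSize))
r.WriteUInt32(0, 0xCAFEBABE) // Bytes 0-3, most significant byte first
r.WriteUInt16(SizeUnsignedInt, 42) // Bytes 4-5
// Writing marks the record dirty - prints: CAFEBABE 42 dirty:true
fmt.Printf("%X %d dirty:%v\n", r.ReadUInt32(0),
r.ReadUInt16(SizeUnsignedInt), r.Dirty())
}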
/*
MarshalBinary returns a binary representation of a Record.
*/
func (r *Record) MarshalBinary() (data []byte, err error) {
buf := new(bytes.Buffer)
// Using a normal memory buffer this should always succeed
r.WriteRecord(buf)
return buf.Bytes(), nil
}
/*
WriteRecord writes a record to an io.Writer.
*/
func (r *Record) WriteRecord(iow io.Writer) error {
if err := binary.Write(iow, binary.LittleEndian, r.id); err != nil {
return err
}
if r.dirty {
if err := binary.Write(iow, binary.LittleEndian, int8(1)); err != nil {
return err
}
} else {
if err := binary.Write(iow, binary.LittleEndian, int8(0)); err != nil {
return err
}
}
if err := binary.Write(iow, binary.LittleEndian, int64(r.transCount)); err != nil {
return err
}
if err := binary.Write(iow, binary.LittleEndian, int64(len(r.data))); err != nil {
return err
}
if _, err := iow.Write(r.data); err != nil {
return err
}
// PageView is not persisted since it is derived from the record data
return nil
}
/*
UnmarshalBinary decodes a record from a binary blob.
*/
func (r *Record) UnmarshalBinary(data []byte) error {
buf := new(bytes.Buffer)
buf.Write(data)
return r.ReadRecord(buf)
}
/*
ReadRecord decodes a record by reading from an io.Reader.
*/
func (r *Record) ReadRecord(ior io.Reader) error {
if err := binary.Read(ior, binary.LittleEndian, &r.id); err != nil {
return err
}
r.pageView = nil
var d int8
if err := binary.Read(ior, binary.LittleEndian, &d); err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
r.dirty = d == 1
var t int64
if err := binary.Read(ior, binary.LittleEndian, &t); err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
r.transCount = int(t)
if err := binary.Read(ior, binary.LittleEndian, &t); err != nil {
if err == io.EOF {
return io.ErrUnexpectedEOF
}
return err
}
r.data = make([]byte, t)
i, err := io.ReadFull(ior, r.data)
if int64(i) != t {
return io.ErrUnexpectedEOF
}
return err
}
/*
ReadRecord decodes a new record by reading from an io.Reader and returns it.
*/
func ReadRecord(ior io.Reader) (*Record, error) {
r := NewRecord(0, nil)
if err := r.ReadRecord(ior); err != nil {
return nil, err
}
return r, nil
}
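/*
exampleRecordRoundTrip is an illustrative sketch (not part of the original
source) showing that MarshalBinary and ReadRecord are inverse operations.
*/
func exampleRecordRoundTrip() error {
r1 := NewRecord(7, []byte{1, 2, 3})
data, _ := r1.MarshalBinary() // Writing to a memory buffer cannot fail
r2, err := ReadRecord(bytes.NewBuffer(data))
if err != nil {
return err
}
fmt.Println(r2.ID(), r2.Data()) // Prints: 7 [1 2 3]
return nil
}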
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package file
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"devt.de/krotik/common/sortutil"
)
/*
Common storage file related errors.
*/
var (
ErrAlreadyInUse = errors.New("Record is already in-use")
ErrNotInUse = errors.New("Record was not in-use")
ErrInUse = errors.New("Records are still in-use")
ErrTransDisabled = errors.New("Transactions are disabled")
ErrInTrans = errors.New("Records are still in a transaction")
ErrNilData = errors.New("Record has nil data")
)
/*
DefaultRecordSize is the default size of a record in bytes
*/
const DefaultRecordSize = 4096
/*
DefaultFileSize is the default size of a physical file (10GB)
*/
const DefaultFileSize = 0x2540BE401 // 10000000001 Bytes
/*
StorageFile data structure
*/
type StorageFile struct {
name string // Name of the storage file
transDisabled bool // Flag if transactions are disabled
recordSize uint32 // Size of a record
maxFileSize uint64 // Max size of a storage file on disk
free map[uint64]*Record // Map of records which are stored in memory
inUse map[uint64]*Record // Locked records which are currently being modified
inTrans map[uint64]*Record // Records which are in the transaction log but not yet written to disk
dirty map[uint64]*Record // Dirty little records waiting to be written
files []*os.File // List of storage files
tm *TransactionManager // Manager object for transactions
}
/*
NewDefaultStorageFile creates a new storage file with default record size and
returns a reference to it.
*/
func NewDefaultStorageFile(name string, transDisabled bool) (*StorageFile, error) {
return NewStorageFile(name, DefaultRecordSize, transDisabled)
}
/*
NewStorageFile creates a new storage file and returns a reference to it.
*/
func NewStorageFile(name string, recordSize uint32, transDisabled bool) (*StorageFile, error) {
maxFileSize := DefaultFileSize - DefaultFileSize%uint64(recordSize)
ret := &StorageFile{name, transDisabled, recordSize, maxFileSize,
make(map[uint64]*Record), make(map[uint64]*Record), make(map[uint64]*Record),
make(map[uint64]*Record), make([]*os.File, 0), nil}
if !transDisabled {
tm, err := NewTransactionManager(ret, true)
if err != nil {
return nil, err
}
ret.tm = tm
}
_, err := ret.getFile(0)
if err != nil {
return nil, err
}
return ret, nil
}
/*
Name returns the name of this storage file.
*/
func (s *StorageFile) Name() string {
return s.name
}
/*
RecordSize returns the size of records which can be stored or retrieved.
*/
func (s *StorageFile) RecordSize() uint32 {
return s.recordSize
}
/*
Get returns a record from the file. Other components can write to this record.
Any write operation should set the dirty flag on the record. Dirty records will
be written back to disk when the file is flushed after which the dirty flag is
cleared. Get returns an error if a record is requested which is still in-use.
*/
func (s *StorageFile) Get(id uint64) (*Record, error) {
var record *Record
// Check if the record is in one of the caches
if record, ok := s.inTrans[id]; ok {
delete(s.inTrans, id)
s.inUse[id] = record
return record, nil
}
if record, ok := s.dirty[id]; ok {
delete(s.dirty, id)
s.inUse[id] = record
return record, nil
}
if record, ok := s.free[id]; ok {
delete(s.free, id)
s.inUse[id] = record
return record, nil
}
// Error if a record which is in-use is requested again before it is released.
if _, ok := s.inUse[id]; ok {
return nil, NewStorageFileError(ErrAlreadyInUse, fmt.Sprintf("Record %v", id), s.name)
}
// Read the record in from file
record = s.createRecord(id)
err := s.readRecord(record)
if err != nil {
return nil, err
}
s.inUse[id] = record
return record, nil
}
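/*
exampleStorageFileUsage is an illustrative sketch (not part of the original
source) of the typical Get / modify / ReleaseInUse / Flush cycle. The
filename "example.db" is a placeholder.
*/
func exampleStorageFileUsage() error {
sf, err := NewDefaultStorageFile("example.db", true) // Transactions disabled
if err != nil {
return err
}
record, err := sf.Get(1)
if err != nil {
return err
}
record.WriteSingleByte(0, 0xFF) // Sets the dirty flag
sf.ReleaseInUse(record) // Record moves to the dirty map
if err := sf.Flush(); err != nil { // Dirty records are written to disk
return err
}
return sf.Close()
}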
/*
getFile gets a physical file for a specific offset.
*/
func (s *StorageFile) getFile(offset uint64) (*os.File, error) {
filenumber := int(offset / s.maxFileSize)
// Make sure the index exists which we want to use.
// Fill all previous positions up with nil pointers if they don't exist.
for i := len(s.files); i <= filenumber; i++ {
s.files = append(s.files, nil)
}
var ret *os.File
if len(s.files) > filenumber {
ret = s.files[filenumber]
}
if ret == nil {
// Important not to have os.O_APPEND since we really want
// to have random access to the file.
filename := fmt.Sprintf("%s.%d", s.name, filenumber)
file, err := os.OpenFile(filename, os.O_CREATE|os.O_RDWR, 0660)
if err != nil {
return nil, err
}
s.files[filenumber] = file
ret = file
}
return ret, nil
}
/*
createRecord creates a new record (either recycled from the free cache or newly allocated).
*/
func (s *StorageFile) createRecord(id uint64) *Record {
var record *Record
if len(s.free) != 0 {
var rkey uint64
for rkey, record = range s.free {
break
}
delete(s.free, rkey)
// NOTE At this point the free record still contains
// old data. It is expected that the following
// readRecord operation will overwrite the data.
}
if record == nil {
record = NewRecord(id, make([]byte, s.recordSize, s.recordSize))
}
record.SetID(id)
record.SetPageView(nil)
record.ClearDirty()
return record
}
/*
writeRecord writes a record to disk.
*/
func (s *StorageFile) writeRecord(record *Record) error {
data := record.Data()
if data != nil {
offset := record.ID() * uint64(s.recordSize)
file, err := s.getFile(offset)
if err != nil {
return err
}
file.WriteAt(data, int64(offset%s.maxFileSize))
return nil
}
return NewStorageFileError(ErrNilData, fmt.Sprintf("Record %v", record.ID()), s.name)
}
/*
readRecord fills a given record object with data.
*/
func (s *StorageFile) readRecord(record *Record) error {
if record.Data() == nil {
return NewStorageFileError(ErrNilData, fmt.Sprintf("Record %v", record.ID()), s.name)
}
offset := record.ID() * uint64(s.recordSize)
file, err := s.getFile(offset)
if err != nil {
return err
}
n, err := file.ReadAt(record.Data(), int64(offset%s.maxFileSize))
if n > 0 && uint32(n) != s.recordSize {
panic(fmt.Sprintf("File on disk returned unexpected length of data: %v "+
"expected length was: %v", n, s.recordSize))
} else if n == 0 {
// We just allocate a new array here which seems to be the
// quickest way to get an empty array.
record.ClearData()
}
if err == io.EOF {
return nil
}
return err
}
/*
Discard a given record.
*/
func (s *StorageFile) Discard(record *Record) {
if record == nil {
return
}
delete(s.inUse, record.ID())
}
/*
releaseInTrans releases a record which was in a transaction. The client code
may indicate if the record should be recycled.
*/
func (s *StorageFile) releaseInTrans(record *Record, recycle bool) {
if record == nil {
return
}
_, ok := s.inTrans[record.ID()]
if ok {
delete(s.inTrans, record.ID())
if recycle {
s.free[record.ID()] = record
}
}
}
/*
ReleaseInUseID releases a record given by its id from the in-use map. The
client code may indicate if the record is dirty.
*/
func (s *StorageFile) ReleaseInUseID(id uint64, dirty bool) error {
record, ok := s.inUse[id]
if !ok {
return NewStorageFileError(ErrNotInUse, fmt.Sprintf("Record %v", id), s.name)
}
if !record.Dirty() && dirty {
record.SetDirty()
}
s.ReleaseInUse(record)
return nil
}
/*
ReleaseInUse releases a record from the in-use map. ReleaseInUse panics if
the record was not in use.
*/
func (s *StorageFile) ReleaseInUse(record *Record) {
if record == nil {
return
}
id := record.ID()
// Panic if a record which is released was not in-use.
if _, ok := s.inUse[id]; !ok {
panic(fmt.Sprintf("Released record %d was not in-use", id))
}
delete(s.inUse, id)
if record.Dirty() {
s.dirty[id] = record
} else {
if !s.transDisabled && record.InTransaction() {
s.inTrans[id] = record
} else {
s.free[id] = record
}
}
}
/*
Flush commits the current transaction by flushing all dirty records to the
transaction log on disk. If transactions are disabled it simply
writes all dirty records to disk.
*/
func (s *StorageFile) Flush() error {
if len(s.inUse) > 0 {
return NewStorageFileError(ErrInUse, fmt.Sprintf("Records %v", len(s.inUse)), s.name)
}
if len(s.dirty) == 0 {
return nil
}
if !s.transDisabled {
s.tm.start()
}
for id, record := range s.dirty {
if s.transDisabled {
err := s.writeRecord(record)
if err != nil {
return err
}
record.ClearDirty()
delete(s.dirty, id)
s.free[id] = record
} else {
s.tm.add(record)
delete(s.dirty, id)
s.inTrans[id] = record
}
}
if !s.transDisabled {
return s.tm.commit()
}
return nil
}
/*
Rollback cancels the current transaction by discarding all dirty records.
*/
func (s *StorageFile) Rollback() error {
if s.transDisabled {
return NewStorageFileError(ErrTransDisabled, "", s.name)
}
if len(s.inUse) > 0 {
return NewStorageFileError(ErrInUse, fmt.Sprintf("Records %v", len(s.inUse)), s.name)
}
s.dirty = make(map[uint64]*Record)
if err := s.tm.syncLogFromDisk(); err != nil {
return err
}
if len(s.inTrans) > 0 {
return NewStorageFileError(ErrInTrans, fmt.Sprintf("Records %v", len(s.inTrans)), s.name)
}
return nil
}
/*
Sync syncs all physical files.
*/
func (s *StorageFile) Sync() {
for _, file := range s.files {
if file != nil {
file.Sync()
}
}
}
/*
Close commits all data and closes all physical files.
*/
func (s *StorageFile) Close() error {
if len(s.dirty) > 0 {
if err := s.Flush(); err != nil {
return err
}
}
if !s.transDisabled {
// If something fails here we will know about it
// when checking if there are records in inTrans
s.tm.syncLogFromMemory()
s.tm.close()
}
if len(s.inTrans) > 0 {
return NewStorageFileError(ErrInTrans, fmt.Sprintf("Records %v", len(s.inTrans)), s.name)
} else if len(s.inUse) > 0 {
return NewStorageFileError(ErrInUse, fmt.Sprintf("Records %v", len(s.inUse)), s.name)
}
for _, file := range s.files {
if file != nil {
file.Close()
}
}
s.free = make(map[uint64]*Record)
s.files = make([]*os.File, 0)
// If transactions are enabled then a StorageFile cannot be
// reused after it was closed.
s.tm = nil
return nil
}
/*
String returns a string representation of a StorageFile.
*/
func (s *StorageFile) String() string {
buf := new(bytes.Buffer)
buf.WriteString(fmt.Sprintf("Storage File: %v (transDisabled:%v recordSize:%v "+
"maxFileSize:%v)\n", s.name, s.transDisabled, s.recordSize, s.maxFileSize))
buf.WriteString("====\n")
printRecordIDMap(buf, &s.free, "Free")
buf.WriteString("\n")
printRecordIDMap(buf, &s.inUse, "InUse")
buf.WriteString("\n")
printRecordIDMap(buf, &s.inTrans, "InTrans")
buf.WriteString("\n")
printRecordIDMap(buf, &s.dirty, "Dirty")
buf.WriteString("\n")
buf.WriteString("Open files: ")
l := len(s.files)
for i, file := range s.files {
if file != nil {
buf.WriteString(file.Name())
buf.WriteString(fmt.Sprintf(" (%v)", i))
if i < l-1 {
buf.WriteString(", ")
}
}
}
buf.WriteString("\n")
buf.WriteString("====\n")
if s.tm != nil {
buf.WriteString(s.tm.String())
}
return buf.String()
}
/*
printRecordIDMap appends the ids of a record map to a given buffer.
*/
func printRecordIDMap(buf *bytes.Buffer, recordMap *map[uint64]*Record, name string) {
buf.WriteString(name)
buf.WriteString(" Records: ")
var keys []uint64
for k := range *recordMap {
keys = append(keys, k)
}
sortutil.UInt64s(keys)
l := len(*recordMap)
for _, id := range keys {
buf.WriteString(fmt.Sprintf("%v", id))
if l--; l > 0 {
buf.WriteString(", ")
}
}
}
/*
StorageFileError is a storage file related error.
*/
type StorageFileError struct {
Type error
Detail string
Filename string
}
/*
NewStorageFileError returns a new StorageFile specific error.
*/
func NewStorageFileError(sfeType error, sfeDetail string, sfeFilename string) *StorageFileError {
return &StorageFileError{sfeType, sfeDetail, sfeFilename}
}
/*
Error returns a string representation of the error.
*/
func (e *StorageFileError) Error() string {
return fmt.Sprintf("%s (%s - %s)", e.Type.Error(), e.Filename, e.Detail)
}
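/*
exampleInspectError is an illustrative sketch (not part of the original
source) showing how callers can distinguish storage file errors by their
Type field - the same pattern NewTransactionManager uses to detect ErrBadMagic.
*/
func exampleInspectError(err error) bool {
sfe, ok := err.(*StorageFileError)
return ok && sfe.Type == ErrAlreadyInUse
}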
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package file
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"os"
)
/*
Common TransactionManager related errors
*/
var (
ErrBadMagic = fmt.Errorf("Bad magic for transaction log")
)
/*
LogFileSuffix is the file suffix for transaction log files
*/
const LogFileSuffix = "tlg"
/*
DefaultTransInLog is the default number of transactions which should be kept in memory
(affects how often we sync the log from memory)
*/
const DefaultTransInLog = 10
/*
DefaultTransSize is the default number of records in a single transaction
(affects how many record pointers are allocated at first
per transaction)
*/
const DefaultTransSize = 10
/*
TransactionLogHeader is the magic number to identify transaction log files
*/
var TransactionLogHeader = []byte{0x66, 0x42}
/*
LogFile is the abstract interface for a transaction log file.
*/
type LogFile interface {
io.Writer
io.Closer
Sync() error
}
/*
TransactionManager data structure
*/
type TransactionManager struct {
name string // Name of this transaction manager
logFile LogFile // Log file for transactions
curTrans int // Current transaction pointer
transList [][]*Record // List of transactions - each a list of records
maxTrans int // Maximal number of transaction before log is written
owner *StorageFile // Owner of this manager
}
/*
String returns a string representation of a TransactionManager.
*/
func (t *TransactionManager) String() string {
buf := new(bytes.Buffer)
hasLog := t.logFile != nil
buf.WriteString(fmt.Sprintf("Transaction Manager: %v (logFile:%v curTrans:%v "+
"maxTrans:%v)\n", t.name, hasLog, t.curTrans, t.maxTrans))
buf.WriteString("====\n")
buf.WriteString("transList:\n")
for i := 0; i < len(t.transList); i++ {
buf.WriteString(fmt.Sprint(i, ": "))
for _, record := range t.transList[i] {
buf.WriteString(fmt.Sprint(record.ID(), " "))
}
buf.WriteString("\n")
}
buf.WriteString("====\n")
return buf.String()
}
/*
NewTransactionManager creates a new transaction manager and returns a reference to it.
*/
func NewTransactionManager(owner *StorageFile, doRecover bool) (*TransactionManager, error) {
name := fmt.Sprintf("%s.%s", owner.Name(), LogFileSuffix)
ret := &TransactionManager{name, nil, -1, make([][]*Record, DefaultTransInLog),
DefaultTransInLog, owner}
if doRecover {
if err := ret.recover(); err != nil {
if sfe, ok := err.(*StorageFileError); !ok || sfe.Type != ErrBadMagic {
return nil, err
}
}
// If we have a bad magic just overwrite the transaction file
}
if err := ret.open(); err != nil {
return nil, err
}
return ret, nil
}
/*
recover tries to recover pending transactions from the physical transaction log.
*/
func (t *TransactionManager) recover() error {
file, err := os.OpenFile(t.name, os.O_RDONLY, 0660)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return err
}
defer file.Close()
// Read and verify magic
magic := make([]byte, 2)
i, _ := file.Read(magic)
if i != 2 || magic[0] != TransactionLogHeader[0] ||
magic[1] != TransactionLogHeader[1] {
return NewStorageFileError(ErrBadMagic, "", t.owner.name)
}
for {
var numRecords int64
if err := binary.Read(file, binary.LittleEndian, &numRecords); err != nil {
if err == io.EOF {
break
}
return err
}
recMap := make(map[uint64]*Record)
for i := int64(0); i < numRecords; i++ {
record, err := ReadRecord(file)
if err != nil {
return err
}
// Any duplicated records will only be synced once
// using the latest version
recMap[record.ID()] = record
}
// If something goes wrong here, ignore it and try to sync the rest
t.syncRecords(recMap, false)
}
return nil
}
/*
open opens the transaction log for writing.
*/
func (t *TransactionManager) open() error {
// Always create a new empty transaction log file
file, err := os.OpenFile(t.name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0660)
if err != nil {
return err
}
t.logFile = file
t.logFile.Write(TransactionLogHeader)
t.logFile.Sync()
t.curTrans = -1
return nil
}
/*
start starts a new transaction.
*/
func (t *TransactionManager) start() {
t.curTrans++
if t.curTrans >= t.maxTrans {
t.syncLogFromMemory()
t.curTrans = 0
}
t.transList[t.curTrans] = make([]*Record, 0, DefaultTransSize)
}
/*
add adds a record to the current transaction.
*/
func (t *TransactionManager) add(record *Record) {
record.IncTransCount()
t.transList[t.curTrans] = append(t.transList[t.curTrans], record)
}
/*
commit commits the memory transaction log to the physical transaction log.
*/
func (t *TransactionManager) commit() error {
// Write how many records will be stored
if err := binary.Write(t.logFile, binary.LittleEndian,
int64(len(t.transList[t.curTrans]))); err != nil {
return err
}
// Write records to log file
for _, record := range t.transList[t.curTrans] {
if err := record.WriteRecord(t.logFile); err != nil {
return err
}
}
t.syncFile()
// Clear all dirty flags
for _, record := range t.transList[t.curTrans] {
record.ClearDirty()
}
return nil
}
/*
syncFile syncs the transaction log file with the disk.
*/
func (t *TransactionManager) syncFile() {
t.logFile.Sync()
}
/*
close closes the transaction log file.
*/
func (t *TransactionManager) close() {
t.syncFile()
// If something went wrong with closing the handle
// we don't care as we release the reference
t.logFile.Close()
t.logFile = nil
}
/*
syncLogFromMemory syncs the transaction log from memory to disk.
*/
func (t *TransactionManager) syncLogFromMemory() error {
t.close()
recMap := make(map[uint64]*Record)
for i, transList := range t.transList {
if transList == nil {
continue
}
// Add each record to the record map, decreasing the transaction count
// if the same record is listed twice.
for _, record := range transList {
_, ok := recMap[record.ID()]
if ok {
record.DecTransCount()
} else {
recMap[record.ID()] = record
}
}
t.transList[i] = nil
}
// Write the records from the record list to disk
if err := t.syncRecords(recMap, true); err != nil {
return err
}
t.owner.Sync()
return t.open()
}
/*
syncLogFromDisk syncs the log from disk and clears the memory transaction log.
This is used for the rollback operation.
*/
func (t *TransactionManager) syncLogFromDisk() error {
t.close()
for i, transList := range t.transList {
if transList == nil {
continue
}
// Discard all records which are held in memory
for _, record := range transList {
record.DecTransCount()
if !record.InTransaction() {
t.owner.releaseInTrans(record, false)
}
}
t.transList[i] = nil
}
if err := t.recover(); err != nil {
return err
}
return t.open()
}
/*
syncRecords writes a list of records to the physical disk file.
*/
func (t *TransactionManager) syncRecords(records map[uint64]*Record, clearMemTransLog bool) error {
for _, record := range records {
if err := t.owner.writeRecord(record); err != nil {
return err
}
if clearMemTransLog {
record.DecTransCount()
if !record.InTransaction() {
t.owner.releaseInTrans(record, true)
}
}
}
return nil
}
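/*
exampleTransactionCycle is an illustrative sketch (not part of the original
source) of how StorageFile.Flush drives this manager: start a transaction,
add all dirty records to it and commit them to the physical log.
*/
func exampleTransactionCycle(t *TransactionManager, dirty []*Record) error {
t.start() // Advances curTrans and may trigger syncLogFromMemory
for _, record := range dirty {
t.add(record) // Increments the record's transaction count
}
return t.commit() // Writes the records to the log file and syncs it
}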
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package storage
import (
"errors"
"fmt"
"devt.de/krotik/common/pools"
)
/*
BufferPool is a pool of byte buffers.
*/
var BufferPool = pools.NewByteBufferPool()
/*
Common storage manager related errors.
*/
var (
ErrSlotNotFound = errors.New("Slot not found")
ErrNotInCache = errors.New("No entry in cache")
)
/*
ManagerError is a storage manager related error.
*/
type ManagerError struct {
Type error
Detail string
Managername string
}
/*
NewStorageManagerError returns a new StorageManager specific error.
*/
func NewStorageManagerError(smeType error, smeDetail string, smeManagername string) *ManagerError {
return &ManagerError{smeType, smeDetail, smeManagername}
}
/*
Error returns a string representation of the error.
*/
func (e *ManagerError) Error() string {
return fmt.Sprintf("%s (%s - %s)", e.Type.Error(), e.Managername, e.Detail)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package storage
import (
"bytes"
"fmt"
"sync"
"devt.de/krotik/common/datautil"
"devt.de/krotik/eliasdb/storage/file"
)
/*
Special flags which cause the manager to return errors on specific function calls
*/
const (
AccessNotInCache = 1 // The address will not be accessible via FetchCached
AccessFetchError = 2 // The address will not be accessible via Fetch
AccessUpdateError = 3 // The address will not be accessible via Update
AccessFreeError = 4 // The address will not be accessible via Free
AccessInsertError = 5 // The address will not be accessible via Insert
AccessCacheAndFetchError = 6 // The address will not be accessible via FetchCached nor Fetch
AccessCacheAndFetchSeriousError = 7 // As above but the returned error is a fatal StorageFileError
)
/*
MsmRetClose nil or the error which should be returned by a Close call
*/
var MsmRetClose error
/*
MsmCallNumClose counts how often Close is called
*/
var MsmCallNumClose int
/*
MsmRetFlush nil or the error which should be returned by a Flush call
*/
var MsmRetFlush error
/*
MsmCallNumFlush counts how often Flush is called
*/
var MsmCallNumFlush int
/*
MsmRetRollback nil or the error which should be returned by a Rollback call
*/
var MsmRetRollback error
/*
MsmCallNumRollback counts how often Rollback is called
*/
var MsmCallNumRollback int
/*
MemoryStorageManager data structure
*/
type MemoryStorageManager struct {
name string // Name of the storage manager
Roots map[int]uint64 // Map of roots
Data map[uint64]interface{} // Map of data
mutex *sync.Mutex // Mutex to protect map operations
LocCount uint64 // Counter for locations - Must start > 0
AccessMap map[uint64]int // Special map to simulate access issues
}
/*
NewMemoryStorageManager creates a new MemoryStorageManager
*/
func NewMemoryStorageManager(name string) *MemoryStorageManager {
// LocCount must start > 0 - so they can be stored in a Root. Roots with
// the value 0 are considered empty. See also graph.getHTree which will
// keep creating new HTrees if a Root is 0.
return &MemoryStorageManager{name, make(map[int]uint64),
make(map[uint64]interface{}), &sync.Mutex{}, 1, make(map[uint64]int)}
}
/*
Name returns the name of the StorageManager instance.
*/
func (msm *MemoryStorageManager) Name() string {
return msm.name
}
/*
Root returns a root value. Default (empty) value is 0.
*/
func (msm *MemoryStorageManager) Root(root int) uint64 {
msm.mutex.Lock()
defer msm.mutex.Unlock()
return msm.Roots[root]
}
/*
SetRoot writes a root value.
*/
func (msm *MemoryStorageManager) SetRoot(root int, val uint64) {
msm.mutex.Lock()
defer msm.mutex.Unlock()
msm.Roots[root] = val
}
/*
Insert inserts an object and returns its storage location.
*/
func (msm *MemoryStorageManager) Insert(o interface{}) (uint64, error) {
msm.mutex.Lock()
defer msm.mutex.Unlock()
if msm.AccessMap[msm.LocCount] == AccessInsertError {
return 0, file.NewStorageFileError(file.ErrAlreadyInUse, "", "<memory>")
}
loc := msm.LocCount
msm.LocCount++
msm.Data[loc] = o
return loc, nil
}
/*
Update updates a storage location.
*/
func (msm *MemoryStorageManager) Update(loc uint64, o interface{}) error {
msm.mutex.Lock()
defer msm.mutex.Unlock()
if msm.AccessMap[loc] == AccessUpdateError {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:", loc), msm.Name())
}
msm.Data[loc] = o
return nil
}
/*
Free frees a storage location.
*/
func (msm *MemoryStorageManager) Free(loc uint64) error {
msm.mutex.Lock()
defer msm.mutex.Unlock()
if msm.AccessMap[loc] == AccessFreeError {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:", loc), msm.Name())
}
delete(msm.Data, loc)
return nil
}
/*
Fetch fetches an object from a given storage location and writes it to
a given data container.
*/
func (msm *MemoryStorageManager) Fetch(loc uint64, o interface{}) error {
var err error
msm.mutex.Lock()
defer msm.mutex.Unlock()
if msm.AccessMap[loc] == AccessFetchError || msm.AccessMap[loc] == AccessCacheAndFetchError {
return NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:", loc), msm.Name())
} else if msm.AccessMap[loc] == AccessCacheAndFetchSeriousError {
return file.NewStorageFileError(file.ErrAlreadyInUse, "", "<memory>")
}
if obj, ok := msm.Data[loc]; ok {
err = datautil.CopyObject(obj, o)
} else {
err = NewStorageManagerError(ErrSlotNotFound, fmt.Sprint("Location:", loc), msm.Name())
}
return err
}
/*
FetchCached fetches an object from a cache and returns its reference.
Returns a storage.ErrNotInCache error if the entry is not in the cache.
*/
func (msm *MemoryStorageManager) FetchCached(loc uint64) (interface{}, error) {
msm.mutex.Lock()
defer msm.mutex.Unlock()
if msm.AccessMap[loc] == AccessNotInCache || msm.AccessMap[loc] == AccessCacheAndFetchError {
return nil, NewStorageManagerError(ErrNotInCache, "", msm.Name())
} else if msm.AccessMap[loc] == AccessCacheAndFetchSeriousError {
return nil, file.NewStorageFileError(file.ErrAlreadyInUse, "", "<memory>")
}
return msm.Data[loc], nil
}
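/*
exampleFetchWithFallback is an illustrative sketch (not part of the original
source) of the intended FetchCached usage: try the cache first and fall back
to a full Fetch if ErrNotInCache is returned.
*/
func exampleFetchWithFallback(msm *MemoryStorageManager, loc uint64, o interface{}) error {
obj, err := msm.FetchCached(loc)
if err == nil {
return datautil.CopyObject(obj, o)
}
if sme, ok := err.(*ManagerError); !ok || sme.Type != ErrNotInCache {
return err // Anything but a cache miss is a real error
}
return msm.Fetch(loc, o)
}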
/*
Flush writes all pending changes to disk.
*/
func (msm *MemoryStorageManager) Flush() error {
MsmCallNumFlush++
return MsmRetFlush
}
/*
Rollback cancels all pending changes which have not yet been written to disk.
*/
func (msm *MemoryStorageManager) Rollback() error {
MsmCallNumRollback++
return MsmRetRollback
}
/*
Close closes the StorageManager and writes all pending changes to disk.
*/
func (msm *MemoryStorageManager) Close() error {
MsmCallNumClose++
return MsmRetClose
}
/*
String returns a string representation of the storage manager.
*/
func (msm *MemoryStorageManager) String() string {
buf := new(bytes.Buffer)
buf.WriteString(fmt.Sprintf("MemoryStorageManager %v\n", msm.name))
for k, v := range msm.Data {
buf.WriteString(fmt.Sprintf("%v - %v\n", k, v))
}
return buf.String()
}
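/*
exampleSimulateFetchError is an illustrative sketch (not part of the original
source) showing how the AccessMap can be used to simulate storage failures
in tests.
*/
func exampleSimulateFetchError() {
msm := NewMemoryStorageManager("test")
loc, _ := msm.Insert("some data")
msm.AccessMap[loc] = AccessFetchError // Make Fetch fail for this location
var res string
if err := msm.Fetch(loc, &res); err != nil {
fmt.Println(err) // Prints: Slot not found (test - Location:1)
}
delete(msm.AccessMap, loc) // Restore normal access
}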
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package paging contains functions and constants necessary for paging of records.
NOTE: Operations in this code are expected to either fail completely or succeed.
Errors in the middle of an operation may leave the data structures in an
inconsistent state.
PageCursor
PageCursor is a pointer into a PagedStorageFile and can be used to traverse
a linked list of pages (see also PagedStorageFileHeader which stores the
entry points).
PagedStorageFile
PagedStorageFile is a wrapper object for a StorageFile which views the file
records as a linked list of pages.
PagedStorageFileHeader
PagedStorageFileHeader is a wrapper object for the header record of a StorageFile.
The header record stores information about linked lists and root values.
*/
package paging
/*
PageCursor data structure
*/
type PageCursor struct {
psf *PagedStorageFile // Pager to be used
ptype int16 // Page type which will be traversed
current uint64 // Current page
}
/*
NewPageCursor creates a new cursor object which can be used to traverse a set of pages.
*/
func NewPageCursor(psf *PagedStorageFile, ptype int16, current uint64) *PageCursor {
return &PageCursor{psf, ptype, current}
}
/*
Current gets the page this cursor currently points at.
*/
func (pc *PageCursor) Current() uint64 {
return pc.current
}
/*
Next moves the PageCursor to the next page and returns it.
*/
func (pc *PageCursor) Next() (uint64, error) {
var page uint64
var err error
if pc.current == 0 {
page = pc.psf.First(pc.ptype)
} else {
page, err = pc.psf.Next(pc.current)
if err != nil {
return 0, err
}
}
if page != 0 {
pc.current = page
}
return page, nil
}
/*
Prev moves the PageCursor to the previous page and returns it.
*/
func (pc *PageCursor) Prev() (uint64, error) {
if pc.current == 0 {
return 0, nil
}
page, err := pc.psf.Prev(pc.current)
if err != nil {
return 0, err
}
if page != 0 {
pc.current = page
}
return page, nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package paging
import (
"errors"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
)
/*
Common paged storage file related errors
*/
var (
ErrFreePage = errors.New("Cannot allocate/free a free page")
ErrHeader = errors.New("Cannot modify header record")
)
/*
PagedStorageFile data structure
*/
type PagedStorageFile struct {
storagefile *file.StorageFile // StorageFile which is wrapped
header *PagedStorageFileHeader // Header object
}
/*
NewPagedStorageFile wraps a given StorageFile and returns a PagedStorageFile.
*/
func NewPagedStorageFile(storagefile *file.StorageFile) (*PagedStorageFile, error) {
var header *PagedStorageFileHeader
record, err := storagefile.Get(0)
if err != nil {
return nil, err
}
// Check if this is a new header record or not
isnew := record.ReadInt16(0) == 0
header = NewPagedStorageFileHeader(record, isnew)
return &PagedStorageFile{storagefile, header}, nil
}
/*
StorageFile returns the wrapped StorageFile.
*/
func (psf *PagedStorageFile) StorageFile() *file.StorageFile {
return psf.storagefile
}
/*
Header returns the header object of this PagedStorageFile.
*/
func (psf *PagedStorageFile) Header() *PagedStorageFileHeader {
return psf.header
}
/*
AllocatePage allocates a new page of a specific type.
*/
func (psf *PagedStorageFile) AllocatePage(pagetype int16) (uint64, error) {
var record *file.Record
var err error
if pagetype == view.TypeFreePage {
return 0, ErrFreePage
}
// First check the free list
ptr := psf.header.FirstListElement(view.TypeFreePage)
isnew := ptr == 0
if !isnew {
// If there is something on the free list set the pointer
// for the first item to the second item. The first item
// becomes our newly allocated element.
nextptr, err := psf.Next(ptr)
if err != nil {
return 0, err
}
// Get the record - error checking already done in the
// previous psf.Next call
record, _ = psf.storagefile.Get(ptr)
psf.header.SetFirstListElement(view.TypeFreePage, nextptr)
} else {
// Need to create a new record
ptr = psf.header.LastListElement(view.TypeFreePage)
if ptr == 0 {
// If the file is new the first pointer is 1
ptr = 1
}
// Get the record - if it fails we need to return before
// increasing the last list element pointer
record, err = psf.storagefile.Get(ptr)
if err != nil {
return 0, err
}
// The last list element pointer points to the next free record;
// it is not actually the last element of the list.
psf.header.SetLastListElement(view.TypeFreePage, ptr+1)
}
// Set the view data on the record
var pageview *view.PageView
// Add a temp. page view so we can modify the record
if isnew {
pageview = view.NewPageView(record, pagetype)
} else {
pageview = view.GetPageView(record)
}
oldtail := psf.header.LastListElement(pagetype)
record.ClearData()
pageview.SetType(pagetype)
pageview.SetPrevPage(oldtail)
pageview.SetNextPage(0)
// Check if this page was the first of its type
if oldtail == 0 {
psf.header.SetFirstListElement(pagetype, ptr)
}
// New allocated record is now the new last element
psf.header.SetLastListElement(pagetype, ptr)
// We can release the record now
psf.storagefile.ReleaseInUse(record)
// Need to fix up the pointer of the former previous element
if oldtail != 0 {
record, err = psf.storagefile.Get(oldtail)
if err != nil {
return 0, err
}
pageview = view.GetPageView(record)
pageview.SetNextPage(ptr)
psf.storagefile.ReleaseInUse(record)
}
// Remove temp. page view
record.SetPageView(nil)
return ptr, nil
}
/*
FreePage frees a given page and adds it to the free list.
*/
func (psf *PagedStorageFile) FreePage(id uint64) error {
if id == 0 {
return ErrHeader
}
record, err := psf.storagefile.Get(id)
if err != nil {
return err
}
pageview := view.GetPageView(record)
pagetype := pageview.Type()
if pagetype == view.TypeFreePage {
psf.storagefile.ReleaseInUse(record)
return ErrFreePage
}
prev := pageview.PrevPage()
next := pageview.NextPage()
// Put the page to the front of the free list
pageview.SetType(view.TypeFreePage)
pageview.SetNextPage(psf.header.FirstListElement(view.TypeFreePage))
pageview.SetPrevPage(0)
psf.header.SetFirstListElement(view.TypeFreePage, id)
// NOTE The prev pointers will always point to 0 for records in the
// free list. There is no need to update them.
psf.storagefile.ReleaseInUse(record)
// Remove page from its old list - an error below leaves
// the lists in an inconsistent state.
if prev != 0 {
record, err = psf.storagefile.Get(prev)
if err != nil {
return err
}
pageview := view.GetPageView(record)
pageview.SetNextPage(next)
psf.storagefile.ReleaseInUse(record)
} else {
psf.header.SetFirstListElement(pagetype, next)
}
if next != 0 {
record, err = psf.storagefile.Get(next)
if err != nil {
return err
}
pageview := view.GetPageView(record)
pageview.SetPrevPage(prev)
psf.storagefile.ReleaseInUse(record)
} else {
psf.header.SetLastListElement(pagetype, prev)
}
return nil
}
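/*
exampleAllocateAndFree is an illustrative sketch (not part of the original
source) of a page allocation cycle. A freed page goes to the front of the
free list and is handed out again by the next AllocatePage call.
*/
func exampleAllocateAndFree(psf *PagedStorageFile, pagetype int16) error {
page, err := psf.AllocatePage(pagetype) // pagetype must not be view.TypeFreePage
if err != nil {
return err
}
if err := psf.FreePage(page); err != nil {
return err
}
reused, err := psf.AllocatePage(pagetype) // reused == page
if err != nil {
return err
}
_ = reused
return psf.Flush()
}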
/*
First returns the first page of a list of a given type.
*/
func (psf *PagedStorageFile) First(pagetype int16) uint64 {
return psf.header.FirstListElement(pagetype)
}
/*
Last returns the last page of a list of a given type.
*/
func (psf *PagedStorageFile) Last(pagetype int16) uint64 {
return psf.header.LastListElement(pagetype)
}
/*
Next returns the next page of a given page in a list.
*/
func (psf *PagedStorageFile) Next(id uint64) (uint64, error) {
record, err := psf.storagefile.Get(id)
if err != nil {
return 0, err
}
defer psf.storagefile.ReleaseInUse(record)
pageview := view.GetPageView(record)
return pageview.NextPage(), nil
}
/*
Prev returns the previous page of a given page in a list.
*/
func (psf *PagedStorageFile) Prev(id uint64) (uint64, error) {
record, err := psf.storagefile.Get(id)
if err != nil {
return 0, err
}
defer psf.storagefile.ReleaseInUse(record)
pageview := view.GetPageView(record)
return pageview.PrevPage(), nil
}
/*
Flush writes all pending data to disk.
*/
func (psf *PagedStorageFile) Flush() error {
psf.storagefile.ReleaseInUse(psf.header.record)
if err := psf.storagefile.Flush(); err != nil {
// If an error happens try to recover by putting
// the header record back in use
psf.storagefile.Get(0)
return err
}
// No particular error checking for Get operation as
// it should succeed if the previous Flush was successful.
record, _ := psf.storagefile.Get(0)
psf.header = NewPagedStorageFileHeader(record, false)
return nil
}
/*
Rollback discards all changes which were done after the last flush.
The PagedStorageFile object should be discarded if something
goes wrong during a rollback operation.
*/
func (psf *PagedStorageFile) Rollback() error {
psf.storagefile.Discard(psf.header.record)
if err := psf.storagefile.Rollback(); err != nil {
// If there is a problem try to get the header record back
// otherwise close operations may fail later
psf.header.record, _ = psf.storagefile.Get(0)
return err
}
// No particular error checking for Get operation as
// it should succeed if the previous Rollback was successful.
record, _ := psf.storagefile.Get(0)
psf.header = NewPagedStorageFileHeader(record, record.ReadInt16(0) == 0)
return nil
}
/*
Close commits all data and closes all physical files.
*/
func (psf *PagedStorageFile) Close() error {
if psf.header != nil {
psf.storagefile.ReleaseInUse(psf.header.record)
psf.header = nil
}
if err := psf.storagefile.Close(); err != nil {
return err
}
psf.storagefile = nil
return nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package paging
import "devt.de/krotik/eliasdb/storage/file"
/*
PageHeader is the magic number to identify page headers
*/
const PageHeader = 0x1980
/*
TotalLists is the number of lists which can be stored in this header
*/
const TotalLists = 5
/*
OffsetLists is the offset for list entries in this header
*/
const OffsetLists = 2
/*
OffsetRoots is the offset for root entries in this header
*/
const OffsetRoots = OffsetLists + (2 * TotalLists * file.SizeLong)
/*
PagedStorageFileHeader data structure
*/
type PagedStorageFileHeader struct {
record *file.Record // Record which is being used for the header information
totalRoots int // Number of root values which can be stored
}
/*
NewPagedStorageFileHeader creates a new PagedStorageFileHeader.
*/
func NewPagedStorageFileHeader(record *file.Record, isnew bool) *PagedStorageFileHeader {
totalRoots := (len(record.Data()) - OffsetRoots) / file.SizeLong
if totalRoots < 1 {
panic("Cannot store any roots - record is too small")
}
ret := &PagedStorageFileHeader{record, totalRoots}
if isnew {
record.WriteUInt16(0, PageHeader)
} else {
ret.CheckMagic()
}
return ret
}
/*
CheckMagic checks the header magic value of this header.
*/
func (psfh *PagedStorageFileHeader) CheckMagic() {
if psfh.record.ReadUInt16(0) != PageHeader {
panic("Unexpected header found in PagedStorageFileHeader")
}
}
/*
Roots returns the number of possible root values which can be set.
*/
func (psfh *PagedStorageFileHeader) Roots() int {
return psfh.totalRoots
}
/*
Root returns a root value.
*/
func (psfh *PagedStorageFileHeader) Root(root int) uint64 {
return psfh.record.ReadUInt64(offsetRoot(root))
}
/*
SetRoot sets a root value.
*/
func (psfh *PagedStorageFileHeader) SetRoot(root int, val uint64) {
psfh.record.WriteUInt64(offsetRoot(root), val)
}
/*
offsetRoot calculates the offset of a root in the header record.
*/
func offsetRoot(root int) int {
return OffsetRoots + root*file.SizeLong
}
/*
FirstListElement returns the first element of a list.
*/
func (psfh *PagedStorageFileHeader) FirstListElement(list int16) uint64 {
return psfh.record.ReadUInt64(offsetFirstListElement(list))
}
/*
SetFirstListElement sets the first element of a list.
*/
func (psfh *PagedStorageFileHeader) SetFirstListElement(list int16, val uint64) {
psfh.record.WriteUInt64(offsetFirstListElement(list), val)
}
/*
LastListElement returns the last element of a list.
*/
func (psfh *PagedStorageFileHeader) LastListElement(list int16) uint64 {
return psfh.record.ReadUInt64(offsetLastListElement(list))
}
/*
SetLastListElement sets the last element of a list.
*/
func (psfh *PagedStorageFileHeader) SetLastListElement(list int16, val uint64) {
psfh.record.WriteUInt64(offsetLastListElement(list), val)
}
/*
offsetFirstListElement returns offset of the first element of a list.
*/
func offsetFirstListElement(list int16) int {
return OffsetLists + 2*file.SizeLong*int(list)
}
/*
offsetLastListElement returns offset of the last element of a list.
*/
func offsetLastListElement(list int16) int {
return offsetFirstListElement(list) + file.SizeLong
}
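/*
exampleHeaderLayout is an illustrative sketch (not part of the original
source) spelling out the offset arithmetic of the header record: 2 magic
bytes, then a first/last pointer pair (2 * SizeLong bytes) for each of the
TotalLists lists, then the root values.
*/
func exampleHeaderLayout() (int, int, int) {
first := offsetFirstListElement(3) // 2 + 2*8*3 = 50
last := offsetLastListElement(3) // 50 + 8 = 58
root := offsetRoot(1) // 82 + 1*8 = 90 (OffsetRoots is 82)
return first, last, root
}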
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package paging
/*
CountPages counts the number of pages of a certain type of a given PagedStorageFile.
*/
func CountPages(pager *PagedStorageFile, pagetype int16) (int, error) {
var err error
cursor := NewPageCursor(pager, pagetype, 0)
page, _ := cursor.Next()
counter := 0
for page != 0 {
counter++
page, err = cursor.Next()
if err != nil {
return -1, err
}
}
return counter, nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package view contains general page view constants and functions.
PageView is the super-struct for all page views. A page view is a special object
attached to a particular StorageFile record. A view provides specialised
functions for the record it is attached to.
Use GetPageView() if the record has already view information stored on it or
NewPageView() to initialise or reassign a given record.
*/
package view
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
)
/*
ViewPageHeader is the header magic number to identify view pages
*/
const ViewPageHeader = 0x1990
/*
OffsetNextPage is the offset for next page id
*/
const OffsetNextPage = file.SizeShort
/*
OffsetPrevPage is the offset for previous page id
*/
const OffsetPrevPage = OffsetNextPage + file.SizeLong
/*
OffsetData is the offset for page specific data
*/
const OffsetData = OffsetPrevPage + file.SizeLong
/*
PageView data structure
*/
type PageView struct {
Record *file.Record // Record which is wrapped by the PageView
}
/*
GetPageView returns the page view of a given record.
*/
func GetPageView(record *file.Record) *PageView {
rpv := record.PageView()
pv, ok := rpv.(*PageView)
if ok {
return pv
}
pv = &PageView{record}
pv.checkMagic()
record.SetPageView(pv)
return pv
}
/*
NewPageView creates a new page view for a given record.
*/
func NewPageView(record *file.Record, pagetype int16) *PageView {
pv := &PageView{record}
record.SetPageView(pv)
pv.SetType(pagetype)
return pv
}
/*
Type gets the type of this page view which is stored on the record.
*/
func (pv *PageView) Type() int16 {
return pv.Record.ReadInt16(0) - ViewPageHeader
}
/*
SetType sets the type of this page view which is stored on the record.
*/
func (pv *PageView) SetType(pagetype int16) {
pv.Record.WriteInt16(0, ViewPageHeader+pagetype)
}
/*
checkMagic checks if the magic number at the beginning of the wrapped record
is valid.
*/
func (pv *PageView) checkMagic() bool {
magic := pv.Record.ReadInt16(0)
if magic >= ViewPageHeader &&
magic <= ViewPageHeader+TypeFreePhysicalSlotPage {
return true
}
panic("Unexpected header found in PageView")
}
/*
NextPage returns the id of the next page.
*/
func (pv *PageView) NextPage() uint64 {
pv.checkMagic()
return pv.Record.ReadUInt64(OffsetNextPage)
}
/*
SetNextPage sets the id of the next page.
*/
func (pv *PageView) SetNextPage(val uint64) {
pv.checkMagic()
pv.Record.WriteUInt64(OffsetNextPage, val)
}
/*
PrevPage returns the id of the previous page.
*/
func (pv *PageView) PrevPage() uint64 {
pv.checkMagic()
return pv.Record.ReadUInt64(OffsetPrevPage)
}
/*
SetPrevPage sets the id of the previous page.
*/
func (pv *PageView) SetPrevPage(val uint64) {
pv.checkMagic()
pv.Record.WriteUInt64(OffsetPrevPage, val)
}
func (pv *PageView) String() string {
return fmt.Sprintf("PageView: %v (type:%v previous page:%v next page:%v)",
pv.Record.ID(), pv.Type(), pv.PrevPage(), pv.NextPage())
}
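/*
examplePageViews is an illustrative sketch (not part of the original source)
showing the two ways of attaching a view: NewPageView initialises a fresh
record while GetPageView wraps a record which already carries a valid magic
number.
*/
func examplePageViews(record *file.Record, pagetype int16) uint64 {
pv := NewPageView(record, pagetype) // Writes magic + type to the record
pv.SetNextPage(2)
pv.SetPrevPage(0)
pv = GetPageView(record) // Later lookups reuse or rebuild the view
return pv.NextPage() // Returns 2
}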
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package slotting contains managers which deal with slots on pages.
FreeLogicalSlotManager
FreeLogicalSlotManager is a list manager for free logical slots. This manager
object is used by the LogicalSlotManager.
FreePhysicalSlotManager
FreePhysicalSlotManager is a list manager for free physical slots. This manager
object is used by the PhysicalSlotManager.
LogicalSlotManager
LogicalSlotManager is a list manager for logical slots. Logical slots are stored
on translation pages which store just pointers to physical slots.
PhysicalSlotManager
PhysicalSlotManager is a list manager for physical slots.
*/
package slotting
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/slotting/pageview"
"devt.de/krotik/eliasdb/storage/util"
)
/*
FreeLogicalSlotManager data structure
*/
type FreeLogicalSlotManager struct {
storagefile *file.StorageFile // StorageFile which is wrapped
pager *paging.PagedStorageFile // Pager for StorageFile
slots []uint64 // List of free slots
}
/*
NewFreeLogicalSlotManager creates a new object to manage free logical slots.
*/
func NewFreeLogicalSlotManager(psf *paging.PagedStorageFile) *FreeLogicalSlotManager {
return &FreeLogicalSlotManager{psf.StorageFile(), psf, make([]uint64, 0)}
}
/*
Get gets a free slot.
*/
func (flsm *FreeLogicalSlotManager) Get() (uint64, error) {
// Try to get entry from the slots list
if len(flsm.slots) > 0 {
freeSlot := flsm.slots[len(flsm.slots)-1]
flsm.slots = flsm.slots[:len(flsm.slots)-1]
return freeSlot, nil
}
cursor := paging.NewPageCursor(flsm.pager, view.TypeFreeLogicalSlotPage, 0)
// No need for error checking on cursor next since all pages will be opened
// via Get calls in the loop.
page, _ := cursor.Next()
for page != 0 {
record, err := flsm.storagefile.Get(page)
if err != nil {
return 0, err
}
flsp := pageview.NewFreeLogicalSlotPage(record)
slot := flsp.FirstAllocatedSlotInfo()
if slot != -1 {
// Return a found slot and free the free page if necessary
loc := flsp.SlotInfoLocation(uint16(slot))
// Release the slot
flsp.ReleaseSlotInfo(uint16(slot))
if flsp.FreeSlotCount() == 0 {
// Free the page if no free row id slot is left
flsm.storagefile.ReleaseInUseID(page, false)
flsm.pager.FreePage(page)
} else {
flsm.storagefile.ReleaseInUseID(page, true)
}
return loc, nil
}
flsm.storagefile.ReleaseInUseID(page, false)
page, _ = cursor.Next()
}
return 0, nil
}
/*
Add adds a slot to the free slot set.
*/
func (flsm *FreeLogicalSlotManager) Add(loc uint64) {
if loc == 0 {
// The bit pattern for the 0 location is used to mark free slots
panic("Illigal free logical slot pattern: 0x0")
}
flsm.slots = append(flsm.slots, loc)
}
/*
Flush writes all added slotinfos to FreeLogicalSlotPages.
*/
func (flsm *FreeLogicalSlotManager) Flush() error {
cursor := paging.NewPageCursor(flsm.pager, view.TypeFreeLogicalSlotPage, 0)
index := 0
// Go through all free logical slot pages
// No need for error checking on cursor next since all pages will be opened
// via Get calls in the loop.
page, _ := cursor.Next()
for page != 0 {
// Need to declare err here otherwise index becomes a local for
// the "for" block
var err error
index, err = flsm.doFlush(page, index)
if err != nil {
return err
}
if index >= len(flsm.slots) {
break
}
page, _ = cursor.Next()
}
// Allocate new free logical slot pages if all present ones are full
// and we have still slots to process
for index < len(flsm.slots) {
allocPage, err := flsm.pager.AllocatePage(view.TypeFreeLogicalSlotPage)
if err != nil {
return err
}
index, err = flsm.doFlush(allocPage, index)
if err != nil {
// Try to free the allocated page if there was an error
// ignore any error of the FreePage call
flsm.pager.FreePage(allocPage)
return err
}
}
// Clear the list after all slot information has been written
flsm.slots = make([]uint64, 0)
return nil
}
/*
doFlush writes added slotinfos to a FreeLogicalSlotPage, stopping once the page is full.
*/
func (flsm *FreeLogicalSlotManager) doFlush(page uint64, index int) (int, error) {
r, err := flsm.storagefile.Get(page)
if err != nil {
return index, err
}
flsp := pageview.NewFreeLogicalSlotPage(r)
// Iterate all page slots (stop if the page has no more available slots
// or we reached the end of the page)
slot := flsp.FirstFreeSlotInfo()
for ; slot != -1 && index < len(flsm.slots); index++ {
loc := flsm.slots[index]
offset := flsp.AllocateSlotInfo(uint16(slot))
flsp.SetSlotInfo(offset, util.LocationRecord(loc), util.LocationOffset(loc))
slot = flsp.FirstFreeSlotInfo()
}
flsm.storagefile.ReleaseInUseID(page, true)
return index, nil
}
/*
String returns a string representation of this FreeLogicalSlotManager.
*/
func (flsm *FreeLogicalSlotManager) String() string {
return fmt.Sprintf("FreeLogicalSlotManager: %v\nIds :%v\n",
flsm.storagefile.Name(), flsm.slots)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package slotting
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/slotting/pageview"
"devt.de/krotik/eliasdb/storage/util"
)
/*
FreePhysicalSlotManager data structure
*/
type FreePhysicalSlotManager struct {
storagefile *file.StorageFile // StorageFile which is wrapped
pager *paging.PagedStorageFile // Pager for StorageFile
onlyAppend bool // Flag for append-only mode
lastMaxSlotSize int // Last max slot size
slots []uint64 // List of free slots
sizes []uint32 // List of free slot sizes
}
/*
NewFreePhysicalSlotManager creates a new object to manage free physical slots.
*/
func NewFreePhysicalSlotManager(psf *paging.PagedStorageFile, onlyAppend bool) *FreePhysicalSlotManager {
return &FreePhysicalSlotManager{psf.StorageFile(), psf, onlyAppend, 0,
make([]uint64, 0), make([]uint32, 0)}
}
/*
Get searches for a free physical slot with at least the given size. It returns
0 if no suitable slot could be found.
*/
func (fpsm *FreePhysicalSlotManager) Get(size uint32) (uint64, error) {
// Always report nothing found if we are in append-only mode
if fpsm.onlyAppend {
return 0, nil
}
// Return nothing if a previous full scan showed that no free slot is big
// enough - e.g. if the largest slot seen during that scan was 100 bytes
// then a request for 200 bytes can be rejected immediately
if fpsm.lastMaxSlotSize != 0 && int(size) > fpsm.lastMaxSlotSize {
return 0, nil
}
cursor := paging.NewPageCursor(fpsm.pager, view.TypeFreePhysicalSlotPage, 0)
// No need for error checking on cursor next since all pages will be opened
// via Get calls in the loop.
page, _ := cursor.Next()
for page != 0 {
record, err := fpsm.storagefile.Get(page)
if err != nil {
// Reset the lastMaxSlotSize since we didn't visit all
// FreePhysicalSlotPages
fpsm.lastMaxSlotSize = 0
return 0, err
}
fpsp := pageview.NewFreePhysicalSlotPage(record)
slot := fpsp.FindSlot(size)
// If a slot was found (important: slot id 0 is a valid result - negative
// values mean nothing was found)
if slot >= 0 {
// Return a found slot and free the free page if necessary
fpsm.lastMaxSlotSize = 0
loc := fpsp.SlotInfoLocation(uint16(slot))
// Release slot
fpsp.ReleaseSlotInfo(uint16(slot))
if fpsp.FreeSlotCount() == 0 {
// Free the page if no free slot is stored
fpsm.storagefile.ReleaseInUseID(page, false)
fpsm.pager.FreePage(page)
} else {
fpsm.storagefile.ReleaseInUseID(page, true)
}
return loc, nil
}
if fpsm.lastMaxSlotSize < -slot {
fpsm.lastMaxSlotSize = -slot
}
fpsm.storagefile.ReleaseInUseID(page, false)
page, _ = cursor.Next()
}
return 0, nil
}
/*
Add adds a slotinfo to the free slot set.
*/
func (fpsm *FreePhysicalSlotManager) Add(loc uint64, size uint32) {
if size > 0 {
fpsm.slots = append(fpsm.slots, loc)
fpsm.sizes = append(fpsm.sizes, size)
}
}
/*
Flush writes all added slotinfos to FreePhysicalSlotPages.
*/
func (fpsm *FreePhysicalSlotManager) Flush() error {
cursor := paging.NewPageCursor(fpsm.pager, view.TypeFreePhysicalSlotPage, 0)
index := 0
// Go through all free physical slot pages
// No need for error checking on cursor next since all pages will be opened
// via Get calls in the loop.
page, _ := cursor.Next()
for page != 0 {
// Need to declare err separately here, otherwise a := assignment
// would make index local to the "for" block
var err error
index, err = fpsm.doFlush(page, index)
if err != nil {
return err
}
if index >= len(fpsm.slots) {
break
}
page, _ = cursor.Next()
}
// Allocate new free physical slot pages if all present ones are full
// and we still have slots to process
for index < len(fpsm.slots) {
allocPage, err := fpsm.pager.AllocatePage(view.TypeFreePhysicalSlotPage)
if err != nil {
return err
}
index, err = fpsm.doFlush(allocPage, index)
if err != nil {
// Try to free the allocated page if there was an error
// ignore any error of the FreePage call
fpsm.pager.FreePage(allocPage)
return err
}
}
// Clear the lists after all slot information has been written
fpsm.slots = make([]uint64, 0)
fpsm.sizes = make([]uint32, 0)
return nil
}
/*
doFlush writes added slotinfos to a FreePhysicalSlotPage, stopping once the page is full.
*/
func (fpsm *FreePhysicalSlotManager) doFlush(page uint64, index int) (int, error) {
r, err := fpsm.storagefile.Get(page)
if err != nil {
return index, err
}
fpsp := pageview.NewFreePhysicalSlotPage(r)
// Iterate all page slots (stop if the page has no more available slots
// or we reached the end of the page)
slot := fpsp.FirstFreeSlotInfo()
for ; slot != -1 && index < len(fpsm.slots); index++ {
loc := fpsm.slots[index]
size := fpsm.sizes[index]
if size > 0 {
offset := fpsp.AllocateSlotInfo(uint16(slot))
fpsp.SetSlotInfo(offset, util.LocationRecord(loc), util.LocationOffset(loc))
fpsp.SetFreeSlotSize(offset, size)
slot = fpsp.FirstFreeSlotInfo()
}
}
fpsm.storagefile.ReleaseInUseID(page, true)
return index, nil
}
/*
String returns a string representation of this FreePhysicalSlotManager.
*/
func (fpsm *FreePhysicalSlotManager) String() string {
return fmt.Sprintf("FreePhysicalSlotManager: %v (onlyAppend:%v lastMaxSlotSize:%v)\nIds :%v\nSizes:%v",
fpsm.storagefile.Name(), fpsm.onlyAppend, fpsm.lastMaxSlotSize, fpsm.slots, fpsm.sizes)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package slotting
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/slotting/pageview"
"devt.de/krotik/eliasdb/storage/util"
)
/*
LogicalSlotManager data structure
*/
type LogicalSlotManager struct {
storagefile *file.StorageFile // StorageFile which is wrapped
pager *paging.PagedStorageFile // Pager for StorageFile
freeManager *FreeLogicalSlotManager // Manager for free slots
recordSize uint32 // Size of records
elementsPerPage uint16 // Available elements per page
}
/*
NewLogicalSlotManager creates a new object to manage logical slots. This
factory function requires two PagedStorageFiles: the first holds the actual
logical slots, the second is used to manage free logical slots.
*/
func NewLogicalSlotManager(lsf *paging.PagedStorageFile,
flsf *paging.PagedStorageFile) *LogicalSlotManager {
sf := lsf.StorageFile()
freeManager := NewFreeLogicalSlotManager(flsf)
recordSize := sf.RecordSize()
return &LogicalSlotManager{sf, lsf, freeManager,
recordSize, uint16((recordSize - pageview.OffsetTransData) / util.LocationSize)}
}
/*
ElementsPerPage returns the available elements per page.
*/
func (lsm *LogicalSlotManager) ElementsPerPage() uint16 {
return lsm.elementsPerPage
}
/*
Insert inserts a given physical slot info and returns a logical slot for it.
*/
func (lsm *LogicalSlotManager) Insert(location uint64) (uint64, error) {
// Try to get a free slot from the FreeLogicalSlotManager
slot, err := lsm.freeManager.Get()
if err != nil {
return 0, err
}
if slot == 0 {
// Allocate a new page and give all its rows to the free manager
allocPage, err := lsm.pager.AllocatePage(view.TypeTranslationPage)
if err != nil {
return 0, err
}
offset := uint16(pageview.OffsetTransData)
var i uint16
for i = 0; i < lsm.elementsPerPage; i++ {
lsm.freeManager.Add(util.PackLocation(allocPage, offset))
offset += util.LocationSize
}
err = lsm.Flush()
if err != nil {
// Try to clean up if something goes wrong
// Make the freeManager forget that it received anything
lsm.freeManager.slots = make([]uint64, 0)
// Free the allocated page again
lsm.pager.FreePage(allocPage)
return 0, err
}
// Now get the first slot from the newly allocated page - no need for
// error checking since we just flushed the page and all is well
slot, _ = lsm.freeManager.Get()
}
// Write physical slot data to translation page
return slot, lsm.Update(slot, location)
}
/*
ForceInsert inserts a given physical slot info at a given logical slot.
*/
func (lsm *LogicalSlotManager) ForceInsert(logicalSlot uint64, location uint64) error {
page := lsm.pager.Last(view.TypeTranslationPage)
targetPage := util.LocationRecord(logicalSlot)
// If the target page hasn't been allocated yet then create new pages
// until the target page is available and we can force insert into the
// requested slot
for page < targetPage {
var err error
page, err = lsm.pager.AllocatePage(view.TypeTranslationPage)
if err != nil {
return err
}
}
slot, err := lsm.Fetch(logicalSlot)
if err != nil {
return err
}
if slot != 0 {
panic(fmt.Sprintf("Cannot force insert into slot %v because it already exists",
logicalSlot))
}
return lsm.Update(logicalSlot, location)
}
/*
Update updates a given logical slot with a physical slot info.
*/
func (lsm *LogicalSlotManager) Update(logicalSlot uint64, location uint64) error {
recordID := util.LocationRecord(logicalSlot)
record, err := lsm.storagefile.Get(recordID)
if err != nil {
return err
}
page := pageview.NewTransPage(record)
page.SetSlotInfo(util.LocationOffset(logicalSlot), util.LocationRecord(location),
util.LocationOffset(location))
lsm.storagefile.ReleaseInUseID(recordID, true)
return nil
}
/*
Free frees a given logical slot by clearing its slot info on the translation page.
*/
func (lsm *LogicalSlotManager) Free(logicalSlot uint64) error {
recordID := util.LocationRecord(logicalSlot)
record, err := lsm.storagefile.Get(recordID)
if err != nil {
return err
}
page := pageview.NewTransPage(record)
page.SetSlotInfo(util.LocationOffset(logicalSlot), util.LocationRecord(0),
util.LocationOffset(0))
return lsm.storagefile.ReleaseInUseID(recordID, true)
}
/*
Fetch looks up a physical slot using a given logical slot.
*/
func (lsm *LogicalSlotManager) Fetch(logicalSlot uint64) (uint64, error) {
recordID := util.LocationRecord(logicalSlot)
offset := util.LocationOffset(logicalSlot)
if lastPage := lsm.pager.Last(view.TypeTranslationPage); lastPage < recordID {
// Return if the requested page doesn't exist yet
return 0, nil
}
record, err := lsm.storagefile.Get(recordID)
if err != nil {
return 0, err
}
page := pageview.NewTransPage(record)
slot := util.PackLocation(page.SlotInfoRecord(offset), page.SlotInfoOffset(offset))
lsm.storagefile.ReleaseInUseID(recordID, false)
return slot, nil
}
/*
Flush writes all pending changes.
*/
func (lsm *LogicalSlotManager) Flush() error {
return lsm.freeManager.Flush()
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package pageview contains object wrappers for different page types.
DataPage
DataPage is a page which holds actual data.
FreeLogicalSlotPage
FreeLogicalSlotPage is a page which holds information about free logical slots.
The page stores the slot location in a slotinfo data structure.
FreePhysicalSlotPage
FreePhysicalSlotPage is a page which holds information about free physical slots.
The page stores the slot location and its size in a slotinfo data structure
(see util/slotsize.go).
SlotInfoPage
SlotInfoPage is the super-struct for all page views which manage slotinfos.
Slotinfos are location pointers (see util/location.go) into the data store,
containing a record id and an offset.
TransPage
TransPage is a page which holds data to translate between physical and logical
slots.
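
A rough sketch of wrapping a record in a page view (hypothetical - it assumes
file.NewRecord and a Record.WriteInt16 which is symmetric to the ReadInt16
used by the magic number checks below):

    record := file.NewRecord(1, make([]byte, 4096))

    // A page view expects its magic number at the start of the record

    record.WriteInt16(0, view.ViewPageHeader+view.TypeDataPage)

    dp := NewDataPage(record)
    dp.SetOffsetFirst(OffsetData)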
*/
package pageview
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
)
/*
OffsetFirst is the offset of the pointer to the first element on the page
*/
const OffsetFirst = view.OffsetData
// OffsetData is declared in freephysicalslotpage
/*
DataPage data structure
*/
type DataPage struct {
*SlotInfoPage
}
/*
NewDataPage creates a new page which holds actual data.
*/
func NewDataPage(record *file.Record) *DataPage {
checkDataPageMagic(record)
dp := &DataPage{NewSlotInfoPage(record)}
return dp
}
/*
checkDataPageMagic checks if the magic number at the beginning of
the wrapped record is valid.
*/
func checkDataPageMagic(record *file.Record) bool {
magic := record.ReadInt16(0)
if magic == view.ViewPageHeader+view.TypeDataPage {
return true
}
panic("Unexpected header found in DataPage")
}
/*
DataSpace returns the available data space on this page.
*/
func (dp *DataPage) DataSpace() uint16 {
return uint16(len(dp.Record.Data()) - OffsetData)
}
/*
OffsetFirst returns the pointer to the first element on the page.
*/
func (dp *DataPage) OffsetFirst() uint16 {
return dp.Record.ReadUInt16(OffsetFirst)
}
/*
SetOffsetFirst sets the pointer to the first element on the page.
*/
func (dp *DataPage) SetOffsetFirst(first uint16) {
if first > 0 && first < OffsetData {
panic(fmt.Sprint("Cannot set offset of first element on DataPage below ", OffsetData))
}
dp.Record.WriteUInt16(OffsetFirst, first)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package pageview
import (
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/util"
)
// OffsetCount / OffsetData are declared in freephysicalslotpage
/*
FreeLogicalSlotPage data structure
*/
type FreeLogicalSlotPage struct {
*SlotInfoPage
maxSlots uint16 // Max number of slots
prevFoundFreeSlot uint16 // Previously found free slot
prevFoundAllocatedSlot uint16 // Previously found allocated slot
}
/*
NewFreeLogicalSlotPage creates a new page which can manage free slots.
*/
func NewFreeLogicalSlotPage(record *file.Record) *FreeLogicalSlotPage {
checkFreeLogicalSlotPageMagic(record)
maxSlots := (len(record.Data()) - OffsetData) / util.LocationSize
return &FreeLogicalSlotPage{NewSlotInfoPage(record), uint16(maxSlots), 0, 0}
}
/*
checkFreeLogicalSlotPageMagic checks if the magic number at the beginning of
the wrapped record is valid.
*/
func checkFreeLogicalSlotPageMagic(record *file.Record) bool {
magic := record.ReadInt16(0)
if magic == view.ViewPageHeader+view.TypeFreeLogicalSlotPage {
return true
}
panic("Unexpected header found in FreeLogicalSlotPage")
}
/*
MaxSlots returns the maximum number of slots which can be allocated.
*/
func (flsp *FreeLogicalSlotPage) MaxSlots() uint16 {
return flsp.maxSlots
}
/*
FreeSlotCount returns the number of free slots on this page.
*/
func (flsp *FreeLogicalSlotPage) FreeSlotCount() uint16 {
return flsp.Record.ReadUInt16(OffsetCount)
}
/*
SlotInfoLocation returns contents of a stored slotinfo as a location. Lookup is via a
given slotinfo id.
*/
func (flsp *FreeLogicalSlotPage) SlotInfoLocation(slotinfo uint16) uint64 {
offset := flsp.slotinfoToOffset(slotinfo)
return util.PackLocation(flsp.SlotInfoRecord(offset), flsp.SlotInfoOffset(offset))
}
/*
AllocateSlotInfo allocates a place for a slotinfo and returns the offset for it.
*/
func (flsp *FreeLogicalSlotPage) AllocateSlotInfo(slotinfo uint16) uint16 {
offset := flsp.slotinfoToOffset(slotinfo)
// Set slotinfo to initial values
flsp.SetSlotInfo(offset, 1, 1)
// Increase counter for allocated slotinfos
flsp.Record.WriteUInt16(OffsetCount, flsp.FreeSlotCount()+1)
// Update prevFoundAllocatedSlot if necessary
if slotinfo < flsp.prevFoundAllocatedSlot {
flsp.prevFoundAllocatedSlot = slotinfo
}
return offset
}
/*
ReleaseSlotInfo releases a place for a slotinfo and returns its offset.
*/
func (flsp *FreeLogicalSlotPage) ReleaseSlotInfo(slotinfo uint16) uint16 {
offset := flsp.slotinfoToOffset(slotinfo)
// Set slotinfo to empty values
flsp.SetSlotInfo(offset, 0, 0)
// Decrease counter for allocated slotinfos
flsp.Record.WriteUInt16(OffsetCount, flsp.FreeSlotCount()-1)
// Update prevFoundFreeSlot if necessary
if slotinfo < flsp.prevFoundFreeSlot {
flsp.prevFoundFreeSlot = slotinfo
}
return offset
}
/*
FirstFreeSlotInfo returns the id for the first available slotinfo or -1 if
nothing is available.
*/
func (flsp *FreeLogicalSlotPage) FirstFreeSlotInfo() int {
for flsp.prevFoundFreeSlot < flsp.maxSlots {
if !flsp.isAllocatedSlot(flsp.prevFoundFreeSlot) {
return int(flsp.prevFoundFreeSlot)
}
flsp.prevFoundFreeSlot++
}
return -1
}
/*
FirstAllocatedSlotInfo returns the id for the first allocated slotinfo or -1 if
nothing is allocated.
*/
func (flsp *FreeLogicalSlotPage) FirstAllocatedSlotInfo() int {
for flsp.prevFoundAllocatedSlot < flsp.maxSlots {
if flsp.isAllocatedSlot(flsp.prevFoundAllocatedSlot) {
return int(flsp.prevFoundAllocatedSlot)
}
flsp.prevFoundAllocatedSlot++
}
return -1
}
/*
isAllocatedSlot checks if a given slotinfo is allocated.
*/
func (flsp *FreeLogicalSlotPage) isAllocatedSlot(slotinfo uint16) bool {
offset := flsp.slotinfoToOffset(slotinfo)
return flsp.SlotInfoRecord(offset) != 0 || flsp.SlotInfoOffset(offset) != 0
}
/*
slotinfoToOffset converts a slotinfo number into an offset on the record.
*/
func (flsp *FreeLogicalSlotPage) slotinfoToOffset(slotinfo uint16) uint16 {
return OffsetData + slotinfo*util.LocationSize
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package pageview
import (
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/util"
)
/*
OffsetCount is the number of free slots which are stored on this page
*/
const OffsetCount = view.OffsetData
/*
OffsetData is the offset for slot information
*/
const OffsetData = OffsetCount + file.SizeShort
/*
SlotInfoSize is the size of a single free slot info
*/
const SlotInfoSize = util.LocationSize + util.SizeInfoSize
/*
OptimalWasteMargin is the maximum amount of allocation waste we aim for. When
searching a slot on this page we should strive to find one which doesn't waste
more than OptimalWasteMargin bytes
*/
const OptimalWasteMargin = 128
/*
FreePhysicalSlotPage data structure
*/
type FreePhysicalSlotPage struct {
*SlotInfoPage
maxSlots uint16 // Max number of slots
maxAcceptableWaste uint32 // Max acceptable waste for a slot allocation
sizeCache []uint32 // Cache for slot sizes
}
/*
NewFreePhysicalSlotPage creates a new page which can manage free slots.
*/
func NewFreePhysicalSlotPage(record *file.Record) *FreePhysicalSlotPage {
checkFreePhysicalSlotPageMagic(record)
maxSlots := (len(record.Data()) - OffsetData) / SlotInfoSize
maxAcceptableWaste := len(record.Data()) / 4
return &FreePhysicalSlotPage{NewSlotInfoPage(record), uint16(maxSlots),
uint32(maxAcceptableWaste), make([]uint32, maxSlots)}
}
/*
checkFreePhysicalSlotPageMagic checks if the magic number at the beginning of
the wrapped record is valid.
*/
func checkFreePhysicalSlotPageMagic(record *file.Record) bool {
magic := record.ReadInt16(0)
if magic == view.ViewPageHeader+view.TypeFreePhysicalSlotPage {
return true
}
panic("Unexpected header found in FreePhysicalSlotPage")
}
/*
MaxSlots returns the maximum number of slots which can be allocated.
*/
func (fpsp *FreePhysicalSlotPage) MaxSlots() uint16 {
return fpsp.maxSlots
}
/*
FreeSlotCount returns the number of free slots on this page.
*/
func (fpsp *FreePhysicalSlotPage) FreeSlotCount() uint16 {
return fpsp.Record.ReadUInt16(OffsetCount)
}
/*
SlotInfoLocation returns contents of a stored slotinfo as a location. Lookup is via a
given slotinfo id.
*/
func (fpsp *FreePhysicalSlotPage) SlotInfoLocation(slotinfo uint16) uint64 {
offset := fpsp.slotinfoToOffset(slotinfo)
return util.PackLocation(fpsp.SlotInfoRecord(offset), fpsp.SlotInfoOffset(offset))
}
/*
FreeSlotSize returns the size of a free slot. Lookup is via offset.
*/
func (fpsp *FreePhysicalSlotPage) FreeSlotSize(offset uint16) uint32 {
slotinfo := fpsp.offsetToSlotinfo(offset)
if fpsp.sizeCache[slotinfo] == 0 {
fpsp.sizeCache[slotinfo] = fpsp.Record.ReadUInt32(int(offset + util.LocationSize))
}
return fpsp.sizeCache[slotinfo]
}
/*
SetFreeSlotSize sets the size of a free slot. Lookup is via offset.
*/
func (fpsp *FreePhysicalSlotPage) SetFreeSlotSize(offset uint16, size uint32) {
slotinfo := fpsp.offsetToSlotinfo(offset)
fpsp.sizeCache[slotinfo] = size
fpsp.Record.WriteUInt32(int(offset+util.LocationSize), size)
}
/*
AllocateSlotInfo allocates a place for a slotinfo and returns the offset for it.
*/
func (fpsp *FreePhysicalSlotPage) AllocateSlotInfo(slotinfo uint16) uint16 {
offset := fpsp.slotinfoToOffset(slotinfo)
// Set slotinfo to initial values
fpsp.SetFreeSlotSize(offset, 1)
fpsp.SetSlotInfo(offset, 1, 1)
// Increase counter for allocated slotinfos
fpsp.Record.WriteUInt16(OffsetCount, fpsp.FreeSlotCount()+1)
return offset
}
/*
ReleaseSlotInfo releases a place for a slotinfo and returns its offset.
*/
func (fpsp *FreePhysicalSlotPage) ReleaseSlotInfo(slotinfo uint16) uint16 {
offset := fpsp.slotinfoToOffset(slotinfo)
// Set slotinfo to empty values
fpsp.SetFreeSlotSize(offset, 0)
fpsp.SetSlotInfo(offset, 0, 0)
// Decrease counter for allocated slotinfos
fpsp.Record.WriteUInt16(OffsetCount, fpsp.FreeSlotCount()-1)
return offset
}
/*
FirstFreeSlotInfo returns the id for the first available slotinfo for allocation
or -1 if nothing is available.
*/
func (fpsp *FreePhysicalSlotPage) FirstFreeSlotInfo() int {
var i uint16
for i = 0; i < fpsp.maxSlots; i++ {
if !fpsp.isAllocatedSlot(i) {
return int(i)
}
}
return -1
}
/*
FindSlot finds a slot which is suitable for a given amount of data but which is
also not too big, to avoid wasting space. It returns the id of a suitable
slotinfo or the negated maximum free slot size found on this page if no
suitable slot is available.
*/
func (fpsp *FreePhysicalSlotPage) FindSlot(minSize uint32) int {
var i uint16
bestSlot := -1
bestSlotWaste := fpsp.maxAcceptableWaste + 1
var maxSize uint32
for i = 0; i < fpsp.maxSlots; i++ {
slotinfoOffset := fpsp.slotinfoToOffset(i)
slotinfoSize := fpsp.FreeSlotSize(slotinfoOffset)
if slotinfoSize > maxSize {
maxSize = slotinfoSize
}
// Test if the block would fit - the sizes are unsigned so the waste
// is only calculated for blocks which are big enough
if slotinfoSize >= minSize {
// Calculate the wasted space
waste := slotinfoSize - minSize
if waste < OptimalWasteMargin {
// In the ideal case we can minimise the produced waste
return int(i)
} else if bestSlotWaste > waste {
// Too much for optimal waste margin but may still be OK if
// we don't find anything better
bestSlot = int(i)
bestSlotWaste = waste
}
}
}
if bestSlot != -1 {
// We found a block but its waste was above the optimal waste margin
// check if it is still acceptable
// Note: It must be below MaxAvailableSizeDifference as a row
// stores the current size as the difference to the available size.
// This difference must fit in an unsigned short.
if bestSlotWaste < fpsp.maxAcceptableWaste &&
bestSlotWaste < util.MaxAvailableSizeDifference {
return bestSlot
}
}
return -int(maxSize)
}
/*
isAllocatedSlot checks if a given slotinfo is allocated.
*/
func (fpsp *FreePhysicalSlotPage) isAllocatedSlot(slotinfo uint16) bool {
offset := fpsp.slotinfoToOffset(slotinfo)
return fpsp.FreeSlotSize(offset) != 0
}
/*
slotinfoToOffset converts a slotinfo number into an offset on the record.
*/
func (fpsp *FreePhysicalSlotPage) slotinfoToOffset(slotinfo uint16) uint16 {
return OffsetData + slotinfo*SlotInfoSize
}
/*
offsetToSlotinfo converts an offset into a slotinfo number.
*/
func (fpsp *FreePhysicalSlotPage) offsetToSlotinfo(offset uint16) uint16 {
return (offset - OffsetData) / SlotInfoSize
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package pageview
import (
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/util"
)
/*
SlotInfoPage data structure
*/
type SlotInfoPage struct {
*view.PageView
}
/*
NewSlotInfoPage creates a new SlotInfoPage object which can manage slotinfos.
*/
func NewSlotInfoPage(record *file.Record) *SlotInfoPage {
pv := view.GetPageView(record)
return &SlotInfoPage{pv}
}
/*
SlotInfoRecord gets the record id of a stored slotinfo.
*/
func (lm *SlotInfoPage) SlotInfoRecord(offset uint16) uint64 {
return util.LocationRecord(lm.Record.ReadUInt64(int(offset)))
}
/*
SlotInfoOffset gets the record offset of a stored slotinfo.
*/
func (lm *SlotInfoPage) SlotInfoOffset(offset uint16) uint16 {
return util.LocationOffset(lm.Record.ReadUInt64(int(offset)))
}
/*
SetSlotInfo stores a slotinfo on the pageview's record.
*/
func (lm *SlotInfoPage) SetSlotInfo(slotinfoOffset uint16, recordID uint64, offset uint16) {
lm.Record.WriteUInt64(int(slotinfoOffset), util.PackLocation(recordID, offset))
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package pageview
import (
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging/view"
)
/*
OffsetTransData is the data offset for translation pages
*/
const OffsetTransData = view.OffsetData
/*
TransPage data structure
*/
type TransPage struct {
*SlotInfoPage
}
/*
NewTransPage creates a new page which holds data to translate between physical
and logical slots.
*/
func NewTransPage(record *file.Record) *TransPage {
checkTransPageMagic(record)
return &TransPage{NewSlotInfoPage(record)}
}
/*
checkTransPageMagic checks if the magic number at the beginning of
the wrapped record is valid.
*/
func checkTransPageMagic(record *file.Record) bool {
magic := record.ReadInt16(0)
if magic == view.ViewPageHeader+view.TypeTranslationPage {
return true
}
panic("Unexpected header found in TransPage")
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package slotting
import (
"io"
"devt.de/krotik/eliasdb/storage/file"
"devt.de/krotik/eliasdb/storage/paging"
"devt.de/krotik/eliasdb/storage/paging/view"
"devt.de/krotik/eliasdb/storage/slotting/pageview"
"devt.de/krotik/eliasdb/storage/util"
)
/*
AllocationRoundUpThreshold is used to decide if a slot size should be rounded
up. If an allocation would leave fewer than AllocationRoundUpThreshold + 1
bytes free on the page then the allocation size is rounded up to fill the page.
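For example: if an allocation would leave only 12 bytes at the end of a page
- too little for another slot header plus useful data - the slot is enlarged
to consume the remaining space, provided the enlarged size is still a valid
normalized slot size.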
*/
const AllocationRoundUpThreshold = 16
/*
PhysicalSlotManager data structure
*/
type PhysicalSlotManager struct {
storagefile *file.StorageFile // StorageFile which is wrapped
pager *paging.PagedStorageFile // Pager for StorageFile
freeManager *FreePhysicalSlotManager // Manager for free slots
recordSize uint32 // Size of records
availableRecordSize uint32 // Available space on records
}
/*
NewPhysicalSlotManager creates a new object to manage physical slots. This
factory function requires two PagedStorageFiles: the first holds the actual
physical slots, the second is used to manage free physical slots.
*/
func NewPhysicalSlotManager(psf *paging.PagedStorageFile,
fpsf *paging.PagedStorageFile, onlyAppend bool) *PhysicalSlotManager {
sf := psf.StorageFile()
freeManager := NewFreePhysicalSlotManager(fpsf, onlyAppend)
recordSize := sf.RecordSize()
return &PhysicalSlotManager{sf, psf, freeManager,
recordSize, recordSize - pageview.OffsetData}
}
/*
Insert inserts a new piece of data.
*/
func (psm *PhysicalSlotManager) Insert(data []byte, start uint32, length uint32) (uint64, error) {
if length == 0 {
panic("Cannot insert 0 bytes of data")
}
location, err := psm.allocate(length)
if err != nil {
return 0, err
}
err = psm.write(location, data, start, length)
if err != nil {
// Since the write operation failed, declare the previously allocated
// space as free
psm.freeManager.Add(location, length)
return 0, err
}
return location, nil
}
/*
Update updates the data in a slot.
*/
func (psm *PhysicalSlotManager) Update(location uint64, data []byte, start uint32, length uint32) (uint64, error) {
record, err := psm.storagefile.Get(util.LocationRecord(location))
if err != nil {
return 0, err
}
offset := util.LocationOffset(location)
availableSize := util.AvailableSize(record, int(offset))
psm.storagefile.ReleaseInUse(record)
if length > availableSize || availableSize-length > util.MaxAvailableSizeDifference {
// Reallocate if the new data is too big for the old slot or if the
// data is much smaller than the available space in the slot (i.e.
// there would be a lot of waste)
// Error handling for the Free call is covered by the first Get call
// of this function.
psm.Free(location)
location, err = psm.allocate(length)
if err != nil {
return 0, err
}
}
err = psm.write(location, data, start, length)
if err != nil {
return 0, err
}
return location, nil
}
/*
Fetch reads the data stored at a given location and writes it to the given io.Writer.
*/
func (psm *PhysicalSlotManager) Fetch(location uint64, writer io.Writer) error {
cursor := paging.NewPageCursor(psm.pager, view.TypeDataPage, util.LocationRecord(location))
record, err := psm.storagefile.Get(cursor.Current())
if err != nil {
return err
}
length := util.CurrentSize(record, int(util.LocationOffset(location)))
if length == 0 {
// Return at this point if there is nothing to read
psm.storagefile.ReleaseInUseID(cursor.Current(), false)
return nil
}
// Now read the bytes
restSize := length
recordOffset := uint32(util.LocationOffset(location) + util.SizeInfoSize)
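// Illustrative example (assuming 4096 byte records): for a slot at record
// offset 4000 holding 1000 bytes, the first record supplies bytes
// 4004-4095 (92 bytes) and the remaining 908 bytes are read from the
// following data pages starting at pageview.OffsetData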
for restSize > 0 {
// Calculate how much data should be read
toCopy := psm.recordSize - uint32(recordOffset)
if restSize < toCopy {
// If the record can contain more than restSize just
// read restSize
toCopy = restSize
}
// Read the data
writer.Write(record.Data()[recordOffset : recordOffset+toCopy])
// Calculate the rest size and new offset
restSize -= toCopy
psm.storagefile.ReleaseInUseID(cursor.Current(), false)
// Go to the next record
if restSize > 0 {
// Error handling is done by surrounding Get calls
next, _ := cursor.Next()
record, err = psm.storagefile.Get(next)
if err != nil {
return err
}
recordOffset = pageview.OffsetData
}
}
return nil
}
/*
Free frees a given physical slot. The slot is handed over to the FreePhysicalSlotManager.
*/
func (psm *PhysicalSlotManager) Free(location uint64) error {
slotRecord := util.LocationRecord(location)
slotOffset := int(util.LocationOffset(location))
record, err := psm.storagefile.Get(slotRecord)
if err != nil {
return err
}
util.SetCurrentSize(record, slotOffset, 0)
// Read the available size before releasing the record
psm.freeManager.Add(location, util.AvailableSize(record, slotOffset))
psm.storagefile.ReleaseInUseID(slotRecord, true)
return nil
}
/*
Flush writes all pending changes.
*/
func (psm *PhysicalSlotManager) Flush() error {
return psm.freeManager.Flush()
}
/*
write writes data to a location. Should an error occur, the already written
data is not cleaned up.
*/
func (psm *PhysicalSlotManager) write(location uint64, data []byte, start uint32, length uint32) error {
cursor := paging.NewPageCursor(psm.pager, view.TypeDataPage, util.LocationRecord(location))
record, err := psm.storagefile.Get(cursor.Current())
if err != nil {
return err
}
util.SetCurrentSize(record, int(util.LocationOffset(location)), length)
if length == 0 {
// Return at this point if there is nothing to write
psm.storagefile.ReleaseInUseID(cursor.Current(), true)
return nil
}
// Now write the bytes
restSize := length
dataOffset := start
recordOffset := uint32(util.LocationOffset(location) + util.SizeInfoSize)
for restSize > 0 {
// Calculate how much data should be written
toCopy := psm.recordSize - uint32(recordOffset)
if restSize < toCopy {
// If the record can contain more than restSize just
// write restSize
toCopy = restSize
}
// Write the data
dataOffset2 := dataOffset + toCopy
recordOffset2 := recordOffset + toCopy
copy(record.Data()[recordOffset:recordOffset2], data[dataOffset:dataOffset2])
// Calculate the rest size and new offset
restSize -= toCopy
dataOffset += toCopy
psm.storagefile.ReleaseInUseID(cursor.Current(), true)
// Go to the next record
if restSize > 0 {
// Error handling is done by surrounding Get calls
next, _ := cursor.Next()
record, err = psm.storagefile.Get(next)
if err != nil {
return err
}
recordOffset = pageview.OffsetData
}
}
return nil
}
/*
allocate allocates a new slot of a given size.
*/
func (psm *PhysicalSlotManager) allocate(size uint32) (uint64, error) {
// Normalize slot size
normalizedSize := util.NormalizeSlotSize(size)
// Try to find a free slot which was previously allocated
loc, err := psm.freeManager.Get(normalizedSize)
if err != nil {
return 0, err
}
// If nothing of the right size was previously allocated then allocate
// something new
if loc == 0 {
lastpage := psm.pager.Last(view.TypeDataPage)
loc, err = psm.allocateNew(normalizedSize, lastpage)
if err != nil {
return 0, err
}
} else {
// If a location was found in the freeManager then try
// to access it to make sure it is available - revert otherwise
slotRecord := util.LocationRecord(loc)
slotOffset := int(util.LocationOffset(loc))
record, err := psm.storagefile.Get(slotRecord)
if err != nil {
// Revert back - the size may now be wrong but this is
// still better than losing the whole record
psm.freeManager.Add(loc, normalizedSize)
return 0, err
}
util.SetCurrentSize(record, slotOffset, 0)
psm.storagefile.ReleaseInUseID(slotRecord, true)
}
return loc, nil
}
/*
allocateNew allocates a new slot in the PagedStorageFile. Errors during this function might
cause the allocation of empty pages. The last allocated page pointers might
get out of sync with the actual data pages.
*/
func (psm *PhysicalSlotManager) allocateNew(size uint32, startPage uint64) (uint64, error) {
var record *file.Record
var pv *pageview.DataPage
var offset uint32
var header int
var err error
if startPage == 0 {
// Create a new page if there is no start page
startPage, err = psm.pager.AllocatePage(view.TypeDataPage)
if err != nil {
return 0, err
}
// Get the newly allocated page - all error checking was
// done in the previous AllocatePage call
record, _ = psm.storagefile.Get(startPage)
pv = pageview.NewDataPage(record)
pv.SetOffsetFirst(pageview.OffsetData)
util.SetCurrentSize(record, pageview.OffsetData, 0)
util.SetAvailableSize(record, pageview.OffsetData, 0)
} else {
record, err = psm.storagefile.Get(startPage)
if err != nil {
return 0, err
}
pv = pageview.NewDataPage(record)
}
offset = uint32(pv.OffsetFirst())
if offset == 0 {
// Take care of the special case if the current page was filled
// exactly by the previous row
psm.storagefile.ReleaseInUse(record)
return psm.allocateNew(size, 0)
}
// Check if the last existing page is full - in that case just allocate
// a new page
header = int(offset)
if offset == psm.recordSize || offset > psm.recordSize-util.SizeInfoSize {
// Go to next page
psm.storagefile.ReleaseInUse(record)
return psm.allocateNew(size, 0)
}
slotsize := util.AvailableSize(record, header)
// Loop over the slots and update the header and offset pointer - stop
// if there is an empty space or we reach the end of the page
for slotsize != 0 && offset < psm.recordSize {
offset += slotsize + util.SizeInfoSize
if offset == psm.recordSize || offset > psm.recordSize-util.SizeInfoSize {
// Go to next page
psm.storagefile.ReleaseInUse(record)
return psm.allocateNew(size, 0)
}
header = int(offset)
slotsize = util.AvailableSize(record, header)
}
// At this point we have the location for the new row
loc := util.PackLocation(startPage, uint16(offset))
// Calculate the remaining free space for the current page
rspace := psm.recordSize - offset - util.SizeInfoSize
if rspace < size {
// If the remaining space is not enough we must allocate new pages
// Increase the size if after the allocation only
// AllocationRoundUpThreshold bytes would remain
// on the record
freeSpaceLastRecord := (size - rspace) % psm.availableRecordSize
if (psm.availableRecordSize - freeSpaceLastRecord) <=
(AllocationRoundUpThreshold + util.SizeInfoSize) {
newsize := size
newsize += (psm.availableRecordSize - freeSpaceLastRecord)
nnewsize := util.NormalizeSlotSize(newsize)
// Only do so if the new value is a valid normalized value
if newsize == nnewsize {
size = newsize
}
}
// Write row header
util.SetAvailableSize(record, header, size)
psm.storagefile.ReleaseInUseID(startPage, true)
// Calculate the rest size which needs to be allocated
allocSize := size - rspace
// Now allocate whole pages
for allocSize >= psm.availableRecordSize {
startPage, err = psm.pager.AllocatePage(view.TypeDataPage)
if err != nil {
return 0, err
}
// Error checking was done in previous AllocatePage call
record, _ = psm.storagefile.Get(startPage)
pv = pageview.NewDataPage(record)
// Since this page contains only data there is no first row
// offset
pv.SetOffsetFirst(0)
psm.storagefile.ReleaseInUseID(startPage, true)
allocSize -= psm.availableRecordSize
}
// If there is still a rest left allocate one more page but reserve
// only a part of it for the row
if allocSize > 0 {
startPage, err = psm.pager.AllocatePage(view.TypeDataPage)
if err != nil {
return 0, err
}
// Error checking was done in previous AllocatePage call
record, _ = psm.storagefile.Get(startPage)
pv = pageview.NewDataPage(record)
pv.SetOffsetFirst(uint16(pageview.OffsetData + allocSize))
psm.storagefile.ReleaseInUseID(startPage, true)
}
} else {
// We found a free space on the current page
// Increase the size if after the allocation only
// AllocationRoundUpThreshold bytes would remain
// on the record
if (rspace - size) <= (AllocationRoundUpThreshold + util.SizeInfoSize) {
newsize := rspace
nnewsize := util.NormalizeSlotSize(newsize)
// Only do so if the new value is a valid normalized value
if newsize == nnewsize {
size = newsize
}
}
// Write row header
util.SetAvailableSize(record, header, size)
psm.storagefile.ReleaseInUseID(startPage, true)
}
return loc, nil
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Package util contains utility functions for slot headers.
Packing and unpacking of slot sizes
The package contains functions to pack/unpack sizes for physical slots and
logical buckets. The size info is a 4 byte value which allocates 2 bytes
for current size and 2 bytes for available size.
CCCC CCCC CCCC CCCC AAAA AAAA AAAA AAAA
The allocated size value is a packed integer using a 2 bit multiplier
in the beginning - using these packed values a slot can grow up to
138681822 bytes (138 MB). The space allocation becomes more and more
wasteful with increasing slot size. The current size is stored as a
difference to the allocated size. The maximum difference between
allocated and current space is 65534 bytes.
Packing and unpacking locations
The package contains utility functions to pack and unpack location information
in an uint64. A location is a pointer which identifies a specific record and
within the record a specific offset.
The 8 byte uint64 value is split into a 6 byte (48 bits) record address and
2 byte offset.
RRRR RRRR RRRR RRRR RRRR RRRR RRRR RRRR RRRR RRRR RRRR RRRR OOOO OOOO OOOO OOOO
We can address at maximum (having a record size of 32767 bytes):
(2^48 / 2 - 1) * 32767 = 4.61154528 * 10^18 which is around 4 exabyte
Considering a default page size of 4096 bytes we can address:
(2^48 / 2 - 1) * 4096 = 5.76460752 * 10^17 which is around 512 petabyte
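
Example (illustrative):

    loc := PackLocation(500, 20)  // Location pointing to record 500, offset 20

    LocationRecord(loc)           // == 500
    LocationOffset(loc)           // == 20

    NormalizeSlotSize(20000)      // == 20014, the smallest storable size >= 20000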
*/
package util
import "devt.de/krotik/eliasdb/storage/file"
/*
LocationSize is the size of a location in bytes
*/
const LocationSize = file.SizeLong
/*
MaxRecordValue is the maximum record id which can be used in a location
(0xFFFFFF = 2^24 - 1). Note that the location format itself reserves
6 byte = 48 bits for the record id but PackLocation currently caps it at 24 bits.
*/
const MaxRecordValue = 0xFFFFFF
/*
MaxOffsetValue is the maximum offset value for a location (0xFFFF = 65535).
*/
const MaxOffsetValue = 0xFFFF
/*
LocationRecord returns the record id from a location.
*/
func LocationRecord(location uint64) uint64 {
return location >> 16
}
/*
LocationOffset returns the offset from a location.
*/
func LocationOffset(location uint64) uint16 {
return uint16(location & 0xffff)
}
/*
PackLocation packs location information into an uint64.
*/
func PackLocation(recordID uint64, offset uint16) uint64 {
if offset == 0xFFFF && recordID == 0xFFFFFF {
return 0xFFFFFFFF
}
if recordID > MaxRecordValue {
panic("Cannot create location with record id greater than 0xFFFFFF")
}
return (recordID << 16) + uint64(offset)
}
/*
* EliasDB
*
* Copyright 2016 Matthias Ladkau. All rights reserved.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package util
import (
"fmt"
"devt.de/krotik/eliasdb/storage/file"
)
/*
OffsetCurrentSize is the offset for the size on a slotsize header.
*/
const OffsetCurrentSize = 0
/*
OffetAvailableSize is the offset for the available size on a slotsize header.
*/
const OffetAvailableSize = file.SizeUnsignedShort
/*
UnsignedShortMax is the maximum value of an unsigned short as used for slotsizes.
*/
const UnsignedShortMax = 0xFFFF
/*
MaxAvailableSizeDifference represents the maximal size of the difference
between available size and current size
*/
const MaxAvailableSizeDifference = UnsignedShortMax - 1
/*
SizeInfoSize represents the size of the size info
*/
const SizeInfoSize = OffetAvailableSize + file.SizeUnsignedShort
/*
CurrentSize returns the current size of a slot.
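The current size is stored as a difference to the available size: e.g. a slot
with an available size of 8000 bytes which currently holds 6500 bytes stores
the difference 1500; an empty slot is marked with UnsignedShortMax.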
*/
func CurrentSize(record *file.Record, offset int) uint32 {
currentSize := record.ReadUInt16(offset + OffsetCurrentSize)
if currentSize == UnsignedShortMax {
return 0
}
return AvailableSize(record, offset) - uint32(currentSize)
}
/*
SetCurrentSize sets the current size of a slot.
*/
func SetCurrentSize(record *file.Record, offset int, value uint32) {
if value == 0 {
record.WriteUInt16(offset+OffsetCurrentSize, UnsignedShortMax)
return
}
size := AvailableSize(record, offset)
if (size > MaxAvailableSizeDifference &&
value < size-MaxAvailableSizeDifference) ||
value > size {
panic(fmt.Sprint("Cannot store current size as difference "+
"to available size. Value:", value, " Available size:", size))
}
record.WriteUInt16(offset+OffsetCurrentSize, uint16(size-value))
}
/*
AvailableSize returns the available size of a slot.
*/
func AvailableSize(record *file.Record, offset int) uint32 {
value := record.ReadUInt16(offset + OffetAvailableSize)
return decodeSize(value)
}
/*
SetAvailableSize sets the available size of a slot.
*/
func SetAvailableSize(record *file.Record, offset int, value uint32) {
currentSize := CurrentSize(record, offset)
size := encodeSize(value)
// Safeguard against not using normalized size values
if decodeSize(size) != value {
panic("Size value was not normalized")
}
record.WriteUInt16(offset+OffetAvailableSize, size)
// Current size needs to be updated since it depends on the available size
SetCurrentSize(record, offset, currentSize)
}
/*
NormalizeSlotSize normalizes a given slot size.
*/
func NormalizeSlotSize(value uint32) uint32 {
return decodeSize(encodeSize(value))
}
const sizeMask = 1<<15 | 1<<14
const multi0 = 1
const multi1 = 1 << 4
const multi2 = 1 << 8
const multi3 = 1 << 13
const base0 = 0
const base1 = base0 + multi0*((1<<14)-2)
const base2 = base1 + multi1*((1<<14)-2)
const base3 = base2 + multi2*((1<<14)-2)
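/*
The four bands above have increasing granularity. A worked example derived
from the constants: a size of 20000 falls into the second band
(base1 = 16382 < 20000 < base2 = 278494), so encodeSize rounds up to the
next multiple of multi1 = 16: counter = ceil((20000-16382)/16) = 227, which
decodeSize turns back into 16382 + 227*16 = 20014.
*/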
/*
decodeSize decodes a given size value.
*/
func decodeSize(packedSize uint16) uint32 {
size := packedSize & sizeMask
multiplier := size >> 14
counter := uint32(packedSize - size)
switch multiplier {
case 0:
return counter * multi0
case 1:
return base1 + counter*multi1
case 2:
return base2 + counter*multi2
default:
return base3 + counter*multi3
}
}
/*
encodeSize encodes a given size value.
*/
func encodeSize(size uint32) uint16 {
var multiplier, counter, v uint32
switch {
case size <= base1:
multiplier = 0
counter = size / multi0
case size < base2:
multiplier = 1 << 14
v = size - base1
counter = v / multi1
if v%multi1 != 0 {
counter++
}
case size < base3:
multiplier = 2 << 14
v = size - base2
counter = v / multi2
if v%multi2 != 0 {
counter++
}
default:
multiplier = 3 << 14
v = size - base3
counter = v / multi3
if v%multi3 != 0 {
counter++
}
}
if counter >= (1 << 14) {
panic(fmt.Sprint("Cannot pack slot size:", size))
}
return uint16(multiplier + counter)
}