@muze-nl/simplystore 0.3.2 → 0.3.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.eslintrc.cjs +15 -0
- package/package.json +8 -4
- package/src/commands.mjs +7 -0
- package/src/produce.mjs +354 -0
- package/src/server.mjs +153 -147
- package/src/share.mjs +159 -0
- package/src/util.mjs +46 -0
- package/src/worker-command-init.mjs +11 -0
- package/src/worker-command.mjs +51 -0
- package/src/worker-query-init.mjs +14 -0
- package/src/worker-query.mjs +146 -0
- package/test/produce.mjs +79 -0
- package/test/share.mjs +18 -0
- package/test/test.jsontag +20 -0
- package/www/codemirror/keymap/vim.js +3 -3
- package/design/access-management.md +0 -25
- package/design/acid.md +0 -31
- package/design/commands.md +0 -25
- package/design/identity.md +0 -71
- package/design/immutability.md +0 -26
- package/design/jsontag-selector.md +0 -365
- package/design/multitasking.md +0 -13
- package/design/thoughts.md +0 -32
- package/docs/docker.md +0 -129
- package/www/assets/css/page.css +0 -34
- package/www/help/index.html +0 -94
package/src/worker-query.mjs
ADDED
@@ -0,0 +1,146 @@
+import JSONTag from "@muze-nl/jsontag"
+import pointer from 'json-pointer'
+import {_,from,not,anyOf,allOf} from 'array-where-select'
+import {deepFreeze} from './util.mjs'
+import {VM} from 'vm2'
+
+let dataspace, meta = {};
+
+export function setDataspace(d, m) {
+    dataspace = d
+    if(m) {
+        meta = m
+    }
+}
+
+export function getDataSpace(path, dataspace) {
+    if (path.substring(path.length-1)==='/') {
+        //jsonpointer doesn't allow a trailing '/'
+        path = path.substring(0, path.length-1)
+    }
+    let result
+    if (path) {
+        //jsonpointer doesn't allow an empty pointer
+        try {
+            if (pointer.has(dataspace, path)) {
+                result = pointer.get(dataspace, path)
+            } else {
+                result = JSONTag.parse('<object class="Error">{"message":"Not found", "code":404}')
+            }
+        } catch(err) {
+            result = JSONTag.parse('<object class="Error">{"message":'+JSON.stringify(err.message)+', "code":500}')
+        }
+    } else {
+        result = dataspace
+    }
+    return [result,path]
+}
+
+export function linkReplacer(data, baseURL) {
+    let type = JSONTag.getType(data)
+    let attributes = JSONTag.getAttributes(data)
+    if (Array.isArray(data)) {
+        data = data.map((entry,index) => {
+            return linkReplacer(data[index], baseURL+index+'/')
+        })
+    } else if (type === 'link') {
+        // do nothing
+    } else if (data && typeof data === 'object') {
+        data = JSONTag.clone(data)
+        Object.keys(data).forEach(key => {
+            if (Array.isArray(data[key])) {
+                data[key] = new JSONTag.Link(baseURL+key+'/')
+            } else if (data[key] && typeof data[key] === 'object') {
+                if (JSONTag.getType(data[key])!=='link') {
+                    let id=JSONTag.getAttribute(data[key], 'id')
+                    if (!id) {
+                        id = baseURL+key+'/'
+                    }
+                    data[key] = new JSONTag.Link(id)
+                }
+            }
+        })
+    }
+    return data
+}
+
+//@TODO: emit console events that server.mjs picks up
+function connectConsole(res) {
+    return {
+        log: function(...args) {
+            // res.append('X-Console-Log', joinArgs(args))
+        },
+        warning: function(...args) {
+            // res.append('X-Console-Warning', joinArgs(args))
+        },
+        error: function(...args) {
+            // res.append('X-Console-Error', joinArgs(args))
+        }
+    }
+}
+
+export async function initialize(jsontag, m) {
+    if (!jsontag) { throw new Error('missing jsontag parameter')}
+    dataspace = jsontag
+    meta = m
+    deepFreeze(dataspace)
+    return true
+}
+
+export function runQuery({pointer, request, query}) {
+    if (!pointer) { throw new Error('missing pointer parameter')}
+    if (!request) { throw new Error('missing request parameter')}
+    console.log('query',pointer)
+    let response = {
+        jsontag: request.jsontag
+    }
+    let [result,path] = getDataSpace(pointer, dataspace)
+
+    if (query) {
+        // @todo add text search: https://github.com/nextapps-de/flexsearch
+        // @todo add tree walk map/reduce/find/filter style functions
+        // @todo add arc tree dive function?
+        // @todo replace VM with V8 isolate
+        const vm = new VM({
+            timeout: 1000,
+            allowAsync: false,
+            sandbox: {
+                root: dataspace, //@TODO: if we don't pass the root, we can later shard
+                data: result,
+                meta,
+                _,
+                from,
+                not,
+                anyOf,
+                allOf,
+                // console: connectConsole(res),
+                JSONTag,
+                request
+            },
+            wasm: false
+        })
+        try {
+            result = vm.run(query)
+            let used = Math.round(process.memoryUsage().heapUsed / 1024 / 1024);
+            console.log(`(${used} MB)`);
+        } catch(err) {
+            console.log(err)
+            response.code = 422;
+            if (request.jsontag) {
+                response.body = '<object class="Error">{"message":'+JSON.stringify(''+err)+',"code":422}'
+            } else {
+                response.body = JSON.stringify({message:err, code: 422})
+            }
+        }
+    } else {
+        result = linkReplacer(result, path+'/')
+    }
+    if (!response.code) {
+        if (response.jsontag) {
+            response.body = JSONTag.stringify(result)
+        } else {
+            response.body = JSON.stringify(result)
+        }
+    }
+    return response
+}
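For context, the new query worker freezes the dataspace and evaluates client-supplied query code inside a vm2 sandbox that exposes `root`, `data`, `meta`, the array-where-select helpers and `JSONTag`. A usage sketch, calling the module directly rather than through worker-query-init.mjs; the inline data and query string below are illustrative assumptions, not code from the package:

```javascript
// Sketch only: exercising runQuery() from worker-query.mjs directly.
import JSONTag from '@muze-nl/jsontag'
import { initialize, runQuery } from './src/worker-query.mjs'

const data = JSONTag.parse('{"persons":[{"name":"John"},{"name":"Jane"}]}')
await initialize(data, {}) // deepFreezes the dataspace

const response = runQuery({
    pointer: '/persons',             // JSON Pointer into the dataspace
    request: { jsontag: false },     // request plain JSON in the response body
    query: 'data.map(p => p.name)'   // evaluated inside the VM; `data` is the pointer target
})
console.log(response.body) // ["John","Jane"]
```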
package/test/produce.mjs
ADDED
@@ -0,0 +1,79 @@
+import tap from 'tap'
+import fs from 'fs'
+import JSONTag from '@muze-nl/jsontag'
+import {produce,index} from '../src/produce.mjs'
+import {deepFreeze} from '../src/util.mjs'
+
+let data = deepFreeze(JSONTag.parse(fs.readFileSync('./test/test.jsontag','utf-8')))
+
+tap.test('data is frozen', t => {
+    t.throws(() => {
+        data.persons.foo = 'bar'
+    })
+    t.notHas(data.persons, {foo:'bar'})
+    t.end()
+})
+
+tap.test('produce can create new data', t => {
+    let newData = produce(data, (draft) => {
+        draft.persons.foo = 'bar'
+    })
+    t.has(newData.persons, {foo:'bar'})
+    t.end()
+})
+
+tap.test('produce does not change base data', t => {
+    let newData = produce(data, (draft) => {
+        console.log('Persons draft',draft.persons, Object.isFrozen(draft.persons))
+        draft.persons.foo = 'bar'
+    })
+    t.notHas(data.persons, {foo:'bar'})
+    t.end()
+})
+
+tap.test('produce handles array access', t => {
+    let newData = produce(data, (draft) => {
+        draft.persons[0].name = 'Jan'
+    })
+    t.equal(newData.persons[0].name, 'Jan')
+    t.end()
+})
+
+tap.test('produce handles array functions', t => {
+    let newData = produce(data, (draft) => {
+        draft.persons.push({
+            name: 'Jan'
+        })
+    })
+    t.equal(newData.persons[2].name, 'Jan')
+    t.same(data.persons[2],null)
+    t.end()
+})
+
+tap.test('produce can use array.indexOf inside', t => {
+    let newData = produce(data, (draft) => {
+        let p = draft.persons[1] // this returns a proxy
+        let i = draft.persons.indexOf(p) // this is passed on to the baseState/clone, which has values without proxy
+        t.equal(i,1) // so this no longer fails, as indexOf automatically calls getRealValue on all params
+    })
+    t.end()
+})
+
+tap.test('produce does not alter unaccessed objects', t => {
+    let newData = produce(data, (draft) => {
+        draft.persons.foo = 'bar'
+    })
+    t.equal(data.persons[0],newData.persons[0])
+    t.equal(data.persons[1],newData.persons[1])
+    t.notEqual(data.persons,newData.persons)
+    t.end()
+})
+
+tap.test('proxies get re-used', t => {
+    let newData = produce(data, (draft) => {
+        let p1 = draft.persons[0]
+        let p2 = draft.persons[0]
+        t.equal(p1,p2)
+    })
+    t.end()
+})
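The tests above show produce() behaving like an immer-style copy-on-write update: the frozen base stays intact and unaccessed objects are shared between the old and new root. A small workflow sketch along the same lines (file paths assumed, not code from the package):

```javascript
// Sketch only: updating a frozen dataspace with produce() while keeping the old root intact.
import fs from 'fs'
import JSONTag from '@muze-nl/jsontag'
import { produce } from './src/produce.mjs'
import { deepFreeze } from './src/util.mjs'

const data = deepFreeze(JSONTag.parse(fs.readFileSync('./test/test.jsontag', 'utf-8')))

const next = produce(data, draft => {
    draft.persons.push({ name: 'Jan' }) // only the objects actually touched are copied
})

console.log(data.persons.length, next.persons.length) // 2 3 — the base is unchanged
console.log(data.persons[0] === next.persons[0])      // true — unaccessed objects are shared
```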
package/test/share.mjs
ADDED
@@ -0,0 +1,18 @@
+import tap from 'tap'
+import {share} from '../src/share.mjs'
+import JSONTag from '@muze-nl/jsontag'
+import fs from 'fs'
+
+let data = JSONTag.parse(fs.readFileSync('./test/test.jsontag','utf-8'))
+
+tap.test('create shared array buffer', t => {
+    let shared = share(data)
+    console.log(shared)
+    let decoder = new TextDecoder()
+    let uint8buffer = new Uint8Array(shared.buffer)
+    let str = decoder.decode(uint8buffer)
+    console.log('string',str)
+
+    t.end()
+})
+
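The share() test suggests the parsed dataspace is serialized into a buffer whose bytes decode back to JSONTag text. A hedged sketch of passing that buffer to a worker thread, assuming `shared.buffer` is a SharedArrayBuffer as the test name implies; the worker plumbing below is not part of the package:

```javascript
// Sketch only: handing the shared buffer from share() to a worker thread without copying.
import fs from 'fs'
import JSONTag from '@muze-nl/jsontag'
import { Worker, isMainThread, workerData } from 'node:worker_threads'
import { share } from './src/share.mjs'

if (isMainThread) {
    const data = JSONTag.parse(fs.readFileSync('./test/test.jsontag', 'utf-8'))
    const shared = share(data)
    // a SharedArrayBuffer is shared with the worker rather than copied
    new Worker(new URL(import.meta.url), { workerData: { buffer: shared.buffer } })
} else {
    const text = new TextDecoder().decode(new Uint8Array(workerData.buffer))
    console.log('worker received', text.length, 'characters of JSONTag text')
}
```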
package/test/test.jsontag
ADDED
@@ -0,0 +1,20 @@
+{
+    "persons": [
+        <object id="john" class="Person">{
+            "name": "John",
+            "lastName": "Doe",
+            "dob": <date>"1972-09-20",
+            "foaf": [
+                <link>"jane"
+            ]
+        },
+        <object id="jane" class="Person">{
+            "name": "Jane",
+            "lastName": "Doe",
+            "dob": <date>"1986-01-01",
+            "foaf": [
+                <link>"john"
+            ]
+        }
+    ]
+}
package/www/codemirror/keymap/vim.js
@@ -5966,9 +5966,9 @@ function initVim$1(CodeMirror) {
   return vimApi;
 }
 
-function initVim(CodeMirror5) {
-  CodeMirror5.Vim = initVim$1(CodeMirror5);
-  return CodeMirror5.Vim;
+function initVim(CodeMirror5) {
+  CodeMirror5.Vim = initVim$1(CodeMirror5);
+  return CodeMirror5.Vim;
 }
 
 
package/design/access-management.md
DELETED
@@ -1,25 +0,0 @@
-# access management (grants/rights/roles)
-
-## Read grants
-
-You can easily create a read access check that just checks the query/ endpoint, but this is not interesting.
-Much more interesting is how to implement granular read access rights on subtrees of the dataset.
-
-Ariadne has a mechanism that has been successfully used since 1998. It allows you to define grant strings, like 'read', 'edit', etc. and configure them on a path inside a tree of data. Each entity below that path will automatically inherit the grant.
-
-The grant is then checked for each interaction with the data. Because all interactions in Ariadne are through templates, which can be custom made, the grants can also be custom strings. This way you can grow the access management system to your own needs.
-
-However, this means that all read access must come through the data tree. Earlier we defined a /uuid/ endpoint which would give direct access to any object. This breaks this paradigm. Objects can be linked in multiple locations in the dataset. To check for read grants, you must find all valid paths to an object and check if any of them allow the user to 'read' the object. This is potentially very costly.
-
-The easiest solution is to drop the /uuid endpoint, unless all data is publicly readable. Each object can still have a uuid, but it is no longer a url that points to the object's contents.
-
-Another problem is the changing nature of the JSON Pointer path in the URL. If an object is part of an array, and another object earlier (with a smaller index) is removed, the URL for this object changes. Its index is lowered. So you cannot assign grants on a JSON Pointer path containing an array index, with any hope of the grants staying in the correct spot.
-
-If you instead assign grants to objects directly, the grants will correctly move when an array is updated. However, this opens up the possibility that grants appear in multiple places, if the object is linked in multiple places. This should not be a problem though, since the subtree of objects is the same as long as the object is the same.
-
-This opens up the possibility of storing the grants in an attribute on the JSONTag tag of the object, e.g.
-
-<object class="Site" grants="user1: read edit, user2: read edit delete">{ ... }
-
-Here we need to carefully consider how to treat links. Do we need read access to follow a link, or is that implied?
-
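The grants-as-attribute idea sketched above is cheap to check at runtime. A hypothetical helper (not part of the package) that parses the `grants="user1: read edit, ..."` format described in the deleted note:

```javascript
// Hypothetical sketch: checking the grants attribute format described above.
import JSONTag from '@muze-nl/jsontag'

export function hasGrant(entity, user, grant) {
    const grants = JSONTag.getAttribute(entity, 'grants') // e.g. "user1: read edit, user2: read edit delete"
    if (!grants) {
        return false
    }
    return grants.split(',').some(part => {
        const [grantee, rights = ''] = part.split(':')
        return grantee.trim() === user && rights.trim().split(/\s+/).includes(grant)
    })
}

// hasGrant(site, 'user1', 'edit') === true for <object class="Site" grants="user1: read edit">{...}
```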
package/design/acid.md
DELETED
@@ -1,31 +0,0 @@
-# JSONTAG REST Server - Commands / CQRS
-
-Why the need for CQRS (Command Query Responsibility Segregation)?
-
-The design of the server is purposely simple. It does away with a separate database system, instead reading all data in memory, giving access to that data to queries written in javascript, using native javascript objects. This does away with the relational-object impedance mismatch. It does away with SQL and ORM solutions.
-
-However, databases have the nice property that they are ACID compliant. We don't want to lose that. ACID stands for 'Atomicity', 'Consistency', 'Isolation' and 'Durability'. Our server should exhibit these same properties. Isolation is handled by running each query in a separate VM, using immutable data. But immutable data also means we need a different mechanism to update data, other than plain javascript.
-
-Why the need for immutable data? This is tied to the ACID requirements. If you have multiple processes working with the same, shared, data, there is a good chance of creating inconsistent data. One process changes the data in some way, while another process tries to change the exact same data. One of these processes will 'win', potentially undoing the other process' change. Imagine you have a shop with an inventory. If product X has 1 item in the inventory, and there are two requests to buy that product being handled by two different processes at the same time, you could end up selling the same item twice.
-
-There are ways around this, using locks and mutexes, but these are notoriously difficult to get right.
-
-Just using immutable data alone won't solve this problem. Both processes will still see that there is 1 product X in store. But if we only allow updates/changes through a single sequential process, and we allow for the possibility that a change request can fail, we can solve this problem much more easily.
-
-So both processes see that there is one item left in inventory. Both processes now create a command (buyProduct(X)) and send it to the command queue. They both get a unique ID as a result, and they can listen for an update on that ID. But there is only a single command handler, which handles commands on a First In First Out basis. The first buyProduct(X) command succeeds, and the inventory is decreased by 1. The next buyProduct(X) command fails its precondition: the inventory is 0. So it fails. Both processes get an update: one is a success, the other is a failure.
-
-Now as the data is immutable, this is not entirely correct. In fact there is a difference between the client process calling the server, and the server process which only allows you to query the data. The server process can't create a command or listen for updates, only the client can. So when the client receives a success or failure result, asynchronously, it can send a new query to the server. The server in the meantime creates a new query handler, with the updated data after the changes from the last commands.
-
-Because changes are only applied using the single-process command handler, Atomicity is preserved. Each command is an atomic update: it either succeeds or fails. If it fails, none of the updates are kept, since the immutable data structure root is not updated.
-
-Consistency is preserved. Each query request sees a consistent dataset; it may be slightly out of date, but it is internally consistent. Later queries will see a more up to date version of the data, so in the context of updates, the data is eventually consistent.
-
-Isolation is preserved. Each query runs in its own VM, with its own access to the immutable data. Each command is run separately from queries and custom javascript.
-
-Durability is preserved. Each command is first written to a log, then the unique ID of the command is returned. The whole dataset is backed up to disk once in a while, with the last command ID that has been processed. A backup can be restored, then all commands after the last processed one can be processed again.
-
-The query handlers can run in parallel, since they only have access to immutable data. In fact, each query handler process can use the exact same shared memory, keeping memory usage low.
-
-There is a potential problem that a client sends a command, which the server enters into the command log, but before the command log id can be sent back to the client, the server dies. In that case the client cannot know if the command has been processed into the log. The only option is to send the command again. This means that a command that is meant to be processed once could be processed more than once. Commands must be written to ensure that this does not result in an inconsistent dataset. The easiest way to ensure this is to make sure the client gives each command a unique ID, a UUID. The server will then ignore any subsequent commands with the same id, and send a confirm reply back anyway.
-
-The server must thus keep a log of all processed command ids. This can potentially grow so big as to impact performance, so there needs to be a time limit on this processed log. We can enforce this by requiring a UUID with a timestamp, so either UUID V1 or V2. Any commands with a UUID older than the time limit will be denied.
package/design/commands.md
DELETED
@@ -1,25 +0,0 @@
-# commands (CQRS)
-
-Updates/changes can only be done through commands. These use a separate endpoint (POST /update). There is only one process that handles this endpoint.
-
-Each command must have a unique uuid, set by the client. A command is handled asynchronously; when fetching /update you will only get a response like 'command accepted'.
-
-When the command is executed, the client is notified of the result (success or failure). This can be through a server-sent event, or through (long) polling. There should be an extra endpoint for each command, say GET /update/{uuid}.
-
-Commands are executed only once, but may be received more than once; this is why the client assigns the uuid. Commands with the same uuid (within a specified time window, say one day) are ignored.
-
-Commands are written to the command log. When this log is synced, a response 'command accepted' is sent.
-
-Each server can/should define its own commands, with as much semantics as possible. So instead of defining simple CRUD commands, a server should have meaningful commands, with opaque inner workings.
-
-A simple, but wrong, solution would be to implement a generic patch command, which uses jsonpatch. This would allow for atomicity, thus fulfilling ACID requirements, but you cannot deduce from the command what the meaning of the change is. And you cannot change the data structure and command handling and then re-run all commands.
-
-However, if you create a data structure for support tickets, you could create a command 'createTicket', which would know what to change and where in the dataset. If the dataset changes, you can change the code in the createTicket command handler in tandem. This avoids most of the problems related to schema changes. And this means there is less need for versioning of the API.
-
-You may still need to have versions of commands, so that if parameters to a command change, you can recognize old commands through a version number.
-
-Similarly, the query endpoint will need versioning to handle changes in the dataset structure, as all code is defined on the client side. This may be mitigated somewhat by providing a custom library of semantic methods for the client to use, e.g. `searchTickets(...)`.
-
-Older versions of the query api can be simulated through a transformer, which translates/transforms from/to the new structure.
-
-Commands can change the data structure by creating a new immutable root and then starving/stopping the current VM processes and starting new VM processes with the new root.
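A semantic command like the `createTicket` example above could be built on the produce() function added in this release, so a command either yields a new immutable root or fails and leaves the old root in place. A hypothetical sketch; the `tickets` collection and command shape are illustration only:

```javascript
// Hypothetical sketch: a semantic command applied through produce().
import { produce } from './src/produce.mjs'

export function createTicket(root, { id, title }) {
    // precondition is checked against the current immutable root
    if (root.tickets.find(ticket => ticket.id === id)) {
        throw new Error('ticket ' + id + ' already exists') // command fails, root is not replaced
    }
    return produce(root, draft => {
        draft.tickets.push({ id, title, status: 'open' })
    })
}

// dataspace = createTicket(dataspace, { id: 'uuid-1', title: 'Printer on fire' })
```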
package/design/identity.md
DELETED
@@ -1,71 +0,0 @@
-# Identity
-
-One of many pitfalls in REST APIs is mismanaging the identity of entities. So we need to get this right here.
-
-- Each entity should have exactly one identity, over time. You can have different versions of an entity, as in older versions that have been updated since. The main identity always points to the latest version in that case.
-- Each identity should be a URL. This is because we want to make sure we can easily upgrade the API to a Linked Data API.
-- If you implement versioning, you should be able to reference a specific version, also through a URL.
-- If the JSONTag data adds a link to an object/entity, it should use the same ID (URL).
-- Each entity must have a unique ID.
-
-A simple solution would be to use the API URL + JSON Pointer as the id of all entities. This fails for these reasons (at least):
-- Entities may appear on multiple JSON Pointer paths, as they can be linked in multiple places
-- Entities that are part of an array of entities may have their JSON Pointer path changed when another, prior, entity is removed from the array, e.g.
-
-```jsontag
-[
-    {
-        "title":"Entity 1"
-    },
-    {
-        "title":"Entity 2"
-    }
-]
-```
-
-Here you could select entity 2 using the JSON Pointer '/1/'. However, as soon as entity 1 is removed, the JSON Pointer would become '/0/'.
-
-Another solution would be to explicitly assign all objects a unique id, perhaps a UUID. Then you can use that with a specific Map container, e.g.
-
-```
-/uuid/9b91dcfd-dae7-47dc-b90f-a51b37e6dd3e
-```
-
-And all results would have their id encoded like this:
-
-```jsontag
-[
-    <object id="/uuid/9b91dcfd-dae7-47dc-b90f-a51b37e6dd3e">{
-        "title":"Entity 2"
-    }
-]
-```
-
-The problem here is that the system now forces everything to use a specific UUID type as ID. This may not be the best option. And the URL that is used as the ID is much less expressive than it could be. All entities are forced to use the same non-descriptive ID style. The benefit is that at least some common pitfalls in assigning ID values are avoided...
-
-The /uuid/ map should be invisible, as in, it should not be returned in any result. You can only use it to query a specific entity by ID.
-UUIDs should only be assigned to object values. All other values are supposed to be just values, even if they get parsed into Value Objects, like `<date>`.
-
-One possible problem is that we've now encoded the identity as an attribute, instead of a property. This may come as a surprise to users of the API. However, I do think that it is a better fit. Consider the JSON-LD '@id' property, which fulfills a similar role. It too is encoded separately from normal properties, using the '@' prefix.
-
-The /uuid/ endpoint can be written as a search in the dataset for an entity with the given ID. This way it is not part of the dataset itself, and thus 'invisible'.
-
-If you need versioning, you could add a second parameter for the version, like this:
-
-```
-/uuid/9b91dcfd-dae7-47dc-b90f-a51b37e6dd3e/1
-```
-
-or
-
-```
-/uuid/9b91dcfd-dae7-47dc-b90f-a51b37e6dd3e/latest
-```
-
-To prevent collisions with a 'uuid' property in the dataset root, a normal JSON Pointer for the dataset could start with the `/query/` root path, e.g.:
-
-```
-api.url/query/tasks/
-```
-
-This would also make room for a separate `/update/` path, which would handle update commands to the dataset.
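The '/uuid/ endpoint as a search' idea above amounts to a tree walk over JSONTag id attributes. A hypothetical helper, not part of the package:

```javascript
// Hypothetical sketch: resolve an entity by its JSONTag id attribute.
import JSONTag from '@muze-nl/jsontag'

export function findById(data, id, seen = new Set()) {
    if (!data || typeof data !== 'object' || seen.has(data)) {
        return undefined
    }
    seen.add(data) // guard against cycles
    if (JSONTag.getAttribute(data, 'id') === id) {
        return data
    }
    for (const value of Object.values(data)) {
        const found = findById(value, id, seen)
        if (found) {
            return found
        }
    }
    return undefined
}

// e.g. findById(dataspace, 'john') for the test.jsontag data added in this release
```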
package/design/immutability.md
DELETED
@@ -1,26 +0,0 @@
-# Immutable JSONTag data
-
-1 - When parsing jsontag data, each entry is frozen immediately after instantiation.
-2 - Add a clone method to clone an object (shallow clone)
-3 - a clone is mutable
-4 - all objects linking to the original object get cloned as well, and the link updated to the new clone
-5 - once all updates are done, you can freeze the root object again and it will also freeze all clones
-6 - identity. Each object should have a clear identity, that is the same over mutations, as well as a static identity that changes with mutations. (one identity-over-time, one identity-per-version)
-7 - cleanup. Any object no longer linked should be removed, unless you want automatic versioning. In that case the root object's versions should all be kept, and therefore all objects remain linked.
-
-## Problems
-
-### 4 - all objects linking to the original object get cloned as well
-
-This means that we need an index for each object, containing all other objects linking to it.
-We can do this while parsing the JSONTag data: just build a WeakMap of all objects while tree-walking the dataset.
-
-### which acl grants apply to objects that are linked more than once?
-
-The simplest solution seems to be to just use the acl metadata gathered while following the path used.
-
-## existing solutions
-
-- immutable.js
-- immer
-- redux