cojson 0.7.35-unique.2 → 0.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +2 -7
- package/.turbo/turbo-lint.log +4 -0
- package/.turbo/turbo-test.log +321 -253
- package/CHANGELOG.md +10 -2
- package/dist/PeerState.js +66 -0
- package/dist/PeerState.js.map +1 -0
- package/dist/PriorityBasedMessageQueue.js +51 -0
- package/dist/PriorityBasedMessageQueue.js.map +1 -0
- package/dist/base64url.js.map +1 -1
- package/dist/coValue.js.map +1 -1
- package/dist/coValueCore.js +3 -6
- package/dist/coValueCore.js.map +1 -1
- package/dist/coValues/account.js +0 -1
- package/dist/coValues/account.js.map +1 -1
- package/dist/coValues/coList.js.map +1 -1
- package/dist/coValues/coMap.js.map +1 -1
- package/dist/coValues/coStream.js +14 -15
- package/dist/coValues/coStream.js.map +1 -1
- package/dist/coValues/group.js.map +1 -1
- package/dist/coreToCoValue.js.map +1 -1
- package/dist/crypto/PureJSCrypto.js.map +1 -1
- package/dist/crypto/WasmCrypto.js.map +1 -1
- package/dist/crypto/crypto.js +3 -0
- package/dist/crypto/crypto.js.map +1 -1
- package/dist/index.js +3 -2
- package/dist/index.js.map +1 -1
- package/dist/jsonStringify.js.map +1 -1
- package/dist/localNode.js +7 -7
- package/dist/localNode.js.map +1 -1
- package/dist/permissions.js.map +1 -1
- package/dist/priority.js +31 -0
- package/dist/priority.js.map +1 -0
- package/dist/storage/FileSystem.js.map +1 -1
- package/dist/storage/chunksAndKnownStates.js +2 -0
- package/dist/storage/chunksAndKnownStates.js.map +1 -1
- package/dist/storage/index.js.map +1 -1
- package/dist/streamUtils.js.map +1 -1
- package/dist/sync.js +7 -18
- package/dist/sync.js.map +1 -1
- package/dist/tests/PeerState.test.js +80 -0
- package/dist/tests/PeerState.test.js.map +1 -0
- package/dist/tests/PriorityBasedMessageQueue.test.js +97 -0
- package/dist/tests/PriorityBasedMessageQueue.test.js.map +1 -0
- package/dist/tests/account.test.js +1 -2
- package/dist/tests/account.test.js.map +1 -1
- package/dist/tests/coMap.test.js.map +1 -1
- package/dist/tests/coStream.test.js +34 -1
- package/dist/tests/coStream.test.js.map +1 -1
- package/dist/tests/permissions.test.js +41 -42
- package/dist/tests/permissions.test.js.map +1 -1
- package/dist/tests/priority.test.js +61 -0
- package/dist/tests/priority.test.js.map +1 -0
- package/dist/tests/sync.test.js +327 -17
- package/dist/tests/sync.test.js.map +1 -1
- package/dist/tests/testUtils.js +1 -2
- package/dist/tests/testUtils.js.map +1 -1
- package/dist/typeUtils/expectGroup.js.map +1 -1
- package/package.json +3 -3
- package/src/PeerState.ts +85 -0
- package/src/PriorityBasedMessageQueue.ts +77 -0
- package/src/coValueCore.ts +4 -9
- package/src/coValues/account.ts +0 -1
- package/src/coValues/coStream.ts +21 -18
- package/src/crypto/crypto.ts +5 -0
- package/src/index.ts +3 -3
- package/src/localNode.ts +6 -7
- package/src/priority.ts +39 -0
- package/src/storage/chunksAndKnownStates.ts +2 -0
- package/src/sync.ts +19 -34
- package/src/tests/PeerState.test.ts +92 -0
- package/src/tests/PriorityBasedMessageQueue.test.ts +111 -0
- package/src/tests/account.test.ts +1 -2
- package/src/tests/coStream.test.ts +58 -1
- package/src/tests/permissions.test.ts +41 -42
- package/src/tests/priority.test.ts +75 -0
- package/src/tests/sync.test.ts +488 -26
- package/src/tests/testUtils.ts +1 -2
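
For orientation before the raw diffs: 0.8.0 introduces a priority module (src/priority.ts, alongside PeerState.ts and PriorityBasedMessageQueue.ts) whose public surface is exercised by the new tests shown below via getPriorityFromHeader and CO_VALUE_PRIORITY. The TypeScript sketch below only mirrors the behavior those tests assert; the numeric constant values, the header typing, and the group/account check are assumptions, not the shipped implementation.

// Sketch of the priority helpers as implied by dist/tests/priority.test.js below.
// Constant values, header typing, and the "group" ruleset check are assumptions.
export const CO_VALUE_PRIORITY = {
    HIGH: 0,
    MEDIUM: 1,
    LOW: 2,
} as const;

export type CoValuePriority =
    (typeof CO_VALUE_PRIORITY)[keyof typeof CO_VALUE_PRIORITY];

// Loose stand-in for the real CoValueHeader type.
type CoValueHeaderLike = {
    type: string;
    ruleset: { type: string };
    meta: { type?: string } | null;
};

export function getPriorityFromHeader(
    header: CoValueHeaderLike | boolean | undefined,
): CoValuePriority {
    // Boolean or missing headers fall back to MEDIUM.
    if (header === undefined || typeof header === "boolean") {
        return CO_VALUE_PRIORITY.MEDIUM;
    }
    // Binary streams (e.g. file uploads) sync last.
    if (header.type === "costream" && header.meta?.type === "binary") {
        return CO_VALUE_PRIORITY.LOW;
    }
    // Accounts and groups gate access to everything else, so they sync first
    // (assuming both carry a "group" ruleset, as the tests suggest).
    if (header.ruleset.type === "group") {
        return CO_VALUE_PRIORITY.HIGH;
    }
    return CO_VALUE_PRIORITY.MEDIUM;
}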
package/dist/tests/priority.test.js
ADDED

@@ -0,0 +1,61 @@
+import { expect, test, describe } from "vitest";
+import { WasmCrypto } from "../index.js";
+import { LocalNode } from "../localNode.js";
+import { randomAnonymousAccountAndSessionID } from "./testUtils.js";
+import { getPriorityFromHeader, CO_VALUE_PRIORITY } from "../priority.js";
+const Crypto = await WasmCrypto.create();
+describe("getPriorityFromHeader", () => {
+    test("returns MEDIUM priority for boolean or undefined headers", () => {
+        expect(getPriorityFromHeader(true)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+        expect(getPriorityFromHeader(false)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+        expect(getPriorityFromHeader(undefined)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+    });
+    test("returns MEDIUM priority for costream type", () => {
+        const node = new LocalNode(...randomAnonymousAccountAndSessionID(), Crypto);
+        const costream = node.createCoValue({
+            type: "costream",
+            ruleset: { type: "unsafeAllowAll" },
+            meta: null,
+            ...Crypto.createdNowUnique(),
+        });
+        expect(getPriorityFromHeader(costream.header)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+    });
+    test("returns LOW priority for binary costream type", () => {
+        const node = new LocalNode(...randomAnonymousAccountAndSessionID(), Crypto);
+        const costream = node.createCoValue({
+            type: "costream",
+            ruleset: { type: "unsafeAllowAll" },
+            meta: { type: "binary" },
+            ...Crypto.createdNowUnique(),
+        });
+        expect(getPriorityFromHeader(costream.header)).toEqual(CO_VALUE_PRIORITY.LOW);
+    });
+    test("returns HIGH priority for account type", async () => {
+        const node = new LocalNode(...randomAnonymousAccountAndSessionID(), Crypto);
+        const account = node.createAccount(node.crypto.newRandomAgentSecret());
+        expect(getPriorityFromHeader(account.core.header)).toEqual(CO_VALUE_PRIORITY.HIGH);
+    });
+    test("returns HIGH priority for group type", () => {
+        const node = new LocalNode(...randomAnonymousAccountAndSessionID(), Crypto);
+        const group = node.createGroup();
+        expect(getPriorityFromHeader(group.core.header)).toEqual(CO_VALUE_PRIORITY.HIGH);
+    });
+    test("returns MEDIUM priority for other types", () => {
+        const node = new LocalNode(...randomAnonymousAccountAndSessionID(), Crypto);
+        const comap = node.createCoValue({
+            type: "comap",
+            ruleset: { type: "unsafeAllowAll" },
+            meta: null,
+            ...Crypto.createdNowUnique(),
+        });
+        const colist = node.createCoValue({
+            type: "colist",
+            ruleset: { type: "unsafeAllowAll" },
+            meta: null,
+            ...Crypto.createdNowUnique(),
+        });
+        expect(getPriorityFromHeader(comap.header)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+        expect(getPriorityFromHeader(colist.header)).toEqual(CO_VALUE_PRIORITY.MEDIUM);
+    });
+});
+//# sourceMappingURL=priority.test.js.map
package/dist/tests/priority.test.js.map
ADDED

@@ -0,0 +1 @@
+{"version":3,"file":"priority.test.js","sourceRoot":"","sources":["../../src/tests/priority.test.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,MAAM,QAAQ,CAAC;AAChD,OAAO,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AACzC,OAAO,EAAE,SAAS,EAAE,MAAM,iBAAiB,CAAC;AAC5C,OAAO,EAAE,kCAAkC,EAAE,MAAM,gBAAgB,CAAC;AACpE,OAAO,EAAE,qBAAqB,EAAE,iBAAiB,EAAE,MAAM,gBAAgB,CAAC;AAE1E,MAAM,MAAM,GAAG,MAAM,UAAU,CAAC,MAAM,EAAE,CAAC;AAEzC,QAAQ,CAAC,uBAAuB,EAAE,GAAG,EAAE;IACnC,IAAI,CAAC,0DAA0D,EAAE,GAAG,EAAE;QAClE,MAAM,CAAC,qBAAqB,CAAC,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QACtE,MAAM,CAAC,qBAAqB,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QACvE,MAAM,CAAC,qBAAqB,CAAC,SAAS,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;IAC/E,CAAC,CAAC,CAAC;IAEH,IAAI,CAAC,2CAA2C,EAAE,GAAG,EAAE;QACnD,MAAM,IAAI,GAAG,IAAI,SAAS,CAAC,GAAG,kCAAkC,EAAE,EAAE,MAAM,CAAC,CAAC;QAC5E,MAAM,QAAQ,GAAG,IAAI,CAAC,aAAa,CAAC;YAChC,IAAI,EAAE,UAAU;YAChB,OAAO,EAAE,EAAE,IAAI,EAAE,gBAAgB,EAAE;YACnC,IAAI,EAAE,IAAI;YACV,GAAG,MAAM,CAAC,gBAAgB,EAAE;SAC/B,CAAC,CAAC;QAEH,MAAM,CAAC,qBAAqB,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;IACrF,CAAC,CAAC,CAAC;IAEH,IAAI,CAAC,+CAA+C,EAAE,GAAG,EAAE;QACvD,MAAM,IAAI,GAAG,IAAI,SAAS,CAAC,GAAG,kCAAkC,EAAE,EAAE,MAAM,CAAC,CAAC;QAC5E,MAAM,QAAQ,GAAG,IAAI,CAAC,aAAa,CAAC;YAChC,IAAI,EAAE,UAAU;YAChB,OAAO,EAAE,EAAE,IAAI,EAAE,gBAAgB,EAAE;YACnC,IAAI,EAAE,EAAE,IAAI,EAAE,QAAQ,EAAE;YACxB,GAAG,MAAM,CAAC,gBAAgB,EAAE;SAC/B,CAAC,CAAC;QAEH,MAAM,CAAC,qBAAqB,CAAC,QAAQ,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,GAAG,CAAC,CAAC;IAClF,CAAC,CAAC,CAAC;IAEH,IAAI,CAAC,wCAAwC,EAAE,KAAK,IAAI,EAAE;QACtD,MAAM,IAAI,GAAE,IAAI,SAAS,CAAC,GAAG,kCAAkC,EAAE,EAAE,MAAM,CAAC,CAAC;QAE3E,MAAM,OAAO,GAAG,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,MAAM,CAAC,oBAAoB,EAAE,CAAC,CAAC;QAEvE,MAAM,CAAC,qBAAqB,CAAC,OAAO,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC;IACvF,CAAC,CAAC,CAAC;IAEH,IAAI,CAAC,sCAAsC,EAAE,GAAG,EAAE;QAC9C,MAAM,IAAI,GAAG,IAAI,SAAS,CAAC,GAAG,kCAAkC,EAAE,EAAE,MAAM,CAAC,CAAC;QAC5E,MAAM,KAAK,GAAG,IAAI,CAAC,WAAW,EAAE,CAAC;QAEjC,MAAM,CAAC,qBAAqB,CAAC,KAAK,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,IAAI,CAAC,CAAC;IACrF,CAAC,CAAC,CAAC;IAEH,IAAI,CAAC,yCAAyC,EAAE,GAAG,EAAE;QACjD,MAAM,IAAI,GAAG,IAAI,SAAS,CAAC,GAAG,kCAAkC,EAAE,EAAE,MAAM,CAAC,CAAC;QAE5E,MAAM,KAAK,GAAG,IAAI,CAAC,aAAa,CAAC;YAC7B,IAAI,EAAE,OAAO;YACb,OAAO,EAAE,EAAE,IAAI,EAAE,gBAAgB,EAAE;YACnC,IAAI,EAAE,IAAI;YACV,GAAG,MAAM,CAAC,gBAAgB,EAAE;SAC/B,CAAC,CAAC;QAEH,MAAM,MAAM,GAAG,IAAI,CAAC,aAAa,CAAC;YAC9B,IAAI,EAAE,QAAQ;YACd,OAAO,EAAE,EAAE,IAAI,EAAE,gBAAgB,EAAE;YACnC,IAAI,EAAE,IAAI;YACV,GAAG,MAAM,CAAC,gBAAgB,EAAE;SAC/B,CAAC,CAAC;QAEH,MAAM,CAAC,qBAAqB,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC9E,MAAM,CAAC,qBAAqB,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC,OAAO,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;IACnF,CAAC,CAAC,CAAC;AACP,CAAC,CAAC,CAAC"}
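
The sync.test.js hunks that follow mostly add a priority field, computed with getPriorityFromHeader, to the expected content messages. That field is presumably what the new PriorityBasedMessageQueue (see the file list above) consumes when deciding which queued message to send to a peer next. As a rough idea only, under assumed names and without the real implementation at hand, such a queue can keep one buffer per priority level and always drain higher priorities first:

// Hypothetical per-peer priority queue; names and structure are assumptions,
// not the actual cojson 0.8.0 PriorityBasedMessageQueue.
type Priority = 0 | 1 | 2; // 0 = HIGH, 1 = MEDIUM, 2 = LOW (assumed ordering)

class SimplePriorityQueue<M extends { priority?: Priority }> {
    // One FIFO buffer per priority level, indexed by the priority value.
    private queues: M[][] = [[], [], []];

    // Enqueue a message under its priority, defaulting to MEDIUM (1).
    push(msg: M): void {
        this.queues[msg.priority ?? 1].push(msg);
    }

    // Dequeue the next message, always preferring HIGH over MEDIUM over LOW.
    pull(): M | undefined {
        for (const queue of this.queues) {
            const next = queue.shift();
            if (next !== undefined) {
                return next;
            }
        }
        return undefined;
    }
}

A peer's outgoing loop would then repeatedly pull() and write to its transport, so small account and group updates are not stuck behind large binary-stream chunks.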
package/dist/tests/sync.test.js
CHANGED

@@ -1,11 +1,12 @@
-import { expect, test } from "vitest";
+import { expect, test, describe } from "vitest";
 import { LocalNode } from "../localNode.js";
+import { RawCoMap } from "../coValues/coMap.js";
 import { randomAnonymousAccountAndSessionID } from "./testUtils.js";
 import { connectedPeers, newQueuePair } from "../streamUtils.js";
 import { stableStringify } from "../jsonStringify.js";
 import { WasmCrypto } from "../crypto/WasmCrypto.js";
 import { expectMap } from "../coValue.js";
-import {
+import { getPriorityFromHeader } from "../priority.js";
 const Crypto = await WasmCrypto.create();
 test("Node replies with initial tx and header to empty subscribe", async () => {
     const [admin, session] = randomAnonymousAccountAndSessionID();
@@ -39,16 +40,17 @@ test("Node replies with initial tx and header to empty subscribe", async () => {
     // expect((await outRxQ.next()).value).toMatchObject(admContEx(admin.id));
     expect((await outRxQ.next()).value).toMatchObject(groupContentEx(group));
     const newContentMsg = (await outRxQ.next()).value;
+    const expectedHeader = {
+        type: "comap",
+        ruleset: { type: "ownedByGroup", group: group.id },
+        meta: null,
+        createdAt: map.core.header.createdAt,
+        uniqueness: map.core.header.uniqueness,
+    };
     expect(newContentMsg).toEqual({
         action: "content",
         id: map.core.id,
-        header: {
-            type: "comap",
-            ruleset: { type: "ownedByGroup", group: group.id },
-            meta: null,
-            createdAt: map.core.header.createdAt,
-            uniqueness: map.core.header.uniqueness,
-        },
+        header: expectedHeader,
         new: {
             [node.currentSessionID]: {
                 after: 0,
@@ -70,6 +72,7 @@
                     .lastSignature,
             },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test("Node replies with only new tx to subscribe with some known state", async () => {
@@ -132,6 +135,7 @@ test("Node replies with only new tx to subscribe with some known state", async (
                    .lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test.todo("TODO: node only replies with new tx to subscribe with some known state, even in the depended on coValues");
@@ -173,6 +177,7 @@ test("After subscribing, node sends own known state and new txs to peer", async
         id: map.core.id,
         header: map.core.header,
         new: {},
+        priority: getPriorityFromHeader(map.core.header),
     });
     map.set("hello", "world", "trusting");
     const mapEditMsg1 = (await outRxQ.next()).value;
@@ -200,6 +205,7 @@ test("After subscribing, node sends own known state and new txs to peer", async
                    .lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
     map.set("goodbye", "world", "trusting");
     const mapEditMsg2 = (await outRxQ.next()).value;
@@ -227,6 +233,7 @@ test("After subscribing, node sends own known state and new txs to peer", async
                    .lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test("Client replies with known new content to tellKnownState from server", async () => {
@@ -289,6 +296,7 @@ test("Client replies with known new content to tellKnownState from server", asyn
                    .lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test("No matter the optimistic known state, node respects invalid known state messages and resyncs", async () => {
@@ -329,6 +337,7 @@ test("No matter the optimistic known state, node respects invalid known state me
         id: map.core.id,
         header: map.core.header,
         new: {},
+        priority: getPriorityFromHeader(map.core.header),
     });
     map.set("hello", "world", "trusting");
     map.set("goodbye", "world", "trusting");
@@ -369,6 +378,7 @@ test("No matter the optimistic known state, node respects invalid known state me
                    .lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test("If we add a peer, but it never subscribes to a coValue, it won't get any messages", async () => {
@@ -451,6 +461,7 @@ test.todo("If we add a server peer, all updates to all coValues are sent to it,
                lastSignature: map.core.sessionLogs.get(node.currentSessionID).lastSignature,
            },
        },
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test.skip("If we add a server peer, newly created coValues are auto-subscribed to", async () => {
@@ -489,6 +500,7 @@ test.skip("If we add a server peer, newly created coValues are auto-subscribed t
         id: map.core.id,
         header: map.core.header,
         new: {},
+        priority: getPriorityFromHeader(map.core.header),
     });
 });
 test.todo("TODO: when receiving a subscribe response that is behind our optimistic state (due to already sent content), we ignore it");
@@ -565,7 +577,7 @@ test.skip("When replaying creation and transactions of a coValue as new content,
         role: "server",
         crashOnClose: true,
     });
-    const node2 = new LocalNode(admin, newRandomSessionID(admin.id), Crypto);
+    const node2 = new LocalNode(admin, Crypto.newRandomSessionID(admin.id), Crypto);
     const [inRx2, inTx2] = newQueuePair();
     const [outRx2, outTx2] = newQueuePair();
     const outRxQ2 = outRx2[Symbol.asyncIterator]();
@@ -613,6 +625,7 @@ test.skip("When replaying creation and transactions of a coValue as new content,
         id: map.core.id,
         header: map.core.header,
         new: {},
+        priority: getPriorityFromHeader(map.core.header),
     });
     await inTx2.push(mapSubscriptionMsg);
     const mapTellKnownStateMsg = (await outRxQ2.next()).value;
@@ -666,14 +679,14 @@ test("Can sync a coValue through a server to another client", async () => {
     map.set("hello", "world", "trusting");
     const [serverUser, serverSession] = randomAnonymousAccountAndSessionID();
     const server = new LocalNode(serverUser, serverSession, Crypto);
-    const [serverAsPeerForClient1, client1AsPeer] =
+    const [serverAsPeerForClient1, client1AsPeer] = connectedPeers("serverFor1", "client1", {
         peer1role: "server",
         peer2role: "client",
         trace: true,
     });
     client1.syncManager.addPeer(serverAsPeerForClient1);
     server.syncManager.addPeer(client1AsPeer);
-    const client2 = new LocalNode(admin, newRandomSessionID(admin.id), Crypto);
+    const client2 = new LocalNode(admin, Crypto.newRandomSessionID(admin.id), Crypto);
     const [serverAsPeerForClient2, client2AsPeer] = connectedPeers("serverFor2", "client2", {
         peer1role: "server",
         peer2role: "client",
@@ -695,15 +708,15 @@ test("Can sync a coValue with private transactions through a server to another c
     map.set("hello", "world", "private");
     const [serverUser, serverSession] = randomAnonymousAccountAndSessionID();
     const server = new LocalNode(serverUser, serverSession, Crypto);
-    const [serverAsPeer, client1AsPeer] =
+    const [serverAsPeer, client1AsPeer] = connectedPeers("server", "client1", {
         trace: true,
         peer1role: "server",
         peer2role: "client",
     });
     client1.syncManager.addPeer(serverAsPeer);
     server.syncManager.addPeer(client1AsPeer);
-    const client2 = new LocalNode(admin, newRandomSessionID(admin.id), Crypto);
-    const [serverAsOtherPeer, client2AsPeer] =
+    const client2 = new LocalNode(admin, client1.crypto.newRandomSessionID(admin.id), Crypto);
+    const [serverAsOtherPeer, client2AsPeer] = connectedPeers("server", "client2", {
         trace: true,
         peer1role: "server",
         peer2role: "client",
@@ -833,8 +846,8 @@ test("If we start loading a coValue before connecting to a peer that has it, it
     const group = node1.createGroup();
     const map = group.createMap();
     map.set("hello", "world", "trusting");
-    const node2 = new LocalNode(admin, newRandomSessionID(admin.id), Crypto);
-    const [node1asPeer, node2asPeer] =
+    const node2 = new LocalNode(admin, Crypto.newRandomSessionID(admin.id), Crypto);
+    const [node1asPeer, node2asPeer] = connectedPeers("peer1", "peer2", {
         peer1role: "server",
         peer2role: "client",
         trace: true,
@@ -849,6 +862,303 @@ test("If we start loading a coValue before connecting to a peer that has it, it
     }
     expect(expectMap(mapOnNode2.getCurrentContent()).get("hello")).toEqual("world");
 });
+describe("sync - extra tests", () => {
+    test("Node handles disconnection and reconnection of a peer gracefully", async () => {
+        // Create two nodes
+        const [admin1, session1] = randomAnonymousAccountAndSessionID();
+        const node1 = new LocalNode(admin1, session1, Crypto);
+        const [admin2, session2] = randomAnonymousAccountAndSessionID();
+        const node2 = new LocalNode(admin2, session2, Crypto);
+        // Create a group and a map on node1
+        const group = node1.createGroup();
+        group.addMember("everyone", "writer");
+        const map = group.createMap();
+        map.set("key1", "value1", "trusting");
+        // Connect the nodes
+        const [node1AsPeer, node2AsPeer] = connectedPeers("node1", "node2", {
+            peer1role: "server",
+            peer2role: "client",
+        });
+        node1.syncManager.addPeer(node2AsPeer);
+        node2.syncManager.addPeer(node1AsPeer);
+        // Wait for initial sync
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        // Verify that node2 has received the map
+        const mapOnNode2 = await node2.loadCoValueCore(map.core.id);
+        if (mapOnNode2 === "unavailable") {
+            throw new Error("Map is unavailable on node2");
+        }
+        expect(expectMap(mapOnNode2.getCurrentContent()).get("key1")).toEqual("value1");
+        // Simulate disconnection
+        node1.syncManager.gracefulShutdown();
+        node2.syncManager.gracefulShutdown();
+        // Make changes on node1 while disconnected
+        map.set("key2", "value2", "trusting");
+        // Simulate reconnection
+        const [newNode1AsPeer, newNode2AsPeer] = connectedPeers("node11", "node22", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        node1.syncManager.addPeer(newNode2AsPeer);
+        node2.syncManager.addPeer(newNode1AsPeer);
+        // Wait for re-sync
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        // Verify that node2 has received the changes made during disconnection
+        const updatedMapOnNode2 = await node2.loadCoValueCore(map.core.id);
+        if (updatedMapOnNode2 === "unavailable") {
+            throw new Error("Updated map is unavailable on node2");
+        }
+        expect(expectMap(updatedMapOnNode2.getCurrentContent()).get("key2")).toEqual("value2");
+        // Make a new change on node2 to verify two-way sync
+        const mapOnNode2ForEdit = await node2.loadCoValueCore(map.core.id);
+        if (mapOnNode2ForEdit === "unavailable") {
+            throw new Error("Updated map is unavailable on node2");
+        }
+        const success = mapOnNode2ForEdit.makeTransaction([
+            {
+                op: "set",
+                key: "key3",
+                value: "value3",
+            },
+        ], "trusting");
+        if (!success) {
+            throw new Error("Failed to make transaction");
+        }
+        // Wait for sync back to node1
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        const mapOnNode1 = await node1.loadCoValueCore(map.core.id);
+        if (mapOnNode1 === "unavailable") {
+            throw new Error("Updated map is unavailable on node1");
+        }
+        // Verify that node1 has received the change from node2
+        expect(expectMap(mapOnNode1.getCurrentContent()).get("key3")).toEqual("value3");
+    });
+    test("Concurrent modifications on multiple nodes are resolved correctly", async () => {
+        // Create three nodes
+        const [admin1, session1] = randomAnonymousAccountAndSessionID();
+        const node1 = new LocalNode(admin1, session1, Crypto);
+        const [admin2, session2] = randomAnonymousAccountAndSessionID();
+        const node2 = new LocalNode(admin2, session2, Crypto);
+        const [admin3, session3] = randomAnonymousAccountAndSessionID();
+        const node3 = new LocalNode(admin3, session3, Crypto);
+        // Create a group and a map on node1
+        const group = node1.createGroup();
+        group.addMember("everyone", "writer");
+        const map = group.createMap();
+        // Connect the nodes in a triangle topology
+        const [node1AsPeerFor2, node2AsPeerFor1] = connectedPeers("node1", "node2", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        const [node2AsPeerFor3, node3AsPeerFor2] = connectedPeers("node2", "node3", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        const [node3AsPeerFor1, node1AsPeerFor3] = connectedPeers("node3", "node1", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        node1.syncManager.addPeer(node2AsPeerFor1);
+        node1.syncManager.addPeer(node3AsPeerFor1);
+        node2.syncManager.addPeer(node1AsPeerFor2);
+        node2.syncManager.addPeer(node3AsPeerFor2);
+        node3.syncManager.addPeer(node1AsPeerFor3);
+        node3.syncManager.addPeer(node2AsPeerFor3);
+        // Wait for initial sync
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        // Verify that all nodes have the map
+        const mapOnNode1 = await node1.loadCoValueCore(map.core.id);
+        const mapOnNode2 = await node2.loadCoValueCore(map.core.id);
+        const mapOnNode3 = await node3.loadCoValueCore(map.core.id);
+        if (mapOnNode1 === "unavailable" ||
+            mapOnNode2 === "unavailable" ||
+            mapOnNode3 === "unavailable") {
+            throw new Error("Map is unavailable on node2 or node3");
+        }
+        // Perform concurrent modifications
+        map.set("key1", "value1", "trusting");
+        new RawCoMap(mapOnNode2).set("key2", "value2", "trusting");
+        new RawCoMap(mapOnNode3).set("key3", "value3", "trusting");
+        // Wait for sync to complete
+        await new Promise((resolve) => setTimeout(resolve, 200));
+        // Verify that all nodes have the same final state
+        const finalStateNode1 = expectMap(mapOnNode1.getCurrentContent());
+        const finalStateNode2 = expectMap(mapOnNode2.getCurrentContent());
+        const finalStateNode3 = expectMap(mapOnNode3.getCurrentContent());
+        const expectedState = {
+            key1: "value1",
+            key2: "value2",
+            key3: "value3",
+        };
+        expect(finalStateNode1.toJSON()).toEqual(expectedState);
+        expect(finalStateNode2.toJSON()).toEqual(expectedState);
+        expect(finalStateNode3.toJSON()).toEqual(expectedState);
+    });
+    test.skip("Large coValues are synced efficiently in chunks", async () => {
+        // Create two nodes
+        const [admin1, session1] = randomAnonymousAccountAndSessionID();
+        const node1 = new LocalNode(admin1, session1, Crypto);
+        const [admin2, session2] = randomAnonymousAccountAndSessionID();
+        const node2 = new LocalNode(admin2, session2, Crypto);
+        // Create a group and a large map on node1
+        const group = node1.createGroup();
+        group.addMember("everyone", "writer");
+        const largeMap = group.createMap();
+        // Generate a large amount of data (about 10MB)
+        const dataSize = 1 * 1024 * 1024;
+        const chunkSize = 1024; // 1KB chunks
+        const chunks = dataSize / chunkSize;
+        for (let i = 0; i < chunks; i++) {
+            const key = `key${i}`;
+            const value = Buffer.alloc(chunkSize, `value${i}`).toString("base64");
+            largeMap.set(key, value, "trusting");
+        }
+        // Connect the nodes
+        const [node1AsPeer, node2AsPeer] = connectedPeers("node1", "node2", {
+            peer1role: "server",
+            peer2role: "client",
+        });
+        node1.syncManager.addPeer(node2AsPeer);
+        node2.syncManager.addPeer(node1AsPeer);
+        await new Promise((resolve) => setTimeout(resolve, 4000));
+        // Measure sync time
+        const startSync = performance.now();
+        // Load the large map on node2
+        const largeMapOnNode2 = await node2.loadCoValueCore(largeMap.core.id);
+        if (largeMapOnNode2 === "unavailable") {
+            throw new Error("Large map is unavailable on node2");
+        }
+        const endSync = performance.now();
+        const syncTime = endSync - startSync;
+        // Verify that all data was synced correctly
+        const syncedMap = new RawCoMap(largeMapOnNode2);
+        expect(Object.keys(largeMapOnNode2.getCurrentContent().toJSON() || {})
+            .length).toBe(chunks);
+        for (let i = 0; i < chunks; i++) {
+            const key = `key${i}`;
+            const expectedValue = Buffer.alloc(chunkSize, `value${i}`).toString("base64");
+            expect(syncedMap.get(key)).toBe(expectedValue);
+        }
+        // Check that sync time is reasonable (this threshold may need adjustment)
+        const reasonableSyncTime = 10; // 30 seconds
+        expect(syncTime).toBeLessThan(reasonableSyncTime);
+        // Check memory usage (this threshold may need adjustment)
+        const memoryUsage = process.memoryUsage().heapUsed / 1024 / 1024; // in MB
+        const reasonableMemoryUsage = 1; // 500 MB
+        expect(memoryUsage).toBeLessThan(reasonableMemoryUsage);
+    });
+    test("Node correctly handles and recovers from network partitions", async () => {
+        // Create three nodes
+        const [admin1, session1] = randomAnonymousAccountAndSessionID();
+        const node1 = new LocalNode(admin1, session1, Crypto);
+        const [admin2, session2] = randomAnonymousAccountAndSessionID();
+        const node2 = new LocalNode(admin2, session2, Crypto);
+        const [admin3, session3] = randomAnonymousAccountAndSessionID();
+        const node3 = new LocalNode(admin3, session3, Crypto);
+        // Create a group and a map on node1
+        const group = node1.createGroup();
+        group.addMember("everyone", "writer");
+        const map = group.createMap();
+        map.set("initial", "value", "trusting");
+        // Connect all nodes
+        const [node1AsPeerFor2, node2AsPeerFor1] = connectedPeers("node1", "node2", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        const [node2AsPeerFor3, node3AsPeerFor2] = connectedPeers("node2", "node3", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        const [node3AsPeerFor1, node1AsPeerFor3] = connectedPeers("node3", "node1", {
+            peer1role: "server",
+            peer2role: "client",
+            // trace: true,
+        });
+        node1.syncManager.addPeer(node2AsPeerFor1);
+        node1.syncManager.addPeer(node3AsPeerFor1);
+        node2.syncManager.addPeer(node1AsPeerFor2);
+        node2.syncManager.addPeer(node3AsPeerFor2);
+        node3.syncManager.addPeer(node1AsPeerFor3);
+        node3.syncManager.addPeer(node2AsPeerFor3);
+        // Wait for initial sync
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        // Verify initial state
+        const mapOnNode1Core = await node1.loadCoValueCore(map.core.id);
+        const mapOnNode2Core = await node2.loadCoValueCore(map.core.id);
+        const mapOnNode3Core = await node3.loadCoValueCore(map.core.id);
+        if (mapOnNode1Core === "unavailable" ||
+            mapOnNode2Core === "unavailable" ||
+            mapOnNode3Core === "unavailable") {
+            throw new Error("Map is unavailable on node2 or node3");
+        }
+        // const mapOnNode1 = new RawCoMap(mapOnNode1Core);
+        const mapOnNode2 = new RawCoMap(mapOnNode2Core);
+        const mapOnNode3 = new RawCoMap(mapOnNode3Core);
+        expect(mapOnNode2.get("initial")).toBe("value");
+        expect(mapOnNode3.get("initial")).toBe("value");
+        // Simulate network partition: disconnect node3 from node1 and node2
+        node1.syncManager.peers["node3"]?.gracefulShutdown();
+        delete node1.syncManager.peers["node3"];
+        node2.syncManager.peers["node3"]?.gracefulShutdown();
+        delete node2.syncManager.peers["node3"];
+        node3.syncManager.peers["node1"]?.gracefulShutdown();
+        delete node3.syncManager.peers["node1"];
+        node3.syncManager.peers["node2"]?.gracefulShutdown();
+        delete node3.syncManager.peers["node2"];
+        // Make changes on both sides of the partition
+        map.set("node1", "partition", "trusting");
+        mapOnNode2.set("node2", "partition", "trusting");
+        mapOnNode3.set("node3", "partition", "trusting");
+        // Wait for sync between node1 and node2
+        await new Promise((resolve) => setTimeout(resolve, 100));
+        // Verify that node1 and node2 are in sync, but node3 is not
+        expect(expectMap(mapOnNode1Core.getCurrentContent()).get("node1")).toBe("partition");
+        expect(expectMap(mapOnNode1Core.getCurrentContent()).get("node2")).toBe("partition");
+        expect(expectMap(mapOnNode1Core.getCurrentContent()).toJSON()?.node3).toBe(undefined);
+        expect(expectMap(mapOnNode2Core.getCurrentContent()).get("node1")).toBe("partition");
+        expect(expectMap(mapOnNode2Core.getCurrentContent()).get("node2")).toBe("partition");
+        expect(expectMap(mapOnNode2Core.getCurrentContent()).toJSON()?.node3).toBe(undefined);
+        expect(expectMap(mapOnNode3Core.getCurrentContent()).toJSON()?.node1).toBe(undefined);
+        expect(expectMap(mapOnNode3Core.getCurrentContent()).toJSON()?.node2).toBe(undefined);
+        expect(expectMap(mapOnNode3Core.getCurrentContent()).toJSON()?.node3).toBe("partition");
+        // Restore connectivity
+        const [newNode3AsPeerFor1, newNode1AsPeerFor3] = connectedPeers("node3", "node1", {
+            peer1role: "server",
+            peer2role: "client",
+            trace: true,
+        });
+        const [newNode3AsPeerFor2, newNode2AsPeerFor3] = connectedPeers("node3", "node2", {
+            peer1role: "server",
+            peer2role: "client",
+            trace: true,
+        });
+        node1.syncManager.addPeer(newNode3AsPeerFor1);
+        node2.syncManager.addPeer(newNode3AsPeerFor2);
+        node3.syncManager.addPeer(newNode1AsPeerFor3);
+        node3.syncManager.addPeer(newNode2AsPeerFor3);
+        // Wait for re-sync
+        await new Promise((resolve) => setTimeout(resolve, 200));
+        // Verify final state: all nodes should have all changes
+        const finalStateNode1 = expectMap(mapOnNode1Core.getCurrentContent()).toJSON();
+        const finalStateNode2 = expectMap(mapOnNode2Core.getCurrentContent()).toJSON();
+        const finalStateNode3 = expectMap(mapOnNode3Core.getCurrentContent()).toJSON();
+        const expectedFinalState = {
+            initial: "value",
+            node1: "partition",
+            node2: "partition",
+            node3: "partition",
+        };
+        expect(finalStateNode1).toEqual(expectedFinalState);
+        expect(finalStateNode2).toEqual(expectedFinalState);
+        expect(finalStateNode3).toEqual(expectedFinalState);
+    });
+});
 function groupContentEx(group) {
     return {
         action: "content",