@automerge/automerge-repo 1.0.19 → 1.1.0-alpha.13
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +12 -7
- package/dist/AutomergeUrl.js +2 -2
- package/dist/DocHandle.d.ts +6 -5
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +7 -7
- package/dist/RemoteHeadsSubscriptions.d.ts +42 -0
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -0
- package/dist/RemoteHeadsSubscriptions.js +284 -0
- package/dist/Repo.d.ts +29 -2
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +168 -9
- package/dist/helpers/debounce.js +1 -1
- package/dist/helpers/pause.d.ts.map +1 -1
- package/dist/helpers/pause.js +2 -0
- package/dist/helpers/throttle.js +1 -1
- package/dist/helpers/withTimeout.d.ts.map +1 -1
- package/dist/helpers/withTimeout.js +2 -0
- package/dist/index.d.ts +3 -3
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/network/NetworkAdapter.d.ts +15 -1
- package/dist/network/NetworkAdapter.d.ts.map +1 -1
- package/dist/network/NetworkAdapter.js +3 -1
- package/dist/network/NetworkSubsystem.d.ts +4 -2
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +13 -7
- package/dist/network/messages.d.ts +68 -35
- package/dist/network/messages.d.ts.map +1 -1
- package/dist/network/messages.js +9 -7
- package/dist/storage/StorageSubsystem.d.ts +5 -3
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +23 -5
- package/dist/storage/keyHash.d.ts.map +1 -1
- package/dist/storage/types.d.ts +4 -0
- package/dist/storage/types.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.d.ts +2 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +9 -3
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +20 -17
- package/dist/synchronizer/Synchronizer.d.ts +12 -3
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/package.json +6 -6
- package/src/AutomergeUrl.ts +2 -2
- package/src/DocHandle.ts +10 -9
- package/src/RemoteHeadsSubscriptions.ts +375 -0
- package/src/Repo.ts +241 -16
- package/src/helpers/debounce.ts +1 -1
- package/src/helpers/pause.ts +4 -0
- package/src/helpers/throttle.ts +1 -1
- package/src/helpers/withTimeout.ts +2 -0
- package/src/index.ts +3 -1
- package/src/network/NetworkAdapter.ts +19 -2
- package/src/network/NetworkSubsystem.ts +21 -9
- package/src/network/messages.ts +88 -50
- package/src/storage/StorageSubsystem.ts +30 -7
- package/src/storage/keyHash.ts +2 -0
- package/src/storage/types.ts +3 -0
- package/src/synchronizer/CollectionSynchronizer.ts +13 -5
- package/src/synchronizer/DocSynchronizer.ts +27 -27
- package/src/synchronizer/Synchronizer.ts +13 -3
- package/test/DocHandle.test.ts +0 -17
- package/test/RemoteHeadsSubscriptions.test.ts +353 -0
- package/test/Repo.test.ts +108 -17
- package/test/StorageSubsystem.test.ts +29 -7
- package/test/helpers/waitForMessages.ts +22 -0
- package/test/remoteHeads.test.ts +260 -0
- package/.eslintrc +0 -28
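
The test diffs below exercise the main additions in this alpha: a `Repo` that can be created without network adapters, `repo.export()` / `repo.import()`, a stable per-repo storage ID, ephemeral peers, and remote-heads gossiping. The following is a hypothetical usage sketch assembled from those tests; the storage adapter choice and directory path are assumptions and not part of this diff.

```ts
import { Repo, PeerId } from "@automerge/automerge-repo"
import { NodeFSStorageAdapter } from "@automerge/automerge-repo-storage-nodefs"

async function main() {
  const repo = new Repo({
    peerId: "example-peer" as PeerId, // hypothetical peer id
    network: [], // new in 1.1.0: a Repo may have no network adapters
    storage: new NodeFSStorageAdapter("./automerge-data"), // assumed local path
    enableRemoteHeadsGossiping: true, // opt in to remote-heads gossip messages
  })

  // export a document as Automerge bytes, then import it into a new handle
  const handle = repo.create<{ foo: string }>()
  handle.change(d => (d.foo = "bar"))
  const bytes = await repo.export(handle.documentId)
  const imported = repo.import<{ foo: string }>(bytes)

  // each storage subsystem now exposes a stable StorageId
  const myStorageId = await repo.storageId()

  // subscribe to a storage id (normally a sync server's, not our own) and
  // watch per-document remote heads as they are gossiped
  repo.subscribeToRemotes([myStorageId!])
  imported.on("remote-heads", ({ storageId, heads }) => {
    console.log("remote", storageId, "is at heads", heads)
  })
}

main()
```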
package/test/Repo.test.ts
CHANGED
@@ -1,9 +1,9 @@
 import { next as A } from "@automerge/automerge"
-import { MessageChannelNetworkAdapter } from "
+import { MessageChannelNetworkAdapter } from "../../automerge-repo-network-messagechannel/src/index.js"
 import assert from "assert"
 import * as Uuid from "uuid"
-import { describe, it } from "vitest"
-import {
+import { describe, expect, it } from "vitest"
+import { READY } from "../src/DocHandle.js"
 import { parseAutomergeUrl } from "../src/AutomergeUrl.js"
 import {
   generateAutomergeUrl,
@@ -13,6 +13,7 @@ import { Repo } from "../src/Repo.js"
 import { eventPromise } from "../src/helpers/eventPromise.js"
 import { pause } from "../src/helpers/pause.js"
 import {
+  AnyDocumentId,
   AutomergeUrl,
   DocHandle,
   DocumentId,
@@ -28,8 +29,18 @@ import {
 } from "./helpers/generate-large-object.js"
 import { getRandomItem } from "./helpers/getRandomItem.js"
 import { TestDoc } from "./types.js"
+import { StorageId } from "../src/storage/types.js"

 describe("Repo", () => {
+  describe("constructor", () => {
+    it("can be instantiated without network adapters", () => {
+      const repo = new Repo({
+        network: [],
+      })
+      expect(repo).toBeInstanceOf(Repo)
+    })
+  })
+
   describe("local only", () => {
     const setup = ({ startReady = true } = {}) => {
       const storageAdapter = new DummyStorageAdapter()
@@ -320,6 +331,27 @@ describe("Repo", () => {
       repo.delete(handle.documentId)
     }))

+    it("exports a document", async () => {
+      const { repo } = setup()
+      const handle = repo.create<TestDoc>()
+      handle.change(d => {
+        d.foo = "bar"
+      })
+      assert.equal(handle.isReady(), true)
+
+      const exported = await repo.export(handle.documentId)
+      const loaded = A.load(exported)
+      const doc = await handle.doc()
+      assert.deepEqual(doc, loaded)
+    })
+
+    it("rejects when exporting a document that does not exist", async () => {
+      const { repo } = setup()
+      assert.rejects(async () => {
+        await repo.export("foo" as AnyDocumentId)
+      })
+    })
+
     it("storage state doesn't change across reloads when the document hasn't changed", async () => {
       const storage = new DummyStorageAdapter()

@@ -337,6 +369,9 @@ describe("Repo", () => {
         d.count = 1
       })

+      // wait because storage id is not initialized immediately
+      await pause()
+
       const initialKeys = storage.keys()

       const repo2 = new Repo({
@@ -392,10 +427,37 @@ describe("Repo", () => {
       const storageKeyTypes = storageAdapter.keys().map(k => k.split(".")[1])
       assert(storageKeyTypes.filter(k => k === "snapshot").length === 1)
     })
+
+    it("can import an existing document", async () => {
+      const { repo } = setup()
+      const doc = A.init<TestDoc>()
+      const updatedDoc = A.change(doc, d => {
+        d.foo = "bar"
+      })
+
+      const saved = A.save(updatedDoc)
+
+      const handle = repo.import<TestDoc>(saved)
+      assert.equal(handle.isReady(), true)
+      const v = await handle.doc()
+      assert.equal(v?.foo, "bar")
+
+      expect(A.getHistory(v)).toEqual(A.getHistory(updatedDoc))
+    })
+
+    it("throws an error if we try to import an invalid document", async () => {
+      const { repo } = setup()
+      expect(() => {
+        repo.import<TestDoc>(A.init<TestDoc> as unknown as Uint8Array)
+      }).toThrow()
+    })
   })

   describe("with peers (linear network)", async () => {
-    const setup = async ({
+    const setup = async ({
+      connectAlice = true,
+      isCharlieEphemeral = false,
+    } = {}) => {
       const charlieExcludedDocuments: DocumentId[] = []
       const bobExcludedDocuments: DocumentId[] = []

@@ -451,6 +513,7 @@ describe("Repo", () => {
         storage: charlieStorage,
         network: [new MessageChannelNetworkAdapter(cb)],
         peerId: charlie,
+        isEphemeral: isCharlieEphemeral,
       })

       const teardown = () => {
@@ -789,7 +852,7 @@ describe("Repo", () => {
     })

     it("should save sync state of other peers", async () => {
-      const { bobRepo, teardown,
+      const { bobRepo, teardown, charlieRepo } = await setup({
         connectAlice: false,
       })

@@ -803,7 +866,7 @@ describe("Repo", () => {
       // bob should store the sync state of charlie
       const storedSyncState = await bobRepo.storageSubsystem.loadSyncState(
         bobHandle.documentId,
-
+        await charlieRepo!.storageSubsystem.id()
       )
       const docHeads = A.getHeads(bobHandle.docSync())
       assert.deepStrictEqual(storedSyncState.sharedHeads, docHeads)
@@ -811,6 +874,29 @@ describe("Repo", () => {
       teardown()
     })

+    it("should not save sync state of ephemeral peers", async () => {
+      const { bobRepo, teardown, charlieRepo } = await setup({
+        connectAlice: false,
+        isCharlieEphemeral: true,
+      })
+
+      const bobHandle = bobRepo.create<TestDoc>()
+      bobHandle.change(d => {
+        d.foo = "bar"
+      })
+
+      await pause(200)
+
+      // bob should not store the sync state for charlie because charly is an ephemeral peer
+      const storedSyncState = await bobRepo.storageSubsystem.loadSyncState(
+        bobHandle.documentId,
+        await charlieRepo!.storageSubsystem.id()
+      )
+      assert.deepStrictEqual(storedSyncState, undefined)
+
+      teardown()
+    })
+
     it("should load sync state from storage", async () => {
       const { bobRepo, teardown, charlie, charlieRepo, bobStorage, bob } =
         await setup({
@@ -823,13 +909,15 @@ describe("Repo", () => {
         d.foo = "bar"
       })
       let bobSyncMessages = 0
-      bobRepo.networkSubsystem.on("message",
-
+      bobRepo.networkSubsystem.on("message", message => {
+        if (message.type === "sync") {
+          bobSyncMessages++
+        }
       })
       await pause(500)

-      // repo has no stored sync state for charlie so we should see
-      assert.strictEqual(bobSyncMessages,
+      // repo has no stored sync state for charlie so we should see 3 sync messages
+      assert.strictEqual(bobSyncMessages, 3)

       // setup new repo which uses bob's storage
       const bob2Repo = new Repo({
@@ -850,8 +938,10 @@ describe("Repo", () => {
       // lookup doc we've previously created and count the messages
       bob2Repo.find(bobHandle.documentId)
       let bob2SyncMessages = 0
-      bob2Repo.networkSubsystem.on("message",
-
+      bob2Repo.networkSubsystem.on("message", message => {
+        if (message.type === "sync") {
+          bob2SyncMessages++
+        }
       })
       await pause(100)

@@ -866,6 +956,7 @@ describe("Repo", () => {
       const { bobRepo, charlieRepo, teardown } = await setup({
         connectAlice: false,
       })
+      const charliedStorageId = await charlieRepo.storageSubsystem.id()

       const handle = bobRepo.create<TestDoc>()
       handle.change(d => {
@@ -876,11 +967,11 @@ describe("Repo", () => {
       await pause(50)

       const nextRemoteHeadsPromise = new Promise<{
-
+        storageId: StorageId
         heads: A.Heads
       }>(resolve => {
-        handle.on("remote-heads", ({
-          resolve({
+        handle.on("remote-heads", ({ storageId, heads }) => {
+          resolve({ storageId, heads })
         })
       })

@@ -901,11 +992,11 @@ describe("Repo", () => {
       assert.deepStrictEqual(charlieHeads, bobHeads)

       const nextRemoteHeads = await nextRemoteHeadsPromise
-      assert.deepStrictEqual(nextRemoteHeads.
+      assert.deepStrictEqual(nextRemoteHeads.storageId, charliedStorageId)
       assert.deepStrictEqual(nextRemoteHeads.heads, charlieHeads)

       assert.deepStrictEqual(
-        handle.getRemoteHeads(
+        handle.getRemoteHeads(charliedStorageId),
         A.getHeads(charlieHandle.docSync())
       )

package/test/StorageSubsystem.test.ts
CHANGED
@@ -1,4 +1,4 @@
-import { NodeFSStorageAdapter } from "
+import { NodeFSStorageAdapter } from "../../automerge-repo-storage-nodefs/src/index.js"
 import * as A from "@automerge/automerge/next"
 import assert from "assert"
 import fs from "fs"
@@ -8,7 +8,9 @@ import { describe, it } from "vitest"
 import { generateAutomergeUrl, parseAutomergeUrl } from "../src/AutomergeUrl.js"
 import { PeerId, cbor } from "../src/index.js"
 import { StorageSubsystem } from "../src/storage/StorageSubsystem.js"
+import { StorageId } from "../src/storage/types.js"
 import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
+import * as Uuid from "uuid"

 const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "automerge-repo-tests"))

@@ -182,12 +184,15 @@ describe("StorageSubsystem", () => {

          const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
          const syncState = A.initSyncState()
-          const
+          const bobStorageId = Uuid.v4() as StorageId

          const rawSyncState = A.decodeSyncState(A.encodeSyncState(syncState))

-          await storage.saveSyncState(documentId,
-          const loadedSyncState = await storage.loadSyncState(
+          await storage.saveSyncState(documentId, bobStorageId, syncState)
+          const loadedSyncState = await storage.loadSyncState(
+            documentId,
+            bobStorageId
+          )
          assert.deepStrictEqual(loadedSyncState, rawSyncState)
        })

@@ -196,14 +201,31 @@

          const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
          const syncState = A.initSyncState()
-          const
+          const bobStorageId = Uuid.v4() as StorageId

-          await storage.saveSyncState(documentId,
+          await storage.saveSyncState(documentId, bobStorageId, syncState)
          await storage.removeDoc(documentId)
-          const loadedSyncState = await storage.loadSyncState(
+          const loadedSyncState = await storage.loadSyncState(
+            documentId,
+            bobStorageId
+          )
          assert.strictEqual(loadedSyncState, undefined)
        })
      })
+
+      describe("storage id", () => {
+        it("generates a unique id", async () => {
+          const storage = new StorageSubsystem(adapter)
+
+          // generate unique id and return same id on subsequence calls
+          const id1 = await storage.id()
+          const id2 = await storage.id()
+
+          assert.strictEqual(Uuid.validate(id1), true)
+          assert.strictEqual(Uuid.validate(id2), true)
+          assert.strictEqual(id1, id2)
+        })
+      })
    })
  })
})
package/test/helpers/waitForMessages.ts
ADDED
@@ -0,0 +1,22 @@
+import { EventEmitter } from "eventemitter3"
+import { pause } from "../../src/helpers/pause.js"
+
+export async function waitForMessages(
+  emitter: EventEmitter,
+  event: string,
+  timeout: number = 100
+): Promise<any[]> {
+  const messages = []
+
+  const onEvent = message => {
+    messages.push(message)
+  }
+
+  emitter.on(event, onEvent)
+
+  await pause(timeout)
+
+  emitter.off(event)
+
+  return messages
+}
package/test/remoteHeads.test.ts
ADDED
@@ -0,0 +1,260 @@
+import { MessageChannelNetworkAdapter } from "../../automerge-repo-network-messagechannel/dist/index.js"
+import * as A from "@automerge/automerge/next"
+import assert from "assert"
+import { setTimeout } from "timers/promises"
+import { describe, it } from "vitest"
+import { generateAutomergeUrl, parseAutomergeUrl } from "../src/AutomergeUrl.js"
+import { eventPromise } from "../src/helpers/eventPromise.js"
+import {
+  DocHandle,
+  DocHandleRemoteHeadsPayload,
+  PeerId,
+  Repo,
+} from "../src/index.js"
+import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
+import { waitForMessages } from "./helpers/waitForMessages.js"
+import { TestDoc } from "./types.js"
+
+describe("DocHandle.remoteHeads", () => {
+  const TEST_ID = parseAutomergeUrl(generateAutomergeUrl()).documentId
+
+  it("should allow to listen for remote head changes and manually read remote heads", async () => {
+    const handle = new DocHandle<TestDoc>(TEST_ID, { isNew: true })
+    const bobRepo = new Repo({
+      peerId: "bob" as PeerId,
+      network: [],
+      storage: new DummyStorageAdapter(),
+      enableRemoteHeadsGossiping: true,
+    })
+    const bobStorageId = await bobRepo.storageId()
+
+    const remoteHeadsMessagePromise = eventPromise(handle, "remote-heads")
+    handle.setRemoteHeads(bobStorageId, [])
+
+    const remoteHeadsMessage = await remoteHeadsMessagePromise
+
+    assert.strictEqual(remoteHeadsMessage.storageId, bobStorageId)
+    assert.deepStrictEqual(remoteHeadsMessage.heads, [])
+
+    // read remote heads manually
+    assert.deepStrictEqual(handle.getRemoteHeads(bobStorageId), [])
+  })
+
+  describe("multi hop sync", () => {
+    async function setup() {
+      // setup topology: tab -> service worker -> sync server <- service worker <- tab
+      const leftTab1 = new Repo({
+        peerId: "left-tab-1" as PeerId,
+        network: [],
+        sharePolicy: async () => true,
+        enableRemoteHeadsGossiping: true,
+      })
+      const leftTab2 = new Repo({
+        peerId: "left-tab-2" as PeerId,
+        network: [],
+        sharePolicy: async () => true,
+        enableRemoteHeadsGossiping: true,
+      })
+      const leftServiceWorker = new Repo({
+        peerId: "left-service-worker" as PeerId,
+        network: [],
+        sharePolicy: async peer => peer === "sync-server",
+        storage: new DummyStorageAdapter(),
+        isEphemeral: false,
+        enableRemoteHeadsGossiping: true,
+      })
+      const syncServer = new Repo({
+        peerId: "sync-server" as PeerId,
+        network: [],
+        isEphemeral: false,
+        sharePolicy: async () => false,
+        storage: new DummyStorageAdapter(),
+        enableRemoteHeadsGossiping: true,
+      })
+      const rightServiceWorker = new Repo({
+        peerId: "right-service-worker" as PeerId,
+        network: [],
+        sharePolicy: async peer => peer === "sync-server",
+        isEphemeral: false,
+        storage: new DummyStorageAdapter(),
+        enableRemoteHeadsGossiping: true,
+      })
+      const rightTab = new Repo({
+        peerId: "right-tab" as PeerId,
+        network: [],
+        sharePolicy: async () => true,
+        enableRemoteHeadsGossiping: true,
+      })
+
+      // connect them all up
+      connectRepos(leftTab1, leftServiceWorker)
+      connectRepos(leftTab2, leftServiceWorker)
+      connectRepos(leftServiceWorker, syncServer)
+      connectRepos(syncServer, rightServiceWorker)
+      connectRepos(rightServiceWorker, rightTab)
+
+      await setTimeout(100)
+
+      return {
+        leftTab1,
+        leftTab2,
+        leftServiceWorker,
+        syncServer,
+        rightServiceWorker,
+        rightTab,
+      }
+    }
+
+    it("should report remoteHeads for peers", async () => {
+      const { rightTab, rightServiceWorker, leftServiceWorker, leftTab1 } =
+        await setup()
+
+      // subscribe to the left service worker storage ID on the right tab
+      rightTab.subscribeToRemotes([await leftServiceWorker.storageId()!])
+
+      await setTimeout(100)
+
+      // create a doc in the left tab
+      const leftTabDoc = leftTab1.create<TestDoc>()
+      leftTabDoc.change(d => (d.foo = "bar"))
+
+      // wait for the document to arrive on the right tab
+      const rightTabDoc = rightTab.find<TestDoc>(leftTabDoc.url)
+      await rightTabDoc.whenReady()
+
+      // wait for the document to arrive in the left service worker
+      const leftServiceWorkerDoc = leftServiceWorker.find(leftTabDoc.documentId)
+      await leftServiceWorkerDoc.whenReady()
+
+      const leftServiceWorkerStorageId = await leftServiceWorker.storageId()
+      let leftSeenByRightPromise = new Promise<DocHandleRemoteHeadsPayload>(
+        resolve => {
+          rightTabDoc.on("remote-heads", message => {
+            if (message.storageId === leftServiceWorkerStorageId) {
+              resolve(message)
+            }
+          })
+        }
+      )
+
+      // make a change on the right
+      rightTabDoc.change(d => (d.foo = "baz"))
+
+      // wait for the change to be acknolwedged by the left
+      const leftSeenByRight = await leftSeenByRightPromise
+
+      assert.deepStrictEqual(
+        leftSeenByRight.heads,
+        A.getHeads(leftServiceWorkerDoc.docSync())
+      )
+    })
+
+    it("should report remoteHeads only for documents the subscriber has open", async () => {
+      const { leftTab1, rightTab, rightServiceWorker } = await setup()
+
+      // subscribe leftTab to storageId of rightServiceWorker
+      leftTab1.subscribeToRemotes([await rightServiceWorker.storageId()!])
+
+      await setTimeout(100)
+
+      // create 2 docs in right tab
+      const rightTabDocA = rightTab.create<TestDoc>()
+      rightTabDocA.change(d => (d.foo = "A"))
+
+      const rightTabDocB = rightTab.create<TestDoc>()
+      rightTabDocB.change(d => (d.foo = "B"))
+
+      // open doc b in left tab 1
+      const leftTabDocA = leftTab1.find<TestDoc>(rightTabDocA.url)
+
+      const remoteHeadsChangedMessages = (
+        await waitForMessages(leftTab1.networkSubsystem, "message")
+      ).filter(({ type }) => type === "remote-heads-changed")
+
+      // we should only be notified of the head changes of doc A
+      const docIds = remoteHeadsChangedMessages.map(d => d.documentId)
+      const uniqueDocIds = [...new Set(docIds)]
+      assert.deepStrictEqual(uniqueDocIds, [leftTabDocA.documentId])
+    })
+
+    it("should report remote heads for doc on subscribe if peer already knows them", async () => {
+      const { leftTab1, leftTab2, rightTab, rightServiceWorker } = await setup()
+
+      // create 2 docs in right tab
+      const rightTabDocA = rightTab.create<TestDoc>()
+      rightTabDocA.change(d => (d.foo = "A"))
+
+      const rightTabDocB = rightTab.create<TestDoc>()
+      rightTabDocB.change(d => (d.foo = "B"))
+
+      // open docs in left tab 1
+      const leftTab1DocA = leftTab1.find<TestDoc>(rightTabDocA.url)
+      const leftTab1DocB = leftTab1.find<TestDoc>(rightTabDocB.url)
+
+      // subscribe leftTab 1 to storageId of rightServiceWorker
+      leftTab1.subscribeToRemotes([await rightServiceWorker.storageId()!])
+
+      await setTimeout(200)
+
+      // now the left service worker has the remote heads of the right service worker for both doc A and doc B
+      // if we subscribe from left tab 1 the left service workers should send it's stored remote heads immediately
+
+      // open doc and subscribe leftTab 2 to storageId of rightServiceWorker
+      const leftTab2DocA = leftTab2.find<TestDoc>(rightTabDocA.url)
+      leftTab2.subscribeToRemotes([await rightServiceWorker.storageId()!])
+
+      const remoteHeadsChangedMessages = (
+        await waitForMessages(leftTab2.networkSubsystem, "message")
+      ).filter(({ type }) => type === "remote-heads-changed")
+
+      // we should only be notified of the head changes of doc A
+      assert.strictEqual(remoteHeadsChangedMessages.length, 1)
+      assert.strictEqual(
+        remoteHeadsChangedMessages[0].documentId,
+        leftTab1DocA.documentId
+      )
+    })
+
+    it("should report remote heads for subscribed storage id once we open a new doc", async () => {
+      const { leftTab1, leftTab2, rightTab, rightServiceWorker } = await setup()
+
+      // create 2 docs in right tab
+      const rightTabDocA = rightTab.create<TestDoc>()
+      rightTabDocA.change(d => (d.foo = "A"))
+
+      const rightTabDocB = rightTab.create<TestDoc>()
+      rightTabDocB.change(d => (d.foo = "B"))
+
+      await setTimeout(200)
+
+      // subscribe leftTab 1 to storageId of rightServiceWorker
+      leftTab1.subscribeToRemotes([await rightServiceWorker.storageId()!])
+
+      // in leftTab 1 open doc A
+      const leftTab1DocA = leftTab1.find<TestDoc>(rightTabDocA.url)
+
+      const remoteHeadsChangedMessages = (
+        await waitForMessages(leftTab1.networkSubsystem, "message")
+      ).filter(({ type }) => type === "remote-heads-changed")
+
+      // console.log(JSON.stringify(remoteHeadsChangedMessages, null, 2))
+
+      assert.strictEqual(remoteHeadsChangedMessages.length, 1)
+      assert.strictEqual(
+        remoteHeadsChangedMessages[0].documentId,
+        leftTab1DocA.documentId
+      )
+    })
+  })
+})
+
+function connectRepos(repo1: Repo, repo2: Repo) {
+  const { port1: leftToRight, port2: rightToLeft } = new MessageChannel()
+
+  repo1.networkSubsystem.addNetworkAdapter(
+    new MessageChannelNetworkAdapter(leftToRight)
+  )
+  repo2.networkSubsystem.addNetworkAdapter(
+    new MessageChannelNetworkAdapter(rightToLeft)
+  )
+}
package/.eslintrc
DELETED
@@ -1,28 +0,0 @@
-{
-  "env": {
-    "browser": true,
-    "es2021": true
-  },
-  "extends": [
-    "eslint:recommended",
-    "plugin:@typescript-eslint/eslint-recommended",
-    "plugin:@typescript-eslint/recommended"
-  ],
-  "ignorePatterns": ["dist/**", "test/**", "node_modules/**"],
-  "parser": "@typescript-eslint/parser",
-  "plugins": ["@typescript-eslint"],
-  "parserOptions": {
-    "project": "./tsconfig.json",
-    "ecmaVersion": "latest",
-    "sourceType": "module"
-  },
-  "rules": {
-    "semi": ["error", "never"],
-    "import/extensions": 0,
-    "lines-between-class-members": 0,
-    "@typescript-eslint/no-floating-promises": "error",
-    "@typescript-eslint/no-empty-function": ["warn", { "allow": ["methods"] }],
-    "no-param-reassign": 0,
-    "no-use-before-define": 0
-  }
-}