@automerge/automerge-repo 1.0.19 → 1.1.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/dist/DocHandle.d.ts +6 -5
  2. package/dist/DocHandle.d.ts.map +1 -1
  3. package/dist/DocHandle.js +7 -7
  4. package/dist/RemoteHeadsSubscriptions.d.ts +41 -0
  5. package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -0
  6. package/dist/RemoteHeadsSubscriptions.js +224 -0
  7. package/dist/Repo.d.ts +11 -2
  8. package/dist/Repo.d.ts.map +1 -1
  9. package/dist/Repo.js +117 -8
  10. package/dist/index.d.ts +2 -2
  11. package/dist/index.d.ts.map +1 -1
  12. package/dist/network/NetworkAdapter.d.ts +15 -1
  13. package/dist/network/NetworkAdapter.d.ts.map +1 -1
  14. package/dist/network/NetworkAdapter.js +1 -0
  15. package/dist/network/NetworkSubsystem.d.ts +4 -2
  16. package/dist/network/NetworkSubsystem.d.ts.map +1 -1
  17. package/dist/network/NetworkSubsystem.js +8 -4
  18. package/dist/network/messages.d.ts +24 -1
  19. package/dist/network/messages.d.ts.map +1 -1
  20. package/dist/network/messages.js +5 -1
  21. package/dist/storage/StorageSubsystem.d.ts +5 -3
  22. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  23. package/dist/storage/StorageSubsystem.js +23 -5
  24. package/dist/storage/types.d.ts +4 -0
  25. package/dist/storage/types.d.ts.map +1 -1
  26. package/dist/synchronizer/CollectionSynchronizer.d.ts +2 -2
  27. package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
  28. package/dist/synchronizer/CollectionSynchronizer.js +7 -3
  29. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  30. package/dist/synchronizer/DocSynchronizer.js +0 -9
  31. package/package.json +3 -3
  32. package/src/DocHandle.ts +10 -9
  33. package/src/RemoteHeadsSubscriptions.ts +306 -0
  34. package/src/Repo.ts +172 -12
  35. package/src/index.ts +2 -0
  36. package/src/network/NetworkAdapter.ts +19 -1
  37. package/src/network/NetworkSubsystem.ts +17 -6
  38. package/src/network/messages.ts +30 -1
  39. package/src/storage/StorageSubsystem.ts +30 -7
  40. package/src/storage/types.ts +3 -0
  41. package/src/synchronizer/CollectionSynchronizer.ts +11 -5
  42. package/src/synchronizer/DocSynchronizer.ts +0 -12
  43. package/test/DocHandle.test.ts +0 -17
  44. package/test/RemoteHeadsSubscriptions.test.ts +343 -0
  45. package/test/Repo.test.ts +51 -15
  46. package/test/StorageSubsystem.test.ts +28 -6
  47. package/test/remoteHeads.test.ts +135 -0
package/test/Repo.test.ts CHANGED
@@ -3,7 +3,7 @@ import { MessageChannelNetworkAdapter } from "@automerge/automerge-repo-network-
 import assert from "assert"
 import * as Uuid from "uuid"
 import { describe, it } from "vitest"
-import { DocHandleRemoteHeadsPayload, READY } from "../src/DocHandle.js"
+import { READY } from "../src/DocHandle.js"
 import { parseAutomergeUrl } from "../src/AutomergeUrl.js"
 import {
   generateAutomergeUrl,
@@ -28,6 +28,7 @@ import {
 } from "./helpers/generate-large-object.js"
 import { getRandomItem } from "./helpers/getRandomItem.js"
 import { TestDoc } from "./types.js"
+import { StorageId } from "../src/storage/types.js"

 describe("Repo", () => {
   describe("local only", () => {
@@ -337,6 +338,9 @@ describe("Repo", () => {
         d.count = 1
       })

+      // wait because storage id is not initialized immediately
+      await pause()
+
       const initialKeys = storage.keys()

       const repo2 = new Repo({
@@ -395,7 +399,10 @@ describe("Repo", () => {
   })

   describe("with peers (linear network)", async () => {
-    const setup = async ({ connectAlice = true } = {}) => {
+    const setup = async ({
+      connectAlice = true,
+      isCharlieEphemeral = false,
+    } = {}) => {
       const charlieExcludedDocuments: DocumentId[] = []
       const bobExcludedDocuments: DocumentId[] = []

@@ -451,6 +458,7 @@ describe("Repo", () => {
         storage: charlieStorage,
         network: [new MessageChannelNetworkAdapter(cb)],
         peerId: charlie,
+        isEphemeral: isCharlieEphemeral,
       })

       const teardown = () => {
@@ -789,7 +797,7 @@ describe("Repo", () => {
     })

     it("should save sync state of other peers", async () => {
-      const { bobRepo, teardown, charlie } = await setup({
+      const { bobRepo, teardown, charlieRepo } = await setup({
         connectAlice: false,
       })

@@ -803,7 +811,7 @@ describe("Repo", () => {
       // bob should store the sync state of charlie
       const storedSyncState = await bobRepo.storageSubsystem.loadSyncState(
         bobHandle.documentId,
-        charlie
+        await charlieRepo!.storageSubsystem.id()
       )
       const docHeads = A.getHeads(bobHandle.docSync())
       assert.deepStrictEqual(storedSyncState.sharedHeads, docHeads)
@@ -811,6 +819,29 @@ describe("Repo", () => {
       teardown()
     })

+    it("should not save sync state of ephemeral peers", async () => {
+      const { bobRepo, teardown, charlieRepo } = await setup({
+        connectAlice: false,
+        isCharlieEphemeral: true,
+      })
+
+      const bobHandle = bobRepo.create<TestDoc>()
+      bobHandle.change(d => {
+        d.foo = "bar"
+      })
+
+      await pause(200)
+
+      // bob should not store the sync state for charlie because charly is an ephemeral peer
+      const storedSyncState = await bobRepo.storageSubsystem.loadSyncState(
+        bobHandle.documentId,
+        await charlieRepo!.storageSubsystem.id()
+      )
+      assert.deepStrictEqual(storedSyncState, undefined)
+
+      teardown()
+    })
+
     it("should load sync state from storage", async () => {
       const { bobRepo, teardown, charlie, charlieRepo, bobStorage, bob } =
         await setup({
@@ -823,13 +854,15 @@ describe("Repo", () => {
         d.foo = "bar"
       })
       let bobSyncMessages = 0
-      bobRepo.networkSubsystem.on("message", () => {
-        bobSyncMessages++
+      bobRepo.networkSubsystem.on("message", message => {
+        if (message.type === "sync") {
+          bobSyncMessages++
+        }
       })
       await pause(500)

-      // repo has no stored sync state for charlie so we should see two sync messages
-      assert.strictEqual(bobSyncMessages, 2)
+      // repo has no stored sync state for charlie so we should see 3 sync messages
+      assert.strictEqual(bobSyncMessages, 3)

       // setup new repo which uses bob's storage
       const bob2Repo = new Repo({
@@ -850,8 +883,10 @@ describe("Repo", () => {
       // lookup doc we've previously created and count the messages
       bob2Repo.find(bobHandle.documentId)
       let bob2SyncMessages = 0
-      bob2Repo.networkSubsystem.on("message", m => {
-        bob2SyncMessages++
+      bob2Repo.networkSubsystem.on("message", message => {
+        if (message.type === "sync") {
+          bob2SyncMessages++
+        }
       })
       await pause(100)

@@ -866,6 +901,7 @@ describe("Repo", () => {
       const { bobRepo, charlieRepo, teardown } = await setup({
         connectAlice: false,
       })
+      const charliedStorageId = await charlieRepo.storageSubsystem.id()

       const handle = bobRepo.create<TestDoc>()
       handle.change(d => {
@@ -876,11 +912,11 @@ describe("Repo", () => {
       await pause(50)

       const nextRemoteHeadsPromise = new Promise<{
-        peerId: PeerId
+        storageId: StorageId
         heads: A.Heads
       }>(resolve => {
-        handle.on("remote-heads", ({ peerId, heads }) => {
-          resolve({ peerId, heads })
+        handle.on("remote-heads", ({ storageId, heads }) => {
+          resolve({ storageId, heads })
         })
       })

@@ -901,11 +937,11 @@ describe("Repo", () => {
       assert.deepStrictEqual(charlieHeads, bobHeads)

       const nextRemoteHeads = await nextRemoteHeadsPromise
-      assert.deepStrictEqual(nextRemoteHeads.peerId, "charlie")
+      assert.deepStrictEqual(nextRemoteHeads.storageId, charliedStorageId)
       assert.deepStrictEqual(nextRemoteHeads.heads, charlieHeads)

       assert.deepStrictEqual(
-        handle.getRemoteHeads("charlie" as PeerId),
+        handle.getRemoteHeads(charliedStorageId),
         A.getHeads(charlieHandle.docSync())
       )

package/test/StorageSubsystem.test.ts CHANGED
@@ -8,7 +8,9 @@ import { describe, it } from "vitest"
 import { generateAutomergeUrl, parseAutomergeUrl } from "../src/AutomergeUrl.js"
 import { PeerId, cbor } from "../src/index.js"
 import { StorageSubsystem } from "../src/storage/StorageSubsystem.js"
+import { StorageId } from "../src/storage/types.js"
 import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
+import * as Uuid from "uuid"

 const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "automerge-repo-tests"))

@@ -182,12 +184,15 @@

           const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
           const syncState = A.initSyncState()
-          const bob = "bob" as PeerId
+          const bobStorageId = Uuid.v4() as StorageId

           const rawSyncState = A.decodeSyncState(A.encodeSyncState(syncState))

-          await storage.saveSyncState(documentId, bob, syncState)
-          const loadedSyncState = await storage.loadSyncState(documentId, bob)
+          await storage.saveSyncState(documentId, bobStorageId, syncState)
+          const loadedSyncState = await storage.loadSyncState(
+            documentId,
+            bobStorageId
+          )
           assert.deepStrictEqual(loadedSyncState, rawSyncState)
         })

@@ -196,14 +201,31 @@

           const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
           const syncState = A.initSyncState()
-          const bob = "bob" as PeerId
+          const bobStorageId = Uuid.v4() as StorageId

-          await storage.saveSyncState(documentId, bob, syncState)
+          await storage.saveSyncState(documentId, bobStorageId, syncState)
           await storage.removeDoc(documentId)
-          const loadedSyncState = await storage.loadSyncState(documentId, bob)
+          const loadedSyncState = await storage.loadSyncState(
+            documentId,
+            bobStorageId
+          )
           assert.strictEqual(loadedSyncState, undefined)
         })
       })
+
+      describe("storage id", () => {
+        it("generates a unique id", async () => {
+          const storage = new StorageSubsystem(adapter)
+
+          // generate unique id and return same id on subsequence calls
+          const id1 = await storage.id()
+          const id2 = await storage.id()
+
+          assert.strictEqual(Uuid.validate(id1), true)
+          assert.strictEqual(Uuid.validate(id2), true)
+          assert.strictEqual(id1, id2)
+        })
+      })
     })
   })
 })
package/test/remoteHeads.test.ts ADDED
@@ -0,0 +1,135 @@
+import * as A from "@automerge/automerge/next"
+import assert from "assert"
+import { decode } from "cbor-x"
+import { describe, it } from "vitest"
+import { generateAutomergeUrl, parseAutomergeUrl } from "../src/AutomergeUrl.js"
+import { eventPromise } from "../src/helpers/eventPromise.js"
+import { pause } from "../src/helpers/pause.js"
+import {
+  DocHandle,
+  DocHandleRemoteHeadsPayload,
+  PeerId,
+  Repo,
+} from "../src/index.js"
+import { TestDoc } from "./types.js"
+import { MessageChannelNetworkAdapter } from "@automerge/automerge-repo-network-messagechannel"
+import { setTimeout } from "timers/promises"
+import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
+
+describe("DocHandle.remoteHeads", () => {
+  const TEST_ID = parseAutomergeUrl(generateAutomergeUrl()).documentId
+
+  it("should allow to listen for remote head changes and manually read remote heads", async () => {
+    const handle = new DocHandle<TestDoc>(TEST_ID, { isNew: true })
+    const bobRepo = new Repo({
+      peerId: "bob" as PeerId,
+      network: [],
+      storage: new DummyStorageAdapter(),
+    })
+    const bobStorageId = await bobRepo.storageId()
+
+    const remoteHeadsMessagePromise = eventPromise(handle, "remote-heads")
+    handle.setRemoteHeads(bobStorageId, [])
+
+    const remoteHeadsMessage = await remoteHeadsMessagePromise
+
+    assert.strictEqual(remoteHeadsMessage.storageId, bobStorageId)
+    assert.deepStrictEqual(remoteHeadsMessage.heads, [])
+
+    // read remote heads manually
+    assert.deepStrictEqual(handle.getRemoteHeads(bobStorageId), [])
+  })
+
+  it("should report remoteHeads for peers who are several hops away", async () => {
+    // replicates a tab -> service worker -> sync server <- service worker <- tab scenario
+    const leftTab = new Repo({
+      peerId: "left-tab" as PeerId,
+      network: [],
+      sharePolicy: async () => true,
+    })
+    const leftServiceWorker = new Repo({
+      peerId: "left-service-worker" as PeerId,
+      network: [],
+      sharePolicy: async peer => peer === "sync-server",
+      storage: new DummyStorageAdapter(),
+      isEphemeral: false,
+    })
+    const syncServer = new Repo({
+      peerId: "sync-server" as PeerId,
+      network: [],
+      isEphemeral: false,
+      sharePolicy: async () => false,
+      storage: new DummyStorageAdapter(),
+    })
+    const rightServiceWorker = new Repo({
+      peerId: "right-service-worker" as PeerId,
+      network: [],
+      sharePolicy: async peer => peer === "sync-server",
+      isEphemeral: false,
+      storage: new DummyStorageAdapter(),
+    })
+    const rightTab = new Repo({
+      peerId: "right-tab" as PeerId,
+      network: [],
+      sharePolicy: async () => true,
+    })
+
+    // connect them all up
+    connectRepos(leftTab, leftServiceWorker)
+    connectRepos(leftServiceWorker, syncServer)
+    connectRepos(syncServer, rightServiceWorker)
+    connectRepos(rightServiceWorker, rightTab)
+
+    await setTimeout(100)
+
+    // subscribe to the left service worker storage ID on the right tab
+    rightTab.subscribeToRemotes([await leftServiceWorker.storageId()!])
+
+    await setTimeout(100)
+
+    // create a doc in the left tab
+    const leftTabDoc = leftTab.create<TestDoc>()
+    leftTabDoc.change(d => (d.foo = "bar"))
+
+    // wait for the document to arrive on the right tab
+    const rightTabDoc = rightTab.find<TestDoc>(leftTabDoc.url)
+    await rightTabDoc.whenReady()
+
+    // wait for the document to arrive in the left service worker
+    const leftServiceWorkerDoc = leftServiceWorker.find(leftTabDoc.documentId)
+    await leftServiceWorkerDoc.whenReady()
+
+    const leftServiceWorkerStorageId = await leftServiceWorker.storageId()
+    let leftSeenByRightPromise = new Promise<DocHandleRemoteHeadsPayload>(
+      resolve => {
+        rightTabDoc.on("remote-heads", message => {
+          if (message.storageId === leftServiceWorkerStorageId) {
+            resolve(message)
+          }
+        })
+      }
+    )
+
+    // make a change on the right
+    rightTabDoc.change(d => (d.foo = "baz"))
+
+    // wait for the change to be acknolwedged by the left
+    const leftSeenByRight = await leftSeenByRightPromise
+
+    assert.deepStrictEqual(
+      leftSeenByRight.heads,
+      A.getHeads(leftServiceWorkerDoc.docSync())
+    )
+  })
+})
+
+function connectRepos(repo1: Repo, repo2: Repo) {
+  const { port1: leftToRight, port2: rightToLeft } = new MessageChannel()
+
+  repo1.networkSubsystem.addNetworkAdapter(
+    new MessageChannelNetworkAdapter(leftToRight)
+  )
+  repo2.networkSubsystem.addNetworkAdapter(
+    new MessageChannelNetworkAdapter(rightToLeft)
+  )
+}