@automerge/automerge-repo 1.0.0-alpha.0 → 1.0.0-alpha.3
This diff shows the published contents of these package versions as they appear in their respective public registries. It is provided for informational purposes only.
- package/dist/DocCollection.d.ts +2 -1
- package/dist/DocCollection.d.ts.map +1 -1
- package/dist/DocCollection.js +17 -8
- package/dist/DocHandle.d.ts +27 -7
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +47 -23
- package/dist/DocUrl.d.ts +3 -3
- package/dist/DocUrl.js +9 -9
- package/dist/EphemeralData.d.ts +8 -16
- package/dist/EphemeralData.d.ts.map +1 -1
- package/dist/EphemeralData.js +1 -28
- package/dist/Repo.d.ts +0 -2
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +18 -36
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/headsAreSame.js +1 -4
- package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.js +15 -13
- package/dist/index.d.ts +2 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/network/NetworkAdapter.d.ts +4 -13
- package/dist/network/NetworkAdapter.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.d.ts +5 -4
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +39 -25
- package/dist/network/messages.d.ts +57 -0
- package/dist/network/messages.d.ts.map +1 -0
- package/dist/network/messages.js +21 -0
- package/dist/storage/StorageSubsystem.d.ts +2 -2
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +36 -6
- package/dist/synchronizer/CollectionSynchronizer.d.ts +3 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +19 -13
- package/dist/synchronizer/DocSynchronizer.d.ts +9 -3
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +145 -29
- package/dist/synchronizer/Synchronizer.d.ts +3 -4
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/types.d.ts +1 -3
- package/dist/types.d.ts.map +1 -1
- package/fuzz/fuzz.ts +4 -4
- package/package.json +3 -3
- package/src/DocCollection.ts +19 -9
- package/src/DocHandle.ts +82 -37
- package/src/DocUrl.ts +9 -9
- package/src/EphemeralData.ts +6 -36
- package/src/Repo.ts +20 -52
- package/src/helpers/headsAreSame.ts +3 -5
- package/src/helpers/tests/network-adapter-tests.ts +18 -14
- package/src/index.ts +12 -2
- package/src/network/NetworkAdapter.ts +4 -20
- package/src/network/NetworkSubsystem.ts +61 -38
- package/src/network/messages.ts +123 -0
- package/src/storage/StorageSubsystem.ts +42 -6
- package/src/synchronizer/CollectionSynchronizer.ts +38 -19
- package/src/synchronizer/DocSynchronizer.ts +196 -38
- package/src/synchronizer/Synchronizer.ts +3 -8
- package/src/types.ts +4 -1
- package/test/CollectionSynchronizer.test.ts +6 -7
- package/test/DocHandle.test.ts +36 -22
- package/test/DocSynchronizer.test.ts +85 -9
- package/test/Repo.test.ts +279 -59
- package/test/StorageSubsystem.test.ts +9 -9
- package/test/helpers/DummyNetworkAdapter.ts +1 -1
- package/tsconfig.json +2 -1
- package/test/EphemeralData.test.ts +0 -44
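
Read together, the test diffs below show the visible API shifts from alpha.0 to alpha.3: DocSynchronizer.beginSync now takes an array of peer IDs instead of a single peer; messages carry a `type` discriminator ("sync", "request", "doc-unavailable", "ephemeral") defined in the new src/network/messages.ts; DocHandle gains request(), broadcast(), and an "unavailable" event, with doc() able to resolve to undefined; and StorageSubsystem's persistence pair is now saveDoc/loadDoc. A minimal sketch of the new availability flow, inferred from the tests; the `url` constant and the document shape are illustrative, not taken from the diff:

import { Repo, AutomergeUrl } from "@automerge/automerge-repo"

declare const url: AutomergeUrl // a document URL we never had locally (illustrative)

const repo = new Repo({ network: [] })
const handle = repo.find<{ foo: string }>(url)

handle.on("unavailable", () => {
  // fires when neither storage nor any connected peer can supply the document
})

const doc = await handle.doc() // in alpha.3 this can resolve to undefined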
package/test/DocSynchronizer.test.ts
CHANGED

@@ -5,16 +5,25 @@ import { DocSynchronizer } from "../src/synchronizer/DocSynchronizer.js"
 import { eventPromise } from "../src/helpers/eventPromise.js"
 import { TestDoc } from "./types.js"
 import { parseAutomergeUrl, generateAutomergeUrl } from "../src/DocUrl.js"
+import { SyncMessage } from "../src/index.js"
+import {
+  DocumentUnavailableMessage,
+  DocumentUnavailableMessageContents,
+  MessageContents,
+  RequestMessageContents,
+  SyncMessageContents,
+} from "../src/network/messages.js"
 
 const alice = "alice" as PeerId
 const bob = "bob" as PeerId
+const charlie = "charlie" as PeerId
 
 describe("DocSynchronizer", () => {
   let handle: DocHandle<TestDoc>
   let docSynchronizer: DocSynchronizer
 
   const setup = () => {
-    const docId = parseAutomergeUrl(generateAutomergeUrl()).
+    const docId = parseAutomergeUrl(generateAutomergeUrl()).documentId
     handle = new DocHandle<TestDoc>(docId, { isNew: true })
     docSynchronizer = new DocSynchronizer(handle)
     return { handle, docSynchronizer }
@@ -27,19 +36,21 @@ describe("DocSynchronizer", () => {
 
   it("emits a syncMessage when beginSync is called", async () => {
     const { docSynchronizer } = setup()
-    docSynchronizer.beginSync(alice)
-    const { targetId } = await eventPromise(docSynchronizer, "message")
+    docSynchronizer.beginSync([alice])
+    const { targetId, type } = await eventPromise(docSynchronizer, "message")
+    assert.equal(type, "sync")
     assert.equal(targetId, "alice")
   })
 
   it("emits a syncMessage to peers when the handle is updated", async () => {
     const { handle, docSynchronizer } = setup()
-    docSynchronizer.beginSync(alice)
+    docSynchronizer.beginSync([alice])
     handle.change(doc => {
       doc.foo = "bar"
     })
-    const { targetId } = await eventPromise(docSynchronizer, "message")
+    const { targetId, type } = await eventPromise(docSynchronizer, "message")
     assert.equal(targetId, "alice")
+    assert.equal(type, "sync")
   })
 
   it("still syncs with a peer after it disconnects and reconnects", async () => {
@@ -47,23 +58,88 @@ describe("DocSynchronizer", () => {
 
     // first connection
     {
-      docSynchronizer.beginSync(bob)
+      docSynchronizer.beginSync([bob])
       handle.change(doc => {
         doc.foo = "a change"
       })
-      const { targetId } = await eventPromise(docSynchronizer, "message")
+      const { targetId, type } = await eventPromise(docSynchronizer, "message")
       assert.equal(targetId, "bob")
+      assert.equal(type, "sync")
       docSynchronizer.endSync(bob)
     }
 
     // second connection
     {
-      docSynchronizer.beginSync(bob)
+      docSynchronizer.beginSync([bob])
       handle.change(doc => {
         doc.foo = "another change"
       })
-      const { targetId } = await eventPromise(docSynchronizer, "message")
+      const { targetId, type } = await eventPromise(docSynchronizer, "message")
       assert.equal(targetId, "bob")
+      assert.equal(type, "sync")
     }
   })
+
+  it("emits a requestMessage if the local handle is being requested", async () => {
+    const docId = parseAutomergeUrl(generateAutomergeUrl()).documentId
+
+    const handle = new DocHandle<TestDoc>(docId, { isNew: false })
+    docSynchronizer = new DocSynchronizer(handle)
+    docSynchronizer.beginSync([alice])
+    handle.request()
+    const message = await eventPromise(docSynchronizer, "message")
+    assert.equal(message.targetId, "alice")
+    assert.equal(message.type, "request")
+  })
+
+  it("emits the correct sequence of messages when a document is not found then not available", async () => {
+    const docId = parseAutomergeUrl(generateAutomergeUrl()).documentId
+
+    const bobHandle = new DocHandle<TestDoc>(docId, { isNew: false })
+    const bobDocSynchronizer = new DocSynchronizer(bobHandle)
+    bobDocSynchronizer.beginSync([alice])
+    bobHandle.request()
+    const message = await eventPromise(bobDocSynchronizer, "message")
+
+    const aliceHandle = new DocHandle<TestDoc>(docId, { isNew: false })
+    const aliceDocSynchronizer = new DocSynchronizer(aliceHandle)
+    aliceHandle.request()
+
+    aliceDocSynchronizer.receiveSyncMessage({ ...message, senderId: bob })
+    aliceDocSynchronizer.beginSync([charlie, bob])
+
+    const [charlieMessage, bobMessage] = await new Promise<MessageContents[]>(
+      resolve => {
+        const messages: MessageContents[] = []
+        aliceDocSynchronizer.on("message", message => {
+          messages.push(message)
+          if (messages.length === 2) {
+            resolve(messages)
+          }
+        })
+      }
+    )
+
+    // the response should be a sync message, not a request message
+    assert.equal(charlieMessage.targetId, "charlie")
+    assert.equal(charlieMessage.type, "request")
+    assert.equal(bobMessage.targetId, "bob")
+    assert.equal(bobMessage.type, "sync")
+
+    const docUnavailableMessage = {
+      type: "doc-unavailable",
+      targetId: alice,
+      senderId: charlie,
+      documentId: docId,
+    } satisfies DocumentUnavailableMessage
+
+    const p = eventPromise(aliceDocSynchronizer, "message")
+
+    aliceDocSynchronizer.receiveMessage(docUnavailableMessage)
+
+    const message2 = await p
+
+    assert.equal(message2.targetId, "bob")
+    assert.equal(message2.type, "doc-unavailable")
+  })
 })
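
The type assertions above exercise the new message envelope from src/network/messages.ts. A hedged sketch of a consumer discriminating on the `type` field, using the same import the test uses; the case list covers only the variants these tests assert on (see messages.ts for the full union), and `targetId` is assumed present on every variant because the tests read it off each message:

import { MessageContents } from "../src/network/messages.js"

const describeMessage = (message: MessageContents): string => {
  switch (message.type) {
    case "sync": // ongoing sync traffic with a peer that has the document
      return `sync -> ${message.targetId}`
    case "request": // we lack the document locally and are asking for it
      return `request -> ${message.targetId}`
    case "doc-unavailable": // reply: the sender cannot supply the document
      return `doc-unavailable -> ${message.targetId}`
    default:
      return `other -> ${message.targetId}`
  }
}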
package/test/Repo.test.ts
CHANGED
@@ -1,10 +1,9 @@
 import assert from "assert"
 import { MessageChannelNetworkAdapter } from "@automerge/automerge-repo-network-messagechannel"
-import
+import { BroadcastChannelNetworkAdapter } from "@automerge/automerge-repo-network-broadcastchannel"
 
 import {
   AutomergeUrl,
-  ChannelId,
   DocHandle,
   DocumentId,
   PeerId,
@@ -17,11 +16,8 @@ import { DummyNetworkAdapter } from "./helpers/DummyNetworkAdapter.js"
 import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
 import { getRandomItem } from "./helpers/getRandomItem.js"
 import { TestDoc } from "./types.js"
-import {
-
-  generateAutomergeUrl,
-  stringifyAutomergeUrl,
-} from "../src/DocUrl"
+import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl"
+import { READY } from "../src/DocHandle"
 
 describe("Repo", () => {
   describe("single repo", () => {
@@ -57,7 +53,7 @@ describe("Repo", () => {
       const v = await handle.doc()
       assert.equal(handle.isReady(), true)
 
-      assert.equal(v
+      assert.equal(v?.foo, "bar")
     })
 
     it("throws an error if we try to find a handle with an invalid AutomergeUrl", async () => {
@@ -74,10 +70,17 @@ describe("Repo", () => {
       const handle = repo.find<TestDoc>(generateAutomergeUrl())
       assert.equal(handle.isReady(), false)
 
-
-
-
-
+      const doc = await handle.doc()
+      assert.equal(doc, undefined)
+    })
+
+    it("fires an 'unavailable' event when you don't have the document locally and network to connect to", async () => {
+      const { repo } = setup()
+      const url = generateAutomergeUrl()
+      const handle = repo.find<TestDoc>(url)
+      assert.equal(handle.isReady(), false)
+
+      await eventPromise(handle, "unavailable")
     })
 
     it("can find a created document", async () => {
@@ -94,7 +97,7 @@ describe("Repo", () => {
       assert.equal(handle.isReady(), true)
 
       const v = await bobHandle.doc()
-      assert.equal(v
+      assert.equal(v?.foo, "bar")
     })
 
     it("saves the document when changed and can find it again", async () => {
@@ -117,7 +120,7 @@ describe("Repo", () => {
       const bobHandle = repo2.find<TestDoc>(handle.url)
 
       const v = await bobHandle.doc()
-      assert.equal(v
+      assert.equal(v?.foo, "bar")
     })
 
     it("can delete an existing document", async () => {
@@ -172,18 +175,93 @@ describe("Repo", () => {
       })
       assert.equal(handle.isReady(), true)
 
-      repo.on("delete-document", ({
-        assert.equal(
+      repo.on("delete-document", ({ documentId }) => {
+        assert.equal(documentId, handle.documentId)
 
         done()
       })
 
       repo.delete(handle.documentId)
     })
+
+    it("storage state doesn't change across reloads when the document hasn't changed", async () => {
+      const storage = new DummyStorageAdapter()
+
+      const repo = new Repo({
+        storage,
+        network: [],
+      })
+
+      const handle = repo.create<{ count: number }>()
+
+      handle.change(d => {
+        d.count = 0
+      })
+      handle.change(d => {
+        d.count = 1
+      })
+
+      const initialKeys = storage.keys()
+
+      const repo2 = new Repo({
+        storage,
+        network: [],
+      })
+      const handle2 = repo2.find(handle.url)
+      await handle2.doc()
+
+      assert.deepEqual(storage.keys(), initialKeys)
+    })
+
+    it("doesn't delete a document from storage when we refresh", async () => {
+      const storage = new DummyStorageAdapter()
+
+      const repo = new Repo({
+        storage,
+        network: [],
+      })
+
+      const handle = repo.create<{ count: number }>()
+
+      handle.change(d => {
+        d.count = 0
+      })
+      handle.change(d => {
+        d.count = 1
+      })
+
+      for (let i = 0; i < 3; i++) {
+        const repo2 = new Repo({
+          storage,
+          network: [],
+        })
+        const handle2 = repo2.find(handle.url)
+        await handle2.doc()
+
+        assert(storage.keys().length !== 0)
+      }
+    })
   })
 
   describe("sync", async () => {
-    const
+    const charlieExcludedDocuments: DocumentId[] = []
+    const bobExcludedDocuments: DocumentId[] = []
+
+    const sharePolicy: SharePolicy = async (peerId, documentId) => {
+      if (documentId === undefined) return false
+
+      // make sure that charlie never gets excluded documents
+      if (charlieExcludedDocuments.includes(documentId) && peerId === "charlie")
+        return false
+
+      // make sure that bob never gets excluded documents
+      if (bobExcludedDocuments.includes(documentId) && peerId === "bob")
+        return false
+
+      return true
+    }
+
+    const setupRepos = (connectAlice = true) => {
       // Set up three repos; connect Alice to Bob, and Bob to Charlie
 
      const aliceBobChannel = new MessageChannel()
@@ -192,28 +270,10 @@ describe("Repo", () => {
      const { port1: aliceToBob, port2: bobToAlice } = aliceBobChannel
      const { port1: bobToCharlie, port2: charlieToBob } = bobCharlieChannel
 
-      const
-      const bobExcludedDocuments: DocumentId[] = []
-
-      const sharePolicy: SharePolicy = async (peerId, documentId) => {
-        if (documentId === undefined) return false
-
-        // make sure that charlie never gets excluded documents
-        if (
-          charlieExcludedDocuments.includes(documentId) &&
-          peerId === "charlie"
-        )
-          return false
-
-        // make sure that charlie never gets excluded documents
-        if (bobExcludedDocuments.includes(documentId) && peerId === "bob")
-          return false
-
-        return true
-      }
+      const aliceNetworkAdapter = new MessageChannelNetworkAdapter(aliceToBob)
 
      const aliceRepo = new Repo({
-        network: [
+        network: connectAlice ? [aliceNetworkAdapter] : [],
        peerId: "alice" as PeerId,
        sharePolicy,
      })
@@ -232,6 +292,24 @@ describe("Repo", () => {
        peerId: "charlie" as PeerId,
      })
 
+      const teardown = () => {
+        aliceBobChannel.port1.close()
+        bobCharlieChannel.port1.close()
+      }
+
+      return {
+        teardown,
+        aliceRepo,
+        bobRepo,
+        charlieRepo,
+        aliceNetworkAdapter,
+      }
+    }
+
+    const setup = async (connectAlice = true) => {
+      const { teardown, aliceRepo, bobRepo, charlieRepo, aliceNetworkAdapter } =
+        setupRepos(connectAlice)
+
      const aliceHandle = aliceRepo.create<TestDoc>()
      aliceHandle.change(d => {
        d.foo = "bar"
@@ -252,16 +330,13 @@ describe("Repo", () => {
      })
 
      await Promise.all([
-        eventPromise(aliceRepo.networkSubsystem, "peer"),
+        ...(connectAlice
+          ? [eventPromise(aliceRepo.networkSubsystem, "peer")]
+          : []),
        eventPromise(bobRepo.networkSubsystem, "peer"),
        eventPromise(charlieRepo.networkSubsystem, "peer"),
      ])
 
-      const teardown = () => {
-        aliceBobChannel.port1.close()
-        bobCharlieChannel.port1.close()
-      }
-
      return {
        aliceRepo,
        bobRepo,
@@ -270,6 +345,7 @@ describe("Repo", () => {
        notForCharlie,
        notForBob,
        teardown,
+        aliceNetworkAdapter,
      }
    }
 
@@ -336,13 +412,59 @@ describe("Repo", () => {
 
    it("doesn't find a document which doesn't exist anywhere on the network", async () => {
      const { charlieRepo } = await setup()
-      const
+      const url = generateAutomergeUrl()
+      const handle = charlieRepo.find<TestDoc>(url)
      assert.equal(handle.isReady(), false)
 
-
-
-
-
+      const doc = await handle.doc()
+      assert.equal(doc, undefined)
+    })
+
+    it("fires an 'unavailable' event when a document is not available on the network", async () => {
+      const { charlieRepo } = await setup()
+      const url = generateAutomergeUrl()
+      const handle = charlieRepo.find<TestDoc>(url)
+      assert.equal(handle.isReady(), false)
+
+      await Promise.all([
+        eventPromise(handle, "unavailable"),
+        eventPromise(charlieRepo, "unavailable-document"),
+      ])
+
+      // make sure it fires a second time if the doc is still unavailable
+      const handle2 = charlieRepo.find<TestDoc>(url)
+      assert.equal(handle2.isReady(), false)
+      await eventPromise(handle2, "unavailable")
+    })
+
+    it("a previously unavailable document syncs over the network if a peer with it connects", async () => {
+      const {
+        charlieRepo,
+        notForCharlie,
+        aliceRepo,
+        teardown,
+        aliceNetworkAdapter,
+      } = await setup(false)
+
+      const url = stringifyAutomergeUrl({ documentId: notForCharlie })
+      const handle = charlieRepo.find<TestDoc>(url)
+      assert.equal(handle.isReady(), false)
+
+      await eventPromise(handle, "unavailable")
+
+      aliceRepo.networkSubsystem.addNetworkAdapter(aliceNetworkAdapter)
+
+      await eventPromise(aliceRepo.networkSubsystem, "peer")
+
+      const doc = await handle.doc([READY])
+      assert.deepStrictEqual(doc, { foo: "baz" })
+
+      // an additional find should also return the correct resolved document
+      const handle2 = charlieRepo.find<TestDoc>(url)
+      const doc2 = await handle2.doc()
+      assert.deepStrictEqual(doc2, { foo: "baz" })
+
+      teardown()
    })
 
    it("a deleted document from charlieRepo can be refetched", async () => {
@@ -367,17 +489,40 @@ describe("Repo", () => {
      teardown()
    })
 
-
-    const
+    const setupMeshNetwork = async () => {
+      const aliceRepo = new Repo({
+        network: [new BroadcastChannelNetworkAdapter()],
+        peerId: "alice" as PeerId,
+      })
+
+      const bobRepo = new Repo({
+        network: [new BroadcastChannelNetworkAdapter()],
+        peerId: "bob" as PeerId,
+      })
+
+      const charlieRepo = new Repo({
+        network: [new BroadcastChannelNetworkAdapter()],
+        peerId: "charlie" as PeerId,
+      })
+
+      // pause to let the network set up
+      await pause(50)
 
-
-
+      return {
+        aliceRepo,
+        bobRepo,
+        charlieRepo,
+      }
+    }
 
-
-    const
+    it("can emit an 'unavailable' event when it's not found on the network", async () => {
+      const { charlieRepo } = await setupMeshNetwork()
 
-
-
+      const url = generateAutomergeUrl()
+      const handle = charlieRepo.find<TestDoc>(url)
+      assert.equal(handle.isReady(), false)
+
+      await eventPromise(handle, "unavailable")
    })
 
    it("syncs a bunch of changes", async () => {
@@ -393,9 +538,9 @@ describe("Repo", () => {
        const doc =
          Math.random() < 0.5
            ? // heads, create a new doc
-
+              repo.create<TestDoc>()
            : // tails, pick a random doc
-
+              (getRandomItem(docs) as DocHandle<TestDoc>)
 
        // make sure the doc is ready
        if (!doc.isReady()) {
@@ -411,6 +556,81 @@ describe("Repo", () => {
 
      teardown()
    })
-  })
 
+    it("can broadcast a message to peers with the correct document only", async () => {
+      const { aliceRepo, bobRepo, charlieRepo, notForCharlie, teardown } =
+        await setup()
+
+      const data = { presence: "alice" }
+
+      const aliceHandle = aliceRepo.find<TestDoc>(
+        stringifyAutomergeUrl({ documentId: notForCharlie })
+      )
+      const bobHandle = bobRepo.find<TestDoc>(
+        stringifyAutomergeUrl({ documentId: notForCharlie })
+      )
+
+      await pause(50)
+
+      const charliePromise = new Promise<void>((resolve, reject) => {
+        charlieRepo.networkSubsystem.on("message", message => {
+          if (
+            message.type === "ephemeral" &&
+            message.documentId === notForCharlie
+          ) {
+            reject(new Error("Charlie should not receive this message"))
+          }
+        })
+        setTimeout(resolve, 100)
+      })
+
+      aliceHandle.broadcast(data)
+      const { message } = await eventPromise(bobHandle, "ephemeral-message")
+
+      assert.deepStrictEqual(message, data)
+      assert.equal(charlieRepo.handles[notForCharlie], undefined, "charlie no")
+
+      await charliePromise
+      teardown()
+    })
+
+    it("can broadcast a message without entering into an infinite loop", async () => {
+      const { aliceRepo, bobRepo, charlieRepo } = await setupMeshNetwork()
+
+      // pause to let the network set up
+      await pause(50)
+      const message = { presence: "alex" }
+
+      const aliceHandle = aliceRepo.create<TestDoc>()
+
+      const bobHandle = bobRepo.find(aliceHandle.url)
+      const charlieHandle = charlieRepo.find(aliceHandle.url)
+
+      const aliceDoesntGetIt = new Promise<void>((resolve, reject) => {
+        setTimeout(() => {
+          resolve()
+        }, 100)
+
+        aliceHandle.on("ephemeral-message", () => {
+          reject("alice got the message")
+        })
+      })
+
+      const bobGotIt = eventPromise(bobHandle, "ephemeral-message")
+      const charlieGotIt = eventPromise(charlieHandle, "ephemeral-message")
+
+      // let things get in sync and peers meet one another
+      await pause(50)
+      aliceHandle.broadcast(message)
+
+      const [bob, charlie] = await Promise.all([
+        bobGotIt,
+        charlieGotIt,
+        aliceDoesntGetIt,
+      ])
+
+      assert.deepStrictEqual(bob.message, message)
+      assert.deepStrictEqual(charlie.message, message)
+    })
+  })
 })
package/test/StorageSubsystem.test.ts
CHANGED

@@ -31,12 +31,11 @@ describe("StorageSubsystem", () => {
    })
 
    // save it to storage
-    const key = parseAutomergeUrl(generateAutomergeUrl()).
-    await storage.
+    const key = parseAutomergeUrl(generateAutomergeUrl()).documentId
+    await storage.saveDoc(key, doc)
 
    // reload it from storage
-    const
-    const reloadedDoc = A.load<TestDoc>(reloadedDocBinary)
+    const reloadedDoc = await storage.loadDoc(key)
 
    // check that it's the same doc
    assert.deepStrictEqual(reloadedDoc, doc)
@@ -53,15 +52,16 @@ describe("StorageSubsystem", () => {
    })
 
    // save it to storage
-    const key = parseAutomergeUrl(generateAutomergeUrl()).
-    storage.
+    const key = parseAutomergeUrl(generateAutomergeUrl()).documentId
+    storage.saveDoc(key, doc)
 
    // create new storage subsystem to simulate a new process
    const storage2 = new StorageSubsystem(adapter)
 
    // reload it from storage
-    const
-
+    const reloadedDoc = await storage2.loadDoc(key)
+
+    assert(reloadedDoc, "doc should be loaded")
 
    // make a change
    const changedDoc = A.change<any>(reloadedDoc, "test 2", d => {
@@ -69,7 +69,7 @@ describe("StorageSubsystem", () => {
    })
 
    // save it to storage
-    storage2.
+    storage2.saveDoc(key, changedDoc)
 
    // check that the storage adapter contains the correct keys
    assert(adapter.keys().some(k => k.startsWith(`${key}.incremental.`)))