@automerge/automerge-repo 1.0.0-alpha.2 → 1.0.0-alpha.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/DocCollection.d.ts +4 -2
- package/dist/DocCollection.d.ts.map +1 -1
- package/dist/DocCollection.js +20 -11
- package/dist/DocHandle.d.ts +34 -6
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +69 -9
- package/dist/DocUrl.d.ts +4 -4
- package/dist/DocUrl.d.ts.map +1 -1
- package/dist/DocUrl.js +9 -9
- package/dist/EphemeralData.d.ts +8 -16
- package/dist/EphemeralData.d.ts.map +1 -1
- package/dist/EphemeralData.js +1 -28
- package/dist/Repo.d.ts +0 -2
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +37 -39
- package/dist/helpers/cbor.d.ts +4 -0
- package/dist/helpers/cbor.d.ts.map +1 -0
- package/dist/helpers/cbor.js +8 -0
- package/dist/helpers/eventPromise.d.ts +1 -1
- package/dist/helpers/eventPromise.d.ts.map +1 -1
- package/dist/helpers/headsAreSame.d.ts +0 -1
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.js +15 -13
- package/dist/index.d.ts +3 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -0
- package/dist/network/NetworkAdapter.d.ts +6 -15
- package/dist/network/NetworkAdapter.d.ts.map +1 -1
- package/dist/network/NetworkAdapter.js +1 -1
- package/dist/network/NetworkSubsystem.d.ts +9 -6
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +69 -32
- package/dist/network/messages.d.ts +57 -0
- package/dist/network/messages.d.ts.map +1 -0
- package/dist/network/messages.js +21 -0
- package/dist/storage/StorageSubsystem.d.ts +1 -1
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +2 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts +3 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +19 -13
- package/dist/synchronizer/DocSynchronizer.d.ts +9 -3
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +149 -34
- package/dist/synchronizer/Synchronizer.d.ts +4 -5
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/synchronizer/Synchronizer.js +1 -1
- package/dist/types.d.ts +1 -3
- package/dist/types.d.ts.map +1 -1
- package/fuzz/fuzz.ts +5 -5
- package/package.json +3 -3
- package/src/DocCollection.ts +23 -12
- package/src/DocHandle.ts +120 -13
- package/src/DocUrl.ts +10 -10
- package/src/EphemeralData.ts +6 -36
- package/src/Repo.ts +37 -55
- package/src/helpers/cbor.ts +10 -0
- package/src/helpers/eventPromise.ts +1 -1
- package/src/helpers/headsAreSame.ts +1 -1
- package/src/helpers/tests/network-adapter-tests.ts +18 -14
- package/src/index.ts +14 -2
- package/src/network/NetworkAdapter.ts +6 -22
- package/src/network/NetworkSubsystem.ts +94 -44
- package/src/network/messages.ts +123 -0
- package/src/storage/StorageSubsystem.ts +2 -2
- package/src/synchronizer/CollectionSynchronizer.ts +38 -19
- package/src/synchronizer/DocSynchronizer.ts +201 -43
- package/src/synchronizer/Synchronizer.ts +4 -9
- package/src/types.ts +4 -1
- package/test/CollectionSynchronizer.test.ts +6 -7
- package/test/DocCollection.test.ts +2 -2
- package/test/DocHandle.test.ts +32 -17
- package/test/DocSynchronizer.test.ts +85 -9
- package/test/Repo.test.ts +267 -63
- package/test/StorageSubsystem.test.ts +4 -5
- package/test/helpers/DummyNetworkAdapter.ts +12 -3
- package/test/helpers/DummyStorageAdapter.ts +1 -1
- package/tsconfig.json +4 -3
- package/test/EphemeralData.test.ts +0 -44
package/src/DocHandle.ts
CHANGED
@@ -1,6 +1,6 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import debug from "debug"
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import {
 assign,
 BaseActionObject,
@@ -17,8 +17,9 @@ import { waitFor } from "xstate/lib/waitFor.js"
 import { headsAreSame } from "./helpers/headsAreSame.js"
 import { pause } from "./helpers/pause.js"
 import { TimeoutError, withTimeout } from "./helpers/withTimeout.js"
-import type {
+import type { DocumentId, PeerId, AutomergeUrl } from "./types.js"
 import { stringifyAutomergeUrl } from "./DocUrl.js"
+import { encode } from "./helpers/cbor.js"

 /** DocHandle is a wrapper around a single Automerge document that lets us listen for changes. */
 export class DocHandle<T> //
@@ -42,7 +43,12 @@ export class DocHandle<T> //
 this.#log = debug(`automerge-repo:dochandle:${this.documentId.slice(0, 5)}`)

 // initial doc
-
+let doc = A.init<T>()
+
+// Make an empty change so that we have something to save to disk
+if (isNew) {
+doc = A.emptyChange(doc, {})
+}

 /**
 * Internally we use a state machine to orchestrate document loading and/or syncing, in order to
@@ -81,6 +87,8 @@ export class DocHandle<T> //
 UPDATE: { actions: "onUpdate", target: READY },
 // REQUEST is called by the Repo if the document is not found in storage
 REQUEST: { target: REQUESTING },
+// AWAIT_NETWORK is called by the repo if the document is not found in storage but the network is not yet ready
+AWAIT_NETWORK: { target: AWAITING_NETWORK },
 DELETE: { actions: "onDelete", target: DELETED },
 },
 after: [
@@ -90,8 +98,17 @@ export class DocHandle<T> //
 },
 ],
 },
+awaitingNetwork: {
+on: {
+NETWORK_READY: { target: REQUESTING },
+}
+},
 requesting: {
 on: {
+MARK_UNAVAILABLE: {
+target: UNAVAILABLE,
+actions: "onUnavailable",
+},
 // UPDATE is called by the Repo when we receive changes from the network
 UPDATE: { actions: "onUpdate" },
 // REQUEST_COMPLETE is called from `onUpdate` when the doc has been fully loaded from the network
@@ -118,6 +135,14 @@ export class DocHandle<T> //
 deleted: {
 type: "final",
 },
+unavailable: {
+on: {
+UPDATE: { actions: "onUpdate" },
+// REQUEST_COMPLETE is called from `onUpdate` when the doc has been fully loaded from the network
+REQUEST_COMPLETE: { target: READY },
+DELETE: { actions: "onDelete", target: DELETED },
+},
+},
 },
 },

@@ -136,6 +161,12 @@ export class DocHandle<T> //
 this.emit("delete", { handle: this })
 return { doc: undefined }
 }),
+onUnavailable: assign(context => {
+const { doc } = context
+
+this.emit("unavailable", { handle: this })
+return { doc }
+}),
 },
 }
 )
@@ -144,9 +175,12 @@ export class DocHandle<T> //
 const oldDoc = history?.context?.doc
 const newDoc = context.doc

-
+this.#log(`${history?.value}: ${event.type} → ${state}`, newDoc)

-const docChanged =
+const docChanged =
+newDoc &&
+oldDoc &&
+!headsAreSame(A.getHeads(newDoc), A.getHeads(oldDoc))
 if (docChanged) {
 this.emit("heads-changed", { handle: this, doc: newDoc })

@@ -210,6 +244,7 @@ export class DocHandle<T> //
 * @returns true if the document has been marked as deleted
 */
 isDeleted = () => this.inState([HandleState.DELETED])
+isUnavailable = () => this.inState([HandleState.UNAVAILABLE])
 inState = (states: HandleState[]) =>
 states.some(this.#machine?.getSnapshot().matches)

@@ -234,7 +269,9 @@ export class DocHandle<T> //
 *
 * @param {awaitStates=[READY]} optional states to wait for, such as "LOADING". mostly for internal use.
 */
-async doc(
+async doc(
+awaitStates: HandleState[] = [READY, UNAVAILABLE]
+): Promise<A.Doc<T> | undefined> {
 await pause() // yield one tick because reasons
 try {
 // wait for the document to enter one of the desired states
@@ -245,7 +282,7 @@ export class DocHandle<T> //
 else throw error
 }
 // Return the document
-return this.#doc
+return !this.isUnavailable() ? this.#doc : undefined
 }

 /**
@@ -308,15 +345,40 @@ export class DocHandle<T> //
 })
 }

+unavailable() {
+this.#machine.send(MARK_UNAVAILABLE)
+}
+
 /** `request` is called by the repo when the document is not found in storage */
 request() {
 if (this.#state === LOADING) this.#machine.send(REQUEST)
 }

+awaitNetwork() {
+if (this.#state === LOADING) this.#machine.send(AWAIT_NETWORK)
+}
+
+networkReady() {
+if (this.#state === AWAITING_NETWORK) this.#machine.send(NETWORK_READY)
+}
+
 /** `delete` is called by the repo when the document is deleted */
 delete() {
 this.#machine.send(DELETE)
 }
+
+/** `broadcast` sends an arbitrary ephemeral message out to all reachable peers who would receive sync messages from you
+* it has no guarantee of delivery, and is not persisted to the underlying automerge doc in any way.
+* messages will have a sending PeerId but this is *not* a useful user identifier.
+* a user could have multiple tabs open and would appear as multiple PeerIds.
+* every message source must have a unique PeerId.
+*/
+broadcast(message: any) {
+this.emit("ephemeral-message-outbound", {
+handle: this,
+data: encode(message),
+})
+}
 }

 // WRAPPER CLASS TYPES
@@ -328,7 +390,7 @@ interface DocHandleOptions {

 export interface DocHandleMessagePayload {
 destinationId: PeerId
-
+documentId: DocumentId
 data: Uint8Array
 }

@@ -348,10 +410,26 @@ export interface DocHandleChangePayload<T> {
 patchInfo: A.PatchInfo<T>
 }

+export interface DocHandleEphemeralMessagePayload {
+handle: DocHandle<any>
+senderId: PeerId
+message: unknown
+}
+
+export interface DocHandleOutboundEphemeralMessagePayload {
+handle: DocHandle<any>
+data: Uint8Array
+}
+
 export interface DocHandleEvents<T> {
 "heads-changed": (payload: DocHandleEncodedChangePayload<T>) => void
 change: (payload: DocHandleChangePayload<T>) => void
 delete: (payload: DocHandleDeletePayload<T>) => void
+unavailable: (payload: DocHandleDeletePayload<T>) => void
+"ephemeral-message": (payload: DocHandleEphemeralMessagePayload) => void
+"ephemeral-message-outbound": (
+payload: DocHandleOutboundEphemeralMessagePayload
+) => void
 }

 // STATE MACHINE TYPES
@@ -361,10 +439,12 @@ export interface DocHandleEvents<T> {
 export const HandleState = {
 IDLE: "idle",
 LOADING: "loading",
+AWAITING_NETWORK: "awaitingNetwork",
 REQUESTING: "requesting",
 READY: "ready",
 FAILED: "failed",
 DELETED: "deleted",
+UNAVAILABLE: "unavailable",
 } as const
 export type HandleState = (typeof HandleState)[keyof typeof HandleState]

@@ -389,9 +469,12 @@ export const Event = {
 FIND: "FIND",
 REQUEST: "REQUEST",
 REQUEST_COMPLETE: "REQUEST_COMPLETE",
+AWAIT_NETWORK: "AWAIT_NETWORK",
+NETWORK_READY: "NETWORK_READY",
 UPDATE: "UPDATE",
 TIMEOUT: "TIMEOUT",
 DELETE: "DELETE",
+MARK_UNAVAILABLE: "MARK_UNAVAILABLE",
 } as const
 type Event = (typeof Event)[keyof typeof Event]

@@ -405,6 +488,9 @@ type UpdateEvent<T> = {
 payload: { callback: (doc: A.Doc<T>) => A.Doc<T> }
 }
 type TimeoutEvent = { type: typeof TIMEOUT }
+type MarkUnavailableEvent = { type: typeof MARK_UNAVAILABLE }
+type AwaitNetworkEvent = { type: typeof AWAIT_NETWORK }
+type NetworkReadyEvent = { type: typeof NETWORK_READY }

 type DocHandleEvent<T> =
 | CreateEvent
@@ -414,6 +500,9 @@ type DocHandleEvent<T> =
 | UpdateEvent<T>
 | TimeoutEvent
 | DeleteEvent
+| MarkUnavailableEvent
+| AwaitNetworkEvent
+| NetworkReadyEvent

 type DocHandleXstateMachine<T> = Interpreter<
 DocHandleContext<T>,
@@ -432,7 +521,25 @@ type DocHandleXstateMachine<T> = Interpreter<
 >

 // CONSTANTS
-
-
-
-
+export const {
+IDLE,
+LOADING,
+AWAITING_NETWORK,
+REQUESTING,
+READY,
+FAILED,
+DELETED,
+UNAVAILABLE,
+} = HandleState
+const {
+CREATE,
+FIND,
+REQUEST,
+UPDATE,
+TIMEOUT,
+DELETE,
+REQUEST_COMPLETE,
+MARK_UNAVAILABLE,
+AWAIT_NETWORK,
+NETWORK_READY
+} = Event
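The DocHandle changes above add an unavailable state, ephemeral broadcast/"ephemeral-message" events, and a doc() call that can resolve to undefined. A minimal usage sketch follows; it is not part of the diff, and the repo setup (no network adapters) and document shape are illustrative only.

import { Repo } from "@automerge/automerge-repo"

type TodoDoc = { title?: string }

// An in-memory repo with no network adapters, purely for illustration
const repo = new Repo({ network: [] })
const handle = repo.create<TodoDoc>()

// Fired when a requested document could not be found in storage or on any peer
handle.on("unavailable", ({ handle }) => {
  console.log(`${handle.documentId} is unavailable`)
})

// Ephemeral messages are CBOR-encoded and relayed to peers syncing this
// document; they are never written into the Automerge document itself
handle.on("ephemeral-message", ({ senderId, message }) => {
  console.log(`presence from ${senderId}`, message)
})
handle.broadcast({ cursor: { x: 10, y: 20 } })

// doc() now waits for READY or UNAVAILABLE and returns undefined when unavailable
const doc = await handle.doc()
if (doc === undefined) console.log("document could not be loaded or fetched")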
package/src/DocUrl.ts
CHANGED
@@ -2,7 +2,7 @@ import {
 type AutomergeUrl,
 type BinaryDocumentId,
 type DocumentId,
-} from "./types"
+} from "./types.js"
 import { v4 as uuid } from "uuid"
 import bs58check from "bs58check"

@@ -12,12 +12,12 @@ export const urlPrefix = "automerge:"
 * given an Automerge URL, return a decoded DocumentId (and the encoded DocumentId)
 *
 * @param url
- * @returns {
+ * @returns { binaryDocumentId: BinaryDocumentId, documentId: DocumentId }
 */
 export const parseAutomergeUrl = (url: AutomergeUrl) => {
-const { binaryDocumentId
+const { binaryDocumentId, documentId } = parts(url)
 if (!binaryDocumentId) throw new Error("Invalid document URL: " + url)
-return { binaryDocumentId,
+return { binaryDocumentId, documentId }
 }

 interface StringifyAutomergeUrlOptions {
@@ -28,7 +28,7 @@ interface StringifyAutomergeUrlOptions {
 * Given a documentId in either canonical form, return an Automerge URL
 * Throws on invalid input.
 * Note: this is an object because we anticipate adding fields in the future.
- * @param { documentId:
+ * @param { documentId: BinaryDocumentId | DocumentId }
 * @returns AutomergeUrl
 */
 export const stringifyAutomergeUrl = ({
@@ -79,12 +79,12 @@ export const binaryToDocumentId = (docId: BinaryDocumentId): DocumentId =>
 * eventually this could include things like heads, so we use this structure
 * we return both a binary & string-encoded version of the document ID
 * @param str
- * @returns { binaryDocumentId,
+ * @returns { binaryDocumentId, documentId }
 */
 const parts = (str: string) => {
 const regex = new RegExp(`^${urlPrefix}(\\w+)$`)
-const [
-const
-const binaryDocumentId = documentIdToBinary(
-return { binaryDocumentId,
+const [_, docMatch] = str.match(regex) || []
+const documentId = docMatch as DocumentId
+const binaryDocumentId = documentIdToBinary(documentId)
+return { binaryDocumentId, documentId }
 }
package/src/EphemeralData.ts
CHANGED
@@ -1,46 +1,16 @@
-import {
-import
-import { ChannelId, PeerId } from "./index.js"
-import { MessagePayload } from "./network/NetworkAdapter.js"
-
-/**
- * EphemeralData provides a mechanism to broadcast short-lived data — cursor positions, presence,
- * heartbeats, etc. — that is useful in the moment but not worth persisting.
- */
-export class EphemeralData extends EventEmitter<EphemeralDataMessageEvents> {
-/** Broadcast an ephemeral message */
-broadcast(channelId: ChannelId, message: unknown) {
-const messageBytes = encode(message)
-
-this.emit("message", {
-targetId: "*" as PeerId, // TODO: we don't really need a targetId for broadcast
-channelId: ("m/" + channelId) as ChannelId,
-message: messageBytes,
-broadcast: true,
-})
-}
-
-/** Receive an ephemeral message */
-receive(senderId: PeerId, grossChannelId: ChannelId, message: Uint8Array) {
-const data = decode(message)
-const channelId = grossChannelId.slice(2) as ChannelId
-this.emit("data", {
-peerId: senderId,
-channelId,
-data,
-})
-}
-}
+import { DocumentId, PeerId } from "./index.js"
+import { EphemeralMessageContents } from "./network/messages.js"

 // types
+export type SessionId = string & { __SessionId: false }

 export interface EphemeralDataPayload {
-
+documentId: DocumentId
 peerId: PeerId
-data: { peerId: PeerId;
+data: { peerId: PeerId; documentId: DocumentId; data: unknown }
 }

 export type EphemeralDataMessageEvents = {
-message: (event:
+message: (event: EphemeralMessageContents) => void
 data: (event: EphemeralDataPayload) => void
 }
package/src/Repo.ts
CHANGED
@@ -1,5 +1,5 @@
+import debug from "debug"
 import { DocCollection } from "./DocCollection.js"
-import { EphemeralData } from "./EphemeralData.js"
 import { NetworkAdapter } from "./network/NetworkAdapter.js"
 import { NetworkSubsystem } from "./network/NetworkSubsystem.js"
 import { StorageAdapter } from "./storage/StorageAdapter.js"
@@ -7,15 +7,12 @@ import { StorageSubsystem } from "./storage/StorageSubsystem.js"
 import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
 import { DocumentId, PeerId } from "./types.js"

-import debug from "debug"
-
 /** A Repo is a DocCollection with networking, syncing, and storage capabilities. */
 export class Repo extends DocCollection {
 #log: debug.Debugger

 networkSubsystem: NetworkSubsystem
 storageSubsystem?: StorageSubsystem
-ephemeralData: EphemeralData

 constructor({ storage, network, peerId, sharePolicy }: RepoConfig) {
 super()
@@ -26,58 +23,75 @@ export class Repo extends DocCollection {

 // The `document` event is fired by the DocCollection any time we create a new document or look
 // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-this.on("document", async ({ handle }) => {
+this.on("document", async ({ handle, isNew }) => {
 if (storageSubsystem) {
 // Save when the document changes
 handle.on("heads-changed", async ({ handle, doc }) => {
 await storageSubsystem.saveDoc(handle.documentId, doc)
 })

-
-
-
-
+if (isNew) {
+// this is a new document, immediately save it
+await storageSubsystem.saveDoc(handle.documentId, handle.docSync()!)
+} else {
+// Try to load from disk
+const loadedDoc = await storageSubsystem.loadDoc(handle.documentId)
+if (loadedDoc) {
+handle.update(() => loadedDoc)
+}
 }
 }

-handle.
+handle.on("unavailable", () => {
+this.#log("document unavailable", { documentId: handle.documentId })
+this.emit("unavailable-document", {
+documentId: handle.documentId,
+})
+})
+
+if (this.networkSubsystem.isReady()) {
+handle.request()
+} else {
+handle.awaitNetwork()
+this.networkSubsystem.whenReady().then(() => {
+handle.networkReady()
+}).catch(err => {
+this.#log("error waiting for network", { err })
+})
+}

 // Register the document with the synchronizer. This advertises our interest in the document.
 synchronizer.addDocument(handle.documentId)
 })

-this.on("delete-document", ({
+this.on("delete-document", ({ documentId }) => {
 // TODO Pass the delete on to the network
 // synchronizer.removeDocument(documentId)

 if (storageSubsystem) {
-storageSubsystem.remove(
+storageSubsystem.remove(documentId).catch(err => {
+this.#log("error deleting document", { documentId, err })
+})
 }
 })

 // SYNCHRONIZER
 // The synchronizer uses the network subsystem to keep documents in sync with peers.
-
 const synchronizer = new CollectionSynchronizer(this)

 // When the synchronizer emits sync messages, send them to peers
-synchronizer.on(
-
-(
-
-networkSubsystem.sendMessage(targetId, channelId, message, broadcast)
-}
-)
+synchronizer.on("message", message => {
+this.#log(`sending sync message to ${message.targetId}`)
+networkSubsystem.send(message)
+})

 // STORAGE
 // The storage subsystem has access to some form of persistence, and deals with save and loading documents.
-
 const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined
 this.storageSubsystem = storageSubsystem

 // NETWORK
 // The network subsystem deals with sending and receiving messages to and from peers.
-
 const networkSubsystem = new NetworkSubsystem(network, peerId)
 this.networkSubsystem = networkSubsystem

@@ -94,40 +108,8 @@ export class Repo extends DocCollection {

 // Handle incoming messages
 networkSubsystem.on("message", async msg => {
-
-
-// TODO: this demands a more principled way of associating channels with recipients
-
-// Ephemeral channel ids start with "m/"
-if (channelId.startsWith("m/")) {
-// Ephemeral message
-this.#log(`receiving ephemeral message from ${senderId}`)
-ephemeralData.receive(senderId, channelId, message)
-} else {
-// Sync message
-this.#log(`receiving sync message from ${senderId}`)
-await synchronizer.receiveSyncMessage(senderId, channelId, message)
-}
+await synchronizer.receiveMessage(msg)
 })
-
-// We establish a special channel for sync messages
-networkSubsystem.join()
-
-// EPHEMERAL DATA
-// The ephemeral data subsystem uses the network to send and receive messages that are not
-// persisted to storage, e.g. cursor position, presence, etc.
-
-const ephemeralData = new EphemeralData()
-this.ephemeralData = ephemeralData
-
-// Send ephemeral messages to peers
-ephemeralData.on(
-"message",
-({ targetId, channelId, message, broadcast }) => {
-this.#log(`sending ephemeral message to ${targetId}`)
-networkSubsystem.sendMessage(targetId, channelId, message, broadcast)
-}
-)
 }
 }
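The Repo now loads or requests documents depending on whether they are new, waits for the network before requesting, and surfaces misses through the "unavailable-document" event. The wiring sketch below is not part of this diff; the adapter packages named here are assumptions about a typical setup, and the document shape is illustrative.

import { Repo, type AutomergeUrl, type PeerId } from "@automerge/automerge-repo"
import { BrowserWebSocketClientAdapter } from "@automerge/automerge-repo-network-websocket"
import { IndexedDBStorageAdapter } from "@automerge/automerge-repo-storage-indexeddb"

const repo = new Repo({
  peerId: "browser-tab-1" as PeerId,
  network: [new BrowserWebSocketClientAdapter("wss://sync.example.com")],
  storage: new IndexedDBStorageAdapter(),
})

// Emitted when a find() could not be satisfied from storage or any connected peer
repo.on("unavailable-document", ({ documentId }) => {
  console.warn(`no peer could supply ${documentId}`)
})

// find() tries storage first; if the doc is missing it waits for the network
// to be ready and then requests it from peers
async function open(url: AutomergeUrl) {
  const handle = repo.find<{ count?: number }>(url)
  return await handle.doc() // undefined if the handle ends up UNAVAILABLE
}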
package/src/helpers/cbor.ts
ADDED
@@ -0,0 +1,10 @@
+import { Encoder, decode as cborXdecode } from "cbor-x";
+
+export function encode(obj: any): Buffer {
+let encoder = new Encoder({tagUint8Array: false})
+return encoder.encode(obj)
+}
+
+export function decode(buf: Buffer | Uint8Array): any {
+return cborXdecode(buf)
+}
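The new cbor helper is re-exported from the package root (see the index.ts change below), so the same encoder the repo uses for ephemeral messages can be reused directly. A usage sketch:

import { cbor } from "@automerge/automerge-repo"

// Round-trip an arbitrary value through the repo's CBOR encoder.
// tagUint8Array: false means Uint8Array values are written as plain CBOR
// byte strings, so other CBOR decoders can read them without cbor-x's tag.
const bytes = cbor.encode({ cursor: { x: 1, y: 2 }, user: "alice" })
const value = cbor.decode(bytes)

console.log(value) // { cursor: { x: 1, y: 2 }, user: "alice" }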
package/src/helpers/tests/network-adapter-tests.ts
CHANGED
@@ -1,7 +1,8 @@
-import { PeerId, Repo, type NetworkAdapter,
+import { PeerId, Repo, type NetworkAdapter, DocumentId } from "../../index.js"
 import { eventPromise, eventPromises } from "../eventPromise.js"
 import { assert } from "chai"
 import { describe, it } from "mocha"
+import { pause } from "../pause.js"

 /**
 * Runs a series of tests against a set of three peers, each represented by one or more instantiated
@@ -46,7 +47,7 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

 // Bob receives the change
 await eventPromise(bobHandle, "change")
-assert.equal((await bobHandle.doc())
+assert.equal((await bobHandle.doc())?.foo, "bar")

 // Bob changes the document
 bobHandle.change(d => {
@@ -55,7 +56,7 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

 // Alice receives the change
 await eventPromise(aliceHandle, "change")
-assert.equal((await aliceHandle.doc())
+assert.equal((await aliceHandle.doc())?.foo, "baz")
 }

 // Run the test in both directions, in case they're different types of adapters
@@ -97,8 +98,8 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

 // Bob and Charlie receive the change
 await eventPromises([bobHandle, charlieHandle], "change")
-assert.equal((await bobHandle.doc())
-assert.equal((await charlieHandle.doc())
+assert.equal((await bobHandle.doc())?.foo, "bar")
+assert.equal((await charlieHandle.doc())?.foo, "bar")

 // Charlie changes the document
 charlieHandle.change(d => {
@@ -107,15 +108,13 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {

 // Alice and Bob receive the change
 await eventPromises([aliceHandle, bobHandle], "change")
-assert.equal((await bobHandle.doc())
-assert.equal((await charlieHandle.doc())
+assert.equal((await bobHandle.doc())?.foo, "baz")
+assert.equal((await charlieHandle.doc())?.foo, "baz")

 teardown()
 })

-
-// because the network has cycles (see #92)
-it.skip("can broadcast a message", async () => {
+it("can broadcast a message", async () => {
 const { adapters, teardown } = await setup()
 const [a, b, c] = adapters

@@ -128,13 +127,18 @@ export function runAdapterTests(_setup: SetupFn, title?: string): void {
 "peer"
 )

-const
+const aliceHandle = aliceRepo.create<TestDoc>()
+const charlieHandle = charlieRepo.find(aliceHandle.url)
+
+// pause to give charlie a chance to let alice know it wants the doc
+await pause(100)
+
 const alicePresenceData = { presence: "alice" }
+aliceHandle.broadcast(alicePresenceData)

-
-const { data } = await eventPromise(charlieRepo.ephemeralData, "data")
+const { message } = await eventPromise(charlieHandle, "ephemeral-message")

-assert.deepStrictEqual(
+assert.deepStrictEqual(message, alicePresenceData)
 teardown()
 })
 })
package/src/index.ts
CHANGED
@@ -3,12 +3,22 @@ export { DocHandle, HandleState } from "./DocHandle.js"
 export type { DocHandleChangePayload } from "./DocHandle.js"
 export { NetworkAdapter } from "./network/NetworkAdapter.js"
 export type {
-InboundMessagePayload,
-MessagePayload,
 OpenPayload,
 PeerCandidatePayload,
 PeerDisconnectedPayload,
 } from "./network/NetworkAdapter.js"
+
+// This is a bit confusing right now, but:
+// Message is the type for messages used outside of the network adapters
+// there are some extra internal network adapter-only messages on NetworkAdapterMessage
+// and Message is (as of this writing) a union type for EphmeralMessage and SyncMessage
+export type {
+Message,
+NetworkAdapterMessage,
+EphemeralMessage,
+SyncMessage,
+} from "./network/messages.js"
+
 export { NetworkSubsystem } from "./network/NetworkSubsystem.js"
 export { Repo, type SharePolicy } from "./Repo.js"
 export { StorageAdapter, type StorageKey } from "./storage/StorageAdapter.js"
@@ -20,3 +30,5 @@ export {
 stringifyAutomergeUrl as generateAutomergeUrl,
 } from "./DocUrl.js"
 export * from "./types.js"
+
+export * as cbor from "./helpers/cbor.js"