@automerge/automerge-repo 1.0.0-alpha.3 → 1.0.0-alpha.4
This diff compares the contents of publicly released package versions as they appear in their public registries, and is provided for informational purposes only.
- package/dist/DocCollection.d.ts +2 -1
- package/dist/DocCollection.d.ts.map +1 -1
- package/dist/DocCollection.js +3 -3
- package/dist/DocHandle.d.ts +8 -3
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +28 -6
- package/dist/DocUrl.d.ts +1 -1
- package/dist/DocUrl.d.ts.map +1 -1
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +25 -7
- package/dist/helpers/cbor.d.ts +4 -0
- package/dist/helpers/cbor.d.ts.map +1 -0
- package/dist/helpers/cbor.js +8 -0
- package/dist/helpers/eventPromise.d.ts +1 -1
- package/dist/helpers/eventPromise.d.ts.map +1 -1
- package/dist/helpers/headsAreSame.d.ts +0 -1
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/index.d.ts +1 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -0
- package/dist/network/NetworkAdapter.d.ts +2 -2
- package/dist/network/NetworkAdapter.d.ts.map +1 -1
- package/dist/network/NetworkAdapter.js +1 -1
- package/dist/network/NetworkSubsystem.d.ts +4 -2
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +30 -7
- package/dist/network/messages.d.ts +2 -2
- package/dist/network/messages.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.d.ts +1 -1
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +2 -2
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +11 -12
- package/dist/synchronizer/Synchronizer.d.ts +1 -1
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/synchronizer/Synchronizer.js +1 -1
- package/fuzz/fuzz.ts +1 -1
- package/package.json +3 -3
- package/src/DocCollection.ts +4 -3
- package/src/DocHandle.ts +34 -4
- package/src/DocUrl.ts +1 -1
- package/src/Repo.ts +23 -7
- package/src/helpers/cbor.ts +10 -0
- package/src/helpers/eventPromise.ts +1 -1
- package/src/helpers/headsAreSame.ts +1 -1
- package/src/index.ts +2 -0
- package/src/network/NetworkAdapter.ts +2 -2
- package/src/network/NetworkSubsystem.ts +33 -6
- package/src/network/messages.ts +2 -2
- package/src/storage/StorageSubsystem.ts +2 -2
- package/src/synchronizer/DocSynchronizer.ts +14 -14
- package/src/synchronizer/Synchronizer.ts +1 -1
- package/test/CollectionSynchronizer.test.ts +1 -1
- package/test/DocCollection.test.ts +2 -2
- package/test/DocHandle.test.ts +5 -5
- package/test/Repo.test.ts +53 -11
- package/test/StorageSubsystem.test.ts +2 -3
- package/test/helpers/DummyNetworkAdapter.ts +11 -2
- package/test/helpers/DummyStorageAdapter.ts +1 -1
- package/tsconfig.json +2 -2
package/package.json
CHANGED
```diff
@@ -1,6 +1,6 @@
 {
   "name": "@automerge/automerge-repo",
-  "version": "1.0.0-alpha.3",
+  "version": "1.0.0-alpha.4",
   "description": "A repository object to manage a collection of automerge documents",
   "repository": "https://github.com/automerge/automerge-repo",
   "author": "Peter van Hardenberg <pvh@pvh.ca>",
@@ -31,7 +31,7 @@
     "typescript": "^5.1.6"
   },
   "peerDependencies": {
-    "@automerge/automerge": "^2.1.0-alpha.
+    "@automerge/automerge": "^2.1.0-alpha.12"
   },
   "dependencies": {
     "bs58check": "^3.0.1",
@@ -65,5 +65,5 @@
   "publishConfig": {
     "access": "public"
   },
-  "gitHead": "
+  "gitHead": "fbf71f0c3aaa2786a4e279f336f01d665f53ce5b"
 }
```
package/src/DocCollection.ts
CHANGED
```diff
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { DocHandle } from "./DocHandle.js"
 import { DocumentId, type BinaryDocumentId, AutomergeUrl } from "./types.js"
 import { type SharePolicy } from "./Repo.js"
@@ -74,7 +74,7 @@ export class DocCollection extends EventEmitter<DocCollectionEvents> {
     // Generate a new UUID and store it in the buffer
     const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
     const handle = this.#getHandle<T>(documentId, true) as DocHandle<T>
-    this.emit("document", { handle })
+    this.emit("document", { handle, isNew: true })
     return handle
   }

@@ -105,7 +105,7 @@ export class DocCollection extends EventEmitter<DocCollectionEvents> {
     }

     const handle = this.#getHandle<T>(documentId, false) as DocHandle<T>
-    this.emit("document", { handle })
+    this.emit("document", { handle, isNew: false })
     return handle
   }

@@ -136,6 +136,7 @@ interface DocCollectionEvents {

 interface DocumentPayload {
   handle: DocHandle<any>
+  isNew: boolean
 }

 interface DeleteDocumentPayload {
```
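The `document` event payload now carries an `isNew` flag so listeners can tell creation apart from lookup. A minimal consumer-side sketch (the handler body is illustrative, not from the package):

```ts
import { Repo } from "@automerge/automerge-repo"

const repo = new Repo({ network: [] })

repo.on("document", ({ handle, isNew }) => {
  // isNew is true for documents made via create(), false for find()
  console.log(isNew ? "created" : "looked up", handle.documentId)
})
```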
package/src/DocHandle.ts
CHANGED
```diff
@@ -1,6 +1,6 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import debug from "debug"
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import {
   assign,
   BaseActionObject,
@@ -19,7 +19,7 @@ import { pause } from "./helpers/pause.js"
 import { TimeoutError, withTimeout } from "./helpers/withTimeout.js"
 import type { DocumentId, PeerId, AutomergeUrl } from "./types.js"
 import { stringifyAutomergeUrl } from "./DocUrl.js"
-import { encode } from "cbor
+import { encode } from "./helpers/cbor.js"

 /** DocHandle is a wrapper around a single Automerge document that lets us listen for changes. */
 export class DocHandle<T> //
@@ -43,7 +43,12 @@ export class DocHandle<T> //
     this.#log = debug(`automerge-repo:dochandle:${this.documentId.slice(0, 5)}`)

     // initial doc
-
+    let doc = A.init<T>()
+
+    // Make an empty change so that we have something to save to disk
+    if (isNew) {
+      doc = A.emptyChange(doc, {})
+    }

     /**
      * Internally we use a state machine to orchestrate document loading and/or syncing, in order to
@@ -82,6 +87,8 @@ export class DocHandle<T> //
         UPDATE: { actions: "onUpdate", target: READY },
         // REQUEST is called by the Repo if the document is not found in storage
         REQUEST: { target: REQUESTING },
+        // AWAIT_NETWORK is called by the repo if the document is not found in storage but the network is not yet ready
+        AWAIT_NETWORK: { target: AWAITING_NETWORK },
         DELETE: { actions: "onDelete", target: DELETED },
       },
       after: [
@@ -91,6 +98,11 @@ export class DocHandle<T> //
         },
       ],
     },
+    awaitingNetwork: {
+      on: {
+        NETWORK_READY: { target: REQUESTING },
+      }
+    },
     requesting: {
       on: {
         MARK_UNAVAILABLE: {
@@ -342,6 +354,14 @@ export class DocHandle<T> //
     if (this.#state === LOADING) this.#machine.send(REQUEST)
   }

+  awaitNetwork() {
+    if (this.#state === LOADING) this.#machine.send(AWAIT_NETWORK)
+  }
+
+  networkReady() {
+    if (this.#state === AWAITING_NETWORK) this.#machine.send(NETWORK_READY)
+  }
+
   /** `delete` is called by the repo when the document is deleted */
   delete() {
     this.#machine.send(DELETE)
@@ -419,6 +439,7 @@ export interface DocHandleEvents<T> {
 export const HandleState = {
   IDLE: "idle",
   LOADING: "loading",
+  AWAITING_NETWORK: "awaitingNetwork",
   REQUESTING: "requesting",
   READY: "ready",
   FAILED: "failed",
@@ -448,6 +469,8 @@ export const Event = {
   FIND: "FIND",
   REQUEST: "REQUEST",
   REQUEST_COMPLETE: "REQUEST_COMPLETE",
+  AWAIT_NETWORK: "AWAIT_NETWORK",
+  NETWORK_READY: "NETWORK_READY",
   UPDATE: "UPDATE",
   TIMEOUT: "TIMEOUT",
   DELETE: "DELETE",
@@ -466,6 +489,8 @@ type UpdateEvent<T> = {
 }
 type TimeoutEvent = { type: typeof TIMEOUT }
 type MarkUnavailableEvent = { type: typeof MARK_UNAVAILABLE }
+type AwaitNetworkEvent = { type: typeof AWAIT_NETWORK }
+type NetworkReadyEvent = { type: typeof NETWORK_READY }

 type DocHandleEvent<T> =
   | CreateEvent
@@ -476,6 +501,8 @@ type DocHandleEvent<T> =
   | TimeoutEvent
   | DeleteEvent
   | MarkUnavailableEvent
+  | AwaitNetworkEvent
+  | NetworkReadyEvent

 type DocHandleXstateMachine<T> = Interpreter<
   DocHandleContext<T>,
@@ -497,6 +524,7 @@ type DocHandleXstateMachine<T> = Interpreter<
 export const {
   IDLE,
   LOADING,
+  AWAITING_NETWORK,
   REQUESTING,
   READY,
   FAILED,
@@ -512,4 +540,6 @@ const {
   DELETE,
   REQUEST_COMPLETE,
   MARK_UNAVAILABLE,
+  AWAIT_NETWORK,
+  NETWORK_READY
 } = Event
```
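Two changes stand out here: new handles now commit an empty change so a just-created document has something persistable, and a new `awaitingNetwork` state parks lookups until the network reports ready. A standalone sketch of the empty-change trick, using only the public Automerge API:

```ts
import * as A from "@automerge/automerge/next"

let doc = A.init<{ foo?: string }>()
console.log(A.getHeads(doc).length) // 0 — nothing to save yet

// An empty change gives the document a head, so the storage
// subsystem has a concrete snapshot to write to disk.
doc = A.emptyChange(doc, {})
console.log(A.getHeads(doc).length) // 1
```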
package/src/DocUrl.ts
CHANGED
package/src/Repo.ts
CHANGED
```diff
@@ -23,17 +23,22 @@ export class Repo extends DocCollection {

     // The `document` event is fired by the DocCollection any time we create a new document or look
     // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-    this.on("document", async ({ handle }) => {
+    this.on("document", async ({ handle, isNew }) => {
       if (storageSubsystem) {
         // Save when the document changes
         handle.on("heads-changed", async ({ handle, doc }) => {
           await storageSubsystem.saveDoc(handle.documentId, doc)
         })

-
-
-
-
+        if (isNew) {
+          // this is a new document, immediately save it
+          await storageSubsystem.saveDoc(handle.documentId, handle.docSync()!)
+        } else {
+          // Try to load from disk
+          const loadedDoc = await storageSubsystem.loadDoc(handle.documentId)
+          if (loadedDoc) {
+            handle.update(() => loadedDoc)
+          }
         }
       }

@@ -44,7 +49,16 @@ export class Repo extends DocCollection {
       })
     })

-
+      if (this.networkSubsystem.isReady()) {
+        handle.request()
+      } else {
+        handle.awaitNetwork()
+        this.networkSubsystem.whenReady().then(() => {
+          handle.networkReady()
+        }).catch(err => {
+          this.#log("error waiting for network", { err })
+        })
+      }

       // Register the document with the synchronizer. This advertises our interest in the document.
       synchronizer.addDocument(handle.documentId)
@@ -55,7 +69,9 @@ export class Repo extends DocCollection {
       // synchronizer.removeDocument(documentId)

       if (storageSubsystem) {
-        storageSubsystem.remove(documentId)
+        storageSubsystem.remove(documentId).catch(err => {
+          this.#log("error deleting document", { documentId, err })
+        })
       }
     })
```
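Taken together, these hunks mean `find()` no longer races adapter startup: a document missing from storage waits in `awaitingNetwork` instead of being marked unavailable before any peer could answer. A sketch of the resulting handle lifecycle (`url` is a placeholder):

```ts
// loading -> awaitingNetwork  (adapters still connecting)
//         -> requesting       (network subsystem emits "ready")
//         -> ready | unavailable
const handle = repo.find<{ foo: string }>(url)
await handle.whenReady() // resolves once loaded from storage or synced
```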
package/src/helpers/cbor.ts
ADDED

```diff
@@ -0,0 +1,10 @@
+import { Encoder, decode as cborXdecode } from "cbor-x";
+
+export function encode(obj: any): Buffer {
+  let encoder = new Encoder({tagUint8Array: false})
+  return encoder.encode(obj)
+}
+
+export function decode(buf: Buffer | Uint8Array): any {
+  return cborXdecode(buf)
+}
```
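This helper pins down the cbor-x configuration: with `tagUint8Array: false`, byte arrays are encoded as plain CBOR byte strings rather than tagged values, which keeps the wire format friendly to other decoders. A round-trip sketch:

```ts
import { encode, decode } from "./helpers/cbor.js"

const message = { type: "sync", data: new Uint8Array([1, 2, 3]) }
const bytes = encode(message) // no tag wrapping the Uint8Array
const roundTripped = decode(bytes)
```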
package/src/index.ts
CHANGED
package/src/network/NetworkAdapter.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { PeerId } from "../types.js"
 import { Message } from "./messages.js"

@@ -17,7 +17,7 @@ export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents>
 // events & payloads

 export interface NetworkAdapterEvents {
-
+  ready: (payload: OpenPayload) => void
   close: () => void
   "peer-candidate": (payload: PeerCandidatePayload) => void
   "peer-disconnected": (payload: PeerDisconnectedPayload) => void
```
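Adapters now advertise readiness through a `ready` event. A minimal sketch of an adapter honoring the new contract, modeled on the `DummyNetworkAdapter` test helper later in this diff:

```ts
import { NetworkAdapter } from "@automerge/automerge-repo"

class LoopbackAdapter extends NetworkAdapter {
  send() {}
  connect(_: string) {
    // Signal readiness as soon as we're asked to connect.
    this.emit("ready", { network: this })
  }
  join() {}
  leave() {}
}
```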
package/src/network/NetworkSubsystem.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { PeerId } from "../types.js"
 import { NetworkAdapter, PeerDisconnectedPayload } from "./NetworkAdapter.js"

@@ -25,18 +25,27 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
   #count = 0
   #sessionId: SessionId = Math.random().toString(36).slice(2) as SessionId
   #ephemeralSessionCounts: Record<EphemeralMessageSource, number> = {}
+  #readyAdapterCount = 0
+  #adapters: NetworkAdapter[] = []

   constructor(
-
+    adapters: NetworkAdapter[],
     public peerId = randomPeerId()
   ) {
     super()
     this.#log = debug(`automerge-repo:network:${this.peerId}`)
-
+    adapters.forEach(a => this.addNetworkAdapter(a))
   }

   addNetworkAdapter(networkAdapter: NetworkAdapter) {
-
+    this.#adapters.push(networkAdapter)
+    networkAdapter.once("ready", () => {
+      this.#readyAdapterCount++
+      this.#log("Adapters ready: ", this.#readyAdapterCount, "/", this.#adapters.length)
+      if (this.#readyAdapterCount === this.#adapters.length) {
+        this.emit("ready")
+      }
+    })

     networkAdapter.on("peer-candidate", ({ peerId }) => {
       this.#log(`peer candidate: ${peerId} `)
@@ -90,6 +99,7 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
       })
     })

+    networkAdapter.connect(this.peerId)
     networkAdapter.join()
   }

@@ -122,12 +132,28 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {

   join() {
     this.#log(`Joining network`)
-    this
+    this.#adapters.forEach(a => a.join())
   }

   leave() {
     this.#log(`Leaving network`)
-    this
+    this.#adapters.forEach(a => a.leave())
+  }
+
+  isReady = () => {
+    return this.#readyAdapterCount === this.#adapters.length
+  }
+
+  whenReady = async () => {
+    if (this.isReady()) {
+      return
+    } else {
+      return new Promise<void>(resolve => {
+        this.once("ready", () => {
+          resolve()
+        })
+      })
+    }
   }
 }

@@ -141,6 +167,7 @@ export interface NetworkSubsystemEvents {
   peer: (payload: PeerPayload) => void
   "peer-disconnected": (payload: PeerDisconnectedPayload) => void
   message: (payload: Message) => void
+  "ready": () => void
 }

 export interface PeerPayload {
```
package/src/network/messages.ts
CHANGED
```diff
@@ -1,6 +1,6 @@
 // utilities
-import { SessionId } from "../EphemeralData"
-import { DocumentId, PeerId } from "../types"
+import { SessionId } from "../EphemeralData.js"
+import { DocumentId, PeerId } from "../types.js"

 export function isValidMessage(
   message: NetworkAdapterMessage
```
package/src/storage/StorageSubsystem.ts
CHANGED

```diff
@@ -1,4 +1,4 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import { StorageAdapter, StorageKey } from "./StorageAdapter.js"
 import * as sha256 from "fast-sha256"
 import { type DocumentId } from "../types.js"
@@ -25,7 +25,7 @@ function keyHash(binary: Uint8Array) {

 function headsHash(heads: A.Heads): string {
   let encoder = new TextEncoder()
-  let headsbinary = mergeArrays(heads.map(h => encoder.encode(h)))
+  let headsbinary = mergeArrays(heads.map((h: string) => encoder.encode(h)))
   return keyHash(headsbinary)
 }
```
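Only a type annotation changed here, but for orientation: `headsHash` derives a storage key from a document's heads. A rough, self-contained approximation using the same `fast-sha256` dependency (`mergeArrays` and the key encoding are simplified, so treat this as a sketch rather than the package's exact key format):

```ts
import * as sha256 from "fast-sha256"

function sketchHeadsHash(heads: string[]): Uint8Array {
  const encoder = new TextEncoder()
  const parts = heads.map(h => encoder.encode(h))
  // Concatenate the UTF-8 bytes of every head, then digest them.
  const merged = new Uint8Array(parts.reduce((n, p) => n + p.length, 0))
  let offset = 0
  for (const p of parts) {
    merged.set(p, offset)
    offset += p.length
  }
  return sha256.hash(merged)
}
```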
package/src/synchronizer/DocSynchronizer.ts
CHANGED

```diff
@@ -1,5 +1,6 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import {
+  AWAITING_NETWORK,
   DocHandle,
   DocHandleOutboundEphemeralMessagePayload,
   READY,
@@ -174,8 +175,8 @@ export class DocSynchronizer extends Synchronizer {

     // expanding is expensive, so only do it if we're logging at this level
     const expanded = this.#opsLog.enabled
-      ? decoded.changes.flatMap(change =>
-          A.decodeChange(change).ops.map(op => JSON.stringify(op))
+      ? decoded.changes.flatMap((change: A.Change) =>
+          A.decodeChange(change).ops.map((op: any) => JSON.stringify(op))
         )
       : null
     this.#opsLog(logText, expanded)
@@ -190,20 +191,19 @@ export class DocSynchronizer extends Synchronizer {
   beginSync(peerIds: PeerId[]) {
     this.#log(`beginSync: ${peerIds.join(", ")}`)

+    // HACK: if we have a sync state already, we round-trip it through the encoding system to make
+    // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
+    // messages during disconnection.
+    // TODO: cover that case with a test and remove this hack
+    peerIds.forEach(peerId => {
+      const syncStateRaw = this.#getSyncState(peerId)
+      const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
+      this.#setSyncState(peerId, syncState)
+    })
+
     // At this point if we don't have anything in our storage, we need to use an empty doc to sync
     // with; but we don't want to surface that state to the front end
     void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
-      // if we don't have any peers, then we can say the document is unavailable
-
-      // HACK: if we have a sync state already, we round-trip it through the encoding system to make
-      // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
-      // messages during disconnection.
-      // TODO: cover that case with a test and remove this hack
-      peerIds.forEach(peerId => {
-        const syncStateRaw = this.#getSyncState(peerId)
-        const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
-        this.#setSyncState(peerId, syncState)
-      })

       // we register out peers first, then say that sync has started
       this.#syncStarted = true
```
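The round-trip hack is unchanged; it has just been hoisted out of the async `doc()` callback so it runs before any sync messages are generated. A standalone illustration of why the round-trip helps, using only public Automerge calls:

```ts
import * as A from "@automerge/automerge/next"

// encodeSyncState persists only the durable fields; decoding it back
// discards in-flight bookkeeping such as "message already sent",
// which is what breaks the failed-send loop described in the comment.
const stale = A.initSyncState()
const reset = A.decodeSyncState(A.encodeSyncState(stale))
```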
package/test/CollectionSynchronizer.test.ts
CHANGED

```diff
@@ -1,6 +1,6 @@
 import assert from "assert"
 import { beforeEach } from "mocha"
-import { DocCollection, PeerId } from "../src"
+import { DocCollection, PeerId } from "../src/index.js"
 import { CollectionSynchronizer } from "../src/synchronizer/CollectionSynchronizer.js"

 describe("CollectionSynchronizer", () => {
```
package/test/DocCollection.test.ts
CHANGED

```diff
@@ -1,7 +1,7 @@
 import assert from "assert"
-import { DocCollection, BinaryDocumentId } from "../src"
+import { DocCollection, BinaryDocumentId } from "../src/index.js"
 import { TestDoc } from "./types.js"
-import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl"
+import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl.js"

 const MISSING_DOCID = generateAutomergeUrl()
```
package/test/DocHandle.test.ts
CHANGED
```diff
@@ -1,11 +1,11 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import assert from "assert"
 import { it } from "mocha"
-import { DocHandle, DocHandleChangePayload } from "../src"
-import { pause } from "../src/helpers/pause"
+import { DocHandle, DocHandleChangePayload } from "../src/index.js"
+import { pause } from "../src/helpers/pause.js"
 import { TestDoc } from "./types.js"
-import { generateAutomergeUrl, parseAutomergeUrl } from "../src/DocUrl"
-import { eventPromise } from "../src/helpers/eventPromise"
+import { generateAutomergeUrl, parseAutomergeUrl } from "../src/DocUrl.js"
+import { eventPromise } from "../src/helpers/eventPromise.js"
 import { decode } from "cbor-x"

 describe("DocHandle", () => {
```
package/test/Repo.test.ts
CHANGED
```diff
@@ -8,7 +8,7 @@ import {
   DocumentId,
   PeerId,
   SharePolicy,
-} from "../src"
+} from "../src/index.js"
 import { eventPromise } from "../src/helpers/eventPromise.js"
 import { pause, rejectOnTimeout } from "../src/helpers/pause.js"
 import { Repo } from "../src/Repo.js"
@@ -16,19 +16,20 @@ import { DummyNetworkAdapter } from "./helpers/DummyNetworkAdapter.js"
 import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
 import { getRandomItem } from "./helpers/getRandomItem.js"
 import { TestDoc } from "./types.js"
-import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl"
-import { READY } from "../src/DocHandle"
+import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl.js"
+import { READY, AWAITING_NETWORK } from "../src/DocHandle.js"

 describe("Repo", () => {
   describe("single repo", () => {
-    const setup = () => {
+    const setup = (networkReady = true) => {
       const storageAdapter = new DummyStorageAdapter()
+      const networkAdapter = new DummyNetworkAdapter(networkReady)

       const repo = new Repo({
         storage: storageAdapter,
-        network: [
+        network: [networkAdapter],
       })
-      return { repo, storageAdapter }
+      return { repo, storageAdapter, networkAdapter }
     }

     it("can instantiate a Repo", () => {
@@ -83,6 +84,23 @@ describe("Repo", () => {
       await eventPromise(handle, "unavailable")
     })

+    it("doesn't mark a document as unavailable until network adapters are ready", async () => {
+      const { repo, networkAdapter } = setup(false)
+      const url = generateAutomergeUrl()
+      const handle = repo.find<TestDoc>(url)
+
+      let wasUnavailable = false
+      handle.on("unavailable", () => {
+        wasUnavailable = true
+      })
+      await pause(50)
+      assert.equal(wasUnavailable, false)
+
+      networkAdapter.emit("ready", { network: networkAdapter })
+      await eventPromise(handle, "unavailable")
+    })
+
     it("can find a created document", async () => {
       const { repo } = setup()
       const handle = repo.create<TestDoc>()
@@ -100,6 +118,21 @@ describe("Repo", () => {
       assert.equal(v?.foo, "bar")
     })

+    it("saves the document when creating it", async () => {
+      const { repo, storageAdapter } = setup()
+      const handle = repo.create<TestDoc>()
+
+      const repo2 = new Repo({
+        storage: storageAdapter,
+        network: [],
+      })
+
+      const bobHandle = repo2.find<TestDoc>(handle.url)
+      await bobHandle.whenReady()
+      assert.equal(bobHandle.isReady(), true)
+    })
+
     it("saves the document when changed and can find it again", async () => {
       const { repo, storageAdapter } = setup()
       const handle = repo.create<TestDoc>()
@@ -297,17 +330,26 @@ describe("Repo", () => {
       bobCharlieChannel.port1.close()
     }

+    function doConnectAlice() {
+      aliceRepo.networkSubsystem.addNetworkAdapter(new MessageChannelNetworkAdapter(aliceToBob))
+      //bobRepo.networkSubsystem.addNetworkAdapter(new MessageChannelNetworkAdapter(bobToAlice))
+    }
+
+    if (connectAlice) {
+      doConnectAlice()
+    }
+
     return {
       teardown,
       aliceRepo,
       bobRepo,
       charlieRepo,
-
+      connectAliceToBob: doConnectAlice,
     }
   }

   const setup = async (connectAlice = true) => {
-    const { teardown, aliceRepo, bobRepo, charlieRepo,
+    const { teardown, aliceRepo, bobRepo, charlieRepo, connectAliceToBob } =
       setupRepos(connectAlice)

     const aliceHandle = aliceRepo.create<TestDoc>()
@@ -345,7 +387,7 @@ describe("Repo", () => {
       notForCharlie,
       notForBob,
       teardown,
-
+      connectAliceToBob,
     }
   }

@@ -443,7 +485,7 @@ describe("Repo", () => {
       notForCharlie,
       aliceRepo,
       teardown,
-
+      connectAliceToBob,
     } = await setup(false)

     const url = stringifyAutomergeUrl({ documentId: notForCharlie })
@@ -452,7 +494,7 @@ describe("Repo", () => {

     await eventPromise(handle, "unavailable")

-
+    connectAliceToBob()

     await eventPromise(aliceRepo.networkSubsystem, "peer")
```
package/test/StorageSubsystem.test.ts
CHANGED

```diff
@@ -4,13 +4,12 @@ import path from "path"

 import assert from "assert"

-import A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"

 import { DummyStorageAdapter } from "./helpers/DummyStorageAdapter.js"
 import { NodeFSStorageAdapter } from "@automerge/automerge-repo-storage-nodefs"

-import { StorageSubsystem } from "../src"
-import { TestDoc } from "./types.js"
+import { StorageSubsystem } from "../src/index.js"
 import { generateAutomergeUrl, parseAutomergeUrl } from "../src/DocUrl.js"

 const tempDir = fs.mkdtempSync(path.join(os.tmpdir(), "automerge-repo-tests"))
```
package/test/helpers/DummyNetworkAdapter.ts
CHANGED

```diff
@@ -1,8 +1,17 @@
-import { NetworkAdapter } from "../../src"
+import { NetworkAdapter } from "../../src/index.js"

 export class DummyNetworkAdapter extends NetworkAdapter {
+  #startReady = true
+  constructor(startReady: boolean) {
+    super()
+    this.#startReady = startReady
+  }
   send() {}
-  connect(_: string) {
+  connect(_: string) {
+    if (this.#startReady) {
+      this.emit("ready", { network: this })
+    }
+  }
   join() {}
   leave() {}
 }
```