@automerge/automerge-repo 2.0.0-collectionsync-alpha.1 → 2.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +8 -8
- package/dist/AutomergeUrl.d.ts +17 -5
- package/dist/AutomergeUrl.d.ts.map +1 -1
- package/dist/AutomergeUrl.js +71 -24
- package/dist/DocHandle.d.ts +33 -41
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +105 -66
- package/dist/FindProgress.d.ts +30 -0
- package/dist/FindProgress.d.ts.map +1 -0
- package/dist/FindProgress.js +1 -0
- package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
- package/dist/RemoteHeadsSubscriptions.js +4 -1
- package/dist/Repo.d.ts +24 -5
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +355 -169
- package/dist/helpers/abortable.d.ts +36 -0
- package/dist/helpers/abortable.d.ts.map +1 -0
- package/dist/helpers/abortable.js +47 -0
- package/dist/helpers/arraysAreEqual.d.ts.map +1 -1
- package/dist/helpers/bufferFromHex.d.ts +3 -0
- package/dist/helpers/bufferFromHex.d.ts.map +1 -0
- package/dist/helpers/bufferFromHex.js +13 -0
- package/dist/helpers/debounce.d.ts.map +1 -1
- package/dist/helpers/eventPromise.d.ts.map +1 -1
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/mergeArrays.d.ts +1 -1
- package/dist/helpers/mergeArrays.d.ts.map +1 -1
- package/dist/helpers/pause.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/network-adapter-tests.js +13 -13
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +6 -9
- package/dist/helpers/throttle.d.ts.map +1 -1
- package/dist/helpers/withTimeout.d.ts.map +1 -1
- package/dist/index.d.ts +35 -7
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +37 -6
- package/dist/network/NetworkSubsystem.d.ts +0 -1
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +0 -3
- package/dist/network/messages.d.ts +1 -7
- package/dist/network/messages.d.ts.map +1 -1
- package/dist/network/messages.js +1 -2
- package/dist/storage/StorageAdapter.d.ts +0 -9
- package/dist/storage/StorageAdapter.d.ts.map +1 -1
- package/dist/storage/StorageAdapter.js +0 -33
- package/dist/storage/StorageSubsystem.d.ts +6 -2
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +131 -37
- package/dist/storage/keyHash.d.ts +1 -1
- package/dist/storage/keyHash.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.d.ts +3 -4
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +32 -26
- package/dist/synchronizer/DocSynchronizer.d.ts +8 -8
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +205 -79
- package/dist/types.d.ts +4 -1
- package/dist/types.d.ts.map +1 -1
- package/fuzz/fuzz.ts +3 -3
- package/package.json +4 -5
- package/src/AutomergeUrl.ts +101 -26
- package/src/DocHandle.ts +158 -77
- package/src/FindProgress.ts +48 -0
- package/src/RemoteHeadsSubscriptions.ts +11 -9
- package/src/Repo.ts +465 -180
- package/src/helpers/abortable.ts +62 -0
- package/src/helpers/bufferFromHex.ts +14 -0
- package/src/helpers/headsAreSame.ts +2 -2
- package/src/helpers/tests/network-adapter-tests.ts +14 -13
- package/src/helpers/tests/storage-adapter-tests.ts +13 -24
- package/src/index.ts +57 -38
- package/src/network/NetworkSubsystem.ts +0 -4
- package/src/network/messages.ts +2 -11
- package/src/storage/StorageAdapter.ts +0 -42
- package/src/storage/StorageSubsystem.ts +155 -45
- package/src/storage/keyHash.ts +1 -1
- package/src/synchronizer/CollectionSynchronizer.ts +42 -29
- package/src/synchronizer/DocSynchronizer.ts +263 -89
- package/src/types.ts +4 -1
- package/test/AutomergeUrl.test.ts +130 -0
- package/test/CollectionSynchronizer.test.ts +6 -8
- package/test/DocHandle.test.ts +161 -77
- package/test/DocSynchronizer.test.ts +11 -9
- package/test/RemoteHeadsSubscriptions.test.ts +1 -1
- package/test/Repo.test.ts +406 -341
- package/test/StorageSubsystem.test.ts +95 -20
- package/test/remoteHeads.test.ts +28 -13
- package/dist/CollectionHandle.d.ts +0 -14
- package/dist/CollectionHandle.d.ts.map +0 -1
- package/dist/CollectionHandle.js +0 -37
- package/dist/DocUrl.d.ts +0 -47
- package/dist/DocUrl.d.ts.map +0 -1
- package/dist/DocUrl.js +0 -72
- package/dist/EphemeralData.d.ts +0 -20
- package/dist/EphemeralData.d.ts.map +0 -1
- package/dist/EphemeralData.js +0 -1
- package/dist/ferigan.d.ts +0 -51
- package/dist/ferigan.d.ts.map +0 -1
- package/dist/ferigan.js +0 -98
- package/dist/src/DocHandle.d.ts +0 -182
- package/dist/src/DocHandle.d.ts.map +0 -1
- package/dist/src/DocHandle.js +0 -405
- package/dist/src/DocUrl.d.ts +0 -49
- package/dist/src/DocUrl.d.ts.map +0 -1
- package/dist/src/DocUrl.js +0 -72
- package/dist/src/EphemeralData.d.ts +0 -19
- package/dist/src/EphemeralData.d.ts.map +0 -1
- package/dist/src/EphemeralData.js +0 -1
- package/dist/src/Repo.d.ts +0 -74
- package/dist/src/Repo.d.ts.map +0 -1
- package/dist/src/Repo.js +0 -208
- package/dist/src/helpers/arraysAreEqual.d.ts +0 -2
- package/dist/src/helpers/arraysAreEqual.d.ts.map +0 -1
- package/dist/src/helpers/arraysAreEqual.js +0 -2
- package/dist/src/helpers/cbor.d.ts +0 -4
- package/dist/src/helpers/cbor.d.ts.map +0 -1
- package/dist/src/helpers/cbor.js +0 -8
- package/dist/src/helpers/eventPromise.d.ts +0 -11
- package/dist/src/helpers/eventPromise.d.ts.map +0 -1
- package/dist/src/helpers/eventPromise.js +0 -7
- package/dist/src/helpers/headsAreSame.d.ts +0 -2
- package/dist/src/helpers/headsAreSame.d.ts.map +0 -1
- package/dist/src/helpers/headsAreSame.js +0 -4
- package/dist/src/helpers/mergeArrays.d.ts +0 -2
- package/dist/src/helpers/mergeArrays.d.ts.map +0 -1
- package/dist/src/helpers/mergeArrays.js +0 -15
- package/dist/src/helpers/pause.d.ts +0 -6
- package/dist/src/helpers/pause.d.ts.map +0 -1
- package/dist/src/helpers/pause.js +0 -10
- package/dist/src/helpers/tests/network-adapter-tests.d.ts +0 -21
- package/dist/src/helpers/tests/network-adapter-tests.d.ts.map +0 -1
- package/dist/src/helpers/tests/network-adapter-tests.js +0 -122
- package/dist/src/helpers/withTimeout.d.ts +0 -12
- package/dist/src/helpers/withTimeout.d.ts.map +0 -1
- package/dist/src/helpers/withTimeout.js +0 -24
- package/dist/src/index.d.ts +0 -53
- package/dist/src/index.d.ts.map +0 -1
- package/dist/src/index.js +0 -40
- package/dist/src/network/NetworkAdapter.d.ts +0 -26
- package/dist/src/network/NetworkAdapter.d.ts.map +0 -1
- package/dist/src/network/NetworkAdapter.js +0 -4
- package/dist/src/network/NetworkSubsystem.d.ts +0 -23
- package/dist/src/network/NetworkSubsystem.d.ts.map +0 -1
- package/dist/src/network/NetworkSubsystem.js +0 -120
- package/dist/src/network/messages.d.ts +0 -85
- package/dist/src/network/messages.d.ts.map +0 -1
- package/dist/src/network/messages.js +0 -23
- package/dist/src/storage/StorageAdapter.d.ts +0 -14
- package/dist/src/storage/StorageAdapter.d.ts.map +0 -1
- package/dist/src/storage/StorageAdapter.js +0 -1
- package/dist/src/storage/StorageSubsystem.d.ts +0 -12
- package/dist/src/storage/StorageSubsystem.d.ts.map +0 -1
- package/dist/src/storage/StorageSubsystem.js +0 -145
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts +0 -25
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/CollectionSynchronizer.js +0 -106
- package/dist/src/synchronizer/DocSynchronizer.d.ts +0 -29
- package/dist/src/synchronizer/DocSynchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/DocSynchronizer.js +0 -263
- package/dist/src/synchronizer/Synchronizer.d.ts +0 -9
- package/dist/src/synchronizer/Synchronizer.d.ts.map +0 -1
- package/dist/src/synchronizer/Synchronizer.js +0 -2
- package/dist/src/types.d.ts +0 -16
- package/dist/src/types.d.ts.map +0 -1
- package/dist/src/types.js +0 -1
- package/dist/test/CollectionSynchronizer.test.d.ts +0 -2
- package/dist/test/CollectionSynchronizer.test.d.ts.map +0 -1
- package/dist/test/CollectionSynchronizer.test.js +0 -57
- package/dist/test/DocHandle.test.d.ts +0 -2
- package/dist/test/DocHandle.test.d.ts.map +0 -1
- package/dist/test/DocHandle.test.js +0 -238
- package/dist/test/DocSynchronizer.test.d.ts +0 -2
- package/dist/test/DocSynchronizer.test.d.ts.map +0 -1
- package/dist/test/DocSynchronizer.test.js +0 -111
- package/dist/test/Network.test.d.ts +0 -2
- package/dist/test/Network.test.d.ts.map +0 -1
- package/dist/test/Network.test.js +0 -11
- package/dist/test/Repo.test.d.ts +0 -2
- package/dist/test/Repo.test.d.ts.map +0 -1
- package/dist/test/Repo.test.js +0 -568
- package/dist/test/StorageSubsystem.test.d.ts +0 -2
- package/dist/test/StorageSubsystem.test.d.ts.map +0 -1
- package/dist/test/StorageSubsystem.test.js +0 -56
- package/dist/test/helpers/DummyNetworkAdapter.d.ts +0 -9
- package/dist/test/helpers/DummyNetworkAdapter.d.ts.map +0 -1
- package/dist/test/helpers/DummyNetworkAdapter.js +0 -15
- package/dist/test/helpers/DummyStorageAdapter.d.ts +0 -16
- package/dist/test/helpers/DummyStorageAdapter.d.ts.map +0 -1
- package/dist/test/helpers/DummyStorageAdapter.js +0 -33
- package/dist/test/helpers/generate-large-object.d.ts +0 -5
- package/dist/test/helpers/generate-large-object.d.ts.map +0 -1
- package/dist/test/helpers/generate-large-object.js +0 -9
- package/dist/test/helpers/getRandomItem.d.ts +0 -2
- package/dist/test/helpers/getRandomItem.d.ts.map +0 -1
- package/dist/test/helpers/getRandomItem.js +0 -4
- package/dist/test/types.d.ts +0 -4
- package/dist/test/types.d.ts.map +0 -1
- package/dist/test/types.js +0 -1
- package/src/CollectionHandle.ts +0 -54
- package/src/ferigan.ts +0 -184
package/src/Repo.ts
CHANGED
|
@@ -2,8 +2,10 @@ import { next as Automerge } from "@automerge/automerge/slim"
|
|
|
2
2
|
import debug from "debug"
|
|
3
3
|
import { EventEmitter } from "eventemitter3"
|
|
4
4
|
import {
|
|
5
|
+
encodeHeads,
|
|
5
6
|
generateAutomergeUrl,
|
|
6
7
|
interpretAsDocumentId,
|
|
8
|
+
isValidAutomergeUrl,
|
|
7
9
|
parseAutomergeUrl,
|
|
8
10
|
} from "./AutomergeUrl.js"
|
|
9
11
|
import {
|
|
@@ -14,6 +16,7 @@ import {
|
|
|
14
16
|
UNAVAILABLE,
|
|
15
17
|
UNLOADED,
|
|
16
18
|
} from "./DocHandle.js"
|
|
19
|
+
import { RemoteHeadsSubscriptions } from "./RemoteHeadsSubscriptions.js"
|
|
17
20
|
import { headsAreSame } from "./helpers/headsAreSame.js"
|
|
18
21
|
import { throttle } from "./helpers/throttle.js"
|
|
19
22
|
import {
|
|
@@ -21,7 +24,7 @@ import {
|
|
|
21
24
|
type PeerMetadata,
|
|
22
25
|
} from "./network/NetworkAdapterInterface.js"
|
|
23
26
|
import { NetworkSubsystem } from "./network/NetworkSubsystem.js"
|
|
24
|
-
import {
|
|
27
|
+
import { RepoMessage } from "./network/messages.js"
|
|
25
28
|
import { StorageAdapterInterface } from "./storage/StorageAdapterInterface.js"
|
|
26
29
|
import { StorageSubsystem } from "./storage/StorageSubsystem.js"
|
|
27
30
|
import { StorageId } from "./storage/types.js"
|
|
@@ -36,10 +39,20 @@ import type {
|
|
|
36
39
|
DocumentId,
|
|
37
40
|
PeerId,
|
|
38
41
|
} from "./types.js"
|
|
39
|
-
import {
|
|
40
|
-
import {
|
|
41
|
-
|
|
42
|
-
|
|
42
|
+
import { abortable, AbortOptions } from "./helpers/abortable.js"
|
|
43
|
+
import { FindProgress } from "./FindProgress.js"
|
|
44
|
+
|
|
45
|
+
export type FindProgressWithMethods<T> = FindProgress<T> & {
|
|
46
|
+
untilReady: (allowableStates: string[]) => Promise<DocHandle<T>>
|
|
47
|
+
peek: () => FindProgress<T>
|
|
48
|
+
subscribe: (callback: (progress: FindProgress<T>) => void) => () => void
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
export type ProgressSignal<T> = {
|
|
52
|
+
peek: () => FindProgress<T>
|
|
53
|
+
subscribe: (callback: (progress: FindProgress<T>) => void) => () => void
|
|
54
|
+
untilReady: (allowableStates: string[]) => Promise<DocHandle<T>>
|
|
55
|
+
}
|
|
43
56
|
|
|
44
57
|
function randomPeerId() {
|
|
45
58
|
return ("peer-" + Math.random().toString(36).slice(4)) as PeerId
|
|
@@ -58,7 +71,12 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
58
71
|
|
|
59
72
|
/** @hidden */
|
|
60
73
|
networkSubsystem: NetworkSubsystem
|
|
61
|
-
|
|
74
|
+
/** @hidden */
|
|
75
|
+
storageSubsystem?: StorageSubsystem
|
|
76
|
+
|
|
77
|
+
/** The debounce rate is adjustable on the repo. */
|
|
78
|
+
/** @hidden */
|
|
79
|
+
saveDebounceRate = 100
|
|
62
80
|
|
|
63
81
|
#handleCache: Record<DocumentId, DocHandle<any>> = {}
|
|
64
82
|
|
|
@@ -73,7 +91,9 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
73
91
|
/** @hidden */
|
|
74
92
|
peerMetadataByPeerId: Record<PeerId, PeerMetadata> = {}
|
|
75
93
|
|
|
76
|
-
#
|
|
94
|
+
#remoteHeadsSubscriptions = new RemoteHeadsSubscriptions()
|
|
95
|
+
#remoteHeadsGossipingEnabled = false
|
|
96
|
+
#progressCache: Record<DocumentId, FindProgress<any>> = {}
|
|
77
97
|
|
|
78
98
|
constructor({
|
|
79
99
|
storage,
|
|
@@ -85,75 +105,56 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
85
105
|
denylist = [],
|
|
86
106
|
}: RepoConfig = {}) {
|
|
87
107
|
super()
|
|
88
|
-
|
|
89
|
-
// beelayStorage = new InMemoryStorageAdapter()
|
|
90
|
-
storage = new InMemoryStorageAdapter()
|
|
91
|
-
}
|
|
92
|
-
this.#beelay = new A.beelay.Beelay({
|
|
93
|
-
storage,
|
|
94
|
-
peerId,
|
|
95
|
-
requestPolicy: async ({ docId }) => {
|
|
96
|
-
const peers = Array.from(this.networkSubsystem.peers)
|
|
97
|
-
const generousPeers: PeerId[] = []
|
|
98
|
-
for (const peerId of peers) {
|
|
99
|
-
const okToShare = await this.sharePolicy(peerId)
|
|
100
|
-
if (okToShare) generousPeers.push(peerId)
|
|
101
|
-
}
|
|
102
|
-
return generousPeers
|
|
103
|
-
},
|
|
104
|
-
})
|
|
105
|
-
this.storageSubsystem = new StorageSubsystem(this.#beelay, storage)
|
|
108
|
+
this.#remoteHeadsGossipingEnabled = enableRemoteHeadsGossiping
|
|
106
109
|
this.#log = debug(`automerge-repo:repo`)
|
|
107
110
|
this.sharePolicy = sharePolicy ?? this.sharePolicy
|
|
108
111
|
|
|
109
|
-
this
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
targetId: message.recipient as PeerId,
|
|
113
|
-
type: "beelay",
|
|
114
|
-
...message,
|
|
115
|
-
} as MessageContents)
|
|
116
|
-
})
|
|
117
|
-
|
|
118
|
-
this.#beelay.on("docEvent", event => {
|
|
119
|
-
this.#log(`received ${event.data.type} event for ${event.docId}`)
|
|
120
|
-
const handle = this.#handleCache[event.docId as DocumentId]
|
|
121
|
-
if (handle != null) {
|
|
122
|
-
handle.update(d => Automerge.loadIncremental(d, event.data.contents))
|
|
123
|
-
}
|
|
124
|
-
})
|
|
112
|
+
this.on("delete-document", ({ documentId }) => {
|
|
113
|
+
// TODO Pass the delete on to the network
|
|
114
|
+
// synchronizer.removeDocument(documentId)
|
|
125
115
|
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
if (doc == null) {
|
|
130
|
-
console.warn("document not found when creating bundle")
|
|
131
|
-
return
|
|
132
|
-
}
|
|
133
|
-
const bundle = A.saveBundle(doc, start, end)
|
|
134
|
-
this.#beelay.addBundle({
|
|
135
|
-
docId,
|
|
136
|
-
checkpoints,
|
|
137
|
-
start,
|
|
138
|
-
end,
|
|
139
|
-
data: bundle,
|
|
116
|
+
if (storageSubsystem) {
|
|
117
|
+
storageSubsystem.removeDoc(documentId).catch(err => {
|
|
118
|
+
this.#log("error deleting document", { documentId, err })
|
|
140
119
|
})
|
|
141
|
-
}
|
|
120
|
+
}
|
|
142
121
|
})
|
|
143
122
|
|
|
144
123
|
// SYNCHRONIZER
|
|
145
|
-
|
|
124
|
+
// The synchronizer uses the network subsystem to keep documents in sync with peers.
|
|
125
|
+
this.synchronizer = new CollectionSynchronizer(this, denylist)
|
|
146
126
|
|
|
127
|
+
// When the synchronizer emits messages, send them to peers
|
|
147
128
|
this.synchronizer.on("message", message => {
|
|
148
129
|
this.#log(`sending ${message.type} message to ${message.targetId}`)
|
|
149
130
|
networkSubsystem.send(message)
|
|
150
131
|
})
|
|
151
132
|
|
|
133
|
+
// Forward metrics from doc synchronizers
|
|
134
|
+
this.synchronizer.on("metrics", event => this.emit("doc-metrics", event))
|
|
135
|
+
|
|
136
|
+
if (this.#remoteHeadsGossipingEnabled) {
|
|
137
|
+
this.synchronizer.on("open-doc", ({ peerId, documentId }) => {
|
|
138
|
+
this.#remoteHeadsSubscriptions.subscribePeerToDoc(peerId, documentId)
|
|
139
|
+
})
|
|
140
|
+
}
|
|
141
|
+
|
|
142
|
+
// STORAGE
|
|
143
|
+
// The storage subsystem has access to some form of persistence, and deals with save and loading documents.
|
|
144
|
+
const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined
|
|
145
|
+
if (storageSubsystem) {
|
|
146
|
+
storageSubsystem.on("document-loaded", event =>
|
|
147
|
+
this.emit("doc-metrics", { type: "doc-loaded", ...event })
|
|
148
|
+
)
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
this.storageSubsystem = storageSubsystem
|
|
152
|
+
|
|
152
153
|
// NETWORK
|
|
153
154
|
// The network subsystem deals with sending and receiving messages to and from peers.
|
|
154
155
|
|
|
155
156
|
const myPeerMetadata: Promise<PeerMetadata> = (async () => ({
|
|
156
|
-
|
|
157
|
+
storageId: await storageSubsystem?.id(),
|
|
157
158
|
isEphemeral,
|
|
158
159
|
}))()
|
|
159
160
|
|
|
@@ -167,77 +168,174 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
167
168
|
// When we get a new peer, register it with the synchronizer
|
|
168
169
|
networkSubsystem.on("peer", async ({ peerId, peerMetadata }) => {
|
|
169
170
|
this.#log("peer connected", { peerId })
|
|
171
|
+
|
|
170
172
|
if (peerMetadata) {
|
|
171
173
|
this.peerMetadataByPeerId[peerId] = { ...peerMetadata }
|
|
172
174
|
}
|
|
175
|
+
|
|
176
|
+
this.sharePolicy(peerId)
|
|
177
|
+
.then(shouldShare => {
|
|
178
|
+
if (shouldShare && this.#remoteHeadsGossipingEnabled) {
|
|
179
|
+
this.#remoteHeadsSubscriptions.addGenerousPeer(peerId)
|
|
180
|
+
}
|
|
181
|
+
})
|
|
182
|
+
.catch(err => {
|
|
183
|
+
console.log("error in share policy", { err })
|
|
184
|
+
})
|
|
185
|
+
|
|
173
186
|
this.synchronizer.addPeer(peerId)
|
|
174
187
|
})
|
|
175
188
|
|
|
189
|
+
// When a peer disconnects, remove it from the synchronizer
|
|
190
|
+
networkSubsystem.on("peer-disconnected", ({ peerId }) => {
|
|
191
|
+
this.synchronizer.removePeer(peerId)
|
|
192
|
+
this.#remoteHeadsSubscriptions.removePeer(peerId)
|
|
193
|
+
})
|
|
194
|
+
|
|
176
195
|
// Handle incoming messages
|
|
177
196
|
networkSubsystem.on("message", async msg => {
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
197
|
+
this.#receiveMessage(msg)
|
|
198
|
+
})
|
|
199
|
+
|
|
200
|
+
this.synchronizer.on("sync-state", message => {
|
|
201
|
+
this.#saveSyncState(message)
|
|
202
|
+
|
|
203
|
+
const handle = this.#handleCache[message.documentId]
|
|
204
|
+
|
|
205
|
+
const { storageId } = this.peerMetadataByPeerId[message.peerId] || {}
|
|
206
|
+
if (!storageId) {
|
|
207
|
+
return
|
|
208
|
+
}
|
|
209
|
+
|
|
210
|
+
const heads = handle.getRemoteHeads(storageId)
|
|
211
|
+
const haveHeadsChanged =
|
|
212
|
+
message.syncState.theirHeads &&
|
|
213
|
+
(!heads ||
|
|
214
|
+
!headsAreSame(heads, encodeHeads(message.syncState.theirHeads)))
|
|
215
|
+
|
|
216
|
+
if (haveHeadsChanged && message.syncState.theirHeads) {
|
|
217
|
+
handle.setRemoteHeads(
|
|
218
|
+
storageId,
|
|
219
|
+
encodeHeads(message.syncState.theirHeads)
|
|
220
|
+
)
|
|
221
|
+
|
|
222
|
+
if (storageId && this.#remoteHeadsGossipingEnabled) {
|
|
223
|
+
this.#remoteHeadsSubscriptions.handleImmediateRemoteHeadsChanged(
|
|
224
|
+
message.documentId,
|
|
225
|
+
storageId,
|
|
226
|
+
encodeHeads(message.syncState.theirHeads)
|
|
227
|
+
)
|
|
189
228
|
}
|
|
190
|
-
this.#beelay.receiveMessage({
|
|
191
|
-
message: {
|
|
192
|
-
sender: msg.senderId,
|
|
193
|
-
recipient: msg.targetId,
|
|
194
|
-
message: msg.message,
|
|
195
|
-
},
|
|
196
|
-
})
|
|
197
|
-
} else {
|
|
198
|
-
this.#receiveMessage(msg)
|
|
199
229
|
}
|
|
200
230
|
})
|
|
231
|
+
|
|
232
|
+
if (this.#remoteHeadsGossipingEnabled) {
|
|
233
|
+
this.#remoteHeadsSubscriptions.on("notify-remote-heads", message => {
|
|
234
|
+
this.networkSubsystem.send({
|
|
235
|
+
type: "remote-heads-changed",
|
|
236
|
+
targetId: message.targetId,
|
|
237
|
+
documentId: message.documentId,
|
|
238
|
+
newHeads: {
|
|
239
|
+
[message.storageId]: {
|
|
240
|
+
heads: message.heads,
|
|
241
|
+
timestamp: message.timestamp,
|
|
242
|
+
},
|
|
243
|
+
},
|
|
244
|
+
})
|
|
245
|
+
})
|
|
246
|
+
|
|
247
|
+
this.#remoteHeadsSubscriptions.on("change-remote-subs", message => {
|
|
248
|
+
this.#log("change-remote-subs", message)
|
|
249
|
+
for (const peer of message.peers) {
|
|
250
|
+
this.networkSubsystem.send({
|
|
251
|
+
type: "remote-subscription-change",
|
|
252
|
+
targetId: peer,
|
|
253
|
+
add: message.add,
|
|
254
|
+
remove: message.remove,
|
|
255
|
+
})
|
|
256
|
+
}
|
|
257
|
+
})
|
|
258
|
+
|
|
259
|
+
this.#remoteHeadsSubscriptions.on("remote-heads-changed", message => {
|
|
260
|
+
const handle = this.#handleCache[message.documentId]
|
|
261
|
+
handle.setRemoteHeads(message.storageId, message.remoteHeads)
|
|
262
|
+
})
|
|
263
|
+
}
|
|
201
264
|
}
|
|
202
265
|
|
|
203
266
|
// The `document` event is fired by the DocCollection any time we create a new document or look
|
|
204
267
|
// up a document by ID. We listen for it in order to wire up storage and network synchronization.
|
|
205
268
|
#registerHandleWithSubsystems(handle: DocHandle<any>) {
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
269
|
+
const { storageSubsystem } = this
|
|
270
|
+
if (storageSubsystem) {
|
|
271
|
+
// Save when the document changes, but no more often than saveDebounceRate.
|
|
272
|
+
const saveFn = ({ handle, doc }: DocHandleEncodedChangePayload<any>) => {
|
|
273
|
+
void storageSubsystem.saveDoc(handle.documentId, doc)
|
|
210
274
|
}
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
this.#log("document unavailable", { documentId: handle.documentId })
|
|
214
|
-
this.emit("unavailable-document", {
|
|
215
|
-
documentId: handle.documentId,
|
|
216
|
-
})
|
|
217
|
-
})
|
|
218
|
-
|
|
219
|
-
this.synchronizer.addDocument(handle.documentId)
|
|
275
|
+
handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
|
|
276
|
+
}
|
|
220
277
|
|
|
221
|
-
//
|
|
222
|
-
this.
|
|
278
|
+
// Register the document with the synchronizer. This advertises our interest in the document.
|
|
279
|
+
this.synchronizer.addDocument(handle)
|
|
223
280
|
}
|
|
224
281
|
|
|
225
282
|
#receiveMessage(message: RepoMessage) {
|
|
226
283
|
switch (message.type) {
|
|
227
284
|
case "remote-subscription-change":
|
|
285
|
+
if (this.#remoteHeadsGossipingEnabled) {
|
|
286
|
+
this.#remoteHeadsSubscriptions.handleControlMessage(message)
|
|
287
|
+
}
|
|
288
|
+
break
|
|
228
289
|
case "remote-heads-changed":
|
|
290
|
+
if (this.#remoteHeadsGossipingEnabled) {
|
|
291
|
+
this.#remoteHeadsSubscriptions.handleRemoteHeads(message)
|
|
292
|
+
}
|
|
229
293
|
break
|
|
230
294
|
case "sync":
|
|
231
295
|
case "request":
|
|
232
296
|
case "ephemeral":
|
|
233
297
|
case "doc-unavailable":
|
|
234
298
|
this.synchronizer.receiveMessage(message).catch(err => {
|
|
235
|
-
console.
|
|
299
|
+
console.log("error receiving message", { err })
|
|
236
300
|
})
|
|
237
|
-
break
|
|
238
301
|
}
|
|
239
302
|
}
|
|
240
303
|
|
|
304
|
+
#throttledSaveSyncStateHandlers: Record<
|
|
305
|
+
StorageId,
|
|
306
|
+
(payload: SyncStatePayload) => void
|
|
307
|
+
> = {}
|
|
308
|
+
|
|
309
|
+
/** saves sync state throttled per storage id, if a peer doesn't have a storage id it's sync state is not persisted */
|
|
310
|
+
#saveSyncState(payload: SyncStatePayload) {
|
|
311
|
+
if (!this.storageSubsystem) {
|
|
312
|
+
return
|
|
313
|
+
}
|
|
314
|
+
|
|
315
|
+
const { storageId, isEphemeral } =
|
|
316
|
+
this.peerMetadataByPeerId[payload.peerId] || {}
|
|
317
|
+
|
|
318
|
+
if (!storageId || isEphemeral) {
|
|
319
|
+
return
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
let handler = this.#throttledSaveSyncStateHandlers[storageId]
|
|
323
|
+
if (!handler) {
|
|
324
|
+
handler = this.#throttledSaveSyncStateHandlers[storageId] = throttle(
|
|
325
|
+
({ documentId, syncState }: SyncStatePayload) => {
|
|
326
|
+
void this.storageSubsystem!.saveSyncState(
|
|
327
|
+
documentId,
|
|
328
|
+
storageId,
|
|
329
|
+
syncState
|
|
330
|
+
)
|
|
331
|
+
},
|
|
332
|
+
this.saveDebounceRate
|
|
333
|
+
)
|
|
334
|
+
}
|
|
335
|
+
|
|
336
|
+
handler(payload)
|
|
337
|
+
}
|
|
338
|
+
|
|
241
339
|
/** Returns an existing handle if we have it; creates one otherwise. */
|
|
242
340
|
#getHandle<T>({
|
|
243
341
|
documentId,
|
|
@@ -262,7 +360,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
262
360
|
|
|
263
361
|
/** Returns a list of all connected peer ids */
|
|
264
362
|
get peers(): PeerId[] {
|
|
265
|
-
return this.
|
|
363
|
+
return this.synchronizer.peers
|
|
266
364
|
}
|
|
267
365
|
|
|
268
366
|
getStorageIdOfPeer(peerId: PeerId): StorageId | undefined {
|
|
@@ -281,7 +379,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
281
379
|
documentId,
|
|
282
380
|
}) as DocHandle<T>
|
|
283
381
|
|
|
284
|
-
|
|
382
|
+
this.#registerHandleWithSubsystems(handle)
|
|
285
383
|
|
|
286
384
|
handle.update(() => {
|
|
287
385
|
let nextDoc: Automerge.Doc<T>
|
|
@@ -290,33 +388,9 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
290
388
|
} else {
|
|
291
389
|
nextDoc = Automerge.emptyChange(Automerge.init())
|
|
292
390
|
}
|
|
293
|
-
const patches = A.diff(nextDoc, [], A.getHeads(nextDoc))
|
|
294
|
-
for (const patch of patches) {
|
|
295
|
-
initialLinks = patches
|
|
296
|
-
.map(patch => {
|
|
297
|
-
if (patch.action === "put") {
|
|
298
|
-
if (patch.value instanceof A.Link) {
|
|
299
|
-
return patch.value
|
|
300
|
-
}
|
|
301
|
-
}
|
|
302
|
-
return null
|
|
303
|
-
})
|
|
304
|
-
.filter(v => v != null)
|
|
305
|
-
}
|
|
306
391
|
return nextDoc
|
|
307
392
|
})
|
|
308
393
|
|
|
309
|
-
for (const link of initialLinks) {
|
|
310
|
-
const { documentId: target } = parseAutomergeUrl(
|
|
311
|
-
link.target as AutomergeUrl
|
|
312
|
-
)
|
|
313
|
-
this.#beelay.addLink({ from: documentId, to: target })
|
|
314
|
-
}
|
|
315
|
-
|
|
316
|
-
this.storageSubsystem.saveDoc(handle.documentId, handle.docSync()!)
|
|
317
|
-
|
|
318
|
-
this.#registerHandleWithSubsystems(handle)
|
|
319
|
-
|
|
320
394
|
handle.doneLoading()
|
|
321
395
|
return handle
|
|
322
396
|
}
|
|
@@ -333,8 +407,6 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
333
407
|
* Any peers this `Repo` is connected to for whom `sharePolicy` returns `true` will
|
|
334
408
|
* be notified of the newly created DocHandle.
|
|
335
409
|
*
|
|
336
|
-
* @throws if the cloned handle is not yet ready or if
|
|
337
|
-
* `clonedHandle.docSync()` returns `undefined` (i.e. the handle is unavailable).
|
|
338
410
|
*/
|
|
339
411
|
clone<T>(clonedHandle: DocHandle<T>) {
|
|
340
412
|
if (!clonedHandle.isReady()) {
|
|
@@ -344,11 +416,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
344
416
|
)
|
|
345
417
|
}
|
|
346
418
|
|
|
347
|
-
const sourceDoc = clonedHandle.
|
|
348
|
-
if (!sourceDoc) {
|
|
349
|
-
throw new Error("Cloned handle doesn't have a document.")
|
|
350
|
-
}
|
|
351
|
-
|
|
419
|
+
const sourceDoc = clonedHandle.doc()
|
|
352
420
|
const handle = this.create<T>()
|
|
353
421
|
|
|
354
422
|
handle.update(() => {
|
|
@@ -359,60 +427,267 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
359
427
|
return handle
|
|
360
428
|
}
|
|
361
429
|
|
|
362
|
-
|
|
363
|
-
|
|
364
|
-
|
|
365
|
-
|
|
366
|
-
|
|
367
|
-
|
|
368
|
-
|
|
369
|
-
|
|
370
|
-
this.#log("find", { id })
|
|
371
|
-
const documentId = interpretAsDocumentId(id)
|
|
430
|
+
findWithProgress<T>(
|
|
431
|
+
id: AnyDocumentId,
|
|
432
|
+
options: AbortOptions = {}
|
|
433
|
+
): FindProgressWithMethods<T> | FindProgress<T> {
|
|
434
|
+
const { signal } = options
|
|
435
|
+
const { documentId, heads } = isValidAutomergeUrl(id)
|
|
436
|
+
? parseAutomergeUrl(id)
|
|
437
|
+
: { documentId: interpretAsDocumentId(id), heads: undefined }
|
|
372
438
|
|
|
373
|
-
//
|
|
439
|
+
// Check handle cache first - return plain FindStep for terminal states
|
|
374
440
|
if (this.#handleCache[documentId]) {
|
|
375
|
-
|
|
376
|
-
|
|
377
|
-
|
|
378
|
-
|
|
379
|
-
|
|
380
|
-
|
|
381
|
-
}
|
|
441
|
+
const handle = this.#handleCache[documentId]
|
|
442
|
+
if (handle.state === UNAVAILABLE) {
|
|
443
|
+
const result = {
|
|
444
|
+
state: "unavailable" as const,
|
|
445
|
+
error: new Error(`Document ${id} is unavailable`),
|
|
446
|
+
handle,
|
|
447
|
+
}
|
|
448
|
+
return result
|
|
449
|
+
}
|
|
450
|
+
if (handle.state === DELETED) {
|
|
451
|
+
const result = {
|
|
452
|
+
state: "failed" as const,
|
|
453
|
+
error: new Error(`Document ${id} was deleted`),
|
|
454
|
+
handle,
|
|
455
|
+
}
|
|
456
|
+
return result
|
|
457
|
+
}
|
|
458
|
+
if (handle.state === READY) {
|
|
459
|
+
const result = {
|
|
460
|
+
state: "ready" as const,
|
|
461
|
+
handle: heads ? handle.view(heads) : handle,
|
|
462
|
+
}
|
|
463
|
+
return result
|
|
382
464
|
}
|
|
383
|
-
return this.#handleCache[documentId]
|
|
384
465
|
}
|
|
385
466
|
|
|
386
|
-
//
|
|
387
|
-
const
|
|
467
|
+
// Check progress cache for any existing signal
|
|
468
|
+
const cachedProgress = this.#progressCache[documentId]
|
|
469
|
+
if (cachedProgress) {
|
|
470
|
+
const handle = this.#handleCache[documentId]
|
|
471
|
+
// Return cached progress if we have a handle and it's either in a terminal state or loading
|
|
472
|
+
if (
|
|
473
|
+
handle &&
|
|
474
|
+
(handle.state === READY ||
|
|
475
|
+
handle.state === UNAVAILABLE ||
|
|
476
|
+
handle.state === DELETED ||
|
|
477
|
+
handle.state === "loading")
|
|
478
|
+
) {
|
|
479
|
+
return cachedProgress as FindProgressWithMethods<T>
|
|
480
|
+
}
|
|
481
|
+
}
|
|
482
|
+
|
|
483
|
+
const handle = this.#getHandle<T>({ documentId })
|
|
484
|
+
const initial = {
|
|
485
|
+
state: "loading" as const,
|
|
486
|
+
progress: 0,
|
|
487
|
+
handle,
|
|
488
|
+
}
|
|
489
|
+
|
|
490
|
+
// Create a new progress signal
|
|
491
|
+
const progressSignal = {
|
|
492
|
+
subscribers: new Set<(progress: FindProgress<T>) => void>(),
|
|
493
|
+
currentProgress: undefined as FindProgress<T> | undefined,
|
|
494
|
+
notify: (progress: FindProgress<T>) => {
|
|
495
|
+
progressSignal.currentProgress = progress
|
|
496
|
+
progressSignal.subscribers.forEach(callback => callback(progress))
|
|
497
|
+
// Cache all states, not just terminal ones
|
|
498
|
+
this.#progressCache[documentId] = progress
|
|
499
|
+
},
|
|
500
|
+
peek: () => progressSignal.currentProgress || initial,
|
|
501
|
+
subscribe: (callback: (progress: FindProgress<T>) => void) => {
|
|
502
|
+
progressSignal.subscribers.add(callback)
|
|
503
|
+
return () => progressSignal.subscribers.delete(callback)
|
|
504
|
+
},
|
|
505
|
+
}
|
|
506
|
+
|
|
507
|
+
progressSignal.notify(initial)
|
|
508
|
+
|
|
509
|
+
// Start the loading process
|
|
510
|
+
void this.#loadDocumentWithProgress(
|
|
511
|
+
id,
|
|
388
512
|
documentId,
|
|
389
|
-
|
|
513
|
+
handle,
|
|
514
|
+
progressSignal,
|
|
515
|
+
signal ? abortable(new Promise(() => {}), signal) : new Promise(() => {})
|
|
516
|
+
)
|
|
517
|
+
|
|
518
|
+
const result = {
|
|
519
|
+
...initial,
|
|
520
|
+
peek: progressSignal.peek,
|
|
521
|
+
subscribe: progressSignal.subscribe,
|
|
522
|
+
}
|
|
523
|
+
this.#progressCache[documentId] = result
|
|
524
|
+
return result
|
|
525
|
+
}
|
|
526
|
+
|
|
527
|
+
async #loadDocumentWithProgress<T>(
|
|
528
|
+
id: AnyDocumentId,
|
|
529
|
+
documentId: DocumentId,
|
|
530
|
+
handle: DocHandle<T>,
|
|
531
|
+
progressSignal: {
|
|
532
|
+
notify: (progress: FindProgress<T>) => void
|
|
533
|
+
},
|
|
534
|
+
abortPromise: Promise<never>
|
|
535
|
+
) {
|
|
536
|
+
try {
|
|
537
|
+
progressSignal.notify({
|
|
538
|
+
state: "loading" as const,
|
|
539
|
+
progress: 25,
|
|
540
|
+
handle,
|
|
541
|
+
})
|
|
542
|
+
|
|
543
|
+
const loadingPromise = await (this.storageSubsystem
|
|
544
|
+
? this.storageSubsystem.loadDoc(handle.documentId)
|
|
545
|
+
: Promise.resolve(null))
|
|
546
|
+
|
|
547
|
+
const loadedDoc = await Promise.race([loadingPromise, abortPromise])
|
|
548
|
+
|
|
549
|
+
if (loadedDoc) {
|
|
550
|
+
handle.update(() => loadedDoc as Automerge.Doc<T>)
|
|
551
|
+
handle.doneLoading()
|
|
552
|
+
progressSignal.notify({
|
|
553
|
+
state: "loading" as const,
|
|
554
|
+
progress: 50,
|
|
555
|
+
handle,
|
|
556
|
+
})
|
|
557
|
+
} else {
|
|
558
|
+
await Promise.race([this.networkSubsystem.whenReady(), abortPromise])
|
|
559
|
+
handle.request()
|
|
560
|
+
progressSignal.notify({
|
|
561
|
+
state: "loading" as const,
|
|
562
|
+
progress: 75,
|
|
563
|
+
handle,
|
|
564
|
+
})
|
|
565
|
+
}
|
|
566
|
+
|
|
567
|
+
this.#registerHandleWithSubsystems(handle)
|
|
568
|
+
|
|
569
|
+
await Promise.race([handle.whenReady([READY, UNAVAILABLE]), abortPromise])
|
|
390
570
|
|
|
391
|
-
|
|
392
|
-
|
|
393
|
-
|
|
394
|
-
|
|
395
|
-
attemptLoad
|
|
396
|
-
.then(async loadedDoc => {
|
|
397
|
-
if (loadedDoc) {
|
|
398
|
-
// uhhhh, sorry if you're reading this because we were lying to the type system
|
|
399
|
-
handle.update(() => loadedDoc as Automerge.Doc<T>)
|
|
400
|
-
handle.doneLoading()
|
|
401
|
-
} else {
|
|
402
|
-
// we want to wait for the network subsystem to be ready before
|
|
403
|
-
// we request the document. this prevents entering unavailable during initialization.
|
|
404
|
-
await this.networkSubsystem.whenReady()
|
|
405
|
-
console.log("we didn't find it so we're requesting")
|
|
406
|
-
handle.request()
|
|
571
|
+
if (handle.state === UNAVAILABLE) {
|
|
572
|
+
const unavailableProgress = {
|
|
573
|
+
state: "unavailable" as const,
|
|
574
|
+
handle,
|
|
407
575
|
}
|
|
408
|
-
|
|
576
|
+
progressSignal.notify(unavailableProgress)
|
|
577
|
+
return
|
|
578
|
+
}
|
|
579
|
+
if (handle.state === DELETED) {
|
|
580
|
+
throw new Error(`Document ${id} was deleted`)
|
|
581
|
+
}
|
|
582
|
+
|
|
583
|
+
progressSignal.notify({ state: "ready" as const, handle })
|
|
584
|
+
} catch (error) {
|
|
585
|
+
progressSignal.notify({
|
|
586
|
+
state: "failed" as const,
|
|
587
|
+
error: error instanceof Error ? error : new Error(String(error)),
|
|
588
|
+
handle: this.#getHandle<T>({ documentId }),
|
|
409
589
|
})
|
|
410
|
-
|
|
411
|
-
|
|
590
|
+
}
|
|
591
|
+
}
|
|
592
|
+
|
|
593
|
+
/**
 * Retrieves a document by id, resolving once the handle reaches one of
 * `allowableStates` (default: `["ready"]`).
 *
 * Delegates to `findWithProgress` and, when the returned progress object is
 * subscribable, resolves/rejects from progress notifications; otherwise it
 * falls back to waiting on the handle directly.
 *
 * @throws if the signal is already aborted, if the document turns out to be
 *   unavailable, or if loading fails.
 */
async find<T>(
  id: AnyDocumentId,
  options: RepoFindOptions & AbortOptions = {}
): Promise<DocHandle<T>> {
  const { allowableStates = ["ready"], signal } = options

  // Fail fast if the caller handed us an already-aborted signal.
  if (signal?.aborted) {
    throw new Error("Operation aborted")
  }

  const progress = this.findWithProgress<T>(id, { signal })

  if ("subscribe" in progress) {
    this.#registerHandleWithSubsystems(progress.handle)
    // NOTE(review): subscribe does not appear to replay the current state to
    // new subscribers — presumably a notification always follows subscription
    // here because loading is still in flight; verify for cached-progress paths.
    return new Promise((resolve, reject) => {
      const unsubscribe = progress.subscribe(state => {
        if (allowableStates.includes(state.handle.state)) {
          // Reached a state the caller accepts: hand back the handle.
          unsubscribe()
          resolve(state.handle)
        } else if (state.state === "unavailable") {
          unsubscribe()
          reject(new Error(`Document ${id} is unavailable`))
        } else if (state.state === "failed") {
          unsubscribe()
          reject(state.error)
        }
      })
    })
  } else {
    if (progress.handle.state === READY) {
      return progress.handle
    }
    // If the handle isn't ready, wait for it and then return it
    await progress.handle.whenReady([READY, UNAVAILABLE])
    return progress.handle
  }
}
|
|
631
|
+
|
|
632
|
+
/**
|
|
633
|
+
* Loads a document without waiting for ready state
|
|
634
|
+
*/
|
|
635
|
+
async #loadDocument<T>(documentId: DocumentId): Promise<DocHandle<T>> {
|
|
636
|
+
// If we have the handle cached, return it
|
|
637
|
+
if (this.#handleCache[documentId]) {
|
|
638
|
+
return this.#handleCache[documentId]
|
|
639
|
+
}
|
|
640
|
+
|
|
641
|
+
// If we don't already have the handle, make an empty one and try loading it
|
|
642
|
+
const handle = this.#getHandle<T>({ documentId })
|
|
643
|
+
const loadedDoc = await (this.storageSubsystem
|
|
644
|
+
? this.storageSubsystem.loadDoc(handle.documentId)
|
|
645
|
+
: Promise.resolve(null))
|
|
646
|
+
|
|
647
|
+
if (loadedDoc) {
|
|
648
|
+
// We need to cast this to <T> because loadDoc operates in <unknowns>.
|
|
649
|
+
// This is really where we ought to be validating the input matches <T>.
|
|
650
|
+
handle.update(() => loadedDoc as Automerge.Doc<T>)
|
|
651
|
+
handle.doneLoading()
|
|
652
|
+
} else {
|
|
653
|
+
// Because the network subsystem might still be booting up, we wait
|
|
654
|
+
// here so that we don't immediately give up loading because we're still
|
|
655
|
+
// making our initial connection to a sync server.
|
|
656
|
+
await this.networkSubsystem.whenReady()
|
|
657
|
+
handle.request()
|
|
658
|
+
}
|
|
659
|
+
|
|
660
|
+
this.#registerHandleWithSubsystems(handle)
|
|
413
661
|
return handle
|
|
414
662
|
}
|
|
415
663
|
|
|
664
|
+
/**
|
|
665
|
+
* Retrieves a document by id. It gets data from the local system, but also emits a `document`
|
|
666
|
+
* event to advertise interest in the document.
|
|
667
|
+
*/
|
|
668
|
+
async findClassic<T>(
|
|
669
|
+
/** The url or documentId of the handle to retrieve */
|
|
670
|
+
id: AnyDocumentId,
|
|
671
|
+
options: RepoFindOptions & AbortOptions = {}
|
|
672
|
+
): Promise<DocHandle<T>> {
|
|
673
|
+
const documentId = interpretAsDocumentId(id)
|
|
674
|
+
const { allowableStates, signal } = options
|
|
675
|
+
|
|
676
|
+
return abortable(
|
|
677
|
+
(async () => {
|
|
678
|
+
const handle = await this.#loadDocument<T>(documentId)
|
|
679
|
+
if (!allowableStates) {
|
|
680
|
+
await handle.whenReady([READY, UNAVAILABLE])
|
|
681
|
+
if (handle.state === UNAVAILABLE && !signal?.aborted) {
|
|
682
|
+
throw new Error(`Document ${id} is unavailable`)
|
|
683
|
+
}
|
|
684
|
+
}
|
|
685
|
+
return handle
|
|
686
|
+
})(),
|
|
687
|
+
signal
|
|
688
|
+
)
|
|
689
|
+
}
|
|
690
|
+
|
|
416
691
|
delete(
|
|
417
692
|
/** The url or documentId of the handle to delete */
|
|
418
693
|
id: AnyDocumentId
|
|
@@ -423,6 +698,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
423
698
|
handle.delete()
|
|
424
699
|
|
|
425
700
|
delete this.#handleCache[documentId]
|
|
701
|
+
delete this.#progressCache[documentId]
|
|
426
702
|
this.emit("delete-document", { documentId })
|
|
427
703
|
}
|
|
428
704
|
|
|
@@ -437,8 +713,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
437
713
|
const documentId = interpretAsDocumentId(id)
|
|
438
714
|
|
|
439
715
|
const handle = this.#getHandle({ documentId })
|
|
440
|
-
const doc =
|
|
441
|
-
if (!doc) return undefined
|
|
716
|
+
const doc = handle.doc()
|
|
442
717
|
return Automerge.save(doc)
|
|
443
718
|
}
|
|
444
719
|
|
|
@@ -458,7 +733,16 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
458
733
|
return handle
|
|
459
734
|
}
|
|
460
735
|
|
|
461
|
-
subscribeToRemotes = (remotes: StorageId[]) => {
|
|
736
|
+
subscribeToRemotes = (remotes: StorageId[]) => {
|
|
737
|
+
if (this.#remoteHeadsGossipingEnabled) {
|
|
738
|
+
this.#log("subscribeToRemotes", { remotes })
|
|
739
|
+
this.#remoteHeadsSubscriptions.subscribeToRemotes(remotes)
|
|
740
|
+
} else {
|
|
741
|
+
this.#log(
|
|
742
|
+
"WARN: subscribeToRemotes called but remote heads gossiping is not enabled"
|
|
743
|
+
)
|
|
744
|
+
}
|
|
745
|
+
}
|
|
462
746
|
|
|
463
747
|
storageId = async (): Promise<StorageId | undefined> => {
|
|
464
748
|
if (!this.storageSubsystem) {
|
|
@@ -483,11 +767,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
483
767
|
: Object.values(this.#handleCache)
|
|
484
768
|
await Promise.all(
|
|
485
769
|
handles.map(async handle => {
|
|
486
|
-
|
|
487
|
-
if (!doc) {
|
|
488
|
-
return
|
|
489
|
-
}
|
|
490
|
-
return this.storageSubsystem!.saveDoc(handle.documentId, doc)
|
|
770
|
+
return this.storageSubsystem!.saveDoc(handle.documentId, handle.doc())
|
|
491
771
|
})
|
|
492
772
|
)
|
|
493
773
|
}
|
|
@@ -506,7 +786,9 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
506
786
|
return
|
|
507
787
|
}
|
|
508
788
|
const handle = this.#getHandle({ documentId })
|
|
509
|
-
|
|
789
|
+
await handle.whenReady([READY, UNLOADED, DELETED, UNAVAILABLE])
|
|
790
|
+
const doc = handle.doc()
|
|
791
|
+
// because this is an internal-ish function, we'll be extra careful about undefined docs here
|
|
510
792
|
if (doc) {
|
|
511
793
|
if (handle.isReady()) {
|
|
512
794
|
handle.unload()
|
|
@@ -533,8 +815,7 @@ export class Repo extends EventEmitter<RepoEvents> {
|
|
|
533
815
|
}
|
|
534
816
|
|
|
535
817
|
/** Returns per-document metrics as reported by the synchronizer. */
metrics(): { documents: { [key: string]: any } } {
  const documents = this.synchronizer.metrics()
  return { documents }
}
|
|
539
820
|
}
|
|
540
821
|
|
|
@@ -595,6 +876,10 @@ export interface RepoEvents {
|
|
|
595
876
|
"doc-metrics": (arg: DocMetrics) => void
|
|
596
877
|
}
|
|
597
878
|
|
|
879
|
+
/** Options controlling when a `find` / `findClassic` call resolves. */
export interface RepoFindOptions {
  /**
   * Handle states that satisfy the find. When omitted, find waits for
   * "ready" and rejects on "unavailable".
   * NOTE(review): presumably these are DocHandle state names — a narrower
   * union type than string[] would be safer; confirm against HandleState.
   */
  allowableStates?: string[]
}
|
|
882
|
+
|
|
598
883
|
/**
 * Payload carrying a document handle.
 * NOTE(review): presumably emitted with the repo's "document" event to
 * advertise interest in a document — confirm against RepoEvents.
 */
export interface DocumentPayload {
  handle: DocHandle<any>
}
|