@automerge/automerge-repo 2.0.0-alpha.6 → 2.0.0-collectionsync-alpha.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/CollectionHandle.d.ts +14 -0
- package/dist/CollectionHandle.d.ts.map +1 -0
- package/dist/CollectionHandle.js +37 -0
- package/dist/DocHandle.d.ts +67 -2
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +113 -2
- package/dist/DocUrl.d.ts +47 -0
- package/dist/DocUrl.d.ts.map +1 -0
- package/dist/DocUrl.js +72 -0
- package/dist/EphemeralData.d.ts +20 -0
- package/dist/EphemeralData.d.ts.map +1 -0
- package/dist/EphemeralData.js +1 -0
- package/dist/Repo.d.ts +28 -7
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +142 -143
- package/dist/ferigan.d.ts +51 -0
- package/dist/ferigan.d.ts.map +1 -0
- package/dist/ferigan.js +98 -0
- package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +19 -39
- package/dist/index.d.ts +2 -0
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -0
- package/dist/network/NetworkSubsystem.d.ts +1 -0
- package/dist/network/NetworkSubsystem.d.ts.map +1 -1
- package/dist/network/NetworkSubsystem.js +3 -0
- package/dist/network/messages.d.ts +7 -1
- package/dist/network/messages.d.ts.map +1 -1
- package/dist/network/messages.js +2 -1
- package/dist/src/DocHandle.d.ts +182 -0
- package/dist/src/DocHandle.d.ts.map +1 -0
- package/dist/src/DocHandle.js +405 -0
- package/dist/src/DocUrl.d.ts +49 -0
- package/dist/src/DocUrl.d.ts.map +1 -0
- package/dist/src/DocUrl.js +72 -0
- package/dist/src/EphemeralData.d.ts +19 -0
- package/dist/src/EphemeralData.d.ts.map +1 -0
- package/dist/src/EphemeralData.js +1 -0
- package/dist/src/Repo.d.ts +74 -0
- package/dist/src/Repo.d.ts.map +1 -0
- package/dist/src/Repo.js +208 -0
- package/dist/src/helpers/arraysAreEqual.d.ts +2 -0
- package/dist/src/helpers/arraysAreEqual.d.ts.map +1 -0
- package/dist/src/helpers/arraysAreEqual.js +2 -0
- package/dist/src/helpers/cbor.d.ts +4 -0
- package/dist/src/helpers/cbor.d.ts.map +1 -0
- package/dist/src/helpers/cbor.js +8 -0
- package/dist/src/helpers/eventPromise.d.ts +11 -0
- package/dist/src/helpers/eventPromise.d.ts.map +1 -0
- package/dist/src/helpers/eventPromise.js +7 -0
- package/dist/src/helpers/headsAreSame.d.ts +2 -0
- package/dist/src/helpers/headsAreSame.d.ts.map +1 -0
- package/dist/src/helpers/headsAreSame.js +4 -0
- package/dist/src/helpers/mergeArrays.d.ts +2 -0
- package/dist/src/helpers/mergeArrays.d.ts.map +1 -0
- package/dist/src/helpers/mergeArrays.js +15 -0
- package/dist/src/helpers/pause.d.ts +6 -0
- package/dist/src/helpers/pause.d.ts.map +1 -0
- package/dist/src/helpers/pause.js +10 -0
- package/dist/src/helpers/tests/network-adapter-tests.d.ts +21 -0
- package/dist/src/helpers/tests/network-adapter-tests.d.ts.map +1 -0
- package/dist/src/helpers/tests/network-adapter-tests.js +122 -0
- package/dist/src/helpers/withTimeout.d.ts +12 -0
- package/dist/src/helpers/withTimeout.d.ts.map +1 -0
- package/dist/src/helpers/withTimeout.js +24 -0
- package/dist/src/index.d.ts +53 -0
- package/dist/src/index.d.ts.map +1 -0
- package/dist/src/index.js +40 -0
- package/dist/src/network/NetworkAdapter.d.ts +26 -0
- package/dist/src/network/NetworkAdapter.d.ts.map +1 -0
- package/dist/src/network/NetworkAdapter.js +4 -0
- package/dist/src/network/NetworkSubsystem.d.ts +23 -0
- package/dist/src/network/NetworkSubsystem.d.ts.map +1 -0
- package/dist/src/network/NetworkSubsystem.js +120 -0
- package/dist/src/network/messages.d.ts +85 -0
- package/dist/src/network/messages.d.ts.map +1 -0
- package/dist/src/network/messages.js +23 -0
- package/dist/src/storage/StorageAdapter.d.ts +14 -0
- package/dist/src/storage/StorageAdapter.d.ts.map +1 -0
- package/dist/src/storage/StorageAdapter.js +1 -0
- package/dist/src/storage/StorageSubsystem.d.ts +12 -0
- package/dist/src/storage/StorageSubsystem.d.ts.map +1 -0
- package/dist/src/storage/StorageSubsystem.js +145 -0
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts +25 -0
- package/dist/src/synchronizer/CollectionSynchronizer.d.ts.map +1 -0
- package/dist/src/synchronizer/CollectionSynchronizer.js +106 -0
- package/dist/src/synchronizer/DocSynchronizer.d.ts +29 -0
- package/dist/src/synchronizer/DocSynchronizer.d.ts.map +1 -0
- package/dist/src/synchronizer/DocSynchronizer.js +263 -0
- package/dist/src/synchronizer/Synchronizer.d.ts +9 -0
- package/dist/src/synchronizer/Synchronizer.d.ts.map +1 -0
- package/dist/src/synchronizer/Synchronizer.js +2 -0
- package/dist/src/types.d.ts +16 -0
- package/dist/src/types.d.ts.map +1 -0
- package/dist/src/types.js +1 -0
- package/dist/storage/StorageAdapter.d.ts +9 -0
- package/dist/storage/StorageAdapter.d.ts.map +1 -1
- package/dist/storage/StorageAdapter.js +33 -0
- package/dist/storage/StorageSubsystem.d.ts +12 -2
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +42 -100
- package/dist/synchronizer/CollectionSynchronizer.d.ts +4 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +28 -15
- package/dist/synchronizer/DocSynchronizer.d.ts +6 -5
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +76 -178
- package/dist/synchronizer/Synchronizer.d.ts +11 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/test/CollectionSynchronizer.test.d.ts +2 -0
- package/dist/test/CollectionSynchronizer.test.d.ts.map +1 -0
- package/dist/test/CollectionSynchronizer.test.js +57 -0
- package/dist/test/DocHandle.test.d.ts +2 -0
- package/dist/test/DocHandle.test.d.ts.map +1 -0
- package/dist/test/DocHandle.test.js +238 -0
- package/dist/test/DocSynchronizer.test.d.ts +2 -0
- package/dist/test/DocSynchronizer.test.d.ts.map +1 -0
- package/dist/test/DocSynchronizer.test.js +111 -0
- package/dist/test/Network.test.d.ts +2 -0
- package/dist/test/Network.test.d.ts.map +1 -0
- package/dist/test/Network.test.js +11 -0
- package/dist/test/Repo.test.d.ts +2 -0
- package/dist/test/Repo.test.d.ts.map +1 -0
- package/dist/test/Repo.test.js +568 -0
- package/dist/test/StorageSubsystem.test.d.ts +2 -0
- package/dist/test/StorageSubsystem.test.d.ts.map +1 -0
- package/dist/test/StorageSubsystem.test.js +56 -0
- package/dist/test/helpers/DummyNetworkAdapter.d.ts +9 -0
- package/dist/test/helpers/DummyNetworkAdapter.d.ts.map +1 -0
- package/dist/test/helpers/DummyNetworkAdapter.js +15 -0
- package/dist/test/helpers/DummyStorageAdapter.d.ts +16 -0
- package/dist/test/helpers/DummyStorageAdapter.d.ts.map +1 -0
- package/dist/test/helpers/DummyStorageAdapter.js +33 -0
- package/dist/test/helpers/generate-large-object.d.ts +5 -0
- package/dist/test/helpers/generate-large-object.d.ts.map +1 -0
- package/dist/test/helpers/generate-large-object.js +9 -0
- package/dist/test/helpers/getRandomItem.d.ts +2 -0
- package/dist/test/helpers/getRandomItem.d.ts.map +1 -0
- package/dist/test/helpers/getRandomItem.js +4 -0
- package/dist/test/types.d.ts +4 -0
- package/dist/test/types.d.ts.map +1 -0
- package/dist/test/types.js +1 -0
- package/package.json +3 -3
- package/src/CollectionHandle.ts +54 -0
- package/src/DocHandle.ts +133 -4
- package/src/Repo.ts +192 -183
- package/src/ferigan.ts +184 -0
- package/src/helpers/tests/storage-adapter-tests.ts +31 -62
- package/src/index.ts +2 -0
- package/src/network/NetworkSubsystem.ts +4 -0
- package/src/network/messages.ts +11 -2
- package/src/storage/StorageAdapter.ts +42 -0
- package/src/storage/StorageSubsystem.ts +59 -119
- package/src/synchronizer/CollectionSynchronizer.ts +34 -26
- package/src/synchronizer/DocSynchronizer.ts +84 -231
- package/src/synchronizer/Synchronizer.ts +14 -0
- package/test/CollectionSynchronizer.test.ts +4 -2
- package/test/DocHandle.test.ts +141 -0
- package/test/DocSynchronizer.test.ts +6 -1
- package/test/RemoteHeadsSubscriptions.test.ts +1 -1
- package/test/Repo.test.ts +225 -117
- package/test/StorageSubsystem.test.ts +20 -16
- package/test/remoteHeads.test.ts +1 -1
package/dist/src/synchronizer/DocSynchronizer.js
@@ -0,0 +1,263 @@
+import * as A from "@automerge/automerge/next"
+import { READY, REQUESTING, UNAVAILABLE } from "../DocHandle.js"
+import { Synchronizer } from "./Synchronizer.js"
+import debug from "debug"
+import { isRequestMessage } from "../network/messages.js"
+import { decode } from "cbor-x"
+/**
+ * DocSynchronizer takes a handle to an Automerge document, and receives & dispatches sync messages
+ * to bring it in line with all other peers' versions.
+ */
+export class DocSynchronizer extends Synchronizer {
+  handle
+  #log
+  #conciseLog
+  #opsLog
+  /** Active peers */
+  #peers = []
+  #peerDocumentStatuses = {}
+  /** Sync state for each peer we've communicated with (including inactive peers) */
+  #syncStates = {}
+  #pendingSyncMessages = []
+  #syncStarted = false
+  constructor(handle) {
+    super()
+    this.handle = handle
+    const docId = handle.documentId.slice(0, 5)
+    this.#conciseLog = debug(`automerge-repo:concise:docsync:${docId}`) // Only logs one line per receive/send
+    this.#log = debug(`automerge-repo:docsync:${docId}`)
+    this.#opsLog = debug(`automerge-repo:ops:docsync:${docId}`) // Log list of ops of each message
+    handle.on("change", () => this.#syncWithPeers())
+    handle.on("ephemeral-message-outbound", payload =>
+      this.#broadcastToPeers(payload)
+    )
+    // Process pending sync messages immediately after the handle becomes ready.
+    void (async () => {
+      await handle.doc([READY, REQUESTING])
+      this.#processAllPendingSyncMessages()
+    })()
+  }
+  get peerStates() {
+    return this.#peerDocumentStatuses
+  }
+  get documentId() {
+    return this.handle.documentId
+  }
+  /// PRIVATE
+  async #syncWithPeers() {
+    this.#log(`syncWithPeers`)
+    const doc = await this.handle.doc()
+    if (doc === undefined) return
+    this.#peers.forEach(peerId => this.#sendSyncMessage(peerId, doc))
+  }
+  async #broadcastToPeers({ data }) {
+    this.#log(`broadcastToPeers`, this.#peers)
+    this.#peers.forEach(peerId => this.#sendEphemeralMessage(peerId, data))
+  }
+  #sendEphemeralMessage(peerId, data) {
+    this.#log(`sendEphemeralMessage ->${peerId}`)
+    this.emit("message", {
+      type: "ephemeral",
+      targetId: peerId,
+      documentId: this.handle.documentId,
+      data,
+    })
+  }
+  #getSyncState(peerId) {
+    if (!this.#peers.includes(peerId)) {
+      this.#log("adding a new peer", peerId)
+      this.#peers.push(peerId)
+    }
+    // when a peer is added, we don't know if it has the document or not
+    if (!(peerId in this.#peerDocumentStatuses)) {
+      this.#peerDocumentStatuses[peerId] = "unknown"
+    }
+    return this.#syncStates[peerId] ?? A.initSyncState()
+  }
+  #setSyncState(peerId, syncState) {
+    // TODO: we maybe should be persisting sync states. But we want to be careful about how often we
+    // do that, because it can generate a lot of disk activity.
+    // TODO: we only need to do this on reconnect
+    this.#syncStates[peerId] = syncState
+  }
+  #sendSyncMessage(peerId, doc) {
+    this.#log(`sendSyncMessage ->${peerId}`)
+    const syncState = this.#getSyncState(peerId)
+    const [newSyncState, message] = A.generateSyncMessage(doc, syncState)
+    this.#setSyncState(peerId, newSyncState)
+    if (message) {
+      this.#logMessage(`sendSyncMessage 🡒 ${peerId}`, message)
+      const decoded = A.decodeSyncMessage(message)
+      if (
+        !this.handle.isReady() &&
+        decoded.heads.length === 0 &&
+        newSyncState.sharedHeads.length === 0 &&
+        !Object.values(this.#peerDocumentStatuses).includes("has") &&
+        this.#peerDocumentStatuses[peerId] === "unknown"
+      ) {
+        // we don't have the document (or access to it), so we request it
+        this.emit("message", {
+          type: "request",
+          targetId: peerId,
+          documentId: this.handle.documentId,
+          data: message,
+        })
+      } else {
+        this.emit("message", {
+          type: "sync",
+          targetId: peerId,
+          data: message,
+          documentId: this.handle.documentId,
+        })
+      }
+      // if we have sent heads, then the peer now has or will have the document
+      if (decoded.heads.length > 0) {
+        this.#peerDocumentStatuses[peerId] = "has"
+      }
+    }
+  }
+  #logMessage = (label, message) => {
+    // This is real expensive...
+    return
+    const size = message.byteLength
+    const logText = `${label} ${size}b`
+    const decoded = A.decodeSyncMessage(message)
+    this.#conciseLog(logText)
+    this.#log(logText, decoded)
+    // expanding is expensive, so only do it if we're logging at this level
+    const expanded = this.#opsLog.enabled
+      ? decoded.changes.flatMap(change =>
+          A.decodeChange(change).ops.map(op => JSON.stringify(op))
+        )
+      : null
+    this.#opsLog(logText, expanded)
+  }
+  /// PUBLIC
+  hasPeer(peerId) {
+    return this.#peers.includes(peerId)
+  }
+  beginSync(peerIds) {
+    this.#log(`beginSync: ${peerIds.join(", ")}`)
+    // HACK: if we have a sync state already, we round-trip it through the encoding system to make
+    // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
+    // messages during disconnection.
+    // TODO: cover that case with a test and remove this hack
+    peerIds.forEach(peerId => {
+      const syncStateRaw = this.#getSyncState(peerId)
+      const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
+      this.#setSyncState(peerId, syncState)
+    })
+    // At this point if we don't have anything in our storage, we need to use an empty doc to sync
+    // with; but we don't want to surface that state to the front end
+    void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
+      // we register our peers first, then say that sync has started
+      this.#syncStarted = true
+      this.#checkDocUnavailable()
+      if (doc === undefined) return
+      peerIds.forEach(peerId => {
+        this.#sendSyncMessage(peerId, doc)
+      })
+    })
+  }
+  endSync(peerId) {
+    this.#log(`removing peer ${peerId}`)
+    this.#peers = this.#peers.filter(p => p !== peerId)
+  }
+  receiveMessage(message) {
+    switch (message.type) {
+      case "sync":
+      case "request":
+        this.receiveSyncMessage(message)
+        break
+      case "ephemeral":
+        this.receiveEphemeralMessage(message)
+        break
+      case "doc-unavailable":
+        this.#peerDocumentStatuses[message.senderId] = "unavailable"
+        this.#checkDocUnavailable()
+        break
+      default:
+        throw new Error(`unknown message type: ${message}`)
+    }
+  }
+  receiveEphemeralMessage(message) {
+    if (message.documentId !== this.handle.documentId)
+      throw new Error(`channelId doesn't match documentId`)
+    const { senderId, data } = message
+    const contents = decode(data)
+    this.handle.emit("ephemeral-message", {
+      handle: this.handle,
+      senderId,
+      message: contents,
+    })
+    this.#peers.forEach(peerId => {
+      if (peerId === senderId) return
+      this.emit("message", {
+        ...message,
+        targetId: peerId,
+      })
+    })
+  }
+  receiveSyncMessage(message) {
+    if (message.documentId !== this.handle.documentId)
+      throw new Error(`channelId doesn't match documentId`)
+    // We need to block receiving the syncMessages until we've checked local storage
+    if (!this.handle.inState([READY, REQUESTING, UNAVAILABLE])) {
+      this.#pendingSyncMessages.push(message)
+      return
+    }
+    this.#processAllPendingSyncMessages()
+    this.#processSyncMessage(message)
+  }
+  #processSyncMessage(message) {
+    if (isRequestMessage(message)) {
+      this.#peerDocumentStatuses[message.senderId] = "wants"
+    }
+    this.#checkDocUnavailable()
+    // if the message has heads, then the peer has the document
+    if (A.decodeSyncMessage(message.data).heads.length > 0) {
+      this.#peerDocumentStatuses[message.senderId] = "has"
+    }
+    this.handle.update(doc => {
+      const [newDoc, newSyncState] = A.receiveSyncMessage(
+        doc,
+        this.#getSyncState(message.senderId),
+        message.data
+      )
+      this.#setSyncState(message.senderId, newSyncState)
+      // respond to just this peer (as required)
+      this.#sendSyncMessage(message.senderId, doc)
+      return newDoc
+    })
+    this.#checkDocUnavailable()
+  }
+  #checkDocUnavailable() {
+    // if we know none of the peers have the document, tell all our peers that we don't either
+    if (
+      this.#syncStarted &&
+      this.handle.inState([REQUESTING]) &&
+      this.#peers.every(
+        peerId =>
+          this.#peerDocumentStatuses[peerId] === "unavailable" ||
+          this.#peerDocumentStatuses[peerId] === "wants"
+      )
+    ) {
+      this.#peers
+        .filter(peerId => this.#peerDocumentStatuses[peerId] === "wants")
+        .forEach(peerId => {
+          this.emit("message", {
+            type: "doc-unavailable",
+            documentId: this.handle.documentId,
+            targetId: peerId,
+          })
+        })
+      this.handle.unavailable()
+    }
+  }
+  #processAllPendingSyncMessages() {
+    for (const message of this.#pendingSyncMessages) {
+      this.#processSyncMessage(message)
+    }
+    this.#pendingSyncMessages = []
+  }
+}
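
For orientation, not part of the published diff: the request-versus-sync branch in #sendSyncMessage above reduces to the peer-status bookkeeping sketched below in TypeScript. The statuses are the ones the class actually stores; the function name shouldRequest is illustrative only.

// Statuses the synchronizer records per peer.
type PeerDocumentStatus = "unknown" | "has" | "unavailable" | "wants"

// A "request" (rather than a plain "sync") goes out only while we have nothing
// ourselves: handle not ready, no heads to offer, no shared heads yet, and no
// peer known to have the document.
function shouldRequest(
  handleIsReady: boolean,
  outgoingHeadCount: number,
  sharedHeadCount: number,
  statuses: Record<string, PeerDocumentStatus>,
  peerId: string
): boolean {
  return (
    !handleIsReady &&
    outgoingHeadCount === 0 &&
    sharedHeadCount === 0 &&
    !Object.values(statuses).includes("has") &&
    statuses[peerId] === "unknown"
  )
}

Once heads flow in either direction the peer's status flips to "has", which is what lets #checkDocUnavailable decide that a document genuinely exists nowhere in the swarm.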
package/dist/src/synchronizer/Synchronizer.d.ts
@@ -0,0 +1,9 @@
+import { EventEmitter } from "eventemitter3"
+import { Message, MessageContents } from "../network/messages.js"
+export declare abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
+    abstract receiveMessage(message: Message): void
+}
+export interface SynchronizerEvents {
+    message: (arg: MessageContents) => void
+}
+//# sourceMappingURL=Synchronizer.d.ts.map
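
As a sketch of the contract above: a subclass only has to implement receiveMessage for inbound traffic and emit "message" events for outbound traffic. The Message and MessageContents shapes below are simplified stand-ins for the package's real types.

import { EventEmitter } from "eventemitter3"

type Message = { type: string; senderId: string }
type MessageContents = { type: string; targetId: string }

abstract class Synchronizer extends EventEmitter<{
  message: (arg: MessageContents) => void
}> {
  abstract receiveMessage(message: Message): void
}

class EchoSynchronizer extends Synchronizer {
  receiveMessage(message: Message): void {
    // Inbound messages arrive here; outbound ones are emitted as "message"
    // events for the repo's network layer to deliver.
    this.emit("message", { type: "sync", targetId: message.senderId })
  }
}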
package/dist/src/synchronizer/Synchronizer.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,OAAO,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAA;AAEjE,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,IAAI;CAChD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;CACxC"}
package/dist/src/types.d.ts
@@ -0,0 +1,16 @@
+export type DocumentId = string & {
+    __documentId: true
+}
+export type AutomergeUrl = string & {
+    __documentUrl: true
+}
+export type BinaryDocumentId = Uint8Array & {
+    __binaryDocumentId: true
+}
+export type PeerId = string & {
+    __peerId: false
+}
+export type DistributiveOmit<T, K extends keyof any> = T extends any
+    ? Omit<T, K>
+    : never
+//# sourceMappingURL=types.d.ts.map
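
These are "branded" types: the intersection property exists only at the type level, so two aliases of string cannot be mixed up by accident. A small illustration (the literal values are made up):

type DocumentId = string & { __documentId: true }
type AutomergeUrl = string & { __documentUrl: true }

declare function find(id: DocumentId): void

const url = "automerge:2akvofn6L1o4" as AutomergeUrl
const id = "2akvofn6L1o4" as DocumentId
find(id)
// find(url) // compile error: AutomergeUrl is not assignable to DocumentId

The explicit `as` cast is only needed at the boundary where a raw string enters the system; everywhere else the compiler keeps the two id spaces apart for free.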
package/dist/src/types.d.ts.map
@@ -0,0 +1 @@
+{"version":3,"file":"types.d.ts","sourceRoot":"","sources":["../../src/types.ts"],"names":[],"mappings":"AAAA,MAAM,MAAM,UAAU,GAAG,MAAM,GAAG;IAAE,YAAY,EAAE,IAAI,CAAA;CAAE,CAAA;AACxD,MAAM,MAAM,YAAY,GAAG,MAAM,GAAG;IAAE,aAAa,EAAE,IAAI,CAAA;CAAE,CAAA;AAC3D,MAAM,MAAM,gBAAgB,GAAG,UAAU,GAAG;IAAE,kBAAkB,EAAE,IAAI,CAAA;CAAE,CAAA;AAExE,MAAM,MAAM,MAAM,GAAG,MAAM,GAAG;IAAE,QAAQ,EAAE,KAAK,CAAA;CAAE,CAAA;AAEjD,MAAM,MAAM,gBAAgB,CAAC,CAAC,EAAE,CAAC,SAAS,MAAM,GAAG,IAAI,CAAC,SAAS,GAAG,GAChE,IAAI,CAAC,CAAC,EAAE,CAAC,CAAC,GACV,KAAK,CAAA"}
package/dist/src/types.js
@@ -0,0 +1 @@
+export {}
package/dist/storage/StorageAdapter.d.ts
@@ -29,4 +29,13 @@ export declare abstract class StorageAdapter implements StorageAdapterInterface
     /** Remove all values with keys that start with `keyPrefix` */
     abstract removeRange(keyPrefix: StorageKey): Promise<void>;
 }
+export declare class InMemoryStorageAdapter extends StorageAdapter {
+    #private;
+    loadRange(keyPrefix: StorageKey): Promise<Chunk[]>;
+    removeRange(keyPrefix: string[]): Promise<void>;
+    load(key: string[]): Promise<Uint8Array | undefined>;
+    save(key: string[], binary: Uint8Array): Promise<void>;
+    remove(key: string[]): Promise<void>;
+    keys(): string[];
+}
 //# sourceMappingURL=StorageAdapter.d.ts.map
package/dist/storage/StorageAdapter.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"StorageAdapter.d.ts","sourceRoot":"","sources":["../../src/storage/StorageAdapter.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAA;AACtE,OAAO,EAAE,UAAU,EAAE,KAAK,EAAE,MAAM,YAAY,CAAA;AAE9C;;;;;;GAMG;AACH,8BAAsB,cAAe,YAAW,uBAAuB;IACrE,mDAAmD;IACnD,QAAQ,CAAC,IAAI,CAAC,GAAG,EAAE,UAAU,GAAG,OAAO,CAAC,UAAU,GAAG,SAAS,CAAC;IAE/D,6CAA6C;IAC7C,QAAQ,CAAC,IAAI,CAAC,GAAG,EAAE,UAAU,EAAE,IAAI,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAE/D,8CAA8C;IAC9C,QAAQ,CAAC,MAAM,CAAC,GAAG,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAE/C;;;;;;;;;;OAUG;IACH,QAAQ,CAAC,SAAS,CAAC,SAAS,EAAE,UAAU,GAAG,OAAO,CAAC,KAAK,EAAE,CAAC;IAE3D,8DAA8D;IAC9D,QAAQ,CAAC,WAAW,CAAC,SAAS,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;CAC3D"}
+{"version":3,"file":"StorageAdapter.d.ts","sourceRoot":"","sources":["../../src/storage/StorageAdapter.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAA;AACtE,OAAO,EAAE,UAAU,EAAE,KAAK,EAAE,MAAM,YAAY,CAAA;AAE9C;;;;;;GAMG;AACH,8BAAsB,cAAe,YAAW,uBAAuB;IACrE,mDAAmD;IACnD,QAAQ,CAAC,IAAI,CAAC,GAAG,EAAE,UAAU,GAAG,OAAO,CAAC,UAAU,GAAG,SAAS,CAAC;IAE/D,6CAA6C;IAC7C,QAAQ,CAAC,IAAI,CAAC,GAAG,EAAE,UAAU,EAAE,IAAI,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAE/D,8CAA8C;IAC9C,QAAQ,CAAC,MAAM,CAAC,GAAG,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;IAE/C;;;;;;;;;;OAUG;IACH,QAAQ,CAAC,SAAS,CAAC,SAAS,EAAE,UAAU,GAAG,OAAO,CAAC,KAAK,EAAE,CAAC;IAE3D,8DAA8D;IAC9D,QAAQ,CAAC,WAAW,CAAC,SAAS,EAAE,UAAU,GAAG,OAAO,CAAC,IAAI,CAAC;CAC3D;AAED,qBAAa,sBAAuB,SAAQ,cAAc;;IAWlD,SAAS,CAAC,SAAS,EAAE,UAAU,GAAG,OAAO,CAAC,KAAK,EAAE,CAAC;IAOlD,WAAW,CAAC,SAAS,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,IAAI,CAAC;IAM/C,IAAI,CAAC,GAAG,EAAE,MAAM,EAAE,GAAG,OAAO,CAAC,UAAU,GAAG,SAAS,CAAC;IAIpD,IAAI,CAAC,GAAG,EAAE,MAAM,EAAE,EAAE,MAAM,EAAE,UAAU;IAKtC,MAAM,CAAC,GAAG,EAAE,MAAM,EAAE;IAI1B,IAAI;CAGL"}
package/dist/storage/StorageAdapter.js
@@ -7,3 +7,36 @@
  */
 export class StorageAdapter {
 }
+export class InMemoryStorageAdapter extends StorageAdapter {
+    #data = {};
+    #keyToString(key) {
+        return key.join(".");
+    }
+    #stringToKey(key) {
+        return key.split(".");
+    }
+    async loadRange(keyPrefix) {
+        const range = Object.entries(this.#data)
+            .filter(([key, _]) => key.startsWith(this.#keyToString(keyPrefix)))
+            .map(([key, data]) => ({ key: this.#stringToKey(key), data }));
+        return Promise.resolve(range);
+    }
+    async removeRange(keyPrefix) {
+        Object.entries(this.#data)
+            .filter(([key, _]) => key.startsWith(this.#keyToString(keyPrefix)))
+            .forEach(([key, _]) => delete this.#data[key]);
+    }
+    async load(key) {
+        return new Promise(resolve => resolve(this.#data[this.#keyToString(key)]));
+    }
+    async save(key, binary) {
+        this.#data[this.#keyToString(key)] = binary;
+        return Promise.resolve();
+    }
+    async remove(key) {
+        delete this.#data[this.#keyToString(key)];
+    }
+    keys() {
+        return Object.keys(this.#data);
+    }
+}
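
A usage sketch for the adapter above, not taken from the diff. Keys are dot-joined into flat strings, so loadRange and removeRange are plain string-prefix matches:

async function demo(storage: InMemoryStorageAdapter) {
  await storage.save(["doc-1", "snapshot", "abc"], new Uint8Array([1, 2, 3]))

  // one chunk back: { key: ["doc-1", "snapshot", "abc"], data: Uint8Array }
  const chunks = await storage.loadRange(["doc-1"])
  console.log(chunks.length, storage.keys()) // 1, ["doc-1.snapshot.abc"]

  await storage.removeRange(["doc-1"]) // drops every key under "doc-1"
}

Note that the prefix match happens on the joined string, so a prefix of ["doc-1"] would also match keys beginning "doc-10."; that is harmless for fixed-length document ids but worth knowing for other key shapes.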
package/dist/storage/StorageSubsystem.d.ts
@@ -2,13 +2,22 @@ import * as A from "@automerge/automerge/slim/next";
 import { type DocumentId } from "../types.js";
 import { StorageAdapterInterface } from "./StorageAdapterInterface.js";
 import { StorageId } from "./types.js";
+import { EventEmitter } from "eventemitter3";
+type StorageSubsystemEvents = {
+    "document-loaded": (arg: {
+        documentId: DocumentId;
+        durationMillis: number;
+        numOps: number;
+        numChanges: number;
+    }) => void;
+};
 /**
  * The storage subsystem is responsible for saving and loading Automerge documents to and from
  * storage adapter. It also provides a generic key/value storage interface for other uses.
  */
-export declare class StorageSubsystem {
+export declare class StorageSubsystem extends EventEmitter<StorageSubsystemEvents> {
     #private;
-    constructor(storageAdapter: StorageAdapterInterface);
+    constructor(beelay: A.beelay.Beelay, storageAdapter: StorageAdapterInterface);
     id(): Promise<StorageId>;
     /** Loads a value from storage. */
     load(
@@ -49,4 +58,5 @@ export declare class StorageSubsystem {
     loadSyncState(documentId: DocumentId, storageId: StorageId): Promise<A.SyncState | undefined>;
     saveSyncState(documentId: DocumentId, storageId: StorageId, syncState: A.SyncState): Promise<void>;
 }
+export {};
 //# sourceMappingURL=StorageSubsystem.d.ts.map
package/dist/storage/StorageSubsystem.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"StorageSubsystem.d.ts","sourceRoot":"","sources":["../../src/storage/StorageSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,gCAAgC,CAAA;AAInD,OAAO,EAAE,KAAK,UAAU,EAAE,MAAM,aAAa,CAAA;AAC7C,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAA;AACtE,OAAO,EAAyB,SAAS,EAAE,MAAM,YAAY,CAAA;
+{"version":3,"file":"StorageSubsystem.d.ts","sourceRoot":"","sources":["../../src/storage/StorageSubsystem.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,CAAC,MAAM,gCAAgC,CAAA;AAInD,OAAO,EAAE,KAAK,UAAU,EAAE,MAAM,aAAa,CAAA;AAC7C,OAAO,EAAE,uBAAuB,EAAE,MAAM,8BAA8B,CAAA;AACtE,OAAO,EAAyB,SAAS,EAAE,MAAM,YAAY,CAAA;AAI7D,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAE5C,KAAK,sBAAsB,GAAG;IAC5B,iBAAiB,EAAE,CAAC,GAAG,EAAE;QACvB,UAAU,EAAE,UAAU,CAAA;QACtB,cAAc,EAAE,MAAM,CAAA;QACtB,MAAM,EAAE,MAAM,CAAA;QACd,UAAU,EAAE,MAAM,CAAA;KACnB,KAAK,IAAI,CAAA;CACX,CAAA;AAED;;;GAGG;AACH,qBAAa,gBAAiB,SAAQ,YAAY,CAAC,sBAAsB,CAAC;;gBAYtE,MAAM,EAAE,CAAC,CAAC,MAAM,CAAC,MAAM,EACvB,cAAc,EAAE,uBAAuB;IAOnC,EAAE,IAAI,OAAO,CAAC,SAAS,CAAC;IA2B9B,kCAAkC;IAC5B,IAAI;IACR,iFAAiF;IACjF,SAAS,EAAE,MAAM;IAEjB,yFAAyF;IACzF,GAAG,EAAE,MAAM,GACV,OAAO,CAAC,UAAU,GAAG,SAAS,CAAC;IAKlC,gCAAgC;IAC1B,IAAI;IACR,iFAAiF;IACjF,SAAS,EAAE,MAAM;IAEjB,yFAAyF;IACzF,GAAG,EAAE,MAAM;IAEX,sCAAsC;IACtC,IAAI,EAAE,UAAU,GACf,OAAO,CAAC,IAAI,CAAC;IAKhB,oCAAoC;IAC9B,MAAM;IACV,iFAAiF;IACjF,SAAS,EAAE,MAAM;IAEjB,2FAA2F;IAC3F,GAAG,EAAE,MAAM,GACV,OAAO,CAAC,IAAI,CAAC;IAOhB;;OAEG;IACG,OAAO,CAAC,CAAC,EAAE,UAAU,EAAE,UAAU,GAAG,OAAO,CAAC,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,GAAG,IAAI,CAAC;IAwBlE;;;;;;OAMG;IACG,OAAO,CAAC,UAAU,EAAE,UAAU,EAAE,GAAG,EAAE,CAAC,CAAC,GAAG,CAAC,OAAO,CAAC,GAAG,OAAO,CAAC,IAAI,CAAC;IAoCzE;;OAEG;IACG,SAAS,CAAC,UAAU,EAAE,UAAU;IAMhC,aAAa,CACjB,UAAU,EAAE,UAAU,EACtB,SAAS,EAAE,SAAS,GACnB,OAAO,CAAC,CAAC,CAAC,SAAS,GAAG,SAAS,CAAC;IAW7B,aAAa,CACjB,UAAU,EAAE,UAAU,EACtB,SAAS,EAAE,SAAS,EACpB,SAAS,EAAE,CAAC,CAAC,SAAS,GACrB,OAAO,CAAC,IAAI,CAAC;CAuBjB"}
package/dist/storage/StorageSubsystem.js
@@ -2,25 +2,23 @@ import * as A from "@automerge/automerge/slim/next";
 import debug from "debug";
 import { headsAreSame } from "../helpers/headsAreSame.js";
 import { mergeArrays } from "../helpers/mergeArrays.js";
-import { keyHash, headsHash } from "./keyHash.js";
-import { chunkTypeFromKey } from "./chunkTypeFromKey.js";
 import * as Uuid from "uuid";
+import { EventEmitter } from "eventemitter3";
 /**
  * The storage subsystem is responsible for saving and loading Automerge documents to and from
  * storage adapter. It also provides a generic key/value storage interface for other uses.
  */
-export class StorageSubsystem {
+export class StorageSubsystem extends EventEmitter {
     /** The storage adapter to use for saving and loading documents */
     #storageAdapter;
     /** Record of the latest heads we've loaded or saved for each document */
     #storedHeads = new Map();
-    /** Metadata on the chunks we've already loaded for each document */
-    #chunkInfos = new Map();
-    /** Flag to avoid compacting when a compaction is already underway */
-    #compacting = false;
     #log = debug(`automerge-repo:storage-subsystem`);
-    constructor(storageAdapter) {
+    #beelay;
+    constructor(beelay, storageAdapter) {
+        super();
         this.#storageAdapter = storageAdapter;
+        this.#beelay = beelay;
     }
     async id() {
         const storedId = await this.#storageAdapter.load(["storage-adapter-id"]);
@@ -76,31 +74,23 @@ export class StorageSubsystem {
      * Loads the Automerge document with the given ID from storage.
      */
     async loadDoc(documentId) {
-        // Load all the chunks for this document
-        const chunks = await this.#storageAdapter.loadRange([documentId]);
-        const binaries = [];
-        const chunkInfos = [];
-        for (const chunk of chunks) {
-            // chunks might have been deleted in the interim
-            if (chunk.data === undefined)
-                continue;
-            const chunkType = chunkTypeFromKey(chunk.key);
-            if (chunkType == null)
-                continue;
-            chunkInfos.push({
-                key: chunk.key,
-                type: chunkType,
-                size: chunk.data.length,
-            });
-            binaries.push(chunk.data);
+        const doc = await this.#beelay.loadDocument(documentId);
+        if (doc == null) {
+            return null;
         }
-        this.#chunkInfos.set(documentId, chunkInfos);
-        // Merge the chunks into a single binary
+        const binaries = doc.map(c => c.contents);
         const binary = mergeArrays(binaries);
         if (binary.length === 0)
             return null;
         // Load into an Automerge document
+        const start = performance.now();
         const newDoc = A.loadIncremental(A.init(), binary);
+        const end = performance.now();
+        this.emit("document-loaded", {
+            documentId,
+            durationMillis: end - start,
+            ...A.stats(newDoc),
+        });
         // Record the latest heads for the document
         this.#storedHeads.set(documentId, A.getHeads(newDoc));
         return newDoc;
@@ -116,14 +106,32 @@ export class StorageSubsystem {
         // Don't bother saving if the document hasn't changed
         if (!this.#shouldSave(documentId, doc))
             return;
-        const sourceChunks = this.#chunkInfos.get(documentId) ?? [];
-        if (this.#shouldCompact(sourceChunks)) {
-            await this.#saveTotal(documentId, doc, sourceChunks);
-        }
-        else {
-            await this.#saveIncremental(documentId, doc);
-        }
+        const changes = A.getChanges(A.view(doc, this.#storedHeads.get(documentId) ?? []), doc);
+        const commits = changes.map(c => {
+            const decoded = A.decodeChange(c);
+            return {
+                parents: decoded.deps,
+                hash: decoded.hash,
+                contents: c,
+            };
+        });
+        let done = this.#beelay
+            .addCommits({
+            docId: documentId,
+            commits: changes.map(c => {
+                const decoded = A.decodeChange(c);
+                return {
+                    parents: decoded.deps,
+                    hash: decoded.hash,
+                    contents: c,
+                };
+            }),
+        })
+            .catch(e => {
+            console.error(`Error saving document ${documentId}: ${e}`);
+        });
         this.#storedHeads.set(documentId, A.getHeads(doc));
+        await done;
     }
     /**
      * Removes the Automerge document with the given ID from storage
@@ -133,49 +141,6 @@ export class StorageSubsystem {
         await this.#storageAdapter.removeRange([documentId, "incremental"]);
         await this.#storageAdapter.removeRange([documentId, "sync-state"]);
     }
-    /**
-     * Saves just the incremental changes since the last save.
-     */
-    async #saveIncremental(documentId, doc) {
-        const binary = A.saveSince(doc, this.#storedHeads.get(documentId) ?? []);
-        if (binary && binary.length > 0) {
-            const key = [documentId, "incremental", keyHash(binary)];
-            this.#log(`Saving incremental ${key} for document ${documentId}`);
-            await this.#storageAdapter.save(key, binary);
-            if (!this.#chunkInfos.has(documentId)) {
-                this.#chunkInfos.set(documentId, []);
-            }
-            this.#chunkInfos.get(documentId).push({
-                key,
-                type: "incremental",
-                size: binary.length,
-            });
-            this.#storedHeads.set(documentId, A.getHeads(doc));
-        }
-        else {
-            return Promise.resolve();
-        }
-    }
-    /**
-     * Compacts the document storage into a single snapshot.
-     */
-    async #saveTotal(documentId, doc, sourceChunks) {
-        this.#compacting = true;
-        const binary = A.save(doc);
-        const snapshotHash = headsHash(A.getHeads(doc));
-        const key = [documentId, "snapshot", snapshotHash];
-        const oldKeys = new Set(sourceChunks.map(c => c.key).filter(k => k[2] !== snapshotHash));
-        this.#log(`Saving snapshot ${key} for document ${documentId}`);
-        this.#log(`deleting old chunks ${Array.from(oldKeys)}`);
-        await this.#storageAdapter.save(key, binary);
-        for (const key of oldKeys) {
-            await this.#storageAdapter.remove(key);
-        }
-        const newChunkInfos = this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? [];
-        newChunkInfos.push({ key, type: "snapshot", size: binary.length });
-        this.#chunkInfos.set(documentId, newChunkInfos);
-        this.#compacting = false;
-    }
     async loadSyncState(documentId, storageId) {
         const key = [documentId, "sync-state", storageId];
         try {
@@ -207,27 +172,4 @@ export class StorageSubsystem {
         }
         return true; // the document has changed
     }
-    /**
-     * We only compact if the incremental size is greater than the snapshot size.
-     */
-    #shouldCompact(sourceChunks) {
-        if (this.#compacting)
-            return false;
-        let snapshotSize = 0;
-        let incrementalSize = 0;
-        for (const chunk of sourceChunks) {
-            if (chunk.type === "snapshot") {
-                snapshotSize += chunk.size;
-            }
-            else {
-                incrementalSize += chunk.size;
-            }
-        }
-        // if the file is currently small, don't worry, just compact
-        // this might seem a bit arbitrary (1k is arbitrary) but is designed to ensure compaction
-        // for documents with only a single large change on top of an empty (or nearly empty) document
-        // for example: imported NPM modules, images, etc.
-        // if we have even more incrementals (so far) than the snapshot, compact
-        return snapshotSize < 1024 || incrementalSize >= snapshotSize;
-    }
 }
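
The new save path above turns each incremental Automerge change into a commit record (parents taken from the change's deps, plus its hash and raw bytes) before handing the batch to the beelay backend. A hedged sketch of just that conversion follows; the beelay API itself is internal to this prerelease, so only the record shape is shown and the Commit interface is an assumption inferred from the code above.

import * as A from "@automerge/automerge/next"

interface Commit {
  parents: string[]    // hashes of the changes this one depends on
  hash: string         // the change's own hash
  contents: Uint8Array // the raw encoded change
}

function changesToCommits(doc: A.Doc<unknown>, sinceHeads: string[]): Commit[] {
  // Everything added since `sinceHeads`, as encoded changes
  const changes = A.getChanges(A.view(doc, sinceHeads), doc)
  return changes.map(contents => {
    const decoded = A.decodeChange(contents)
    return { parents: decoded.deps, hash: decoded.hash, contents }
  })
}

This replaces the old snapshot/incremental-chunk compaction scheme (the removed #saveIncremental, #saveTotal, and #shouldCompact above): instead of the subsystem deciding when to compact, the change graph is handed over commit by commit.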
package/dist/synchronizer/CollectionSynchronizer.d.ts
@@ -1,16 +1,18 @@
 import { Repo } from "../Repo.js";
 import { DocMessage } from "../network/messages.js";
-import { DocumentId, PeerId } from "../types.js";
+import { AutomergeUrl, DocumentId, PeerId } from "../types.js";
 import { DocSynchronizer } from "./DocSynchronizer.js";
 import { Synchronizer } from "./Synchronizer.js";
+import { next as A } from "@automerge/automerge";
 /** A CollectionSynchronizer is responsible for synchronizing a DocCollection with peers. */
 export declare class CollectionSynchronizer extends Synchronizer {
     #private;
+    private beelay;
     private repo;
     /** A map of documentIds to their synchronizers */
     /** @hidden */
     docSynchronizers: Record<DocumentId, DocSynchronizer>;
-    constructor(repo: Repo);
+    constructor(beelay: A.beelay.Beelay, repo: Repo, denylist?: AutomergeUrl[]);
     /**
      * When we receive a sync message for a document we haven't got in memory, we
      * register it with the repo and start synchronizing
package/dist/synchronizer/CollectionSynchronizer.d.ts.map
@@ -1 +1 @@
-{"version":3,"file":"CollectionSynchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/CollectionSynchronizer.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,IAAI,EAAE,MAAM,YAAY,CAAA;AACjC,OAAO,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAA;AACnD,OAAO,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;
+{"version":3,"file":"CollectionSynchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/CollectionSynchronizer.ts"],"names":[],"mappings":"AAGA,OAAO,EAAE,IAAI,EAAE,MAAM,YAAY,CAAA;AACjC,OAAO,EAAE,UAAU,EAAE,MAAM,wBAAwB,CAAA;AACnD,OAAO,EAAE,YAAY,EAAE,UAAU,EAAE,MAAM,EAAE,MAAM,aAAa,CAAA;AAC9D,OAAO,EAAE,eAAe,EAAE,MAAM,sBAAsB,CAAA;AACtD,OAAO,EAAE,YAAY,EAAE,MAAM,mBAAmB,CAAA;AAChD,OAAO,EAAE,IAAI,IAAI,CAAC,EAAE,MAAM,sBAAsB,CAAA;AAIhD,4FAA4F;AAC5F,qBAAa,sBAAuB,SAAQ,YAAY;;IAcpD,OAAO,CAAC,MAAM;IACd,OAAO,CAAC,IAAI;IAXd,kDAAkD;IAClD,cAAc;IACd,gBAAgB,EAAE,MAAM,CAAC,UAAU,EAAE,eAAe,CAAC,CAAK;gBAQhD,MAAM,EAAE,CAAC,CAAC,MAAM,CAAC,MAAM,EACvB,IAAI,EAAE,IAAI,EAClB,QAAQ,GAAE,YAAY,EAAO;IAyC/B;;;OAGG;IACG,cAAc,CAAC,OAAO,EAAE,UAAU;IAsCxC;;OAEG;IACH,WAAW,CAAC,UAAU,EAAE,UAAU;IAalC,cAAc,CAAC,UAAU,EAAE,UAAU;IAIrC,2DAA2D;IAC3D,OAAO,CAAC,MAAM,EAAE,MAAM;IAgBtB,uDAAuD;IACvD,UAAU,CAAC,MAAM,EAAE,MAAM;IASzB,+CAA+C;IAC/C,IAAI,KAAK,IAAI,MAAM,EAAE,CAEpB;IAED,OAAO,IAAI;QACT,CAAC,GAAG,EAAE,MAAM,GAAG;YACb,KAAK,EAAE,MAAM,EAAE,CAAA;YACf,IAAI,EAAE;gBAAE,MAAM,EAAE,MAAM,CAAC;gBAAC,UAAU,EAAE,MAAM,CAAA;aAAE,CAAA;SAC7C,CAAA;KACF;CAUF"}