@automerge/automerge-repo 2.0.0-alpha.2 → 2.0.0-alpha.20
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/AutomergeUrl.d.ts +17 -5
- package/dist/AutomergeUrl.d.ts.map +1 -1
- package/dist/AutomergeUrl.js +71 -24
- package/dist/DocHandle.d.ts +80 -8
- package/dist/DocHandle.d.ts.map +1 -1
- package/dist/DocHandle.js +181 -10
- package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
- package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
- package/dist/RemoteHeadsSubscriptions.js +4 -1
- package/dist/Repo.d.ts +35 -2
- package/dist/Repo.d.ts.map +1 -1
- package/dist/Repo.js +112 -70
- package/dist/entrypoints/fullfat.d.ts +1 -0
- package/dist/entrypoints/fullfat.d.ts.map +1 -1
- package/dist/entrypoints/fullfat.js +1 -2
- package/dist/helpers/bufferFromHex.d.ts +3 -0
- package/dist/helpers/bufferFromHex.d.ts.map +1 -0
- package/dist/helpers/bufferFromHex.js +13 -0
- package/dist/helpers/headsAreSame.d.ts +2 -2
- package/dist/helpers/headsAreSame.d.ts.map +1 -1
- package/dist/helpers/mergeArrays.d.ts +1 -1
- package/dist/helpers/mergeArrays.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
- package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
- package/dist/helpers/tests/storage-adapter-tests.js +25 -48
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +1 -1
- package/dist/storage/StorageSubsystem.d.ts +11 -1
- package/dist/storage/StorageSubsystem.d.ts.map +1 -1
- package/dist/storage/StorageSubsystem.js +20 -4
- package/dist/synchronizer/CollectionSynchronizer.d.ts +15 -2
- package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/CollectionSynchronizer.js +29 -8
- package/dist/synchronizer/DocSynchronizer.d.ts +7 -0
- package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
- package/dist/synchronizer/DocSynchronizer.js +14 -0
- package/dist/synchronizer/Synchronizer.d.ts +11 -0
- package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
- package/dist/types.d.ts +4 -1
- package/dist/types.d.ts.map +1 -1
- package/package.json +3 -3
- package/src/AutomergeUrl.ts +101 -26
- package/src/DocHandle.ts +245 -20
- package/src/RemoteHeadsSubscriptions.ts +11 -9
- package/src/Repo.ts +163 -68
- package/src/entrypoints/fullfat.ts +1 -2
- package/src/helpers/bufferFromHex.ts +14 -0
- package/src/helpers/headsAreSame.ts +2 -2
- package/src/helpers/tests/storage-adapter-tests.ts +44 -86
- package/src/index.ts +2 -0
- package/src/storage/StorageSubsystem.ts +29 -4
- package/src/synchronizer/CollectionSynchronizer.ts +42 -9
- package/src/synchronizer/DocSynchronizer.ts +15 -0
- package/src/synchronizer/Synchronizer.ts +14 -0
- package/src/types.ts +4 -1
- package/test/AutomergeUrl.test.ts +130 -0
- package/test/DocHandle.test.ts +209 -2
- package/test/DocSynchronizer.test.ts +10 -3
- package/test/Repo.test.ts +228 -3
- package/test/StorageSubsystem.test.ts +17 -0
package/src/Repo.ts
CHANGED
@@ -2,11 +2,20 @@ import { next as Automerge } from "@automerge/automerge/slim"
 import debug from "debug"
 import { EventEmitter } from "eventemitter3"
 import {
+  encodeHeads,
   generateAutomergeUrl,
   interpretAsDocumentId,
+  isValidAutomergeUrl,
   parseAutomergeUrl,
 } from "./AutomergeUrl.js"
-import { DocHandle, DocHandleEncodedChangePayload } from "./DocHandle.js"
+import {
+  DELETED,
+  DocHandle,
+  DocHandleEncodedChangePayload,
+  READY,
+  UNAVAILABLE,
+  UNLOADED,
+} from "./DocHandle.js"
 import { RemoteHeadsSubscriptions } from "./RemoteHeadsSubscriptions.js"
 import { headsAreSame } from "./helpers/headsAreSame.js"
 import { throttle } from "./helpers/throttle.js"
@@ -20,8 +29,16 @@ import { StorageAdapterInterface } from "./storage/StorageAdapterInterface.js"
 import { StorageSubsystem } from "./storage/StorageSubsystem.js"
 import { StorageId } from "./storage/types.js"
 import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
-import { SyncStatePayload } from "./synchronizer/Synchronizer.js"
-import type { AnyDocumentId, DocumentId, PeerId } from "./types.js"
+import {
+  DocSyncMetrics,
+  SyncStatePayload,
+} from "./synchronizer/Synchronizer.js"
+import type {
+  AnyDocumentId,
+  AutomergeUrl,
+  DocumentId,
+  PeerId,
+} from "./types.js"
 
 function randomPeerId() {
   return ("peer-" + Math.random().toString(36).slice(4)) as PeerId
@@ -49,7 +66,8 @@ export class Repo extends EventEmitter<RepoEvents> {
 
   #handleCache: Record<DocumentId, DocHandle<any>> = {}
 
-  #synchronizer: CollectionSynchronizer
+  /** @hidden */
+  synchronizer: CollectionSynchronizer
 
   /** By default, we share generously with all peers. */
   /** @hidden */
@@ -69,39 +87,13 @@ export class Repo extends EventEmitter<RepoEvents> {
     sharePolicy,
     isEphemeral = storage === undefined,
     enableRemoteHeadsGossiping = false,
+    denylist = [],
   }: RepoConfig = {}) {
     super()
     this.#remoteHeadsGossipingEnabled = enableRemoteHeadsGossiping
     this.#log = debug(`automerge-repo:repo`)
     this.sharePolicy = sharePolicy ?? this.sharePolicy
 
-    // DOC COLLECTION
-
-    // The `document` event is fired by the DocCollection any time we create a new document or look
-    // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-    this.on("document", async ({ handle }) => {
-      if (storageSubsystem) {
-        // Save when the document changes, but no more often than saveDebounceRate.
-        const saveFn = ({
-          handle,
-          doc,
-        }: DocHandleEncodedChangePayload<any>) => {
-          void storageSubsystem.saveDoc(handle.documentId, doc)
-        }
-        handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
-      }
-
-      handle.on("unavailable", () => {
-        this.#log("document unavailable", { documentId: handle.documentId })
-        this.emit("unavailable-document", {
-          documentId: handle.documentId,
-        })
-      })
-
-      // Register the document with the synchronizer. This advertises our interest in the document.
-      this.#synchronizer.addDocument(handle.documentId)
-    })
-
     this.on("delete-document", ({ documentId }) => {
       // TODO Pass the delete on to the network
       // synchronizer.removeDocument(documentId)
@@ -115,16 +107,19 @@ export class Repo extends EventEmitter<RepoEvents> {
 
     // SYNCHRONIZER
     // The synchronizer uses the network subsystem to keep documents in sync with peers.
-    this.#synchronizer = new CollectionSynchronizer(this)
+    this.synchronizer = new CollectionSynchronizer(this, denylist)
 
     // When the synchronizer emits messages, send them to peers
-    this.#synchronizer.on("message", message => {
+    this.synchronizer.on("message", message => {
       this.#log(`sending ${message.type} message to ${message.targetId}`)
       networkSubsystem.send(message)
     })
 
+    // Forward metrics from doc synchronizers
+    this.synchronizer.on("metrics", event => this.emit("doc-metrics", event))
+
     if (this.#remoteHeadsGossipingEnabled) {
-      this.#synchronizer.on("open-doc", ({ peerId, documentId }) => {
+      this.synchronizer.on("open-doc", ({ peerId, documentId }) => {
         this.#remoteHeadsSubscriptions.subscribePeerToDoc(peerId, documentId)
       })
     }
@@ -132,6 +127,12 @@ export class Repo extends EventEmitter<RepoEvents> {
     // STORAGE
     // The storage subsystem has access to some form of persistence, and deals with save and loading documents.
    const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined
+    if (storageSubsystem) {
+      storageSubsystem.on("document-loaded", event =>
+        this.emit("doc-metrics", { type: "doc-loaded", ...event })
+      )
+    }
+
     this.storageSubsystem = storageSubsystem
 
     // NETWORK
@@ -167,12 +168,12 @@
         console.log("error in share policy", { err })
       })
 
-      this.#synchronizer.addPeer(peerId)
+      this.synchronizer.addPeer(peerId)
     })
 
     // When a peer disconnects, remove it from the synchronizer
     networkSubsystem.on("peer-disconnected", ({ peerId }) => {
-      this.#synchronizer.removePeer(peerId)
+      this.synchronizer.removePeer(peerId)
       this.#remoteHeadsSubscriptions.removePeer(peerId)
     })
 
@@ -181,7 +182,7 @@
       this.#receiveMessage(msg)
     })
 
-    this.#synchronizer.on("sync-state", message => {
+    this.synchronizer.on("sync-state", message => {
       this.#saveSyncState(message)
 
       const handle = this.#handleCache[message.documentId]
@@ -194,16 +195,20 @@
       const heads = handle.getRemoteHeads(storageId)
       const haveHeadsChanged =
         message.syncState.theirHeads &&
-        (!heads || !headsAreSame(heads, message.syncState.theirHeads))
+        (!heads ||
+          !headsAreSame(heads, encodeHeads(message.syncState.theirHeads)))
 
       if (haveHeadsChanged && message.syncState.theirHeads) {
-        handle.setRemoteHeads(storageId, message.syncState.theirHeads)
+        handle.setRemoteHeads(
+          storageId,
+          encodeHeads(message.syncState.theirHeads)
+        )
 
         if (storageId && this.#remoteHeadsGossipingEnabled) {
           this.#remoteHeadsSubscriptions.handleImmediateRemoteHeadsChanged(
             message.documentId,
             storageId,
-            message.syncState.theirHeads
+            encodeHeads(message.syncState.theirHeads)
           )
         }
       }
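
Note on the hunk above: `message.syncState.theirHeads` comes straight from Automerge's sync state as hex change hashes, while the handle and gossip APIs now work with the URL-safe `UrlHeads` encoding, hence the `encodeHeads()` wrapping. A minimal sketch of the conversion, assuming `encodeHeads` keeps the shape its import from `./AutomergeUrl.js` suggests:

    import { next as A } from "@automerge/automerge/slim"
    import { encodeHeads } from "./AutomergeUrl.js" // path as used in this diff

    declare const doc: A.Doc<unknown>
    const raw = A.getHeads(doc) // hex change hashes from Automerge
    const urlHeads = encodeHeads(raw) // URL-safe strings, comparable with headsAreSame()
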
@@ -243,6 +248,32 @@
     }
   }
 
+  // The `document` event is fired by the DocCollection any time we create a new document or look
+  // up a document by ID. We listen for it in order to wire up storage and network synchronization.
+  #registerHandleWithSubsystems(handle: DocHandle<any>) {
+    const { storageSubsystem } = this
+    if (storageSubsystem) {
+      // Save when the document changes, but no more often than saveDebounceRate.
+      const saveFn = ({ handle, doc }: DocHandleEncodedChangePayload<any>) => {
+        void storageSubsystem.saveDoc(handle.documentId, doc)
+      }
+      handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
+    }
+
+    handle.on("unavailable", () => {
+      this.#log("document unavailable", { documentId: handle.documentId })
+      this.emit("unavailable-document", {
+        documentId: handle.documentId,
+      })
+    })
+
+    // Register the document with the synchronizer. This advertises our interest in the document.
+    this.synchronizer.addDocument(handle.documentId)
+
+    // Preserve the old event in case anyone was using it.
+    this.emit("document", { handle })
+  }
+
   #receiveMessage(message: RepoMessage) {
     switch (message.type) {
       case "remote-subscription-change":
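
The wiring that used to live in the constructor's `document` listener now runs in `#registerHandleWithSubsystems`, and the event itself is still emitted at the end, so existing subscribers are unaffected. A small usage sketch (`repo` is any Repo instance):

    repo.on("document", ({ handle }) => {
      // still fires whenever a handle is created or looked up
      console.log("registered:", handle.documentId)
    })
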
@@ -259,7 +290,7 @@
       case "request":
       case "ephemeral":
       case "doc-unavailable":
-        this.#synchronizer.receiveMessage(message).catch(err => {
+        this.synchronizer.receiveMessage(message).catch(err => {
           console.log("error receiving message", { err })
         })
     }
@@ -324,7 +355,7 @@
 
   /** Returns a list of all connected peer ids */
   get peers(): PeerId[] {
-    return this.#synchronizer.peers
+    return this.synchronizer.peers
   }
 
   getStorageIdOfPeer(peerId: PeerId): StorageId | undefined {
@@ -343,7 +374,7 @@
       documentId,
     }) as DocHandle<T>
 
-    this.emit("document", { handle })
+    this.#registerHandleWithSubsystems(handle)
 
     handle.update(() => {
       let nextDoc: Automerge.Doc<T>
@@ -378,7 +409,7 @@
     if (!clonedHandle.isReady()) {
       throw new Error(
         `Cloned handle is not yet in ready state.
-        (Try await handle.
+        (Try await handle.whenReady() first.)`
       )
     }
 
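
As the updated error message says, a handle must be ready before it can be cloned. A sketch, assuming `clone()` still takes the source handle and `url` is a valid Automerge URL:

    const handle = repo.find(url)
    await handle.whenReady()
    const copy = repo.clone(handle)
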
@@ -405,19 +436,22 @@
     /** The url or documentId of the handle to retrieve */
     id: AnyDocumentId
   ): DocHandle<T> {
-    const documentId = interpretAsDocumentId(id)
+    const { documentId, heads } = isValidAutomergeUrl(id)
+      ? parseAutomergeUrl(id)
+      : { documentId: interpretAsDocumentId(id), heads: undefined }
 
-    // If we have the handle cached, return it
-    if (this.#handleCache[documentId]) {
-      if (this.#handleCache[documentId].isUnavailable()) {
+    const cachedHandle = this.#handleCache[documentId]
+    if (cachedHandle) {
+      if (cachedHandle.isUnavailable()) {
         // this ensures that the event fires after the handle has been returned
         setTimeout(() => {
-          this.#handleCache[documentId].emit("unavailable", {
-            handle: this.#handleCache[documentId],
+          cachedHandle.emit("unavailable", {
+            handle: cachedHandle,
           })
         })
       }
-      return this.#handleCache[documentId]
+      // If we already have the handle, return it immediately (or a view of the handle if heads are specified)
+      return heads ? cachedHandle.view(heads) : cachedHandle
     }
 
     // If we don't already have the handle, make an empty one and try loading it
@@ -425,30 +459,32 @@
       documentId,
     }) as DocHandle<T>
 
-    //
-
-
+    // Loading & network is going to be asynchronous no matter what,
+    // but we want to return the handle immediately.
+    const attemptLoad = this.storageSubsystem
+      ? this.storageSubsystem.loadDoc(handle.documentId)
+      : Promise.resolve(null)
+
+    attemptLoad
+      .then(async loadedDoc => {
         if (loadedDoc) {
           // uhhhh, sorry if you're reading this because we were lying to the type system
           handle.update(() => loadedDoc as Automerge.Doc<T>)
           handle.doneLoading()
         } else {
-
-
-
-
-      })
-      .catch(err => {
-        this.#log("error waiting for network", { err })
-      })
-    this.emit("document", { handle })
+          // we want to wait for the network subsystem to be ready before
+          // we request the document. this prevents entering unavailable during initialization.
+          await this.networkSubsystem.whenReady()
+          handle.request()
         }
+        this.#registerHandleWithSubsystems(handle)
       })
-
-
-
-
-    return handle
+      .catch(err => {
+        this.#log("error waiting for network", { err })
+      })
+
+    // If we already have the handle, return it immediately (or a view of the handle if heads are specified)
+    return heads ? handle.view(heads) : handle
   }
 
   delete(
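
`find()` now recognizes URLs that embed heads (see the AutomergeUrl.ts changes in this release): for those it returns `handle.view(heads)`, a handle fixed at that point in the document's history, while plain URLs and document IDs still return the live handle. A sketch with a hypothetical `TodoList` type:

    const live = repo.find<TodoList>(plainUrl) // live handle, as before
    const pinned = repo.find<TodoList>(urlWithHeads) // view fixed at the encoded heads
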
@@ -539,12 +575,49 @@
     )
   }
 
+  /**
+   * Removes a DocHandle from the handleCache.
+   * @hidden this API is experimental and may change.
+   * @param documentId - documentId of the DocHandle to remove from handleCache, if present in cache.
+   * @returns Promise<void>
+   */
+  async removeFromCache(documentId: DocumentId) {
+    if (!this.#handleCache[documentId]) {
+      this.#log(
+        `WARN: removeFromCache called but handle not found in handleCache for documentId: ${documentId}`
+      )
+      return
+    }
+    const handle = this.#getHandle({ documentId })
+    const doc = await handle.doc([READY, UNLOADED, DELETED, UNAVAILABLE])
+    if (doc) {
+      if (handle.isReady()) {
+        handle.unload()
+      } else {
+        this.#log(
+          `WARN: removeFromCache called but handle for documentId: ${documentId} in unexpected state: ${handle.state}`
+        )
+      }
+      delete this.#handleCache[documentId]
+      // TODO: remove document from synchronizer when removeDocument is implemented
+      // this.synchronizer.removeDocument(documentId)
+    } else {
+      this.#log(
+        `WARN: removeFromCache called but doc undefined for documentId: ${documentId}`
+      )
+    }
+  }
+
   shutdown(): Promise<void> {
     this.networkSubsystem.adapters.forEach(adapter => {
       adapter.disconnect()
     })
     return this.flush()
   }
+
+  metrics(): { documents: { [key: string]: any } } {
+    return { documents: this.synchronizer.metrics() }
+  }
 }
 
 export interface RepoConfig {
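
Usage sketch for the two additions above; `removeFromCache` is flagged experimental in its own doc comment, and the document itself stays in storage, so a later `find()` can reload it:

    await repo.removeFromCache(documentId) // evict the in-memory handle
    const { documents } = repo.metrics() // point-in-time sync metrics, keyed by document
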
@@ -571,6 +644,13 @@
    * Whether to enable the experimental remote heads gossiping feature
    */
   enableRemoteHeadsGossiping?: boolean
+
+  /**
+   * A list of automerge URLs which should never be loaded regardless of what
+   * messages are received or what the share policy is. This is useful to avoid
+   * loading documents that are known to be too resource intensive.
+   */
+  denylist?: AutomergeUrl[]
 }
 
 /** A function that determines whether we should share a document with a peer
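
A construction sketch for the new option; the storage adapter and the URL value are placeholders:

    import { Repo } from "@automerge/automerge-repo"
    import type { AutomergeUrl } from "@automerge/automerge-repo"

    const repo = new Repo({
      storage: myStorageAdapter, // any StorageAdapterInterface implementation
      denylist: [
        // a document known to be too expensive to load
        "automerge:45NEFt2AbpNbvhGp8skMxGGuBEYP" as AutomergeUrl,
      ],
    })
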
@@ -594,6 +674,7 @@
   "delete-document": (arg: DeleteDocumentPayload) => void
   /** A document was marked as unavailable (we don't have it and none of our peers have it) */
   "unavailable-document": (arg: DeleteDocumentPayload) => void
+  "doc-metrics": (arg: DocMetrics) => void
 }
 
 export interface DocumentPayload {
@@ -603,3 +684,17 @@
 export interface DeleteDocumentPayload {
   documentId: DocumentId
 }
+
+export type DocMetrics =
+  | DocSyncMetrics
+  | {
+      type: "doc-loaded"
+      documentId: DocumentId
+      durationMillis: number
+      numOps: number
+      numChanges: number
+    }
+  | {
+      type: "doc-denied"
+      documentId: DocumentId
+    }
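
Subscribing to the new event; the `DocSyncMetrics` variants come from Synchronizer.ts (not shown in this excerpt), so this sketch only matches the two variants defined above:

    repo.on("doc-metrics", metric => {
      if (metric.type === "doc-loaded") {
        console.log(`${metric.documentId}: ${metric.numChanges} changes, ${metric.numOps} ops, ${metric.durationMillis}ms`)
      } else if (metric.type === "doc-denied") {
        console.warn(`${metric.documentId} was blocked by the denylist`)
      }
    })
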
package/src/helpers/bufferFromHex.ts
ADDED
@@ -0,0 +1,14 @@
+export const uint8ArrayFromHexString = (hexString: string): Uint8Array => {
+  if (hexString.length % 2 !== 0) {
+    throw new Error("Hex string must have an even length")
+  }
+  const bytes = new Uint8Array(hexString.length / 2)
+  for (let i = 0; i < hexString.length; i += 2) {
+    bytes[i >> 1] = parseInt(hexString.slice(i, i + 2), 16)
+  }
+  return bytes
+}
+
+export const uint8ArrayToHexString = (data: Uint8Array): string => {
+  return Array.from(data, byte => byte.toString(16).padStart(2, "0")).join("")
+}
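
Round-trip behaviour of the new helpers, with values worked out by hand:

    uint8ArrayFromHexString("0a1b2c") // Uint8Array [10, 27, 44]
    uint8ArrayToHexString(new Uint8Array([10, 27, 44])) // "0a1b2c"
    uint8ArrayFromHexString("abc") // throws: hex string must have an even length
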
package/src/helpers/headsAreSame.ts
CHANGED
@@ -1,6 +1,6 @@
-import { Heads } from "@automerge/automerge/slim/next"
 import { arraysAreEqual } from "./arraysAreEqual.js"
+import type { UrlHeads } from "../types.js"
 
-export const headsAreSame = (a: Heads, b: Heads) => {
+export const headsAreSame = (a: UrlHeads, b: UrlHeads) => {
   return arraysAreEqual(a, b)
 }
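
Since `headsAreSame` is plain element-wise array equality, both arguments must be in the same encoding (and order); after this change that means `UrlHeads` on both sides, so raw Automerge heads must be encoded first, e.g. (reusing the imports from the earlier sketch):

    headsAreSame(encodeHeads(A.getHeads(docA)), encodeHeads(A.getHeads(docB)))
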
package/src/helpers/tests/storage-adapter-tests.ts
CHANGED
@@ -1,4 +1,4 @@
-import { describe, expect, it } from "vitest"
+import { describe, expect, beforeEach, it as _it } from "vitest"
 
 import type { StorageAdapterInterface } from "../../storage/StorageAdapterInterface.js"
 
@@ -8,120 +8,90 @@ const PAYLOAD_C = () => new Uint8Array([2, 111, 74, 131, 236, 96, 142, 193])
 
 const LARGE_PAYLOAD = new Uint8Array(100000).map(() => Math.random() * 256)
 
-export function runStorageAdapterTests(_setup: SetupFn, title?: string): void {
-
-
-
-
+type AdapterTestContext = {
+  adapter: StorageAdapterInterface
+}
+
+const it = _it<AdapterTestContext>
+
+export function runStorageAdapterTests(setup: SetupFn, title?: string): void {
+  beforeEach<AdapterTestContext>(async ctx => {
+    const { adapter, teardown = NO_OP } = await setup()
+    ctx.adapter = adapter
+    return teardown
+  })
 
   describe(`Storage adapter acceptance tests ${
     title ? `(${title})` : ""
   }`, () => {
     describe("load", () => {
-      it("should return undefined if there is no data", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should return undefined if there is no data", async ({ adapter }) => {
         const actual = await adapter.load(["AAAAA", "sync-state", "xxxxx"])
         expect(actual).toBeUndefined()
-
-        teardown()
       })
     })
 
     describe("save and load", () => {
-      it("should return data that was saved", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should return data that was saved", async ({ adapter }) => {
         await adapter.save(["storage-adapter-id"], PAYLOAD_A())
         const actual = await adapter.load(["storage-adapter-id"])
         expect(actual).toStrictEqual(PAYLOAD_A())
-
-        teardown()
       })
 
-      it("should work with composite keys", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should work with composite keys", async ({ adapter }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         const actual = await adapter.load(["AAAAA", "sync-state", "xxxxx"])
         expect(actual).toStrictEqual(PAYLOAD_A())
-
-        teardown()
       })
 
-      it("should work with a large payload", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should work with a large payload", async ({ adapter }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], LARGE_PAYLOAD)
         const actual = await adapter.load(["AAAAA", "sync-state", "xxxxx"])
         expect(actual).toStrictEqual(LARGE_PAYLOAD)
-
-        teardown()
       })
     })
 
     describe("loadRange", () => {
-      it("should return an empty array if there is no data", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should return an empty array if there is no data", async ({
+        adapter,
+      }) => {
         expect(await adapter.loadRange(["AAAAA"])).toStrictEqual([])
-
-        teardown()
       })
     })
 
     describe("save and loadRange", () => {
-      it("should return all the data that matches the key", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should return all the data that matches the key", async ({
+        adapter,
+      }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         await adapter.save(["AAAAA", "snapshot", "yyyyy"], PAYLOAD_B())
         await adapter.save(["AAAAA", "sync-state", "zzzzz"], PAYLOAD_C())
 
-        expect(await adapter.loadRange(["AAAAA"])).toStrictEqual(
-          expect.arrayContaining([
-            { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
-            { key: ["AAAAA", "snapshot", "yyyyy"], data: PAYLOAD_B() },
-            { key: ["AAAAA", "sync-state", "zzzzz"], data: PAYLOAD_C() },
-          ])
-        )
-
-        expect(await adapter.loadRange(["AAAAA", "sync-state"])).toStrictEqual(
-          expect.arrayContaining([
-            { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
-            { key: ["AAAAA", "sync-state", "zzzzz"], data: PAYLOAD_C() },
-          ])
-        )
-
-        teardown()
-      })
+        expect(await adapter.loadRange(["AAAAA"])).toStrictEqual([
+          { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
+          { key: ["AAAAA", "snapshot", "yyyyy"], data: PAYLOAD_B() },
+          { key: ["AAAAA", "sync-state", "zzzzz"], data: PAYLOAD_C() },
+        ])
 
-      it("should only load values that match they key", async () => {
-        const { adapter, teardown } = await setup()
+        expect(await adapter.loadRange(["AAAAA", "sync-state"])).toStrictEqual([
+          { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
+          { key: ["AAAAA", "sync-state", "zzzzz"], data: PAYLOAD_C() },
+        ])
+      })
 
+      it("should only load values that match they key", async ({ adapter }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         await adapter.save(["BBBBB", "sync-state", "zzzzz"], PAYLOAD_C())
 
         const actual = await adapter.loadRange(["AAAAA"])
-        expect(actual).toStrictEqual(
-          expect.arrayContaining([
-            { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
-          ])
-        )
-        expect(actual).toStrictEqual(
-          expect.not.arrayContaining([
-            { key: ["BBBBB", "sync-state", "zzzzz"], data: PAYLOAD_C() },
-          ])
-        )
-
-        teardown()
+        expect(actual).toStrictEqual([
+          { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_A() },
+        ])
       })
     })
 
     describe("save and remove", () => {
-      it("after removing, should be empty", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("after removing, should be empty", async ({ adapter }) => {
         await adapter.save(["AAAAA", "snapshot", "xxxxx"], PAYLOAD_A())
         await adapter.remove(["AAAAA", "snapshot", "xxxxx"])
 
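
The rewrite above replaces per-test `setup()`/`teardown()` calls with vitest's typed test context: `beforeEach<AdapterTestContext>` awaits the suite's setup function, stores the adapter on the context, and returns the teardown function, which vitest registers as that test's cleanup hook; `const it = _it<AdapterTestContext>` is a TypeScript instantiation expression that pins the context type so each test can destructure `{ adapter }` directly.
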
@@ -129,30 +99,24 @@ export function runStorageAdapterTests(_setup: SetupFn, title?: string): void {
         expect(
           await adapter.load(["AAAAA", "snapshot", "xxxxx"])
         ).toBeUndefined()
-
-        teardown()
       })
     })
 
     describe("save and save", () => {
-      it("should overwrite data saved with the same key", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should overwrite data saved with the same key", async ({
+        adapter,
+      }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_B())
 
         expect(await adapter.loadRange(["AAAAA", "sync-state"])).toStrictEqual([
           { key: ["AAAAA", "sync-state", "xxxxx"], data: PAYLOAD_B() },
         ])
-
-        teardown()
       })
     })
 
     describe("removeRange", () => {
-      it("should remove a range of records", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should remove a range of records", async ({ adapter }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         await adapter.save(["AAAAA", "snapshot", "yyyyy"], PAYLOAD_B())
         await adapter.save(["AAAAA", "sync-state", "zzzzz"], PAYLOAD_C())
@@ -162,13 +126,9 @@ export function runStorageAdapterTests(_setup: SetupFn, title?: string): void {
         expect(await adapter.loadRange(["AAAAA"])).toStrictEqual([
           { key: ["AAAAA", "snapshot", "yyyyy"], data: PAYLOAD_B() },
         ])
-
-        teardown()
       })
 
-      it("should not remove records that don't match", async () => {
-        const { adapter, teardown } = await setup()
-
+      it("should not remove records that don't match", async ({ adapter }) => {
         await adapter.save(["AAAAA", "sync-state", "xxxxx"], PAYLOAD_A())
         await adapter.save(["BBBBB", "sync-state", "zzzzz"], PAYLOAD_B())
 
@@ -178,8 +138,6 @@ export function runStorageAdapterTests(_setup: SetupFn, title?: string): void {
         expect(actual).toStrictEqual([
           { key: ["BBBBB", "sync-state", "zzzzz"], data: PAYLOAD_B() },
         ])
-
-        teardown()
       })
     })
   })
@@ -189,5 +147,5 @@ const NO_OP = () => {}
 
 export type SetupFn = () => Promise<{
   adapter: StorageAdapterInterface
-  teardown?: () => void
+  teardown?: () => void | Promise<void>
 }>
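
With `teardown` now allowed to return a promise, an adapter author's setup function can do async cleanup. A sketch; the adapter class, its methods, and the import path are illustrative:

    import { runStorageAdapterTests } from "@automerge/automerge-repo/helpers/tests/storage-adapter-tests.js"

    runStorageAdapterTests(async () => {
      const adapter = new MyDatabaseAdapter() // hypothetical StorageAdapterInterface implementation
      await adapter.connect() // hypothetical
      return { adapter, teardown: () => adapter.close() } // close() may return a Promise
    }, "MyDatabaseAdapter")
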
package/src/index.ts
CHANGED