@automerge/automerge-repo 2.0.0-alpha.2 → 2.0.0-alpha.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (76)
  1. package/README.md +5 -6
  2. package/dist/AutomergeUrl.d.ts +17 -5
  3. package/dist/AutomergeUrl.d.ts.map +1 -1
  4. package/dist/AutomergeUrl.js +71 -24
  5. package/dist/DocHandle.d.ts +89 -20
  6. package/dist/DocHandle.d.ts.map +1 -1
  7. package/dist/DocHandle.js +189 -28
  8. package/dist/FindProgress.d.ts +30 -0
  9. package/dist/FindProgress.d.ts.map +1 -0
  10. package/dist/FindProgress.js +1 -0
  11. package/dist/RemoteHeadsSubscriptions.d.ts +4 -5
  12. package/dist/RemoteHeadsSubscriptions.d.ts.map +1 -1
  13. package/dist/RemoteHeadsSubscriptions.js +4 -1
  14. package/dist/Repo.d.ts +44 -6
  15. package/dist/Repo.d.ts.map +1 -1
  16. package/dist/Repo.js +226 -87
  17. package/dist/entrypoints/fullfat.d.ts +1 -0
  18. package/dist/entrypoints/fullfat.d.ts.map +1 -1
  19. package/dist/entrypoints/fullfat.js +1 -2
  20. package/dist/helpers/abortable.d.ts +39 -0
  21. package/dist/helpers/abortable.d.ts.map +1 -0
  22. package/dist/helpers/abortable.js +45 -0
  23. package/dist/helpers/bufferFromHex.d.ts +3 -0
  24. package/dist/helpers/bufferFromHex.d.ts.map +1 -0
  25. package/dist/helpers/bufferFromHex.js +13 -0
  26. package/dist/helpers/headsAreSame.d.ts +2 -2
  27. package/dist/helpers/headsAreSame.d.ts.map +1 -1
  28. package/dist/helpers/mergeArrays.d.ts +1 -1
  29. package/dist/helpers/mergeArrays.d.ts.map +1 -1
  30. package/dist/helpers/tests/network-adapter-tests.d.ts.map +1 -1
  31. package/dist/helpers/tests/network-adapter-tests.js +13 -13
  32. package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
  33. package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
  34. package/dist/helpers/tests/storage-adapter-tests.js +25 -48
  35. package/dist/index.d.ts +1 -1
  36. package/dist/index.d.ts.map +1 -1
  37. package/dist/index.js +1 -1
  38. package/dist/storage/StorageSubsystem.d.ts +11 -1
  39. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  40. package/dist/storage/StorageSubsystem.js +20 -4
  41. package/dist/synchronizer/CollectionSynchronizer.d.ts +17 -3
  42. package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
  43. package/dist/synchronizer/CollectionSynchronizer.js +43 -18
  44. package/dist/synchronizer/DocSynchronizer.d.ts +10 -2
  45. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  46. package/dist/synchronizer/DocSynchronizer.js +30 -8
  47. package/dist/synchronizer/Synchronizer.d.ts +11 -0
  48. package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
  49. package/dist/types.d.ts +4 -1
  50. package/dist/types.d.ts.map +1 -1
  51. package/fuzz/fuzz.ts +3 -3
  52. package/package.json +3 -3
  53. package/src/AutomergeUrl.ts +101 -26
  54. package/src/DocHandle.ts +256 -38
  55. package/src/FindProgress.ts +48 -0
  56. package/src/RemoteHeadsSubscriptions.ts +11 -9
  57. package/src/Repo.ts +310 -95
  58. package/src/entrypoints/fullfat.ts +1 -2
  59. package/src/helpers/abortable.ts +61 -0
  60. package/src/helpers/bufferFromHex.ts +14 -0
  61. package/src/helpers/headsAreSame.ts +2 -2
  62. package/src/helpers/tests/network-adapter-tests.ts +14 -13
  63. package/src/helpers/tests/storage-adapter-tests.ts +44 -86
  64. package/src/index.ts +2 -0
  65. package/src/storage/StorageSubsystem.ts +29 -4
  66. package/src/synchronizer/CollectionSynchronizer.ts +56 -19
  67. package/src/synchronizer/DocSynchronizer.ts +34 -9
  68. package/src/synchronizer/Synchronizer.ts +14 -0
  69. package/src/types.ts +4 -1
  70. package/test/AutomergeUrl.test.ts +130 -0
  71. package/test/CollectionSynchronizer.test.ts +4 -4
  72. package/test/DocHandle.test.ts +189 -29
  73. package/test/DocSynchronizer.test.ts +10 -3
  74. package/test/Repo.test.ts +377 -191
  75. package/test/StorageSubsystem.test.ts +17 -0
  76. package/test/remoteHeads.test.ts +27 -12
package/src/Repo.ts CHANGED
@@ -2,11 +2,20 @@ import { next as Automerge } from "@automerge/automerge/slim"
 import debug from "debug"
 import { EventEmitter } from "eventemitter3"
 import {
+  encodeHeads,
   generateAutomergeUrl,
   interpretAsDocumentId,
+  isValidAutomergeUrl,
   parseAutomergeUrl,
 } from "./AutomergeUrl.js"
-import { DocHandle, DocHandleEncodedChangePayload } from "./DocHandle.js"
+import {
+  DELETED,
+  DocHandle,
+  DocHandleEncodedChangePayload,
+  READY,
+  UNAVAILABLE,
+  UNLOADED,
+} from "./DocHandle.js"
 import { RemoteHeadsSubscriptions } from "./RemoteHeadsSubscriptions.js"
 import { headsAreSame } from "./helpers/headsAreSame.js"
 import { throttle } from "./helpers/throttle.js"
@@ -20,8 +29,18 @@ import { StorageAdapterInterface } from "./storage/StorageAdapterInterface.js"
 import { StorageSubsystem } from "./storage/StorageSubsystem.js"
 import { StorageId } from "./storage/types.js"
 import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
-import { SyncStatePayload } from "./synchronizer/Synchronizer.js"
-import type { AnyDocumentId, DocumentId, PeerId } from "./types.js"
+import {
+  DocSyncMetrics,
+  SyncStatePayload,
+} from "./synchronizer/Synchronizer.js"
+import type {
+  AnyDocumentId,
+  AutomergeUrl,
+  DocumentId,
+  PeerId,
+} from "./types.js"
+import { abortable, AbortOptions } from "./helpers/abortable.js"
+import { FindProgress, FindProgressWithMethods } from "./FindProgress.js"
 
 function randomPeerId() {
   return ("peer-" + Math.random().toString(36).slice(4)) as PeerId
@@ -49,7 +68,8 @@ export class Repo extends EventEmitter<RepoEvents> {
 
   #handleCache: Record<DocumentId, DocHandle<any>> = {}
 
-  #synchronizer: CollectionSynchronizer
+  /** @hidden */
+  synchronizer: CollectionSynchronizer
 
   /** By default, we share generously with all peers. */
   /** @hidden */
@@ -69,39 +89,13 @@ export class Repo extends EventEmitter<RepoEvents> {
     sharePolicy,
     isEphemeral = storage === undefined,
     enableRemoteHeadsGossiping = false,
+    denylist = [],
   }: RepoConfig = {}) {
     super()
     this.#remoteHeadsGossipingEnabled = enableRemoteHeadsGossiping
     this.#log = debug(`automerge-repo:repo`)
    this.sharePolicy = sharePolicy ?? this.sharePolicy
 
-    // DOC COLLECTION
-
-    // The `document` event is fired by the DocCollection any time we create a new document or look
-    // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-    this.on("document", async ({ handle }) => {
-      if (storageSubsystem) {
-        // Save when the document changes, but no more often than saveDebounceRate.
-        const saveFn = ({
-          handle,
-          doc,
-        }: DocHandleEncodedChangePayload<any>) => {
-          void storageSubsystem.saveDoc(handle.documentId, doc)
-        }
-        handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
-      }
-
-      handle.on("unavailable", () => {
-        this.#log("document unavailable", { documentId: handle.documentId })
-        this.emit("unavailable-document", {
-          documentId: handle.documentId,
-        })
-      })
-
-      // Register the document with the synchronizer. This advertises our interest in the document.
-      this.#synchronizer.addDocument(handle.documentId)
-    })
-
     this.on("delete-document", ({ documentId }) => {
       // TODO Pass the delete on to the network
       // synchronizer.removeDocument(documentId)
@@ -115,16 +109,19 @@ export class Repo extends EventEmitter<RepoEvents> {
 
     // SYNCHRONIZER
     // The synchronizer uses the network subsystem to keep documents in sync with peers.
-    this.#synchronizer = new CollectionSynchronizer(this)
+    this.synchronizer = new CollectionSynchronizer(this, denylist)
 
     // When the synchronizer emits messages, send them to peers
-    this.#synchronizer.on("message", message => {
+    this.synchronizer.on("message", message => {
       this.#log(`sending ${message.type} message to ${message.targetId}`)
       networkSubsystem.send(message)
     })
 
+    // Forward metrics from doc synchronizers
+    this.synchronizer.on("metrics", event => this.emit("doc-metrics", event))
+
     if (this.#remoteHeadsGossipingEnabled) {
-      this.#synchronizer.on("open-doc", ({ peerId, documentId }) => {
+      this.synchronizer.on("open-doc", ({ peerId, documentId }) => {
         this.#remoteHeadsSubscriptions.subscribePeerToDoc(peerId, documentId)
       })
     }
@@ -132,6 +129,12 @@ export class Repo extends EventEmitter<RepoEvents> {
     // STORAGE
     // The storage subsystem has access to some form of persistence, and deals with save and loading documents.
     const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined
+    if (storageSubsystem) {
+      storageSubsystem.on("document-loaded", event =>
+        this.emit("doc-metrics", { type: "doc-loaded", ...event })
+      )
+    }
+
     this.storageSubsystem = storageSubsystem
 
     // NETWORK
@@ -167,12 +170,12 @@ export class Repo extends EventEmitter<RepoEvents> {
         console.log("error in share policy", { err })
       })
 
-      this.#synchronizer.addPeer(peerId)
+      this.synchronizer.addPeer(peerId)
     })
 
     // When a peer disconnects, remove it from the synchronizer
     networkSubsystem.on("peer-disconnected", ({ peerId }) => {
-      this.#synchronizer.removePeer(peerId)
+      this.synchronizer.removePeer(peerId)
       this.#remoteHeadsSubscriptions.removePeer(peerId)
     })
 
@@ -181,7 +184,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       this.#receiveMessage(msg)
     })
 
-    this.#synchronizer.on("sync-state", message => {
+    this.synchronizer.on("sync-state", message => {
       this.#saveSyncState(message)
 
       const handle = this.#handleCache[message.documentId]
@@ -194,16 +197,20 @@ export class Repo extends EventEmitter<RepoEvents> {
       const heads = handle.getRemoteHeads(storageId)
       const haveHeadsChanged =
         message.syncState.theirHeads &&
-        (!heads || !headsAreSame(heads, message.syncState.theirHeads))
+        (!heads ||
+          !headsAreSame(heads, encodeHeads(message.syncState.theirHeads)))
 
       if (haveHeadsChanged && message.syncState.theirHeads) {
-        handle.setRemoteHeads(storageId, message.syncState.theirHeads)
+        handle.setRemoteHeads(
+          storageId,
+          encodeHeads(message.syncState.theirHeads)
+        )
 
         if (storageId && this.#remoteHeadsGossipingEnabled) {
           this.#remoteHeadsSubscriptions.handleImmediateRemoteHeadsChanged(
             message.documentId,
             storageId,
-            message.syncState.theirHeads
+            encodeHeads(message.syncState.theirHeads)
           )
         }
       }
@@ -243,6 +250,22 @@ export class Repo extends EventEmitter<RepoEvents> {
     }
   }
 
+  // The `document` event is fired by the DocCollection any time we create a new document or look
+  // up a document by ID. We listen for it in order to wire up storage and network synchronization.
+  #registerHandleWithSubsystems(handle: DocHandle<any>) {
+    const { storageSubsystem } = this
+    if (storageSubsystem) {
+      // Save when the document changes, but no more often than saveDebounceRate.
+      const saveFn = ({ handle, doc }: DocHandleEncodedChangePayload<any>) => {
+        void storageSubsystem.saveDoc(handle.documentId, doc)
+      }
+      handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
+    }
+
+    // Register the document with the synchronizer. This advertises our interest in the document.
+    this.synchronizer.addDocument(handle)
+  }
+
   #receiveMessage(message: RepoMessage) {
     switch (message.type) {
       case "remote-subscription-change":
@@ -259,7 +282,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       case "request":
       case "ephemeral":
       case "doc-unavailable":
-        this.#synchronizer.receiveMessage(message).catch(err => {
+        this.synchronizer.receiveMessage(message).catch(err => {
          console.log("error receiving message", { err })
        })
    }
@@ -324,7 +347,7 @@ export class Repo extends EventEmitter<RepoEvents> {
 
   /** Returns a list of all connected peer ids */
   get peers(): PeerId[] {
-    return this.#synchronizer.peers
+    return this.synchronizer.peers
   }
 
   getStorageIdOfPeer(peerId: PeerId): StorageId | undefined {
@@ -343,7 +366,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       documentId,
     }) as DocHandle<T>
 
-    this.emit("document", { handle })
+    this.#registerHandleWithSubsystems(handle)
 
     handle.update(() => {
       let nextDoc: Automerge.Doc<T>
@@ -371,22 +394,16 @@ export class Repo extends EventEmitter<RepoEvents> {
    * Any peers this `Repo` is connected to for whom `sharePolicy` returns `true` will
    * be notified of the newly created DocHandle.
    *
-   * @throws if the cloned handle is not yet ready or if
-   * `clonedHandle.docSync()` returns `undefined` (i.e. the handle is unavailable).
    */
   clone<T>(clonedHandle: DocHandle<T>) {
     if (!clonedHandle.isReady()) {
       throw new Error(
         `Cloned handle is not yet in ready state.
-        (Try await handle.waitForReady() first.)`
+        (Try await handle.whenReady() first.)`
       )
     }
 
-    const sourceDoc = clonedHandle.docSync()
-    if (!sourceDoc) {
-      throw new Error("Cloned handle doesn't have a document.")
-    }
-
+    const sourceDoc = clonedHandle.doc()
     const handle = this.create<T>()
 
     handle.update(() => {
@@ -397,60 +414,198 @@ export class Repo extends EventEmitter<RepoEvents> {
     return handle
   }
 
-  /**
-   * Retrieves a document by id. It gets data from the local system, but also emits a `document`
-   * event to advertise interest in the document.
-   */
-  find<T>(
-    /** The url or documentId of the handle to retrieve */
-    id: AnyDocumentId
-  ): DocHandle<T> {
-    const documentId = interpretAsDocumentId(id)
+  findWithProgress<T>(
+    id: AnyDocumentId,
+    options: AbortOptions = {}
+  ): FindProgressWithMethods<T> | FindProgress<T> {
+    const { signal } = options
+    const abortPromise = abortable(signal)
 
-    // If we have the handle cached, return it
+    const { documentId, heads } = isValidAutomergeUrl(id)
+      ? parseAutomergeUrl(id)
+      : { documentId: interpretAsDocumentId(id), heads: undefined }
+
+    // Check cache first - return plain FindStep for terminal states
     if (this.#handleCache[documentId]) {
-      if (this.#handleCache[documentId].isUnavailable()) {
-        // this ensures that the event fires after the handle has been returned
-        setTimeout(() => {
-          this.#handleCache[documentId].emit("unavailable", {
-            handle: this.#handleCache[documentId],
-          })
-        })
+      const handle = this.#handleCache[documentId]
+      if (handle.state === UNAVAILABLE) {
+        const result = {
+          state: "unavailable" as const,
+          error: new Error(`Document ${id} is unavailable`),
+          handle,
+        }
+        return result
+      }
+      if (handle.state === DELETED) {
+        return {
+          state: "failed",
+          error: new Error(`Document ${id} was deleted`),
+          handle,
+        }
+      }
+      if (handle.state === READY) {
+        // If we already have the handle, return it immediately (or a view of the handle if heads are specified)
+        return {
+          state: "ready",
+          // TODO: this handle needs to be cached (or at least avoid running clone)
+          handle: heads ? handle.view(heads) : handle,
+        }
       }
-      return this.#handleCache[documentId]
     }
 
-    // If we don't already have the handle, make an empty one and try loading it
-    const handle = this.#getHandle<T>({
-      documentId,
-    }) as DocHandle<T>
+    // the generator takes over `this`, so we need an alias to the repo this
+    // eslint-disable-next-line @typescript-eslint/no-this-alias
+    const that = this
+    async function* progressGenerator(): AsyncGenerator<FindProgress<T>> {
+      try {
+        const handle = that.#getHandle<T>({ documentId })
+        yield { state: "loading", progress: 25, handle }
+
+        const loadingPromise = await (that.storageSubsystem
+          ? that.storageSubsystem.loadDoc(handle.documentId)
+          : Promise.resolve(null))
+
+        const loadedDoc = await Promise.race([loadingPromise, abortPromise])
 
-    // Try to load from disk before telling anyone else about it
-    if (this.storageSubsystem) {
-      void this.storageSubsystem.loadDoc(handle.documentId).then(loadedDoc => {
         if (loadedDoc) {
-          // uhhhh, sorry if you're reading this because we were lying to the type system
           handle.update(() => loadedDoc as Automerge.Doc<T>)
           handle.doneLoading()
+          yield { state: "loading", progress: 50, handle }
         } else {
-          this.networkSubsystem
-            .whenReady()
-            .then(() => {
-              handle.request()
-            })
-            .catch(err => {
-              this.#log("error waiting for network", { err })
-            })
-          this.emit("document", { handle })
+          await Promise.race([that.networkSubsystem.whenReady(), abortPromise])
+          handle.request()
+          yield { state: "loading", progress: 75, handle }
         }
-      })
+
+        that.#registerHandleWithSubsystems(handle)
+
+        await Promise.race([
+          handle.whenReady([READY, UNAVAILABLE]),
+          abortPromise,
+        ])
+
+        if (handle.state === UNAVAILABLE) {
+          yield { state: "unavailable", handle }
+        }
+        if (handle.state === DELETED) {
+          throw new Error(`Document ${id} was deleted`)
+        }
+
+        yield { state: "ready", handle }
+      } catch (error) {
+        yield {
+          state: "failed",
+          error: error instanceof Error ? error : new Error(String(error)),
+          handle,
+        }
+      }
+    }
+
+    const iterator = progressGenerator()
+
+    const next = async () => {
+      const result = await iterator.next()
+      return { ...result.value, next }
+    }
+
+    const untilReady = async (allowableStates: string[]) => {
+      for await (const state of iterator) {
+        if (allowableStates.includes(state.handle.state)) {
+          return state.handle
+        }
+        if (state.state === "unavailable") {
+          throw new Error(`Document ${id} is unavailable`)
+        }
+        if (state.state === "ready") return state.handle
+        if (state.state === "failed") throw state.error
+      }
+      throw new Error("Iterator completed without reaching ready state")
+    }
+
+    const handle = this.#getHandle<T>({ documentId })
+    const initial = { state: "loading" as const, progress: 0, handle }
+    return { ...initial, next, untilReady }
+  }
+
+  async find<T>(
+    id: AnyDocumentId,
+    options: RepoFindOptions & AbortOptions = {}
+  ): Promise<DocHandle<T>> {
+    const { allowableStates = ["ready"], signal } = options
+    const progress = this.findWithProgress<T>(id, { signal })
+
+    /*if (allowableStates.includes(progress.state)) {
+      console.log("returning early")
+      return progress.handle
+    }*/
+
+    if ("untilReady" in progress) {
+      this.#registerHandleWithSubsystems(progress.handle)
+      return progress.untilReady(allowableStates)
     } else {
+      return progress.handle
+    }
+  }
+
+  /**
+   * Loads a document without waiting for ready state
+   */
+  async #loadDocument<T>(documentId: DocumentId): Promise<DocHandle<T>> {
+    // If we have the handle cached, return it
+    if (this.#handleCache[documentId]) {
+      return this.#handleCache[documentId]
+    }
+
+    // If we don't already have the handle, make an empty one and try loading it
+    const handle = this.#getHandle<T>({ documentId })
+    const loadedDoc = await (this.storageSubsystem
+      ? this.storageSubsystem.loadDoc(handle.documentId)
+      : Promise.resolve(null))
+
+    if (loadedDoc) {
+      // We need to cast this to <T> because loadDoc operates in <unknowns>.
+      // This is really where we ought to be validating the input matches <T>.
+      handle.update(() => loadedDoc as Automerge.Doc<T>)
+      handle.doneLoading()
+    } else {
+      // Because the network subsystem might still be booting up, we wait
+      // here so that we don't immediately give up loading because we're still
+      // making our initial connection to a sync server.
+      await this.networkSubsystem.whenReady()
       handle.request()
-      this.emit("document", { handle })
     }
+
+    this.#registerHandleWithSubsystems(handle)
     return handle
   }
 
+  /**
+   * Retrieves a document by id. It gets data from the local system, but also emits a `document`
+   * event to advertise interest in the document.
+   */
+  async findClassic<T>(
+    /** The url or documentId of the handle to retrieve */
+    id: AnyDocumentId,
+    options: RepoFindOptions & AbortOptions = {}
+  ): Promise<DocHandle<T>> {
+    const documentId = interpretAsDocumentId(id)
+    const { allowableStates, signal } = options
+
+    return Promise.race([
+      (async () => {
+        const handle = await this.#loadDocument<T>(documentId)
+        if (!allowableStates) {
+          await handle.whenReady([READY, UNAVAILABLE])
+          if (handle.state === UNAVAILABLE && !signal?.aborted) {
+            throw new Error(`Document ${id} is unavailable`)
+          }
+        }
+        return handle
+      })(),
+      abortable(signal),
+    ])
+  }
+
   delete(
     /** The url or documentId of the handle to delete */
     id: AnyDocumentId
@@ -475,8 +630,7 @@ export class Repo extends EventEmitter<RepoEvents> {
     const documentId = interpretAsDocumentId(id)
 
     const handle = this.#getHandle({ documentId })
-    const doc = await handle.doc()
-    if (!doc) return undefined
+    const doc = handle.doc()
     return Automerge.save(doc)
   }
 
@@ -530,21 +684,56 @@ export class Repo extends EventEmitter<RepoEvents> {
       : Object.values(this.#handleCache)
     await Promise.all(
       handles.map(async handle => {
-        const doc = handle.docSync()
-        if (!doc) {
-          return
-        }
-        return this.storageSubsystem!.saveDoc(handle.documentId, doc)
+        return this.storageSubsystem!.saveDoc(handle.documentId, handle.doc())
       })
     )
   }
 
+  /**
+   * Removes a DocHandle from the handleCache.
+   * @hidden this API is experimental and may change.
+   * @param documentId - documentId of the DocHandle to remove from handleCache, if present in cache.
+   * @returns Promise<void>
+   */
+  async removeFromCache(documentId: DocumentId) {
+    if (!this.#handleCache[documentId]) {
+      this.#log(
+        `WARN: removeFromCache called but handle not found in handleCache for documentId: ${documentId}`
+      )
+      return
+    }
+    const handle = this.#getHandle({ documentId })
+    await handle.whenReady([READY, UNLOADED, DELETED, UNAVAILABLE])
+    const doc = handle.doc()
+    // because this is an internal-ish function, we'll be extra careful about undefined docs here
+    if (doc) {
+      if (handle.isReady()) {
+        handle.unload()
+      } else {
+        this.#log(
+          `WARN: removeFromCache called but handle for documentId: ${documentId} in unexpected state: ${handle.state}`
+        )
+      }
+      delete this.#handleCache[documentId]
+      // TODO: remove document from synchronizer when removeDocument is implemented
+      // this.synchronizer.removeDocument(documentId)
+    } else {
+      this.#log(
+        `WARN: removeFromCache called but doc undefined for documentId: ${documentId}`
+      )
+    }
+  }
+
   shutdown(): Promise<void> {
     this.networkSubsystem.adapters.forEach(adapter => {
       adapter.disconnect()
     })
     return this.flush()
   }
+
+  metrics(): { documents: { [key: string]: any } } {
+    return { documents: this.synchronizer.metrics() }
+  }
 }
 
 export interface RepoConfig {
@@ -571,6 +760,13 @@ export interface RepoConfig {
   * Whether to enable the experimental remote heads gossiping feature
   */
  enableRemoteHeadsGossiping?: boolean
+
+  /**
+   * A list of automerge URLs which should never be loaded regardless of what
+   * messages are received or what the share policy is. This is useful to avoid
+   * loading documents that are known to be too resource intensive.
+   */
+  denylist?: AutomergeUrl[]
 }
 
 /** A function that determines whether we should share a document with a peer
@@ -594,6 +790,11 @@ export interface RepoEvents {
   "delete-document": (arg: DeleteDocumentPayload) => void
   /** A document was marked as unavailable (we don't have it and none of our peers have it) */
   "unavailable-document": (arg: DeleteDocumentPayload) => void
+  "doc-metrics": (arg: DocMetrics) => void
+}
+
+export interface RepoFindOptions {
+  allowableStates?: string[]
 }
 
 export interface DocumentPayload {
@@ -603,3 +804,17 @@ export interface DocumentPayload {
 export interface DeleteDocumentPayload {
   documentId: DocumentId
 }
+
+export type DocMetrics =
+  | DocSyncMetrics
+  | {
+      type: "doc-loaded"
+      documentId: DocumentId
+      durationMillis: number
+      numOps: number
+      numChanges: number
+    }
+  | {
+      type: "doc-denied"
+      documentId: DocumentId
+    }
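
Taken together, the `Repo.ts` hunks above change the lookup API: `find()` is now async (resolving once the handle is ready, rejecting if the document is unavailable or the signal aborts), `findWithProgress()` exposes the intermediate loading states, `RepoConfig` gains a `denylist`, and the repo emits `doc-metrics` events. A minimal usage sketch, not part of the diff; the URL and document shape are illustrative:

```typescript
import { Repo, AutomergeUrl } from "@automerge/automerge-repo"

declare const url: AutomergeUrl // e.g. an "automerge:..." URL from elsewhere in the app

const repo = new Repo({
  // New in this release: documents that must never be loaded, regardless of
  // incoming messages or the share policy.
  denylist: [],
})

// find() now returns a Promise and accepts an AbortSignal.
const controller = new AbortController()
const handle = await repo.find<{ title: string }>(url, {
  signal: controller.signal,
  // allowableStates: ["unavailable"], // opt in to receiving not-yet-ready handles
})
console.log(handle.doc().title) // doc() is synchronous once the handle is ready

// findWithProgress() yields intermediate states instead of a single promise.
const progress = repo.findWithProgress<{ title: string }>(url)
if ("next" in progress) {
  let step = await progress.next()
  while (step.state === "loading") step = await step.next()
}

// Load and sync measurements arrive as doc-metrics events (DocMetrics above).
repo.on("doc-metrics", metric => console.log(metric))
```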
package/src/entrypoints/fullfat.ts CHANGED
@@ -7,5 +7,4 @@ export * from "../index.js"
 // disable
 //
 // eslint-disable-next-line automerge-slimport/enforce-automerge-slim-import
-import { next as Am } from "@automerge/automerge"
-Am.init()
+import "@automerge/automerge"
package/src/helpers/abortable.ts ADDED
@@ -0,0 +1,61 @@
+/**
+ * Creates a promise that rejects when the signal is aborted.
+ *
+ * @remarks
+ * This utility creates a promise that rejects when the provided AbortSignal is aborted.
+ * It's designed to be used with Promise.race() to make operations abortable.
+ *
+ * @example
+ * ```typescript
+ * const controller = new AbortController();
+ *
+ * try {
+ *   const result = await Promise.race([
+ *     fetch('https://api.example.com/data'),
+ *     abortable(controller.signal)
+ *   ]);
+ * } catch (err) {
+ *   if (err.name === 'AbortError') {
+ *     console.log('The operation was aborted');
+ *   }
+ * }
+ *
+ * // Later, to abort:
+ * controller.abort();
+ * ```
+ *
+ * @param signal - An AbortSignal that can be used to abort the operation
+ * @param cleanup - Optional cleanup function that will be called if aborted
+ * @returns A promise that rejects with AbortError when the signal is aborted
+ * @throws {DOMException} With name "AbortError" when aborted
+ */
+export function abortable(
+  signal?: AbortSignal,
+  cleanup?: () => void
+): Promise<never> {
+  if (signal?.aborted) {
+    throw new DOMException("Operation aborted", "AbortError")
+  }
+
+  if (!signal) {
+    return new Promise(() => {}) // Never resolves
+  }
+
+  return new Promise((_, reject) => {
+    signal.addEventListener(
+      "abort",
+      () => {
+        cleanup?.()
+        reject(new DOMException("Operation aborted", "AbortError"))
+      },
+      { once: true }
+    )
+  })
+}
+
+/**
+ * Include this type in an options object to pass an AbortSignal to a function.
+ */
+export interface AbortOptions {
+  signal?: AbortSignal
+}
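
The JSDoc example above covers the basic `AbortController` case; the pattern `Repo.ts` actually relies on is racing `abortable()` against a long-running promise. A small sketch of that pattern, with `slowOperation` as a placeholder; `AbortSignal.timeout()` is a standard platform API. Note that `abortable()` throws synchronously (rather than rejecting) if the signal is already aborted:

```typescript
import { abortable } from "./abortable.js"

declare function slowOperation(): Promise<string>

// Give up after five seconds: whichever promise settles first wins the race.
const result = await Promise.race([
  slowOperation(),
  abortable(AbortSignal.timeout(5_000)),
])
```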
package/src/helpers/bufferFromHex.ts ADDED
@@ -0,0 +1,14 @@
+export const uint8ArrayFromHexString = (hexString: string): Uint8Array => {
+  if (hexString.length % 2 !== 0) {
+    throw new Error("Hex string must have an even length")
+  }
+  const bytes = new Uint8Array(hexString.length / 2)
+  for (let i = 0; i < hexString.length; i += 2) {
+    bytes[i >> 1] = parseInt(hexString.slice(i, i + 2), 16)
+  }
+  return bytes
+}
+
+export const uint8ArrayToHexString = (data: Uint8Array): string => {
+  return Array.from(data, byte => byte.toString(16).padStart(2, "0")).join("")
+}
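
These helpers are a small dependency-free hex codec. A round-trip sketch with illustrative values:

```typescript
import {
  uint8ArrayFromHexString,
  uint8ArrayToHexString,
} from "./bufferFromHex.js"

const bytes = uint8ArrayFromHexString("00ff10") // Uint8Array [0, 255, 16]
uint8ArrayToHexString(bytes) // => "00ff10"
// uint8ArrayFromHexString("abc") // throws: odd-length input
```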
package/src/helpers/headsAreSame.ts CHANGED
@@ -1,6 +1,6 @@
-import { Heads } from "@automerge/automerge/slim/next"
 import { arraysAreEqual } from "./arraysAreEqual.js"
+import type { UrlHeads } from "../types.js"
 
-export const headsAreSame = (a: Heads, b: Heads) => {
+export const headsAreSame = (a: UrlHeads, b: UrlHeads) => {
   return arraysAreEqual(a, b)
 }
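
The signature change reflects the convention visible in the `Repo.ts` sync-state hunk: heads are now compared as URL-encoded `UrlHeads` strings, so raw Automerge `Heads` must pass through `encodeHeads` first. A hedged sketch of that convention (the `stored` and `incoming` values are placeholders):

```typescript
import type { Heads } from "@automerge/automerge/slim/next"
import { encodeHeads } from "../AutomergeUrl.js"
import { headsAreSame } from "./headsAreSame.js"
import type { UrlHeads } from "../types.js"

declare const stored: UrlHeads // e.g. from handle.getRemoteHeads(storageId)
declare const incoming: Heads // raw heads off the wire

// Raw Heads are encoded to UrlHeads before comparison, as Repo.ts does above.
const changed = !headsAreSame(stored, encodeHeads(incoming))
```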