@automerge/automerge-repo 2.0.0-alpha.1 → 2.0.0-alpha.12

Files changed (39)
  1. package/dist/DocHandle.d.ts +71 -2
  2. package/dist/DocHandle.d.ts.map +1 -1
  3. package/dist/DocHandle.js +116 -2
  4. package/dist/Repo.d.ts +24 -0
  5. package/dist/Repo.d.ts.map +1 -1
  6. package/dist/Repo.js +94 -57
  7. package/dist/entrypoints/fullfat.d.ts +1 -0
  8. package/dist/entrypoints/fullfat.d.ts.map +1 -1
  9. package/dist/entrypoints/fullfat.js +1 -2
  10. package/dist/entrypoints/slim.d.ts +1 -0
  11. package/dist/entrypoints/slim.d.ts.map +1 -1
  12. package/dist/entrypoints/slim.js +2 -0
  13. package/dist/helpers/tests/storage-adapter-tests.d.ts +2 -2
  14. package/dist/helpers/tests/storage-adapter-tests.d.ts.map +1 -1
  15. package/dist/helpers/tests/storage-adapter-tests.js +19 -39
  16. package/dist/storage/StorageSubsystem.d.ts +11 -1
  17. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  18. package/dist/storage/StorageSubsystem.js +18 -3
  19. package/dist/synchronizer/CollectionSynchronizer.d.ts +13 -0
  20. package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
  21. package/dist/synchronizer/CollectionSynchronizer.js +13 -6
  22. package/dist/synchronizer/DocSynchronizer.d.ts +7 -0
  23. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  24. package/dist/synchronizer/DocSynchronizer.js +14 -0
  25. package/dist/synchronizer/Synchronizer.d.ts +8 -0
  26. package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
  27. package/package.json +3 -3
  28. package/src/DocHandle.ts +137 -4
  29. package/src/Repo.ts +123 -56
  30. package/src/entrypoints/fullfat.ts +1 -2
  31. package/src/entrypoints/slim.ts +2 -0
  32. package/src/helpers/tests/storage-adapter-tests.ts +31 -62
  33. package/src/storage/StorageSubsystem.ts +26 -3
  34. package/src/synchronizer/CollectionSynchronizer.ts +23 -6
  35. package/src/synchronizer/DocSynchronizer.ts +15 -0
  36. package/src/synchronizer/Synchronizer.ts +9 -0
  37. package/test/DocHandle.test.ts +141 -0
  38. package/test/Repo.test.ts +73 -0
  39. package/test/StorageSubsystem.test.ts +17 -0
package/src/DocHandle.ts CHANGED
@@ -72,6 +72,9 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
           this.emit("delete", { handle: this })
           return { doc: A.init() }
         }),
+        onUnload: assign(() => {
+          return { doc: A.init() }
+        }),
         onUnavailable: () => {
           this.emit("unavailable", { handle: this })
         },
@@ -86,6 +89,7 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
       context: { documentId, doc },
       on: {
         UPDATE: { actions: "onUpdate" },
+        UNLOAD: ".unloaded",
         DELETE: ".deleted",
       },
       states: {
@@ -113,6 +117,12 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
           on: { DOC_READY: "ready" },
         },
         ready: {},
+        unloaded: {
+          entry: "onUnload",
+          on: {
+            RELOAD: "loading",
+          },
+        },
         deleted: { entry: "onDelete", type: "final" },
       },
     })
@@ -131,7 +141,7 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
 
     // Start the machine, and send a create or find event to get things going
     this.#machine.start()
-    this.#machine.send({ type: BEGIN })
+    this.begin()
   }
 
   // PRIVATE
@@ -203,6 +213,14 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
    */
   isReady = () => this.inState(["ready"])
 
+  /**
+   * @returns true if the document has been unloaded.
+   *
+   * Unloaded documents are freed from memory but not removed from local storage. It's not currently
+   * possible at runtime to reload an unloaded document.
+   */
+  isUnloaded = () => this.inState(["unloaded"])
+
   /**
    * @returns true if the document has been marked as deleted.
    *
@@ -291,6 +309,94 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
     return A.getHeads(this.#doc)
   }
 
+  begin() {
+    this.#machine.send({ type: BEGIN })
+  }
+
+  /**
+   * Creates a fixed "view" of an automerge document at the given point in time represented
+   * by the `heads` passed in. The return value is the same type as docSync() and will return
+   * undefined if the object hasn't finished loading.
+   *
+   * @remarks
+   * A point-in-time in an automerge document is an *array* of heads since there may be
+   * concurrent edits. This API just returns a topologically sorted history of all edits
+   * so every previous entry will be (in some sense) before later ones, but the set of all possible
+   * history views would be quite large under concurrency (every thing in each branch against each other).
+   * There might be a clever way to think about this, but we haven't found it yet, so for now at least
+   * we present a single traversable view which excludes concurrency.
+   * @returns The individual heads for every change in the document.
+   */
+  history(): A.Heads[] | undefined {
+    if (!this.isReady()) {
+      return undefined
+    }
+    // This just returns all the heads as individual strings.
+
+    return A.topoHistoryTraversal(this.#doc).map(h => [h]) as A.Heads[]
+  }
+
+  /**
+   * Creates a fixed "view" of an automerge document at the given point in time represented
+   * by the `heads` passed in. The return value is the same type as docSync() and will return
+   * undefined if the object hasn't finished loading.
+   *
+   * @remarks
+   * Note that our Typescript types do not consider change over time and the current version
+   * of Automerge doesn't check types at runtime, so if you go back to an old set of heads
+   * that doesn't match the heads here, Typescript will not save you.
+   *
+   * @returns An Automerge.Doc<T> at the point in time.
+   */
+  view(heads: A.Heads): A.Doc<T> | undefined {
+    if (!this.isReady()) {
+      return undefined
+    }
+    return A.view(this.#doc, heads)
+  }
+
+  /**
+   * Returns a set of Patch operations that will move a materialized document from one state to another
+   * if applied.
+   *
+   * @remarks
+   * We allow specifying both a from/to heads or just a single comparison point, in which case
+   * the base will be the current document heads.
+   *
+   * @returns Automerge patches that go from one document state to the other. Use view() to get the full state.
+   */
+  diff(first: A.Heads, second?: A.Heads): A.Patch[] | undefined {
+    if (!this.isReady()) {
+      return undefined
+    }
+    // We allow only one set of heads to be specified, in which case we use the doc's heads
+    const from = second ? first : this.heads() || [] // because we guard above this should always have useful data
+    const to = second ? second : first
+    return A.diff(this.#doc, from, to)
+  }
+
+  /**
+   * `metadata(head?)` allows you to look at the metadata for a change
+   * this can be used to build history graphs to find commit messages and edit times.
+   * this interface.
+   *
+   * @remarks
+   * I'm really not convinced this is the right way to surface this information so
+   * I'm leaving this API "hidden".
+   *
+   * @hidden
+   */
+  metadata(change?: string): A.DecodedChange | undefined {
+    if (!this.isReady()) {
+      return undefined
+    }
+    if (!change) {
+      change = this.heads()![0]
+    }
+    // we return undefined instead of null by convention in this API
+    return A.inspectChange(this.#doc, change) || undefined
+  }
+
   /**
    * `update` is called any time we have a new document state; could be
    * from a local change, a remote change, or a new document from storage.
@@ -421,6 +527,16 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
     if (this.#state === "loading") this.#machine.send({ type: REQUEST })
   }
 
+  /** Called by the repo to free memory used by the document. */
+  unload() {
+    this.#machine.send({ type: UNLOAD })
+  }
+
+  /** Called by the repo to reuse an unloaded handle. */
+  reload() {
+    this.#machine.send({ type: RELOAD })
+  }
+
   /** Called by the repo when the document is deleted. */
   delete() {
     this.#machine.send({ type: DELETE })
@@ -439,6 +555,10 @@ export class DocHandle<T> extends EventEmitter<DocHandleEvents<T>> {
       data: encode(message),
     })
   }
+
+  metrics(): { numOps: number; numChanges: number } {
+    return A.stats(this.#doc)
+  }
 }
 
 // TYPES
@@ -539,6 +659,8 @@ export const HandleState = {
   REQUESTING: "requesting",
   /** The document is available */
   READY: "ready",
+  /** The document has been unloaded from the handle, to free memory usage */
+  UNLOADED: "unloaded",
   /** The document has been deleted from the repo */
   DELETED: "deleted",
   /** The document was not available in storage or from any connected peers */
@@ -546,8 +668,15 @@ export const HandleState = {
 } as const
 export type HandleState = (typeof HandleState)[keyof typeof HandleState]
 
-export const { IDLE, LOADING, REQUESTING, READY, DELETED, UNAVAILABLE } =
-  HandleState
+export const {
+  IDLE,
+  LOADING,
+  REQUESTING,
+  READY,
+  UNLOADED,
+  DELETED,
+  UNAVAILABLE,
+} = HandleState
 
 // context
 
@@ -567,14 +696,18 @@ type DocHandleEvent<T> =
       type: typeof UPDATE
       payload: { callback: (doc: A.Doc<T>) => A.Doc<T> }
     }
-  | { type: typeof TIMEOUT }
+  | { type: typeof UNLOAD }
+  | { type: typeof RELOAD }
   | { type: typeof DELETE }
+  | { type: typeof TIMEOUT }
  | { type: typeof DOC_UNAVAILABLE }
 
const BEGIN = "BEGIN"
const REQUEST = "REQUEST"
const DOC_READY = "DOC_READY"
const UPDATE = "UPDATE"
+const UNLOAD = "UNLOAD"
+const RELOAD = "RELOAD"
const DELETE = "DELETE"
const TIMEOUT = "TIMEOUT"
const DOC_UNAVAILABLE = "DOC_UNAVAILABLE"
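
Taken together, the DocHandle changes above add time-travel reads (history, view, diff, metadata), an explicit unloaded state with unload()/reload(), and a metrics() accessor. A rough usage sketch of the new surface follows; it uses a repo with no network adapters, and all variable names are illustrative only, not part of the package:

import { Repo } from "@automerge/automerge-repo"

const repo = new Repo({ network: [] })
const handle = repo.create<{ title: string }>()

handle.change(d => { d.title = "first" })
handle.change(d => { d.title = "second" })

// history() returns one set of heads per change, oldest first (concurrency flattened).
const past = handle.history()
if (past && past.length > 0) {
  const oldDoc = handle.view(past[0])      // materialize the document at the first change
  const patches = handle.diff(past[0])     // patches from that state to the current heads
  const meta = handle.metadata(past[0][0]) // decoded change metadata (message, time), if any
  console.log({ oldDoc, patches, meta })
}

console.log(handle.metrics()) // { numOps, numChanges }
console.log(handle.isReady(), handle.isUnloaded())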
package/src/Repo.ts CHANGED
@@ -6,7 +6,14 @@ import {
   interpretAsDocumentId,
   parseAutomergeUrl,
 } from "./AutomergeUrl.js"
-import { DocHandle, DocHandleEncodedChangePayload } from "./DocHandle.js"
+import {
+  DELETED,
+  DocHandle,
+  DocHandleEncodedChangePayload,
+  READY,
+  UNAVAILABLE,
+  UNLOADED,
+} from "./DocHandle.js"
 import { RemoteHeadsSubscriptions } from "./RemoteHeadsSubscriptions.js"
 import { headsAreSame } from "./helpers/headsAreSame.js"
 import { throttle } from "./helpers/throttle.js"
@@ -20,7 +27,10 @@ import { StorageAdapterInterface } from "./storage/StorageAdapterInterface.js"
 import { StorageSubsystem } from "./storage/StorageSubsystem.js"
 import { StorageId } from "./storage/types.js"
 import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
-import { SyncStatePayload } from "./synchronizer/Synchronizer.js"
+import {
+  DocSyncMetrics,
+  SyncStatePayload,
+} from "./synchronizer/Synchronizer.js"
 import type { AnyDocumentId, DocumentId, PeerId } from "./types.js"
 
 function randomPeerId() {
@@ -49,7 +59,8 @@ export class Repo extends EventEmitter<RepoEvents> {
 
   #handleCache: Record<DocumentId, DocHandle<any>> = {}
 
-  #synchronizer: CollectionSynchronizer
+  /** @hidden */
+  synchronizer: CollectionSynchronizer
 
   /** By default, we share generously with all peers. */
   /** @hidden */
@@ -75,33 +86,6 @@ export class Repo extends EventEmitter<RepoEvents> {
     this.#log = debug(`automerge-repo:repo`)
     this.sharePolicy = sharePolicy ?? this.sharePolicy
 
-    // DOC COLLECTION
-
-    // The `document` event is fired by the DocCollection any time we create a new document or look
-    // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-    this.on("document", async ({ handle }) => {
-      if (storageSubsystem) {
-        // Save when the document changes, but no more often than saveDebounceRate.
-        const saveFn = ({
-          handle,
-          doc,
-        }: DocHandleEncodedChangePayload<any>) => {
-          void storageSubsystem.saveDoc(handle.documentId, doc)
-        }
-        handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
-      }
-
-      handle.on("unavailable", () => {
-        this.#log("document unavailable", { documentId: handle.documentId })
-        this.emit("unavailable-document", {
-          documentId: handle.documentId,
-        })
-      })
-
-      // Register the document with the synchronizer. This advertises our interest in the document.
-      this.#synchronizer.addDocument(handle.documentId)
-    })
-
     this.on("delete-document", ({ documentId }) => {
       // TODO Pass the delete on to the network
       // synchronizer.removeDocument(documentId)
@@ -115,16 +99,19 @@ export class Repo extends EventEmitter<RepoEvents> {
 
     // SYNCHRONIZER
     // The synchronizer uses the network subsystem to keep documents in sync with peers.
-    this.#synchronizer = new CollectionSynchronizer(this)
+    this.synchronizer = new CollectionSynchronizer(this)
 
     // When the synchronizer emits messages, send them to peers
-    this.#synchronizer.on("message", message => {
+    this.synchronizer.on("message", message => {
       this.#log(`sending ${message.type} message to ${message.targetId}`)
       networkSubsystem.send(message)
     })
 
+    // Forward metrics from doc synchronizers
+    this.synchronizer.on("metrics", event => this.emit("doc-metrics", event))
+
     if (this.#remoteHeadsGossipingEnabled) {
-      this.#synchronizer.on("open-doc", ({ peerId, documentId }) => {
+      this.synchronizer.on("open-doc", ({ peerId, documentId }) => {
        this.#remoteHeadsSubscriptions.subscribePeerToDoc(peerId, documentId)
      })
    }
@@ -132,6 +119,12 @@ export class Repo extends EventEmitter<RepoEvents> {
     // STORAGE
     // The storage subsystem has access to some form of persistence, and deals with save and loading documents.
     const storageSubsystem = storage ? new StorageSubsystem(storage) : undefined
+    if (storageSubsystem) {
+      storageSubsystem.on("document-loaded", event =>
+        this.emit("doc-metrics", { type: "doc-loaded", ...event })
+      )
+    }
+
     this.storageSubsystem = storageSubsystem
 
     // NETWORK
@@ -167,12 +160,12 @@ export class Repo extends EventEmitter<RepoEvents> {
           console.log("error in share policy", { err })
         })
 
-      this.#synchronizer.addPeer(peerId)
+      this.synchronizer.addPeer(peerId)
     })
 
     // When a peer disconnects, remove it from the synchronizer
     networkSubsystem.on("peer-disconnected", ({ peerId }) => {
-      this.#synchronizer.removePeer(peerId)
+      this.synchronizer.removePeer(peerId)
       this.#remoteHeadsSubscriptions.removePeer(peerId)
     })
 
@@ -181,7 +174,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       this.#receiveMessage(msg)
     })
 
-    this.#synchronizer.on("sync-state", message => {
+    this.synchronizer.on("sync-state", message => {
       this.#saveSyncState(message)
 
       const handle = this.#handleCache[message.documentId]
@@ -243,6 +236,32 @@ export class Repo extends EventEmitter<RepoEvents> {
     }
   }
 
+  // The `document` event is fired by the DocCollection any time we create a new document or look
+  // up a document by ID. We listen for it in order to wire up storage and network synchronization.
+  #registerHandleWithSubsystems(handle: DocHandle<any>) {
+    const { storageSubsystem } = this
+    if (storageSubsystem) {
+      // Save when the document changes, but no more often than saveDebounceRate.
+      const saveFn = ({ handle, doc }: DocHandleEncodedChangePayload<any>) => {
+        void storageSubsystem.saveDoc(handle.documentId, doc)
+      }
+      handle.on("heads-changed", throttle(saveFn, this.saveDebounceRate))
+    }
+
+    handle.on("unavailable", () => {
+      this.#log("document unavailable", { documentId: handle.documentId })
+      this.emit("unavailable-document", {
+        documentId: handle.documentId,
+      })
+    })
+
+    // Register the document with the synchronizer. This advertises our interest in the document.
+    this.synchronizer.addDocument(handle.documentId)
+
+    // Preserve the old event in case anyone was using it.
+    this.emit("document", { handle })
+  }
+
   #receiveMessage(message: RepoMessage) {
     switch (message.type) {
       case "remote-subscription-change":
@@ -259,7 +278,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       case "request":
       case "ephemeral":
       case "doc-unavailable":
-        this.#synchronizer.receiveMessage(message).catch(err => {
+        this.synchronizer.receiveMessage(message).catch(err => {
           console.log("error receiving message", { err })
         })
     }
@@ -324,7 +343,7 @@ export class Repo extends EventEmitter<RepoEvents> {
 
   /** Returns a list of all connected peer ids */
   get peers(): PeerId[] {
-    return this.#synchronizer.peers
+    return this.synchronizer.peers
   }
 
   getStorageIdOfPeer(peerId: PeerId): StorageId | undefined {
@@ -343,7 +362,7 @@ export class Repo extends EventEmitter<RepoEvents> {
       documentId,
     }) as DocHandle<T>
 
-    this.emit("document", { handle })
+    this.#registerHandleWithSubsystems(handle)
 
     handle.update(() => {
       let nextDoc: Automerge.Doc<T>
@@ -378,7 +397,7 @@ export class Repo extends EventEmitter<RepoEvents> {
     if (!clonedHandle.isReady()) {
       throw new Error(
         `Cloned handle is not yet in ready state.
-        (Try await handle.waitForReady() first.)`
+        (Try await handle.whenReady() first.)`
       )
     }
 
@@ -425,29 +444,29 @@ export class Repo extends EventEmitter<RepoEvents> {
       documentId,
     }) as DocHandle<T>
 
-    // Try to load from disk before telling anyone else about it
-    if (this.storageSubsystem) {
-      void this.storageSubsystem.loadDoc(handle.documentId).then(loadedDoc => {
+    // Loading & network is going to be asynchronous no matter what,
+    // but we want to return the handle immediately.
+    const attemptLoad = this.storageSubsystem
+      ? this.storageSubsystem.loadDoc(handle.documentId)
+      : Promise.resolve(null)
+
+    attemptLoad
+      .then(async loadedDoc => {
         if (loadedDoc) {
           // uhhhh, sorry if you're reading this because we were lying to the type system
          handle.update(() => loadedDoc as Automerge.Doc<T>)
          handle.doneLoading()
        } else {
-          this.networkSubsystem
-            .whenReady()
-            .then(() => {
-              handle.request()
-            })
-            .catch(err => {
-              this.#log("error waiting for network", { err })
-            })
-          this.emit("document", { handle })
+          // we want to wait for the network subsystem to be ready before
+          // we request the document. this prevents entering unavailable during initialization.
+          await this.networkSubsystem.whenReady()
+          handle.request()
        }
+        this.#registerHandleWithSubsystems(handle)
+      })
+      .catch(err => {
+        this.#log("error waiting for network", { err })
      })
-    } else {
-      handle.request()
-      this.emit("document", { handle })
-    }
     return handle
   }
 
@@ -539,12 +558,49 @@ export class Repo extends EventEmitter<RepoEvents> {
     )
   }
 
+  /**
+   * Removes a DocHandle from the handleCache.
+   * @hidden this API is experimental and may change.
+   * @param documentId - documentId of the DocHandle to remove from handleCache, if present in cache.
+   * @returns Promise<void>
+   */
+  async removeFromCache(documentId: DocumentId) {
+    if (!this.#handleCache[documentId]) {
+      this.#log(
+        `WARN: removeFromCache called but handle not found in handleCache for documentId: ${documentId}`
+      )
+      return
+    }
+    const handle = this.#getHandle({ documentId })
+    const doc = await handle.doc([READY, UNLOADED, DELETED, UNAVAILABLE])
+    if (doc) {
+      if (handle.isReady()) {
+        handle.unload()
+      } else {
+        this.#log(
+          `WARN: removeFromCache called but handle for documentId: ${documentId} in unexpected state: ${handle.state}`
+        )
+      }
+      delete this.#handleCache[documentId]
+      // TODO: remove document from synchronizer when removeDocument is implemented
+      // this.synchronizer.removeDocument(documentId)
+    } else {
+      this.#log(
+        `WARN: removeFromCache called but doc undefined for documentId: ${documentId}`
+      )
+    }
+  }
+
   shutdown(): Promise<void> {
     this.networkSubsystem.adapters.forEach(adapter => {
       adapter.disconnect()
     })
     return this.flush()
   }
+
+  metrics(): { documents: { [key: string]: any } } {
+    return { documents: this.synchronizer.metrics() }
+  }
 }
 
 export interface RepoConfig {
@@ -594,6 +650,7 @@ export interface RepoEvents {
   "delete-document": (arg: DeleteDocumentPayload) => void
   /** A document was marked as unavailable (we don't have it and none of our peers have it) */
   "unavailable-document": (arg: DeleteDocumentPayload) => void
+  "doc-metrics": (arg: DocMetrics) => void
 }
 
 export interface DocumentPayload {
@@ -603,3 +660,13 @@ export interface DocumentPayload {
 }
 export interface DeleteDocumentPayload {
   documentId: DocumentId
+
+export type DocMetrics =
+  | DocSyncMetrics
+  | {
+      type: "doc-loaded"
+      documentId: DocumentId
+      durationMillis: number
+      numOps: number
+      numChanges: number
+    }
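
On the Repo side, the visible additions are the "doc-metrics" event (sync metrics plus "doc-loaded" timings forwarded from storage), a metrics() summary keyed by document id, and the experimental, @hidden removeFromCache(). A minimal sketch of wiring these up; the storage adapter package and the top-level await are assumptions about the consumer's environment, not something this diff prescribes:

import { Repo } from "@automerge/automerge-repo"
import { NodeFSStorageAdapter } from "@automerge/automerge-repo-storage-nodefs"

const repo = new Repo({ network: [], storage: new NodeFSStorageAdapter("./data") })

// Each event is either a DocSyncMetrics record or
// { type: "doc-loaded", documentId, durationMillis, numOps, numChanges }.
repo.on("doc-metrics", event => console.log("doc-metrics", event))

const handle = repo.create<{ count: number }>()
handle.change(d => { d.count = 1 })

// Per-document sync metrics gathered from the CollectionSynchronizer.
console.log(repo.metrics().documents)

// Experimental: unload the handle and drop it from the in-memory cache; the document stays in storage.
await repo.removeFromCache(handle.documentId)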
package/src/entrypoints/fullfat.ts CHANGED
@@ -7,5 +7,4 @@ export * from "../index.js"
 // disable
 //
 // eslint-disable-next-line automerge-slimport/enforce-automerge-slim-import
-import { next as Am } from "@automerge/automerge"
-Am.init()
+import "@automerge/automerge"
package/src/entrypoints/slim.ts CHANGED
@@ -1,2 +1,4 @@
 export * from "../index.js"
 export { initializeBase64Wasm, initializeWasm } from "@automerge/automerge/slim"
+// TODO: temporary work-around during alpha.
+export * as Automerge from "@automerge/automerge/slim"
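
The slim entrypoint now re-exports the wasm initializers and, as the TODO notes, the Automerge namespace itself. A consumer of the slim build has to initialize the wasm before touching any document; the sketch below assumes the package exposes the slim entrypoint as a "/slim" subpath and that the bundler supports ?url wasm imports, neither of which is shown in this diff:

// Slim build: the automerge wasm is not inlined, so load it explicitly first.
import { Repo, initializeWasm } from "@automerge/automerge-repo/slim"
// How the wasm bytes are resolved is bundler-specific; this Vite-style ?url import is illustrative.
import wasmUrl from "@automerge/automerge/automerge.wasm?url"

await initializeWasm(wasmUrl) // must complete before any document is created or loaded

const repo = new Repo({ network: [] })
const handle = repo.create<{ ready: boolean }>()
handle.change(d => { d.ready = true })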