@automerge/automerge-repo 1.0.0-alpha.3 → 1.0.0-alpha.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61)
  1. package/dist/DocCollection.d.ts +2 -1
  2. package/dist/DocCollection.d.ts.map +1 -1
  3. package/dist/DocCollection.js +3 -3
  4. package/dist/DocHandle.d.ts +8 -3
  5. package/dist/DocHandle.d.ts.map +1 -1
  6. package/dist/DocHandle.js +28 -6
  7. package/dist/DocUrl.d.ts +1 -1
  8. package/dist/DocUrl.d.ts.map +1 -1
  9. package/dist/Repo.d.ts.map +1 -1
  10. package/dist/Repo.js +25 -7
  11. package/dist/helpers/cbor.d.ts +4 -0
  12. package/dist/helpers/cbor.d.ts.map +1 -0
  13. package/dist/helpers/cbor.js +8 -0
  14. package/dist/helpers/eventPromise.d.ts +1 -1
  15. package/dist/helpers/eventPromise.d.ts.map +1 -1
  16. package/dist/helpers/headsAreSame.d.ts +0 -1
  17. package/dist/helpers/headsAreSame.d.ts.map +1 -1
  18. package/dist/index.d.ts +1 -0
  19. package/dist/index.d.ts.map +1 -1
  20. package/dist/index.js +1 -0
  21. package/dist/network/NetworkAdapter.d.ts +4 -5
  22. package/dist/network/NetworkAdapter.d.ts.map +1 -1
  23. package/dist/network/NetworkAdapter.js +1 -1
  24. package/dist/network/NetworkSubsystem.d.ts +4 -4
  25. package/dist/network/NetworkSubsystem.d.ts.map +1 -1
  26. package/dist/network/NetworkSubsystem.js +28 -14
  27. package/dist/network/messages.d.ts +2 -2
  28. package/dist/network/messages.d.ts.map +1 -1
  29. package/dist/storage/StorageSubsystem.d.ts +1 -1
  30. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  31. package/dist/storage/StorageSubsystem.js +10 -4
  32. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  33. package/dist/synchronizer/DocSynchronizer.js +11 -12
  34. package/dist/synchronizer/Synchronizer.d.ts +1 -1
  35. package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
  36. package/dist/synchronizer/Synchronizer.js +1 -1
  37. package/fuzz/fuzz.ts +1 -1
  38. package/package.json +3 -3
  39. package/src/DocCollection.ts +4 -3
  40. package/src/DocHandle.ts +34 -4
  41. package/src/DocUrl.ts +1 -1
  42. package/src/Repo.ts +23 -7
  43. package/src/helpers/cbor.ts +10 -0
  44. package/src/helpers/eventPromise.ts +1 -1
  45. package/src/helpers/headsAreSame.ts +1 -1
  46. package/src/index.ts +2 -0
  47. package/src/network/NetworkAdapter.ts +4 -6
  48. package/src/network/NetworkSubsystem.ts +37 -19
  49. package/src/network/messages.ts +2 -2
  50. package/src/storage/StorageSubsystem.ts +11 -4
  51. package/src/synchronizer/DocSynchronizer.ts +14 -14
  52. package/src/synchronizer/Synchronizer.ts +1 -1
  53. package/test/CollectionSynchronizer.test.ts +1 -1
  54. package/test/DocCollection.test.ts +2 -2
  55. package/test/DocHandle.test.ts +5 -5
  56. package/test/Repo.test.ts +75 -13
  57. package/test/StorageSubsystem.test.ts +2 -3
  58. package/test/helpers/DummyNetworkAdapter.ts +13 -5
  59. package/test/helpers/DummyStorageAdapter.ts +1 -1
  60. package/test/helpers/generate-large-object.ts +13 -0
  61. package/tsconfig.json +2 -2
package/dist/synchronizer/DocSynchronizer.js CHANGED
@@ -1,4 +1,4 @@
-import * as A from "@automerge/automerge";
+import * as A from "@automerge/automerge/next";
 import { READY, REQUESTING, UNAVAILABLE, } from "../DocHandle.js";
 import { Synchronizer } from "./Synchronizer.js";
 import debug from "debug";
@@ -124,7 +124,7 @@ export class DocSynchronizer extends Synchronizer {
         this.#log(logText, decoded);
         // expanding is expensive, so only do it if we're logging at this level
         const expanded = this.#opsLog.enabled
-            ? decoded.changes.flatMap(change => A.decodeChange(change).ops.map(op => JSON.stringify(op)))
+            ? decoded.changes.flatMap((change) => A.decodeChange(change).ops.map((op) => JSON.stringify(op)))
            : null;
         this.#opsLog(logText, expanded);
     };
@@ -134,19 +134,18 @@ export class DocSynchronizer extends Synchronizer {
     }
     beginSync(peerIds) {
         this.#log(`beginSync: ${peerIds.join(", ")}`);
+        // HACK: if we have a sync state already, we round-trip it through the encoding system to make
+        // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
+        // messages during disconnection.
+        // TODO: cover that case with a test and remove this hack
+        peerIds.forEach(peerId => {
+            const syncStateRaw = this.#getSyncState(peerId);
+            const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw));
+            this.#setSyncState(peerId, syncState);
+        });
         // At this point if we don't have anything in our storage, we need to use an empty doc to sync
         // with; but we don't want to surface that state to the front end
         void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
-            // if we don't have any peers, then we can say the document is unavailable
-            // HACK: if we have a sync state already, we round-trip it through the encoding system to make
-            // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
-            // messages during disconnection.
-            // TODO: cover that case with a test and remove this hack
-            peerIds.forEach(peerId => {
-                const syncStateRaw = this.#getSyncState(peerId);
-                const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw));
-                this.#setSyncState(peerId, syncState);
-            });
             // we register out peers first, then say that sync has started
             this.#syncStarted = true;
             this.#checkDocUnavailable();
package/dist/synchronizer/Synchronizer.d.ts CHANGED
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3";
+import { EventEmitter } from "eventemitter3";
 import { Message, MessageContents } from "../network/messages.js";
 export declare abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
     abstract receiveMessage(message: Message): void;
package/dist/synchronizer/Synchronizer.d.ts.map CHANGED
@@ -1 +1 @@
-{"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,YAAY,MAAM,eAAe,CAAA;AACxC,OAAO,EAAE,OAAO,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAA;AAEjE,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,IAAI;CAChD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;CACxC"}
+{"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,OAAO,EAAE,eAAe,EAAE,MAAM,wBAAwB,CAAA;AAEjE,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,OAAO,GAAG,IAAI;CAChD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;CACxC"}
package/dist/synchronizer/Synchronizer.js CHANGED
@@ -1,3 +1,3 @@
-import EventEmitter from "eventemitter3";
+import { EventEmitter } from "eventemitter3";
 export class Synchronizer extends EventEmitter {
 }
package/fuzz/fuzz.ts CHANGED
@@ -1,6 +1,6 @@
 import assert from "assert"
 import { MessageChannelNetworkAdapter } from "@automerge/automerge-repo-network-messagechannel"
-import * as Automerge from "@automerge/automerge"
+import * as Automerge from "@automerge/automerge/next"

 import { DocHandle, DocumentId, PeerId, SharePolicy } from "../src"
 import { eventPromise } from "../src/helpers/eventPromise.js"
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@automerge/automerge-repo",
-  "version": "1.0.0-alpha.3",
+  "version": "1.0.0-alpha.5",
   "description": "A repository object to manage a collection of automerge documents",
   "repository": "https://github.com/automerge/automerge-repo",
   "author": "Peter van Hardenberg <pvh@pvh.ca>",
@@ -31,7 +31,7 @@
     "typescript": "^5.1.6"
   },
   "peerDependencies": {
-    "@automerge/automerge": "^2.1.0-alpha.10"
+    "@automerge/automerge": "^2.1.0-alpha.13"
   },
   "dependencies": {
     "bs58check": "^3.0.1",
@@ -65,5 +65,5 @@
   "publishConfig": {
     "access": "public"
   },
-  "gitHead": "0ed108273084319aeea64ceccb49c3d58709f107"
+  "gitHead": "9cd9be160ebda37c8f0d70f2d5cadea5b951a3c3"
 }
package/src/DocCollection.ts CHANGED
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { DocHandle } from "./DocHandle.js"
 import { DocumentId, type BinaryDocumentId, AutomergeUrl } from "./types.js"
 import { type SharePolicy } from "./Repo.js"
@@ -74,7 +74,7 @@ export class DocCollection extends EventEmitter<DocCollectionEvents> {
     // Generate a new UUID and store it in the buffer
     const { documentId } = parseAutomergeUrl(generateAutomergeUrl())
     const handle = this.#getHandle<T>(documentId, true) as DocHandle<T>
-    this.emit("document", { handle })
+    this.emit("document", { handle, isNew: true })
     return handle
   }

@@ -105,7 +105,7 @@ export class DocCollection extends EventEmitter<DocCollectionEvents> {
     }

     const handle = this.#getHandle<T>(documentId, false) as DocHandle<T>
-    this.emit("document", { handle })
+    this.emit("document", { handle, isNew: false })
     return handle
   }

@@ -136,6 +136,7 @@ interface DocCollectionEvents {

 interface DocumentPayload {
   handle: DocHandle<any>
+  isNew: boolean
 }

 interface DeleteDocumentPayload {
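
Note: the new `isNew` flag lets `document` listeners tell freshly created documents apart from ones looked up by ID; the Repo uses it below to choose between an immediate save and a load from storage. A consumer-side sketch, assuming `repo` is an already-constructed Repo (the handler bodies are illustrative, not part of this package):

// the "document" event fires on every create() or find()
repo.on("document", ({ handle, isNew }) => {
  if (isNew) {
    // came from repo.create(): already carries an empty bootstrap change
  } else {
    // came from repo.find(): contents arrive from storage or the network
  }
})
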
package/src/DocHandle.ts CHANGED
@@ -1,6 +1,6 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import debug from "debug"
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import {
   assign,
   BaseActionObject,
@@ -19,7 +19,7 @@ import { pause } from "./helpers/pause.js"
 import { TimeoutError, withTimeout } from "./helpers/withTimeout.js"
 import type { DocumentId, PeerId, AutomergeUrl } from "./types.js"
 import { stringifyAutomergeUrl } from "./DocUrl.js"
-import { encode } from "cbor-x"
+import { encode } from "./helpers/cbor.js"

 /** DocHandle is a wrapper around a single Automerge document that lets us listen for changes. */
 export class DocHandle<T> //
@@ -43,7 +43,12 @@ export class DocHandle<T> //
     this.#log = debug(`automerge-repo:dochandle:${this.documentId.slice(0, 5)}`)

     // initial doc
-    const doc = A.init<T>()
+    let doc = A.init<T>()
+
+    // Make an empty change so that we have something to save to disk
+    if (isNew) {
+      doc = A.emptyChange(doc, {})
+    }

     /**
      * Internally we use a state machine to orchestrate document loading and/or syncing, in order to
@@ -82,6 +87,8 @@ export class DocHandle<T> //
           UPDATE: { actions: "onUpdate", target: READY },
           // REQUEST is called by the Repo if the document is not found in storage
           REQUEST: { target: REQUESTING },
+          // AWAIT_NETWORK is called by the repo if the document is not found in storage but the network is not yet ready
+          AWAIT_NETWORK: { target: AWAITING_NETWORK },
           DELETE: { actions: "onDelete", target: DELETED },
         },
         after: [
@@ -91,6 +98,11 @@ export class DocHandle<T> //
           },
         ],
       },
+      awaitingNetwork: {
+        on: {
+          NETWORK_READY: { target: REQUESTING },
+        },
+      },
       requesting: {
         on: {
           MARK_UNAVAILABLE: {
@@ -342,6 +354,14 @@ export class DocHandle<T> //
     if (this.#state === LOADING) this.#machine.send(REQUEST)
   }

+  awaitNetwork() {
+    if (this.#state === LOADING) this.#machine.send(AWAIT_NETWORK)
+  }
+
+  networkReady() {
+    if (this.#state === AWAITING_NETWORK) this.#machine.send(NETWORK_READY)
+  }
+
   /** `delete` is called by the repo when the document is deleted */
   delete() {
     this.#machine.send(DELETE)
@@ -419,6 +439,7 @@ export interface DocHandleEvents<T> {
 export const HandleState = {
   IDLE: "idle",
   LOADING: "loading",
+  AWAITING_NETWORK: "awaitingNetwork",
   REQUESTING: "requesting",
   READY: "ready",
   FAILED: "failed",
@@ -448,6 +469,8 @@ export const Event = {
   FIND: "FIND",
   REQUEST: "REQUEST",
   REQUEST_COMPLETE: "REQUEST_COMPLETE",
+  AWAIT_NETWORK: "AWAIT_NETWORK",
+  NETWORK_READY: "NETWORK_READY",
   UPDATE: "UPDATE",
   TIMEOUT: "TIMEOUT",
   DELETE: "DELETE",
@@ -466,6 +489,8 @@ type UpdateEvent<T> = {
 }
 type TimeoutEvent = { type: typeof TIMEOUT }
 type MarkUnavailableEvent = { type: typeof MARK_UNAVAILABLE }
+type AwaitNetworkEvent = { type: typeof AWAIT_NETWORK }
+type NetworkReadyEvent = { type: typeof NETWORK_READY }

 type DocHandleEvent<T> =
   | CreateEvent
@@ -476,6 +501,8 @@ type DocHandleEvent<T> =
   | TimeoutEvent
   | DeleteEvent
   | MarkUnavailableEvent
+  | AwaitNetworkEvent
+  | NetworkReadyEvent

 type DocHandleXstateMachine<T> = Interpreter<
   DocHandleContext<T>,
@@ -497,6 +524,7 @@ type DocHandleXstateMachine<T> = Interpreter<
 export const {
   IDLE,
   LOADING,
+  AWAITING_NETWORK,
   REQUESTING,
   READY,
   FAILED,
@@ -512,4 +540,6 @@ const {
   DELETE,
   REQUEST_COMPLETE,
   MARK_UNAVAILABLE,
+  AWAIT_NETWORK,
+  NETWORK_READY,
 } = Event
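
Note: together these additions give the handle's state machine a parking state for documents requested before any network adapter has come up: LOADING moves to AWAITING_NETWORK instead of timing out, then on to REQUESTING once the network reports ready. A sketch of the driver logic, mirroring the wiring added in package/src/Repo.ts below (assumes `handle` is LOADING and the document was not found in storage):

// LOADING --AWAIT_NETWORK--> AWAITING_NETWORK --NETWORK_READY--> REQUESTING
if (repo.networkSubsystem.isReady()) {
  handle.request() // network already up: LOADING -> REQUESTING
} else {
  handle.awaitNetwork() // LOADING -> AWAITING_NETWORK
  await repo.networkSubsystem.whenReady()
  handle.networkReady() // AWAITING_NETWORK -> REQUESTING
}
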
package/src/DocUrl.ts CHANGED
@@ -2,7 +2,7 @@ import {
   type AutomergeUrl,
   type BinaryDocumentId,
   type DocumentId,
-} from "./types"
+} from "./types.js"
 import { v4 as uuid } from "uuid"
 import bs58check from "bs58check"

package/src/Repo.ts CHANGED
@@ -23,17 +23,22 @@ export class Repo extends DocCollection {

     // The `document` event is fired by the DocCollection any time we create a new document or look
     // up a document by ID. We listen for it in order to wire up storage and network synchronization.
-    this.on("document", async ({ handle }) => {
+    this.on("document", async ({ handle, isNew }) => {
       if (storageSubsystem) {
         // Save when the document changes
         handle.on("heads-changed", async ({ handle, doc }) => {
           await storageSubsystem.saveDoc(handle.documentId, doc)
         })

-        // Try to load from disk
-        const loadedDoc = await storageSubsystem.loadDoc(handle.documentId)
-        if (loadedDoc) {
-          handle.update(() => loadedDoc)
+        if (isNew) {
+          // this is a new document, immediately save it
+          await storageSubsystem.saveDoc(handle.documentId, handle.docSync()!)
+        } else {
+          // Try to load from disk
+          const loadedDoc = await storageSubsystem.loadDoc(handle.documentId)
+          if (loadedDoc) {
+            handle.update(() => loadedDoc)
+          }
         }
       }

@@ -44,7 +49,16 @@ export class Repo extends DocCollection {
         })
       })

-      handle.request()
+      if (this.networkSubsystem.isReady()) {
+        handle.request()
+      } else {
+        handle.awaitNetwork()
+        this.networkSubsystem.whenReady().then(() => {
+          handle.networkReady()
+        }).catch(err => {
+          this.#log("error waiting for network", { err })
+        })
+      }

       // Register the document with the synchronizer. This advertises our interest in the document.
       synchronizer.addDocument(handle.documentId)
@@ -55,7 +69,9 @@ export class Repo extends DocCollection {
       // synchronizer.removeDocument(documentId)

       if (storageSubsystem) {
-        storageSubsystem.remove(documentId)
+        storageSubsystem.remove(documentId).catch(err => {
+          this.#log("error deleting document", { documentId, err })
+        })
       }
     })

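Note: the practical effect of the `isNew` branch is that a newly created document reaches disk immediately, via the empty bootstrap change made in the DocHandle constructor, instead of only after its first edit. A sketch, assuming some storage adapter (`MyStorageAdapter` is a hypothetical stand-in):

import { Repo } from "@automerge/automerge-repo"

const repo = new Repo({ network: [], storage: new MyStorageAdapter() })
const handle = repo.create<{ count: number }>()
// the "document" event fires with isNew: true, so the handle's initial (empty)
// state is saved right away, before any handle.change() call
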
package/src/helpers/cbor.ts ADDED
@@ -0,0 +1,10 @@
+import { Encoder, decode as cborXdecode } from "cbor-x";
+
+export function encode(obj: any): Buffer {
+  let encoder = new Encoder({tagUint8Array: false})
+  return encoder.encode(obj)
+}
+
+export function decode(buf: Buffer | Uint8Array): any {
+  return cborXdecode(buf)
+}
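
Note: DocHandle now encodes ephemeral messages through this helper instead of importing cbor-x directly (see the import change in package/src/DocHandle.ts above), and the module is re-exported as `cbor` from package/src/index.ts below. `tagUint8Array: false` tells cbor-x not to wrap Uint8Array values in a CBOR tag, presumably so byte payloads stay decodable as plain byte strings by any CBOR decoder. A usage sketch:

import { cbor } from "@automerge/automerge-repo" // re-exported in src/index.ts below

const bytes = cbor.encode({ type: "ephemeral", data: new Uint8Array([1, 2, 3]) })
const decoded = cbor.decode(bytes) // data round-trips as bytes, with no tag wrapper
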
package/src/helpers/eventPromise.ts CHANGED
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"

 /** Returns a promise that resolves when the given event is emitted on the given emitter. */
 export const eventPromise = (emitter: EventEmitter, event: string) =>
package/src/helpers/headsAreSame.ts CHANGED
@@ -1,4 +1,4 @@
-import {Heads} from "@automerge/automerge"
+import {Heads} from "@automerge/automerge/next"
 import { arraysAreEqual } from "./arraysAreEqual.js"

 export const headsAreSame = (a: Heads, b: Heads) => {
package/src/index.ts CHANGED
@@ -30,3 +30,5 @@ export {
   stringifyAutomergeUrl as generateAutomergeUrl,
 } from "./DocUrl.js"
 export * from "./types.js"
+
+export * as cbor from "./helpers/cbor.js"
package/src/network/NetworkAdapter.ts CHANGED
@@ -1,23 +1,21 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { PeerId } from "../types.js"
 import { Message } from "./messages.js"

 export abstract class NetworkAdapter extends EventEmitter<NetworkAdapterEvents> {
   peerId?: PeerId // hmmm, maybe not

-  abstract connect(url?: string): void
+  abstract connect(peerId: PeerId): void

   abstract send(message: Message): void

-  abstract join(): void
-
-  abstract leave(): void
+  abstract disconnect(): void
 }

 // events & payloads

 export interface NetworkAdapterEvents {
-  open: (payload: OpenPayload) => void
+  ready: (payload: OpenPayload) => void
   close: () => void
   "peer-candidate": (payload: PeerCandidatePayload) => void
   "peer-disconnected": (payload: PeerDisconnectedPayload) => void
package/src/network/NetworkSubsystem.ts CHANGED
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { PeerId } from "../types.js"
 import { NetworkAdapter, PeerDisconnectedPayload } from "./NetworkAdapter.js"

@@ -25,18 +25,29 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
   #count = 0
   #sessionId: SessionId = Math.random().toString(36).slice(2) as SessionId
   #ephemeralSessionCounts: Record<EphemeralMessageSource, number> = {}
+  #readyAdapterCount = 0
+  #adapters: NetworkAdapter[] = []

-  constructor(
-    private adapters: NetworkAdapter[],
-    public peerId = randomPeerId()
-  ) {
+  constructor(adapters: NetworkAdapter[], public peerId = randomPeerId()) {
     super()
     this.#log = debug(`automerge-repo:network:${this.peerId}`)
-    this.adapters.forEach(a => this.addNetworkAdapter(a))
+    adapters.forEach(a => this.addNetworkAdapter(a))
   }

   addNetworkAdapter(networkAdapter: NetworkAdapter) {
-    networkAdapter.connect(this.peerId)
+    this.#adapters.push(networkAdapter)
+    networkAdapter.once("ready", () => {
+      this.#readyAdapterCount++
+      this.#log(
+        "Adapters ready: ",
+        this.#readyAdapterCount,
+        "/",
+        this.#adapters.length
+      )
+      if (this.#readyAdapterCount === this.#adapters.length) {
+        this.emit("ready")
+      }
+    })

     networkAdapter.on("peer-candidate", ({ peerId }) => {
       this.#log(`peer candidate: ${peerId} `)
@@ -90,7 +101,7 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
       })
     })

-    networkAdapter.join()
+    networkAdapter.connect(this.peerId)
   }

   send(message: MessageContents) {
@@ -106,11 +117,11 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
       "count" in message
         ? message
         : {
-          ...message,
-          count: ++this.#count,
-          sessionId: this.#sessionId,
-          senderId: this.peerId,
-        }
+            ...message,
+            count: ++this.#count,
+            sessionId: this.#sessionId,
+            senderId: this.peerId,
+          }
       this.#log("Ephemeral message", outbound)
       peer.send(outbound)
     } else {
@@ -120,14 +131,20 @@ export class NetworkSubsystem extends EventEmitter<NetworkSubsystemEvents> {
     }
   }

-  join() {
-    this.#log(`Joining network`)
-    this.adapters.forEach(a => a.join())
+  isReady = () => {
+    return this.#readyAdapterCount === this.#adapters.length
   }

-  leave() {
-    this.#log(`Leaving network`)
-    this.adapters.forEach(a => a.leave())
+  whenReady = async () => {
+    if (this.isReady()) {
+      return
+    } else {
+      return new Promise<void>(resolve => {
+        this.once("ready", () => {
+          resolve()
+        })
+      })
+    }
   }
 }

@@ -141,6 +158,7 @@ export interface NetworkSubsystemEvents {
   peer: (payload: PeerPayload) => void
   "peer-disconnected": (payload: PeerDisconnectedPayload) => void
   message: (payload: Message) => void
+  ready: () => void
 }

 export interface PeerPayload {
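
Note: with `join`/`leave` gone, readiness is now the subsystem's public lifecycle: each adapter is connected as it is added, and `ready` fires once every adapter has reported in. Because `isReady` compares the ready count against the adapter count, zero adapters means `0 === 0`, so a storage-only repo is ready immediately. A usage sketch, where `subsystem` stands in for `repo.networkSubsystem`:

if (!subsystem.isReady()) {
  await subsystem.whenReady() // resolves on the "ready" event
}
// safe to request documents now; see the AWAIT_NETWORK wiring in Repo.ts above
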
package/src/network/messages.ts CHANGED
@@ -1,6 +1,6 @@
 // utilities
-import { SessionId } from "../EphemeralData"
-import { DocumentId, PeerId } from "../types"
+import { SessionId } from "../EphemeralData.js"
+import { DocumentId, PeerId } from "../types.js"

 export function isValidMessage(
   message: NetworkAdapterMessage
package/src/storage/StorageSubsystem.ts CHANGED
@@ -1,4 +1,4 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import { StorageAdapter, StorageKey } from "./StorageAdapter.js"
 import * as sha256 from "fast-sha256"
 import { type DocumentId } from "../types.js"
@@ -25,7 +25,7 @@ function keyHash(binary: Uint8Array) {

 function headsHash(heads: A.Heads): string {
   let encoder = new TextEncoder()
-  let headsbinary = mergeArrays(heads.map(h => encoder.encode(h)))
+  let headsbinary = mergeArrays(heads.map((h: string) => encoder.encode(h)))
   return keyHash(headsbinary)
 }

@@ -35,6 +35,8 @@ export class StorageSubsystem {
   #storedHeads: Map<DocumentId, A.Heads> = new Map()
   #log = debug(`automerge-repo:storage-subsystem`)

+  #snapshotting = false
+
   constructor(storageAdapter: StorageAdapter) {
     this.#storageAdapter = storageAdapter
   }
@@ -67,6 +69,7 @@ export class StorageSubsystem {
     doc: A.Doc<unknown>,
     sourceChunks: StorageChunkInfo[]
   ): Promise<void> {
+    this.#snapshotting = true
     const binary = A.save(doc)
     const snapshotHash = headsHash(A.getHeads(doc))
     const key = [documentId, "snapshot", snapshotHash]
@@ -86,6 +89,7 @@ export class StorageSubsystem {
       this.#chunkInfos.get(documentId)?.filter(c => !oldKeys.has(c.key)) ?? []
     newChunkInfos.push({ key, type: "snapshot", size: binary.length })
     this.#chunkInfos.set(documentId, newChunkInfos)
+    this.#snapshotting = false
   }

   async loadDoc(documentId: DocumentId): Promise<A.Doc<unknown> | null> {
@@ -128,7 +132,7 @@ export class StorageSubsystem {
   }

   async remove(documentId: DocumentId) {
-    this.#storageAdapter.remove([documentId, "snapshot"])
+    this.#storageAdapter.removeRange([documentId, "snapshot"])
     this.#storageAdapter.removeRange([documentId, "incremental"])
   }

@@ -147,6 +151,9 @@ export class StorageSubsystem {
   }

   #shouldCompact(sourceChunks: StorageChunkInfo[]) {
+    if (this.#snapshotting) {
+      return false
+    }
     // compact if the incremental size is greater than the snapshot size
     let snapshotSize = 0
     let incrementalSize = 0
@@ -157,7 +164,7 @@ export class StorageSubsystem {
         incrementalSize += chunk.size
       }
     }
-    return incrementalSize > snapshotSize
+    return incrementalSize >= snapshotSize
   }
 }

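Note: two behavioral changes hide in `#shouldCompact`: compaction is suppressed while a snapshot write is already in flight, and the size comparison becomes `>=`, so a document whose incremental chunks merely equal its snapshot size now compacts too. A standalone paraphrase of the new rule, with the chunk shape assumed from the file above:

type ChunkInfo = { type: "snapshot" | "incremental"; size: number }

function shouldCompact(chunks: ChunkInfo[], snapshotting: boolean): boolean {
  if (snapshotting) return false // don't re-enter while a snapshot is being saved
  let snapshotSize = 0
  let incrementalSize = 0
  for (const chunk of chunks) {
    if (chunk.type === "snapshot") snapshotSize += chunk.size
    else incrementalSize += chunk.size
  }
  return incrementalSize >= snapshotSize // was ">"
}
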
package/src/synchronizer/DocSynchronizer.ts CHANGED
@@ -1,5 +1,6 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import {
+  AWAITING_NETWORK,
   DocHandle,
   DocHandleOutboundEphemeralMessagePayload,
   READY,
@@ -174,8 +175,8 @@ export class DocSynchronizer extends Synchronizer {

     // expanding is expensive, so only do it if we're logging at this level
     const expanded = this.#opsLog.enabled
-      ? decoded.changes.flatMap(change =>
-          A.decodeChange(change).ops.map(op => JSON.stringify(op))
+      ? decoded.changes.flatMap((change: A.Change) =>
+          A.decodeChange(change).ops.map((op: any) => JSON.stringify(op))
         )
       : null
     this.#opsLog(logText, expanded)
@@ -190,20 +191,19 @@ export class DocSynchronizer extends Synchronizer {
   beginSync(peerIds: PeerId[]) {
     this.#log(`beginSync: ${peerIds.join(", ")}`)

+    // HACK: if we have a sync state already, we round-trip it through the encoding system to make
+    // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
+    // messages during disconnection.
+    // TODO: cover that case with a test and remove this hack
+    peerIds.forEach(peerId => {
+      const syncStateRaw = this.#getSyncState(peerId)
+      const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
+      this.#setSyncState(peerId, syncState)
+    })
+
     // At this point if we don't have anything in our storage, we need to use an empty doc to sync
     // with; but we don't want to surface that state to the front end
     void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
-      // if we don't have any peers, then we can say the document is unavailable
-
-      // HACK: if we have a sync state already, we round-trip it through the encoding system to make
-      // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
-      // messages during disconnection.
-      // TODO: cover that case with a test and remove this hack
-      peerIds.forEach(peerId => {
-        const syncStateRaw = this.#getSyncState(peerId)
-        const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw))
-        this.#setSyncState(peerId, syncState)
-      })

       // we register out peers first, then say that sync has started
       this.#syncStarted = true
package/src/synchronizer/Synchronizer.ts CHANGED
@@ -1,4 +1,4 @@
-import EventEmitter from "eventemitter3"
+import { EventEmitter } from "eventemitter3"
 import { Message, MessageContents } from "../network/messages.js"

 export abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
package/test/CollectionSynchronizer.test.ts CHANGED
@@ -1,6 +1,6 @@
 import assert from "assert"
 import { beforeEach } from "mocha"
-import { DocCollection, PeerId } from "../src"
+import { DocCollection, PeerId } from "../src/index.js"
 import { CollectionSynchronizer } from "../src/synchronizer/CollectionSynchronizer.js"

 describe("CollectionSynchronizer", () => {
package/test/DocCollection.test.ts CHANGED
@@ -1,7 +1,7 @@
 import assert from "assert"
-import { DocCollection, BinaryDocumentId } from "../src"
+import { DocCollection, BinaryDocumentId } from "../src/index.js"
 import { TestDoc } from "./types.js"
-import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl"
+import { generateAutomergeUrl, stringifyAutomergeUrl } from "../src/DocUrl.js"

 const MISSING_DOCID = generateAutomergeUrl()

package/test/DocHandle.test.ts CHANGED
@@ -1,11 +1,11 @@
-import * as A from "@automerge/automerge"
+import * as A from "@automerge/automerge/next"
 import assert from "assert"
 import { it } from "mocha"
-import { DocHandle, DocHandleChangePayload } from "../src"
-import { pause } from "../src/helpers/pause"
+import { DocHandle, DocHandleChangePayload } from "../src/index.js"
+import { pause } from "../src/helpers/pause.js"
 import { TestDoc } from "./types.js"
-import { generateAutomergeUrl, parseAutomergeUrl } from "../src/DocUrl"
-import { eventPromise } from "../src/helpers/eventPromise"
+import { generateAutomergeUrl, parseAutomergeUrl } from "../src/DocUrl.js"
+import { eventPromise } from "../src/helpers/eventPromise.js"
 import { decode } from "cbor-x"

 describe("DocHandle", () => {