@automerge/automerge-repo 1.0.17 → 1.0.19

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/dist/DocHandle.d.ts +16 -3
  2. package/dist/DocHandle.d.ts.map +1 -1
  3. package/dist/DocHandle.js +20 -13
  4. package/dist/Repo.d.ts +2 -0
  5. package/dist/Repo.d.ts.map +1 -1
  6. package/dist/Repo.js +17 -6
  7. package/dist/index.d.ts +1 -1
  8. package/dist/index.d.ts.map +1 -1
  9. package/dist/network/messages.d.ts +7 -0
  10. package/dist/network/messages.d.ts.map +1 -1
  11. package/dist/storage/StorageSubsystem.d.ts +3 -1
  12. package/dist/storage/StorageSubsystem.d.ts.map +1 -1
  13. package/dist/storage/StorageSubsystem.js +10 -0
  14. package/dist/storage/chunkTypeFromKey.d.ts +1 -2
  15. package/dist/storage/chunkTypeFromKey.d.ts.map +1 -1
  16. package/dist/synchronizer/CollectionSynchronizer.d.ts +2 -0
  17. package/dist/synchronizer/CollectionSynchronizer.d.ts.map +1 -1
  18. package/dist/synchronizer/CollectionSynchronizer.js +14 -1
  19. package/dist/synchronizer/DocSynchronizer.d.ts +6 -2
  20. package/dist/synchronizer/DocSynchronizer.d.ts.map +1 -1
  21. package/dist/synchronizer/DocSynchronizer.js +119 -76
  22. package/dist/synchronizer/Synchronizer.d.ts +2 -1
  23. package/dist/synchronizer/Synchronizer.d.ts.map +1 -1
  24. package/package.json +3 -5
  25. package/src/DocHandle.ts +32 -13
  26. package/src/Repo.ts +23 -6
  27. package/src/index.ts +1 -0
  28. package/src/network/messages.ts +8 -0
  29. package/src/storage/StorageSubsystem.ts +20 -2
  30. package/src/storage/chunkTypeFromKey.ts +1 -2
  31. package/src/synchronizer/CollectionSynchronizer.ts +19 -1
  32. package/src/synchronizer/DocSynchronizer.ts +168 -94
  33. package/src/synchronizer/Synchronizer.ts +6 -1
  34. package/test/DocHandle.test.ts +19 -2
  35. package/test/DocSynchronizer.test.ts +47 -16
  36. package/test/Repo.test.ts +159 -4
  37. package/test/StorageSubsystem.test.ts +30 -2
@@ -5,24 +5,29 @@ import { READY, REQUESTING, UNAVAILABLE, } from "../DocHandle.js";
5
5
  import { isRequestMessage, } from "../network/messages.js";
6
6
  import { Synchronizer } from "./Synchronizer.js";
7
7
  import { throttle } from "../helpers/throttle.js";
8
+ import { headsAreSame } from "../helpers/headsAreSame.js";
8
9
  /**
9
10
  * DocSynchronizer takes a handle to an Automerge document, and receives & dispatches sync messages
10
11
  * to bring it inline with all other peers' versions.
11
12
  */
12
13
  export class DocSynchronizer extends Synchronizer {
13
- handle;
14
14
  #log;
15
15
  syncDebounceRate = 100;
16
16
  /** Active peers */
17
17
  #peers = [];
18
+ #pendingSyncStateCallbacks = {};
18
19
  #peerDocumentStatuses = {};
19
20
  /** Sync state for each peer we've communicated with (including inactive peers) */
20
21
  #syncStates = {};
21
22
  #pendingSyncMessages = [];
22
23
  #syncStarted = false;
23
- constructor(handle) {
24
+ #handle;
25
+ #onLoadSyncState;
26
+ constructor({ handle, onLoadSyncState }) {
24
27
  super();
25
- this.handle = handle;
28
+ this.#handle = handle;
29
+ this.#onLoadSyncState =
30
+ onLoadSyncState ?? (() => Promise.resolve(undefined));
26
31
  const docId = handle.documentId.slice(0, 5);
27
32
  this.#log = debug(`automerge-repo:docsync:${docId}`);
28
33
  handle.on("change", throttle(() => this.#syncWithPeers(), this.syncDebounceRate));
@@ -37,12 +42,12 @@ export class DocSynchronizer extends Synchronizer {
37
42
  return this.#peerDocumentStatuses;
38
43
  }
39
44
  get documentId() {
40
- return this.handle.documentId;
45
+ return this.#handle.documentId;
41
46
  }
42
47
  /// PRIVATE
43
48
  async #syncWithPeers() {
44
49
  this.#log(`syncWithPeers`);
45
- const doc = await this.handle.doc();
50
+ const doc = await this.#handle.doc();
46
51
  if (doc === undefined)
47
52
  return;
48
53
  this.#peers.forEach(peerId => this.#sendSyncMessage(peerId, doc));
@@ -56,61 +61,92 @@ export class DocSynchronizer extends Synchronizer {
56
61
  const message = {
57
62
  type: "ephemeral",
58
63
  targetId: peerId,
59
- documentId: this.handle.documentId,
64
+ documentId: this.#handle.documentId,
60
65
  data,
61
66
  };
62
67
  this.emit("message", message);
63
68
  }
64
- #getSyncState(peerId) {
69
+ #withSyncState(peerId, callback) {
65
70
  if (!this.#peers.includes(peerId)) {
66
- this.#log("adding a new peer", peerId);
67
71
  this.#peers.push(peerId);
68
72
  }
69
- // when a peer is added, we don't know if it has the document or not
70
73
  if (!(peerId in this.#peerDocumentStatuses)) {
71
74
  this.#peerDocumentStatuses[peerId] = "unknown";
72
75
  }
73
- return this.#syncStates[peerId] ?? A.initSyncState();
76
+ const syncState = this.#syncStates[peerId];
77
+ if (syncState) {
78
+ callback(syncState);
79
+ return;
80
+ }
81
+ let pendingCallbacks = this.#pendingSyncStateCallbacks[peerId];
82
+ if (!pendingCallbacks) {
83
+ this.#onLoadSyncState(peerId).then(syncState => {
84
+ this.#initSyncState(peerId, syncState ?? A.initSyncState());
85
+ });
86
+ pendingCallbacks = this.#pendingSyncStateCallbacks[peerId] = [];
87
+ }
88
+ pendingCallbacks.push(callback);
89
+ }
90
+ #initSyncState(peerId, syncState) {
91
+ const pendingCallbacks = this.#pendingSyncStateCallbacks[peerId];
92
+ if (pendingCallbacks) {
93
+ for (const callback of pendingCallbacks) {
94
+ callback(syncState);
95
+ }
96
+ }
97
+ delete this.#pendingSyncStateCallbacks[peerId];
98
+ this.#syncStates[peerId] = syncState;
74
99
  }
75
100
  #setSyncState(peerId, syncState) {
76
- // TODO: we maybe should be persisting sync states. But we want to be careful about how often we
77
- // do that, because it can generate a lot of disk activity.
78
- // TODO: we only need to do this on reconnect
101
+ const previousSyncState = this.#syncStates[peerId];
79
102
  this.#syncStates[peerId] = syncState;
103
+ const haveTheirSyncedHeadsChanged = syncState.theirHeads &&
104
+ (!previousSyncState ||
105
+ !previousSyncState.theirHeads ||
106
+ !headsAreSame(previousSyncState.theirHeads, syncState.theirHeads));
107
+ if (haveTheirSyncedHeadsChanged) {
108
+ this.#handle.setRemoteHeads(peerId, syncState.theirHeads);
109
+ }
110
+ this.emit("sync-state", {
111
+ peerId,
112
+ syncState,
113
+ documentId: this.#handle.documentId,
114
+ });
80
115
  }
81
116
  #sendSyncMessage(peerId, doc) {
82
117
  this.#log(`sendSyncMessage ->${peerId}`);
83
- const syncState = this.#getSyncState(peerId);
84
- const [newSyncState, message] = A.generateSyncMessage(doc, syncState);
85
- this.#setSyncState(peerId, newSyncState);
86
- if (message) {
87
- const isNew = A.getHeads(doc).length === 0;
88
- if (!this.handle.isReady() &&
89
- isNew &&
90
- newSyncState.sharedHeads.length === 0 &&
91
- !Object.values(this.#peerDocumentStatuses).includes("has") &&
92
- this.#peerDocumentStatuses[peerId] === "unknown") {
93
- // we don't have the document (or access to it), so we request it
94
- this.emit("message", {
95
- type: "request",
96
- targetId: peerId,
97
- documentId: this.handle.documentId,
98
- data: message,
99
- });
100
- }
101
- else {
102
- this.emit("message", {
103
- type: "sync",
104
- targetId: peerId,
105
- data: message,
106
- documentId: this.handle.documentId,
107
- });
108
- }
109
- // if we have sent heads, then the peer now has or will have the document
110
- if (!isNew) {
111
- this.#peerDocumentStatuses[peerId] = "has";
118
+ this.#withSyncState(peerId, syncState => {
119
+ const [newSyncState, message] = A.generateSyncMessage(doc, syncState);
120
+ if (message) {
121
+ this.#setSyncState(peerId, newSyncState);
122
+ const isNew = A.getHeads(doc).length === 0;
123
+ if (!this.#handle.isReady() &&
124
+ isNew &&
125
+ newSyncState.sharedHeads.length === 0 &&
126
+ !Object.values(this.#peerDocumentStatuses).includes("has") &&
127
+ this.#peerDocumentStatuses[peerId] === "unknown") {
128
+ // we don't have the document (or access to it), so we request it
129
+ this.emit("message", {
130
+ type: "request",
131
+ targetId: peerId,
132
+ documentId: this.#handle.documentId,
133
+ data: message,
134
+ });
135
+ }
136
+ else {
137
+ this.emit("message", {
138
+ type: "sync",
139
+ targetId: peerId,
140
+ data: message,
141
+ documentId: this.#handle.documentId,
142
+ });
143
+ }
144
+ // if we have sent heads, then the peer now has or will have the document
145
+ if (!isNew) {
146
+ this.#peerDocumentStatuses[peerId] = "has";
147
+ }
112
148
  }
113
- }
149
+ });
114
150
  }
115
151
  /// PUBLIC
116
152
  hasPeer(peerId) {
@@ -118,19 +154,11 @@ export class DocSynchronizer extends Synchronizer {
118
154
  }
119
155
  beginSync(peerIds) {
120
156
  const newPeers = new Set(peerIds.filter(peerId => !this.#peers.includes(peerId)));
121
- this.#log(`beginSync: ${peerIds.join(", ")}`);
122
- // HACK: if we have a sync state already, we round-trip it through the encoding system to make
123
- // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
124
- // messages during disconnection.
125
- // TODO: cover that case with a test and remove this hack
126
- peerIds.forEach(peerId => {
127
- const syncStateRaw = this.#getSyncState(peerId);
128
- const syncState = A.decodeSyncState(A.encodeSyncState(syncStateRaw));
129
- this.#setSyncState(peerId, syncState);
130
- });
131
157
  // At this point if we don't have anything in our storage, we need to use an empty doc to sync
132
158
  // with; but we don't want to surface that state to the front end
133
- void this.handle.doc([READY, REQUESTING, UNAVAILABLE]).then(doc => {
159
+ const docPromise = this.#handle
160
+ .doc([READY, REQUESTING, UNAVAILABLE])
161
+ .then(doc => {
134
162
  // we register out peers first, then say that sync has started
135
163
  this.#syncStarted = true;
136
164
  this.#checkDocUnavailable();
@@ -140,9 +168,22 @@ export class DocSynchronizer extends Synchronizer {
140
168
  }
141
169
  // If the doc is unavailable we still need a blank document to generate
142
170
  // the sync message from
143
- const theDoc = doc ?? A.init();
144
- peerIds.forEach(peerId => {
145
- this.#sendSyncMessage(peerId, theDoc);
171
+ return doc ?? A.init();
172
+ });
173
+ this.#log(`beginSync: ${peerIds.join(", ")}`);
174
+ peerIds.forEach(peerId => {
175
+ this.#withSyncState(peerId, syncState => {
176
+ // HACK: if we have a sync state already, we round-trip it through the encoding system to make
177
+ // sure state is preserved. This prevents an infinite loop caused by failed attempts to send
178
+ // messages during disconnection.
179
+ // TODO: cover that case with a test and remove this hack
180
+ const reparsedSyncState = A.decodeSyncState(A.encodeSyncState(syncState));
181
+ this.#setSyncState(peerId, reparsedSyncState);
182
+ docPromise.then(doc => {
183
+ if (doc) {
184
+ this.#sendSyncMessage(peerId, doc);
185
+ }
186
+ });
146
187
  });
147
188
  });
148
189
  }
@@ -168,12 +209,12 @@ export class DocSynchronizer extends Synchronizer {
168
209
  }
169
210
  }
170
211
  receiveEphemeralMessage(message) {
171
- if (message.documentId !== this.handle.documentId)
212
+ if (message.documentId !== this.#handle.documentId)
172
213
  throw new Error(`channelId doesn't match documentId`);
173
214
  const { senderId, data } = message;
174
215
  const contents = decode(new Uint8Array(data));
175
- this.handle.emit("ephemeral-message", {
176
- handle: this.handle,
216
+ this.#handle.emit("ephemeral-message", {
217
+ handle: this.#handle,
177
218
  senderId,
178
219
  message: contents,
179
220
  });
@@ -187,17 +228,17 @@ export class DocSynchronizer extends Synchronizer {
187
228
  });
188
229
  }
189
230
  receiveSyncMessage(message) {
190
- if (message.documentId !== this.handle.documentId)
231
+ if (message.documentId !== this.#handle.documentId)
191
232
  throw new Error(`channelId doesn't match documentId`);
192
233
  // We need to block receiving the syncMessages until we've checked local storage
193
- if (!this.handle.inState([READY, REQUESTING, UNAVAILABLE])) {
194
- this.#pendingSyncMessages.push(message);
234
+ if (!this.#handle.inState([READY, REQUESTING, UNAVAILABLE])) {
235
+ this.#pendingSyncMessages.push({ message, received: new Date() });
195
236
  return;
196
237
  }
197
238
  this.#processAllPendingSyncMessages();
198
- this.#processSyncMessage(message);
239
+ this.#processSyncMessage(message, new Date());
199
240
  }
200
- #processSyncMessage(message) {
241
+ #processSyncMessage(message, received) {
201
242
  if (isRequestMessage(message)) {
202
243
  this.#peerDocumentStatuses[message.senderId] = "wants";
203
244
  }
@@ -206,19 +247,21 @@ export class DocSynchronizer extends Synchronizer {
206
247
  if (A.decodeSyncMessage(message.data).heads.length > 0) {
207
248
  this.#peerDocumentStatuses[message.senderId] = "has";
208
249
  }
209
- this.handle.update(doc => {
210
- const [newDoc, newSyncState] = A.receiveSyncMessage(doc, this.#getSyncState(message.senderId), message.data);
211
- this.#setSyncState(message.senderId, newSyncState);
212
- // respond to just this peer (as required)
213
- this.#sendSyncMessage(message.senderId, doc);
214
- return newDoc;
250
+ this.#withSyncState(message.senderId, syncState => {
251
+ this.#handle.update(doc => {
252
+ const [newDoc, newSyncState] = A.receiveSyncMessage(doc, syncState, message.data);
253
+ this.#setSyncState(message.senderId, newSyncState);
254
+ // respond to just this peer (as required)
255
+ this.#sendSyncMessage(message.senderId, doc);
256
+ return newDoc;
257
+ });
258
+ this.#checkDocUnavailable();
215
259
  });
216
- this.#checkDocUnavailable();
217
260
  }
218
261
  #checkDocUnavailable() {
219
262
  // if we know none of the peers have the document, tell all our peers that we don't either
220
263
  if (this.#syncStarted &&
221
- this.handle.inState([REQUESTING]) &&
264
+ this.#handle.inState([REQUESTING]) &&
222
265
  this.#peers.every(peerId => this.#peerDocumentStatuses[peerId] === "unavailable" ||
223
266
  this.#peerDocumentStatuses[peerId] === "wants")) {
224
267
  this.#peers
@@ -226,17 +269,17 @@ export class DocSynchronizer extends Synchronizer {
226
269
  .forEach(peerId => {
227
270
  const message = {
228
271
  type: "doc-unavailable",
229
- documentId: this.handle.documentId,
272
+ documentId: this.#handle.documentId,
230
273
  targetId: peerId,
231
274
  };
232
275
  this.emit("message", message);
233
276
  });
234
- this.handle.unavailable();
277
+ this.#handle.unavailable();
235
278
  }
236
279
  }
237
280
  #processAllPendingSyncMessages() {
238
281
  for (const message of this.#pendingSyncMessages) {
239
- this.#processSyncMessage(message);
282
+ this.#processSyncMessage(message.message, message.received);
240
283
  }
241
284
  this.#pendingSyncMessages = [];
242
285
  }
@@ -1,9 +1,10 @@
1
1
  import { EventEmitter } from "eventemitter3";
2
- import { MessageContents, RepoMessage } from "../network/messages.js";
2
+ import { MessageContents, RepoMessage, SyncStateMessage } from "../network/messages.js";
3
3
  export declare abstract class Synchronizer extends EventEmitter<SynchronizerEvents> {
4
4
  abstract receiveMessage(message: RepoMessage): void;
5
5
  }
6
6
  export interface SynchronizerEvents {
7
7
  message: (arg: MessageContents) => void;
8
+ "sync-state": (arg: SyncStateMessage) => void;
8
9
  }
9
10
  //# sourceMappingURL=Synchronizer.d.ts.map
@@ -1 +1 @@
1
- {"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EAAE,eAAe,EAAE,WAAW,EAAE,MAAM,wBAAwB,CAAA;AAErE,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,WAAW,GAAG,IAAI;CACpD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;CACxC"}
1
+ {"version":3,"file":"Synchronizer.d.ts","sourceRoot":"","sources":["../../src/synchronizer/Synchronizer.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,YAAY,EAAE,MAAM,eAAe,CAAA;AAC5C,OAAO,EACL,eAAe,EACf,WAAW,EACX,gBAAgB,EACjB,MAAM,wBAAwB,CAAA;AAE/B,8BAAsB,YAAa,SAAQ,YAAY,CAAC,kBAAkB,CAAC;IACzE,QAAQ,CAAC,cAAc,CAAC,OAAO,EAAE,WAAW,GAAG,IAAI;CACpD;AAED,MAAM,WAAW,kBAAkB;IACjC,OAAO,EAAE,CAAC,GAAG,EAAE,eAAe,KAAK,IAAI,CAAA;IACvC,YAAY,EAAE,CAAC,GAAG,EAAE,gBAAgB,KAAK,IAAI,CAAA;CAC9C"}
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@automerge/automerge-repo",
3
- "version": "1.0.17",
3
+ "version": "1.0.19",
4
4
  "description": "A repository object to manage a collection of automerge documents",
5
5
  "repository": "https://github.com/automerge/automerge-repo/tree/master/packages/automerge-repo",
6
6
  "author": "Peter van Hardenberg <pvh@pvh.ca>",
@@ -22,10 +22,8 @@
22
22
  "devDependencies": {
23
23
  "http-server": "^14.1.0"
24
24
  },
25
- "peerDependencies": {
26
- "@automerge/automerge": "^2.1.5"
27
- },
28
25
  "dependencies": {
26
+ "@automerge/automerge": "^2.1.7",
29
27
  "bs58check": "^3.0.1",
30
28
  "cbor-x": "^1.3.0",
31
29
  "debug": "^4.3.4",
@@ -57,5 +55,5 @@
57
55
  "publishConfig": {
58
56
  "access": "public"
59
57
  },
60
- "gitHead": "657e5fc1aaf4d7392934c30805ff9d16a2997d03"
58
+ "gitHead": "7d28ca50dfa437ac6f7b1722b89b3f6844b90de7"
61
59
  }
package/src/DocHandle.ts CHANGED
@@ -39,6 +39,7 @@ export class DocHandle<T> //
39
39
 
40
40
  #machine: DocHandleXstateMachine<T>
41
41
  #timeoutDelay: number
42
+ #remoteHeads: Record<PeerId, A.Heads> = {}
42
43
 
43
44
  /** The URL of this document
44
45
  *
@@ -70,9 +71,9 @@ export class DocHandle<T> //
70
71
  * Internally we use a state machine to orchestrate document loading and/or syncing, in order to
71
72
  * avoid requesting data we already have, or surfacing intermediate values to the consumer.
72
73
  *
73
- * ┌─────────────────────┬─────────TIMEOUT────►┌────────┐
74
- * ┌───┴─────┐ ┌───┴────────┐ │ failed
75
- * ┌───────┐ ┌──FIND──┤ loading ├─REQUEST──►│ requesting ├─UPDATE──┐ └────────┘
74
+ * ┌─────────────────────┬─────────TIMEOUT────►┌─────────────┐
75
+ * ┌───┴─────┐ ┌───┴────────┐ │ unavailable
76
+ * ┌───────┐ ┌──FIND──┤ loading ├─REQUEST──►│ requesting ├─UPDATE──┐ └─────────────┘
76
77
  * │ idle ├──┤ └───┬─────┘ └────────────┘ │
77
78
  * └───────┘ │ │ └─►┌────────┐
78
79
  * │ └───────LOAD───────────────────────────────►│ ready │
@@ -110,7 +111,7 @@ export class DocHandle<T> //
110
111
  after: [
111
112
  {
112
113
  delay: this.#timeoutDelay,
113
- target: FAILED,
114
+ target: UNAVAILABLE,
114
115
  },
115
116
  ],
116
117
  },
@@ -134,7 +135,7 @@ export class DocHandle<T> //
134
135
  after: [
135
136
  {
136
137
  delay: this.#timeoutDelay,
137
- target: FAILED,
138
+ target: UNAVAILABLE,
138
139
  },
139
140
  ],
140
141
  },
@@ -145,9 +146,6 @@ export class DocHandle<T> //
145
146
  DELETE: { actions: "onDelete", target: DELETED },
146
147
  },
147
148
  },
148
- failed: {
149
- type: "final",
150
- },
151
149
  deleted: {
152
150
  type: "final",
153
151
  },
@@ -239,7 +237,7 @@ export class DocHandle<T> //
239
237
  return Promise.any(
240
238
  awaitStates.map(state =>
241
239
  waitFor(this.#machine, s => s.matches(state), {
242
- timeout: this.#timeoutDelay * 2000, // longer than the delay above for testing
240
+ timeout: this.#timeoutDelay * 2, // use a longer delay here so as not to race with other delays
243
241
  })
244
242
  )
245
243
  )
@@ -293,7 +291,7 @@ export class DocHandle<T> //
293
291
  // wait for the document to enter one of the desired states
294
292
  await this.#statePromise(awaitStates)
295
293
  } catch (error) {
296
- // if we timed out (or the load has already failed), return undefined
294
+ // if we timed out (or have determined the document is currently unavailable), return undefined
297
295
  return undefined
298
296
  }
299
297
  // Return the document
@@ -327,6 +325,19 @@ export class DocHandle<T> //
327
325
  })
328
326
  }
329
327
 
328
+ /** `setRemoteHeads` is called by the doc synchronizer
329
+ * @hidden
330
+ */
331
+ setRemoteHeads(peerId: PeerId, heads: A.Heads) {
332
+ this.#remoteHeads[peerId] = heads
333
+ this.emit("remote-heads", { peerId, heads })
334
+ }
335
+
336
+ /** Returns the heads of the peer */
337
+ getRemoteHeads(peerId: PeerId): A.Heads | undefined {
338
+ return this.#remoteHeads[peerId]
339
+ }
340
+
330
341
  /** `change` is called by the repo when the document is changed locally */
331
342
  change(callback: A.ChangeFn<T>, options: A.ChangeOptions<T> = {}) {
332
343
  if (!this.isReady()) {
@@ -482,6 +493,16 @@ export interface DocHandleOutboundEphemeralMessagePayload<T> {
482
493
  data: Uint8Array
483
494
  }
484
495
 
496
+ export interface DocHandleRemoteHeadsPayload {
497
+ peerId: PeerId
498
+ heads: A.Heads
499
+ }
500
+
501
+ export interface DocHandleSyncStatePayload {
502
+ peerId: PeerId
503
+ syncState: A.SyncState
504
+ }
505
+
485
506
  export interface DocHandleEvents<T> {
486
507
  "heads-changed": (payload: DocHandleEncodedChangePayload<T>) => void
487
508
  change: (payload: DocHandleChangePayload<T>) => void
@@ -491,6 +512,7 @@ export interface DocHandleEvents<T> {
491
512
  "ephemeral-message-outbound": (
492
513
  payload: DocHandleOutboundEphemeralMessagePayload<T>
493
514
  ) => void
515
+ "remote-heads": (payload: DocHandleRemoteHeadsPayload) => void
494
516
  }
495
517
 
496
518
  // STATE MACHINE TYPES
@@ -513,8 +535,6 @@ export const HandleState = {
513
535
  REQUESTING: "requesting",
514
536
  /** The document is available */
515
537
  READY: "ready",
516
- /** We were unable to load or request the document for some reason */
517
- FAILED: "failed",
518
538
  /** The document has been deleted from the repo */
519
539
  DELETED: "deleted",
520
540
  /** The document was not available in storage or from any connected peers */
@@ -601,7 +621,6 @@ export const {
601
621
  AWAITING_NETWORK,
602
622
  REQUESTING,
603
623
  READY,
604
- FAILED,
605
624
  DELETED,
606
625
  UNAVAILABLE,
607
626
  } = HandleState
package/src/Repo.ts CHANGED
@@ -14,6 +14,7 @@ import { StorageAdapter } from "./storage/StorageAdapter.js"
14
14
  import { StorageSubsystem } from "./storage/StorageSubsystem.js"
15
15
  import { CollectionSynchronizer } from "./synchronizer/CollectionSynchronizer.js"
16
16
  import type { AnyDocumentId, DocumentId, PeerId } from "./types.js"
17
+ import { SyncStateMessage } from "./network/messages.js"
17
18
 
18
19
  /** A Repo is a collection of documents with networking, syncing, and storage capabilities. */
19
20
  /** The `Repo` is the main entry point of this library
@@ -37,6 +38,8 @@ export class Repo extends EventEmitter<RepoEvents> {
37
38
 
38
39
  #handleCache: Record<DocumentId, DocHandle<any>> = {}
39
40
 
41
+ #synchronizer: CollectionSynchronizer
42
+
40
43
  /** By default, we share generously with all peers. */
41
44
  /** @hidden */
42
45
  sharePolicy: SharePolicy = async () => true
@@ -98,7 +101,7 @@ export class Repo extends EventEmitter<RepoEvents> {
98
101
  }
99
102
 
100
103
  // Register the document with the synchronizer. This advertises our interest in the document.
101
- synchronizer.addDocument(handle.documentId)
104
+ this.#synchronizer.addDocument(handle.documentId)
102
105
  })
103
106
 
104
107
  this.on("delete-document", ({ documentId }) => {
@@ -114,10 +117,10 @@ export class Repo extends EventEmitter<RepoEvents> {
114
117
 
115
118
  // SYNCHRONIZER
116
119
  // The synchronizer uses the network subsystem to keep documents in sync with peers.
117
- const synchronizer = new CollectionSynchronizer(this)
120
+ this.#synchronizer = new CollectionSynchronizer(this)
118
121
 
119
122
  // When the synchronizer emits messages, send them to peers
120
- synchronizer.on("message", message => {
123
+ this.#synchronizer.on("message", message => {
121
124
  this.#log(`sending ${message.type} message to ${message.targetId}`)
122
125
  networkSubsystem.send(message)
123
126
  })
@@ -135,18 +138,27 @@ export class Repo extends EventEmitter<RepoEvents> {
135
138
  // When we get a new peer, register it with the synchronizer
136
139
  networkSubsystem.on("peer", async ({ peerId }) => {
137
140
  this.#log("peer connected", { peerId })
138
- synchronizer.addPeer(peerId)
141
+ this.#synchronizer.addPeer(peerId)
139
142
  })
140
143
 
141
144
  // When a peer disconnects, remove it from the synchronizer
142
145
  networkSubsystem.on("peer-disconnected", ({ peerId }) => {
143
- synchronizer.removePeer(peerId)
146
+ this.#synchronizer.removePeer(peerId)
144
147
  })
145
148
 
146
149
  // Handle incoming messages
147
150
  networkSubsystem.on("message", async msg => {
148
- await synchronizer.receiveMessage(msg)
151
+ await this.#synchronizer.receiveMessage(msg)
149
152
  })
153
+
154
+ if (storageSubsystem) {
155
+ const debouncedSaveSyncState: (syncState: SyncStateMessage) => void =
156
+ throttle(({ documentId, peerId, syncState }: SyncStateMessage) => {
157
+ storageSubsystem.saveSyncState(documentId, peerId, syncState)
158
+ }, this.saveDebounceRate)
159
+
160
+ this.#synchronizer.on("sync-state", debouncedSaveSyncState)
161
+ }
150
162
  }
151
163
 
152
164
  /** Returns an existing handle if we have it; creates one otherwise. */
@@ -172,6 +184,11 @@ export class Repo extends EventEmitter<RepoEvents> {
172
184
  return this.#handleCache
173
185
  }
174
186
 
187
+ /** Returns a list of all connected peer ids */
188
+ get peers(): PeerId[] {
189
+ return this.#synchronizer.peers
190
+ }
191
+
175
192
  /**
176
193
  * Creates a new document and returns a handle to it. The initial value of the document is
177
194
  * an empty object `{}`. Its documentId is generated by the system. we emit a `document` event
package/src/index.ts CHANGED
@@ -47,6 +47,7 @@ export type {
47
47
  DocHandleDeletePayload,
48
48
  DocHandleEncodedChangePayload,
49
49
  DocHandleEphemeralMessagePayload,
50
+ DocHandleRemoteHeadsPayload,
50
51
  DocHandleEvents,
51
52
  DocHandleOptions,
52
53
  DocHandleOutboundEphemeralMessagePayload,
@@ -1,3 +1,4 @@
1
+ import { SyncState } from "@automerge/automerge"
1
2
  import { DocumentId, PeerId, SessionId } from "../types.js"
2
3
 
3
4
  /**
@@ -119,6 +120,13 @@ export type MessageContents<T extends Message = Message> =
119
120
  ? Omit<T, "senderId" | "count" | "sessionId">
120
121
  : Omit<T, "senderId">
121
122
 
123
+ /** Notify the repo that the sync state has changed */
124
+ export interface SyncStateMessage {
125
+ peerId: PeerId
126
+ documentId: DocumentId
127
+ syncState: SyncState
128
+ }
129
+
122
130
  // TYPE GUARDS
123
131
 
124
132
  export const isValidRepoMessage = (message: Message): message is RepoMessage =>
@@ -2,7 +2,7 @@ import * as A from "@automerge/automerge/next"
2
2
  import debug from "debug"
3
3
  import { headsAreSame } from "../helpers/headsAreSame.js"
4
4
  import { mergeArrays } from "../helpers/mergeArrays.js"
5
- import { type DocumentId } from "../types.js"
5
+ import { PeerId, type DocumentId } from "../types.js"
6
6
  import { StorageAdapter } from "./StorageAdapter.js"
7
7
  import { ChunkInfo, StorageKey } from "./types.js"
8
8
  import { keyHash, headsHash } from "./keyHash.js"
@@ -146,6 +146,7 @@ export class StorageSubsystem {
146
146
  async removeDoc(documentId: DocumentId) {
147
147
  await this.#storageAdapter.removeRange([documentId, "snapshot"])
148
148
  await this.#storageAdapter.removeRange([documentId, "incremental"])
149
+ await this.#storageAdapter.removeRange([documentId, "sync-state"])
149
150
  }
150
151
 
151
152
  /**
@@ -205,10 +206,27 @@ export class StorageSubsystem {
205
206
  newChunkInfos.push({ key, type: "snapshot", size: binary.length })
206
207
 
207
208
  this.#chunkInfos.set(documentId, newChunkInfos)
208
-
209
209
  this.#compacting = false
210
210
  }
211
211
 
212
+ async loadSyncState(
213
+ documentId: DocumentId,
214
+ peerId: PeerId
215
+ ): Promise<A.SyncState | undefined> {
216
+ const key = [documentId, "sync-state", peerId]
217
+ const loaded = await this.#storageAdapter.load(key)
218
+ return loaded ? A.decodeSyncState(loaded) : undefined
219
+ }
220
+
221
+ async saveSyncState(
222
+ documentId: DocumentId,
223
+ peerId: PeerId,
224
+ syncState: A.SyncState
225
+ ): Promise<void> {
226
+ const key = [documentId, "sync-state", peerId]
227
+ await this.#storageAdapter.save(key, A.encodeSyncState(syncState))
228
+ }
229
+
212
230
  /**
213
231
  * Returns true if the document has changed since the last time it was saved.
214
232
  */
@@ -1,5 +1,4 @@
1
- import { StorageKey } from "./types.js"
2
- import { ChunkType } from "./types.js"
1
+ import type { StorageKey, ChunkType } from "./types.js"
3
2
 
4
3
  /**
5
4
  * Keys for storing Automerge documents are of the form: