@dxos/echo-pipeline 0.6.2 → 0.6.3-main.0308ae2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. package/dist/lib/browser/{chunk-UJQ5VS5V.mjs → chunk-6MJEONOX.mjs} +2569 -1066
  2. package/dist/lib/browser/chunk-6MJEONOX.mjs.map +7 -0
  3. package/dist/lib/browser/index.mjs +12 -1049
  4. package/dist/lib/browser/index.mjs.map +4 -4
  5. package/dist/lib/browser/meta.json +1 -1
  6. package/dist/lib/browser/testing/index.mjs +224 -2
  7. package/dist/lib/browser/testing/index.mjs.map +4 -4
  8. package/dist/lib/node/{chunk-RH6TDRML.cjs → chunk-PT5LWMPA.cjs} +3185 -1710
  9. package/dist/lib/node/chunk-PT5LWMPA.cjs.map +7 -0
  10. package/dist/lib/node/index.cjs +37 -1056
  11. package/dist/lib/node/index.cjs.map +4 -4
  12. package/dist/lib/node/meta.json +1 -1
  13. package/dist/lib/node/testing/index.cjs +238 -13
  14. package/dist/lib/node/testing/index.cjs.map +4 -4
  15. package/dist/types/src/automerge/automerge-host.d.ts +29 -2
  16. package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
  17. package/dist/types/src/automerge/collection-synchronizer.d.ts +61 -0
  18. package/dist/types/src/automerge/collection-synchronizer.d.ts.map +1 -0
  19. package/dist/types/src/automerge/collection-synchronizer.test.d.ts +2 -0
  20. package/dist/types/src/automerge/collection-synchronizer.test.d.ts.map +1 -0
  21. package/dist/types/src/automerge/echo-network-adapter.d.ts +9 -2
  22. package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
  23. package/dist/types/src/automerge/echo-replicator.d.ts +7 -0
  24. package/dist/types/src/automerge/echo-replicator.d.ts.map +1 -1
  25. package/dist/types/src/automerge/heads-store.d.ts +1 -1
  26. package/dist/types/src/automerge/heads-store.d.ts.map +1 -1
  27. package/dist/types/src/automerge/index.d.ts +2 -0
  28. package/dist/types/src/automerge/index.d.ts.map +1 -1
  29. package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts +3 -1
  30. package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts.map +1 -1
  31. package/dist/types/src/automerge/mesh-echo-replicator.d.ts +2 -2
  32. package/dist/types/src/automerge/mesh-echo-replicator.d.ts.map +1 -1
  33. package/dist/types/src/automerge/network-protocol.d.ts +31 -0
  34. package/dist/types/src/automerge/network-protocol.d.ts.map +1 -0
  35. package/dist/types/src/automerge/space-collection.d.ts +4 -0
  36. package/dist/types/src/automerge/space-collection.d.ts.map +1 -0
  37. package/dist/types/src/db-host/data-service.d.ts +2 -1
  38. package/dist/types/src/db-host/data-service.d.ts.map +1 -1
  39. package/dist/types/src/db-host/documents-synchronizer.d.ts +1 -1
  40. package/dist/types/src/db-host/documents-synchronizer.d.ts.map +1 -1
  41. package/dist/types/src/testing/index.d.ts +1 -0
  42. package/dist/types/src/testing/index.d.ts.map +1 -1
  43. package/dist/types/src/testing/test-replicator.d.ts +46 -0
  44. package/dist/types/src/testing/test-replicator.d.ts.map +1 -0
  45. package/package.json +33 -33
  46. package/src/automerge/automerge-host.test.ts +76 -14
  47. package/src/automerge/automerge-host.ts +219 -32
  48. package/src/automerge/automerge-repo.test.ts +2 -1
  49. package/src/automerge/collection-synchronizer.test.ts +91 -0
  50. package/src/automerge/collection-synchronizer.ts +204 -0
  51. package/src/automerge/echo-network-adapter.test.ts +5 -1
  52. package/src/automerge/echo-network-adapter.ts +69 -4
  53. package/src/automerge/echo-replicator.ts +9 -0
  54. package/src/automerge/heads-store.ts +6 -9
  55. package/src/automerge/index.ts +2 -0
  56. package/src/automerge/mesh-echo-replicator-connection.ts +6 -1
  57. package/src/automerge/mesh-echo-replicator.ts +28 -7
  58. package/src/automerge/network-protocol.ts +45 -0
  59. package/src/automerge/space-collection.ts +14 -0
  60. package/src/db-host/data-service.ts +26 -12
  61. package/src/db-host/documents-synchronizer.ts +17 -5
  62. package/src/metadata/metadata-store.ts +1 -1
  63. package/src/testing/index.ts +1 -0
  64. package/src/testing/test-replicator.ts +194 -0
  65. package/dist/lib/browser/chunk-UJQ5VS5V.mjs.map +0 -7
  66. package/dist/lib/node/chunk-RH6TDRML.cjs.map +0 -7
@@ -19,8 +19,11 @@ import {
19
19
  type AnyDocumentId,
20
20
  type DocHandle,
21
21
  type DocumentId,
22
+ type PeerCandidatePayload,
23
+ type PeerDisconnectedPayload,
22
24
  type PeerId,
23
25
  type StorageAdapterInterface,
26
+ type StorageKey,
24
27
  } from '@dxos/automerge/automerge-repo';
25
28
  import { Context, Resource, cancelWithContext, type Lifecycle } from '@dxos/context';
26
29
  import { type SpaceDoc } from '@dxos/echo-protocol';
@@ -34,6 +37,7 @@ import { type DocHeadsList, type FlushRequest } from '@dxos/protocols/proto/dxos
34
37
  import { trace } from '@dxos/tracing';
35
38
  import { mapValues } from '@dxos/util';
36
39
 
40
+ import { CollectionSynchronizer, diffCollectionState, type CollectionState } from './collection-synchronizer';
37
41
  import { EchoNetworkAdapter, isEchoPeerMetadata } from './echo-network-adapter';
38
42
  import { type EchoReplicator } from './echo-replicator';
39
43
  import { HeadsStore } from './heads-store';
@@ -65,6 +69,14 @@ export class AutomergeHost extends Resource {
65
69
  private readonly _indexMetadataStore: IndexMetadataStore;
66
70
  private readonly _echoNetworkAdapter = new EchoNetworkAdapter({
67
71
  getContainingSpaceForDocument: this._getContainingSpaceForDocument.bind(this),
72
+ onCollectionStateQueried: this._onCollectionStateQueried.bind(this),
73
+ onCollectionStateReceived: this._onCollectionStateReceived.bind(this),
74
+ });
75
+
76
+ private readonly _collectionSynchronizer = new CollectionSynchronizer({
77
+ queryCollectionState: this._queryCollectionState.bind(this),
78
+ sendCollectionState: this._sendCollectionState.bind(this),
79
+ shouldSyncCollection: this._shouldSyncCollection.bind(this),
68
80
  });
69
81
 
70
82
  private _repo!: Repo;
@@ -72,7 +84,7 @@ export class AutomergeHost extends Resource {
72
84
  private readonly _headsStore: HeadsStore;
73
85
 
74
86
  @trace.info()
75
- private _peerId!: string;
87
+ private _peerId!: PeerId;
76
88
 
77
89
  constructor({ db, indexMetadataStore }: AutomergeHostParams) {
78
90
  super();
@@ -81,7 +93,7 @@ export class AutomergeHost extends Resource {
81
93
  db: db.sublevel('automerge'),
82
94
  callbacks: {
83
95
  beforeSave: async (params) => this._beforeSave(params),
84
- afterSave: async () => this._afterSave(),
96
+ afterSave: async (key) => this._afterSave(key),
85
97
  },
86
98
  });
87
99
  this._headsStore = new HeadsStore({ db: db.sublevel('heads') });
@@ -105,11 +117,23 @@ export class AutomergeHost extends Resource {
105
117
  ],
106
118
  });
107
119
 
120
+ Event.wrap(this._echoNetworkAdapter, 'peer-candidate').on(this._ctx, ((e: PeerCandidatePayload) =>
121
+ this._onPeerConnected(e.peerId)) as any);
122
+ Event.wrap(this._echoNetworkAdapter, 'peer-disconnected').on(this._ctx, ((e: PeerDisconnectedPayload) =>
123
+ this._onPeerDisconnected(e.peerId)) as any);
124
+
125
+ this._collectionSynchronizer.remoteStateUpdated.on(this._ctx, ({ collectionId, peerId }) => {
126
+ this._onRemoteCollectionStateUpdated(collectionId, peerId);
127
+ });
128
+
129
+ await this._echoNetworkAdapter.open();
130
+ await this._collectionSynchronizer.open();
108
131
  await this._echoNetworkAdapter.open();
109
132
  await this._echoNetworkAdapter.whenConnected();
110
133
  }
111
134
 
112
135
  protected override async _close() {
136
+ await this._collectionSynchronizer.close();
113
137
  await this._storage.close?.();
114
138
  await this._echoNetworkAdapter.close();
115
139
  await this._ctx.dispose();
@@ -122,6 +146,10 @@ export class AutomergeHost extends Resource {
122
146
  return this._repo;
123
147
  }
124
148
 
149
+ get peerId(): PeerId {
150
+ return this._peerId;
151
+ }
152
+
125
153
  get loadedDocsCount(): number {
126
154
  return Object.keys(this._repo.handles).length;
127
155
  }
@@ -175,24 +203,31 @@ export class AutomergeHost extends Resource {
175
203
  }
176
204
 
177
205
  async waitUntilHeadsReplicated(heads: DocHeadsList): Promise<void> {
178
- await Promise.all(
179
- heads.entries?.map(async ({ documentId, heads }) => {
180
- if (!heads || heads.length === 0) {
181
- return;
182
- }
183
-
184
- const currentHeads = this.getHeads(documentId as DocumentId);
185
- if (currentHeads !== null && headsEquals(currentHeads, heads)) {
186
- return;
187
- }
188
-
189
- const handle = await this.loadDoc(Context.default(), documentId as DocumentId);
190
- await waitForHeads(handle, heads);
191
- }) ?? [],
192
- );
206
+ const entries = heads.entries;
207
+ if (!entries?.length) {
208
+ return;
209
+ }
210
+ const documentIds = entries.map((entry) => entry.documentId as DocumentId);
211
+ const documentHeads = await this.getHeads(documentIds);
212
+ const headsToWait = entries.filter((entry, index) => {
213
+ const targetHeads = entry.heads;
214
+ if (!targetHeads || targetHeads.length === 0) {
215
+ return false;
216
+ }
217
+ const currentHeads = documentHeads[index];
218
+ return !(currentHeads !== null && headsEquals(currentHeads, targetHeads));
219
+ });
220
+ if (headsToWait.length > 0) {
221
+ await Promise.all(
222
+ headsToWait.map(async (entry, index) => {
223
+ const handle = await this.loadDoc(Context.default(), entry.documentId as DocumentId);
224
+ await waitForHeads(handle, entry.heads!);
225
+ }),
226
+ );
227
+ }
193
228
 
194
- // Flush to disk also so that the indexer can pick up the changes.
195
- await this._repo.flush((heads.entries?.map((entry) => entry.documentId) ?? []) as DocumentId[]);
229
+ // Flush to disk handles loaded to memory also so that the indexer can pick up the changes.
230
+ await this._repo.flush(documentIds.filter((documentId) => !!this._repo.handles[documentId]));
196
231
  }
197
232
 
198
233
  async reIndexHeads(documentIds: DocumentId[]) {
@@ -247,12 +282,10 @@ export class AutomergeHost extends Resource {
247
282
  return;
248
283
  }
249
284
 
250
- const spaceKey = getSpaceKeyFromDoc(doc) ?? undefined;
251
-
252
285
  const heads = getHeads(doc);
253
-
254
286
  this._headsStore.setHeads(handle.documentId, heads, batch);
255
287
 
288
+ const spaceKey = getSpaceKeyFromDoc(doc) ?? undefined;
256
289
  const objectIds = Object.keys(doc.objects ?? {});
257
290
  const encodedIds = objectIds.map((objectId) =>
258
291
  objectPointerCodec.encode({ documentId: handle.documentId, objectId, spaceKey }),
@@ -261,11 +294,27 @@ export class AutomergeHost extends Resource {
261
294
  this._indexMetadataStore.markDirty(idToLastHash, batch);
262
295
  }
263
296
 
297
+ private _shouldSyncCollection(collectionId: string, peerId: PeerId): boolean {
298
+ const peerMetadata = this._repo.peerMetadataByPeerId[peerId];
299
+ if (isEchoPeerMetadata(peerMetadata)) {
300
+ return this._echoNetworkAdapter.shouldSyncCollection(peerId, { collectionId });
301
+ }
302
+
303
+ return false;
304
+ }
305
+
264
306
  /**
265
307
  * Called by AutomergeStorageAdapter after levelDB batch commit.
266
308
  */
267
- private async _afterSave() {
309
+ private async _afterSave(path: StorageKey) {
268
310
  this._indexMetadataStore.notifyMarkedDirty();
311
+
312
+ const documentId = path[0] as DocumentId;
313
+ const document = this._repo.handles[documentId]?.docSync();
314
+ if (document) {
315
+ const heads = getHeads(document);
316
+ this._onHeadsChanged(documentId, heads);
317
+ }
269
318
  }
270
319
 
271
320
  @trace.info({ depth: null })
@@ -323,16 +372,135 @@ export class AutomergeHost extends Resource {
323
372
  await this._repo.flush(documentIds as DocumentId[] | undefined);
324
373
  }
325
374
 
326
- async getHeads(documentId: DocumentId): Promise<Heads | undefined> {
327
- const handle = this._repo.handles[documentId];
328
- if (handle) {
329
- const doc = handle.docSync();
330
- if (!doc) {
331
- return undefined;
375
+ async getHeads(documentIds: DocumentId[]): Promise<(Heads | undefined)[]> {
376
+ const result: (Heads | undefined)[] = [];
377
+ const storeRequestIds: DocumentId[] = [];
378
+ const storeResultIndices: number[] = [];
379
+ for (const documentId of documentIds) {
380
+ const doc = this._repo.handles[documentId]?.docSync();
381
+ if (doc) {
382
+ result.push(getHeads(doc));
383
+ } else {
384
+ storeRequestIds.push(documentId);
385
+ storeResultIndices.push(result.length);
386
+ result.push(undefined);
387
+ }
388
+ }
389
+ if (storeRequestIds.length > 0) {
390
+ const storedHeads = await this._headsStore.getHeads(storeRequestIds);
391
+ for (let i = 0; i < storedHeads.length; i++) {
392
+ result[storeResultIndices[i]] = storedHeads[i];
393
+ }
394
+ }
395
+ return result;
396
+ }
397
+
398
+ //
399
+ // Collection sync.
400
+ //
401
+
402
+ getLocalCollectionState(collectionId: string): CollectionState | undefined {
403
+ return this._collectionSynchronizer.getLocalCollectionState(collectionId);
404
+ }
405
+
406
+ getRemoteCollectionStates(collectionId: string): ReadonlyMap<PeerId, CollectionState> {
407
+ return this._collectionSynchronizer.getRemoteCollectionStates(collectionId);
408
+ }
409
+
410
+ refreshCollection(collectionId: string) {
411
+ this._collectionSynchronizer.refreshCollection(collectionId);
412
+ }
413
+
414
+ async getCollectionSyncState(collectionId: string): Promise<CollectionSyncState> {
415
+ const result: CollectionSyncState = {
416
+ peers: [],
417
+ };
418
+
419
+ const localState = this.getLocalCollectionState(collectionId);
420
+ const remoteState = this.getRemoteCollectionStates(collectionId);
421
+
422
+ if (!localState) {
423
+ return result;
424
+ }
425
+
426
+ for (const [peerId, state] of remoteState) {
427
+ const diff = diffCollectionState(localState, state);
428
+ result.peers.push({
429
+ peerId,
430
+ differentDocuments: diff.different.length,
431
+ });
432
+ }
433
+
434
+ return result;
435
+ }
436
+
437
+ /**
438
+ * Update the local collection state based on the locally stored document heads.
439
+ */
440
+ async updateLocalCollectionState(collectionId: string, documentIds: DocumentId[]) {
441
+ const heads = await this.getHeads(documentIds);
442
+ const documents: Record<DocumentId, Heads> = Object.fromEntries(
443
+ heads.map((heads, index) => [documentIds[index], heads ?? []]),
444
+ );
445
+ this._collectionSynchronizer.setLocalCollectionState(collectionId, { documents });
446
+ }
447
+
448
+ private _onCollectionStateQueried(collectionId: string, peerId: PeerId) {
449
+ this._collectionSynchronizer.onCollectionStateQueried(collectionId, peerId);
450
+ }
451
+
452
+ private _onCollectionStateReceived(collectionId: string, peerId: PeerId, state: unknown) {
453
+ this._collectionSynchronizer.onRemoteStateReceived(collectionId, peerId, decodeCollectionState(state));
454
+ }
455
+
456
+ private _queryCollectionState(collectionId: string, peerId: PeerId) {
457
+ this._echoNetworkAdapter.queryCollectionState(collectionId, peerId);
458
+ }
459
+
460
+ private _sendCollectionState(collectionId: string, peerId: PeerId, state: CollectionState) {
461
+ this._echoNetworkAdapter.sendCollectionState(collectionId, peerId, encodeCollectionState(state));
462
+ }
463
+
464
+ private _onPeerConnected(peerId: PeerId) {
465
+ this._collectionSynchronizer.onConnectionOpen(peerId);
466
+ }
467
+
468
+ private _onPeerDisconnected(peerId: PeerId) {
469
+ this._collectionSynchronizer.onConnectionClosed(peerId);
470
+ }
471
+
472
+ private _onRemoteCollectionStateUpdated(collectionId: string, peerId: PeerId) {
473
+ const localState = this._collectionSynchronizer.getLocalCollectionState(collectionId);
474
+ const remoteState = this._collectionSynchronizer.getRemoteCollectionStates(collectionId).get(peerId);
475
+
476
+ if (!localState || !remoteState) {
477
+ return;
478
+ }
479
+
480
+ const { different } = diffCollectionState(localState, remoteState);
481
+
482
+ if (different.length === 0) {
483
+ return;
484
+ }
485
+
486
+ log.info('replication documents after collection sync', {
487
+ count: different.length,
488
+ });
489
+
490
+ // Load the documents that are different.
491
+ for (const documentId of different) {
492
+ this._repo.find(documentId);
493
+ }
494
+ }
495
+
496
+ private _onHeadsChanged(documentId: DocumentId, heads: Heads) {
497
+ for (const collectionId of this._collectionSynchronizer.getRegisteredCollectionIds()) {
498
+ const state = this._collectionSynchronizer.getLocalCollectionState(collectionId);
499
+ if (state?.documents[documentId]) {
500
+ const newState = structuredClone(state);
501
+ newState.documents[documentId] = heads;
502
+ this._collectionSynchronizer.setLocalCollectionState(collectionId, newState);
332
503
  }
333
- return getHeads(doc);
334
- } else {
335
- return this._headsStore.getHeads(documentId);
336
504
  }
337
505
  }
338
506
  }
@@ -366,3 +534,22 @@ const waitForHeads = async (handle: DocHandle<SpaceDoc>, heads: Heads) => {
366
534
  const changeIsPresentInDoc = (doc: Doc<any>, changeHash: string): boolean => {
367
535
  return !!getBackend(doc).getChangeByHash(changeHash);
368
536
  };
537
+
538
+ const decodeCollectionState = (state: unknown): CollectionState => {
539
+ invariant(typeof state === 'object' && state !== null, 'Invalid state');
540
+
541
+ return state as CollectionState;
542
+ };
543
+
544
+ const encodeCollectionState = (state: CollectionState): unknown => {
545
+ return state;
546
+ };
547
+
548
+ export type CollectionSyncState = {
549
+ peers: PeerSyncState[];
550
+ };
551
+
552
+ export type PeerSyncState = {
553
+ peerId: PeerId;
554
+ differentDocuments: number;
555
+ };
@@ -121,7 +121,6 @@ describe('AutomergeRepo', () => {
121
121
  });
122
122
  const clientAdapter: TestAdapter = new TestAdapter({
123
123
  send: (message: Message) => {
124
- console.log('clientAdapter.send', message);
125
124
  if (message.type !== 'doc-unavailable' && message.type !== 'sync') {
126
125
  hostAdapter.receive(message);
127
126
  }
@@ -376,6 +375,8 @@ describe('AutomergeRepo', () => {
376
375
  const meshAdapter = new MeshEchoReplicator();
377
376
  const echoAdapter = new EchoNetworkAdapter({
378
377
  getContainingSpaceForDocument: async () => spaceKey,
378
+ onCollectionStateQueried: () => {},
379
+ onCollectionStateReceived: () => {},
379
380
  });
380
381
  const repo = new Repo({
381
382
  network: [echoAdapter],
@@ -0,0 +1,91 @@
1
+ //
2
+ // Copyright 2024 DXOS.org
3
+ //
4
+
5
+ import { expect } from 'chai';
6
+
7
+ import { sleep } from '@dxos/async';
8
+ import type { PeerId } from '@dxos/automerge/automerge-repo';
9
+ import { afterTest, describe, test } from '@dxos/test';
10
+
11
+ import { CollectionSynchronizer, diffCollectionState, type CollectionState } from './collection-synchronizer';
12
+
13
+ describe('CollectionSynchronizer', () => {
14
+ test('sync two peers', async () => {
15
+ const LATENCY = 10;
16
+
17
+ const peerId1 = 'peer1' as PeerId;
18
+ const peerId2 = 'peer2' as PeerId;
19
+ const collectionId = 'collection-test';
20
+
21
+ const peer1 = await new CollectionSynchronizer({
22
+ queryCollectionState: (collectionId, peerId) =>
23
+ queueMicrotask(async () => {
24
+ await sleep(LATENCY);
25
+ peer2.onCollectionStateQueried(collectionId, peerId);
26
+ }),
27
+ sendCollectionState: (collectionId, peerId, state) =>
28
+ queueMicrotask(async () => {
29
+ await sleep(LATENCY);
30
+ peer2.onRemoteStateReceived(collectionId, peerId, structuredClone(state));
31
+ }),
32
+ shouldSyncCollection: () => true,
33
+ }).open();
34
+ afterTest(() => peer1.close());
35
+ const peer2 = await new CollectionSynchronizer({
36
+ queryCollectionState: (collectionId, peerId) =>
37
+ queueMicrotask(async () => {
38
+ await sleep(LATENCY);
39
+ peer1.onCollectionStateQueried(collectionId, peerId);
40
+ }),
41
+ sendCollectionState: (collectionId, peerId, state) =>
42
+ queueMicrotask(async () => {
43
+ await sleep(LATENCY);
44
+ peer1.onRemoteStateReceived(collectionId, peerId, structuredClone(state));
45
+ }),
46
+ shouldSyncCollection: () => true,
47
+ }).open();
48
+ afterTest(() => peer2.close());
49
+
50
+ peer1.onConnectionOpen(peerId2);
51
+ peer2.onConnectionOpen(peerId1);
52
+
53
+ peer1.setLocalCollectionState(collectionId, STATE_1);
54
+ peer2.setLocalCollectionState(collectionId, STATE_2);
55
+
56
+ peer1.refreshCollection(collectionId);
57
+ peer2.refreshCollection(collectionId);
58
+
59
+ await Promise.all([
60
+ peer1.remoteStateUpdated.waitFor((ev) => ev.collectionId === collectionId && ev.peerId === peerId2),
61
+ peer2.remoteStateUpdated.waitFor((ev) => ev.collectionId === collectionId && ev.peerId === peerId1),
62
+ ]);
63
+
64
+ expect(peer1.getRemoteCollectionStates(collectionId).get(peerId2)).to.deep.equal(STATE_2);
65
+ expect(peer2.getRemoteCollectionStates(collectionId).get(peerId1)).to.deep.equal(STATE_1);
66
+ });
67
+
68
+ test('diff collection state', () => {
69
+ const diff = diffCollectionState(STATE_1, STATE_2);
70
+
71
+ expect(diff).to.deep.equal({
72
+ different: ['b', 'c', 'd'],
73
+ });
74
+ });
75
+ });
76
+
77
+ const STATE_1: CollectionState = {
78
+ documents: {
79
+ a: ['1'],
80
+ b: ['2'],
81
+ c: ['3'],
82
+ },
83
+ };
84
+
85
+ const STATE_2: CollectionState = {
86
+ documents: {
87
+ a: ['1'],
88
+ b: ['4'],
89
+ d: ['3'],
90
+ },
91
+ };
@@ -0,0 +1,204 @@
1
+ //
2
+ // Copyright 2024 DXOS.org
3
+ //
4
+
5
+ import { asyncReturn, Event, scheduleTask, scheduleTaskInterval } from '@dxos/async';
6
+ import { next as am } from '@dxos/automerge/automerge';
7
+ import type { DocumentId, PeerId } from '@dxos/automerge/automerge-repo';
8
+ import { Resource, type Context } from '@dxos/context';
9
+ import { defaultMap } from '@dxos/util';
10
+
11
+ const MIN_QUERY_INTERVAL = 5_000;
12
+
13
+ const POLL_INTERVAL = 30_000;
14
+
15
+ export type CollectionSynchronizerParams = {
16
+ sendCollectionState: (collectionId: string, peerId: PeerId, state: CollectionState) => void;
17
+ queryCollectionState: (collectionId: string, peerId: PeerId) => void;
18
+ shouldSyncCollection: (collectionId: string, peerId: PeerId) => boolean;
19
+ };
20
+
21
+ /**
22
+ * Implements collection sync protocol.
23
+ */
24
+ export class CollectionSynchronizer extends Resource {
25
+ private readonly _sendCollectionState: CollectionSynchronizerParams['sendCollectionState'];
26
+ private readonly _queryCollectionState: CollectionSynchronizerParams['queryCollectionState'];
27
+ private readonly _shouldSyncCollection: CollectionSynchronizerParams['shouldSyncCollection'];
28
+
29
+ /**
30
+ * CollectionId -> State.
31
+ */
32
+ private readonly _perCollectionStates = new Map<string, PerCollectionState>();
33
+
34
+ private readonly _connectedPeers = new Set<PeerId>();
35
+
36
+ public readonly remoteStateUpdated = new Event<{ collectionId: string; peerId: PeerId }>();
37
+
38
+ constructor(params: CollectionSynchronizerParams) {
39
+ super();
40
+ this._sendCollectionState = params.sendCollectionState;
41
+ this._queryCollectionState = params.queryCollectionState;
42
+ this._shouldSyncCollection = params.shouldSyncCollection;
43
+ }
44
+
45
+ protected override async _open(ctx: Context): Promise<void> {
46
+ scheduleTaskInterval(
47
+ this._ctx,
48
+ async () => {
49
+ for (const collectionId of this._perCollectionStates.keys()) {
50
+ this.refreshCollection(collectionId);
51
+ await asyncReturn();
52
+ }
53
+ },
54
+ POLL_INTERVAL,
55
+ );
56
+ }
57
+
58
+ getRegisteredCollectionIds(): string[] {
59
+ return [...this._perCollectionStates.keys()];
60
+ }
61
+
62
+ getLocalCollectionState(collectionId: string): CollectionState | undefined {
63
+ return this._getPerCollectionState(collectionId).localState;
64
+ }
65
+
66
+ setLocalCollectionState(collectionId: string, state: CollectionState) {
67
+ this._getPerCollectionState(collectionId).localState = state;
68
+
69
+ queueMicrotask(async () => {
70
+ if (!this._ctx.disposed) {
71
+ this._refreshInterestedPeers(collectionId);
72
+ this.refreshCollection(collectionId);
73
+ }
74
+ });
75
+ }
76
+
77
+ getRemoteCollectionStates(collectionId: string): ReadonlyMap<PeerId, CollectionState> {
78
+ return this._getPerCollectionState(collectionId).remoteStates;
79
+ }
80
+
81
+ refreshCollection(collectionId: string) {
82
+ let scheduleAnotherRefresh = false;
83
+ const state = this._getPerCollectionState(collectionId);
84
+ for (const peerId of this._connectedPeers) {
85
+ if (state.interestedPeers.has(peerId)) {
86
+ const lastQueried = state.lastQueried.get(peerId) ?? 0;
87
+ if (Date.now() - lastQueried > MIN_QUERY_INTERVAL) {
88
+ state.lastQueried.set(peerId, Date.now());
89
+ this._queryCollectionState(collectionId, peerId);
90
+ } else {
91
+ scheduleAnotherRefresh = true;
92
+ }
93
+ }
94
+ }
95
+ if (scheduleAnotherRefresh) {
96
+ scheduleTask(this._ctx, () => this.refreshCollection(collectionId), MIN_QUERY_INTERVAL);
97
+ }
98
+ }
99
+
100
+ /**
101
+ * Callback when a connection to a peer is established.
102
+ */
103
+ onConnectionOpen(peerId: PeerId) {
104
+ this._connectedPeers.add(peerId);
105
+
106
+ queueMicrotask(async () => {
107
+ if (this._ctx.disposed) {
108
+ return;
109
+ }
110
+ for (const [collectionId, state] of this._perCollectionStates.entries()) {
111
+ if (this._shouldSyncCollection(collectionId, peerId)) {
112
+ state.interestedPeers.add(peerId);
113
+ state.lastQueried.set(peerId, Date.now());
114
+ this._queryCollectionState(collectionId, peerId);
115
+ }
116
+ }
117
+ });
118
+ }
119
+
120
+ /**
121
+ * Callback when a connection to a peer is closed.
122
+ */
123
+ onConnectionClosed(peerId: PeerId) {
124
+ this._connectedPeers.delete(peerId);
125
+
126
+ for (const perCollectionState of this._perCollectionStates.values()) {
127
+ perCollectionState.remoteStates.delete(peerId);
128
+ }
129
+ }
130
+
131
+ /**
132
+ * Callback when a peer queries the state of a collection.
133
+ */
134
+ onCollectionStateQueried(collectionId: string, peerId: PeerId) {
135
+ const perCollectionState = this._getPerCollectionState(collectionId);
136
+
137
+ if (perCollectionState.localState) {
138
+ this._sendCollectionState(collectionId, peerId, perCollectionState.localState);
139
+ }
140
+ }
141
+
142
+ /**
143
+ * Callback when a peer sends the state of a collection.
144
+ */
145
+ onRemoteStateReceived(collectionId: string, peerId: PeerId, state: CollectionState) {
146
+ const perCollectionState = this._getPerCollectionState(collectionId);
147
+ perCollectionState.remoteStates.set(peerId, state);
148
+ this.remoteStateUpdated.emit({ peerId, collectionId });
149
+ }
150
+
151
+ private _getPerCollectionState(collectionId: string): PerCollectionState {
152
+ return defaultMap(this._perCollectionStates, collectionId, () => ({
153
+ localState: undefined,
154
+ remoteStates: new Map(),
155
+ interestedPeers: new Set(),
156
+ lastQueried: new Map(),
157
+ }));
158
+ }
159
+
160
+ private _refreshInterestedPeers(collectionId: string) {
161
+ for (const peerId of this._connectedPeers) {
162
+ if (this._shouldSyncCollection(collectionId, peerId)) {
163
+ this._getPerCollectionState(collectionId).interestedPeers.add(peerId);
164
+ } else {
165
+ this._getPerCollectionState(collectionId).interestedPeers.delete(peerId);
166
+ }
167
+ }
168
+ }
169
+ }
170
+
171
+ type PerCollectionState = {
172
+ localState?: CollectionState;
173
+ remoteStates: Map<PeerId, CollectionState>;
174
+ interestedPeers: Set<PeerId>;
175
+ lastQueried: Map<PeerId, number>;
176
+ };
177
+
178
+ export type CollectionState = {
179
+ /**
180
+ * DocumentId -> Heads.
181
+ */
182
+ documents: Record<string, string[]>;
183
+ };
184
+
185
+ export type CollectionStateDiff = {
186
+ different: DocumentId[];
187
+ };
188
+
189
+ export const diffCollectionState = (local: CollectionState, remote: CollectionState): CollectionStateDiff => {
190
+ const allDocuments = new Set<DocumentId>([...Object.keys(local.documents), ...Object.keys(remote.documents)] as any);
191
+
192
+ const different: DocumentId[] = [];
193
+ for (const documentId of allDocuments) {
194
+ if (
195
+ !local.documents[documentId] ||
196
+ !remote.documents[documentId] ||
197
+ !am.equals(local.documents[documentId], remote.documents[documentId])
198
+ ) {
199
+ different.push(documentId as DocumentId);
200
+ }
201
+ }
202
+
203
+ return { different };
204
+ };
@@ -103,7 +103,11 @@ describe('EchoNetworkAdapter', () => {
103
103
  });
104
104
 
105
105
  const createConnectedAdapter = async (replicator: MeshEchoReplicator) => {
106
- const adapter = new EchoNetworkAdapter({ getContainingSpaceForDocument: async () => null });
106
+ const adapter = new EchoNetworkAdapter({
107
+ getContainingSpaceForDocument: async () => null,
108
+ onCollectionStateQueried: () => {},
109
+ onCollectionStateReceived: () => {},
110
+ });
107
111
  adapter.connect(PEER_ID);
108
112
  await adapter.open();
109
113
  afterTest(() => adapter.close());