@dxos/echo-pipeline 0.6.7 → 0.6.8-main.3be982f
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/chunk-NE5LORNQ.mjs +2028 -0
- package/dist/lib/browser/chunk-NE5LORNQ.mjs.map +7 -0
- package/dist/lib/browser/chunk-Q4B5JN6L.mjs +2150 -0
- package/dist/lib/browser/chunk-Q4B5JN6L.mjs.map +7 -0
- package/dist/lib/browser/chunk-XPCF2V5U.mjs +31 -0
- package/dist/lib/browser/chunk-XPCF2V5U.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +16 -16
- package/dist/lib/browser/light.mjs +32 -0
- package/dist/lib/browser/light.mjs.map +7 -0
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +3 -7
- package/dist/lib/browser/testing/index.mjs.map +3 -3
- package/dist/lib/node/chunk-5KNTTBQK.cjs +2146 -0
- package/dist/lib/node/chunk-5KNTTBQK.cjs.map +7 -0
- package/dist/lib/node/chunk-DZVH7HDD.cjs +43 -0
- package/dist/lib/node/chunk-DZVH7HDD.cjs.map +7 -0
- package/dist/lib/node/chunk-IHR4UMVA.cjs +2043 -0
- package/dist/lib/node/chunk-IHR4UMVA.cjs.map +7 -0
- package/dist/lib/node/index.cjs +35 -37
- package/dist/lib/node/index.cjs.map +2 -2
- package/dist/lib/node/light.cjs +52 -0
- package/dist/lib/node/light.cjs.map +7 -0
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +12 -15
- package/dist/lib/node/testing/index.cjs.map +3 -3
- package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
- package/dist/types/src/automerge/network-protocol.d.ts.map +1 -1
- package/dist/types/src/common/codec.d.ts +0 -1
- package/dist/types/src/common/codec.d.ts.map +1 -1
- package/dist/types/src/common/feeds.d.ts.map +1 -1
- package/dist/types/src/common/index.d.ts +1 -0
- package/dist/types/src/common/index.d.ts.map +1 -1
- package/dist/types/src/common/space-id.d.ts +7 -0
- package/dist/types/src/common/space-id.d.ts.map +1 -0
- package/dist/types/src/db-host/index.d.ts +0 -2
- package/dist/types/src/db-host/index.d.ts.map +1 -1
- package/dist/types/src/light.d.ts +4 -0
- package/dist/types/src/light.d.ts.map +1 -0
- package/dist/types/src/pipeline/message-selector.d.ts.map +1 -1
- package/dist/types/src/space/space-manager.d.ts +1 -7
- package/dist/types/src/space/space-manager.d.ts.map +1 -1
- package/dist/types/src/space/space-protocol.d.ts +0 -1
- package/dist/types/src/space/space-protocol.d.ts.map +1 -1
- package/dist/types/src/space/space.d.ts +1 -10
- package/dist/types/src/space/space.d.ts.map +1 -1
- package/dist/types/src/testing/test-agent-builder.d.ts +0 -3
- package/dist/types/src/testing/test-agent-builder.d.ts.map +1 -1
- package/package.json +43 -33
- package/src/automerge/echo-network-adapter.ts +1 -1
- package/src/automerge/mesh-echo-replicator.ts +1 -1
- package/src/common/index.ts +1 -0
- package/src/common/space-id.ts +27 -0
- package/src/db-host/index.ts +0 -2
- package/src/light.ts +7 -0
- package/src/space/space-manager.ts +3 -19
- package/src/space/space.ts +2 -32
- package/src/testing/test-agent-builder.ts +0 -7
- package/dist/lib/browser/chunk-P6XSIJKM.mjs +0 -4281
- package/dist/lib/browser/chunk-P6XSIJKM.mjs.map +0 -7
- package/dist/lib/node/chunk-IYTGTZ7D.cjs +0 -4255
- package/dist/lib/node/chunk-IYTGTZ7D.cjs.map +0 -7
- package/dist/types/src/db-host/snapshot-manager.d.ts +0 -19
- package/dist/types/src/db-host/snapshot-manager.d.ts.map +0 -1
- package/dist/types/src/db-host/snapshot-store.d.ts +0 -16
- package/dist/types/src/db-host/snapshot-store.d.ts.map +0 -1
- package/dist/types/src/db-host/snapshot-store.test.d.ts +0 -2
- package/dist/types/src/db-host/snapshot-store.test.d.ts.map +0 -1
- package/src/db-host/snapshot-manager.ts +0 -54
- package/src/db-host/snapshot-store.test.ts +0 -31
- package/src/db-host/snapshot-store.ts +0 -61
|
@@ -0,0 +1,2150 @@
|
|
|
1
|
+
import "@dxos/node-std/globals";
|
|
2
|
+
import {
|
|
3
|
+
Buffer,
|
|
4
|
+
createIdFromSpaceKey
|
|
5
|
+
} from "./chunk-XPCF2V5U.mjs";
|
|
6
|
+
|
|
7
|
+
// packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts
|
|
8
|
+
import { UpdateScheduler } from "@dxos/async";
|
|
9
|
+
import { next as A } from "@dxos/automerge/automerge";
|
|
10
|
+
import { Resource } from "@dxos/context";
|
|
11
|
+
import { invariant } from "@dxos/invariant";
|
|
12
|
+
import { log } from "@dxos/log";
|
|
13
|
+
// Absolute source path baked in by the @dxos/log compile-time transform; the
// F/L fields in the log-call metadata below reference this file and the
// original TypeScript line numbers.
var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts";
// Maximum flush rate (per second) for batched outgoing document updates.
var MAX_UPDATE_FREQ = 10;
// Mirrors a set of automerge documents between the local Repo and a remote
// party: local "heads-changed" events are batched (via an UpdateScheduler
// capped at MAX_UPDATE_FREQ) and pushed through _params.sendUpdates, while
// mutations received from the remote are applied via update().
var DocumentsSynchronizer = class extends Resource {
  constructor(_params) {
    super();
    this._params = _params;
    // documentId -> sync state ({ handle, lastSentHead?, clearSubscriptions? }).
    this._syncStates = /* @__PURE__ */ new Map();
    // documentIds with local changes not yet flushed to the remote.
    this._pendingUpdates = /* @__PURE__ */ new Set();
    // UpdateScheduler; created in _open(). NOTE(review): addDocuments() calls
    // trigger() on this without a guard — appears to assume open() ran first;
    // confirm callers never add documents before open.
    this._sendUpdatesJob = void 0;
  }
  // Starts syncing the given documents once each is loaded from the repo.
  // A failed load is retried recursively up to 3 times before being dropped
  // with a warning.
  addDocuments(documentIds, retryCounter = 0) {
    if (retryCounter > 3) {
      log.warn("Failed to load document, retry limit reached", {
        documentIds
      }, {
        F: __dxlog_file,
        L: 49,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    for (const documentId of documentIds) {
      const doc = this._params.repo.find(documentId);
      doc.whenReady().then(() => {
        this._startSync(doc);
        this._pendingUpdates.add(doc.documentId);
        this._sendUpdatesJob.trigger();
      }).catch((error) => {
        log.warn("Failed to load document, wraparound", {
          documentId,
          error
        }, {
          F: __dxlog_file,
          L: 63,
          S: this,
          C: (f, a) => f(...a)
        });
        // Retry just this document with an incremented counter.
        this.addDocuments([
          documentId
        ], retryCounter + 1);
      });
    }
  }
  // Stops syncing the documents and drops any queued (unsent) updates for them.
  removeDocuments(documentIds) {
    for (const documentId of documentIds) {
      this._syncStates.get(documentId)?.clearSubscriptions?.();
      this._syncStates.delete(documentId);
      this._pendingUpdates.delete(documentId);
    }
  }
  async _open() {
    this._sendUpdatesJob = new UpdateScheduler(this._ctx, this._checkAndSendUpdates.bind(this), {
      maxFrequency: MAX_UPDATE_FREQ
    });
  }
  async _close() {
    // Let any in-flight flush finish before tearing down sync state.
    await this._sendUpdatesJob.join();
    this._syncStates.clear();
  }
  // Applies mutations received from the remote. `isNew` marks documents not
  // yet tracked locally: the mutation is loaded into the repo handle and
  // change-sync is started for it.
  update(updates) {
    for (const { documentId, mutation, isNew } of updates) {
      if (isNew) {
        const doc = this._params.repo.find(documentId);
        doc.update((doc2) => A.loadIncremental(doc2, mutation));
        this._startSync(doc);
      } else {
        this._writeMutation(documentId, mutation);
      }
    }
  }
  // Registers the handle and subscribes to its changes; no-op (with info log)
  // if the document is already being synced.
  _startSync(doc) {
    if (this._syncStates.has(doc.documentId)) {
      log.info("Document already being synced", {
        documentId: doc.documentId
      }, {
        F: __dxlog_file,
        L: 102,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    const syncState = {
      handle: doc
    };
    this._subscribeForChanges(syncState);
    this._syncStates.set(doc.documentId, syncState);
  }
  // Marks the document dirty and schedules a flush whenever its heads change;
  // records an unsubscribe closure on the sync state for removeDocuments().
  _subscribeForChanges(syncState) {
    const handler = () => {
      this._pendingUpdates.add(syncState.handle.documentId);
      this._sendUpdatesJob.trigger();
    };
    syncState.handle.on("heads-changed", handler);
    syncState.clearSubscriptions = () => syncState.handle.off("heads-changed", handler);
  }
  // Scheduler callback: drains the pending set, collects per-document deltas,
  // and sends them as one batch (skipped entirely when nothing changed).
  async _checkAndSendUpdates() {
    const updates = [];
    const docsWithPendingUpdates = Array.from(this._pendingUpdates);
    this._pendingUpdates.clear();
    for (const documentId of docsWithPendingUpdates) {
      const update = this._getPendingChanges(documentId);
      if (update) {
        updates.push({
          documentId,
          mutation: update
        });
      }
    }
    if (updates.length > 0) {
      this._params.sendUpdates({
        updates
      });
    }
  }
  // Returns the binary delta since the last send (full save on first send);
  // undefined when the doc is unavailable or has no new changes. Advances
  // lastSentHead as a side effect.
  _getPendingChanges(documentId) {
    const syncState = this._syncStates.get(documentId);
    invariant(syncState, "Sync state for document not found", {
      F: __dxlog_file,
      L: 143,
      S: this,
      A: [
        "syncState",
        "'Sync state for document not found'"
      ]
    });
    const doc = syncState.handle.docSync();
    if (!doc) {
      return;
    }
    const mutation = syncState.lastSentHead ? A.saveSince(doc, syncState.lastSentHead) : A.save(doc);
    if (mutation.length === 0) {
      return;
    }
    syncState.lastSentHead = A.getHeads(doc);
    return mutation;
  }
  // Applies a remote mutation to a tracked document. If our current heads
  // equal the heads we last sent, the remote already has everything we have,
  // so lastSentHead is advanced to avoid echoing this mutation back.
  _writeMutation(documentId, mutation) {
    const syncState = this._syncStates.get(documentId);
    invariant(syncState, "Sync state for document not found", {
      F: __dxlog_file,
      L: 158,
      S: this,
      A: [
        "syncState",
        "'Sync state for document not found'"
      ]
    });
    syncState.handle.update((doc) => {
      const headsBefore = A.getHeads(doc);
      const newDoc = A.loadIncremental(doc, mutation);
      if (A.equals(headsBefore, syncState.lastSentHead)) {
        syncState.lastSentHead = A.getHeads(newDoc);
      }
      return newDoc;
    });
  }
};
|
|
172
|
+
|
|
173
|
+
// packages/core/echo/echo-pipeline/src/automerge/collection-synchronizer.ts
|
|
174
|
+
import { asyncReturn, Event, scheduleTask, scheduleTaskInterval } from "@dxos/async";
|
|
175
|
+
import { next as am } from "@dxos/automerge/automerge";
|
|
176
|
+
import { Resource as Resource2 } from "@dxos/context";
|
|
177
|
+
import { defaultMap } from "@dxos/util";
|
|
178
|
+
// Minimum interval (ms) between collection-state queries to the same peer.
var MIN_QUERY_INTERVAL = 5e3;
// Interval (ms) of the background poll that re-queries all collections.
var POLL_INTERVAL = 3e4;
// Exchanges per-collection state summaries with connected peers: tracks which
// peers are interested in which collections, rate-limits outgoing queries, and
// records the remote states peers send back (exposed via remoteStateUpdated).
var CollectionSynchronizer = class extends Resource2 {
  constructor(params) {
    super();
    /**
     * CollectionId -> State.
     */
    this._perCollectionStates = /* @__PURE__ */ new Map();
    this._connectedPeers = /* @__PURE__ */ new Set();
    // Fired after a remote peer's state for a collection is recorded.
    this.remoteStateUpdated = new Event();
    this._sendCollectionState = params.sendCollectionState;
    this._queryCollectionState = params.queryCollectionState;
    this._shouldSyncCollection = params.shouldSyncCollection;
  }
  async _open(ctx) {
    // Background poll: refresh every registered collection, yielding to the
    // event loop between iterations.
    scheduleTaskInterval(this._ctx, async () => {
      for (const collectionId of this._perCollectionStates.keys()) {
        this.refreshCollection(collectionId);
        await asyncReturn();
      }
    }, POLL_INTERVAL);
  }
  getRegisteredCollectionIds() {
    return [
      ...this._perCollectionStates.keys()
    ];
  }
  getLocalCollectionState(collectionId) {
    return this._getPerCollectionState(collectionId).localState;
  }
  // Sets the local state, then asynchronously recomputes interested peers and
  // re-queries the collection (skipped if the resource was disposed meanwhile).
  setLocalCollectionState(collectionId, state) {
    this._getPerCollectionState(collectionId).localState = state;
    queueMicrotask(async () => {
      if (!this._ctx.disposed) {
        this._refreshInterestedPeers(collectionId);
        this.refreshCollection(collectionId);
      }
    });
  }
  getRemoteCollectionStates(collectionId) {
    return this._getPerCollectionState(collectionId).remoteStates;
  }
  // Queries all interested connected peers, rate-limited to one query per peer
  // per MIN_QUERY_INTERVAL; if any peer was skipped due to the rate limit, one
  // deferred refresh is scheduled to retry later.
  refreshCollection(collectionId) {
    let scheduleAnotherRefresh = false;
    const state = this._getPerCollectionState(collectionId);
    for (const peerId of this._connectedPeers) {
      if (state.interestedPeers.has(peerId)) {
        const lastQueried = state.lastQueried.get(peerId) ?? 0;
        if (Date.now() - lastQueried > MIN_QUERY_INTERVAL) {
          state.lastQueried.set(peerId, Date.now());
          this._queryCollectionState(collectionId, peerId);
        } else {
          scheduleAnotherRefresh = true;
        }
      }
    }
    if (scheduleAnotherRefresh) {
      scheduleTask(this._ctx, () => this.refreshCollection(collectionId), MIN_QUERY_INTERVAL);
    }
  }
  /**
   * Callback when a connection to a peer is established. Marks the peer as
   * interested in (and immediately queries) every collection the share policy
   * allows for it.
   */
  onConnectionOpen(peerId) {
    this._connectedPeers.add(peerId);
    queueMicrotask(async () => {
      if (this._ctx.disposed) {
        return;
      }
      for (const [collectionId, state] of this._perCollectionStates.entries()) {
        if (this._shouldSyncCollection(collectionId, peerId)) {
          state.interestedPeers.add(peerId);
          state.lastQueried.set(peerId, Date.now());
          this._queryCollectionState(collectionId, peerId);
        }
      }
    });
  }
  /**
   * Callback when a connection to a peer is closed. Drops the peer's cached
   * remote states for all collections.
   */
  onConnectionClosed(peerId) {
    this._connectedPeers.delete(peerId);
    for (const perCollectionState of this._perCollectionStates.values()) {
      perCollectionState.remoteStates.delete(peerId);
    }
  }
  /**
   * Callback when a peer queries the state of a collection. Responds only if
   * a local state has been set.
   */
  onCollectionStateQueried(collectionId, peerId) {
    const perCollectionState = this._getPerCollectionState(collectionId);
    if (perCollectionState.localState) {
      this._sendCollectionState(collectionId, peerId, perCollectionState.localState);
    }
  }
  /**
   * Callback when a peer sends the state of a collection.
   */
  onRemoteStateReceived(collectionId, peerId, state) {
    const perCollectionState = this._getPerCollectionState(collectionId);
    perCollectionState.remoteStates.set(peerId, state);
    this.remoteStateUpdated.emit({
      peerId,
      collectionId
    });
  }
  // Lazily creates the per-collection bookkeeping entry.
  _getPerCollectionState(collectionId) {
    return defaultMap(this._perCollectionStates, collectionId, () => ({
      localState: void 0,
      remoteStates: /* @__PURE__ */ new Map(),
      interestedPeers: /* @__PURE__ */ new Set(),
      lastQueried: /* @__PURE__ */ new Map()
    }));
  }
  // Recomputes, for each connected peer, whether it should sync this collection.
  _refreshInterestedPeers(collectionId) {
    for (const peerId of this._connectedPeers) {
      if (this._shouldSyncCollection(collectionId, peerId)) {
        this._getPerCollectionState(collectionId).interestedPeers.add(peerId);
      } else {
        this._getPerCollectionState(collectionId).interestedPeers.delete(peerId);
      }
    }
  }
};
|
|
304
|
+
// Compares two collection states and lists the documentIds that differ:
// a document counts as different when it is missing on either side or when
// its two head sets are not equal (per automerge `equals`).
var diffCollectionState = (local, remote) => {
  const different = [];
  const seen = /* @__PURE__ */ new Set();
  const compareDocument = (documentId) => {
    if (seen.has(documentId)) {
      return;
    }
    seen.add(documentId);
    const localHeads = local.documents[documentId];
    const remoteHeads = remote.documents[documentId];
    if (!localHeads || !remoteHeads || !am.equals(localHeads, remoteHeads)) {
      different.push(documentId);
    }
  };
  // Visit local keys first, then remote-only keys (same order as a Set built
  // from both key lists would produce).
  Object.keys(local.documents).forEach(compareDocument);
  Object.keys(remote.documents).forEach(compareDocument);
  return {
    different
  };
};
|
|
319
|
+
|
|
320
|
+
// packages/core/echo/echo-pipeline/src/automerge/leveldb-storage-adapter.ts
|
|
321
|
+
import { LifecycleState, Resource as Resource3 } from "@dxos/context";
|
|
322
|
+
// automerge-repo storage adapter backed by a LevelDB instance. Every
// operation becomes a no-op (returning undefined / []) once the resource is
// not OPEN; load/store sizes and durations are reported to an optional
// monitor.
var LevelDBStorageAdapter = class extends Resource3 {
  constructor(_params) {
    super();
    this._params = _params;
  }
  // Loads one chunk by exact key; undefined when the key is absent or the
  // adapter is closed.
  async load(keyArray) {
    try {
      if (this._lifecycleState !== LifecycleState.OPEN) {
        return void 0;
      }
      const startMs = Date.now();
      const chunk = await this._params.db.get(keyArray, {
        ...encodingOptions
      });
      this._params.monitor?.recordBytesLoaded(chunk.byteLength);
      this._params.monitor?.recordLoadDuration(Date.now() - startMs);
      return chunk;
    } catch (err) {
      // A missing key is an expected miss, not an error.
      if (isLevelDbNotFoundError(err)) {
        return void 0;
      }
      throw err;
    }
  }
  // Stores one chunk. beforeSave receives the open batch so callers can add
  // their own writes to the same atomic commit; afterSave runs after the
  // batch is written.
  async save(keyArray, binary) {
    if (this._lifecycleState !== LifecycleState.OPEN) {
      return void 0;
    }
    const startMs = Date.now();
    const batch = this._params.db.batch();
    await this._params.callbacks?.beforeSave?.({
      path: keyArray,
      batch
    });
    batch.put(keyArray, Buffer.from(binary), {
      ...encodingOptions
    });
    await batch.write();
    this._params.monitor?.recordBytesStored(binary.byteLength);
    await this._params.callbacks?.afterSave?.(keyArray);
    this._params.monitor?.recordStoreDuration(Date.now() - startMs);
  }
  async remove(keyArray) {
    if (this._lifecycleState !== LifecycleState.OPEN) {
      return void 0;
    }
    await this._params.db.del(keyArray, {
      ...encodingOptions
    });
  }
  // Loads every chunk whose key starts with keyPrefix. The "\uFFFF" sentinel
  // appended to the prefix caps the lexicographic range scan.
  async loadRange(keyPrefix) {
    if (this._lifecycleState !== LifecycleState.OPEN) {
      return [];
    }
    const startMs = Date.now();
    const result = [];
    for await (const [key, value] of this._params.db.iterator({
      gte: keyPrefix,
      lte: [
        ...keyPrefix,
        "\uFFFF"
      ],
      ...encodingOptions
    })) {
      result.push({
        key,
        data: value
      });
      this._params.monitor?.recordBytesLoaded(value.byteLength);
    }
    this._params.monitor?.recordLoadDuration(Date.now() - startMs);
    return result;
  }
  // Deletes every chunk under keyPrefix in one atomic batch.
  async removeRange(keyPrefix) {
    if (this._lifecycleState !== LifecycleState.OPEN) {
      return void 0;
    }
    const batch = this._params.db.batch();
    for await (const [key] of this._params.db.iterator({
      gte: keyPrefix,
      lte: [
        ...keyPrefix,
        "\uFFFF"
      ],
      ...encodingOptions
    })) {
      batch.del(key, {
        ...encodingOptions
      });
    }
    await batch.write();
  }
};
|
|
415
|
+
// Key/value codecs for the LevelDB storage adapter.
//
// Storage keys are arrays of string segments. They are serialized by joining
// the segments with "-" after percent-escaping "%" (as %25) and "-" (as %2D)
// inside each segment, so the join is unambiguous and reversible.
var keyEncoder = {
  encode: (key) => {
    const escaped = key.map((segment) => segment.replaceAll("%", "%25").replaceAll("-", "%2D"));
    return Buffer.from(escaped.join("-"));
  },
  decode: (key) => {
    const joined = Buffer.from(key).toString();
    // Unescape in the reverse order of encode.
    return joined.split("-").map((segment) => segment.replaceAll("%2D", "-").replaceAll("%25", "%"));
  },
  format: "buffer"
};
var encodingOptions = {
  keyEncoding: keyEncoder,
  valueEncoding: "buffer"
};
// `level` signals a missing key with this error code.
var isLevelDbNotFoundError = (err) => err.code === "LEVEL_NOT_FOUND";
|
|
425
|
+
|
|
426
|
+
// packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts
|
|
427
|
+
import { Event as Event2, asyncTimeout } from "@dxos/async";
|
|
428
|
+
import { getBackend, getHeads, isAutomerge, equals as headsEquals, save } from "@dxos/automerge/automerge";
|
|
429
|
+
import { Repo } from "@dxos/automerge/automerge-repo";
|
|
430
|
+
import { Context, Resource as Resource4, cancelWithContext } from "@dxos/context";
|
|
431
|
+
import { invariant as invariant3 } from "@dxos/invariant";
|
|
432
|
+
import { PublicKey } from "@dxos/keys";
|
|
433
|
+
import { log as log3 } from "@dxos/log";
|
|
434
|
+
import { objectPointerCodec } from "@dxos/protocols";
|
|
435
|
+
import { trace } from "@dxos/tracing";
|
|
436
|
+
|
|
437
|
+
// packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts
|
|
438
|
+
import { synchronized, Trigger } from "@dxos/async";
|
|
439
|
+
import { NetworkAdapter } from "@dxos/automerge/automerge-repo";
|
|
440
|
+
import { LifecycleState as LifecycleState2 } from "@dxos/context";
|
|
441
|
+
import { invariant as invariant2 } from "@dxos/invariant";
|
|
442
|
+
import { log as log2 } from "@dxos/log";
|
|
443
|
+
import { nonNullable } from "@dxos/util";
|
|
444
|
+
|
|
445
|
+
// packages/core/echo/echo-pipeline/src/automerge/network-protocol.ts
|
|
446
|
+
import { MESSAGE_TYPE_COLLECTION_QUERY, MESSAGE_TYPE_COLLECTION_STATE } from "@dxos/protocols";
|
|
447
|
+
// Type guards for the ECHO collection-sync protocol extension messages;
// the wire `type` tags come from @dxos/protocols.
var isCollectionQueryMessage = function(message) {
  return message.type === MESSAGE_TYPE_COLLECTION_QUERY;
};
var isCollectionStateMessage = function(message) {
  return message.type === MESSAGE_TYPE_COLLECTION_STATE;
};
|
|
449
|
+
|
|
450
|
+
// packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts
|
|
451
|
+
// Downleveled TypeScript decorator helper (equivalent to tslib's __decorate):
// applies `decorators` right-to-left either to a class (when called with
// fewer than 3 args) or to a method/property descriptor, preferring the
// runtime's Reflect.decorate when available.
function _ts_decorate(decorators, target, key, desc) {
  // c < 3: class decoration (r = the class). Otherwise r = the property
  // descriptor (looked up when desc is null).
  var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function")
    r = Reflect.decorate(decorators, target, key, desc);
  else
    for (var i = decorators.length - 1; i >= 0; i--)
      if (d = decorators[i])
        r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
  // For method/property decoration, install the (possibly replaced) descriptor.
  return c > 3 && r && Object.defineProperty(target, key, r), r;
}
|
|
461
|
+
// Absolute source path baked in by the @dxos/log compile-time transform.
var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts";
// automerge-repo NetworkAdapter bridging ECHO replicator connections into the
// Repo: forwards repo sync messages over per-peer readable/writable stream
// pairs and handles the collection-query / collection-state protocol
// extension itself (see _onMessage).
var EchoNetworkAdapter = class extends NetworkAdapter {
  constructor(_params) {
    super();
    this._params = _params;
    this._replicators = /* @__PURE__ */ new Set();
    // peerId -> { connection, reader, writer, isOpen }.
    this._connections = /* @__PURE__ */ new Map();
    this._lifecycleState = LifecycleState2.CLOSED;
    this._connected = new Trigger();
  }
  // NetworkAdapter API: the Repo assigns our own peer id here.
  connect(peerId, peerMetadata) {
    this.peerId = peerId;
    this.peerMetadata = peerMetadata;
    this._connected.wake();
  }
  send(message) {
    this._send(message);
  }
  disconnect() {
  }
  // Idempotent; emits the adapter's "ready" event once. (Wrapped with
  // `synchronized` via _ts_decorate below.)
  async open() {
    if (this._lifecycleState === LifecycleState2.OPEN) {
      return;
    }
    this._lifecycleState = LifecycleState2.OPEN;
    log2("emit ready", void 0, {
      F: __dxlog_file2,
      L: 81,
      S: this,
      C: (f, a) => f(...a)
    });
    this.emit("ready", {
      network: this
    });
  }
  // Idempotent; disconnects and clears all replicators.
  async close() {
    if (this._lifecycleState === LifecycleState2.CLOSED) {
      return this;
    }
    for (const replicator of this._replicators) {
      await replicator.disconnect();
    }
    this._replicators.clear();
    this._lifecycleState = LifecycleState2.CLOSED;
  }
  // Resolves once connect() was called; times out after 10s.
  async whenConnected() {
    await this._connected.wait({
      timeout: 1e4
    });
  }
  // Registers a replicator and hands it the connection-lifecycle callbacks.
  // Requires the adapter to be OPEN and connect() to have been called.
  async addReplicator(replicator) {
    invariant2(this._lifecycleState === LifecycleState2.OPEN, void 0, {
      F: __dxlog_file2,
      L: 107,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    invariant2(this.peerId, void 0, {
      F: __dxlog_file2,
      L: 108,
      S: this,
      A: [
        "this.peerId",
        ""
      ]
    });
    invariant2(!this._replicators.has(replicator), void 0, {
      F: __dxlog_file2,
      L: 109,
      S: this,
      A: [
        "!this._replicators.has(replicator)",
        ""
      ]
    });
    this._replicators.add(replicator);
    await replicator.connect({
      peerId: this.peerId,
      onConnectionOpen: this._onConnectionOpen.bind(this),
      onConnectionClosed: this._onConnectionClosed.bind(this),
      onConnectionAuthScopeChanged: this._onConnectionAuthScopeChanged.bind(this),
      isDocumentInRemoteCollection: this._params.isDocumentInRemoteCollection,
      getContainingSpaceForDocument: this._params.getContainingSpaceForDocument,
      // Derives the space id from the space key for replicators that need it.
      getContainingSpaceIdForDocument: async (documentId) => {
        const key = await this._params.getContainingSpaceForDocument(documentId);
        return key ? createIdFromSpaceKey(key) : null;
      }
    });
  }
  async removeReplicator(replicator) {
    invariant2(this._lifecycleState === LifecycleState2.OPEN, void 0, {
      F: __dxlog_file2,
      L: 128,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    invariant2(this._replicators.has(replicator), void 0, {
      F: __dxlog_file2,
      L: 129,
      S: this,
      A: [
        "this._replicators.has(replicator)",
        ""
      ]
    });
    await replicator.disconnect();
    this._replicators.delete(replicator);
  }
  // Delegates the advertise decision to the underlying connection; false when
  // the peer is not connected.
  async shouldAdvertise(peerId, params) {
    const connection = this._connections.get(peerId);
    if (!connection) {
      return false;
    }
    return connection.connection.shouldAdvertise(params);
  }
  shouldSyncCollection(peerId, params) {
    const connection = this._connections.get(peerId);
    if (!connection) {
      return false;
    }
    return connection.connection.shouldSyncCollection(params);
  }
  // Sends a collection-query protocol-extension message to targetId.
  queryCollectionState(collectionId, targetId) {
    const message = {
      type: "collection-query",
      senderId: this.peerId,
      targetId,
      collectionId
    };
    this._send(message);
  }
  // Sends our collection state to targetId.
  sendCollectionState(collectionId, targetId, state) {
    const message = {
      type: "collection-state",
      senderId: this.peerId,
      targetId,
      collectionId,
      state
    };
    this._send(message);
  }
  // Fire-and-forget write to the target's stream; write errors are only
  // logged while the connection is still open (teardown errors are expected).
  _send(message) {
    const connectionEntry = this._connections.get(message.targetId);
    if (!connectionEntry) {
      throw new Error("Connection not found.");
    }
    const writeStart = Date.now();
    connectionEntry.writer.write(message).then(() => {
      const durationMs = Date.now() - writeStart;
      this._params.monitor?.recordMessageSent(message, durationMs);
    }).catch((err) => {
      if (connectionEntry.isOpen) {
        log2.catch(err, void 0, {
          F: __dxlog_file2,
          L: 189,
          S: this,
          C: (f, a) => f(...a)
        });
      }
      this._params.monitor?.recordMessageSendingFailed(message);
    });
  }
  // TODO(dmaretskyi): Remove.
  getPeersInterestedInCollection(collectionId) {
    return Array.from(this._connections.values()).map((connection) => {
      return connection.connection.shouldSyncCollection({
        collectionId
      }) ? connection.connection.peerId : null;
    }).filter(nonNullable);
  }
  // Registers the connection, starts a background read loop that pumps
  // incoming messages into _onMessage, and announces the peer to the repo.
  _onConnectionOpen(connection) {
    log2("Connection opened", {
      peerId: connection.peerId
    }, {
      F: __dxlog_file2,
      L: 207,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant2(!this._connections.has(connection.peerId), void 0, {
      F: __dxlog_file2,
      L: 208,
      S: this,
      A: [
        "!this._connections.has(connection.peerId as PeerId)",
        ""
      ]
    });
    const reader = connection.readable.getReader();
    const writer = connection.writable.getWriter();
    const connectionEntry = {
      connection,
      reader,
      writer,
      isOpen: true
    };
    this._connections.set(connection.peerId, connectionEntry);
    queueMicrotask(async () => {
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) {
            break;
          }
          this._onMessage(value);
        }
      } catch (err) {
        // Read errors after close are expected; only log while open.
        if (connectionEntry.isOpen) {
          log2.catch(err, void 0, {
            F: __dxlog_file2,
            L: 227,
            S: this,
            C: (f, a) => f(...a)
          });
        }
      }
    });
    log2("emit peer-candidate", {
      peerId: connection.peerId
    }, {
      F: __dxlog_file2,
      L: 232,
      S: this,
      C: (f, a) => f(...a)
    });
    this._emitPeerCandidate(connection);
    this._params.monitor?.recordPeerConnected(connection.peerId);
  }
  // Routes collection-protocol messages to the params callbacks; everything
  // else is passed through to the Repo via the "message" event.
  _onMessage(message) {
    if (isCollectionQueryMessage(message)) {
      this._params.onCollectionStateQueried(message.collectionId, message.senderId);
    } else if (isCollectionStateMessage(message)) {
      this._params.onCollectionStateReceived(message.collectionId, message.senderId, message.state);
    } else {
      this.emit("message", message);
    }
    this._params.monitor?.recordMessageReceived(message);
  }
  /**
   * Trigger doc-synchronizer shared documents set recalculation. Happens on peer-candidate.
   * TODO(y): replace with a proper API call when sharePolicy update becomes supported by automerge-repo
   * Implemented as a disconnect/reconnect cycle: emits peer-disconnected then
   * re-announces the same peer as a candidate.
   */
  _onConnectionAuthScopeChanged(connection) {
    log2("Connection auth scope changed", {
      peerId: connection.peerId
    }, {
      F: __dxlog_file2,
      L: 253,
      S: this,
      C: (f, a) => f(...a)
    });
    const entry = this._connections.get(connection.peerId);
    invariant2(entry, void 0, {
      F: __dxlog_file2,
      L: 255,
      S: this,
      A: [
        "entry",
        ""
      ]
    });
    this.emit("peer-disconnected", {
      peerId: connection.peerId
    });
    this._emitPeerCandidate(connection);
  }
  // Marks the entry closed first so the read/write loops stop logging, then
  // tears down both stream ends (errors logged, not thrown) and forgets it.
  _onConnectionClosed(connection) {
    log2("Connection closed", {
      peerId: connection.peerId
    }, {
      F: __dxlog_file2,
      L: 261,
      S: this,
      C: (f, a) => f(...a)
    });
    const entry = this._connections.get(connection.peerId);
    invariant2(entry, void 0, {
      F: __dxlog_file2,
      L: 263,
      S: this,
      A: [
        "entry",
        ""
      ]
    });
    entry.isOpen = false;
    this.emit("peer-disconnected", {
      peerId: connection.peerId
    });
    this._params.monitor?.recordPeerDisconnected(connection.peerId);
    void entry.reader.cancel().catch((err) => log2.catch(err, void 0, {
      F: __dxlog_file2,
      L: 269,
      S: this,
      C: (f, a) => f(...a)
    }));
    void entry.writer.abort().catch((err) => log2.catch(err, void 0, {
      F: __dxlog_file2,
      L: 270,
      S: this,
      C: (f, a) => f(...a)
    }));
    this._connections.delete(connection.peerId);
  }
  _emitPeerCandidate(connection) {
    this.emit("peer-candidate", {
      peerId: connection.peerId,
      peerMetadata: createEchoPeerMetadata()
    });
  }
};
|
|
778
|
+
// Serialize concurrent calls to the adapter's lifecycle and replicator
// management methods by wrapping each one with the @synchronized decorator.
for (const methodName of ["open", "close", "addReplicator", "removeReplicator"]) {
  _ts_decorate([
    synchronized
  ], EchoNetworkAdapter.prototype, methodName, null);
}
|
|
790
|
+
// Peer metadata advertised for peers connected through the ECHO network adapter.
// TODO(dmaretskyi): Refactor this.
var createEchoPeerMetadata = () => {
  return {
    dxos_peerSource: "EchoNetworkAdapter"
  };
};
|
|
794
|
+
// True when peer metadata was produced by `createEchoPeerMetadata`.
// Tolerates null/undefined metadata via optional chaining.
var isEchoPeerMetadata = (metadata) => {
  return metadata?.dxos_peerSource === "EchoNetworkAdapter";
};
|
|
795
|
+
|
|
796
|
+
// packages/core/echo/echo-pipeline/src/automerge/heads-store.ts
|
|
797
|
+
import { headsEncoding } from "@dxos/indexing";
|
|
798
|
+
/**
 * Persists automerge document heads in a level sublevel, keyed by document id.
 */
var HeadsStore = class {
  constructor({ db }) {
    this._db = db;
  }
  /**
   * Stage the heads for a document onto the caller-provided level batch.
   * The write is committed when the caller commits the batch.
   */
  setHeads(documentId, heads, batch) {
    const options = {
      sublevel: this._db,
      keyEncoding: "utf8",
      valueEncoding: headsEncoding
    };
    batch.put(documentId, heads, options);
  }
  // TODO(dmaretskyi): Make batched.
  async getHeads(documentIds) {
    const options = {
      keyEncoding: "utf8",
      valueEncoding: headsEncoding
    };
    return this._db.getMany(documentIds, options);
  }
};
|
|
817
|
+
|
|
818
|
+
// packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts
|
|
819
|
+
// TypeScript decorator helper (compiler-emitted). With 2 arguments it decorates
// a class; with 3-4 it decorates a member, resolving the property descriptor
// when `desc` is null. Delegates to Reflect.decorate when a metadata polyfill
// is installed, otherwise applies decorators right-to-left.
function _ts_decorate2(decorators, target, key, desc) {
  const argCount = arguments.length;
  let result;
  if (argCount < 3) {
    result = target;
  } else if (desc === null) {
    result = desc = Object.getOwnPropertyDescriptor(target, key);
  } else {
    result = desc;
  }
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") {
    result = Reflect.decorate(decorators, target, key, desc);
  } else {
    for (let i = decorators.length - 1; i >= 0; i--) {
      const decorator = decorators[i];
      if (decorator) {
        const applied = argCount < 3 ? decorator(result) : argCount > 3 ? decorator(target, key, result) : decorator(target, key);
        result = applied || result;
      }
    }
  }
  if (argCount > 3 && result) {
    Object.defineProperty(target, key, result);
  }
  return result;
}
|
|
829
|
+
var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts";
|
|
830
|
+
/**
 * Hosts an automerge Repo backed by LevelDB storage and the ECHO network
 * adapter, and keeps per-collection sync state via CollectionSynchronizer.
 *
 * Fix: `_open()` previously awaited `this._echoNetworkAdapter.open()` twice
 * (before and after opening the collection synchronizer); the redundant
 * second call was removed.
 */
var AutomergeHost = class extends Resource4 {
  constructor({ db, indexMetadataStore, dataMonitor }) {
    super();
    this._collectionSynchronizer = new CollectionSynchronizer({
      queryCollectionState: this._queryCollectionState.bind(this),
      sendCollectionState: this._sendCollectionState.bind(this),
      shouldSyncCollection: this._shouldSyncCollection.bind(this)
    });
    this._db = db;
    // Storage adapter hooks let us persist heads + mark index entries dirty
    // atomically with the automerge save batch.
    this._storage = new LevelDBStorageAdapter({
      db: db.sublevel("automerge"),
      callbacks: {
        beforeSave: async (params) => this._beforeSave(params),
        afterSave: async (key) => this._afterSave(key)
      },
      monitor: dataMonitor
    });
    this._echoNetworkAdapter = new EchoNetworkAdapter({
      getContainingSpaceForDocument: this._getContainingSpaceForDocument.bind(this),
      isDocumentInRemoteCollection: this._isDocumentInRemoteCollection.bind(this),
      onCollectionStateQueried: this._onCollectionStateQueried.bind(this),
      onCollectionStateReceived: this._onCollectionStateReceived.bind(this),
      monitor: dataMonitor
    });
    this._headsStore = new HeadsStore({
      db: db.sublevel("heads")
    });
    this._indexMetadataStore = indexMetadataStore;
  }
  async _open() {
    this._peerId = `host-${PublicKey.random().toHex()}`;
    await this._storage.open?.();
    this._repo = new Repo({
      peerId: this._peerId,
      sharePolicy: this._sharePolicy.bind(this),
      storage: this._storage,
      network: [
        // Upstream swarm.
        this._echoNetworkAdapter
      ]
    });
    Event2.wrap(this._echoNetworkAdapter, "peer-candidate").on(this._ctx, (e) => this._onPeerConnected(e.peerId));
    Event2.wrap(this._echoNetworkAdapter, "peer-disconnected").on(this._ctx, (e) => this._onPeerDisconnected(e.peerId));
    this._collectionSynchronizer.remoteStateUpdated.on(this._ctx, ({ collectionId, peerId }) => {
      this._onRemoteCollectionStateUpdated(collectionId, peerId);
    });
    await this._echoNetworkAdapter.open();
    await this._collectionSynchronizer.open();
    // NOTE: a redundant second `this._echoNetworkAdapter.open()` call was removed here.
    await this._echoNetworkAdapter.whenConnected();
  }
  async _close() {
    await this._collectionSynchronizer.close();
    await this._storage.close?.();
    await this._echoNetworkAdapter.close();
    await this._ctx.dispose();
  }
  /**
   * @deprecated To be abstracted away.
   */
  get repo() {
    return this._repo;
  }
  get peerId() {
    return this._peerId;
  }
  get loadedDocsCount() {
    return Object.keys(this._repo.handles).length;
  }
  async addReplicator(replicator) {
    await this._echoNetworkAdapter.addReplicator(replicator);
  }
  async removeReplicator(replicator) {
    await this._echoNetworkAdapter.removeReplicator(replicator);
  }
  /**
   * Loads the document handle from the repo and waits for it to be ready.
   */
  async loadDoc(ctx, documentId, opts) {
    let handle;
    if (typeof documentId === "string") {
      handle = this._repo.handles[documentId];
    }
    if (!handle) {
      handle = this._repo.find(documentId);
    }
    if (!handle.isReady()) {
      if (!opts?.timeout) {
        await cancelWithContext(ctx, handle.whenReady());
      } else {
        await cancelWithContext(ctx, asyncTimeout(handle.whenReady(), opts.timeout));
      }
    }
    return handle;
  }
  /**
   * Create new persisted document.
   */
  createDoc(initialValue, opts) {
    if (opts?.preserveHistory) {
      if (!isAutomerge(initialValue)) {
        throw new TypeError("Initial value must be an Automerge document");
      }
      return this._repo.import(save(initialValue));
    } else {
      return this._repo.create(initialValue);
    }
  }
  // Waits until every requested document has the requested heads locally,
  // then flushes the loaded documents to storage.
  async waitUntilHeadsReplicated(heads) {
    const entries = heads.entries;
    if (!entries?.length) {
      return;
    }
    const documentIds = entries.map((entry) => entry.documentId);
    const documentHeads = await this.getHeads(documentIds);
    const headsToWait = entries.filter((entry, index) => {
      const targetHeads = entry.heads;
      if (!targetHeads || targetHeads.length === 0) {
        return false;
      }
      const currentHeads = documentHeads[index];
      return !(currentHeads !== null && headsEquals(currentHeads, targetHeads));
    });
    if (headsToWait.length > 0) {
      await Promise.all(headsToWait.map(async (entry, index) => {
        const handle = await this.loadDoc(Context.default(void 0, {
          F: __dxlog_file3,
          L: 227
        }), entry.documentId);
        await waitForHeads(handle, entry.heads);
      }));
    }
    await this._repo.flush(documentIds.filter((documentId) => !!this._repo.handles[documentId]));
  }
  // Recomputes and persists stored heads for the given documents.
  async reIndexHeads(documentIds) {
    for (const documentId of documentIds) {
      log3.info("re-indexing heads for document", {
        documentId
      }, {
        F: __dxlog_file3,
        L: 239,
        S: this,
        C: (f, a) => f(...a)
      });
      const handle = this._repo.find(documentId);
      await handle.whenReady([
        "ready",
        "requesting"
      ]);
      if (handle.inState([
        "requesting"
      ])) {
        log3.warn("document is not available locally, skipping", {
          documentId
        }, {
          F: __dxlog_file3,
          L: 243,
          S: this,
          C: (f, a) => f(...a)
        });
        continue;
      }
      const doc = handle.docSync();
      invariant3(doc, void 0, {
        F: __dxlog_file3,
        L: 248,
        S: this,
        A: [
          "doc",
          ""
        ]
      });
      const heads = getHeads(doc);
      const batch = this._db.batch();
      this._headsStore.setHeads(documentId, heads, batch);
      await batch.write();
    }
    log3.info("done re-indexing heads", void 0, {
      F: __dxlog_file3,
      L: 255,
      S: this,
      C: (f, a) => f(...a)
    });
  }
  // TODO(dmaretskyi): Share based on HALO permissions and space affinity.
  // Hosts, running in the worker, don't share documents unless requested by other peers.
  // NOTE: If both peers return sharePolicy=false the replication will not happen
  // https://github.com/automerge/automerge-repo/pull/292
  async _sharePolicy(peerId, documentId) {
    if (peerId.startsWith("client-")) {
      return false;
    }
    if (!documentId) {
      return false;
    }
    const peerMetadata = this._repo.peerMetadataByPeerId[peerId];
    if (isEchoPeerMetadata(peerMetadata)) {
      return this._echoNetworkAdapter.shouldAdvertise(peerId, {
        documentId
      });
    }
    return false;
  }
  // Persist heads and mark object index entries dirty in the same batch as the save.
  async _beforeSave({ path, batch }) {
    const handle = this._repo.handles[path[0]];
    if (!handle) {
      return;
    }
    const doc = handle.docSync();
    if (!doc) {
      return;
    }
    const heads = getHeads(doc);
    this._headsStore.setHeads(handle.documentId, heads, batch);
    const spaceKey = getSpaceKeyFromDoc(doc) ?? void 0;
    const objectIds = Object.keys(doc.objects ?? {});
    const encodedIds = objectIds.map((objectId) => objectPointerCodec.encode({
      documentId: handle.documentId,
      objectId,
      spaceKey
    }));
    const idToLastHash = new Map(encodedIds.map((id) => [
      id,
      heads
    ]));
    this._indexMetadataStore.markDirty(idToLastHash, batch);
  }
  _shouldSyncCollection(collectionId, peerId) {
    const peerMetadata = this._repo.peerMetadataByPeerId[peerId];
    if (isEchoPeerMetadata(peerMetadata)) {
      return this._echoNetworkAdapter.shouldSyncCollection(peerId, {
        collectionId
      });
    }
    return false;
  }
  /**
   * Called by AutomergeStorageAdapter after levelDB batch commit.
   */
  async _afterSave(path) {
    this._indexMetadataStore.notifyMarkedDirty();
    const documentId = path[0];
    const document = this._repo.handles[documentId]?.docSync();
    if (document) {
      const heads = getHeads(document);
      this._onHeadsChanged(documentId, heads);
    }
  }
  _automergePeers() {
    return this._repo.peers;
  }
  // True when any registered collection reports that the peer already has the document.
  async _isDocumentInRemoteCollection(params) {
    for (const collectionId of this._collectionSynchronizer.getRegisteredCollectionIds()) {
      const remoteCollections = this._collectionSynchronizer.getRemoteCollectionStates(collectionId);
      const remotePeerDocs = remoteCollections.get(params.peerId)?.documents;
      if (remotePeerDocs && params.documentId in remotePeerDocs) {
        return true;
      }
    }
    return false;
  }
  async _getContainingSpaceForDocument(documentId) {
    const doc = this._repo.handles[documentId]?.docSync();
    if (!doc) {
      return null;
    }
    const spaceKeyHex = getSpaceKeyFromDoc(doc);
    if (!spaceKeyHex) {
      return null;
    }
    return PublicKey.from(spaceKeyHex);
  }
  /**
   * Flush documents to disk.
   */
  async flush({ documentIds } = {}) {
    await this._repo.flush(documentIds);
  }
  // Returns heads per document: from the loaded handle when available,
  // otherwise from the persisted heads store (undefined when unknown).
  async getHeads(documentIds) {
    const result = [];
    const storeRequestIds = [];
    const storeResultIndices = [];
    for (const documentId of documentIds) {
      const doc = this._repo.handles[documentId]?.docSync();
      if (doc) {
        result.push(getHeads(doc));
      } else {
        storeRequestIds.push(documentId);
        storeResultIndices.push(result.length);
        result.push(void 0);
      }
    }
    if (storeRequestIds.length > 0) {
      const storedHeads = await this._headsStore.getHeads(storeRequestIds);
      for (let i = 0; i < storedHeads.length; i++) {
        result[storeResultIndices[i]] = storedHeads[i];
      }
    }
    return result;
  }
  //
  // Collection sync.
  //
  getLocalCollectionState(collectionId) {
    return this._collectionSynchronizer.getLocalCollectionState(collectionId);
  }
  getRemoteCollectionStates(collectionId) {
    return this._collectionSynchronizer.getRemoteCollectionStates(collectionId);
  }
  refreshCollection(collectionId) {
    this._collectionSynchronizer.refreshCollection(collectionId);
  }
  // Per-peer count of documents whose heads differ from our local state.
  async getCollectionSyncState(collectionId) {
    const result = {
      peers: []
    };
    const localState = this.getLocalCollectionState(collectionId);
    const remoteState = this.getRemoteCollectionStates(collectionId);
    if (!localState) {
      return result;
    }
    for (const [peerId, state] of remoteState) {
      const diff = diffCollectionState(localState, state);
      result.peers.push({
        peerId,
        differentDocuments: diff.different.length
      });
    }
    return result;
  }
  /**
   * Update the local collection state based on the locally stored document heads.
   */
  async updateLocalCollectionState(collectionId, documentIds) {
    const heads = await this.getHeads(documentIds);
    const documents = Object.fromEntries(heads.map((heads2, index) => [
      documentIds[index],
      heads2 ?? []
    ]));
    this._collectionSynchronizer.setLocalCollectionState(collectionId, {
      documents
    });
  }
  _onCollectionStateQueried(collectionId, peerId) {
    this._collectionSynchronizer.onCollectionStateQueried(collectionId, peerId);
  }
  _onCollectionStateReceived(collectionId, peerId, state) {
    this._collectionSynchronizer.onRemoteStateReceived(collectionId, peerId, decodeCollectionState(state));
  }
  _queryCollectionState(collectionId, peerId) {
    this._echoNetworkAdapter.queryCollectionState(collectionId, peerId);
  }
  _sendCollectionState(collectionId, peerId, state) {
    this._echoNetworkAdapter.sendCollectionState(collectionId, peerId, encodeCollectionState(state));
  }
  _onPeerConnected(peerId) {
    this._collectionSynchronizer.onConnectionOpen(peerId);
  }
  _onPeerDisconnected(peerId) {
    this._collectionSynchronizer.onConnectionClosed(peerId);
  }
  // Kicks off replication (repo.find) for documents that differ after a
  // remote collection-state update.
  _onRemoteCollectionStateUpdated(collectionId, peerId) {
    const localState = this._collectionSynchronizer.getLocalCollectionState(collectionId);
    const remoteState = this._collectionSynchronizer.getRemoteCollectionStates(collectionId).get(peerId);
    if (!localState || !remoteState) {
      return;
    }
    const { different } = diffCollectionState(localState, remoteState);
    if (different.length === 0) {
      return;
    }
    log3.info("replication documents after collection sync", {
      count: different.length
    }, {
      F: __dxlog_file3,
      L: 475,
      S: this,
      C: (f, a) => f(...a)
    });
    for (const documentId of different) {
      this._repo.find(documentId);
    }
  }
  // Propagates new heads for a saved document into every collection that tracks it.
  _onHeadsChanged(documentId, heads) {
    for (const collectionId of this._collectionSynchronizer.getRegisteredCollectionIds()) {
      const state = this._collectionSynchronizer.getLocalCollectionState(collectionId);
      if (state?.documents[documentId]) {
        const newState = structuredClone(state);
        newState.documents[documentId] = heads;
        this._collectionSynchronizer.setLocalCollectionState(collectionId, newState);
      }
    }
  }
};
|
|
1224
|
+
// Tracing instrumentation: expose the peer id and the peer list in
// diagnostics, record flush() as a browser-timeline span, and register the
// class as a traced resource.
const hostPrototype = AutomergeHost.prototype;
_ts_decorate2([
  trace.info()
], hostPrototype, "_peerId", void 0);
_ts_decorate2([
  trace.info({ depth: null })
], hostPrototype, "_automergePeers", null);
_ts_decorate2([
  trace.span({ showInBrowserTimeline: true })
], hostPrototype, "flush", null);
AutomergeHost = _ts_decorate2([
  trace.resource()
], AutomergeHost);
|
|
1240
|
+
// Extracts the space key recorded in an automerge document, preferring the
// current location (`access.spaceKey`) over the legacy `experimental_spaceKey`
// field. Returns the key coerced to a string, or null when absent.
var getSpaceKeyFromDoc = (doc) => {
  const spaceKey = doc.access?.spaceKey ?? doc.experimental_spaceKey;
  return spaceKey == null ? null : String(spaceKey);
};
|
|
1247
|
+
// Resolves once every change hash in `heads` is present in the handle's
// document, re-checking on each "change" event.
var waitForHeads = async (handle, heads) => {
  const pending = new Set(heads);
  await handle.whenReady();
  await Event2.wrap(handle, "change").waitForCondition(() => {
    for (const hash of pending) {
      if (changeIsPresentInDoc(handle.docSync(), hash)) {
        pending.delete(hash);
      }
    }
    return pending.size === 0;
  });
};
|
|
1259
|
+
// True when the automerge backend already holds a change with the given hash.
var changeIsPresentInDoc = (doc, changeHash) => {
  const change = getBackend(doc).getChangeByHash(changeHash);
  return Boolean(change);
};
|
|
1262
|
+
// Validates an incoming collection-state payload. The wire format is the
// in-memory format, so after the shape check the value passes through as-is.
var decodeCollectionState = (state) => {
  const isObjectShaped = typeof state === "object" && state !== null;
  invariant3(isObjectShaped, "Invalid state", {
    F: __dxlog_file3,
    L: 528,
    S: void 0,
    A: [
      "typeof state === 'object' && state !== null",
      "'Invalid state'"
    ]
  });
  return state;
};
|
|
1274
|
+
// Identity transform for now - the wire format matches the in-memory state.
var encodeCollectionState = (state) => state;
|
|
1277
|
+
|
|
1278
|
+
// packages/core/echo/echo-pipeline/src/automerge/space-collection.ts
|
|
1279
|
+
import { invariant as invariant4 } from "@dxos/invariant";
|
|
1280
|
+
import { SpaceId } from "@dxos/keys";
|
|
1281
|
+
var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/space-collection.ts";
|
|
1282
|
+
// A space's collection id is its space id under the `space:` prefix.
var deriveCollectionIdFromSpaceId = (spaceId) => {
  return `space:${spaceId}`;
};
|
|
1283
|
+
// Inverse of `deriveCollectionIdFromSpaceId`: strips the `space:` prefix and
// asserts that what remains is a well-formed space id.
var getSpaceIdFromCollectionId = (collectionId) => {
  const candidate = collectionId.replace(/^space:/, "");
  invariant4(SpaceId.isValid(candidate), void 0, {
    F: __dxlog_file4,
    L: 12,
    S: void 0,
    A: [
      "SpaceId.isValid(spaceId)",
      ""
    ]
  });
  return candidate;
};
|
|
1296
|
+
|
|
1297
|
+
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
|
|
1298
|
+
import { invariant as invariant6 } from "@dxos/invariant";
|
|
1299
|
+
import { PublicKey as PublicKey2 } from "@dxos/keys";
|
|
1300
|
+
import { log as log5 } from "@dxos/log";
|
|
1301
|
+
import { ComplexSet, defaultMap as defaultMap2 } from "@dxos/util";
|
|
1302
|
+
|
|
1303
|
+
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts
|
|
1304
|
+
import * as A2 from "@dxos/automerge/automerge";
|
|
1305
|
+
import { cbor } from "@dxos/automerge/automerge-repo";
|
|
1306
|
+
import { Resource as Resource5 } from "@dxos/context";
|
|
1307
|
+
import { invariant as invariant5 } from "@dxos/invariant";
|
|
1308
|
+
import { log as log4 } from "@dxos/log";
|
|
1309
|
+
import { AutomergeReplicator } from "@dxos/teleport-extension-automerge-replicator";
|
|
1310
|
+
var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts";
|
|
1311
|
+
// Default construction of the teleport replicator extension; `params` is the
// argument tuple forwarded to the AutomergeReplicator constructor.
var DEFAULT_FACTORY = (params) => {
  return new AutomergeReplicator(...params);
};
|
|
1312
|
+
/**
 * Bridges a teleport AutomergeReplicator extension to a pair of web streams:
 * incoming sync messages are decoded onto `readable`, and messages written to
 * `writable` are encoded and sent through the extension. Messages only flow
 * while the connection is enabled.
 */
var MeshReplicatorConnection = class extends Resource5 {
  constructor(params) {
    super();
    this._params = params;
    this.remoteDeviceKey = null;
    this._remotePeerId = null;
    this._isEnabled = false;
    // Captured so `onSyncMessage` below can push decoded messages into `readable`.
    let incomingController;
    this.readable = new ReadableStream({
      start: (controller) => {
        incomingController = controller;
        this._ctx.onDispose(() => controller.close());
      }
    });
    this.writable = new WritableStream({
      write: async (message, controller) => {
        invariant5(this._isEnabled, "Writing to a disabled connection", {
          F: __dxlog_file5,
          L: 49,
          S: this,
          A: [
            "this._isEnabled",
            "'Writing to a disabled connection'"
          ]
        });
        try {
          logSendSync(message);
          await this.replicatorExtension.sendSyncMessage({
            payload: cbor.encode(message)
          });
        } catch (err) {
          controller.error(err);
          this._disconnectIfEnabled();
        }
      }
    });
    const makeReplicator = this._params.replicatorFactory ?? DEFAULT_FACTORY;
    this.replicatorExtension = makeReplicator([
      {
        peerId: this._params.ownPeerId
      },
      {
        onStartReplication: async (info, remotePeerId) => {
          this.remoteDeviceKey = remotePeerId;
          this._remotePeerId = info.id;
          log4("onStartReplication", {
            id: info.id,
            thisPeerId: this.peerId,
            remotePeerId: remotePeerId.toHex()
          }, {
            F: __dxlog_file5,
            L: 84,
            S: this,
            C: (f, a) => f(...a)
          });
          this._params.onRemoteConnected();
        },
        onSyncMessage: async ({ payload }) => {
          if (!this._isEnabled) {
            return;
          }
          incomingController.enqueue(cbor.decode(payload));
        },
        onClose: async () => {
          this._disconnectIfEnabled();
        }
      }
    ]);
  }
  // Notify the owner only if the connection was actively replicating.
  _disconnectIfEnabled() {
    if (this._isEnabled) {
      this._params.onRemoteDisconnected();
    }
  }
  get peerId() {
    invariant5(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file5,
      L: 110,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    return this._remotePeerId;
  }
  async shouldAdvertise(params) {
    return this._params.shouldAdvertise(params);
  }
  shouldSyncCollection(params) {
    return this._params.shouldSyncCollection(params);
  }
  /**
   * Start exchanging messages with the remote peer.
   * Call after the remote peer has connected.
   */
  enable() {
    invariant5(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file5,
      L: 127,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    this._isEnabled = true;
  }
  /**
   * Stop exchanging messages with the remote peer.
   */
  disable() {
    this._isEnabled = false;
  }
};
|
|
1428
|
+
// Log an outgoing sync message. The payload is built lazily inside a thunk so
// the sync-message decode only runs when the log level is active.
var logSendSync = (message) => {
  log4("sendSyncMessage", () => {
    const decoded = message.type === "sync" && message.data ? A2.decodeSyncMessage(message.data) : void 0;
    return {
      sync: decoded && {
        headsLength: decoded.heads.length,
        requesting: decoded.need.length > 0,
        sendingChanges: decoded.changes.length > 0
      },
      type: message.type,
      from: message.senderId,
      to: message.targetId
    };
  }, {
    F: __dxlog_file5,
    L: 140,
    S: void 0,
    C: (f, a) => f(...a)
  });
};
|
|
1448
|
+
|
|
1449
|
+
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
|
|
1450
|
+
var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts";
|
|
1451
|
+
var MeshEchoReplicator = class {
|
|
1452
|
+
constructor() {
|
|
1453
|
+
this._connections = /* @__PURE__ */ new Set();
|
|
1454
|
+
/**
|
|
1455
|
+
* Using automerge peerId as a key.
|
|
1456
|
+
*/
|
|
1457
|
+
this._connectionsPerPeer = /* @__PURE__ */ new Map();
|
|
1458
|
+
/**
|
|
1459
|
+
* spaceId -> deviceKey[]
|
|
1460
|
+
*/
|
|
1461
|
+
this._authorizedDevices = /* @__PURE__ */ new Map();
|
|
1462
|
+
this._context = null;
|
|
1463
|
+
}
|
|
1464
|
+
async connect(context) {
|
|
1465
|
+
this._context = context;
|
|
1466
|
+
}
|
|
1467
|
+
async disconnect() {
|
|
1468
|
+
for (const connection of this._connectionsPerPeer.values()) {
|
|
1469
|
+
this._context?.onConnectionClosed(connection);
|
|
1470
|
+
}
|
|
1471
|
+
for (const connection of this._connections) {
|
|
1472
|
+
await connection.close();
|
|
1473
|
+
}
|
|
1474
|
+
this._connections.clear();
|
|
1475
|
+
this._connectionsPerPeer.clear();
|
|
1476
|
+
this._context = null;
|
|
1477
|
+
}
|
|
1478
|
+
createExtension(extensionFactory) {
|
|
1479
|
+
invariant6(this._context, void 0, {
|
|
1480
|
+
F: __dxlog_file6,
|
|
1481
|
+
L: 56,
|
|
1482
|
+
S: this,
|
|
1483
|
+
A: [
|
|
1484
|
+
"this._context",
|
|
1485
|
+
""
|
|
1486
|
+
]
|
|
1487
|
+
});
|
|
1488
|
+
const connection = new MeshReplicatorConnection({
|
|
1489
|
+
ownPeerId: this._context.peerId,
|
|
1490
|
+
replicatorFactory: extensionFactory,
|
|
1491
|
+
onRemoteConnected: async () => {
|
|
1492
|
+
log5("onRemoteConnected", {
|
|
1493
|
+
peerId: connection.peerId
|
|
1494
|
+
}, {
|
|
1495
|
+
F: __dxlog_file6,
|
|
1496
|
+
L: 62,
|
|
1497
|
+
S: this,
|
|
1498
|
+
C: (f, a) => f(...a)
|
|
1499
|
+
});
|
|
1500
|
+
invariant6(this._context, void 0, {
|
|
1501
|
+
F: __dxlog_file6,
|
|
1502
|
+
L: 63,
|
|
1503
|
+
S: this,
|
|
1504
|
+
A: [
|
|
1505
|
+
"this._context",
|
|
1506
|
+
""
|
|
1507
|
+
]
|
|
1508
|
+
});
|
|
1509
|
+
if (this._connectionsPerPeer.has(connection.peerId)) {
|
|
1510
|
+
this._context.onConnectionAuthScopeChanged(connection);
|
|
1511
|
+
} else {
|
|
1512
|
+
this._connectionsPerPeer.set(connection.peerId, connection);
|
|
1513
|
+
this._context.onConnectionOpen(connection);
|
|
1514
|
+
connection.enable();
|
|
1515
|
+
}
|
|
1516
|
+
},
|
|
1517
|
+
onRemoteDisconnected: async () => {
|
|
1518
|
+
log5("onRemoteDisconnected", {
|
|
1519
|
+
peerId: connection.peerId
|
|
1520
|
+
}, {
|
|
1521
|
+
F: __dxlog_file6,
|
|
1522
|
+
L: 74,
|
|
1523
|
+
S: this,
|
|
1524
|
+
C: (f, a) => f(...a)
|
|
1525
|
+
});
|
|
1526
|
+
this._context?.onConnectionClosed(connection);
|
|
1527
|
+
this._connectionsPerPeer.delete(connection.peerId);
|
|
1528
|
+
connection.disable();
|
|
1529
|
+
this._connections.delete(connection);
|
|
1530
|
+
},
|
|
1531
|
+
shouldAdvertise: async (params) => {
|
|
1532
|
+
log5("shouldAdvertise", {
|
|
1533
|
+
peerId: connection.peerId,
|
|
1534
|
+
documentId: params.documentId
|
|
1535
|
+
}, {
|
|
1536
|
+
F: __dxlog_file6,
|
|
1537
|
+
L: 81,
|
|
1538
|
+
S: this,
|
|
1539
|
+
C: (f, a) => f(...a)
|
|
1540
|
+
});
|
|
1541
|
+
invariant6(this._context, void 0, {
|
|
1542
|
+
F: __dxlog_file6,
|
|
1543
|
+
L: 82,
|
|
1544
|
+
S: this,
|
|
1545
|
+
A: [
|
|
1546
|
+
"this._context",
|
|
1547
|
+
""
|
|
1548
|
+
]
|
|
1549
|
+
});
|
|
1550
|
+
try {
|
|
1551
|
+
const spaceKey = await this._context.getContainingSpaceForDocument(params.documentId);
|
|
1552
|
+
if (!spaceKey) {
|
|
1553
|
+
const remoteDocumentExists = await this._context.isDocumentInRemoteCollection({
|
|
1554
|
+
documentId: params.documentId,
|
|
1555
|
+
peerId: connection.peerId
|
|
1556
|
+
});
|
|
1557
|
+
log5("document not found locally for share policy check, accepting the remote document", {
|
|
1558
|
+
peerId: connection.peerId,
|
|
1559
|
+
documentId: params.documentId,
|
|
1560
|
+
remoteDocumentExists
|
|
1561
|
+
}, {
|
|
1562
|
+
F: __dxlog_file6,
|
|
1563
|
+
L: 90,
|
|
1564
|
+
S: this,
|
|
1565
|
+
C: (f, a) => f(...a)
|
|
1566
|
+
});
|
|
1567
|
+
return remoteDocumentExists;
|
|
1568
|
+
}
|
|
1569
|
+
const spaceId = await createIdFromSpaceKey(spaceKey);
|
|
1570
|
+
const authorizedDevices = this._authorizedDevices.get(spaceId);
|
|
1571
|
+
if (!connection.remoteDeviceKey) {
|
|
1572
|
+
log5("device key not found for share policy check", {
|
|
1573
|
+
peerId: connection.peerId,
|
|
1574
|
+
documentId: params.documentId
|
|
1575
|
+
}, {
|
|
1576
|
+
F: __dxlog_file6,
|
|
1577
|
+
L: 106,
|
|
1578
|
+
S: this,
|
|
1579
|
+
C: (f, a) => f(...a)
|
|
1580
|
+
});
|
|
1581
|
+
return false;
|
|
1582
|
+
}
|
|
1583
|
+
const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
|
|
1584
|
+
log5("share policy check", {
|
|
1585
|
+
localPeer: this._context.peerId,
|
|
1586
|
+
remotePeer: connection.peerId,
|
|
1587
|
+
documentId: params.documentId,
|
|
1588
|
+
deviceKey: connection.remoteDeviceKey,
|
|
1589
|
+
spaceKey,
|
|
1590
|
+
isAuthorized
|
|
1591
|
+
}, {
|
|
1592
|
+
F: __dxlog_file6,
|
|
1593
|
+
L: 114,
|
|
1594
|
+
S: this,
|
|
1595
|
+
C: (f, a) => f(...a)
|
|
1596
|
+
});
|
|
1597
|
+
return isAuthorized;
|
|
1598
|
+
} catch (err) {
|
|
1599
|
+
log5.catch(err, void 0, {
|
|
1600
|
+
F: __dxlog_file6,
|
|
1601
|
+
L: 124,
|
|
1602
|
+
S: this,
|
|
1603
|
+
C: (f, a) => f(...a)
|
|
1604
|
+
});
|
|
1605
|
+
return false;
|
|
1606
|
+
}
|
|
1607
|
+
},
|
|
1608
|
+
shouldSyncCollection: ({ collectionId }) => {
|
|
1609
|
+
const spaceId = getSpaceIdFromCollectionId(collectionId);
|
|
1610
|
+
const authorizedDevices = this._authorizedDevices.get(spaceId);
|
|
1611
|
+
if (!connection.remoteDeviceKey) {
|
|
1612
|
+
log5("device key not found for collection sync check", {
|
|
1613
|
+
peerId: connection.peerId,
|
|
1614
|
+
collectionId
|
|
1615
|
+
}, {
|
|
1616
|
+
F: __dxlog_file6,
|
|
1617
|
+
L: 134,
|
|
1618
|
+
S: this,
|
|
1619
|
+
C: (f, a) => f(...a)
|
|
1620
|
+
});
|
|
1621
|
+
return false;
|
|
1622
|
+
}
|
|
1623
|
+
const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
|
|
1624
|
+
return isAuthorized;
|
|
1625
|
+
}
|
|
1626
|
+
});
|
|
1627
|
+
this._connections.add(connection);
|
|
1628
|
+
return connection.replicatorExtension;
|
|
1629
|
+
}
|
|
1630
|
+
async authorizeDevice(spaceKey, deviceKey) {
|
|
1631
|
+
log5("authorizeDevice", {
|
|
1632
|
+
spaceKey,
|
|
1633
|
+
deviceKey
|
|
1634
|
+
}, {
|
|
1635
|
+
F: __dxlog_file6,
|
|
1636
|
+
L: 151,
|
|
1637
|
+
S: this,
|
|
1638
|
+
C: (f, a) => f(...a)
|
|
1639
|
+
});
|
|
1640
|
+
const spaceId = await createIdFromSpaceKey(spaceKey);
|
|
1641
|
+
defaultMap2(this._authorizedDevices, spaceId, () => new ComplexSet(PublicKey2.hash)).add(deviceKey);
|
|
1642
|
+
for (const connection of this._connections) {
|
|
1643
|
+
if (connection.remoteDeviceKey && connection.remoteDeviceKey.equals(deviceKey)) {
|
|
1644
|
+
if (this._connectionsPerPeer.has(connection.peerId)) {
|
|
1645
|
+
this._context?.onConnectionAuthScopeChanged(connection);
|
|
1646
|
+
}
|
|
1647
|
+
}
|
|
1648
|
+
}
|
|
1649
|
+
}
|
|
1650
|
+
};
|
|
1651
|
+
|
|
1652
|
+
// packages/core/echo/echo-pipeline/src/automerge/echo-data-monitor.ts
|
|
1653
|
+
import { trace as trace2 } from "@dxos/tracing";
|
|
1654
|
+
import { CircularBuffer, mapValues, SlidingWindowSummary } from "@dxos/util";
|
|
1655
|
+
function _ts_decorate3(decorators, target, key, desc) {
|
|
1656
|
+
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
1657
|
+
if (typeof Reflect === "object" && typeof Reflect.decorate === "function")
|
|
1658
|
+
r = Reflect.decorate(decorators, target, key, desc);
|
|
1659
|
+
else
|
|
1660
|
+
for (var i = decorators.length - 1; i >= 0; i--)
|
|
1661
|
+
if (d = decorators[i])
|
|
1662
|
+
r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
1663
|
+
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
1664
|
+
}
|
|
1665
|
+
var PER_SECOND_RATE_AVG_WINDOW_SIZE = 5;
|
|
1666
|
+
var DEFAULT_AVG_WINDOW_SIZE = 25;
|
|
1667
|
+
var EchoDataMonitor = class {
|
|
1668
|
+
constructor(_params = {
|
|
1669
|
+
timeSeriesLength: 30
|
|
1670
|
+
}) {
|
|
1671
|
+
this._params = _params;
|
|
1672
|
+
this._lastTick = 0;
|
|
1673
|
+
this._activeCounters = createLocalCounters();
|
|
1674
|
+
this._localTimeSeries = createLocalTimeSeries();
|
|
1675
|
+
this._storageAverages = createStorageAverages();
|
|
1676
|
+
this._replicationAverages = createNetworkAverages();
|
|
1677
|
+
this._sizeByMessageType = {};
|
|
1678
|
+
this._lastReceivedMessages = new CircularBuffer(100);
|
|
1679
|
+
this._lastSentMessages = new CircularBuffer(100);
|
|
1680
|
+
this._connectionsCount = 0;
|
|
1681
|
+
}
|
|
1682
|
+
tick(timeMs) {
|
|
1683
|
+
this._advanceTimeWindow(timeMs - this._lastTick);
|
|
1684
|
+
this._lastTick = timeMs;
|
|
1685
|
+
}
|
|
1686
|
+
computeStats() {
|
|
1687
|
+
return {
|
|
1688
|
+
meta: {
|
|
1689
|
+
rateAverageOverSeconds: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
1690
|
+
},
|
|
1691
|
+
storage: {
|
|
1692
|
+
reads: {
|
|
1693
|
+
payloadSize: this._storageAverages.loadedChunkSize.average(),
|
|
1694
|
+
opDuration: this._storageAverages.loadDuration.average(),
|
|
1695
|
+
countPerSecond: this._storageAverages.loadsPerSecond.average()
|
|
1696
|
+
},
|
|
1697
|
+
writes: {
|
|
1698
|
+
payloadSize: this._storageAverages.storedChunkSize.average(),
|
|
1699
|
+
opDuration: this._storageAverages.storeDuration.average(),
|
|
1700
|
+
countPerSecond: this._storageAverages.storesPerSecond.average()
|
|
1701
|
+
}
|
|
1702
|
+
},
|
|
1703
|
+
replicator: {
|
|
1704
|
+
connections: this._connectionsCount,
|
|
1705
|
+
receivedMessages: {
|
|
1706
|
+
payloadSize: this._replicationAverages.receivedMessageSize.average(),
|
|
1707
|
+
countPerSecond: this._replicationAverages.receivedPerSecond.average()
|
|
1708
|
+
},
|
|
1709
|
+
sentMessages: {
|
|
1710
|
+
payloadSize: this._replicationAverages.sentMessageSize.average(),
|
|
1711
|
+
opDuration: this._replicationAverages.sendDuration.average(),
|
|
1712
|
+
countPerSecond: this._replicationAverages.sentPerSecond.average(),
|
|
1713
|
+
failedPerSecond: this._replicationAverages.sendsFailedPerSecond.average()
|
|
1714
|
+
},
|
|
1715
|
+
countByMessageType: this._computeMessageHistogram("type"),
|
|
1716
|
+
avgSizeByMessageType: mapValues(this._sizeByMessageType, (summary) => summary.average())
|
|
1717
|
+
}
|
|
1718
|
+
};
|
|
1719
|
+
}
|
|
1720
|
+
get connectionsCount() {
|
|
1721
|
+
return this._connectionsCount;
|
|
1722
|
+
}
|
|
1723
|
+
/**
|
|
1724
|
+
* @internal
|
|
1725
|
+
*/
|
|
1726
|
+
get lastPerSecondStats() {
|
|
1727
|
+
return this._lastCompleteCounters;
|
|
1728
|
+
}
|
|
1729
|
+
/**
|
|
1730
|
+
* @internal
|
|
1731
|
+
*/
|
|
1732
|
+
get timeSeries() {
|
|
1733
|
+
return {
|
|
1734
|
+
...this._localTimeSeries.storage,
|
|
1735
|
+
...this._localTimeSeries.replication
|
|
1736
|
+
};
|
|
1737
|
+
}
|
|
1738
|
+
/**
|
|
1739
|
+
* @internal
|
|
1740
|
+
*/
|
|
1741
|
+
get messagesByPeerId() {
|
|
1742
|
+
return this._computeMessageHistogram("peerId");
|
|
1743
|
+
}
|
|
1744
|
+
_advanceTimeWindow(millisPassed) {
|
|
1745
|
+
const oldMetrics = Object.freeze(this._activeCounters);
|
|
1746
|
+
this._activeCounters = createLocalCounters();
|
|
1747
|
+
this._lastCompleteCounters = oldMetrics;
|
|
1748
|
+
for (const peerId of Object.keys(oldMetrics.byPeerId)) {
|
|
1749
|
+
this._activeCounters.byPeerId[peerId] = createMessageCounter();
|
|
1750
|
+
}
|
|
1751
|
+
this._addToTimeSeries(oldMetrics.replication, this._localTimeSeries.replication);
|
|
1752
|
+
this._addToTimeSeries(oldMetrics.storage, this._localTimeSeries.storage);
|
|
1753
|
+
if (Math.abs(millisPassed - 1e3) < 100) {
|
|
1754
|
+
this._reportPerSecondRate(oldMetrics);
|
|
1755
|
+
}
|
|
1756
|
+
}
|
|
1757
|
+
_addToTimeSeries(values, timeSeries) {
|
|
1758
|
+
for (const [key, value] of Object.entries(values)) {
|
|
1759
|
+
const values2 = timeSeries[key];
|
|
1760
|
+
values2.push(value);
|
|
1761
|
+
if (values2.length > this._params.timeSeriesLength) {
|
|
1762
|
+
values2.shift();
|
|
1763
|
+
}
|
|
1764
|
+
}
|
|
1765
|
+
}
|
|
1766
|
+
_reportPerSecondRate(metrics) {
|
|
1767
|
+
const toReport = [
|
|
1768
|
+
[
|
|
1769
|
+
"storage.load",
|
|
1770
|
+
metrics.storage.loadedChunks,
|
|
1771
|
+
this._storageAverages.loadsPerSecond
|
|
1772
|
+
],
|
|
1773
|
+
[
|
|
1774
|
+
"storage.store",
|
|
1775
|
+
metrics.storage.storedChunks,
|
|
1776
|
+
this._storageAverages.storesPerSecond
|
|
1777
|
+
],
|
|
1778
|
+
[
|
|
1779
|
+
"network.receive",
|
|
1780
|
+
metrics.replication.received,
|
|
1781
|
+
this._replicationAverages.receivedPerSecond
|
|
1782
|
+
],
|
|
1783
|
+
[
|
|
1784
|
+
"network.send",
|
|
1785
|
+
metrics.replication.sent,
|
|
1786
|
+
this._replicationAverages.sentPerSecond
|
|
1787
|
+
]
|
|
1788
|
+
];
|
|
1789
|
+
for (const [metricName, metric, summary] of toReport) {
|
|
1790
|
+
summary.record(metric);
|
|
1791
|
+
if (metric > 0) {
|
|
1792
|
+
trace2.metrics.distribution(`dxos.echo.${metricName}-rate`, metric);
|
|
1793
|
+
trace2.metrics.increment(`dxos.echo.${metricName}`, 1, {
|
|
1794
|
+
tags: {
|
|
1795
|
+
status: "busy"
|
|
1796
|
+
}
|
|
1797
|
+
});
|
|
1798
|
+
} else {
|
|
1799
|
+
trace2.metrics.increment(`dxos.echo.${metricName}`, 1, {
|
|
1800
|
+
tags: {
|
|
1801
|
+
status: "idle"
|
|
1802
|
+
}
|
|
1803
|
+
});
|
|
1804
|
+
}
|
|
1805
|
+
}
|
|
1806
|
+
this._replicationAverages.sendsFailedPerSecond.record(metrics.replication.failed);
|
|
1807
|
+
}
|
|
1808
|
+
recordPeerConnected(peerId) {
|
|
1809
|
+
this._activeCounters.byPeerId[peerId] = createMessageCounter();
|
|
1810
|
+
this._connectionsCount++;
|
|
1811
|
+
}
|
|
1812
|
+
recordPeerDisconnected(peerId) {
|
|
1813
|
+
this._connectionsCount--;
|
|
1814
|
+
delete this._activeCounters.byPeerId[peerId];
|
|
1815
|
+
}
|
|
1816
|
+
recordBytesStored(count) {
|
|
1817
|
+
this._activeCounters.storage.storedChunks++;
|
|
1818
|
+
this._activeCounters.storage.storedBytes += count;
|
|
1819
|
+
this._storageAverages.storedChunkSize.record(count);
|
|
1820
|
+
trace2.metrics.distribution("dxos.echo.storage.bytes-stored", count, {
|
|
1821
|
+
unit: "bytes"
|
|
1822
|
+
});
|
|
1823
|
+
}
|
|
1824
|
+
recordLoadDuration(durationMs) {
|
|
1825
|
+
this._storageAverages.loadDuration.record(durationMs);
|
|
1826
|
+
}
|
|
1827
|
+
recordStoreDuration(durationMs) {
|
|
1828
|
+
this._storageAverages.storeDuration.record(durationMs);
|
|
1829
|
+
}
|
|
1830
|
+
recordBytesLoaded(count) {
|
|
1831
|
+
this._activeCounters.storage.loadedChunks++;
|
|
1832
|
+
this._activeCounters.storage.loadedBytes += count;
|
|
1833
|
+
this._storageAverages.loadedChunkSize.record(count);
|
|
1834
|
+
trace2.metrics.distribution("dxos.echo.storage.bytes-loaded", count, {
|
|
1835
|
+
unit: "bytes"
|
|
1836
|
+
});
|
|
1837
|
+
}
|
|
1838
|
+
recordMessageSent(message, duration) {
|
|
1839
|
+
let metricsGroupName;
|
|
1840
|
+
const bytes = getByteCount(message);
|
|
1841
|
+
const tags = {
|
|
1842
|
+
type: message.type
|
|
1843
|
+
};
|
|
1844
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
1845
|
+
this._activeCounters.replication.sent++;
|
|
1846
|
+
this._replicationAverages.sendDuration.record(duration);
|
|
1847
|
+
this._replicationAverages.sentMessageSize.record(bytes);
|
|
1848
|
+
metricsGroupName = "replication";
|
|
1849
|
+
} else {
|
|
1850
|
+
metricsGroupName = "collection-sync";
|
|
1851
|
+
}
|
|
1852
|
+
trace2.metrics.distribution(`dxos.echo.${metricsGroupName}.bytes-sent`, bytes, {
|
|
1853
|
+
unit: "bytes",
|
|
1854
|
+
tags
|
|
1855
|
+
});
|
|
1856
|
+
trace2.metrics.distribution(`dxos.echo.${metricsGroupName}.send-duration`, duration, {
|
|
1857
|
+
unit: "millisecond",
|
|
1858
|
+
tags
|
|
1859
|
+
});
|
|
1860
|
+
trace2.metrics.increment(`dxos.echo.${metricsGroupName}.send-status`, 1, {
|
|
1861
|
+
tags: {
|
|
1862
|
+
...tags,
|
|
1863
|
+
success: true
|
|
1864
|
+
}
|
|
1865
|
+
});
|
|
1866
|
+
const { messageSize, messageCounts } = this._getStatsForType(message);
|
|
1867
|
+
messageSize.record(bytes);
|
|
1868
|
+
messageCounts.sent++;
|
|
1869
|
+
this._lastSentMessages.push({
|
|
1870
|
+
type: message.type,
|
|
1871
|
+
peerId: message.targetId
|
|
1872
|
+
});
|
|
1873
|
+
}
|
|
1874
|
+
recordMessageReceived(message) {
|
|
1875
|
+
const bytes = getByteCount(message);
|
|
1876
|
+
const tags = {
|
|
1877
|
+
type: message.type
|
|
1878
|
+
};
|
|
1879
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
1880
|
+
this._activeCounters.replication.received++;
|
|
1881
|
+
this._replicationAverages.receivedMessageSize.record(bytes);
|
|
1882
|
+
trace2.metrics.distribution("dxos.echo.replication.bytes-received", bytes, {
|
|
1883
|
+
unit: "bytes",
|
|
1884
|
+
tags
|
|
1885
|
+
});
|
|
1886
|
+
} else {
|
|
1887
|
+
trace2.metrics.distribution("dxos.echo.collection-sync.bytes-received", bytes, {
|
|
1888
|
+
unit: "bytes",
|
|
1889
|
+
tags
|
|
1890
|
+
});
|
|
1891
|
+
}
|
|
1892
|
+
const { messageSize, messageCounts } = this._getStatsForType(message);
|
|
1893
|
+
messageSize.record(bytes);
|
|
1894
|
+
messageCounts.received++;
|
|
1895
|
+
this._lastReceivedMessages.push({
|
|
1896
|
+
type: message.type,
|
|
1897
|
+
peerId: message.senderId
|
|
1898
|
+
});
|
|
1899
|
+
}
|
|
1900
|
+
recordMessageSendingFailed(message) {
|
|
1901
|
+
const tags = {
|
|
1902
|
+
type: message.type,
|
|
1903
|
+
success: false
|
|
1904
|
+
};
|
|
1905
|
+
if (isAutomergeProtocolMessage(message)) {
|
|
1906
|
+
this._activeCounters.replication.failed++;
|
|
1907
|
+
trace2.metrics.increment("dxos.echo.replication.send-status", 1, {
|
|
1908
|
+
unit: "bytes",
|
|
1909
|
+
tags
|
|
1910
|
+
});
|
|
1911
|
+
} else {
|
|
1912
|
+
trace2.metrics.increment("dxos.echo.collection-sync.send-status", 1, {
|
|
1913
|
+
unit: "bytes",
|
|
1914
|
+
tags
|
|
1915
|
+
});
|
|
1916
|
+
}
|
|
1917
|
+
const { messageCounts } = this._getStatsForType(message);
|
|
1918
|
+
messageCounts.failed++;
|
|
1919
|
+
}
|
|
1920
|
+
_getStatsForType(message) {
|
|
1921
|
+
const messageSize = this._sizeByMessageType[message.type] ??= createSlidingWindow();
|
|
1922
|
+
const messageCounts = this._activeCounters.byType[message.type] ??= createMessageCounter();
|
|
1923
|
+
return {
|
|
1924
|
+
messageCounts,
|
|
1925
|
+
messageSize
|
|
1926
|
+
};
|
|
1927
|
+
}
|
|
1928
|
+
_computeMessageHistogram(groupKey) {
|
|
1929
|
+
const result = {};
|
|
1930
|
+
for (const receivedMessage of this._lastReceivedMessages) {
|
|
1931
|
+
const counters = result[receivedMessage[groupKey]] ??= {
|
|
1932
|
+
received: 0,
|
|
1933
|
+
sent: 0
|
|
1934
|
+
};
|
|
1935
|
+
counters.received++;
|
|
1936
|
+
}
|
|
1937
|
+
for (const receivedMessage of this._lastSentMessages) {
|
|
1938
|
+
const counters = result[receivedMessage[groupKey]] ??= {
|
|
1939
|
+
received: 0,
|
|
1940
|
+
sent: 0
|
|
1941
|
+
};
|
|
1942
|
+
counters.sent++;
|
|
1943
|
+
}
|
|
1944
|
+
return result;
|
|
1945
|
+
}
|
|
1946
|
+
};
|
|
1947
|
+
EchoDataMonitor = _ts_decorate3([
|
|
1948
|
+
trace2.resource()
|
|
1949
|
+
], EchoDataMonitor);
|
|
1950
|
+
var isAutomergeProtocolMessage = (message) => {
|
|
1951
|
+
return !(isCollectionQueryMessage(message) || isCollectionStateMessage(message));
|
|
1952
|
+
};
|
|
1953
|
+
var createSlidingWindow = (overrides) => new SlidingWindowSummary({
|
|
1954
|
+
dataPoints: DEFAULT_AVG_WINDOW_SIZE,
|
|
1955
|
+
precision: 2,
|
|
1956
|
+
...overrides
|
|
1957
|
+
});
|
|
1958
|
+
var createLocalCounters = () => ({
|
|
1959
|
+
storage: {
|
|
1960
|
+
loadedBytes: 0,
|
|
1961
|
+
storedBytes: 0,
|
|
1962
|
+
storedChunks: 0,
|
|
1963
|
+
loadedChunks: 0
|
|
1964
|
+
},
|
|
1965
|
+
replication: createMessageCounter(),
|
|
1966
|
+
byPeerId: {},
|
|
1967
|
+
byType: {}
|
|
1968
|
+
});
|
|
1969
|
+
var createLocalTimeSeries = () => ({
|
|
1970
|
+
storage: {
|
|
1971
|
+
loadedBytes: [],
|
|
1972
|
+
storedBytes: [],
|
|
1973
|
+
storedChunks: [],
|
|
1974
|
+
loadedChunks: []
|
|
1975
|
+
},
|
|
1976
|
+
replication: {
|
|
1977
|
+
sent: [],
|
|
1978
|
+
failed: [],
|
|
1979
|
+
received: []
|
|
1980
|
+
}
|
|
1981
|
+
});
|
|
1982
|
+
var createMessageCounter = () => ({
|
|
1983
|
+
sent: 0,
|
|
1984
|
+
received: 0,
|
|
1985
|
+
failed: 0
|
|
1986
|
+
});
|
|
1987
|
+
var createNetworkAverages = () => ({
|
|
1988
|
+
receivedMessageSize: createSlidingWindow(),
|
|
1989
|
+
sentMessageSize: createSlidingWindow(),
|
|
1990
|
+
sendDuration: createSlidingWindow(),
|
|
1991
|
+
receivedPerSecond: createSlidingWindow({
|
|
1992
|
+
dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
1993
|
+
}),
|
|
1994
|
+
sentPerSecond: createSlidingWindow({
|
|
1995
|
+
dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
1996
|
+
}),
|
|
1997
|
+
sendsFailedPerSecond: createSlidingWindow({
|
|
1998
|
+
dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
1999
|
+
})
|
|
2000
|
+
});
|
|
2001
|
+
var createStorageAverages = () => ({
|
|
2002
|
+
storedChunkSize: createSlidingWindow(),
|
|
2003
|
+
loadedChunkSize: createSlidingWindow(),
|
|
2004
|
+
loadDuration: createSlidingWindow(),
|
|
2005
|
+
storeDuration: createSlidingWindow(),
|
|
2006
|
+
loadsPerSecond: createSlidingWindow({
|
|
2007
|
+
dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
2008
|
+
}),
|
|
2009
|
+
storesPerSecond: createSlidingWindow({
|
|
2010
|
+
dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
|
|
2011
|
+
})
|
|
2012
|
+
});
|
|
2013
|
+
var getByteCount = (message) => {
|
|
2014
|
+
return message.type.length + message.senderId.length + message.targetId.length + (message.data?.byteLength ?? 0) + (message.documentId?.length ?? 0);
|
|
2015
|
+
};
|
|
2016
|
+
|
|
2017
|
+
// packages/core/echo/echo-pipeline/src/db-host/data-service.ts
|
|
2018
|
+
import { Stream } from "@dxos/codec-protobuf";
|
|
2019
|
+
import { invariant as invariant7 } from "@dxos/invariant";
|
|
2020
|
+
import { SpaceId as SpaceId2 } from "@dxos/keys";
|
|
2021
|
+
import { log as log6 } from "@dxos/log";
|
|
2022
|
+
var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/data-service.ts";
|
|
2023
|
+
var DataServiceImpl = class {
|
|
2024
|
+
constructor(params) {
|
|
2025
|
+
/**
|
|
2026
|
+
* Map of subscriptions.
|
|
2027
|
+
* subscriptionId -> DocumentsSynchronizer
|
|
2028
|
+
*/
|
|
2029
|
+
this._subscriptions = /* @__PURE__ */ new Map();
|
|
2030
|
+
this._automergeHost = params.automergeHost;
|
|
2031
|
+
this._updateIndexes = params.updateIndexes;
|
|
2032
|
+
}
|
|
2033
|
+
subscribe(request) {
|
|
2034
|
+
return new Stream(({ next, ready }) => {
|
|
2035
|
+
const synchronizer = new DocumentsSynchronizer({
|
|
2036
|
+
repo: this._automergeHost.repo,
|
|
2037
|
+
sendUpdates: (updates) => next(updates)
|
|
2038
|
+
});
|
|
2039
|
+
synchronizer.open().then(() => {
|
|
2040
|
+
this._subscriptions.set(request.subscriptionId, synchronizer);
|
|
2041
|
+
ready();
|
|
2042
|
+
}).catch((err) => log6.catch(err, void 0, {
|
|
2043
|
+
F: __dxlog_file7,
|
|
2044
|
+
L: 64,
|
|
2045
|
+
S: this,
|
|
2046
|
+
C: (f, a) => f(...a)
|
|
2047
|
+
}));
|
|
2048
|
+
return () => synchronizer.close();
|
|
2049
|
+
});
|
|
2050
|
+
}
|
|
2051
|
+
async updateSubscription(request) {
|
|
2052
|
+
const synchronizer = this._subscriptions.get(request.subscriptionId);
|
|
2053
|
+
invariant7(synchronizer, "Subscription not found", {
|
|
2054
|
+
F: __dxlog_file7,
|
|
2055
|
+
L: 71,
|
|
2056
|
+
S: this,
|
|
2057
|
+
A: [
|
|
2058
|
+
"synchronizer",
|
|
2059
|
+
"'Subscription not found'"
|
|
2060
|
+
]
|
|
2061
|
+
});
|
|
2062
|
+
if (request.addIds?.length) {
|
|
2063
|
+
await synchronizer.addDocuments(request.addIds);
|
|
2064
|
+
}
|
|
2065
|
+
if (request.removeIds?.length) {
|
|
2066
|
+
await synchronizer.removeDocuments(request.removeIds);
|
|
2067
|
+
}
|
|
2068
|
+
}
|
|
2069
|
+
async update(request) {
|
|
2070
|
+
if (!request.updates) {
|
|
2071
|
+
return;
|
|
2072
|
+
}
|
|
2073
|
+
const synchronizer = this._subscriptions.get(request.subscriptionId);
|
|
2074
|
+
invariant7(synchronizer, "Subscription not found", {
|
|
2075
|
+
F: __dxlog_file7,
|
|
2076
|
+
L: 86,
|
|
2077
|
+
S: this,
|
|
2078
|
+
A: [
|
|
2079
|
+
"synchronizer",
|
|
2080
|
+
"'Subscription not found'"
|
|
2081
|
+
]
|
|
2082
|
+
});
|
|
2083
|
+
synchronizer.update(request.updates);
|
|
2084
|
+
}
|
|
2085
|
+
async flush(request) {
|
|
2086
|
+
await this._automergeHost.flush(request);
|
|
2087
|
+
}
|
|
2088
|
+
async getDocumentHeads(request) {
|
|
2089
|
+
const documentIds = request.documentIds;
|
|
2090
|
+
if (!documentIds) {
|
|
2091
|
+
return {
|
|
2092
|
+
heads: {
|
|
2093
|
+
entries: []
|
|
2094
|
+
}
|
|
2095
|
+
};
|
|
2096
|
+
}
|
|
2097
|
+
const heads = await this._automergeHost.getHeads(documentIds);
|
|
2098
|
+
return {
|
|
2099
|
+
heads: {
|
|
2100
|
+
entries: heads.map((heads2, idx) => ({
|
|
2101
|
+
documentId: documentIds[idx],
|
|
2102
|
+
heads: heads2
|
|
2103
|
+
}))
|
|
2104
|
+
}
|
|
2105
|
+
};
|
|
2106
|
+
}
|
|
2107
|
+
async waitUntilHeadsReplicated(request, options) {
|
|
2108
|
+
await this._automergeHost.waitUntilHeadsReplicated(request.heads);
|
|
2109
|
+
}
|
|
2110
|
+
async reIndexHeads(request, options) {
|
|
2111
|
+
await this._automergeHost.reIndexHeads(request.documentIds ?? []);
|
|
2112
|
+
}
|
|
2113
|
+
async updateIndexes() {
|
|
2114
|
+
await this._updateIndexes();
|
|
2115
|
+
}
|
|
2116
|
+
async getSpaceSyncState(request, options) {
|
|
2117
|
+
invariant7(SpaceId2.isValid(request.spaceId), void 0, {
|
|
2118
|
+
F: __dxlog_file7,
|
|
2119
|
+
L: 127,
|
|
2120
|
+
S: this,
|
|
2121
|
+
A: [
|
|
2122
|
+
"SpaceId.isValid(request.spaceId)",
|
|
2123
|
+
""
|
|
2124
|
+
]
|
|
2125
|
+
});
|
|
2126
|
+
const collectionId = deriveCollectionIdFromSpaceId(request.spaceId);
|
|
2127
|
+
const state = await this._automergeHost.getCollectionSyncState(collectionId);
|
|
2128
|
+
return {
|
|
2129
|
+
peers: state.peers.map((peer) => ({
|
|
2130
|
+
peerId: peer.peerId,
|
|
2131
|
+
documentsToReconcile: peer.differentDocuments
|
|
2132
|
+
}))
|
|
2133
|
+
};
|
|
2134
|
+
}
|
|
2135
|
+
};
|
|
2136
|
+
|
|
2137
|
+
export {
|
|
2138
|
+
DocumentsSynchronizer,
|
|
2139
|
+
diffCollectionState,
|
|
2140
|
+
LevelDBStorageAdapter,
|
|
2141
|
+
encodingOptions,
|
|
2142
|
+
AutomergeHost,
|
|
2143
|
+
getSpaceKeyFromDoc,
|
|
2144
|
+
deriveCollectionIdFromSpaceId,
|
|
2145
|
+
getSpaceIdFromCollectionId,
|
|
2146
|
+
MeshEchoReplicator,
|
|
2147
|
+
EchoDataMonitor,
|
|
2148
|
+
DataServiceImpl
|
|
2149
|
+
};
|
|
2150
|
+
//# sourceMappingURL=chunk-Q4B5JN6L.mjs.map
|