@dxos/echo-pipeline 0.6.12 → 0.6.13-main.548ca8d
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/chunk-PESZVYAN.mjs +2050 -0
- package/dist/lib/browser/chunk-PESZVYAN.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +3463 -17
- package/dist/lib/browser/index.mjs.map +4 -4
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +3 -4
- package/dist/lib/browser/testing/index.mjs.map +3 -3
- package/dist/lib/node/{chunk-7HHYCGUR.cjs → chunk-6EZVIJNE.cjs} +89 -47
- package/dist/lib/node/chunk-6EZVIJNE.cjs.map +7 -0
- package/dist/lib/node/index.cjs +3440 -35
- package/dist/lib/node/index.cjs.map +4 -4
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +11 -12
- package/dist/lib/node/testing/index.cjs.map +3 -3
- package/dist/lib/{browser/chunk-UKXIJW43.mjs → node-esm/chunk-4LW7MDPZ.mjs} +76 -36
- package/dist/lib/node-esm/chunk-4LW7MDPZ.mjs.map +7 -0
- package/dist/lib/{browser/chunk-MPWFDDQK.mjs → node-esm/index.mjs} +1702 -335
- package/dist/lib/node-esm/index.mjs.map +7 -0
- package/dist/lib/node-esm/meta.json +1 -0
- package/dist/lib/node-esm/testing/index.mjs +551 -0
- package/dist/lib/node-esm/testing/index.mjs.map +7 -0
- package/dist/types/src/automerge/automerge-host.d.ts +24 -1
- package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
- package/dist/types/src/automerge/collection-synchronizer.d.ts +2 -0
- package/dist/types/src/automerge/collection-synchronizer.d.ts.map +1 -1
- package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
- package/dist/types/src/automerge/echo-replicator.d.ts +3 -3
- package/dist/types/src/automerge/echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts +3 -3
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/space-collection.d.ts +3 -2
- package/dist/types/src/automerge/space-collection.d.ts.map +1 -1
- package/dist/types/src/db-host/automerge-metrics.d.ts +11 -0
- package/dist/types/src/db-host/automerge-metrics.d.ts.map +1 -0
- package/dist/types/src/db-host/data-service.d.ts +3 -2
- package/dist/types/src/db-host/data-service.d.ts.map +1 -1
- package/dist/types/src/db-host/database-root.d.ts +20 -0
- package/dist/types/src/db-host/database-root.d.ts.map +1 -0
- package/dist/types/src/db-host/documents-iterator.d.ts +7 -0
- package/dist/types/src/db-host/documents-iterator.d.ts.map +1 -0
- package/dist/types/src/db-host/echo-host.d.ts +73 -0
- package/dist/types/src/db-host/echo-host.d.ts.map +1 -0
- package/dist/types/src/db-host/index.d.ts +5 -0
- package/dist/types/src/db-host/index.d.ts.map +1 -1
- package/dist/types/src/db-host/migration.d.ts +8 -0
- package/dist/types/src/db-host/migration.d.ts.map +1 -0
- package/dist/types/src/db-host/query-service.d.ts +25 -0
- package/dist/types/src/db-host/query-service.d.ts.map +1 -0
- package/dist/types/src/db-host/query-state.d.ts +41 -0
- package/dist/types/src/db-host/query-state.d.ts.map +1 -0
- package/dist/types/src/db-host/space-state-manager.d.ts +23 -0
- package/dist/types/src/db-host/space-state-manager.d.ts.map +1 -0
- package/dist/types/src/edge/echo-edge-replicator.d.ts +23 -0
- package/dist/types/src/edge/echo-edge-replicator.d.ts.map +1 -0
- package/dist/types/src/edge/echo-edge-replicator.test.d.ts +2 -0
- package/dist/types/src/edge/echo-edge-replicator.test.d.ts.map +1 -0
- package/dist/types/src/edge/index.d.ts +2 -0
- package/dist/types/src/edge/index.d.ts.map +1 -0
- package/dist/types/src/index.d.ts +1 -0
- package/dist/types/src/index.d.ts.map +1 -1
- package/dist/types/src/metadata/metadata-store.d.ts +4 -1
- package/dist/types/src/metadata/metadata-store.d.ts.map +1 -1
- package/dist/types/src/testing/test-agent-builder.d.ts.map +1 -1
- package/dist/types/src/testing/test-replicator.d.ts +4 -4
- package/dist/types/src/testing/test-replicator.d.ts.map +1 -1
- package/package.json +40 -50
- package/src/automerge/automerge-host.test.ts +8 -9
- package/src/automerge/automerge-host.ts +46 -7
- package/src/automerge/automerge-repo.test.ts +18 -16
- package/src/automerge/collection-synchronizer.test.ts +10 -5
- package/src/automerge/collection-synchronizer.ts +17 -6
- package/src/automerge/echo-data-monitor.test.ts +1 -3
- package/src/automerge/echo-network-adapter.test.ts +4 -3
- package/src/automerge/echo-network-adapter.ts +5 -4
- package/src/automerge/echo-replicator.ts +3 -3
- package/src/automerge/mesh-echo-replicator-connection.ts +10 -9
- package/src/automerge/mesh-echo-replicator.ts +2 -1
- package/src/automerge/space-collection.ts +3 -2
- package/src/automerge/storage-adapter.test.ts +2 -3
- package/src/db-host/automerge-metrics.ts +38 -0
- package/src/db-host/data-service.ts +29 -14
- package/src/db-host/database-root.ts +86 -0
- package/src/db-host/documents-iterator.ts +73 -0
- package/src/db-host/documents-synchronizer.test.ts +2 -2
- package/src/db-host/echo-host.ts +257 -0
- package/src/db-host/index.ts +6 -1
- package/src/db-host/migration.ts +57 -0
- package/src/db-host/query-service.ts +208 -0
- package/src/db-host/query-state.ts +200 -0
- package/src/db-host/space-state-manager.ts +90 -0
- package/src/edge/echo-edge-replicator.test.ts +96 -0
- package/src/edge/echo-edge-replicator.ts +337 -0
- package/src/edge/index.ts +5 -0
- package/src/index.ts +1 -0
- package/src/metadata/metadata-store.ts +20 -0
- package/src/pipeline/pipeline-stress.test.ts +44 -47
- package/src/pipeline/pipeline.test.ts +3 -4
- package/src/space/control-pipeline.test.ts +2 -3
- package/src/space/control-pipeline.ts +10 -1
- package/src/space/replication.browser.test.ts +2 -8
- package/src/space/space-manager.browser.test.ts +6 -5
- package/src/space/space-protocol.browser.test.ts +29 -34
- package/src/space/space-protocol.test.ts +29 -27
- package/src/space/space.test.ts +28 -11
- package/src/testing/test-agent-builder.ts +2 -2
- package/src/testing/test-replicator.ts +3 -3
- package/dist/lib/browser/chunk-MPWFDDQK.mjs.map +0 -7
- package/dist/lib/browser/chunk-UKXIJW43.mjs.map +0 -7
- package/dist/lib/browser/chunk-XPCF2V5U.mjs +0 -31
- package/dist/lib/browser/chunk-XPCF2V5U.mjs.map +0 -7
- package/dist/lib/browser/light.mjs +0 -32
- package/dist/lib/browser/light.mjs.map +0 -7
- package/dist/lib/node/chunk-5DH4KR2S.cjs +0 -2148
- package/dist/lib/node/chunk-5DH4KR2S.cjs.map +0 -7
- package/dist/lib/node/chunk-7HHYCGUR.cjs.map +0 -7
- package/dist/lib/node/chunk-DZVH7HDD.cjs +0 -43
- package/dist/lib/node/chunk-DZVH7HDD.cjs.map +0 -7
- package/dist/lib/node/light.cjs +0 -52
- package/dist/lib/node/light.cjs.map +0 -7
- package/dist/types/src/light.d.ts +0 -4
- package/dist/types/src/light.d.ts.map +0 -1
- package/src/light.ts +0 -7
|
@@ -1,2148 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
var __create = Object.create;
|
|
3
|
-
var __defProp = Object.defineProperty;
|
|
4
|
-
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
5
|
-
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
6
|
-
var __getProtoOf = Object.getPrototypeOf;
|
|
7
|
-
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
8
|
-
var __export = (target, all) => {
|
|
9
|
-
for (var name in all)
|
|
10
|
-
__defProp(target, name, { get: all[name], enumerable: true });
|
|
11
|
-
};
|
|
12
|
-
var __copyProps = (to, from, except, desc) => {
|
|
13
|
-
if (from && typeof from === "object" || typeof from === "function") {
|
|
14
|
-
for (let key of __getOwnPropNames(from))
|
|
15
|
-
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
16
|
-
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
17
|
-
}
|
|
18
|
-
return to;
|
|
19
|
-
};
|
|
20
|
-
var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
|
|
21
|
-
// If the importer is in node compatibility mode or this is not an ESM
|
|
22
|
-
// file that has been converted to a CommonJS file using a Babel-
|
|
23
|
-
// compatible transform (i.e. "__esModule" has not been set), then set
|
|
24
|
-
// "default" to the CommonJS "module.exports" for node compatibility.
|
|
25
|
-
isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
|
|
26
|
-
mod
|
|
27
|
-
));
|
|
28
|
-
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
29
|
-
var chunk_5DH4KR2S_exports = {};
|
|
30
|
-
__export(chunk_5DH4KR2S_exports, {
|
|
31
|
-
AutomergeHost: () => AutomergeHost,
|
|
32
|
-
DataServiceImpl: () => DataServiceImpl,
|
|
33
|
-
DocumentsSynchronizer: () => DocumentsSynchronizer,
|
|
34
|
-
EchoDataMonitor: () => EchoDataMonitor,
|
|
35
|
-
LevelDBStorageAdapter: () => LevelDBStorageAdapter,
|
|
36
|
-
MeshEchoReplicator: () => MeshEchoReplicator,
|
|
37
|
-
deriveCollectionIdFromSpaceId: () => deriveCollectionIdFromSpaceId,
|
|
38
|
-
diffCollectionState: () => diffCollectionState,
|
|
39
|
-
encodingOptions: () => encodingOptions,
|
|
40
|
-
getSpaceIdFromCollectionId: () => getSpaceIdFromCollectionId,
|
|
41
|
-
getSpaceKeyFromDoc: () => getSpaceKeyFromDoc
|
|
42
|
-
});
|
|
43
|
-
module.exports = __toCommonJS(chunk_5DH4KR2S_exports);
|
|
44
|
-
var import_chunk_DZVH7HDD = require("./chunk-DZVH7HDD.cjs");
|
|
45
|
-
var import_async = require("@dxos/async");
|
|
46
|
-
var import_automerge = require("@dxos/automerge/automerge");
|
|
47
|
-
var import_context = require("@dxos/context");
|
|
48
|
-
var import_invariant = require("@dxos/invariant");
|
|
49
|
-
var import_log = require("@dxos/log");
|
|
50
|
-
var import_async2 = require("@dxos/async");
|
|
51
|
-
var import_automerge2 = require("@dxos/automerge/automerge");
|
|
52
|
-
var import_context2 = require("@dxos/context");
|
|
53
|
-
var import_util = require("@dxos/util");
|
|
54
|
-
var import_context3 = require("@dxos/context");
|
|
55
|
-
var import_async3 = require("@dxos/async");
|
|
56
|
-
var import_automerge3 = require("@dxos/automerge/automerge");
|
|
57
|
-
var import_automerge_repo = require("@dxos/automerge/automerge-repo");
|
|
58
|
-
var import_context4 = require("@dxos/context");
|
|
59
|
-
var import_invariant2 = require("@dxos/invariant");
|
|
60
|
-
var import_keys = require("@dxos/keys");
|
|
61
|
-
var import_log2 = require("@dxos/log");
|
|
62
|
-
var import_protocols = require("@dxos/protocols");
|
|
63
|
-
var import_tracing = require("@dxos/tracing");
|
|
64
|
-
var import_async4 = require("@dxos/async");
|
|
65
|
-
var import_automerge_repo2 = require("@dxos/automerge/automerge-repo");
|
|
66
|
-
var import_context5 = require("@dxos/context");
|
|
67
|
-
var import_invariant3 = require("@dxos/invariant");
|
|
68
|
-
var import_log3 = require("@dxos/log");
|
|
69
|
-
var import_util2 = require("@dxos/util");
|
|
70
|
-
var import_protocols2 = require("@dxos/protocols");
|
|
71
|
-
var import_indexing = require("@dxos/indexing");
|
|
72
|
-
var import_invariant4 = require("@dxos/invariant");
|
|
73
|
-
var import_keys2 = require("@dxos/keys");
|
|
74
|
-
var import_invariant5 = require("@dxos/invariant");
|
|
75
|
-
var import_keys3 = require("@dxos/keys");
|
|
76
|
-
var import_log4 = require("@dxos/log");
|
|
77
|
-
var import_util3 = require("@dxos/util");
|
|
78
|
-
var A2 = __toESM(require("@dxos/automerge/automerge"));
|
|
79
|
-
var import_automerge_repo3 = require("@dxos/automerge/automerge-repo");
|
|
80
|
-
var import_context6 = require("@dxos/context");
|
|
81
|
-
var import_invariant6 = require("@dxos/invariant");
|
|
82
|
-
var import_log5 = require("@dxos/log");
|
|
83
|
-
var import_teleport_extension_automerge_replicator = require("@dxos/teleport-extension-automerge-replicator");
|
|
84
|
-
var import_tracing2 = require("@dxos/tracing");
|
|
85
|
-
var import_util4 = require("@dxos/util");
|
|
86
|
-
var import_codec_protobuf = require("@dxos/codec-protobuf");
|
|
87
|
-
var import_invariant7 = require("@dxos/invariant");
|
|
88
|
-
var import_keys4 = require("@dxos/keys");
|
|
89
|
-
var import_log6 = require("@dxos/log");
|
|
90
|
-
var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts";
|
|
91
|
-
var MAX_UPDATE_FREQ = 10;
|
|
92
|
-
var DocumentsSynchronizer = class extends import_context.Resource {
|
|
93
|
-
constructor(_params) {
|
|
94
|
-
super();
|
|
95
|
-
this._params = _params;
|
|
96
|
-
this._syncStates = /* @__PURE__ */ new Map();
|
|
97
|
-
this._pendingUpdates = /* @__PURE__ */ new Set();
|
|
98
|
-
this._sendUpdatesJob = void 0;
|
|
99
|
-
}
|
|
100
|
-
addDocuments(documentIds, retryCounter = 0) {
|
|
101
|
-
if (retryCounter > 3) {
|
|
102
|
-
import_log.log.warn("Failed to load document, retry limit reached", {
|
|
103
|
-
documentIds
|
|
104
|
-
}, {
|
|
105
|
-
F: __dxlog_file,
|
|
106
|
-
L: 49,
|
|
107
|
-
S: this,
|
|
108
|
-
C: (f, a) => f(...a)
|
|
109
|
-
});
|
|
110
|
-
return;
|
|
111
|
-
}
|
|
112
|
-
for (const documentId of documentIds) {
|
|
113
|
-
const doc = this._params.repo.find(documentId);
|
|
114
|
-
doc.whenReady().then(() => {
|
|
115
|
-
this._startSync(doc);
|
|
116
|
-
this._pendingUpdates.add(doc.documentId);
|
|
117
|
-
this._sendUpdatesJob.trigger();
|
|
118
|
-
}).catch((error) => {
|
|
119
|
-
import_log.log.warn("Failed to load document, wraparound", {
|
|
120
|
-
documentId,
|
|
121
|
-
error
|
|
122
|
-
}, {
|
|
123
|
-
F: __dxlog_file,
|
|
124
|
-
L: 63,
|
|
125
|
-
S: this,
|
|
126
|
-
C: (f, a) => f(...a)
|
|
127
|
-
});
|
|
128
|
-
this.addDocuments([
|
|
129
|
-
documentId
|
|
130
|
-
], retryCounter + 1);
|
|
131
|
-
});
|
|
132
|
-
}
|
|
133
|
-
}
|
|
134
|
-
removeDocuments(documentIds) {
|
|
135
|
-
for (const documentId of documentIds) {
|
|
136
|
-
this._syncStates.get(documentId)?.clearSubscriptions?.();
|
|
137
|
-
this._syncStates.delete(documentId);
|
|
138
|
-
this._pendingUpdates.delete(documentId);
|
|
139
|
-
}
|
|
140
|
-
}
|
|
141
|
-
async _open() {
|
|
142
|
-
this._sendUpdatesJob = new import_async.UpdateScheduler(this._ctx, this._checkAndSendUpdates.bind(this), {
|
|
143
|
-
maxFrequency: MAX_UPDATE_FREQ
|
|
144
|
-
});
|
|
145
|
-
}
|
|
146
|
-
async _close() {
|
|
147
|
-
await this._sendUpdatesJob.join();
|
|
148
|
-
this._syncStates.clear();
|
|
149
|
-
}
|
|
150
|
-
update(updates) {
|
|
151
|
-
for (const { documentId, mutation, isNew } of updates) {
|
|
152
|
-
if (isNew) {
|
|
153
|
-
const doc = this._params.repo.find(documentId);
|
|
154
|
-
doc.update((doc2) => import_automerge.next.loadIncremental(doc2, mutation));
|
|
155
|
-
this._startSync(doc);
|
|
156
|
-
} else {
|
|
157
|
-
this._writeMutation(documentId, mutation);
|
|
158
|
-
}
|
|
159
|
-
}
|
|
160
|
-
}
|
|
161
|
-
_startSync(doc) {
|
|
162
|
-
if (this._syncStates.has(doc.documentId)) {
|
|
163
|
-
import_log.log.info("Document already being synced", {
|
|
164
|
-
documentId: doc.documentId
|
|
165
|
-
}, {
|
|
166
|
-
F: __dxlog_file,
|
|
167
|
-
L: 102,
|
|
168
|
-
S: this,
|
|
169
|
-
C: (f, a) => f(...a)
|
|
170
|
-
});
|
|
171
|
-
return;
|
|
172
|
-
}
|
|
173
|
-
const syncState = {
|
|
174
|
-
handle: doc
|
|
175
|
-
};
|
|
176
|
-
this._subscribeForChanges(syncState);
|
|
177
|
-
this._syncStates.set(doc.documentId, syncState);
|
|
178
|
-
}
|
|
179
|
-
_subscribeForChanges(syncState) {
|
|
180
|
-
const handler = () => {
|
|
181
|
-
this._pendingUpdates.add(syncState.handle.documentId);
|
|
182
|
-
this._sendUpdatesJob.trigger();
|
|
183
|
-
};
|
|
184
|
-
syncState.handle.on("heads-changed", handler);
|
|
185
|
-
syncState.clearSubscriptions = () => syncState.handle.off("heads-changed", handler);
|
|
186
|
-
}
|
|
187
|
-
async _checkAndSendUpdates() {
|
|
188
|
-
const updates = [];
|
|
189
|
-
const docsWithPendingUpdates = Array.from(this._pendingUpdates);
|
|
190
|
-
this._pendingUpdates.clear();
|
|
191
|
-
for (const documentId of docsWithPendingUpdates) {
|
|
192
|
-
const update = this._getPendingChanges(documentId);
|
|
193
|
-
if (update) {
|
|
194
|
-
updates.push({
|
|
195
|
-
documentId,
|
|
196
|
-
mutation: update
|
|
197
|
-
});
|
|
198
|
-
}
|
|
199
|
-
}
|
|
200
|
-
if (updates.length > 0) {
|
|
201
|
-
this._params.sendUpdates({
|
|
202
|
-
updates
|
|
203
|
-
});
|
|
204
|
-
}
|
|
205
|
-
}
|
|
206
|
-
_getPendingChanges(documentId) {
|
|
207
|
-
const syncState = this._syncStates.get(documentId);
|
|
208
|
-
(0, import_invariant.invariant)(syncState, "Sync state for document not found", {
|
|
209
|
-
F: __dxlog_file,
|
|
210
|
-
L: 143,
|
|
211
|
-
S: this,
|
|
212
|
-
A: [
|
|
213
|
-
"syncState",
|
|
214
|
-
"'Sync state for document not found'"
|
|
215
|
-
]
|
|
216
|
-
});
|
|
217
|
-
const doc = syncState.handle.docSync();
|
|
218
|
-
if (!doc) {
|
|
219
|
-
return;
|
|
220
|
-
}
|
|
221
|
-
const mutation = syncState.lastSentHead ? import_automerge.next.saveSince(doc, syncState.lastSentHead) : import_automerge.next.save(doc);
|
|
222
|
-
if (mutation.length === 0) {
|
|
223
|
-
return;
|
|
224
|
-
}
|
|
225
|
-
syncState.lastSentHead = import_automerge.next.getHeads(doc);
|
|
226
|
-
return mutation;
|
|
227
|
-
}
|
|
228
|
-
_writeMutation(documentId, mutation) {
|
|
229
|
-
const syncState = this._syncStates.get(documentId);
|
|
230
|
-
(0, import_invariant.invariant)(syncState, "Sync state for document not found", {
|
|
231
|
-
F: __dxlog_file,
|
|
232
|
-
L: 158,
|
|
233
|
-
S: this,
|
|
234
|
-
A: [
|
|
235
|
-
"syncState",
|
|
236
|
-
"'Sync state for document not found'"
|
|
237
|
-
]
|
|
238
|
-
});
|
|
239
|
-
syncState.handle.update((doc) => {
|
|
240
|
-
const headsBefore = import_automerge.next.getHeads(doc);
|
|
241
|
-
const newDoc = import_automerge.next.loadIncremental(doc, mutation);
|
|
242
|
-
if (import_automerge.next.equals(headsBefore, syncState.lastSentHead)) {
|
|
243
|
-
syncState.lastSentHead = import_automerge.next.getHeads(newDoc);
|
|
244
|
-
}
|
|
245
|
-
return newDoc;
|
|
246
|
-
});
|
|
247
|
-
}
|
|
248
|
-
};
|
|
249
|
-
var MIN_QUERY_INTERVAL = 5e3;
|
|
250
|
-
var POLL_INTERVAL = 3e4;
|
|
251
|
-
var CollectionSynchronizer = class extends import_context2.Resource {
|
|
252
|
-
constructor(params) {
|
|
253
|
-
super();
|
|
254
|
-
this._perCollectionStates = /* @__PURE__ */ new Map();
|
|
255
|
-
this._connectedPeers = /* @__PURE__ */ new Set();
|
|
256
|
-
this.remoteStateUpdated = new import_async2.Event();
|
|
257
|
-
this._sendCollectionState = params.sendCollectionState;
|
|
258
|
-
this._queryCollectionState = params.queryCollectionState;
|
|
259
|
-
this._shouldSyncCollection = params.shouldSyncCollection;
|
|
260
|
-
}
|
|
261
|
-
async _open(ctx) {
|
|
262
|
-
(0, import_async2.scheduleTaskInterval)(this._ctx, async () => {
|
|
263
|
-
for (const collectionId of this._perCollectionStates.keys()) {
|
|
264
|
-
this.refreshCollection(collectionId);
|
|
265
|
-
await (0, import_async2.asyncReturn)();
|
|
266
|
-
}
|
|
267
|
-
}, POLL_INTERVAL);
|
|
268
|
-
}
|
|
269
|
-
getRegisteredCollectionIds() {
|
|
270
|
-
return [
|
|
271
|
-
...this._perCollectionStates.keys()
|
|
272
|
-
];
|
|
273
|
-
}
|
|
274
|
-
getLocalCollectionState(collectionId) {
|
|
275
|
-
return this._getPerCollectionState(collectionId).localState;
|
|
276
|
-
}
|
|
277
|
-
setLocalCollectionState(collectionId, state) {
|
|
278
|
-
this._getPerCollectionState(collectionId).localState = state;
|
|
279
|
-
queueMicrotask(async () => {
|
|
280
|
-
if (!this._ctx.disposed) {
|
|
281
|
-
this._refreshInterestedPeers(collectionId);
|
|
282
|
-
this.refreshCollection(collectionId);
|
|
283
|
-
}
|
|
284
|
-
});
|
|
285
|
-
}
|
|
286
|
-
getRemoteCollectionStates(collectionId) {
|
|
287
|
-
return this._getPerCollectionState(collectionId).remoteStates;
|
|
288
|
-
}
|
|
289
|
-
refreshCollection(collectionId) {
|
|
290
|
-
let scheduleAnotherRefresh = false;
|
|
291
|
-
const state = this._getPerCollectionState(collectionId);
|
|
292
|
-
for (const peerId of this._connectedPeers) {
|
|
293
|
-
if (state.interestedPeers.has(peerId)) {
|
|
294
|
-
const lastQueried = state.lastQueried.get(peerId) ?? 0;
|
|
295
|
-
if (Date.now() - lastQueried > MIN_QUERY_INTERVAL) {
|
|
296
|
-
state.lastQueried.set(peerId, Date.now());
|
|
297
|
-
this._queryCollectionState(collectionId, peerId);
|
|
298
|
-
} else {
|
|
299
|
-
scheduleAnotherRefresh = true;
|
|
300
|
-
}
|
|
301
|
-
}
|
|
302
|
-
}
|
|
303
|
-
if (scheduleAnotherRefresh) {
|
|
304
|
-
(0, import_async2.scheduleTask)(this._ctx, () => this.refreshCollection(collectionId), MIN_QUERY_INTERVAL);
|
|
305
|
-
}
|
|
306
|
-
}
|
|
307
|
-
/**
|
|
308
|
-
* Callback when a connection to a peer is established.
|
|
309
|
-
*/
|
|
310
|
-
onConnectionOpen(peerId) {
|
|
311
|
-
this._connectedPeers.add(peerId);
|
|
312
|
-
queueMicrotask(async () => {
|
|
313
|
-
if (this._ctx.disposed) {
|
|
314
|
-
return;
|
|
315
|
-
}
|
|
316
|
-
for (const [collectionId, state] of this._perCollectionStates.entries()) {
|
|
317
|
-
if (this._shouldSyncCollection(collectionId, peerId)) {
|
|
318
|
-
state.interestedPeers.add(peerId);
|
|
319
|
-
state.lastQueried.set(peerId, Date.now());
|
|
320
|
-
this._queryCollectionState(collectionId, peerId);
|
|
321
|
-
}
|
|
322
|
-
}
|
|
323
|
-
});
|
|
324
|
-
}
|
|
325
|
-
/**
|
|
326
|
-
* Callback when a connection to a peer is closed.
|
|
327
|
-
*/
|
|
328
|
-
onConnectionClosed(peerId) {
|
|
329
|
-
this._connectedPeers.delete(peerId);
|
|
330
|
-
for (const perCollectionState of this._perCollectionStates.values()) {
|
|
331
|
-
perCollectionState.remoteStates.delete(peerId);
|
|
332
|
-
}
|
|
333
|
-
}
|
|
334
|
-
/**
|
|
335
|
-
* Callback when a peer queries the state of a collection.
|
|
336
|
-
*/
|
|
337
|
-
onCollectionStateQueried(collectionId, peerId) {
|
|
338
|
-
const perCollectionState = this._getPerCollectionState(collectionId);
|
|
339
|
-
if (perCollectionState.localState) {
|
|
340
|
-
this._sendCollectionState(collectionId, peerId, perCollectionState.localState);
|
|
341
|
-
}
|
|
342
|
-
}
|
|
343
|
-
/**
|
|
344
|
-
* Callback when a peer sends the state of a collection.
|
|
345
|
-
*/
|
|
346
|
-
onRemoteStateReceived(collectionId, peerId, state) {
|
|
347
|
-
validateCollectionState(state);
|
|
348
|
-
const perCollectionState = this._getPerCollectionState(collectionId);
|
|
349
|
-
perCollectionState.remoteStates.set(peerId, state);
|
|
350
|
-
this.remoteStateUpdated.emit({
|
|
351
|
-
peerId,
|
|
352
|
-
collectionId
|
|
353
|
-
});
|
|
354
|
-
}
|
|
355
|
-
_getPerCollectionState(collectionId) {
|
|
356
|
-
return (0, import_util.defaultMap)(this._perCollectionStates, collectionId, () => ({
|
|
357
|
-
localState: void 0,
|
|
358
|
-
remoteStates: /* @__PURE__ */ new Map(),
|
|
359
|
-
interestedPeers: /* @__PURE__ */ new Set(),
|
|
360
|
-
lastQueried: /* @__PURE__ */ new Map()
|
|
361
|
-
}));
|
|
362
|
-
}
|
|
363
|
-
_refreshInterestedPeers(collectionId) {
|
|
364
|
-
for (const peerId of this._connectedPeers) {
|
|
365
|
-
if (this._shouldSyncCollection(collectionId, peerId)) {
|
|
366
|
-
this._getPerCollectionState(collectionId).interestedPeers.add(peerId);
|
|
367
|
-
} else {
|
|
368
|
-
this._getPerCollectionState(collectionId).interestedPeers.delete(peerId);
|
|
369
|
-
}
|
|
370
|
-
}
|
|
371
|
-
}
|
|
372
|
-
};
|
|
373
|
-
var diffCollectionState = (local, remote) => {
|
|
374
|
-
const allDocuments = /* @__PURE__ */ new Set([
|
|
375
|
-
...Object.keys(local.documents),
|
|
376
|
-
...Object.keys(remote.documents)
|
|
377
|
-
]);
|
|
378
|
-
const different = [];
|
|
379
|
-
for (const documentId of allDocuments) {
|
|
380
|
-
if (!local.documents[documentId] || !remote.documents[documentId] || !import_automerge2.next.equals(local.documents[documentId], remote.documents[documentId])) {
|
|
381
|
-
different.push(documentId);
|
|
382
|
-
}
|
|
383
|
-
}
|
|
384
|
-
return {
|
|
385
|
-
different
|
|
386
|
-
};
|
|
387
|
-
};
|
|
388
|
-
var validateCollectionState = (state) => {
|
|
389
|
-
Object.entries(state.documents).forEach(([documentId, heads]) => {
|
|
390
|
-
if (!isValidDocumentId(documentId)) {
|
|
391
|
-
throw new Error(`Invalid documentId: ${documentId}`);
|
|
392
|
-
}
|
|
393
|
-
if (Array.isArray(heads) && heads.some((head) => typeof head !== "string")) {
|
|
394
|
-
throw new Error(`Invalid heads: ${heads}`);
|
|
395
|
-
}
|
|
396
|
-
});
|
|
397
|
-
};
|
|
398
|
-
var isValidDocumentId = (documentId) => {
|
|
399
|
-
return typeof documentId === "string" && !documentId.includes(":");
|
|
400
|
-
};
|
|
401
|
-
var LevelDBStorageAdapter = class extends import_context3.Resource {
|
|
402
|
-
constructor(_params) {
|
|
403
|
-
super();
|
|
404
|
-
this._params = _params;
|
|
405
|
-
}
|
|
406
|
-
async load(keyArray) {
|
|
407
|
-
try {
|
|
408
|
-
if (this._lifecycleState !== import_context3.LifecycleState.OPEN) {
|
|
409
|
-
return void 0;
|
|
410
|
-
}
|
|
411
|
-
const startMs = Date.now();
|
|
412
|
-
const chunk = await this._params.db.get(keyArray, {
|
|
413
|
-
...encodingOptions
|
|
414
|
-
});
|
|
415
|
-
this._params.monitor?.recordBytesLoaded(chunk.byteLength);
|
|
416
|
-
this._params.monitor?.recordLoadDuration(Date.now() - startMs);
|
|
417
|
-
return chunk;
|
|
418
|
-
} catch (err) {
|
|
419
|
-
if (isLevelDbNotFoundError(err)) {
|
|
420
|
-
return void 0;
|
|
421
|
-
}
|
|
422
|
-
throw err;
|
|
423
|
-
}
|
|
424
|
-
}
|
|
425
|
-
async save(keyArray, binary) {
|
|
426
|
-
if (this._lifecycleState !== import_context3.LifecycleState.OPEN) {
|
|
427
|
-
return void 0;
|
|
428
|
-
}
|
|
429
|
-
const startMs = Date.now();
|
|
430
|
-
const batch = this._params.db.batch();
|
|
431
|
-
await this._params.callbacks?.beforeSave?.({
|
|
432
|
-
path: keyArray,
|
|
433
|
-
batch
|
|
434
|
-
});
|
|
435
|
-
batch.put(keyArray, Buffer.from(binary), {
|
|
436
|
-
...encodingOptions
|
|
437
|
-
});
|
|
438
|
-
await batch.write();
|
|
439
|
-
this._params.monitor?.recordBytesStored(binary.byteLength);
|
|
440
|
-
await this._params.callbacks?.afterSave?.(keyArray);
|
|
441
|
-
this._params.monitor?.recordStoreDuration(Date.now() - startMs);
|
|
442
|
-
}
|
|
443
|
-
async remove(keyArray) {
|
|
444
|
-
if (this._lifecycleState !== import_context3.LifecycleState.OPEN) {
|
|
445
|
-
return void 0;
|
|
446
|
-
}
|
|
447
|
-
await this._params.db.del(keyArray, {
|
|
448
|
-
...encodingOptions
|
|
449
|
-
});
|
|
450
|
-
}
|
|
451
|
-
async loadRange(keyPrefix) {
|
|
452
|
-
if (this._lifecycleState !== import_context3.LifecycleState.OPEN) {
|
|
453
|
-
return [];
|
|
454
|
-
}
|
|
455
|
-
const startMs = Date.now();
|
|
456
|
-
const result = [];
|
|
457
|
-
for await (const [key, value] of this._params.db.iterator({
|
|
458
|
-
gte: keyPrefix,
|
|
459
|
-
lte: [
|
|
460
|
-
...keyPrefix,
|
|
461
|
-
"\uFFFF"
|
|
462
|
-
],
|
|
463
|
-
...encodingOptions
|
|
464
|
-
})) {
|
|
465
|
-
result.push({
|
|
466
|
-
key,
|
|
467
|
-
data: value
|
|
468
|
-
});
|
|
469
|
-
this._params.monitor?.recordBytesLoaded(value.byteLength);
|
|
470
|
-
}
|
|
471
|
-
this._params.monitor?.recordLoadDuration(Date.now() - startMs);
|
|
472
|
-
return result;
|
|
473
|
-
}
|
|
474
|
-
async removeRange(keyPrefix) {
|
|
475
|
-
if (this._lifecycleState !== import_context3.LifecycleState.OPEN) {
|
|
476
|
-
return void 0;
|
|
477
|
-
}
|
|
478
|
-
const batch = this._params.db.batch();
|
|
479
|
-
for await (const [key] of this._params.db.iterator({
|
|
480
|
-
gte: keyPrefix,
|
|
481
|
-
lte: [
|
|
482
|
-
...keyPrefix,
|
|
483
|
-
"\uFFFF"
|
|
484
|
-
],
|
|
485
|
-
...encodingOptions
|
|
486
|
-
})) {
|
|
487
|
-
batch.del(key, {
|
|
488
|
-
...encodingOptions
|
|
489
|
-
});
|
|
490
|
-
}
|
|
491
|
-
await batch.write();
|
|
492
|
-
}
|
|
493
|
-
};
|
|
494
|
-
var keyEncoder = {
|
|
495
|
-
encode: (key) => Buffer.from(key.map((k) => k.replaceAll("%", "%25").replaceAll("-", "%2D")).join("-")),
|
|
496
|
-
decode: (key) => Buffer.from(key).toString().split("-").map((k) => k.replaceAll("%2D", "-").replaceAll("%25", "%")),
|
|
497
|
-
format: "buffer"
|
|
498
|
-
};
|
|
499
|
-
var encodingOptions = {
|
|
500
|
-
keyEncoding: keyEncoder,
|
|
501
|
-
valueEncoding: "buffer"
|
|
502
|
-
};
|
|
503
|
-
var isLevelDbNotFoundError = (err) => err.code === "LEVEL_NOT_FOUND";
|
|
504
|
-
var isCollectionQueryMessage = (message) => message.type === import_protocols2.MESSAGE_TYPE_COLLECTION_QUERY;
|
|
505
|
-
var isCollectionStateMessage = (message) => message.type === import_protocols2.MESSAGE_TYPE_COLLECTION_STATE;
|
|
506
|
-
function _ts_decorate(decorators, target, key, desc) {
|
|
507
|
-
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
508
|
-
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
|
|
509
|
-
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
510
|
-
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
511
|
-
}
|
|
512
|
-
var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts";
|
|
513
|
-
var EchoNetworkAdapter = class extends import_automerge_repo2.NetworkAdapter {
|
|
514
|
-
constructor(_params) {
|
|
515
|
-
super();
|
|
516
|
-
this._params = _params;
|
|
517
|
-
this._replicators = /* @__PURE__ */ new Set();
|
|
518
|
-
this._connections = /* @__PURE__ */ new Map();
|
|
519
|
-
this._lifecycleState = import_context5.LifecycleState.CLOSED;
|
|
520
|
-
this._connected = new import_async4.Trigger();
|
|
521
|
-
}
|
|
522
|
-
connect(peerId, peerMetadata) {
|
|
523
|
-
this.peerId = peerId;
|
|
524
|
-
this.peerMetadata = peerMetadata;
|
|
525
|
-
this._connected.wake();
|
|
526
|
-
}
|
|
527
|
-
send(message) {
|
|
528
|
-
this._send(message);
|
|
529
|
-
}
|
|
530
|
-
disconnect() {
|
|
531
|
-
}
|
|
532
|
-
async open() {
|
|
533
|
-
if (this._lifecycleState === import_context5.LifecycleState.OPEN) {
|
|
534
|
-
return;
|
|
535
|
-
}
|
|
536
|
-
this._lifecycleState = import_context5.LifecycleState.OPEN;
|
|
537
|
-
(0, import_log3.log)("emit ready", void 0, {
|
|
538
|
-
F: __dxlog_file2,
|
|
539
|
-
L: 81,
|
|
540
|
-
S: this,
|
|
541
|
-
C: (f, a) => f(...a)
|
|
542
|
-
});
|
|
543
|
-
this.emit("ready", {
|
|
544
|
-
network: this
|
|
545
|
-
});
|
|
546
|
-
}
|
|
547
|
-
async close() {
|
|
548
|
-
if (this._lifecycleState === import_context5.LifecycleState.CLOSED) {
|
|
549
|
-
return this;
|
|
550
|
-
}
|
|
551
|
-
for (const replicator of this._replicators) {
|
|
552
|
-
await replicator.disconnect();
|
|
553
|
-
}
|
|
554
|
-
this._replicators.clear();
|
|
555
|
-
this._lifecycleState = import_context5.LifecycleState.CLOSED;
|
|
556
|
-
}
|
|
557
|
-
async whenConnected() {
|
|
558
|
-
await this._connected.wait({
|
|
559
|
-
timeout: 1e4
|
|
560
|
-
});
|
|
561
|
-
}
|
|
562
|
-
async addReplicator(replicator) {
|
|
563
|
-
(0, import_invariant3.invariant)(this._lifecycleState === import_context5.LifecycleState.OPEN, void 0, {
|
|
564
|
-
F: __dxlog_file2,
|
|
565
|
-
L: 107,
|
|
566
|
-
S: this,
|
|
567
|
-
A: [
|
|
568
|
-
"this._lifecycleState === LifecycleState.OPEN",
|
|
569
|
-
""
|
|
570
|
-
]
|
|
571
|
-
});
|
|
572
|
-
(0, import_invariant3.invariant)(this.peerId, void 0, {
|
|
573
|
-
F: __dxlog_file2,
|
|
574
|
-
L: 108,
|
|
575
|
-
S: this,
|
|
576
|
-
A: [
|
|
577
|
-
"this.peerId",
|
|
578
|
-
""
|
|
579
|
-
]
|
|
580
|
-
});
|
|
581
|
-
(0, import_invariant3.invariant)(!this._replicators.has(replicator), void 0, {
|
|
582
|
-
F: __dxlog_file2,
|
|
583
|
-
L: 109,
|
|
584
|
-
S: this,
|
|
585
|
-
A: [
|
|
586
|
-
"!this._replicators.has(replicator)",
|
|
587
|
-
""
|
|
588
|
-
]
|
|
589
|
-
});
|
|
590
|
-
this._replicators.add(replicator);
|
|
591
|
-
await replicator.connect({
|
|
592
|
-
peerId: this.peerId,
|
|
593
|
-
onConnectionOpen: this._onConnectionOpen.bind(this),
|
|
594
|
-
onConnectionClosed: this._onConnectionClosed.bind(this),
|
|
595
|
-
onConnectionAuthScopeChanged: this._onConnectionAuthScopeChanged.bind(this),
|
|
596
|
-
isDocumentInRemoteCollection: this._params.isDocumentInRemoteCollection,
|
|
597
|
-
getContainingSpaceForDocument: this._params.getContainingSpaceForDocument,
|
|
598
|
-
getContainingSpaceIdForDocument: async (documentId) => {
|
|
599
|
-
const key = await this._params.getContainingSpaceForDocument(documentId);
|
|
600
|
-
return key ? (0, import_chunk_DZVH7HDD.createIdFromSpaceKey)(key) : null;
|
|
601
|
-
}
|
|
602
|
-
});
|
|
603
|
-
}
|
|
604
|
-
async removeReplicator(replicator) {
|
|
605
|
-
(0, import_invariant3.invariant)(this._lifecycleState === import_context5.LifecycleState.OPEN, void 0, {
|
|
606
|
-
F: __dxlog_file2,
|
|
607
|
-
L: 128,
|
|
608
|
-
S: this,
|
|
609
|
-
A: [
|
|
610
|
-
"this._lifecycleState === LifecycleState.OPEN",
|
|
611
|
-
""
|
|
612
|
-
]
|
|
613
|
-
});
|
|
614
|
-
(0, import_invariant3.invariant)(this._replicators.has(replicator), void 0, {
|
|
615
|
-
F: __dxlog_file2,
|
|
616
|
-
L: 129,
|
|
617
|
-
S: this,
|
|
618
|
-
A: [
|
|
619
|
-
"this._replicators.has(replicator)",
|
|
620
|
-
""
|
|
621
|
-
]
|
|
622
|
-
});
|
|
623
|
-
await replicator.disconnect();
|
|
624
|
-
this._replicators.delete(replicator);
|
|
625
|
-
}
|
|
626
|
-
async shouldAdvertise(peerId, params) {
|
|
627
|
-
const connection = this._connections.get(peerId);
|
|
628
|
-
if (!connection) {
|
|
629
|
-
return false;
|
|
630
|
-
}
|
|
631
|
-
return connection.connection.shouldAdvertise(params);
|
|
632
|
-
}
|
|
633
|
-
shouldSyncCollection(peerId, params) {
|
|
634
|
-
const connection = this._connections.get(peerId);
|
|
635
|
-
if (!connection) {
|
|
636
|
-
return false;
|
|
637
|
-
}
|
|
638
|
-
return connection.connection.shouldSyncCollection(params);
|
|
639
|
-
}
|
|
640
|
-
queryCollectionState(collectionId, targetId) {
|
|
641
|
-
const message = {
|
|
642
|
-
type: "collection-query",
|
|
643
|
-
senderId: this.peerId,
|
|
644
|
-
targetId,
|
|
645
|
-
collectionId
|
|
646
|
-
};
|
|
647
|
-
this._send(message);
|
|
648
|
-
}
|
|
649
|
-
sendCollectionState(collectionId, targetId, state) {
|
|
650
|
-
const message = {
|
|
651
|
-
type: "collection-state",
|
|
652
|
-
senderId: this.peerId,
|
|
653
|
-
targetId,
|
|
654
|
-
collectionId,
|
|
655
|
-
state
|
|
656
|
-
};
|
|
657
|
-
this._send(message);
|
|
658
|
-
}
|
|
659
|
-
_send(message) {
|
|
660
|
-
const connectionEntry = this._connections.get(message.targetId);
|
|
661
|
-
if (!connectionEntry) {
|
|
662
|
-
throw new Error("Connection not found.");
|
|
663
|
-
}
|
|
664
|
-
const writeStart = Date.now();
|
|
665
|
-
connectionEntry.writer.write(message).then(() => {
|
|
666
|
-
const durationMs = Date.now() - writeStart;
|
|
667
|
-
this._params.monitor?.recordMessageSent(message, durationMs);
|
|
668
|
-
}).catch((err) => {
|
|
669
|
-
if (connectionEntry.isOpen) {
|
|
670
|
-
import_log3.log.catch(err, void 0, {
|
|
671
|
-
F: __dxlog_file2,
|
|
672
|
-
L: 189,
|
|
673
|
-
S: this,
|
|
674
|
-
C: (f, a) => f(...a)
|
|
675
|
-
});
|
|
676
|
-
}
|
|
677
|
-
this._params.monitor?.recordMessageSendingFailed(message);
|
|
678
|
-
});
|
|
679
|
-
}
|
|
680
|
-
// TODO(dmaretskyi): Remove.
|
|
681
|
-
getPeersInterestedInCollection(collectionId) {
|
|
682
|
-
return Array.from(this._connections.values()).map((connection) => {
|
|
683
|
-
return connection.connection.shouldSyncCollection({
|
|
684
|
-
collectionId
|
|
685
|
-
}) ? connection.connection.peerId : null;
|
|
686
|
-
}).filter(import_util2.nonNullable);
|
|
687
|
-
}
|
|
688
|
-
_onConnectionOpen(connection) {
|
|
689
|
-
(0, import_log3.log)("Connection opened", {
|
|
690
|
-
peerId: connection.peerId
|
|
691
|
-
}, {
|
|
692
|
-
F: __dxlog_file2,
|
|
693
|
-
L: 207,
|
|
694
|
-
S: this,
|
|
695
|
-
C: (f, a) => f(...a)
|
|
696
|
-
});
|
|
697
|
-
(0, import_invariant3.invariant)(!this._connections.has(connection.peerId), void 0, {
|
|
698
|
-
F: __dxlog_file2,
|
|
699
|
-
L: 208,
|
|
700
|
-
S: this,
|
|
701
|
-
A: [
|
|
702
|
-
"!this._connections.has(connection.peerId as PeerId)",
|
|
703
|
-
""
|
|
704
|
-
]
|
|
705
|
-
});
|
|
706
|
-
const reader = connection.readable.getReader();
|
|
707
|
-
const writer = connection.writable.getWriter();
|
|
708
|
-
const connectionEntry = {
|
|
709
|
-
connection,
|
|
710
|
-
reader,
|
|
711
|
-
writer,
|
|
712
|
-
isOpen: true
|
|
713
|
-
};
|
|
714
|
-
this._connections.set(connection.peerId, connectionEntry);
|
|
715
|
-
queueMicrotask(async () => {
|
|
716
|
-
try {
|
|
717
|
-
while (true) {
|
|
718
|
-
const { done, value } = await reader.read();
|
|
719
|
-
if (done) {
|
|
720
|
-
break;
|
|
721
|
-
}
|
|
722
|
-
this._onMessage(value);
|
|
723
|
-
}
|
|
724
|
-
} catch (err) {
|
|
725
|
-
if (connectionEntry.isOpen) {
|
|
726
|
-
import_log3.log.catch(err, void 0, {
|
|
727
|
-
F: __dxlog_file2,
|
|
728
|
-
L: 227,
|
|
729
|
-
S: this,
|
|
730
|
-
C: (f, a) => f(...a)
|
|
731
|
-
});
|
|
732
|
-
}
|
|
733
|
-
}
|
|
734
|
-
});
|
|
735
|
-
(0, import_log3.log)("emit peer-candidate", {
|
|
736
|
-
peerId: connection.peerId
|
|
737
|
-
}, {
|
|
738
|
-
F: __dxlog_file2,
|
|
739
|
-
L: 232,
|
|
740
|
-
S: this,
|
|
741
|
-
C: (f, a) => f(...a)
|
|
742
|
-
});
|
|
743
|
-
this._emitPeerCandidate(connection);
|
|
744
|
-
this._params.monitor?.recordPeerConnected(connection.peerId);
|
|
745
|
-
}
|
|
746
|
-
_onMessage(message) {
|
|
747
|
-
if (isCollectionQueryMessage(message)) {
|
|
748
|
-
this._params.onCollectionStateQueried(message.collectionId, message.senderId);
|
|
749
|
-
} else if (isCollectionStateMessage(message)) {
|
|
750
|
-
this._params.onCollectionStateReceived(message.collectionId, message.senderId, message.state);
|
|
751
|
-
} else {
|
|
752
|
-
this.emit("message", message);
|
|
753
|
-
}
|
|
754
|
-
this._params.monitor?.recordMessageReceived(message);
|
|
755
|
-
}
|
|
756
|
-
/**
|
|
757
|
-
* Trigger doc-synchronizer shared documents set recalculation. Happens on peer-candidate.
|
|
758
|
-
* TODO(y): replace with a proper API call when sharePolicy update becomes supported by automerge-repo
|
|
759
|
-
*/
|
|
760
|
-
_onConnectionAuthScopeChanged(connection) {
|
|
761
|
-
(0, import_log3.log)("Connection auth scope changed", {
|
|
762
|
-
peerId: connection.peerId
|
|
763
|
-
}, {
|
|
764
|
-
F: __dxlog_file2,
|
|
765
|
-
L: 253,
|
|
766
|
-
S: this,
|
|
767
|
-
C: (f, a) => f(...a)
|
|
768
|
-
});
|
|
769
|
-
const entry = this._connections.get(connection.peerId);
|
|
770
|
-
(0, import_invariant3.invariant)(entry, void 0, {
|
|
771
|
-
F: __dxlog_file2,
|
|
772
|
-
L: 255,
|
|
773
|
-
S: this,
|
|
774
|
-
A: [
|
|
775
|
-
"entry",
|
|
776
|
-
""
|
|
777
|
-
]
|
|
778
|
-
});
|
|
779
|
-
this.emit("peer-disconnected", {
|
|
780
|
-
peerId: connection.peerId
|
|
781
|
-
});
|
|
782
|
-
this._emitPeerCandidate(connection);
|
|
783
|
-
}
|
|
784
|
-
_onConnectionClosed(connection) {
|
|
785
|
-
(0, import_log3.log)("Connection closed", {
|
|
786
|
-
peerId: connection.peerId
|
|
787
|
-
}, {
|
|
788
|
-
F: __dxlog_file2,
|
|
789
|
-
L: 261,
|
|
790
|
-
S: this,
|
|
791
|
-
C: (f, a) => f(...a)
|
|
792
|
-
});
|
|
793
|
-
const entry = this._connections.get(connection.peerId);
|
|
794
|
-
(0, import_invariant3.invariant)(entry, void 0, {
|
|
795
|
-
F: __dxlog_file2,
|
|
796
|
-
L: 263,
|
|
797
|
-
S: this,
|
|
798
|
-
A: [
|
|
799
|
-
"entry",
|
|
800
|
-
""
|
|
801
|
-
]
|
|
802
|
-
});
|
|
803
|
-
entry.isOpen = false;
|
|
804
|
-
this.emit("peer-disconnected", {
|
|
805
|
-
peerId: connection.peerId
|
|
806
|
-
});
|
|
807
|
-
this._params.monitor?.recordPeerDisconnected(connection.peerId);
|
|
808
|
-
void entry.reader.cancel().catch((err) => import_log3.log.catch(err, void 0, {
|
|
809
|
-
F: __dxlog_file2,
|
|
810
|
-
L: 269,
|
|
811
|
-
S: this,
|
|
812
|
-
C: (f, a) => f(...a)
|
|
813
|
-
}));
|
|
814
|
-
void entry.writer.abort().catch((err) => import_log3.log.catch(err, void 0, {
|
|
815
|
-
F: __dxlog_file2,
|
|
816
|
-
L: 270,
|
|
817
|
-
S: this,
|
|
818
|
-
C: (f, a) => f(...a)
|
|
819
|
-
}));
|
|
820
|
-
this._connections.delete(connection.peerId);
|
|
821
|
-
}
|
|
822
|
-
_emitPeerCandidate(connection) {
|
|
823
|
-
this.emit("peer-candidate", {
|
|
824
|
-
peerId: connection.peerId,
|
|
825
|
-
peerMetadata: createEchoPeerMetadata()
|
|
826
|
-
});
|
|
827
|
-
}
|
|
828
|
-
};
|
|
829
|
-
_ts_decorate([
|
|
830
|
-
import_async4.synchronized
|
|
831
|
-
], EchoNetworkAdapter.prototype, "open", null);
|
|
832
|
-
_ts_decorate([
|
|
833
|
-
import_async4.synchronized
|
|
834
|
-
], EchoNetworkAdapter.prototype, "close", null);
|
|
835
|
-
_ts_decorate([
|
|
836
|
-
import_async4.synchronized
|
|
837
|
-
], EchoNetworkAdapter.prototype, "addReplicator", null);
|
|
838
|
-
_ts_decorate([
|
|
839
|
-
import_async4.synchronized
|
|
840
|
-
], EchoNetworkAdapter.prototype, "removeReplicator", null);
|
|
841
|
-
var createEchoPeerMetadata = () => ({
|
|
842
|
-
// TODO(dmaretskyi): Refactor this.
|
|
843
|
-
dxos_peerSource: "EchoNetworkAdapter"
|
|
844
|
-
});
|
|
845
|
-
var isEchoPeerMetadata = (metadata) => metadata?.dxos_peerSource === "EchoNetworkAdapter";
|
|
846
|
-
var HeadsStore = class {
|
|
847
|
-
constructor({ db }) {
|
|
848
|
-
this._db = db;
|
|
849
|
-
}
|
|
850
|
-
setHeads(documentId, heads, batch) {
|
|
851
|
-
batch.put(documentId, heads, {
|
|
852
|
-
sublevel: this._db,
|
|
853
|
-
keyEncoding: "utf8",
|
|
854
|
-
valueEncoding: import_indexing.headsEncoding
|
|
855
|
-
});
|
|
856
|
-
}
|
|
857
|
-
// TODO(dmaretskyi): Make batched.
|
|
858
|
-
async getHeads(documentIds) {
|
|
859
|
-
return this._db.getMany(documentIds, {
|
|
860
|
-
keyEncoding: "utf8",
|
|
861
|
-
valueEncoding: import_indexing.headsEncoding
|
|
862
|
-
});
|
|
863
|
-
}
|
|
864
|
-
};
|
|
865
|
-
function _ts_decorate2(decorators, target, key, desc) {
|
|
866
|
-
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
867
|
-
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
|
|
868
|
-
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
869
|
-
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
870
|
-
}
|
|
871
|
-
var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts";
|
|
872
|
-
var AutomergeHost = class extends import_context4.Resource {
|
|
873
|
-
constructor({ db, indexMetadataStore, dataMonitor }) {
|
|
874
|
-
super();
|
|
875
|
-
this._collectionSynchronizer = new CollectionSynchronizer({
|
|
876
|
-
queryCollectionState: this._queryCollectionState.bind(this),
|
|
877
|
-
sendCollectionState: this._sendCollectionState.bind(this),
|
|
878
|
-
shouldSyncCollection: this._shouldSyncCollection.bind(this)
|
|
879
|
-
});
|
|
880
|
-
this._db = db;
|
|
881
|
-
this._storage = new LevelDBStorageAdapter({
|
|
882
|
-
db: db.sublevel("automerge"),
|
|
883
|
-
callbacks: {
|
|
884
|
-
beforeSave: async (params) => this._beforeSave(params),
|
|
885
|
-
afterSave: async (key) => this._afterSave(key)
|
|
886
|
-
},
|
|
887
|
-
monitor: dataMonitor
|
|
888
|
-
});
|
|
889
|
-
this._echoNetworkAdapter = new EchoNetworkAdapter({
|
|
890
|
-
getContainingSpaceForDocument: this._getContainingSpaceForDocument.bind(this),
|
|
891
|
-
isDocumentInRemoteCollection: this._isDocumentInRemoteCollection.bind(this),
|
|
892
|
-
onCollectionStateQueried: this._onCollectionStateQueried.bind(this),
|
|
893
|
-
onCollectionStateReceived: this._onCollectionStateReceived.bind(this),
|
|
894
|
-
monitor: dataMonitor
|
|
895
|
-
});
|
|
896
|
-
this._headsStore = new HeadsStore({
|
|
897
|
-
db: db.sublevel("heads")
|
|
898
|
-
});
|
|
899
|
-
this._indexMetadataStore = indexMetadataStore;
|
|
900
|
-
}
|
|
901
|
-
async _open() {
|
|
902
|
-
this._peerId = `host-${import_keys.PublicKey.random().toHex()}`;
|
|
903
|
-
await this._storage.open?.();
|
|
904
|
-
this._repo = new import_automerge_repo.Repo({
|
|
905
|
-
peerId: this._peerId,
|
|
906
|
-
sharePolicy: this._sharePolicy.bind(this),
|
|
907
|
-
storage: this._storage,
|
|
908
|
-
network: [
|
|
909
|
-
// Upstream swarm.
|
|
910
|
-
this._echoNetworkAdapter
|
|
911
|
-
]
|
|
912
|
-
});
|
|
913
|
-
import_async3.Event.wrap(this._echoNetworkAdapter, "peer-candidate").on(this._ctx, (e) => this._onPeerConnected(e.peerId));
|
|
914
|
-
import_async3.Event.wrap(this._echoNetworkAdapter, "peer-disconnected").on(this._ctx, (e) => this._onPeerDisconnected(e.peerId));
|
|
915
|
-
this._collectionSynchronizer.remoteStateUpdated.on(this._ctx, ({ collectionId, peerId }) => {
|
|
916
|
-
this._onRemoteCollectionStateUpdated(collectionId, peerId);
|
|
917
|
-
});
|
|
918
|
-
await this._echoNetworkAdapter.open();
|
|
919
|
-
await this._collectionSynchronizer.open();
|
|
920
|
-
await this._echoNetworkAdapter.open();
|
|
921
|
-
await this._echoNetworkAdapter.whenConnected();
|
|
922
|
-
}
|
|
923
|
-
async _close() {
|
|
924
|
-
await this._collectionSynchronizer.close();
|
|
925
|
-
await this._storage.close?.();
|
|
926
|
-
await this._echoNetworkAdapter.close();
|
|
927
|
-
await this._ctx.dispose();
|
|
928
|
-
}
|
|
929
|
-
/**
|
|
930
|
-
* @deprecated To be abstracted away.
|
|
931
|
-
*/
|
|
932
|
-
get repo() {
|
|
933
|
-
return this._repo;
|
|
934
|
-
}
|
|
935
|
-
get peerId() {
|
|
936
|
-
return this._peerId;
|
|
937
|
-
}
|
|
938
|
-
get loadedDocsCount() {
|
|
939
|
-
return Object.keys(this._repo.handles).length;
|
|
940
|
-
}
|
|
941
|
-
async addReplicator(replicator) {
|
|
942
|
-
await this._echoNetworkAdapter.addReplicator(replicator);
|
|
943
|
-
}
|
|
944
|
-
async removeReplicator(replicator) {
|
|
945
|
-
await this._echoNetworkAdapter.removeReplicator(replicator);
|
|
946
|
-
}
|
|
947
|
-
/**
|
|
948
|
-
* Loads the document handle from the repo and waits for it to be ready.
|
|
949
|
-
*/
|
|
950
|
-
async loadDoc(ctx, documentId, opts) {
|
|
951
|
-
let handle;
|
|
952
|
-
if (typeof documentId === "string") {
|
|
953
|
-
handle = this._repo.handles[documentId];
|
|
954
|
-
}
|
|
955
|
-
if (!handle) {
|
|
956
|
-
handle = this._repo.find(documentId);
|
|
957
|
-
}
|
|
958
|
-
if (!handle.isReady()) {
|
|
959
|
-
if (!opts?.timeout) {
|
|
960
|
-
await (0, import_context4.cancelWithContext)(ctx, handle.whenReady());
|
|
961
|
-
} else {
|
|
962
|
-
await (0, import_context4.cancelWithContext)(ctx, (0, import_async3.asyncTimeout)(handle.whenReady(), opts.timeout));
|
|
963
|
-
}
|
|
964
|
-
}
|
|
965
|
-
return handle;
|
|
966
|
-
}
|
|
967
|
-
/**
|
|
968
|
-
* Create new persisted document.
|
|
969
|
-
*/
|
|
970
|
-
createDoc(initialValue, opts) {
|
|
971
|
-
if (opts?.preserveHistory) {
|
|
972
|
-
if (!(0, import_automerge3.isAutomerge)(initialValue)) {
|
|
973
|
-
throw new TypeError("Initial value must be an Automerge document");
|
|
974
|
-
}
|
|
975
|
-
return this._repo.import((0, import_automerge3.save)(initialValue));
|
|
976
|
-
} else {
|
|
977
|
-
return this._repo.create(initialValue);
|
|
978
|
-
}
|
|
979
|
-
}
|
|
980
|
-
async waitUntilHeadsReplicated(heads) {
|
|
981
|
-
const entries = heads.entries;
|
|
982
|
-
if (!entries?.length) {
|
|
983
|
-
return;
|
|
984
|
-
}
|
|
985
|
-
const documentIds = entries.map((entry) => entry.documentId);
|
|
986
|
-
const documentHeads = await this.getHeads(documentIds);
|
|
987
|
-
const headsToWait = entries.filter((entry, index) => {
|
|
988
|
-
const targetHeads = entry.heads;
|
|
989
|
-
if (!targetHeads || targetHeads.length === 0) {
|
|
990
|
-
return false;
|
|
991
|
-
}
|
|
992
|
-
const currentHeads = documentHeads[index];
|
|
993
|
-
return !(currentHeads !== null && (0, import_automerge3.equals)(currentHeads, targetHeads));
|
|
994
|
-
});
|
|
995
|
-
if (headsToWait.length > 0) {
|
|
996
|
-
await Promise.all(headsToWait.map(async (entry, index) => {
|
|
997
|
-
const handle = await this.loadDoc(import_context4.Context.default(void 0, {
|
|
998
|
-
F: __dxlog_file3,
|
|
999
|
-
L: 227
|
|
1000
|
-
}), entry.documentId);
|
|
1001
|
-
await waitForHeads(handle, entry.heads);
|
|
1002
|
-
}));
|
|
1003
|
-
}
|
|
1004
|
-
await this._repo.flush(documentIds.filter((documentId) => !!this._repo.handles[documentId]));
|
|
1005
|
-
}
|
|
1006
|
-
async reIndexHeads(documentIds) {
|
|
1007
|
-
for (const documentId of documentIds) {
|
|
1008
|
-
import_log2.log.info("re-indexing heads for document", {
|
|
1009
|
-
documentId
|
|
1010
|
-
}, {
|
|
1011
|
-
F: __dxlog_file3,
|
|
1012
|
-
L: 239,
|
|
1013
|
-
S: this,
|
|
1014
|
-
C: (f, a) => f(...a)
|
|
1015
|
-
});
|
|
1016
|
-
const handle = this._repo.find(documentId);
|
|
1017
|
-
await handle.whenReady([
|
|
1018
|
-
"ready",
|
|
1019
|
-
"requesting"
|
|
1020
|
-
]);
|
|
1021
|
-
if (handle.inState([
|
|
1022
|
-
"requesting"
|
|
1023
|
-
])) {
|
|
1024
|
-
import_log2.log.warn("document is not available locally, skipping", {
|
|
1025
|
-
documentId
|
|
1026
|
-
}, {
|
|
1027
|
-
F: __dxlog_file3,
|
|
1028
|
-
L: 243,
|
|
1029
|
-
S: this,
|
|
1030
|
-
C: (f, a) => f(...a)
|
|
1031
|
-
});
|
|
1032
|
-
continue;
|
|
1033
|
-
}
|
|
1034
|
-
const doc = handle.docSync();
|
|
1035
|
-
(0, import_invariant2.invariant)(doc, void 0, {
|
|
1036
|
-
F: __dxlog_file3,
|
|
1037
|
-
L: 248,
|
|
1038
|
-
S: this,
|
|
1039
|
-
A: [
|
|
1040
|
-
"doc",
|
|
1041
|
-
""
|
|
1042
|
-
]
|
|
1043
|
-
});
|
|
1044
|
-
const heads = (0, import_automerge3.getHeads)(doc);
|
|
1045
|
-
const batch = this._db.batch();
|
|
1046
|
-
this._headsStore.setHeads(documentId, heads, batch);
|
|
1047
|
-
await batch.write();
|
|
1048
|
-
}
|
|
1049
|
-
import_log2.log.info("done re-indexing heads", void 0, {
|
|
1050
|
-
F: __dxlog_file3,
|
|
1051
|
-
L: 255,
|
|
1052
|
-
S: this,
|
|
1053
|
-
C: (f, a) => f(...a)
|
|
1054
|
-
});
|
|
1055
|
-
}
|
|
1056
|
-
// TODO(dmaretskyi): Share based on HALO permissions and space affinity.
|
|
1057
|
-
// Hosts, running in the worker, don't share documents unless requested by other peers.
|
|
1058
|
-
// NOTE: If both peers return sharePolicy=false the replication will not happen
|
|
1059
|
-
// https://github.com/automerge/automerge-repo/pull/292
|
|
1060
|
-
async _sharePolicy(peerId, documentId) {
|
|
1061
|
-
if (peerId.startsWith("client-")) {
|
|
1062
|
-
return false;
|
|
1063
|
-
}
|
|
1064
|
-
if (!documentId) {
|
|
1065
|
-
return false;
|
|
1066
|
-
}
|
|
1067
|
-
const peerMetadata = this.repo.peerMetadataByPeerId[peerId];
|
|
1068
|
-
if (isEchoPeerMetadata(peerMetadata)) {
|
|
1069
|
-
return this._echoNetworkAdapter.shouldAdvertise(peerId, {
|
|
1070
|
-
documentId
|
|
1071
|
-
});
|
|
1072
|
-
}
|
|
1073
|
-
return false;
|
|
1074
|
-
}
|
|
1075
|
-
async _beforeSave({ path, batch }) {
|
|
1076
|
-
const handle = this._repo.handles[path[0]];
|
|
1077
|
-
if (!handle) {
|
|
1078
|
-
return;
|
|
1079
|
-
}
|
|
1080
|
-
const doc = handle.docSync();
|
|
1081
|
-
if (!doc) {
|
|
1082
|
-
return;
|
|
1083
|
-
}
|
|
1084
|
-
const heads = (0, import_automerge3.getHeads)(doc);
|
|
1085
|
-
this._headsStore.setHeads(handle.documentId, heads, batch);
|
|
1086
|
-
const spaceKey = getSpaceKeyFromDoc(doc) ?? void 0;
|
|
1087
|
-
const objectIds = Object.keys(doc.objects ?? {});
|
|
1088
|
-
const encodedIds = objectIds.map((objectId) => import_protocols.objectPointerCodec.encode({
|
|
1089
|
-
documentId: handle.documentId,
|
|
1090
|
-
objectId,
|
|
1091
|
-
spaceKey
|
|
1092
|
-
}));
|
|
1093
|
-
const idToLastHash = new Map(encodedIds.map((id) => [
|
|
1094
|
-
id,
|
|
1095
|
-
heads
|
|
1096
|
-
]));
|
|
1097
|
-
this._indexMetadataStore.markDirty(idToLastHash, batch);
|
|
1098
|
-
}
|
|
1099
|
-
_shouldSyncCollection(collectionId, peerId) {
|
|
1100
|
-
const peerMetadata = this._repo.peerMetadataByPeerId[peerId];
|
|
1101
|
-
if (isEchoPeerMetadata(peerMetadata)) {
|
|
1102
|
-
return this._echoNetworkAdapter.shouldSyncCollection(peerId, {
|
|
1103
|
-
collectionId
|
|
1104
|
-
});
|
|
1105
|
-
}
|
|
1106
|
-
return false;
|
|
1107
|
-
}
|
|
1108
|
-
/**
|
|
1109
|
-
* Called by AutomergeStorageAdapter after levelDB batch commit.
|
|
1110
|
-
*/
|
|
1111
|
-
async _afterSave(path) {
|
|
1112
|
-
this._indexMetadataStore.notifyMarkedDirty();
|
|
1113
|
-
const documentId = path[0];
|
|
1114
|
-
const document = this._repo.handles[documentId]?.docSync();
|
|
1115
|
-
if (document) {
|
|
1116
|
-
const heads = (0, import_automerge3.getHeads)(document);
|
|
1117
|
-
this._onHeadsChanged(documentId, heads);
|
|
1118
|
-
}
|
|
1119
|
-
}
|
|
1120
|
-
_automergePeers() {
|
|
1121
|
-
return this._repo.peers;
|
|
1122
|
-
}
|
|
1123
|
-
async _isDocumentInRemoteCollection(params) {
|
|
1124
|
-
for (const collectionId of this._collectionSynchronizer.getRegisteredCollectionIds()) {
|
|
1125
|
-
const remoteCollections = this._collectionSynchronizer.getRemoteCollectionStates(collectionId);
|
|
1126
|
-
const remotePeerDocs = remoteCollections.get(params.peerId)?.documents;
|
|
1127
|
-
if (remotePeerDocs && params.documentId in remotePeerDocs) {
|
|
1128
|
-
return true;
|
|
1129
|
-
}
|
|
1130
|
-
}
|
|
1131
|
-
return false;
|
|
1132
|
-
}
|
|
1133
|
-
async _getContainingSpaceForDocument(documentId) {
|
|
1134
|
-
const doc = this._repo.handles[documentId]?.docSync();
|
|
1135
|
-
if (!doc) {
|
|
1136
|
-
return null;
|
|
1137
|
-
}
|
|
1138
|
-
const spaceKeyHex = getSpaceKeyFromDoc(doc);
|
|
1139
|
-
if (!spaceKeyHex) {
|
|
1140
|
-
return null;
|
|
1141
|
-
}
|
|
1142
|
-
return import_keys.PublicKey.from(spaceKeyHex);
|
|
1143
|
-
}
|
|
1144
|
-
/**
|
|
1145
|
-
* Flush documents to disk.
|
|
1146
|
-
*/
|
|
1147
|
-
async flush({ documentIds } = {}) {
|
|
1148
|
-
await this._repo.flush(documentIds);
|
|
1149
|
-
}
|
|
1150
|
-
async getHeads(documentIds) {
|
|
1151
|
-
const result = [];
|
|
1152
|
-
const storeRequestIds = [];
|
|
1153
|
-
const storeResultIndices = [];
|
|
1154
|
-
for (const documentId of documentIds) {
|
|
1155
|
-
const doc = this._repo.handles[documentId]?.docSync();
|
|
1156
|
-
if (doc) {
|
|
1157
|
-
result.push((0, import_automerge3.getHeads)(doc));
|
|
1158
|
-
} else {
|
|
1159
|
-
storeRequestIds.push(documentId);
|
|
1160
|
-
storeResultIndices.push(result.length);
|
|
1161
|
-
result.push(void 0);
|
|
1162
|
-
}
|
|
1163
|
-
}
|
|
1164
|
-
if (storeRequestIds.length > 0) {
|
|
1165
|
-
const storedHeads = await this._headsStore.getHeads(storeRequestIds);
|
|
1166
|
-
for (let i = 0; i < storedHeads.length; i++) {
|
|
1167
|
-
result[storeResultIndices[i]] = storedHeads[i];
|
|
1168
|
-
}
|
|
1169
|
-
}
|
|
1170
|
-
return result;
|
|
1171
|
-
}
|
|
1172
|
-
//
|
|
1173
|
-
// Collection sync.
|
|
1174
|
-
//
|
|
1175
|
-
getLocalCollectionState(collectionId) {
|
|
1176
|
-
return this._collectionSynchronizer.getLocalCollectionState(collectionId);
|
|
1177
|
-
}
|
|
1178
|
-
getRemoteCollectionStates(collectionId) {
|
|
1179
|
-
return this._collectionSynchronizer.getRemoteCollectionStates(collectionId);
|
|
1180
|
-
}
|
|
1181
|
-
refreshCollection(collectionId) {
|
|
1182
|
-
this._collectionSynchronizer.refreshCollection(collectionId);
|
|
1183
|
-
}
|
|
1184
|
-
async getCollectionSyncState(collectionId) {
|
|
1185
|
-
const result = {
|
|
1186
|
-
peers: []
|
|
1187
|
-
};
|
|
1188
|
-
const localState = this.getLocalCollectionState(collectionId);
|
|
1189
|
-
const remoteState = this.getRemoteCollectionStates(collectionId);
|
|
1190
|
-
if (!localState) {
|
|
1191
|
-
return result;
|
|
1192
|
-
}
|
|
1193
|
-
for (const [peerId, state] of remoteState) {
|
|
1194
|
-
const diff = diffCollectionState(localState, state);
|
|
1195
|
-
result.peers.push({
|
|
1196
|
-
peerId,
|
|
1197
|
-
differentDocuments: diff.different.length
|
|
1198
|
-
});
|
|
1199
|
-
}
|
|
1200
|
-
return result;
|
|
1201
|
-
}
|
|
1202
|
-
  /**
   * Update the local collection state based on the locally stored document heads.
   */
  async updateLocalCollectionState(collectionId, documentIds) {
    const heads = await this.getHeads(documentIds);
    const documents = Object.fromEntries(heads.map((heads2, index) => [
      documentIds[index],
      heads2 ?? []
    ]));
    this._collectionSynchronizer.setLocalCollectionState(collectionId, {
      documents
    });
  }
  _onCollectionStateQueried(collectionId, peerId) {
    this._collectionSynchronizer.onCollectionStateQueried(collectionId, peerId);
  }
  _onCollectionStateReceived(collectionId, peerId, state) {
    this._collectionSynchronizer.onRemoteStateReceived(collectionId, peerId, decodeCollectionState(state));
  }
  _queryCollectionState(collectionId, peerId) {
    this._echoNetworkAdapter.queryCollectionState(collectionId, peerId);
  }
  _sendCollectionState(collectionId, peerId, state) {
    this._echoNetworkAdapter.sendCollectionState(collectionId, peerId, encodeCollectionState(state));
  }
  _onPeerConnected(peerId) {
    this._collectionSynchronizer.onConnectionOpen(peerId);
  }
  _onPeerDisconnected(peerId) {
    this._collectionSynchronizer.onConnectionClosed(peerId);
  }
  _onRemoteCollectionStateUpdated(collectionId, peerId) {
    const localState = this._collectionSynchronizer.getLocalCollectionState(collectionId);
    const remoteState = this._collectionSynchronizer.getRemoteCollectionStates(collectionId).get(peerId);
    if (!localState || !remoteState) {
      return;
    }
    const { different } = diffCollectionState(localState, remoteState);
    if (different.length === 0) {
      return;
    }
    import_log2.log.info("replication documents after collection sync", {
      count: different.length
    }, {
      F: __dxlog_file3,
      L: 475,
      S: this,
      C: (f, a) => f(...a)
    });
    for (const documentId of different) {
      this._repo.find(documentId);
    }
  }
  _onHeadsChanged(documentId, heads) {
    for (const collectionId of this._collectionSynchronizer.getRegisteredCollectionIds()) {
      const state = this._collectionSynchronizer.getLocalCollectionState(collectionId);
      if (state?.documents[documentId]) {
        const newState = structuredClone(state);
        newState.documents[documentId] = heads;
        this._collectionSynchronizer.setLocalCollectionState(collectionId, newState);
      }
    }
  }
};
_ts_decorate2([
  import_tracing.trace.info()
], AutomergeHost.prototype, "_peerId", void 0);
_ts_decorate2([
  import_tracing.trace.info({
    depth: null
  })
], AutomergeHost.prototype, "_automergePeers", null);
_ts_decorate2([
  import_tracing.trace.span({
    showInBrowserTimeline: true
  })
], AutomergeHost.prototype, "flush", null);
AutomergeHost = _ts_decorate2([
  import_tracing.trace.resource()
], AutomergeHost);
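// --- Editor's note (illustration, not part of the package source): collection sync
// exchanges maps of documentId -> heads. When a remote state arrives, the host diffs
// it against the local state and calls `repo.find()` on each differing document,
// which pulls it through the normal Automerge replication path. A hedged sketch of
// the state shape consumed by `diffCollectionState` (field names taken from the code
// above; the head hashes are hypothetical):
function exampleCollectionStates() {
  const localState = { documents: { doc1: ["headA"], doc2: ["headB"] } };
  const remoteState = { documents: { doc1: ["headA"], doc2: ["headC"] } };
  // diffCollectionState(localState, remoteState).different would include "doc2".
  return { localState, remoteState };
}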
var getSpaceKeyFromDoc = (doc) => {
  const rawSpaceKey = doc.access?.spaceKey ?? doc.experimental_spaceKey;
  if (rawSpaceKey == null) {
    return null;
  }
  return String(rawSpaceKey);
};
var waitForHeads = async (handle, heads) => {
  const unavailableHeads = new Set(heads);
  await handle.whenReady();
  await import_async3.Event.wrap(handle, "change").waitForCondition(() => {
    for (const changeHash of unavailableHeads.values()) {
      if (changeIsPresentInDoc(handle.docSync(), changeHash)) {
        unavailableHeads.delete(changeHash);
      }
    }
    return unavailableHeads.size === 0;
  });
};
var changeIsPresentInDoc = (doc, changeHash) => {
  return !!(0, import_automerge3.getBackend)(doc).getChangeByHash(changeHash);
};
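// --- Editor's note (illustration, not part of the package source): `waitForHeads` is
// a condition-wait over document change events: the target heads form a shrinking set,
// re-checked on every "change" until every hash is present in the doc's backend. The
// same pattern in plain JavaScript, with a hypothetical emitter exposing `on`/`off`:
function exampleWaitForCondition(emitter, condition) {
  return new Promise((resolve) => {
    const check = () => {
      if (condition()) {
        emitter.off("change", check);
        resolve();
      }
    };
    emitter.on("change", check);
    check(); // The condition may already hold before any event fires.
  });
}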
var decodeCollectionState = (state) => {
  (0, import_invariant2.invariant)(typeof state === "object" && state !== null, "Invalid state", {
    F: __dxlog_file3,
    L: 528,
    S: void 0,
    A: [
      "typeof state === 'object' && state !== null",
      "'Invalid state'"
    ]
  });
  return state;
};
var encodeCollectionState = (state) => {
  return state;
};
var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/space-collection.ts";
var deriveCollectionIdFromSpaceId = (spaceId) => `space:${spaceId}`;
var getSpaceIdFromCollectionId = (collectionId) => {
  const spaceId = collectionId.replace(/^space:/, "");
  (0, import_invariant4.invariant)(import_keys2.SpaceId.isValid(spaceId), void 0, {
    F: __dxlog_file4,
    L: 12,
    S: void 0,
    A: [
      "SpaceId.isValid(spaceId)",
      ""
    ]
  });
  return spaceId;
};
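// --- Editor's note (illustration, not part of the package source): collection ids
// are derived by prefixing the space id, and parsed back by stripping the prefix plus
// a validity invariant. A minimal round-trip with a hypothetical space id string:
function exampleCollectionIdRoundTrip() {
  const spaceId = "SPACE_ID"; // hypothetical valid SpaceId string
  const collectionId = `space:${spaceId}`; // deriveCollectionIdFromSpaceId
  const parsed = collectionId.replace(/^space:/, ""); // getSpaceIdFromCollectionId, minus the invariant
  return parsed === spaceId; // true
}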
var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts";
var DEFAULT_FACTORY = (params) => new import_teleport_extension_automerge_replicator.AutomergeReplicator(...params);
var MeshReplicatorConnection = class extends import_context6.Resource {
  constructor(_params) {
    super();
    this._params = _params;
    this.remoteDeviceKey = null;
    this._remotePeerId = null;
    this._isEnabled = false;
    let readableStreamController;
    this.readable = new ReadableStream({
      start: (controller) => {
        readableStreamController = controller;
        this._ctx.onDispose(() => controller.close());
      }
    });
    this.writable = new WritableStream({
      write: async (message, controller) => {
        (0, import_invariant6.invariant)(this._isEnabled, "Writing to a disabled connection", {
          F: __dxlog_file5,
          L: 49,
          S: this,
          A: [
            "this._isEnabled",
            "'Writing to a disabled connection'"
          ]
        });
        try {
          logSendSync(message);
          await this.replicatorExtension.sendSyncMessage({
            payload: import_automerge_repo3.cbor.encode(message)
          });
        } catch (err) {
          controller.error(err);
          this._disconnectIfEnabled();
        }
      }
    });
    const createAutomergeReplicator = this._params.replicatorFactory ?? DEFAULT_FACTORY;
    this.replicatorExtension = createAutomergeReplicator([
      {
        peerId: this._params.ownPeerId
      },
      {
        onStartReplication: async (info, remotePeerId) => {
          this.remoteDeviceKey = remotePeerId;
          this._remotePeerId = info.id;
          (0, import_log5.log)("onStartReplication", {
            id: info.id,
            thisPeerId: this.peerId,
            remotePeerId: remotePeerId.toHex()
          }, {
            F: __dxlog_file5,
            L: 84,
            S: this,
            C: (f, a) => f(...a)
          });
          this._params.onRemoteConnected();
        },
        onSyncMessage: async ({ payload }) => {
          if (!this._isEnabled) {
            return;
          }
          const message = import_automerge_repo3.cbor.decode(payload);
          readableStreamController.enqueue(message);
        },
        onClose: async () => {
          this._disconnectIfEnabled();
        }
      }
    ]);
  }
  _disconnectIfEnabled() {
    if (this._isEnabled) {
      this._params.onRemoteDisconnected();
    }
  }
  get peerId() {
    (0, import_invariant6.invariant)(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file5,
      L: 110,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    return this._remotePeerId;
  }
  async shouldAdvertise(params) {
    return this._params.shouldAdvertise(params);
  }
  shouldSyncCollection(params) {
    return this._params.shouldSyncCollection(params);
  }
  /**
   * Start exchanging messages with the remote peer.
   * Call after the remote peer has connected.
   */
  enable() {
    (0, import_invariant6.invariant)(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file5,
      L: 127,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    this._isEnabled = true;
  }
  /**
   * Stop exchanging messages with the remote peer.
   */
  disable() {
    this._isEnabled = false;
  }
};
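// --- Editor's note (illustration, not part of the package source): the connection
// bridges a callback-style transport into Web Streams: incoming sync payloads are
// decoded and enqueued on `readable`, while writes to `writable` are encoded and
// handed to the transport. The same bridge pattern in miniature, with a hypothetical
// `transport` exposing `send(bytes)` and an `onMessage` callback slot:
function exampleStreamBridge(transport, encode, decode) {
  let controllerRef;
  const readable = new ReadableStream({
    start: (controller) => {
      controllerRef = controller;
      transport.onMessage = (bytes) => controllerRef.enqueue(decode(bytes));
    }
  });
  const writable = new WritableStream({
    write: async (message) => transport.send(encode(message))
  });
  return { readable, writable };
}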
var logSendSync = (message) => {
  (0, import_log5.log)("sendSyncMessage", () => {
    const decodedSyncMessage = message.type === "sync" && message.data ? A2.decodeSyncMessage(message.data) : void 0;
    return {
      sync: decodedSyncMessage && {
        headsLength: decodedSyncMessage.heads.length,
        requesting: decodedSyncMessage.need.length > 0,
        sendingChanges: decodedSyncMessage.changes.length > 0
      },
      type: message.type,
      from: message.senderId,
      to: message.targetId
    };
  }, {
    F: __dxlog_file5,
    L: 140,
    S: void 0,
    C: (f, a) => f(...a)
  });
};
var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts";
var MeshEchoReplicator = class {
  constructor() {
    this._connections = /* @__PURE__ */ new Set();
    this._connectionsPerPeer = /* @__PURE__ */ new Map();
    this._authorizedDevices = /* @__PURE__ */ new Map();
    this._context = null;
  }
  async connect(context) {
    this._context = context;
  }
  async disconnect() {
    for (const connection of this._connectionsPerPeer.values()) {
      this._context?.onConnectionClosed(connection);
    }
    for (const connection of this._connections) {
      await connection.close();
    }
    this._connections.clear();
    this._connectionsPerPeer.clear();
    this._context = null;
  }
  createExtension(extensionFactory) {
    (0, import_invariant5.invariant)(this._context, void 0, {
      F: __dxlog_file6,
      L: 56,
      S: this,
      A: [
        "this._context",
        ""
      ]
    });
    const connection = new MeshReplicatorConnection({
      ownPeerId: this._context.peerId,
      replicatorFactory: extensionFactory,
      onRemoteConnected: async () => {
        (0, import_log4.log)("onRemoteConnected", {
          peerId: connection.peerId
        }, {
          F: __dxlog_file6,
          L: 62,
          S: this,
          C: (f, a) => f(...a)
        });
        (0, import_invariant5.invariant)(this._context, void 0, {
          F: __dxlog_file6,
          L: 63,
          S: this,
          A: [
            "this._context",
            ""
          ]
        });
        if (this._connectionsPerPeer.has(connection.peerId)) {
          this._context.onConnectionAuthScopeChanged(connection);
        } else {
          this._connectionsPerPeer.set(connection.peerId, connection);
          this._context.onConnectionOpen(connection);
          connection.enable();
        }
      },
      onRemoteDisconnected: async () => {
        (0, import_log4.log)("onRemoteDisconnected", {
          peerId: connection.peerId
        }, {
          F: __dxlog_file6,
          L: 74,
          S: this,
          C: (f, a) => f(...a)
        });
        this._context?.onConnectionClosed(connection);
        this._connectionsPerPeer.delete(connection.peerId);
        connection.disable();
        this._connections.delete(connection);
      },
      shouldAdvertise: async (params) => {
        (0, import_log4.log)("shouldAdvertise", {
          peerId: connection.peerId,
          documentId: params.documentId
        }, {
          F: __dxlog_file6,
          L: 81,
          S: this,
          C: (f, a) => f(...a)
        });
        (0, import_invariant5.invariant)(this._context, void 0, {
          F: __dxlog_file6,
          L: 82,
          S: this,
          A: [
            "this._context",
            ""
          ]
        });
        try {
          const spaceKey = await this._context.getContainingSpaceForDocument(params.documentId);
          if (!spaceKey) {
            const remoteDocumentExists = await this._context.isDocumentInRemoteCollection({
              documentId: params.documentId,
              peerId: connection.peerId
            });
            (0, import_log4.log)("document not found locally for share policy check, accepting the remote document", {
              peerId: connection.peerId,
              documentId: params.documentId,
              remoteDocumentExists
            }, {
              F: __dxlog_file6,
              L: 90,
              S: this,
              C: (f, a) => f(...a)
            });
            return remoteDocumentExists;
          }
          const spaceId = await (0, import_chunk_DZVH7HDD.createIdFromSpaceKey)(spaceKey);
          const authorizedDevices = this._authorizedDevices.get(spaceId);
          if (!connection.remoteDeviceKey) {
            (0, import_log4.log)("device key not found for share policy check", {
              peerId: connection.peerId,
              documentId: params.documentId
            }, {
              F: __dxlog_file6,
              L: 106,
              S: this,
              C: (f, a) => f(...a)
            });
            return false;
          }
          const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
          (0, import_log4.log)("share policy check", {
            localPeer: this._context.peerId,
            remotePeer: connection.peerId,
            documentId: params.documentId,
            deviceKey: connection.remoteDeviceKey,
            spaceKey,
            isAuthorized
          }, {
            F: __dxlog_file6,
            L: 114,
            S: this,
            C: (f, a) => f(...a)
          });
          return isAuthorized;
        } catch (err) {
          import_log4.log.catch(err, void 0, {
            F: __dxlog_file6,
            L: 124,
            S: this,
            C: (f, a) => f(...a)
          });
          return false;
        }
      },
      shouldSyncCollection: ({ collectionId }) => {
        const spaceId = getSpaceIdFromCollectionId(collectionId);
        const authorizedDevices = this._authorizedDevices.get(spaceId);
        if (!connection.remoteDeviceKey) {
          (0, import_log4.log)("device key not found for collection sync check", {
            peerId: connection.peerId,
            collectionId
          }, {
            F: __dxlog_file6,
            L: 134,
            S: this,
            C: (f, a) => f(...a)
          });
          return false;
        }
        const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
        return isAuthorized;
      }
    });
    this._connections.add(connection);
    return connection.replicatorExtension;
  }
  async authorizeDevice(spaceKey, deviceKey) {
    (0, import_log4.log)("authorizeDevice", {
      spaceKey,
      deviceKey
    }, {
      F: __dxlog_file6,
      L: 151,
      S: this,
      C: (f, a) => f(...a)
    });
    const spaceId = await (0, import_chunk_DZVH7HDD.createIdFromSpaceKey)(spaceKey);
    (0, import_util3.defaultMap)(this._authorizedDevices, spaceId, () => new import_util3.ComplexSet(import_keys3.PublicKey.hash)).add(deviceKey);
    for (const connection of this._connections) {
      if (connection.remoteDeviceKey && connection.remoteDeviceKey.equals(deviceKey)) {
        if (this._connectionsPerPeer.has(connection.peerId)) {
          this._context?.onConnectionAuthScopeChanged(connection);
        }
      }
    }
  }
};
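// --- Editor's note (illustration, not part of the package source): authorization is
// a map of spaceId -> set of device public keys, and both `shouldAdvertise` and
// `shouldSyncCollection` reduce to a membership test against the connection's remote
// device key. The same bookkeeping with plain built-ins, using hex-encoded keys as a
// stand-in for ComplexSet(PublicKey.hash):
function exampleAuthorize(authorizedDevices, spaceId, deviceKeyHex) {
  let devices = authorizedDevices.get(spaceId);
  if (!devices) {
    devices = new Set();
    authorizedDevices.set(spaceId, devices); // defaultMap(...) equivalent
  }
  devices.add(deviceKeyHex);
  // The returned predicate plays the role of the share-policy check.
  return (remoteDeviceKeyHex) => devices.has(remoteDeviceKeyHex);
}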
function _ts_decorate3(decorators, target, key, desc) {
  var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
  else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
  return c > 3 && r && Object.defineProperty(target, key, r), r;
}
var PER_SECOND_RATE_AVG_WINDOW_SIZE = 5;
var DEFAULT_AVG_WINDOW_SIZE = 25;
var EchoDataMonitor = class {
  constructor(_params = {
    timeSeriesLength: 30
  }) {
    this._params = _params;
    this._lastTick = 0;
    this._activeCounters = createLocalCounters();
    this._localTimeSeries = createLocalTimeSeries();
    this._storageAverages = createStorageAverages();
    this._replicationAverages = createNetworkAverages();
    this._sizeByMessageType = {};
    this._lastReceivedMessages = new import_util4.CircularBuffer(100);
    this._lastSentMessages = new import_util4.CircularBuffer(100);
    this._connectionsCount = 0;
  }
  tick(timeMs) {
    this._advanceTimeWindow(timeMs - this._lastTick);
    this._lastTick = timeMs;
  }
  computeStats() {
    return {
      meta: {
        rateAverageOverSeconds: PER_SECOND_RATE_AVG_WINDOW_SIZE
      },
      storage: {
        reads: {
          payloadSize: this._storageAverages.loadedChunkSize.average(),
          opDuration: this._storageAverages.loadDuration.average(),
          countPerSecond: this._storageAverages.loadsPerSecond.average()
        },
        writes: {
          payloadSize: this._storageAverages.storedChunkSize.average(),
          opDuration: this._storageAverages.storeDuration.average(),
          countPerSecond: this._storageAverages.storesPerSecond.average()
        }
      },
      replicator: {
        connections: this._connectionsCount,
        receivedMessages: {
          payloadSize: this._replicationAverages.receivedMessageSize.average(),
          countPerSecond: this._replicationAverages.receivedPerSecond.average()
        },
        sentMessages: {
          payloadSize: this._replicationAverages.sentMessageSize.average(),
          opDuration: this._replicationAverages.sendDuration.average(),
          countPerSecond: this._replicationAverages.sentPerSecond.average(),
          failedPerSecond: this._replicationAverages.sendsFailedPerSecond.average()
        },
        countByMessageType: this._computeMessageHistogram("type"),
        avgSizeByMessageType: (0, import_util4.mapValues)(this._sizeByMessageType, (summary) => summary.average())
      }
    };
  }
  get connectionsCount() {
    return this._connectionsCount;
  }
  /**
   * @internal
   */
  get lastPerSecondStats() {
    return this._lastCompleteCounters;
  }
  /**
   * @internal
   */
  get timeSeries() {
    return {
      ...this._localTimeSeries.storage,
      ...this._localTimeSeries.replication
    };
  }
  /**
   * @internal
   */
  get messagesByPeerId() {
    return this._computeMessageHistogram("peerId");
  }
  _advanceTimeWindow(millisPassed) {
    const oldMetrics = Object.freeze(this._activeCounters);
    this._activeCounters = createLocalCounters();
    this._lastCompleteCounters = oldMetrics;
    for (const peerId of Object.keys(oldMetrics.byPeerId)) {
      this._activeCounters.byPeerId[peerId] = createMessageCounter();
    }
    this._addToTimeSeries(oldMetrics.replication, this._localTimeSeries.replication);
    this._addToTimeSeries(oldMetrics.storage, this._localTimeSeries.storage);
    if (Math.abs(millisPassed - 1e3) < 100) {
      this._reportPerSecondRate(oldMetrics);
    }
  }
  _addToTimeSeries(values, timeSeries) {
    for (const [key, value] of Object.entries(values)) {
      const values2 = timeSeries[key];
      values2.push(value);
      if (values2.length > this._params.timeSeriesLength) {
        values2.shift();
      }
    }
  }
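  // --- Editor's note (illustration, not part of the package source): the monitor is
  // a tumbling-window counter: each tick freezes the active counters, appends them to
  // a bounded time series, and treats the window as a per-second rate only when the
  // tick spacing was within 100 ms of one second. The bounded-series core in isolation:
  //
  //   function examplePushBounded(series, value, maxLength) {
  //     series.push(value);
  //     if (series.length > maxLength) {
  //       series.shift(); // Drop the oldest sample to keep the series bounded.
  //     }
  //     return series;
  //   }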
  _reportPerSecondRate(metrics) {
    const toReport = [
      [
        "storage.load",
        metrics.storage.loadedChunks,
        this._storageAverages.loadsPerSecond
      ],
      [
        "storage.store",
        metrics.storage.storedChunks,
        this._storageAverages.storesPerSecond
      ],
      [
        "network.receive",
        metrics.replication.received,
        this._replicationAverages.receivedPerSecond
      ],
      [
        "network.send",
        metrics.replication.sent,
        this._replicationAverages.sentPerSecond
      ]
    ];
    for (const [metricName, metric, summary] of toReport) {
      summary.record(metric);
      if (metric > 0) {
        import_tracing2.trace.metrics.distribution(`dxos.echo.${metricName}-rate`, metric);
        import_tracing2.trace.metrics.increment(`dxos.echo.${metricName}`, 1, {
          tags: {
            status: "busy"
          }
        });
      } else {
        import_tracing2.trace.metrics.increment(`dxos.echo.${metricName}`, 1, {
          tags: {
            status: "idle"
          }
        });
      }
    }
    this._replicationAverages.sendsFailedPerSecond.record(metrics.replication.failed);
  }
  recordPeerConnected(peerId) {
    this._activeCounters.byPeerId[peerId] = createMessageCounter();
    this._connectionsCount++;
  }
  recordPeerDisconnected(peerId) {
    this._connectionsCount--;
    delete this._activeCounters.byPeerId[peerId];
  }
  recordBytesStored(count) {
    this._activeCounters.storage.storedChunks++;
    this._activeCounters.storage.storedBytes += count;
    this._storageAverages.storedChunkSize.record(count);
    import_tracing2.trace.metrics.distribution("dxos.echo.storage.bytes-stored", count, {
      unit: "bytes"
    });
  }
  recordLoadDuration(durationMs) {
    this._storageAverages.loadDuration.record(durationMs);
  }
  recordStoreDuration(durationMs) {
    this._storageAverages.storeDuration.record(durationMs);
  }
  recordBytesLoaded(count) {
    this._activeCounters.storage.loadedChunks++;
    this._activeCounters.storage.loadedBytes += count;
    this._storageAverages.loadedChunkSize.record(count);
    import_tracing2.trace.metrics.distribution("dxos.echo.storage.bytes-loaded", count, {
      unit: "bytes"
    });
  }
  recordMessageSent(message, duration) {
    let metricsGroupName;
    const bytes = getByteCount(message);
    const tags = {
      type: message.type
    };
    if (isAutomergeProtocolMessage(message)) {
      this._activeCounters.replication.sent++;
      this._replicationAverages.sendDuration.record(duration);
      this._replicationAverages.sentMessageSize.record(bytes);
      metricsGroupName = "replication";
    } else {
      metricsGroupName = "collection-sync";
    }
    import_tracing2.trace.metrics.distribution(`dxos.echo.${metricsGroupName}.bytes-sent`, bytes, {
      unit: "bytes",
      tags
    });
    import_tracing2.trace.metrics.distribution(`dxos.echo.${metricsGroupName}.send-duration`, duration, {
      unit: "millisecond",
      tags
    });
    import_tracing2.trace.metrics.increment(`dxos.echo.${metricsGroupName}.send-status`, 1, {
      tags: {
        ...tags,
        success: true
      }
    });
    const { messageSize, messageCounts } = this._getStatsForType(message);
    messageSize.record(bytes);
    messageCounts.sent++;
    this._lastSentMessages.push({
      type: message.type,
      peerId: message.targetId
    });
  }
  recordMessageReceived(message) {
    const bytes = getByteCount(message);
    const tags = {
      type: message.type
    };
    if (isAutomergeProtocolMessage(message)) {
      this._activeCounters.replication.received++;
      this._replicationAverages.receivedMessageSize.record(bytes);
      import_tracing2.trace.metrics.distribution("dxos.echo.replication.bytes-received", bytes, {
        unit: "bytes",
        tags
      });
    } else {
      import_tracing2.trace.metrics.distribution("dxos.echo.collection-sync.bytes-received", bytes, {
        unit: "bytes",
        tags
      });
    }
    const { messageSize, messageCounts } = this._getStatsForType(message);
    messageSize.record(bytes);
    messageCounts.received++;
    this._lastReceivedMessages.push({
      type: message.type,
      peerId: message.senderId
    });
  }
  recordMessageSendingFailed(message) {
    const tags = {
      type: message.type,
      success: false
    };
    if (isAutomergeProtocolMessage(message)) {
      this._activeCounters.replication.failed++;
      import_tracing2.trace.metrics.increment("dxos.echo.replication.send-status", 1, {
        unit: "bytes",
        tags
      });
    } else {
      import_tracing2.trace.metrics.increment("dxos.echo.collection-sync.send-status", 1, {
        unit: "bytes",
        tags
      });
    }
    const { messageCounts } = this._getStatsForType(message);
    messageCounts.failed++;
  }
  _getStatsForType(message) {
    const messageSize = this._sizeByMessageType[message.type] ??= createSlidingWindow();
    const messageCounts = this._activeCounters.byType[message.type] ??= createMessageCounter();
    return {
      messageCounts,
      messageSize
    };
  }
  _computeMessageHistogram(groupKey) {
    const result = {};
    for (const receivedMessage of this._lastReceivedMessages) {
      const counters = result[receivedMessage[groupKey]] ??= {
        received: 0,
        sent: 0
      };
      counters.received++;
    }
    for (const receivedMessage of this._lastSentMessages) {
      const counters = result[receivedMessage[groupKey]] ??= {
        received: 0,
        sent: 0
      };
      counters.sent++;
    }
    return result;
  }
};
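// --- Editor's note (illustration, not part of the package source): the histogram
// groups the last ~100 received and ~100 sent messages by a chosen key ("type" or
// "peerId"). The same logic over plain arrays, with a worked result in the comment:
function exampleHistogram(received, sent, groupKey) {
  const result = {};
  for (const m of received) {
    (result[m[groupKey]] ??= { received: 0, sent: 0 }).received++;
  }
  for (const m of sent) {
    (result[m[groupKey]] ??= { received: 0, sent: 0 }).sent++;
  }
  // e.g. two received and one sent "sync" message -> { sync: { received: 2, sent: 1 } }
  return result;
}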
EchoDataMonitor = _ts_decorate3([
  import_tracing2.trace.resource()
], EchoDataMonitor);
var isAutomergeProtocolMessage = (message) => {
  return !(isCollectionQueryMessage(message) || isCollectionStateMessage(message));
};
var createSlidingWindow = (overrides) => new import_util4.SlidingWindowSummary({
  dataPoints: DEFAULT_AVG_WINDOW_SIZE,
  precision: 2,
  ...overrides
});
var createLocalCounters = () => ({
  storage: {
    loadedBytes: 0,
    storedBytes: 0,
    storedChunks: 0,
    loadedChunks: 0
  },
  replication: createMessageCounter(),
  byPeerId: {},
  byType: {}
});
var createLocalTimeSeries = () => ({
  storage: {
    loadedBytes: [],
    storedBytes: [],
    storedChunks: [],
    loadedChunks: []
  },
  replication: {
    sent: [],
    failed: [],
    received: []
  }
});
var createMessageCounter = () => ({
  sent: 0,
  received: 0,
  failed: 0
});
var createNetworkAverages = () => ({
  receivedMessageSize: createSlidingWindow(),
  sentMessageSize: createSlidingWindow(),
  sendDuration: createSlidingWindow(),
  receivedPerSecond: createSlidingWindow({
    dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
  }),
  sentPerSecond: createSlidingWindow({
    dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
  }),
  sendsFailedPerSecond: createSlidingWindow({
    dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
  })
});
var createStorageAverages = () => ({
  storedChunkSize: createSlidingWindow(),
  loadedChunkSize: createSlidingWindow(),
  loadDuration: createSlidingWindow(),
  storeDuration: createSlidingWindow(),
  loadsPerSecond: createSlidingWindow({
    dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
  }),
  storesPerSecond: createSlidingWindow({
    dataPoints: PER_SECOND_RATE_AVG_WINDOW_SIZE
  })
});
var getByteCount = (message) => {
  return message.type.length + message.senderId.length + message.targetId.length + (message.data?.byteLength ?? 0) + (message.documentId?.length ?? 0);
};
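// --- Editor's note (illustration, not part of the package source): `getByteCount`
// estimates message size rather than measuring the encoded frame: string lengths of
// type/senderId/targetId plus the binary payload and optional documentId. Worked
// example for a hypothetical message:
function exampleByteCount() {
  const message = { type: "sync", senderId: "a", targetId: "b", data: new Uint8Array(256) };
  // 4 ("sync") + 1 ("a") + 1 ("b") + 256 (data) + 0 (no documentId) = 262
  return message.type.length + message.senderId.length + message.targetId.length + (message.data?.byteLength ?? 0) + (message.documentId?.length ?? 0);
}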
var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/data-service.ts";
var DataServiceImpl = class {
  constructor(params) {
    this._subscriptions = /* @__PURE__ */ new Map();
    this._automergeHost = params.automergeHost;
    this._updateIndexes = params.updateIndexes;
  }
  subscribe(request) {
    return new import_codec_protobuf.Stream(({ next, ready }) => {
      const synchronizer = new DocumentsSynchronizer({
        repo: this._automergeHost.repo,
        sendUpdates: (updates) => next(updates)
      });
      synchronizer.open().then(() => {
        this._subscriptions.set(request.subscriptionId, synchronizer);
        ready();
      }).catch((err) => import_log6.log.catch(err, void 0, {
        F: __dxlog_file7,
        L: 64,
        S: this,
        C: (f, a) => f(...a)
      }));
      return () => synchronizer.close();
    });
  }
  async updateSubscription(request) {
    const synchronizer = this._subscriptions.get(request.subscriptionId);
    (0, import_invariant7.invariant)(synchronizer, "Subscription not found", {
      F: __dxlog_file7,
      L: 71,
      S: this,
      A: [
        "synchronizer",
        "'Subscription not found'"
      ]
    });
    if (request.addIds?.length) {
      await synchronizer.addDocuments(request.addIds);
    }
    if (request.removeIds?.length) {
      await synchronizer.removeDocuments(request.removeIds);
    }
  }
  async update(request) {
    if (!request.updates) {
      return;
    }
    const synchronizer = this._subscriptions.get(request.subscriptionId);
    (0, import_invariant7.invariant)(synchronizer, "Subscription not found", {
      F: __dxlog_file7,
      L: 86,
      S: this,
      A: [
        "synchronizer",
        "'Subscription not found'"
      ]
    });
    synchronizer.update(request.updates);
  }
  async flush(request) {
    await this._automergeHost.flush(request);
  }
  async getDocumentHeads(request) {
    const documentIds = request.documentIds;
    if (!documentIds) {
      return {
        heads: {
          entries: []
        }
      };
    }
    const heads = await this._automergeHost.getHeads(documentIds);
    return {
      heads: {
        entries: heads.map((heads2, idx) => ({
          documentId: documentIds[idx],
          heads: heads2
        }))
      }
    };
  }
  async waitUntilHeadsReplicated(request, options) {
    await this._automergeHost.waitUntilHeadsReplicated(request.heads);
  }
  async reIndexHeads(request, options) {
    await this._automergeHost.reIndexHeads(request.documentIds ?? []);
  }
  async updateIndexes() {
    await this._updateIndexes();
  }
  async getSpaceSyncState(request, options) {
    (0, import_invariant7.invariant)(import_keys4.SpaceId.isValid(request.spaceId), void 0, {
      F: __dxlog_file7,
      L: 127,
      S: this,
      A: [
        "SpaceId.isValid(request.spaceId)",
        ""
      ]
    });
    const collectionId = deriveCollectionIdFromSpaceId(request.spaceId);
    const state = await this._automergeHost.getCollectionSyncState(collectionId);
    return {
      peers: state.peers.map((peer) => ({
        peerId: peer.peerId,
        documentsToReconcile: peer.differentDocuments
      }))
    };
  }
};
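// --- Editor's note (illustration, not part of the package source): `subscribe` ties
// a DocumentsSynchronizer's lifetime to the returned Stream: opening the synchronizer
// registers the subscription and signals `ready`, and the Stream's teardown closes it.
// A hedged consumer-side sketch, assuming a constructed `dataService` and the
// `subscribe`/`close` surface of Stream from @dxos/codec-protobuf; the id and
// document id are hypothetical:
async function exampleSubscription(dataService) {
  const subscriptionId = 1; // hypothetical
  const stream = dataService.subscribe({ subscriptionId });
  stream.subscribe((updates) => {
    // Each batch contains document updates pushed by the synchronizer.
  });
  await dataService.updateSubscription({ subscriptionId, addIds: ["docId1"] });
  // Later: closing the stream runs the teardown, which closes the synchronizer.
  await stream.close();
}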
// Annotate the CommonJS export names for ESM import in node:
0 && (module.exports = {
  AutomergeHost,
  DataServiceImpl,
  DocumentsSynchronizer,
  EchoDataMonitor,
  LevelDBStorageAdapter,
  MeshEchoReplicator,
  deriveCollectionIdFromSpaceId,
  diffCollectionState,
  encodingOptions,
  getSpaceIdFromCollectionId,
  getSpaceKeyFromDoc
});
//# sourceMappingURL=chunk-5DH4KR2S.cjs.map