@dxos/echo-pipeline 0.8.2 → 0.8.3-main.7f5a14c
- package/dist/lib/browser/{chunk-3XSXS5EX.mjs → chunk-35I6ERLG.mjs} +2 -2
- package/dist/lib/browser/chunk-35I6ERLG.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +355 -352
- package/dist/lib/browser/index.mjs.map +3 -3
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +4 -4
- package/dist/lib/browser/testing/index.mjs.map +3 -3
- package/dist/lib/node/{chunk-SG2PL5RH.cjs → chunk-JXX6LF5U.cjs} +5 -5
- package/dist/lib/node/chunk-JXX6LF5U.cjs.map +7 -0
- package/dist/lib/node/index.cjs +321 -320
- package/dist/lib/node/index.cjs.map +3 -3
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +13 -13
- package/dist/lib/node/testing/index.cjs.map +3 -3
- package/dist/lib/node-esm/{chunk-3BZP75TJ.mjs → chunk-5BHLPT24.mjs} +2 -2
- package/dist/lib/node-esm/chunk-5BHLPT24.mjs.map +7 -0
- package/dist/lib/node-esm/index.mjs +355 -352
- package/dist/lib/node-esm/index.mjs.map +3 -3
- package/dist/lib/node-esm/meta.json +1 -1
- package/dist/lib/node-esm/testing/index.mjs +4 -4
- package/dist/lib/node-esm/testing/index.mjs.map +3 -3
- package/dist/types/src/automerge/echo-network-adapter.d.ts +3 -3
- package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts.map +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts.map +1 -1
- package/dist/types/src/edge/echo-edge-replicator.d.ts.map +1 -1
- package/dist/types/src/query/query-executor.d.ts.map +1 -1
- package/package.json +35 -35
- package/src/automerge/automerge-host.ts +3 -3
- package/src/automerge/automerge-repo.test.ts +48 -4
- package/src/automerge/echo-network-adapter.test.ts +7 -8
- package/src/automerge/echo-network-adapter.ts +50 -46
- package/src/automerge/mesh-echo-replicator.ts +1 -0
- package/src/db-host/data-service.ts +1 -1
- package/src/db-host/documents-iterator.ts +1 -1
- package/src/db-host/documents-synchronizer.test.ts +1 -1
- package/src/db-host/documents-synchronizer.ts +5 -3
- package/src/db-host/query-service.ts +3 -3
- package/src/edge/echo-edge-replicator.ts +2 -3
- package/src/pipeline/pipeline.ts +1 -1
- package/src/query/query-executor.ts +6 -6
- package/src/testing/change-metadata.ts +1 -1
- package/src/testing/test-replicator.ts +2 -2
- package/dist/lib/browser/chunk-3XSXS5EX.mjs.map +0 -7
- package/dist/lib/node/chunk-SG2PL5RH.cjs.map +0 -7
- package/dist/lib/node-esm/chunk-3BZP75TJ.mjs.map +0 -7
```diff
@@ -25,7 +25,7 @@ import {
   mapTimeframeToFeedIndexes,
   startAfter,
   valueEncoding
-} from "./chunk-3XSXS5EX.mjs";
+} from "./chunk-35I6ERLG.mjs";
 import "./chunk-CGS2ULMK.mjs";
 
 // packages/core/echo/echo-pipeline/src/db-host/data-service.ts
```
```diff
@@ -36,177 +36,21 @@ import { SpaceId as SpaceId2 } from "@dxos/keys";
 import { log as log7 } from "@dxos/log";
 
 // packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts
-import { next as A } from "@automerge/automerge";
+import { next as A2 } from "@automerge/automerge";
 import { UpdateScheduler } from "@dxos/async";
-import { Resource } from "@dxos/context";
-import { invariant } from "@dxos/invariant";
-import { log } from "@dxos/log";
-var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts";
-var MAX_UPDATE_FREQ = 10;
-var DocumentsSynchronizer = class extends Resource {
-  constructor(_params) {
-    super(), this._params = _params, this._syncStates = /* @__PURE__ */ new Map(), this._pendingUpdates = /* @__PURE__ */ new Set(), this._sendUpdatesJob = void 0;
-  }
-  addDocuments(documentIds, retryCounter = 0) {
-    if (retryCounter > 3) {
-      log.warn("Failed to load document, retry limit reached", {
-        documentIds
-      }, {
-        F: __dxlog_file,
-        L: 50,
-        S: this,
-        C: (f, a) => f(...a)
-      });
-      return;
-    }
-    for (const documentId of documentIds) {
-      this._params.repo.find(documentId).then(async (doc) => {
-        await doc.whenReady();
-        this._startSync(doc);
-        this._pendingUpdates.add(doc.documentId);
-        this._sendUpdatesJob.trigger();
-      }).catch((error) => {
-        log.warn("Failed to load document, wraparound", {
-          documentId,
-          error
-        }, {
-          F: __dxlog_file,
-          L: 64,
-          S: this,
-          C: (f, a) => f(...a)
-        });
-        this.addDocuments([
-          documentId
-        ], retryCounter + 1);
-      });
-    }
-  }
-  removeDocuments(documentIds) {
-    for (const documentId of documentIds) {
-      this._syncStates.get(documentId)?.clearSubscriptions?.();
-      this._syncStates.delete(documentId);
-      this._pendingUpdates.delete(documentId);
-    }
-  }
-  async _open() {
-    this._sendUpdatesJob = new UpdateScheduler(this._ctx, this._checkAndSendUpdates.bind(this), {
-      maxFrequency: MAX_UPDATE_FREQ
-    });
-  }
-  async _close() {
-    await this._sendUpdatesJob.join();
-    this._syncStates.clear();
-  }
-  update(updates) {
-    for (const { documentId, mutation, isNew } of updates) {
-      if (isNew) {
-        const { handle: doc } = this._params.repo.findWithProgress(documentId);
-        doc.update((doc2) => A.loadIncremental(doc2, mutation));
-        this._startSync(doc);
-      } else {
-        this._writeMutation(documentId, mutation);
-      }
-    }
-  }
-  _startSync(doc) {
-    if (this._syncStates.has(doc.documentId)) {
-      log.info("Document already being synced", {
-        documentId: doc.documentId
-      }, {
-        F: __dxlog_file,
-        L: 103,
-        S: this,
-        C: (f, a) => f(...a)
-      });
-      return;
-    }
-    const syncState = {
-      handle: doc
-    };
-    this._subscribeForChanges(syncState);
-    this._syncStates.set(doc.documentId, syncState);
-  }
-  _subscribeForChanges(syncState) {
-    const handler = () => {
-      this._pendingUpdates.add(syncState.handle.documentId);
-      this._sendUpdatesJob.trigger();
-    };
-    syncState.handle.on("heads-changed", handler);
-    syncState.clearSubscriptions = () => syncState.handle.off("heads-changed", handler);
-  }
-  async _checkAndSendUpdates() {
-    const updates = [];
-    const docsWithPendingUpdates = Array.from(this._pendingUpdates);
-    this._pendingUpdates.clear();
-    for (const documentId of docsWithPendingUpdates) {
-      const update = this._getPendingChanges(documentId);
-      if (update) {
-        updates.push({
-          documentId,
-          mutation: update
-        });
-      }
-    }
-    if (updates.length > 0) {
-      this._params.sendUpdates({
-        updates
-      });
-    }
-  }
-  _getPendingChanges(documentId) {
-    const syncState = this._syncStates.get(documentId);
-    invariant(syncState, "Sync state for document not found", {
-      F: __dxlog_file,
-      L: 144,
-      S: this,
-      A: [
-        "syncState",
-        "'Sync state for document not found'"
-      ]
-    });
-    const handle = syncState.handle;
-    if (!handle || !handle.isReady() || !handle.doc()) {
-      return;
-    }
-    const doc = handle.doc();
-    const mutation = syncState.lastSentHead ? A.saveSince(doc, syncState.lastSentHead) : A.save(doc);
-    if (mutation.length === 0) {
-      return;
-    }
-    syncState.lastSentHead = A.getHeads(doc);
-    return mutation;
-  }
-  _writeMutation(documentId, mutation) {
-    const syncState = this._syncStates.get(documentId);
-    invariant(syncState, "Sync state for document not found", {
-      F: __dxlog_file,
-      L: 160,
-      S: this,
-      A: [
-        "syncState",
-        "'Sync state for document not found'"
-      ]
-    });
-    syncState.handle.update((doc) => {
-      const headsBefore = A.getHeads(doc);
-      const newDoc = A.loadIncremental(doc, mutation);
-      if (A.equals(headsBefore, syncState.lastSentHead)) {
-        syncState.lastSentHead = A.getHeads(newDoc);
-      }
-      return newDoc;
-    });
-  }
-};
+import { Resource as Resource5 } from "@dxos/context";
+import { invariant as invariant6 } from "@dxos/invariant";
+import { log as log6 } from "@dxos/log";
 
 // packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts
 import { getBackend, getHeads, isAutomerge, equals as headsEquals, save } from "@automerge/automerge";
 import { Repo, interpretAsDocumentId } from "@automerge/automerge-repo";
 import { Event as Event2, asyncTimeout } from "@dxos/async";
-import { Context, Resource as Resource4, cancelWithContext } from "@dxos/context";
+import { Context, Resource as Resource3, cancelWithContext } from "@dxos/context";
 import { DatabaseDirectory } from "@dxos/echo-protocol";
-import { invariant as invariant3 } from "@dxos/invariant";
+import { invariant as invariant2 } from "@dxos/invariant";
 import { PublicKey } from "@dxos/keys";
-import { log as log4 } from "@dxos/log";
+import { log as log3 } from "@dxos/log";
 import { objectPointerCodec } from "@dxos/protocols";
 import { trace as trace2 } from "@dxos/tracing";
 import { bufferToArray } from "@dxos/util";
```
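The `DocumentsSynchronizer` removed above (and re-added further down in the bundle) drives replication by shipping raw Automerge bytes: the first send is a full snapshot, and later sends carry only the changes made since the heads recorded at the previous send. A minimal sketch of that pattern, assuming the `@automerge/automerge` `next` API the diff itself uses; `SyncState` and the function names are illustrative:

```ts
import { next as A } from '@automerge/automerge';

type SyncState<T> = { doc: A.Doc<T>; lastSentHead?: A.Heads };

// First call sends a full snapshot (A.save); later calls send only the
// changes made since the previously recorded heads (A.saveSince).
const getPendingChanges = <T>(state: SyncState<T>): Uint8Array | undefined => {
  const mutation = state.lastSentHead ? A.saveSince(state.doc, state.lastSentHead) : A.save(state.doc);
  if (mutation.length === 0) {
    return undefined;
  }
  state.lastSentHead = A.getHeads(state.doc);
  return mutation;
};

// The receiving side applies the bytes with loadIncremental.
const applyMutation = <T>(doc: A.Doc<T>, mutation: Uint8Array): A.Doc<T> =>
  A.loadIncremental(doc, mutation);
```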
```diff
@@ -214,8 +58,8 @@ import { bufferToArray } from "@dxos/util";
 // packages/core/echo/echo-pipeline/src/automerge/collection-synchronizer.ts
 import { next as am } from "@automerge/automerge";
 import { asyncReturn, Event, scheduleTask, scheduleTaskInterval } from "@dxos/async";
-import { Resource as Resource2 } from "@dxos/context";
-import { log as log2 } from "@dxos/log";
+import { Resource } from "@dxos/context";
+import { log } from "@dxos/log";
 import { trace } from "@dxos/tracing";
 import { defaultMap } from "@dxos/util";
 function _ts_decorate(decorators, target, key, desc) {
@@ -224,10 +68,10 @@ function _ts_decorate(decorators, target, key, desc) {
   else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
   return c > 3 && r && Object.defineProperty(target, key, r), r;
 }
-var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/collection-synchronizer.ts";
+var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/collection-synchronizer.ts";
 var MIN_QUERY_INTERVAL = 5e3;
 var POLL_INTERVAL = 3e4;
-var CollectionSynchronizer = class extends Resource2 {
+var CollectionSynchronizer = class extends Resource {
   constructor(params) {
     super();
     /**
@@ -261,11 +105,11 @@ var CollectionSynchronizer = class extends Resource2 {
   }
   setLocalCollectionState(collectionId, state) {
     this._activeCollections.add(collectionId);
-    log2("setLocalCollectionState", {
+    log("setLocalCollectionState", {
       collectionId,
       state
     }, {
-      F: __dxlog_file2,
+      F: __dxlog_file,
       L: 76,
       S: this,
       C: (f, a) => f(...a)
@@ -281,10 +125,10 @@ var CollectionSynchronizer = class extends Resource2 {
   clearLocalCollectionState(collectionId) {
     this._activeCollections.delete(collectionId);
     this._perCollectionStates.delete(collectionId);
-    log2("clearLocalCollectionState", {
+    log("clearLocalCollectionState", {
       collectionId
     }, {
-      F: __dxlog_file2,
+      F: __dxlog_file,
       L: 90,
       S: this,
       C: (f, a) => f(...a)
@@ -362,12 +206,12 @@ var CollectionSynchronizer = class extends Resource2 {
    * Callback when a peer sends the state of a collection.
    */
   onRemoteStateReceived(collectionId, peerId, state) {
-    log2("onRemoteStateReceived", {
+    log("onRemoteStateReceived", {
      collectionId,
      peerId,
      state
    }, {
-      F: __dxlog_file2,
+      F: __dxlog_file,
       L: 171,
       S: this,
       C: (f, a) => f(...a)
@@ -467,8 +311,8 @@ var getSpanName = (peerId) => {
 import { NetworkAdapter } from "@automerge/automerge-repo";
 import { synchronized, Trigger } from "@dxos/async";
 import { LifecycleState } from "@dxos/context";
-import { invariant as invariant2 } from "@dxos/invariant";
-import { log as log3 } from "@dxos/log";
+import { invariant } from "@dxos/invariant";
+import { log as log2 } from "@dxos/log";
 import { isNonNullable } from "@dxos/util";
 
 // packages/core/echo/echo-pipeline/src/automerge/network-protocol.ts
@@ -483,7 +327,7 @@ function _ts_decorate2(decorators, target, key, desc) {
   else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
   return c > 3 && r && Object.defineProperty(target, key, r), r;
 }
-var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts";
+var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts";
 var EchoNetworkAdapter = class extends NetworkAdapter {
   constructor(_params) {
     super(), this._params = _params, this._replicators = /* @__PURE__ */ new Set(), this._connections = /* @__PURE__ */ new Map(), this._lifecycleState = LifecycleState.CLOSED, this._connected = new Trigger(), this._ready = new Trigger();
```
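Most of the churn in these hunks is mechanical: esbuild renumbers the `logN`, `invariantN`, `ResourceN`, and `__dxlog_fileN` aliases because `documents-synchronizer.ts` moved to a later position in the bundle. The `{ F, L, S, C }` objects are call-site metadata injected by the `@dxos/log` build transform. A rough model of the shape; the `log` implementation below is a hypothetical stand-in, not the real `@dxos/log` API, and the field meanings are inferred from the bundle output:

```ts
// Hypothetical model of the injected call-site metadata.
type CallMetadata = {
  F: string;   // original .ts source file
  L: number;   // line number in that file
  S?: unknown; // enclosing `this` scope
  C?: (f: (...a: unknown[]) => void, a: unknown[]) => void; // call wrapper
};

const log = (message: string, context?: object, meta?: CallMetadata): void => {
  console.log(`${meta?.F ?? '?'}:${meta?.L ?? '?'}`, message, context ?? {});
};

// What a transformed call site looks like in the bundle:
const __dxlog_file = '/packages/core/echo/echo-pipeline/src/automerge/collection-synchronizer.ts';
log('setLocalCollectionState', { collectionId: 'example' }, { F: __dxlog_file, L: 76 });
```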
```diff
@@ -527,28 +371,34 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
       timeout: 1e4
     });
   }
+  onConnectionAuthScopeChanged(peer) {
+    const entry = this._connections.get(peer);
+    if (entry) {
+      this._onConnectionAuthScopeChanged(entry.connection);
+    }
+  }
   async addReplicator(replicator) {
-    invariant2(this._lifecycleState === LifecycleState.OPEN, void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(this._lifecycleState === LifecycleState.OPEN, void 0, {
+      F: __dxlog_file2,
+      L: 129,
       S: this,
       A: [
         "this._lifecycleState === LifecycleState.OPEN",
         ""
       ]
     });
-    invariant2(this.peerId, void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(this.peerId, void 0, {
+      F: __dxlog_file2,
+      L: 130,
       S: this,
       A: [
         "this.peerId",
         ""
       ]
     });
-    invariant2(!this._replicators.has(replicator), void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(!this._replicators.has(replicator), void 0, {
+      F: __dxlog_file2,
+      L: 131,
       S: this,
       A: [
         "!this._replicators.has(replicator)",
@@ -570,18 +420,18 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
     });
   }
   async removeReplicator(replicator) {
-    invariant2(this._lifecycleState === LifecycleState.OPEN, void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(this._lifecycleState === LifecycleState.OPEN, void 0, {
+      F: __dxlog_file2,
+      L: 150,
       S: this,
       A: [
         "this._lifecycleState === LifecycleState.OPEN",
         ""
       ]
     });
-    invariant2(this._replicators.has(replicator), void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(this._replicators.has(replicator), void 0, {
+      F: __dxlog_file2,
+      L: 151,
       S: this,
       A: [
         "this._replicators.has(replicator)",
@@ -624,20 +474,27 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
     };
     this._send(message);
   }
+  // TODO(dmaretskyi): Remove.
+  getPeersInterestedInCollection(collectionId) {
+    return Array.from(this._connections.values()).map((connection) => {
+      return connection.connection.shouldSyncCollection({
+        collectionId
+      }) ? connection.connection.peerId : null;
+    }).filter(isNonNullable);
+  }
   _send(message) {
     const connectionEntry = this._connections.get(message.targetId);
     if (!connectionEntry) {
       throw new Error("Connection not found.");
     }
-    const
+    const start = Date.now();
     connectionEntry.writer.write(message).then(() => {
-
-      this._params.monitor?.recordMessageSent(message, durationMs);
+      this._params.monitor?.recordMessageSent(message, Date.now() - start);
     }).catch((err) => {
       if (connectionEntry.isOpen) {
-        log3.catch(err, void 0, {
-          F: __dxlog_file3,
-          L:
+        log2.catch(err, void 0, {
+          F: __dxlog_file2,
+          L: 221,
           S: this,
           C: (f, a) => f(...a)
         });
```
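`_send` now measures write latency inline: it captures a timestamp before the asynchronous write and computes the elapsed time when the promise resolves, instead of relying on the removed `durationMs` variable. A self-contained sketch of the same shape, with a hypothetical `Monitor` interface:

```ts
type Monitor = {
  recordMessageSent(message: unknown, durationMs: number): void;
};

const sendWithTiming = <T>(writer: WritableStreamDefaultWriter<T>, message: T, monitor?: Monitor): void => {
  const start = Date.now();
  writer.write(message).then(() => {
    // Report elapsed time once the write settles.
    monitor?.recordMessageSent(message, Date.now() - start);
  }).catch((err) => {
    console.error(err); // the bundle routes this through log2.catch while the connection is open
  });
};
```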
```diff
@@ -645,45 +502,35 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
       this._params.monitor?.recordMessageSendingFailed(message);
     });
   }
-  // TODO(dmaretskyi): Remove.
-  getPeersInterestedInCollection(collectionId) {
-    return Array.from(this._connections.values()).map((connection) => {
-      return connection.connection.shouldSyncCollection({
-        collectionId
-      }) ? connection.connection.peerId : null;
-    }).filter(isNonNullable);
-  }
   _onConnectionOpen(connection) {
-    log3("connection opened", {
+    log2("connection opened", {
       peerId: connection.peerId
     }, {
-      F: __dxlog_file3,
-      L:
+      F: __dxlog_file2,
+      L: 229,
       S: this,
       C: (f, a) => f(...a)
     });
-    invariant2(!this._connections.has(connection.peerId), void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(!this._connections.has(connection.peerId), void 0, {
+      F: __dxlog_file2,
+      L: 230,
       S: this,
       A: [
         "!this._connections.has(connection.peerId as PeerId)",
         ""
       ]
     });
-    const reader = connection.readable.getReader();
-    const writer = connection.writable.getWriter();
     const connectionEntry = {
+      isOpen: true,
       connection,
-      reader,
-      writer,
-      isOpen: true
+      reader: connection.readable.getReader(),
+      writer: connection.writable.getWriter()
     };
     this._connections.set(connection.peerId, connectionEntry);
     queueMicrotask(async () => {
       try {
         while (true) {
-          const { done, value } = await reader.read();
+          const { done, value } = await connectionEntry.reader.read();
           if (done) {
             break;
           }
```
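`_onConnectionOpen` also inlines the reader and writer into the connection entry and pumps inbound messages from the entry's reader. The detached read loop over a WHATWG `ReadableStream` looks roughly like this; `handleMessage` is an illustrative stand-in for the adapter's message dispatch:

```ts
const pumpMessages = (readable: ReadableStream<Uint8Array>, handleMessage: (message: Uint8Array) => void): void => {
  const reader = readable.getReader();
  queueMicrotask(async () => {
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) {
          break; // stream closed cleanly
        }
        handleMessage(value);
      }
    } catch (err) {
      console.error(err); // the bundle only logs this while the entry is still open
    }
  });
};
```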
```diff
@@ -691,20 +538,20 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
         }
       } catch (err) {
         if (connectionEntry.isOpen) {
-          log3.catch(err, void 0, {
-            F: __dxlog_file3,
-            L:
+          log2.catch(err, void 0, {
+            F: __dxlog_file2,
+            L: 254,
             S: this,
             C: (f, a) => f(...a)
           });
         }
       }
     });
-    log3("emit peer-candidate", {
+    log2("emit peer-candidate", {
       peerId: connection.peerId
     }, {
-      F: __dxlog_file3,
-      L:
+      F: __dxlog_file2,
+      L: 259,
       S: this,
       C: (f, a) => f(...a)
     });
@@ -721,77 +568,71 @@ var EchoNetworkAdapter = class extends NetworkAdapter {
     }
     this._params.monitor?.recordMessageReceived(message);
   }
-  onConnectionAuthScopeChanged(peer) {
-    const entry = this._connections.get(peer);
-    if (entry) {
-      this._onConnectionAuthScopeChanged(entry.connection);
-    }
-  }
-  /**
-   * Trigger doc-synchronizer shared documents set recalculation. Happens on peer-candidate.
-   * TODO(y): replace with a proper API call when sharePolicy update becomes supported by automerge-repo
-   */
-  _onConnectionAuthScopeChanged(connection) {
-    log3("Connection auth scope changed", {
+  _onConnectionClosed(connection) {
+    log2("connection closed", {
       peerId: connection.peerId
     }, {
-      F: __dxlog_file3,
-      L:
+      F: __dxlog_file2,
+      L: 276,
       S: this,
       C: (f, a) => f(...a)
     });
     const entry = this._connections.get(connection.peerId);
-    invariant2(entry, void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(entry, void 0, {
+      F: __dxlog_file2,
+      L: 278,
       S: this,
       A: [
         "entry",
         ""
       ]
     });
+    entry.isOpen = false;
     this.emit("peer-disconnected", {
       peerId: connection.peerId
     });
-    this.
+    this._params.monitor?.recordPeerDisconnected(connection.peerId);
+    void entry.reader.cancel().catch((err) => log2.catch(err, void 0, {
+      F: __dxlog_file2,
+      L: 284,
+      S: this,
+      C: (f, a) => f(...a)
+    }));
+    void entry.writer.abort().catch((err) => log2.catch(err, void 0, {
+      F: __dxlog_file2,
+      L: 285,
+      S: this,
+      C: (f, a) => f(...a)
+    }));
+    this._connections.delete(connection.peerId);
   }
-  _onConnectionClosed(connection) {
-    log3("connection closed", {
+  /**
+   * Trigger doc-synchronizer shared documents set recalculation. Happens on peer-candidate.
+   * TODO(y): replace with a proper API call when sharePolicy update becomes supported by automerge-repo
+   */
+  _onConnectionAuthScopeChanged(connection) {
+    log2("Connection auth scope changed", {
       peerId: connection.peerId
     }, {
-      F: __dxlog_file3,
-      L:
+      F: __dxlog_file2,
+      L: 294,
       S: this,
       C: (f, a) => f(...a)
     });
     const entry = this._connections.get(connection.peerId);
-    invariant2(entry, void 0, {
-      F: __dxlog_file3,
-      L:
+    invariant(entry, void 0, {
+      F: __dxlog_file2,
+      L: 296,
       S: this,
       A: [
         "entry",
         ""
       ]
     });
-    entry.isOpen = false;
     this.emit("peer-disconnected", {
       peerId: connection.peerId
     });
-    this.
-    void entry.reader.cancel().catch((err) => log3.catch(err, void 0, {
-      F: __dxlog_file3,
-      L: 284,
-      S: this,
-      C: (f, a) => f(...a)
-    }));
-    void entry.writer.abort().catch((err) => log3.catch(err, void 0, {
-      F: __dxlog_file3,
-      L: 285,
-      S: this,
-      C: (f, a) => f(...a)
-    }));
-    this._connections.delete(connection.peerId);
+    this._emitPeerCandidate(connection);
   }
   _emitPeerCandidate(connection) {
     this.emit("peer-candidate", {
```
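The new `_onConnectionClosed` releases both stream endpoints explicitly: `cancel()` discards queued reads, `abort()` rejects pending writes, and each rejection is caught and logged rather than surfacing as an unhandled promise, while `_onConnectionAuthScopeChanged` now re-emits a peer candidate instead of tearing the connection down. A reduced sketch of the teardown, with a hypothetical `ConnectionEntry` type:

```ts
type ConnectionEntry = {
  isOpen: boolean;
  reader: ReadableStreamDefaultReader<Uint8Array>;
  writer: WritableStreamDefaultWriter<unknown>;
};

const closeConnection = (entry: ConnectionEntry): void => {
  entry.isOpen = false; // read/write failures after this point are expected
  void entry.reader.cancel().catch((err) => console.error(err));
  void entry.writer.abort().catch((err) => console.error(err));
};
```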
```diff
@@ -841,8 +682,8 @@ var HeadsStore = class {
 };
 
 // packages/core/echo/echo-pipeline/src/automerge/leveldb-storage-adapter.ts
-import { LifecycleState as LifecycleState2, Resource as Resource3 } from "@dxos/context";
-var LevelDBStorageAdapter = class extends Resource3 {
+import { LifecycleState as LifecycleState2, Resource as Resource2 } from "@dxos/context";
+var LevelDBStorageAdapter = class extends Resource2 {
   constructor(_params) {
     super(), this._params = _params;
   }
@@ -952,14 +793,14 @@ function _ts_decorate3(decorators, target, key, desc) {
   else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
   return c > 3 && r && Object.defineProperty(target, key, r), r;
 }
-var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts";
+var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts";
 var FIND_PARAMS = {
   allowableStates: [
     "ready",
     "requesting"
   ]
 };
-var AutomergeHost = class extends Resource4 {
+var AutomergeHost = class extends Resource3 {
   constructor({ db, indexMetadataStore, dataMonitor, peerIdProvider, getSpaceKeyByRootDocumentId }) {
     super();
     this._collectionSynchronizer = new CollectionSynchronizer({
@@ -1113,7 +954,7 @@ var AutomergeHost = class extends Resource4 {
     if (headsToWait.length > 0) {
       await Promise.all(headsToWait.map(async (entry, index) => {
         const handle = await this.loadDoc(Context.default(void 0, {
-          F: __dxlog_file4,
+          F: __dxlog_file3,
           L: 288
         }), entry.documentId);
         await waitForHeads(handle, entry.heads);
@@ -1123,20 +964,20 @@ var AutomergeHost = class extends Resource4 {
   }
   async reIndexHeads(documentIds) {
     for (const documentId of documentIds) {
-      log4("re-indexing heads for document", {
+      log3("re-indexing heads for document", {
         documentId
       }, {
-        F: __dxlog_file4,
+        F: __dxlog_file3,
         L: 302,
         S: this,
         C: (f, a) => f(...a)
       });
       const handle = await this._repo.find(documentId, FIND_PARAMS);
       if (!handle.isReady()) {
-        log4.warn("document is not available locally, skipping", {
+        log3.warn("document is not available locally, skipping", {
           documentId
         }, {
-          F: __dxlog_file4,
+          F: __dxlog_file3,
           L: 305,
           S: this,
           C: (f, a) => f(...a)
@@ -1148,8 +989,8 @@ var AutomergeHost = class extends Resource4 {
       this._headsStore.setHeads(documentId, heads, batch);
       await batch.write();
     }
-    log4("done re-indexing heads", void 0, {
-      F: __dxlog_file4,
+    log3("done re-indexing heads", void 0, {
+      F: __dxlog_file3,
       L: 314,
       S: this,
       C: (f, a) => f(...a)
@@ -1364,13 +1205,13 @@ var AutomergeHost = class extends Resource4 {
     if (toReplicate.length === 0) {
       return;
     }
-    log4("replicating documents after collection sync", {
+    log3("replicating documents after collection sync", {
       collectionId,
       peerId,
       toReplicate,
       count: toReplicate.length
     }, {
-      F: __dxlog_file4,
+      F: __dxlog_file3,
       L: 557,
       S: this,
       C: (f, a) => f(...a)
@@ -1429,8 +1270,8 @@ var changeIsPresentInDoc = (doc, changeHash) => {
   return !!getBackend(doc).getChangeByHash(changeHash);
 };
 var decodeCollectionState = (state) => {
-  invariant3(typeof state === "object" && state !== null, "Invalid state", {
-    F: __dxlog_file4,
+  invariant2(typeof state === "object" && state !== null, "Invalid state", {
+    F: __dxlog_file3,
     L: 608,
     S: void 0,
     A: [
@@ -1445,21 +1286,21 @@ var encodeCollectionState = (state) => {
 };
 
 // packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
-import { invariant as invariant6 } from "@dxos/invariant";
+import { invariant as invariant5 } from "@dxos/invariant";
 import { PublicKey as PublicKey2 } from "@dxos/keys";
-import { log as log6 } from "@dxos/log";
+import { log as log5 } from "@dxos/log";
 import { ComplexSet, defaultMap as defaultMap2 } from "@dxos/util";
 
 // packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts
-import * as A2 from "@automerge/automerge";
+import * as A from "@automerge/automerge";
 import { cbor } from "@automerge/automerge-repo";
-import { Resource as Resource5 } from "@dxos/context";
-import { invariant as invariant4 } from "@dxos/invariant";
-import { log as log5 } from "@dxos/log";
+import { Resource as Resource4 } from "@dxos/context";
+import { invariant as invariant3 } from "@dxos/invariant";
+import { log as log4 } from "@dxos/log";
 import { AutomergeReplicator } from "@dxos/teleport-extension-automerge-replicator";
-var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts";
+var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts";
 var DEFAULT_FACTORY = (params) => new AutomergeReplicator(...params);
-var MeshReplicatorConnection = class extends Resource5 {
+var MeshReplicatorConnection = class extends Resource4 {
   constructor(_params) {
     super(), this._params = _params, this.remoteDeviceKey = null, this._remotePeerId = null, this._isEnabled = false;
     let readableStreamController;
@@ -1471,8 +1312,8 @@ var MeshReplicatorConnection = class extends Resource5 {
     });
     this.writable = new WritableStream({
       write: async (message, controller) => {
-        invariant4(this._isEnabled, "Writing to a disabled connection", {
-          F: __dxlog_file5,
+        invariant3(this._isEnabled, "Writing to a disabled connection", {
+          F: __dxlog_file4,
           L: 51,
           S: this,
           A: [
@@ -1500,12 +1341,12 @@ var MeshReplicatorConnection = class extends Resource5 {
       onStartReplication: async (info, remotePeerId) => {
         this.remoteDeviceKey = remotePeerId;
         this._remotePeerId = info.id;
-        log5("onStartReplication", {
+        log4("onStartReplication", {
           id: info.id,
           thisPeerId: this.peerId,
           remotePeerId: remotePeerId.toHex()
         }, {
-          F: __dxlog_file5,
+          F: __dxlog_file4,
           L: 80,
           S: this,
           C: (f, a) => f(...a)
@@ -1531,8 +1372,8 @@ var MeshReplicatorConnection = class extends Resource5 {
     }
   }
   get peerId() {
-    invariant4(this._remotePeerId != null, "Remote peer has not connected yet.", {
-      F: __dxlog_file5,
+    invariant3(this._remotePeerId != null, "Remote peer has not connected yet.", {
+      F: __dxlog_file4,
       L: 106,
       S: this,
       A: [
@@ -1556,8 +1397,8 @@ var MeshReplicatorConnection = class extends Resource5 {
    * Call after the remote peer has connected.
    */
   enable() {
-    invariant4(this._remotePeerId != null, "Remote peer has not connected yet.", {
-      F: __dxlog_file5,
+    invariant3(this._remotePeerId != null, "Remote peer has not connected yet.", {
+      F: __dxlog_file4,
       L: 127,
       S: this,
       A: [
@@ -1575,8 +1416,8 @@ var MeshReplicatorConnection = class extends Resource5 {
   }
 };
 var logSendSync = (message) => {
-  log5("sendSyncMessage", () => {
-    const decodedSyncMessage = message.type === "sync" && message.data ? A2.decodeSyncMessage(message.data) : void 0;
+  log4("sendSyncMessage", () => {
+    const decodedSyncMessage = message.type === "sync" && message.data ? A.decodeSyncMessage(message.data) : void 0;
     return {
       sync: decodedSyncMessage && {
         headsLength: decodedSyncMessage.heads.length,
@@ -1588,7 +1429,7 @@ var logSendSync = (message) => {
       to: message.targetId
     };
   }, {
-    F: __dxlog_file5,
+    F: __dxlog_file4,
     L: 140,
     S: void 0,
     C: (f, a) => f(...a)
@@ -1596,14 +1437,14 @@ var logSendSync = (message) => {
 };
 
 // packages/core/echo/echo-pipeline/src/automerge/space-collection.ts
-import { invariant as invariant5 } from "@dxos/invariant";
+import { invariant as invariant4 } from "@dxos/invariant";
 import { SpaceId } from "@dxos/keys";
-var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/space-collection.ts";
+var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/space-collection.ts";
 var deriveCollectionIdFromSpaceId = (spaceId, rootDocumentId) => rootDocumentId ? `space:${spaceId}:${rootDocumentId}` : `space:${spaceId}`;
 var getSpaceIdFromCollectionId = (collectionId) => {
   const spaceId = collectionId.split(":")[1];
-  invariant5(SpaceId.isValid(spaceId), void 0, {
-    F: __dxlog_file6,
+  invariant4(SpaceId.isValid(spaceId), void 0, {
+    F: __dxlog_file5,
     L: 16,
     S: void 0,
     A: [
@@ -1615,7 +1456,7 @@ var getSpaceIdFromCollectionId = (collectionId) => {
 };
 
 // packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
-var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts";
+var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts";
 var MeshEchoReplicator = class {
   constructor() {
     /**
@@ -1652,8 +1493,8 @@ var MeshEchoReplicator = class {
     this._context = null;
   }
   createExtension(extensionFactory) {
-    invariant6(this._context, void 0, {
-      F: __dxlog_file7,
+    invariant5(this._context, void 0, {
+      F: __dxlog_file6,
       L: 67,
       S: this,
       A: [
@@ -1665,16 +1506,16 @@ var MeshEchoReplicator = class {
       ownPeerId: this._context.peerId,
       replicatorFactory: extensionFactory,
       onRemoteConnected: async () => {
-        log6("onRemoteConnected", {
+        log5("onRemoteConnected", {
           peerId: connection.peerId
         }, {
-          F: __dxlog_file7,
+          F: __dxlog_file6,
           L: 73,
           S: this,
           C: (f, a) => f(...a)
         });
-        invariant6(this._context, void 0, {
-          F: __dxlog_file7,
+        invariant5(this._context, void 0, {
+          F: __dxlog_file6,
           L: 74,
           S: this,
           A: [
@@ -1696,10 +1537,10 @@ var MeshEchoReplicator = class {
         }
       },
       onRemoteDisconnected: async () => {
-        log6("onRemoteDisconnected", {
+        log5("onRemoteDisconnected", {
           peerId: connection.peerId
         }, {
-          F: __dxlog_file7,
+          F: __dxlog_file6,
           L: 88,
           S: this,
           C: (f, a) => f(...a)
@@ -1708,10 +1549,10 @@ var MeshEchoReplicator = class {
         const existingConnections = this._connectionsPerPeer.get(connection.peerId) ?? [];
         const index = existingConnections.indexOf(connection);
         if (index < 0) {
-          log6.warn("disconnected connection not found", {
+          log5.warn("disconnected connection not found", {
            peerId: connection.peerId
          }, {
-            F: __dxlog_file7,
+            F: __dxlog_file6,
             L: 96,
             S: this,
             C: (f, a) => f(...a)
@@ -1729,17 +1570,17 @@ var MeshEchoReplicator = class {
         }
       },
       shouldAdvertise: async (params) => {
-        log6("shouldAdvertise", {
+        log5("shouldAdvertise", {
           peerId: connection.peerId,
           documentId: params.documentId
         }, {
-          F: __dxlog_file7,
+          F: __dxlog_file6,
           L: 114,
           S: this,
           C: (f, a) => f(...a)
         });
-        invariant6(this._context, void 0, {
-          F: __dxlog_file7,
+        invariant5(this._context, void 0, {
+          F: __dxlog_file6,
           L: 115,
           S: this,
           A: [
@@ -1754,12 +1595,12 @@ var MeshEchoReplicator = class {
             documentId: params.documentId,
             peerId: connection.peerId
           });
-          log6("document not found locally for share policy check", {
+          log5("document not found locally for share policy check", {
             peerId: connection.peerId,
             documentId: params.documentId,
             acceptDocument: remoteDocumentExists
           }, {
-            F: __dxlog_file7,
+            F: __dxlog_file6,
             L: 123,
             S: this,
             C: (f, a) => f(...a)
@@ -1769,19 +1610,19 @@ var MeshEchoReplicator = class {
           const spaceId = await createIdFromSpaceKey(spaceKey);
           const authorizedDevices = this._authorizedDevices.get(spaceId);
           if (!connection.remoteDeviceKey) {
-            log6("device key not found for share policy check", {
+            log5("device key not found for share policy check", {
               peerId: connection.peerId,
               documentId: params.documentId
             }, {
-              F: __dxlog_file7,
-              L:
+              F: __dxlog_file6,
+              L: 140,
               S: this,
               C: (f, a) => f(...a)
             });
             return false;
           }
           const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
-          log6("share policy check", {
+          log5("share policy check", {
             localPeer: this._context.peerId,
             remotePeer: connection.peerId,
             documentId: params.documentId,
@@ -1789,16 +1630,16 @@ var MeshEchoReplicator = class {
             spaceKey,
             isAuthorized
           }, {
-            F: __dxlog_file7,
-            L:
+            F: __dxlog_file6,
+            L: 148,
             S: this,
             C: (f, a) => f(...a)
           });
           return isAuthorized;
         } catch (err) {
-          log6.catch(err, void 0, {
-            F: __dxlog_file7,
-            L:
+          log5.catch(err, void 0, {
+            F: __dxlog_file6,
+            L: 158,
             S: this,
             C: (f, a) => f(...a)
           });
@@ -1809,12 +1650,12 @@ var MeshEchoReplicator = class {
         const spaceId = getSpaceIdFromCollectionId(collectionId);
         const authorizedDevices = this._authorizedDevices.get(spaceId);
         if (!connection.remoteDeviceKey) {
-          log6("device key not found for collection sync check", {
+          log5("device key not found for collection sync check", {
             peerId: connection.peerId,
             collectionId
           }, {
-            F: __dxlog_file7,
-            L:
+            F: __dxlog_file6,
+            L: 168,
             S: this,
             C: (f, a) => f(...a)
           });
@@ -1828,12 +1669,12 @@ var MeshEchoReplicator = class {
     return connection.replicatorExtension;
   }
   async authorizeDevice(spaceKey, deviceKey) {
-    log6("authorizeDevice", {
+    log5("authorizeDevice", {
       spaceKey,
       deviceKey
     }, {
-      F: __dxlog_file7,
-      L:
+      F: __dxlog_file6,
+      L: 185,
       S: this,
       C: (f, a) => f(...a)
     });
```
```diff
@@ -2210,6 +2051,164 @@ var getByteCount = (message) => {
   return message.type.length + message.senderId.length + message.targetId.length + (message.data?.byteLength ?? 0) + (message.documentId?.length ?? 0);
 };
 
+// packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts
+var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/documents-synchronizer.ts";
+var MAX_UPDATE_FREQ = 10;
+var DocumentsSynchronizer = class extends Resource5 {
+  constructor(_params) {
+    super(), this._params = _params, this._syncStates = /* @__PURE__ */ new Map(), this._pendingUpdates = /* @__PURE__ */ new Set(), this._sendUpdatesJob = void 0;
+  }
+  addDocuments(documentIds, retryCounter = 0) {
+    if (retryCounter > 3) {
+      log6.warn("Failed to load document, retry limit reached", {
+        documentIds
+      }, {
+        F: __dxlog_file7,
+        L: 52,
+        S: this,
+        C: (f, a) => f(...a)
+      });
+      return;
+    }
+    for (const documentId of documentIds) {
+      this._params.repo.find(documentId).then(async (doc) => {
+        await doc.whenReady();
+        this._startSync(doc);
+        this._pendingUpdates.add(doc.documentId);
+        this._sendUpdatesJob.trigger();
+      }).catch((error) => {
+        log6.warn("Failed to load document, wraparound", {
+          documentId,
+          error
+        }, {
+          F: __dxlog_file7,
+          L: 66,
+          S: this,
+          C: (f, a) => f(...a)
+        });
+        this.addDocuments([
+          documentId
+        ], retryCounter + 1);
+      });
+    }
+  }
+  removeDocuments(documentIds) {
+    for (const documentId of documentIds) {
+      this._syncStates.get(documentId)?.clearSubscriptions?.();
+      this._syncStates.delete(documentId);
+      this._pendingUpdates.delete(documentId);
+    }
+  }
+  async _open() {
+    this._sendUpdatesJob = new UpdateScheduler(this._ctx, this._checkAndSendUpdates.bind(this), {
+      maxFrequency: MAX_UPDATE_FREQ
+    });
+  }
+  async _close() {
+    await this._sendUpdatesJob.join();
+    this._syncStates.clear();
+  }
+  async update(updates) {
+    for (const { documentId, mutation, isNew } of updates) {
+      if (isNew) {
+        const doc = await this._params.repo.find(documentId, FIND_PARAMS);
+        doc.update((doc2) => A2.loadIncremental(doc2, mutation));
+        this._startSync(doc);
+      } else {
+        this._writeMutation(documentId, mutation);
+      }
+    }
+  }
+  _startSync(doc) {
+    if (this._syncStates.has(doc.documentId)) {
+      log6("Document already being synced", {
+        documentId: doc.documentId
+      }, {
+        F: __dxlog_file7,
+        L: 105,
+        S: this,
+        C: (f, a) => f(...a)
+      });
+      return;
+    }
+    const syncState = {
+      handle: doc
+    };
+    this._subscribeForChanges(syncState);
+    this._syncStates.set(doc.documentId, syncState);
+  }
+  _subscribeForChanges(syncState) {
+    const handler = () => {
+      this._pendingUpdates.add(syncState.handle.documentId);
+      this._sendUpdatesJob.trigger();
+    };
+    syncState.handle.on("heads-changed", handler);
+    syncState.clearSubscriptions = () => syncState.handle.off("heads-changed", handler);
+  }
+  async _checkAndSendUpdates() {
+    const updates = [];
+    const docsWithPendingUpdates = Array.from(this._pendingUpdates);
+    this._pendingUpdates.clear();
+    for (const documentId of docsWithPendingUpdates) {
+      const update = this._getPendingChanges(documentId);
+      if (update) {
+        updates.push({
+          documentId,
+          mutation: update
+        });
+      }
+    }
+    if (updates.length > 0) {
+      this._params.sendUpdates({
+        updates
+      });
+    }
+  }
+  _getPendingChanges(documentId) {
+    const syncState = this._syncStates.get(documentId);
+    invariant6(syncState, "Sync state for document not found", {
+      F: __dxlog_file7,
+      L: 146,
+      S: this,
+      A: [
+        "syncState",
+        "'Sync state for document not found'"
+      ]
+    });
+    const handle = syncState.handle;
+    if (!handle || !handle.isReady() || !handle.doc()) {
+      return;
+    }
+    const doc = handle.doc();
+    const mutation = syncState.lastSentHead ? A2.saveSince(doc, syncState.lastSentHead) : A2.save(doc);
+    if (mutation.length === 0) {
+      return;
+    }
+    syncState.lastSentHead = A2.getHeads(doc);
+    return mutation;
+  }
+  _writeMutation(documentId, mutation) {
+    const syncState = this._syncStates.get(documentId);
+    invariant6(syncState, "Sync state for document not found", {
+      F: __dxlog_file7,
+      L: 162,
+      S: this,
+      A: [
+        "syncState",
+        "'Sync state for document not found'"
+      ]
+    });
+    syncState.handle.update((doc) => {
+      const headsBefore = A2.getHeads(doc);
+      const newDoc = A2.loadIncremental(doc, mutation);
+      if (A2.equals(headsBefore, syncState.lastSentHead)) {
+        syncState.lastSentHead = A2.getHeads(newDoc);
+      }
+      return newDoc;
+    });
+  }
+};
+
 
 // packages/core/echo/echo-pipeline/src/db-host/data-service.ts
 var __dxlog_file8 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/db-host/data-service.ts";
 var DataServiceImpl = class {
```
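`DocumentsSynchronizer` returns here almost verbatim, with two visible behavior changes: `update()` is now `async` and resolves the handle via `repo.find(documentId, FIND_PARAMS)` instead of the synchronous `findWithProgress`, and its caller in `DataServiceImpl` now awaits it (next hunk). The bounded retry in `addDocuments` re-queues a failed document with an incremented counter and gives up after three retries; the same shape in isolation, with illustrative names that are not part of the package's API:

```ts
const loadWithRetry = async <T>(load: () => Promise<T>, retryCounter = 0): Promise<T | undefined> => {
  if (retryCounter > 3) {
    console.warn('Failed to load document, retry limit reached');
    return undefined;
  }
  try {
    return await load();
  } catch (error) {
    console.warn('Failed to load document, retrying', error);
    return loadWithRetry(load, retryCounter + 1);
  }
};
```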
```diff
@@ -2273,7 +2272,7 @@ var DataServiceImpl = class {
         "'Subscription not found'"
       ]
     });
-    synchronizer.update(request.updates);
+    await synchronizer.update(request.updates);
   }
   async flush(request) {
     await this._automergeHost.flush(request);
@@ -2399,7 +2398,7 @@ var createSelectedDocumentsIterator = (automergeHost) => (
       doc = A3.view(doc, heads);
       const end = Date.now();
       if (end - begin > LOG_VIEW_OPERATION_THRESHOLD) {
-        log8
+        log8("Checking out document version is taking too long", {
           duration: end - begin,
           requestedHeads: heads,
           originalHeads: currentHeads
@@ -2961,6 +2960,7 @@ var ExecutionTrace = Object.freeze({
     return go(trace6, 0);
   }
 });
+var TRACE_QUERY_EXECUTION = false;
 var QueryExecutor = class extends Resource6 {
   constructor(options) {
     super();
@@ -3000,7 +3000,7 @@ var QueryExecutor = class extends Resource6 {
   async execQuery() {
     invariant10(this._lifecycleState === LifecycleState3.OPEN, void 0, {
       F: __dxlog_file11,
-      L:
+      L: 173,
       S: this,
       A: [
         "this._lifecycleState === LifecycleState.OPEN",
@@ -3016,6 +3016,9 @@ var QueryExecutor = class extends Resource6 {
     });
     this._trace = trace6;
     const changed = prevResultSet.length !== workingSet.length || prevResultSet.some((item, index) => workingSet[index].objectId !== item.objectId || workingSet[index].spaceId !== item.spaceId || workingSet[index].documentId !== item.documentId);
+    if (TRACE_QUERY_EXECUTION) {
+      console.log(ExecutionTrace.format(trace6));
+    }
     return {
       changed
     };
@@ -3544,7 +3547,7 @@ var QueryServiceImpl = class extends Resource7 {
    * Re-index all loaded documents.
    */
   async reindex() {
-    log10
+    log10("Reindexing all documents...", void 0, {
       F: __dxlog_file12,
       L: 141,
       S: this,
@@ -3557,7 +3560,7 @@ var QueryServiceImpl = class extends Resource7 {
         ids.set(id, heads);
       }
       if (ids.size % 100 === 0) {
-        log10
+        log10("Collected documents...", {
           count: ids.size
         }, {
           F: __dxlog_file12,
@@ -3567,7 +3570,7 @@ var QueryServiceImpl = class extends Resource7 {
         });
       }
     }
-    log10
+    log10("Marking all documents as dirty...", {
       count: ids.size
     }, {
       F: __dxlog_file12,
@@ -4230,7 +4233,7 @@ var EchoEdgeReplicator = class {
     this._sharePolicyEnabled = !disableSharePolicy;
   }
   async connect(context) {
-    log13
+    log13("connecting...", {
       peerId: context.peerId,
       connectedSpaces: this._connectedSpaces.size
     }, {
@@ -4242,7 +4245,7 @@ var EchoEdgeReplicator = class {
     this._context = context;
     this._ctx = Context6.default(void 0, {
       F: __dxlog_file18,
-      L:
+      L: 63
     });
     this._ctx.onDispose(this._edgeConnection.onReconnected(() => {
      this._ctx && scheduleMicroTask(this._ctx, () => this._handleReconnect());
@@ -4341,7 +4344,7 @@ var EchoEdgeReplicator = class {
   async _openConnection(spaceId, reconnects = 0) {
     invariant14(this._context, void 0, {
       F: __dxlog_file18,
-      L:
+      L: 124,
       S: this,
       A: [
         "this._context",
@@ -4350,7 +4353,7 @@ var EchoEdgeReplicator = class {
     });
     invariant14(!this._connections.has(spaceId), void 0, {
       F: __dxlog_file18,
-      L:
+      L: 125,
       S: this,
       A: [
         "!this._connections.has(spaceId)",
@@ -4374,13 +4377,13 @@ var EchoEdgeReplicator = class {
       return;
     }
     const restartDelay = Math.min(MAX_RESTART_DELAY, INITIAL_RESTART_DELAY * reconnects) + Math.random() * RESTART_DELAY_JITTER;
-    log13
+    log13("connection restart scheduled", {
       spaceId,
       reconnects,
       restartDelay
     }, {
       F: __dxlog_file18,
-      L:
+      L: 148,
       S: this,
       C: (f, a) => f(...a)
     });
```
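The edge replicator's reconnect delay grows linearly with the attempt count, is capped at `MAX_RESTART_DELAY`, and gets random jitter so peers don't all retry in lockstep. The constant values below are illustrative assumptions; only the formula appears in the bundle:

```ts
const INITIAL_RESTART_DELAY = 500; // ms, assumed
const MAX_RESTART_DELAY = 30_000;  // ms, assumed
const RESTART_DELAY_JITTER = 1_000; // ms, assumed

// Linear backoff with a hard cap plus uniform random jitter.
const restartDelay = (reconnects: number): number =>
  Math.min(MAX_RESTART_DELAY, INITIAL_RESTART_DELAY * reconnects) + Math.random() * RESTART_DELAY_JITTER;
```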
```diff
@@ -4450,7 +4453,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
   async _open(ctx) {
     log13("opening...", void 0, {
       F: __dxlog_file18,
-      L:
+      L: 251,
       S: this,
       C: (f, a) => f(...a)
     });
@@ -4463,7 +4466,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
   async _close() {
     log13("closing...", void 0, {
       F: __dxlog_file18,
-      L:
+      L: 266,
       S: this,
       C: (f, a) => f(...a)
     });
@@ -4474,7 +4477,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
   get peerId() {
     invariant14(this._remotePeerId, "Not connected", {
       F: __dxlog_file18,
-      L:
+      L: 275,
       S: this,
       A: [
         "this._remotePeerId",
@@ -4499,7 +4502,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
       remoteId: this._remotePeerId
     }, {
       F: __dxlog_file18,
-      L:
+      L: 290,
       S: this,
       C: (f, a) => f(...a)
     });
@@ -4525,7 +4528,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
       remoteId: this._remotePeerId
     }, {
       F: __dxlog_file18,
-      L:
+      L: 319,
       S: this,
       C: (f, a) => f(...a)
     });
@@ -4548,7 +4551,7 @@ var EdgeReplicatorConnection = class extends Resource11 {
       remoteId: this._remotePeerId
     }, {
       F: __dxlog_file18,
-      L:
+      L: 348,
       S: this,
       C: (f, a) => f(...a)
     });
```