@dxos/echo-pipeline 0.6.2 → 0.6.3-main.0308ae2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/{chunk-UJQ5VS5V.mjs → chunk-6MJEONOX.mjs} +2569 -1066
- package/dist/lib/browser/chunk-6MJEONOX.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +12 -1049
- package/dist/lib/browser/index.mjs.map +4 -4
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +224 -2
- package/dist/lib/browser/testing/index.mjs.map +4 -4
- package/dist/lib/node/{chunk-RH6TDRML.cjs → chunk-PT5LWMPA.cjs} +3185 -1710
- package/dist/lib/node/chunk-PT5LWMPA.cjs.map +7 -0
- package/dist/lib/node/index.cjs +37 -1056
- package/dist/lib/node/index.cjs.map +4 -4
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +238 -13
- package/dist/lib/node/testing/index.cjs.map +4 -4
- package/dist/types/src/automerge/automerge-host.d.ts +29 -2
- package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
- package/dist/types/src/automerge/collection-synchronizer.d.ts +61 -0
- package/dist/types/src/automerge/collection-synchronizer.d.ts.map +1 -0
- package/dist/types/src/automerge/collection-synchronizer.test.d.ts +2 -0
- package/dist/types/src/automerge/collection-synchronizer.test.d.ts.map +1 -0
- package/dist/types/src/automerge/echo-network-adapter.d.ts +9 -2
- package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
- package/dist/types/src/automerge/echo-replicator.d.ts +7 -0
- package/dist/types/src/automerge/echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/heads-store.d.ts +1 -1
- package/dist/types/src/automerge/heads-store.d.ts.map +1 -1
- package/dist/types/src/automerge/index.d.ts +2 -0
- package/dist/types/src/automerge/index.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts +3 -1
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts +2 -2
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/network-protocol.d.ts +31 -0
- package/dist/types/src/automerge/network-protocol.d.ts.map +1 -0
- package/dist/types/src/automerge/space-collection.d.ts +4 -0
- package/dist/types/src/automerge/space-collection.d.ts.map +1 -0
- package/dist/types/src/db-host/data-service.d.ts +2 -1
- package/dist/types/src/db-host/data-service.d.ts.map +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts.map +1 -1
- package/dist/types/src/testing/index.d.ts +1 -0
- package/dist/types/src/testing/index.d.ts.map +1 -1
- package/dist/types/src/testing/test-replicator.d.ts +46 -0
- package/dist/types/src/testing/test-replicator.d.ts.map +1 -0
- package/package.json +33 -33
- package/src/automerge/automerge-host.test.ts +76 -14
- package/src/automerge/automerge-host.ts +219 -32
- package/src/automerge/automerge-repo.test.ts +2 -1
- package/src/automerge/collection-synchronizer.test.ts +91 -0
- package/src/automerge/collection-synchronizer.ts +204 -0
- package/src/automerge/echo-network-adapter.test.ts +5 -1
- package/src/automerge/echo-network-adapter.ts +69 -4
- package/src/automerge/echo-replicator.ts +9 -0
- package/src/automerge/heads-store.ts +6 -9
- package/src/automerge/index.ts +2 -0
- package/src/automerge/mesh-echo-replicator-connection.ts +6 -1
- package/src/automerge/mesh-echo-replicator.ts +28 -7
- package/src/automerge/network-protocol.ts +45 -0
- package/src/automerge/space-collection.ts +14 -0
- package/src/db-host/data-service.ts +26 -12
- package/src/db-host/documents-synchronizer.ts +17 -5
- package/src/metadata/metadata-store.ts +1 -1
- package/src/testing/index.ts +1 -0
- package/src/testing/test-replicator.ts +194 -0
- package/dist/lib/browser/chunk-UJQ5VS5V.mjs.map +0 -7
- package/dist/lib/node/chunk-RH6TDRML.cjs.map +0 -7
|
@@ -2,13 +2,15 @@ import "@dxos/node-std/globals";
|
|
|
2
2
|
import {
|
|
3
3
|
AuthExtension,
|
|
4
4
|
AuthStatus,
|
|
5
|
-
|
|
5
|
+
AutomergeHost,
|
|
6
6
|
CredentialRetrieverExtension,
|
|
7
7
|
CredentialServerExtension,
|
|
8
8
|
DataServiceImpl,
|
|
9
9
|
DocumentsSynchronizer,
|
|
10
|
+
LevelDBStorageAdapter,
|
|
10
11
|
MOCK_AUTH_PROVIDER,
|
|
11
12
|
MOCK_AUTH_VERIFIER,
|
|
13
|
+
MeshEchoReplicator,
|
|
12
14
|
MetadataStore,
|
|
13
15
|
Pipeline,
|
|
14
16
|
SnapshotManager,
|
|
@@ -21,1059 +23,17 @@ import {
|
|
|
21
23
|
codec,
|
|
22
24
|
createIdFromSpaceKey,
|
|
23
25
|
createMappedFeedWriter,
|
|
26
|
+
deriveCollectionIdFromSpaceId,
|
|
27
|
+
diffCollectionState,
|
|
28
|
+
encodingOptions,
|
|
29
|
+
getSpaceIdFromCollectionId,
|
|
30
|
+
getSpaceKeyFromDoc,
|
|
24
31
|
hasInvitationExpired,
|
|
25
32
|
mapFeedIndexesToTimeframe,
|
|
26
33
|
mapTimeframeToFeedIndexes,
|
|
27
34
|
startAfter,
|
|
28
35
|
valueEncoding
|
|
29
|
-
} from "./chunk-
|
|
30
|
-
|
|
31
|
-
// packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts
|
|
32
|
-
import { Event, asyncTimeout } from "@dxos/async";
|
|
33
|
-
import { next as automerge, getBackend, getHeads, isAutomerge, equals as headsEquals, save } from "@dxos/automerge/automerge";
|
|
34
|
-
import { Repo } from "@dxos/automerge/automerge-repo";
|
|
35
|
-
import { Context, Resource as Resource2, cancelWithContext } from "@dxos/context";
|
|
36
|
-
import { invariant as invariant2 } from "@dxos/invariant";
|
|
37
|
-
import { PublicKey } from "@dxos/keys";
|
|
38
|
-
import { log as log2 } from "@dxos/log";
|
|
39
|
-
import { objectPointerCodec } from "@dxos/protocols";
|
|
40
|
-
import { trace } from "@dxos/tracing";
|
|
41
|
-
import { mapValues } from "@dxos/util";
|
|
42
|
-
|
|
43
|
-
// packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts
|
|
44
|
-
import { synchronized, Trigger } from "@dxos/async";
|
|
45
|
-
import { NetworkAdapter } from "@dxos/automerge/automerge-repo";
|
|
46
|
-
import { LifecycleState } from "@dxos/context";
|
|
47
|
-
import { invariant } from "@dxos/invariant";
|
|
48
|
-
import { log } from "@dxos/log";
|
|
49
|
-
function _ts_decorate(decorators, target, key, desc) {
|
|
50
|
-
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
51
|
-
if (typeof Reflect === "object" && typeof Reflect.decorate === "function")
|
|
52
|
-
r = Reflect.decorate(decorators, target, key, desc);
|
|
53
|
-
else
|
|
54
|
-
for (var i = decorators.length - 1; i >= 0; i--)
|
|
55
|
-
if (d = decorators[i])
|
|
56
|
-
r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
57
|
-
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
58
|
-
}
|
|
59
|
-
var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/echo-network-adapter.ts";
|
|
60
|
-
var EchoNetworkAdapter = class extends NetworkAdapter {
|
|
61
|
-
constructor(_params) {
|
|
62
|
-
super();
|
|
63
|
-
this._params = _params;
|
|
64
|
-
this._replicators = /* @__PURE__ */ new Set();
|
|
65
|
-
this._connections = /* @__PURE__ */ new Map();
|
|
66
|
-
this._lifecycleState = LifecycleState.CLOSED;
|
|
67
|
-
this._connected = new Trigger();
|
|
68
|
-
}
|
|
69
|
-
connect(peerId, peerMetadata) {
|
|
70
|
-
this.peerId = peerId;
|
|
71
|
-
this.peerMetadata = peerMetadata;
|
|
72
|
-
this._connected.wake();
|
|
73
|
-
}
|
|
74
|
-
send(message) {
|
|
75
|
-
const connectionEntry = this._connections.get(message.targetId);
|
|
76
|
-
if (!connectionEntry) {
|
|
77
|
-
throw new Error("Connection not found.");
|
|
78
|
-
}
|
|
79
|
-
connectionEntry.writer.write(message).catch((err) => {
|
|
80
|
-
if (connectionEntry.isOpen) {
|
|
81
|
-
log.catch(err, void 0, {
|
|
82
|
-
F: __dxlog_file,
|
|
83
|
-
L: 49,
|
|
84
|
-
S: this,
|
|
85
|
-
C: (f, a) => f(...a)
|
|
86
|
-
});
|
|
87
|
-
}
|
|
88
|
-
});
|
|
89
|
-
}
|
|
90
|
-
disconnect() {
|
|
91
|
-
}
|
|
92
|
-
async open() {
|
|
93
|
-
if (this._lifecycleState === LifecycleState.OPEN) {
|
|
94
|
-
return;
|
|
95
|
-
}
|
|
96
|
-
this._lifecycleState = LifecycleState.OPEN;
|
|
97
|
-
log("emit ready", void 0, {
|
|
98
|
-
F: __dxlog_file,
|
|
99
|
-
L: 65,
|
|
100
|
-
S: this,
|
|
101
|
-
C: (f, a) => f(...a)
|
|
102
|
-
});
|
|
103
|
-
this.emit("ready", {
|
|
104
|
-
network: this
|
|
105
|
-
});
|
|
106
|
-
}
|
|
107
|
-
async close() {
|
|
108
|
-
if (this._lifecycleState === LifecycleState.CLOSED) {
|
|
109
|
-
return this;
|
|
110
|
-
}
|
|
111
|
-
for (const replicator of this._replicators) {
|
|
112
|
-
await replicator.disconnect();
|
|
113
|
-
}
|
|
114
|
-
this._replicators.clear();
|
|
115
|
-
this._lifecycleState = LifecycleState.CLOSED;
|
|
116
|
-
}
|
|
117
|
-
async whenConnected() {
|
|
118
|
-
await this._connected.wait({
|
|
119
|
-
timeout: 1e4
|
|
120
|
-
});
|
|
121
|
-
}
|
|
122
|
-
async addReplicator(replicator) {
|
|
123
|
-
invariant(this._lifecycleState === LifecycleState.OPEN, void 0, {
|
|
124
|
-
F: __dxlog_file,
|
|
125
|
-
L: 91,
|
|
126
|
-
S: this,
|
|
127
|
-
A: [
|
|
128
|
-
"this._lifecycleState === LifecycleState.OPEN",
|
|
129
|
-
""
|
|
130
|
-
]
|
|
131
|
-
});
|
|
132
|
-
invariant(this.peerId, void 0, {
|
|
133
|
-
F: __dxlog_file,
|
|
134
|
-
L: 92,
|
|
135
|
-
S: this,
|
|
136
|
-
A: [
|
|
137
|
-
"this.peerId",
|
|
138
|
-
""
|
|
139
|
-
]
|
|
140
|
-
});
|
|
141
|
-
invariant(!this._replicators.has(replicator), void 0, {
|
|
142
|
-
F: __dxlog_file,
|
|
143
|
-
L: 93,
|
|
144
|
-
S: this,
|
|
145
|
-
A: [
|
|
146
|
-
"!this._replicators.has(replicator)",
|
|
147
|
-
""
|
|
148
|
-
]
|
|
149
|
-
});
|
|
150
|
-
this._replicators.add(replicator);
|
|
151
|
-
await replicator.connect({
|
|
152
|
-
peerId: this.peerId,
|
|
153
|
-
onConnectionOpen: this._onConnectionOpen.bind(this),
|
|
154
|
-
onConnectionClosed: this._onConnectionClosed.bind(this),
|
|
155
|
-
onConnectionAuthScopeChanged: this._onConnectionAuthScopeChanged.bind(this),
|
|
156
|
-
getContainingSpaceForDocument: this._params.getContainingSpaceForDocument
|
|
157
|
-
});
|
|
158
|
-
}
|
|
159
|
-
async removeReplicator(replicator) {
|
|
160
|
-
invariant(this._lifecycleState === LifecycleState.OPEN, void 0, {
|
|
161
|
-
F: __dxlog_file,
|
|
162
|
-
L: 107,
|
|
163
|
-
S: this,
|
|
164
|
-
A: [
|
|
165
|
-
"this._lifecycleState === LifecycleState.OPEN",
|
|
166
|
-
""
|
|
167
|
-
]
|
|
168
|
-
});
|
|
169
|
-
invariant(this._replicators.has(replicator), void 0, {
|
|
170
|
-
F: __dxlog_file,
|
|
171
|
-
L: 108,
|
|
172
|
-
S: this,
|
|
173
|
-
A: [
|
|
174
|
-
"this._replicators.has(replicator)",
|
|
175
|
-
""
|
|
176
|
-
]
|
|
177
|
-
});
|
|
178
|
-
await replicator.disconnect();
|
|
179
|
-
this._replicators.delete(replicator);
|
|
180
|
-
}
|
|
181
|
-
async shouldAdvertise(peerId, params) {
|
|
182
|
-
const connection = this._connections.get(peerId);
|
|
183
|
-
if (!connection) {
|
|
184
|
-
return false;
|
|
185
|
-
}
|
|
186
|
-
return connection.connection.shouldAdvertise(params);
|
|
187
|
-
}
|
|
188
|
-
_onConnectionOpen(connection) {
|
|
189
|
-
log("Connection opened", {
|
|
190
|
-
peerId: connection.peerId
|
|
191
|
-
}, {
|
|
192
|
-
F: __dxlog_file,
|
|
193
|
-
L: 123,
|
|
194
|
-
S: this,
|
|
195
|
-
C: (f, a) => f(...a)
|
|
196
|
-
});
|
|
197
|
-
invariant(!this._connections.has(connection.peerId), void 0, {
|
|
198
|
-
F: __dxlog_file,
|
|
199
|
-
L: 124,
|
|
200
|
-
S: this,
|
|
201
|
-
A: [
|
|
202
|
-
"!this._connections.has(connection.peerId as PeerId)",
|
|
203
|
-
""
|
|
204
|
-
]
|
|
205
|
-
});
|
|
206
|
-
const reader = connection.readable.getReader();
|
|
207
|
-
const writer = connection.writable.getWriter();
|
|
208
|
-
const connectionEntry = {
|
|
209
|
-
connection,
|
|
210
|
-
reader,
|
|
211
|
-
writer,
|
|
212
|
-
isOpen: true
|
|
213
|
-
};
|
|
214
|
-
this._connections.set(connection.peerId, connectionEntry);
|
|
215
|
-
queueMicrotask(async () => {
|
|
216
|
-
try {
|
|
217
|
-
while (true) {
|
|
218
|
-
const { done, value } = await reader.read();
|
|
219
|
-
if (done) {
|
|
220
|
-
break;
|
|
221
|
-
}
|
|
222
|
-
this.emit("message", value);
|
|
223
|
-
}
|
|
224
|
-
} catch (err) {
|
|
225
|
-
if (connectionEntry.isOpen) {
|
|
226
|
-
log.catch(err, void 0, {
|
|
227
|
-
F: __dxlog_file,
|
|
228
|
-
L: 143,
|
|
229
|
-
S: this,
|
|
230
|
-
C: (f, a) => f(...a)
|
|
231
|
-
});
|
|
232
|
-
}
|
|
233
|
-
}
|
|
234
|
-
});
|
|
235
|
-
log("emit peer-candidate", {
|
|
236
|
-
peerId: connection.peerId
|
|
237
|
-
}, {
|
|
238
|
-
F: __dxlog_file,
|
|
239
|
-
L: 148,
|
|
240
|
-
S: this,
|
|
241
|
-
C: (f, a) => f(...a)
|
|
242
|
-
});
|
|
243
|
-
this._emitPeerCandidate(connection);
|
|
244
|
-
}
|
|
245
|
-
/**
|
|
246
|
-
* Trigger doc-synchronizer shared documents set recalculation. Happens on peer-candidate.
|
|
247
|
-
* TODO(y): replace with a proper API call when sharePolicy update becomes supported by automerge-repo
|
|
248
|
-
*/
|
|
249
|
-
_onConnectionAuthScopeChanged(connection) {
|
|
250
|
-
log("Connection auth scope changed", {
|
|
251
|
-
peerId: connection.peerId
|
|
252
|
-
}, {
|
|
253
|
-
F: __dxlog_file,
|
|
254
|
-
L: 157,
|
|
255
|
-
S: this,
|
|
256
|
-
C: (f, a) => f(...a)
|
|
257
|
-
});
|
|
258
|
-
const entry = this._connections.get(connection.peerId);
|
|
259
|
-
invariant(entry, void 0, {
|
|
260
|
-
F: __dxlog_file,
|
|
261
|
-
L: 159,
|
|
262
|
-
S: this,
|
|
263
|
-
A: [
|
|
264
|
-
"entry",
|
|
265
|
-
""
|
|
266
|
-
]
|
|
267
|
-
});
|
|
268
|
-
this.emit("peer-disconnected", {
|
|
269
|
-
peerId: connection.peerId
|
|
270
|
-
});
|
|
271
|
-
this._emitPeerCandidate(connection);
|
|
272
|
-
}
|
|
273
|
-
_onConnectionClosed(connection) {
|
|
274
|
-
log("Connection closed", {
|
|
275
|
-
peerId: connection.peerId
|
|
276
|
-
}, {
|
|
277
|
-
F: __dxlog_file,
|
|
278
|
-
L: 165,
|
|
279
|
-
S: this,
|
|
280
|
-
C: (f, a) => f(...a)
|
|
281
|
-
});
|
|
282
|
-
const entry = this._connections.get(connection.peerId);
|
|
283
|
-
invariant(entry, void 0, {
|
|
284
|
-
F: __dxlog_file,
|
|
285
|
-
L: 167,
|
|
286
|
-
S: this,
|
|
287
|
-
A: [
|
|
288
|
-
"entry",
|
|
289
|
-
""
|
|
290
|
-
]
|
|
291
|
-
});
|
|
292
|
-
entry.isOpen = false;
|
|
293
|
-
this.emit("peer-disconnected", {
|
|
294
|
-
peerId: connection.peerId
|
|
295
|
-
});
|
|
296
|
-
void entry.reader.cancel().catch((err) => log.catch(err, void 0, {
|
|
297
|
-
F: __dxlog_file,
|
|
298
|
-
L: 172,
|
|
299
|
-
S: this,
|
|
300
|
-
C: (f, a) => f(...a)
|
|
301
|
-
}));
|
|
302
|
-
void entry.writer.abort().catch((err) => log.catch(err, void 0, {
|
|
303
|
-
F: __dxlog_file,
|
|
304
|
-
L: 173,
|
|
305
|
-
S: this,
|
|
306
|
-
C: (f, a) => f(...a)
|
|
307
|
-
}));
|
|
308
|
-
this._connections.delete(connection.peerId);
|
|
309
|
-
}
|
|
310
|
-
_emitPeerCandidate(connection) {
|
|
311
|
-
this.emit("peer-candidate", {
|
|
312
|
-
peerId: connection.peerId,
|
|
313
|
-
peerMetadata: createEchoPeerMetadata()
|
|
314
|
-
});
|
|
315
|
-
}
|
|
316
|
-
};
|
|
317
|
-
_ts_decorate([
|
|
318
|
-
synchronized
|
|
319
|
-
], EchoNetworkAdapter.prototype, "open", null);
|
|
320
|
-
_ts_decorate([
|
|
321
|
-
synchronized
|
|
322
|
-
], EchoNetworkAdapter.prototype, "close", null);
|
|
323
|
-
_ts_decorate([
|
|
324
|
-
synchronized
|
|
325
|
-
], EchoNetworkAdapter.prototype, "addReplicator", null);
|
|
326
|
-
_ts_decorate([
|
|
327
|
-
synchronized
|
|
328
|
-
], EchoNetworkAdapter.prototype, "removeReplicator", null);
|
|
329
|
-
var createEchoPeerMetadata = () => ({
|
|
330
|
-
// TODO(dmaretskyi): Refactor this.
|
|
331
|
-
dxos_peerSource: "EchoNetworkAdapter"
|
|
332
|
-
});
|
|
333
|
-
var isEchoPeerMetadata = (metadata) => metadata?.dxos_peerSource === "EchoNetworkAdapter";
|
|
334
|
-
|
|
335
|
-
// packages/core/echo/echo-pipeline/src/automerge/heads-store.ts
|
|
336
|
-
import { headsEncoding } from "@dxos/indexing";
|
|
337
|
-
var HeadsStore = class {
|
|
338
|
-
constructor({ db }) {
|
|
339
|
-
this._db = db;
|
|
340
|
-
}
|
|
341
|
-
setHeads(documentId, heads, batch) {
|
|
342
|
-
batch.put(documentId, heads, {
|
|
343
|
-
sublevel: this._db,
|
|
344
|
-
keyEncoding: "utf8",
|
|
345
|
-
valueEncoding: headsEncoding
|
|
346
|
-
});
|
|
347
|
-
}
|
|
348
|
-
async getHeads(documentId) {
|
|
349
|
-
try {
|
|
350
|
-
return await this._db.get(documentId, {
|
|
351
|
-
keyEncoding: "utf8",
|
|
352
|
-
valueEncoding: headsEncoding
|
|
353
|
-
});
|
|
354
|
-
} catch (err) {
|
|
355
|
-
if (err.notFound) {
|
|
356
|
-
return void 0;
|
|
357
|
-
}
|
|
358
|
-
throw err;
|
|
359
|
-
}
|
|
360
|
-
}
|
|
361
|
-
};
|
|
362
|
-
|
|
363
|
-
// packages/core/echo/echo-pipeline/src/automerge/leveldb-storage-adapter.ts
|
|
364
|
-
import { LifecycleState as LifecycleState2, Resource } from "@dxos/context";
|
|
365
|
-
var LevelDBStorageAdapter = class extends Resource {
|
|
366
|
-
constructor(_params) {
|
|
367
|
-
super();
|
|
368
|
-
this._params = _params;
|
|
369
|
-
}
|
|
370
|
-
async load(keyArray) {
|
|
371
|
-
try {
|
|
372
|
-
if (this._lifecycleState !== LifecycleState2.OPEN) {
|
|
373
|
-
return void 0;
|
|
374
|
-
}
|
|
375
|
-
return await this._params.db.get(keyArray, {
|
|
376
|
-
...encodingOptions
|
|
377
|
-
});
|
|
378
|
-
} catch (err) {
|
|
379
|
-
if (isLevelDbNotFoundError(err)) {
|
|
380
|
-
return void 0;
|
|
381
|
-
}
|
|
382
|
-
throw err;
|
|
383
|
-
}
|
|
384
|
-
}
|
|
385
|
-
async save(keyArray, binary) {
|
|
386
|
-
if (this._lifecycleState !== LifecycleState2.OPEN) {
|
|
387
|
-
return void 0;
|
|
388
|
-
}
|
|
389
|
-
const batch = this._params.db.batch();
|
|
390
|
-
await this._params.callbacks?.beforeSave?.({
|
|
391
|
-
path: keyArray,
|
|
392
|
-
batch
|
|
393
|
-
});
|
|
394
|
-
batch.put(keyArray, Buffer.from(binary), {
|
|
395
|
-
...encodingOptions
|
|
396
|
-
});
|
|
397
|
-
await batch.write();
|
|
398
|
-
await this._params.callbacks?.afterSave?.(keyArray);
|
|
399
|
-
}
|
|
400
|
-
async remove(keyArray) {
|
|
401
|
-
if (this._lifecycleState !== LifecycleState2.OPEN) {
|
|
402
|
-
return void 0;
|
|
403
|
-
}
|
|
404
|
-
await this._params.db.del(keyArray, {
|
|
405
|
-
...encodingOptions
|
|
406
|
-
});
|
|
407
|
-
}
|
|
408
|
-
async loadRange(keyPrefix) {
|
|
409
|
-
if (this._lifecycleState !== LifecycleState2.OPEN) {
|
|
410
|
-
return [];
|
|
411
|
-
}
|
|
412
|
-
const result = [];
|
|
413
|
-
for await (const [key, value] of this._params.db.iterator({
|
|
414
|
-
gte: keyPrefix,
|
|
415
|
-
lte: [
|
|
416
|
-
...keyPrefix,
|
|
417
|
-
"\uFFFF"
|
|
418
|
-
],
|
|
419
|
-
...encodingOptions
|
|
420
|
-
})) {
|
|
421
|
-
result.push({
|
|
422
|
-
key,
|
|
423
|
-
data: value
|
|
424
|
-
});
|
|
425
|
-
}
|
|
426
|
-
return result;
|
|
427
|
-
}
|
|
428
|
-
async removeRange(keyPrefix) {
|
|
429
|
-
if (this._lifecycleState !== LifecycleState2.OPEN) {
|
|
430
|
-
return void 0;
|
|
431
|
-
}
|
|
432
|
-
const batch = this._params.db.batch();
|
|
433
|
-
for await (const [key] of this._params.db.iterator({
|
|
434
|
-
gte: keyPrefix,
|
|
435
|
-
lte: [
|
|
436
|
-
...keyPrefix,
|
|
437
|
-
"\uFFFF"
|
|
438
|
-
],
|
|
439
|
-
...encodingOptions
|
|
440
|
-
})) {
|
|
441
|
-
batch.del(key, {
|
|
442
|
-
...encodingOptions
|
|
443
|
-
});
|
|
444
|
-
}
|
|
445
|
-
await batch.write();
|
|
446
|
-
}
|
|
447
|
-
};
|
|
448
|
-
var keyEncoder = {
|
|
449
|
-
encode: (key) => Buffer.from(key.map((k) => k.replaceAll("%", "%25").replaceAll("-", "%2D")).join("-")),
|
|
450
|
-
decode: (key) => Buffer.from(key).toString().split("-").map((k) => k.replaceAll("%2D", "-").replaceAll("%25", "%")),
|
|
451
|
-
format: "buffer"
|
|
452
|
-
};
|
|
453
|
-
var encodingOptions = {
|
|
454
|
-
keyEncoding: keyEncoder,
|
|
455
|
-
valueEncoding: "buffer"
|
|
456
|
-
};
|
|
457
|
-
var isLevelDbNotFoundError = (err) => err.code === "LEVEL_NOT_FOUND";
|
|
458
|
-
|
|
459
|
-
// packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts
|
|
460
|
-
function _ts_decorate2(decorators, target, key, desc) {
|
|
461
|
-
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
|
|
462
|
-
if (typeof Reflect === "object" && typeof Reflect.decorate === "function")
|
|
463
|
-
r = Reflect.decorate(decorators, target, key, desc);
|
|
464
|
-
else
|
|
465
|
-
for (var i = decorators.length - 1; i >= 0; i--)
|
|
466
|
-
if (d = decorators[i])
|
|
467
|
-
r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
|
|
468
|
-
return c > 3 && r && Object.defineProperty(target, key, r), r;
|
|
469
|
-
}
|
|
470
|
-
var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/automerge-host.ts";
|
|
471
|
-
var AutomergeHost = class extends Resource2 {
|
|
472
|
-
constructor({ db, indexMetadataStore }) {
|
|
473
|
-
super();
|
|
474
|
-
this._echoNetworkAdapter = new EchoNetworkAdapter({
|
|
475
|
-
getContainingSpaceForDocument: this._getContainingSpaceForDocument.bind(this)
|
|
476
|
-
});
|
|
477
|
-
this._db = db;
|
|
478
|
-
this._storage = new LevelDBStorageAdapter({
|
|
479
|
-
db: db.sublevel("automerge"),
|
|
480
|
-
callbacks: {
|
|
481
|
-
beforeSave: async (params) => this._beforeSave(params),
|
|
482
|
-
afterSave: async () => this._afterSave()
|
|
483
|
-
}
|
|
484
|
-
});
|
|
485
|
-
this._headsStore = new HeadsStore({
|
|
486
|
-
db: db.sublevel("heads")
|
|
487
|
-
});
|
|
488
|
-
this._indexMetadataStore = indexMetadataStore;
|
|
489
|
-
}
|
|
490
|
-
async _open() {
|
|
491
|
-
this._peerId = `host-${PublicKey.random().toHex()}`;
|
|
492
|
-
await this._storage.open?.();
|
|
493
|
-
this._repo = new Repo({
|
|
494
|
-
peerId: this._peerId,
|
|
495
|
-
sharePolicy: this._sharePolicy.bind(this),
|
|
496
|
-
storage: this._storage,
|
|
497
|
-
network: [
|
|
498
|
-
// Upstream swarm.
|
|
499
|
-
this._echoNetworkAdapter
|
|
500
|
-
]
|
|
501
|
-
});
|
|
502
|
-
await this._echoNetworkAdapter.open();
|
|
503
|
-
await this._echoNetworkAdapter.whenConnected();
|
|
504
|
-
}
|
|
505
|
-
async _close() {
|
|
506
|
-
await this._storage.close?.();
|
|
507
|
-
await this._echoNetworkAdapter.close();
|
|
508
|
-
await this._ctx.dispose();
|
|
509
|
-
}
|
|
510
|
-
/**
|
|
511
|
-
* @deprecated To be abstracted away.
|
|
512
|
-
*/
|
|
513
|
-
get repo() {
|
|
514
|
-
return this._repo;
|
|
515
|
-
}
|
|
516
|
-
get loadedDocsCount() {
|
|
517
|
-
return Object.keys(this._repo.handles).length;
|
|
518
|
-
}
|
|
519
|
-
async addReplicator(replicator) {
|
|
520
|
-
await this._echoNetworkAdapter.addReplicator(replicator);
|
|
521
|
-
}
|
|
522
|
-
async removeReplicator(replicator) {
|
|
523
|
-
await this._echoNetworkAdapter.removeReplicator(replicator);
|
|
524
|
-
}
|
|
525
|
-
/**
|
|
526
|
-
* Loads the document handle from the repo and waits for it to be ready.
|
|
527
|
-
*/
|
|
528
|
-
async loadDoc(ctx, documentId, opts) {
|
|
529
|
-
let handle;
|
|
530
|
-
if (typeof documentId === "string") {
|
|
531
|
-
handle = this._repo.handles[documentId];
|
|
532
|
-
}
|
|
533
|
-
if (!handle) {
|
|
534
|
-
handle = this._repo.find(documentId);
|
|
535
|
-
}
|
|
536
|
-
if (!handle.isReady()) {
|
|
537
|
-
if (!opts?.timeout) {
|
|
538
|
-
await cancelWithContext(ctx, handle.whenReady());
|
|
539
|
-
} else {
|
|
540
|
-
await cancelWithContext(ctx, asyncTimeout(handle.whenReady(), opts.timeout));
|
|
541
|
-
}
|
|
542
|
-
}
|
|
543
|
-
return handle;
|
|
544
|
-
}
|
|
545
|
-
/**
|
|
546
|
-
* Create new persisted document.
|
|
547
|
-
*/
|
|
548
|
-
createDoc(initialValue, opts) {
|
|
549
|
-
if (opts?.preserveHistory) {
|
|
550
|
-
if (!isAutomerge(initialValue)) {
|
|
551
|
-
throw new TypeError("Initial value must be an Automerge document");
|
|
552
|
-
}
|
|
553
|
-
return this._repo.import(save(initialValue));
|
|
554
|
-
} else {
|
|
555
|
-
return this._repo.create(initialValue);
|
|
556
|
-
}
|
|
557
|
-
}
|
|
558
|
-
async waitUntilHeadsReplicated(heads) {
|
|
559
|
-
await Promise.all(heads.entries?.map(async ({ documentId, heads: heads2 }) => {
|
|
560
|
-
if (!heads2 || heads2.length === 0) {
|
|
561
|
-
return;
|
|
562
|
-
}
|
|
563
|
-
const currentHeads = this.getHeads(documentId);
|
|
564
|
-
if (currentHeads !== null && headsEquals(currentHeads, heads2)) {
|
|
565
|
-
return;
|
|
566
|
-
}
|
|
567
|
-
const handle = await this.loadDoc(Context.default(void 0, {
|
|
568
|
-
F: __dxlog_file2,
|
|
569
|
-
L: 189
|
|
570
|
-
}), documentId);
|
|
571
|
-
await waitForHeads(handle, heads2);
|
|
572
|
-
}) ?? []);
|
|
573
|
-
await this._repo.flush(heads.entries?.map((entry) => entry.documentId) ?? []);
|
|
574
|
-
}
|
|
575
|
-
async reIndexHeads(documentIds) {
|
|
576
|
-
for (const documentId of documentIds) {
|
|
577
|
-
log2.info("re-indexing heads for document", {
|
|
578
|
-
documentId
|
|
579
|
-
}, {
|
|
580
|
-
F: __dxlog_file2,
|
|
581
|
-
L: 200,
|
|
582
|
-
S: this,
|
|
583
|
-
C: (f, a) => f(...a)
|
|
584
|
-
});
|
|
585
|
-
const handle = this._repo.find(documentId);
|
|
586
|
-
await handle.whenReady([
|
|
587
|
-
"ready",
|
|
588
|
-
"requesting"
|
|
589
|
-
]);
|
|
590
|
-
if (handle.inState([
|
|
591
|
-
"requesting"
|
|
592
|
-
])) {
|
|
593
|
-
log2.warn("document is not available locally, skipping", {
|
|
594
|
-
documentId
|
|
595
|
-
}, {
|
|
596
|
-
F: __dxlog_file2,
|
|
597
|
-
L: 204,
|
|
598
|
-
S: this,
|
|
599
|
-
C: (f, a) => f(...a)
|
|
600
|
-
});
|
|
601
|
-
continue;
|
|
602
|
-
}
|
|
603
|
-
const doc = handle.docSync();
|
|
604
|
-
invariant2(doc, void 0, {
|
|
605
|
-
F: __dxlog_file2,
|
|
606
|
-
L: 209,
|
|
607
|
-
S: this,
|
|
608
|
-
A: [
|
|
609
|
-
"doc",
|
|
610
|
-
""
|
|
611
|
-
]
|
|
612
|
-
});
|
|
613
|
-
const heads = getHeads(doc);
|
|
614
|
-
const batch = this._db.batch();
|
|
615
|
-
this._headsStore.setHeads(documentId, heads, batch);
|
|
616
|
-
await batch.write();
|
|
617
|
-
}
|
|
618
|
-
log2.info("done re-indexing heads", void 0, {
|
|
619
|
-
F: __dxlog_file2,
|
|
620
|
-
L: 216,
|
|
621
|
-
S: this,
|
|
622
|
-
C: (f, a) => f(...a)
|
|
623
|
-
});
|
|
624
|
-
}
|
|
625
|
-
// TODO(dmaretskyi): Share based on HALO permissions and space affinity.
|
|
626
|
-
// Hosts, running in the worker, don't share documents unless requested by other peers.
|
|
627
|
-
// NOTE: If both peers return sharePolicy=false the replication will not happen
|
|
628
|
-
// https://github.com/automerge/automerge-repo/pull/292
|
|
629
|
-
async _sharePolicy(peerId, documentId) {
|
|
630
|
-
if (peerId.startsWith("client-")) {
|
|
631
|
-
return false;
|
|
632
|
-
}
|
|
633
|
-
if (!documentId) {
|
|
634
|
-
return false;
|
|
635
|
-
}
|
|
636
|
-
const peerMetadata = this.repo.peerMetadataByPeerId[peerId];
|
|
637
|
-
if (isEchoPeerMetadata(peerMetadata)) {
|
|
638
|
-
return this._echoNetworkAdapter.shouldAdvertise(peerId, {
|
|
639
|
-
documentId
|
|
640
|
-
});
|
|
641
|
-
}
|
|
642
|
-
return false;
|
|
643
|
-
}
|
|
644
|
-
async _beforeSave({ path, batch }) {
|
|
645
|
-
const handle = this._repo.handles[path[0]];
|
|
646
|
-
if (!handle) {
|
|
647
|
-
return;
|
|
648
|
-
}
|
|
649
|
-
const doc = handle.docSync();
|
|
650
|
-
if (!doc) {
|
|
651
|
-
return;
|
|
652
|
-
}
|
|
653
|
-
const spaceKey = getSpaceKeyFromDoc(doc) ?? void 0;
|
|
654
|
-
const heads = getHeads(doc);
|
|
655
|
-
this._headsStore.setHeads(handle.documentId, heads, batch);
|
|
656
|
-
const objectIds = Object.keys(doc.objects ?? {});
|
|
657
|
-
const encodedIds = objectIds.map((objectId) => objectPointerCodec.encode({
|
|
658
|
-
documentId: handle.documentId,
|
|
659
|
-
objectId,
|
|
660
|
-
spaceKey
|
|
661
|
-
}));
|
|
662
|
-
const idToLastHash = new Map(encodedIds.map((id) => [
|
|
663
|
-
id,
|
|
664
|
-
heads
|
|
665
|
-
]));
|
|
666
|
-
this._indexMetadataStore.markDirty(idToLastHash, batch);
|
|
667
|
-
}
|
|
668
|
-
/**
|
|
669
|
-
* Called by AutomergeStorageAdapter after levelDB batch commit.
|
|
670
|
-
*/
|
|
671
|
-
async _afterSave() {
|
|
672
|
-
this._indexMetadataStore.notifyMarkedDirty();
|
|
673
|
-
}
|
|
674
|
-
_automergeDocs() {
|
|
675
|
-
return mapValues(this._repo.handles, (handle) => ({
|
|
676
|
-
state: handle.state,
|
|
677
|
-
hasDoc: !!handle.docSync(),
|
|
678
|
-
heads: handle.docSync() ? automerge.getHeads(handle.docSync()) : null,
|
|
679
|
-
data: handle.docSync() && mapValues(handle.docSync(), (value, key) => {
|
|
680
|
-
try {
|
|
681
|
-
switch (key) {
|
|
682
|
-
case "access":
|
|
683
|
-
case "links":
|
|
684
|
-
return value;
|
|
685
|
-
case "objects":
|
|
686
|
-
return Object.keys(value);
|
|
687
|
-
default:
|
|
688
|
-
return `${value}`;
|
|
689
|
-
}
|
|
690
|
-
} catch (err) {
|
|
691
|
-
return `${err}`;
|
|
692
|
-
}
|
|
693
|
-
})
|
|
694
|
-
}));
|
|
695
|
-
}
|
|
696
|
-
_automergePeers() {
|
|
697
|
-
return this._repo.peers;
|
|
698
|
-
}
|
|
699
|
-
async _getContainingSpaceForDocument(documentId) {
|
|
700
|
-
const doc = this._repo.handles[documentId]?.docSync();
|
|
701
|
-
if (!doc) {
|
|
702
|
-
return null;
|
|
703
|
-
}
|
|
704
|
-
const spaceKeyHex = getSpaceKeyFromDoc(doc);
|
|
705
|
-
if (!spaceKeyHex) {
|
|
706
|
-
return null;
|
|
707
|
-
}
|
|
708
|
-
return PublicKey.from(spaceKeyHex);
|
|
709
|
-
}
|
|
710
|
-
/**
|
|
711
|
-
* Flush documents to disk.
|
|
712
|
-
*/
|
|
713
|
-
async flush({ documentIds } = {}) {
|
|
714
|
-
await this._repo.flush(documentIds);
|
|
715
|
-
}
|
|
716
|
-
async getHeads(documentId) {
|
|
717
|
-
const handle = this._repo.handles[documentId];
|
|
718
|
-
if (handle) {
|
|
719
|
-
const doc = handle.docSync();
|
|
720
|
-
if (!doc) {
|
|
721
|
-
return void 0;
|
|
722
|
-
}
|
|
723
|
-
return getHeads(doc);
|
|
724
|
-
} else {
|
|
725
|
-
return this._headsStore.getHeads(documentId);
|
|
726
|
-
}
|
|
727
|
-
}
|
|
728
|
-
};
|
|
729
|
-
_ts_decorate2([
|
|
730
|
-
trace.info()
|
|
731
|
-
], AutomergeHost.prototype, "_peerId", void 0);
|
|
732
|
-
_ts_decorate2([
|
|
733
|
-
trace.info({
|
|
734
|
-
depth: null
|
|
735
|
-
})
|
|
736
|
-
], AutomergeHost.prototype, "_automergeDocs", null);
|
|
737
|
-
_ts_decorate2([
|
|
738
|
-
trace.info({
|
|
739
|
-
depth: null
|
|
740
|
-
})
|
|
741
|
-
], AutomergeHost.prototype, "_automergePeers", null);
|
|
742
|
-
_ts_decorate2([
|
|
743
|
-
trace.span({
|
|
744
|
-
showInBrowserTimeline: true
|
|
745
|
-
})
|
|
746
|
-
], AutomergeHost.prototype, "flush", null);
|
|
747
|
-
AutomergeHost = _ts_decorate2([
|
|
748
|
-
trace.resource()
|
|
749
|
-
], AutomergeHost);
|
|
750
|
-
var getSpaceKeyFromDoc = (doc) => {
|
|
751
|
-
const rawSpaceKey = doc.access?.spaceKey ?? doc.experimental_spaceKey;
|
|
752
|
-
if (rawSpaceKey == null) {
|
|
753
|
-
return null;
|
|
754
|
-
}
|
|
755
|
-
return String(rawSpaceKey);
|
|
756
|
-
};
|
|
757
|
-
var waitForHeads = async (handle, heads) => {
|
|
758
|
-
const unavailableHeads = new Set(heads);
|
|
759
|
-
await handle.whenReady();
|
|
760
|
-
await Event.wrap(handle, "change").waitForCondition(() => {
|
|
761
|
-
for (const changeHash of unavailableHeads.values()) {
|
|
762
|
-
if (changeIsPresentInDoc(handle.docSync(), changeHash)) {
|
|
763
|
-
unavailableHeads.delete(changeHash);
|
|
764
|
-
}
|
|
765
|
-
}
|
|
766
|
-
return unavailableHeads.size === 0;
|
|
767
|
-
});
|
|
768
|
-
};
|
|
769
|
-
var changeIsPresentInDoc = (doc, changeHash) => {
|
|
770
|
-
return !!getBackend(doc).getChangeByHash(changeHash);
|
|
771
|
-
};
|
|
772
|
-
|
|
773
|
-
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
|
|
774
|
-
import { invariant as invariant4 } from "@dxos/invariant";
|
|
775
|
-
import { PublicKey as PublicKey2 } from "@dxos/keys";
|
|
776
|
-
import { log as log4 } from "@dxos/log";
|
|
777
|
-
import { ComplexMap, ComplexSet, defaultMap } from "@dxos/util";
|
|
778
|
-
|
|
779
|
-
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts
|
|
780
|
-
import { cbor } from "@dxos/automerge/automerge-repo";
|
|
781
|
-
import { Resource as Resource3 } from "@dxos/context";
|
|
782
|
-
import { invariant as invariant3 } from "@dxos/invariant";
|
|
783
|
-
import { log as log3 } from "@dxos/log";
|
|
784
|
-
import { AutomergeReplicator } from "@dxos/teleport-extension-automerge-replicator";
|
|
785
|
-
// Absolute source path baked in at build time; referenced by the structured
// log/invariant call-site metadata below.
var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator-connection.ts";
// Default replicator factory: spreads the packed argument tuple into the
// AutomergeReplicator teleport-extension constructor.
var DEFAULT_FACTORY = function (params) {
  return new AutomergeReplicator(...params);
};
787
|
-
// Bridges a teleport AutomergeReplicator extension to a Web Streams pair:
// inbound sync messages are CBOR-decoded and enqueued on `readable`; messages
// written to `writable` are CBOR-encoded and sent through the extension.
// The `F`/`L`/`S`/`A`/`C` object literals are call-site metadata emitted by the
// @dxos/log build transform — do not edit them by hand.
var MeshReplicatorConnection = class extends Resource3 {
  constructor(_params) {
    super();
    this._params = _params;
    // Device key of the remote peer; populated in onStartReplication.
    this.remoteDeviceKey = null;
    // Automerge peer id of the remote; populated in onStartReplication.
    this._remotePeerId = null;
    // Gates message flow in both directions; toggled via enable()/disable().
    this._isEnabled = false;
    let readableStreamController;
    this.readable = new ReadableStream({
      start: (controller) => {
        // Capture the controller so onSyncMessage can push into the stream.
        readableStreamController = controller;
        // NOTE(review): `_ctx` is presumably provided by the Resource3 base
        // class (a disposal context) — confirm against @dxos/context.
        this._ctx.onDispose(() => controller.close());
      }
    });
    this.writable = new WritableStream({
      write: async (message, controller) => {
        invariant3(this._isEnabled, "Writing to a disabled connection", {
          F: __dxlog_file3,
          L: 47,
          S: this,
          A: [
            "this._isEnabled",
            "'Writing to a disabled connection'"
          ]
        });
        try {
          await this.replicatorExtension.sendSyncMessage({
            payload: cbor.encode(message)
          });
        } catch (err) {
          // Surface the failure on the stream and treat it as a disconnect.
          controller.error(err);
          this._disconnectIfEnabled();
        }
      }
    });
    // Allow tests/callers to inject a replicator; fall back to the real one.
    const createAutomergeReplicator = this._params.replicatorFactory ?? DEFAULT_FACTORY;
    this.replicatorExtension = createAutomergeReplicator([
      {
        peerId: this._params.ownPeerId
      },
      {
        onStartReplication: async (info, remotePeerId) => {
          // Record the remote identity before notifying the owner.
          this.remoteDeviceKey = remotePeerId;
          this._remotePeerId = info.id;
          log3("onStartReplication", {
            id: info.id,
            thisPeerId: this.peerId,
            remotePeerId: remotePeerId.toHex()
          }, {
            F: __dxlog_file3,
            L: 81,
            S: this,
            C: (f, a) => f(...a)
          });
          this._params.onRemoteConnected();
        },
        onSyncMessage: async ({ payload }) => {
          // Drop inbound messages while the connection is disabled.
          if (!this._isEnabled) {
            return;
          }
          const message = cbor.decode(payload);
          readableStreamController.enqueue(message);
        },
        onClose: async () => {
          this._disconnectIfEnabled();
        }
      }
    ]);
  }
  // Notify the owner of a disconnect, but only if the connection was active.
  _disconnectIfEnabled() {
    if (this._isEnabled) {
      this._params.onRemoteDisconnected();
    }
  }
  // Remote automerge peer id. Throws until onStartReplication has run.
  get peerId() {
    invariant3(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file3,
      L: 107,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    return this._remotePeerId;
  }
  // Delegates the share-policy decision to the owner-supplied callback.
  async shouldAdvertise(params) {
    return this._params.shouldAdvertise(params);
  }
  /**
   * Start exchanging messages with the remote peer.
   * Call after the remote peer has connected.
   */
  enable() {
    invariant3(this._remotePeerId != null, "Remote peer has not connected yet.", {
      F: __dxlog_file3,
      L: 120,
      S: this,
      A: [
        "this._remotePeerId != null",
        "'Remote peer has not connected yet.'"
      ]
    });
    this._isEnabled = true;
  }
  /**
   * Stop exchanging messages with the remote peer.
   */
  disable() {
    this._isEnabled = false;
  }
};
|
|
899
|
-
|
|
900
|
-
// packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts
|
|
901
|
-
// Absolute source path baked in at build time; referenced by the structured
// log/invariant call-site metadata below.
var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/echo/echo-pipeline/src/automerge/mesh-echo-replicator.ts";
// Replicator over MESH transport: tracks MeshReplicatorConnection instances,
// reports their lifecycle to the replication context, and answers the
// share-policy question (shouldAdvertise) from per-space authorized devices.
// The `F`/`L`/`S`/`A`/`C` object literals are call-site metadata emitted by the
// @dxos/log build transform — do not edit them by hand.
var MeshEchoReplicator = class {
  constructor() {
    // All connections ever created and not yet torn down.
    this._connections = /* @__PURE__ */ new Set();
    /**
     * Using automerge peerId as a key.
     */
    this._connectionsPerPeer = /* @__PURE__ */ new Map();
    /**
     * spaceKey -> deviceKey[]
     */
    this._authorizedDevices = new ComplexMap(PublicKey2.hash);
    // Replication context; set in connect(), cleared in disconnect().
    this._context = null;
  }
  async connect(context) {
    this._context = context;
  }
  // Closes every connection and resets all bookkeeping.
  async disconnect() {
    for (const connection of this._connectionsPerPeer.values()) {
      this._context?.onConnectionClosed(connection);
    }
    for (const connection of this._connections) {
      await connection.close();
    }
    this._connections.clear();
    this._connectionsPerPeer.clear();
    this._context = null;
  }
  // Creates a new connection wired to this replicator's callbacks and returns
  // its teleport extension. Requires connect() to have been called first.
  createExtension(extensionFactory) {
    invariant4(this._context, void 0, {
      F: __dxlog_file4,
      L: 54,
      S: this,
      A: [
        "this._context",
        ""
      ]
    });
    // NOTE: the callbacks below close over `connection`; they only run after
    // the constructor returns, so the reference is safe despite being used
    // before the assignment completes.
    const connection = new MeshReplicatorConnection({
      ownPeerId: this._context.peerId,
      replicatorFactory: extensionFactory,
      onRemoteConnected: async () => {
        log4("onRemoteConnected", {
          peerId: connection.peerId
        }, {
          F: __dxlog_file4,
          L: 60,
          S: this,
          C: (f, a) => f(...a)
        });
        invariant4(this._context, void 0, {
          F: __dxlog_file4,
          L: 61,
          S: this,
          A: [
            "this._context",
            ""
          ]
        });
        // A reconnect from a known peer only changes its auth scope; a new
        // peer is registered and its connection enabled.
        if (this._connectionsPerPeer.has(connection.peerId)) {
          this._context.onConnectionAuthScopeChanged(connection);
        } else {
          this._connectionsPerPeer.set(connection.peerId, connection);
          this._context.onConnectionOpen(connection);
          connection.enable();
        }
      },
      onRemoteDisconnected: async () => {
        log4("onRemoteDisconnected", {
          peerId: connection.peerId
        }, {
          F: __dxlog_file4,
          L: 72,
          S: this,
          C: (f, a) => f(...a)
        });
        this._context?.onConnectionClosed(connection);
        this._connectionsPerPeer.delete(connection.peerId);
        connection.disable();
        this._connections.delete(connection);
      },
      // Share policy: advertise a document only when the remote device is
      // authorized for the document's containing space. Any failure denies.
      shouldAdvertise: async (params) => {
        log4("shouldAdvertise", {
          peerId: connection.peerId,
          documentId: params.documentId
        }, {
          F: __dxlog_file4,
          L: 79,
          S: this,
          C: (f, a) => f(...a)
        });
        invariant4(this._context, void 0, {
          F: __dxlog_file4,
          L: 80,
          S: this,
          A: [
            "this._context",
            ""
          ]
        });
        try {
          const spaceKey = await this._context.getContainingSpaceForDocument(params.documentId);
          if (!spaceKey) {
            log4("space key not found for share policy check", {
              peerId: connection.peerId,
              documentId: params.documentId
            }, {
              F: __dxlog_file4,
              L: 84,
              S: this,
              C: (f, a) => f(...a)
            });
            return false;
          }
          const authorizedDevices = this._authorizedDevices.get(spaceKey);
          if (!connection.remoteDeviceKey) {
            log4("device key not found for share policy check", {
              peerId: connection.peerId,
              documentId: params.documentId
            }, {
              F: __dxlog_file4,
              L: 94,
              S: this,
              C: (f, a) => f(...a)
            });
            return false;
          }
          const isAuthorized = authorizedDevices?.has(connection.remoteDeviceKey) ?? false;
          log4("share policy check", {
            localPeer: this._context.peerId,
            remotePeer: connection.peerId,
            documentId: params.documentId,
            deviceKey: connection.remoteDeviceKey,
            spaceKey,
            isAuthorized
          }, {
            F: __dxlog_file4,
            L: 102,
            S: this,
            C: (f, a) => f(...a)
          });
          return isAuthorized;
        } catch (err) {
          // Deny on error rather than leaking documents on a failed check.
          log4.catch(err, void 0, {
            F: __dxlog_file4,
            L: 112,
            S: this,
            C: (f, a) => f(...a)
          });
          return false;
        }
      }
    });
    this._connections.add(connection);
    return connection.replicatorExtension;
  }
  // Grants `deviceKey` access to `spaceKey` and re-evaluates the auth scope of
  // any live connection belonging to that device.
  authorizeDevice(spaceKey, deviceKey) {
    log4("authorizeDevice", {
      spaceKey,
      deviceKey
    }, {
      F: __dxlog_file4,
      L: 123,
      S: this,
      C: (f, a) => f(...a)
    });
    defaultMap(this._authorizedDevices, spaceKey, () => new ComplexSet(PublicKey2.hash)).add(deviceKey);
    for (const connection of this._connections) {
      if (connection.remoteDeviceKey && connection.remoteDeviceKey.equals(deviceKey)) {
        if (this._connectionsPerPeer.has(connection.peerId)) {
          this._context?.onConnectionAuthScopeChanged(connection);
        }
      }
    }
  }
};
|
|
36
|
+
} from "./chunk-6MJEONOX.mjs";
|
|
1077
37
|
export {
|
|
1078
38
|
AuthExtension,
|
|
1079
39
|
AuthStatus,
|
|
@@ -1098,7 +58,10 @@ export {
|
|
|
1098
58
|
codec,
|
|
1099
59
|
createIdFromSpaceKey,
|
|
1100
60
|
createMappedFeedWriter,
|
|
61
|
+
deriveCollectionIdFromSpaceId,
|
|
62
|
+
diffCollectionState,
|
|
1101
63
|
encodingOptions,
|
|
64
|
+
getSpaceIdFromCollectionId,
|
|
1102
65
|
getSpaceKeyFromDoc,
|
|
1103
66
|
hasInvitationExpired,
|
|
1104
67
|
mapFeedIndexesToTimeframe,
|