@dxos/echo-pipeline 0.6.2 → 0.6.3-main.0308ae2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/{chunk-UJQ5VS5V.mjs → chunk-6MJEONOX.mjs} +2569 -1066
- package/dist/lib/browser/chunk-6MJEONOX.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +12 -1049
- package/dist/lib/browser/index.mjs.map +4 -4
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +224 -2
- package/dist/lib/browser/testing/index.mjs.map +4 -4
- package/dist/lib/node/{chunk-RH6TDRML.cjs → chunk-PT5LWMPA.cjs} +3185 -1710
- package/dist/lib/node/chunk-PT5LWMPA.cjs.map +7 -0
- package/dist/lib/node/index.cjs +37 -1056
- package/dist/lib/node/index.cjs.map +4 -4
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +238 -13
- package/dist/lib/node/testing/index.cjs.map +4 -4
- package/dist/types/src/automerge/automerge-host.d.ts +29 -2
- package/dist/types/src/automerge/automerge-host.d.ts.map +1 -1
- package/dist/types/src/automerge/collection-synchronizer.d.ts +61 -0
- package/dist/types/src/automerge/collection-synchronizer.d.ts.map +1 -0
- package/dist/types/src/automerge/collection-synchronizer.test.d.ts +2 -0
- package/dist/types/src/automerge/collection-synchronizer.test.d.ts.map +1 -0
- package/dist/types/src/automerge/echo-network-adapter.d.ts +9 -2
- package/dist/types/src/automerge/echo-network-adapter.d.ts.map +1 -1
- package/dist/types/src/automerge/echo-replicator.d.ts +7 -0
- package/dist/types/src/automerge/echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/heads-store.d.ts +1 -1
- package/dist/types/src/automerge/heads-store.d.ts.map +1 -1
- package/dist/types/src/automerge/index.d.ts +2 -0
- package/dist/types/src/automerge/index.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts +3 -1
- package/dist/types/src/automerge/mesh-echo-replicator-connection.d.ts.map +1 -1
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts +2 -2
- package/dist/types/src/automerge/mesh-echo-replicator.d.ts.map +1 -1
- package/dist/types/src/automerge/network-protocol.d.ts +31 -0
- package/dist/types/src/automerge/network-protocol.d.ts.map +1 -0
- package/dist/types/src/automerge/space-collection.d.ts +4 -0
- package/dist/types/src/automerge/space-collection.d.ts.map +1 -0
- package/dist/types/src/db-host/data-service.d.ts +2 -1
- package/dist/types/src/db-host/data-service.d.ts.map +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts +1 -1
- package/dist/types/src/db-host/documents-synchronizer.d.ts.map +1 -1
- package/dist/types/src/testing/index.d.ts +1 -0
- package/dist/types/src/testing/index.d.ts.map +1 -1
- package/dist/types/src/testing/test-replicator.d.ts +46 -0
- package/dist/types/src/testing/test-replicator.d.ts.map +1 -0
- package/package.json +33 -33
- package/src/automerge/automerge-host.test.ts +76 -14
- package/src/automerge/automerge-host.ts +219 -32
- package/src/automerge/automerge-repo.test.ts +2 -1
- package/src/automerge/collection-synchronizer.test.ts +91 -0
- package/src/automerge/collection-synchronizer.ts +204 -0
- package/src/automerge/echo-network-adapter.test.ts +5 -1
- package/src/automerge/echo-network-adapter.ts +69 -4
- package/src/automerge/echo-replicator.ts +9 -0
- package/src/automerge/heads-store.ts +6 -9
- package/src/automerge/index.ts +2 -0
- package/src/automerge/mesh-echo-replicator-connection.ts +6 -1
- package/src/automerge/mesh-echo-replicator.ts +28 -7
- package/src/automerge/network-protocol.ts +45 -0
- package/src/automerge/space-collection.ts +14 -0
- package/src/db-host/data-service.ts +26 -12
- package/src/db-host/documents-synchronizer.ts +17 -5
- package/src/metadata/metadata-store.ts +1 -1
- package/src/testing/index.ts +1 -0
- package/src/testing/test-replicator.ts +194 -0
- package/dist/lib/browser/chunk-UJQ5VS5V.mjs.map +0 -7
- package/dist/lib/node/chunk-RH6TDRML.cjs.map +0 -7
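A listing like the one above can be reproduced locally with npm's built-in diff command (npm 7 or later); the output formatting will differ from this report, and the pre-release build must still be available in the registry:

npm diff --diff=@dxos/echo-pipeline@0.6.2 --diff=@dxos/echo-pipeline@0.6.3-main.0308ae2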
package/dist/lib/node/index.cjs
CHANGED
@@ -18,1064 +18,42 @@ var __copyProps = (to, from, except, desc) => {
  var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
  var node_exports = {};
  __export(node_exports, {
-   AuthExtension: () =>
-   AuthStatus: () =>
-   AutomergeHost: () => AutomergeHost,
-   CredentialRetrieverExtension: () =>
-   CredentialServerExtension: () =>
-   DataServiceImpl: () =>
-   DocumentsSynchronizer: () =>
-   LevelDBStorageAdapter: () => LevelDBStorageAdapter,
-   MOCK_AUTH_PROVIDER: () =>
-   MOCK_AUTH_VERIFIER: () =>
-   MeshEchoReplicator: () => MeshEchoReplicator,
-   MetadataStore: () =>
-   Pipeline: () =>
-   SnapshotManager: () =>
-   SnapshotStore: () =>
-   Space: () =>
-   SpaceManager: () =>
-   SpaceProtocol: () =>
-   SpaceProtocolSession: () =>
-   TimeframeClock: () =>
-   codec: () =>
-   createIdFromSpaceKey: () =>
-   createMappedFeedWriter: () =>
-   [seven further export entries removed here (old lines 44-50); their text is not recoverable from this rendering]
+   AuthExtension: () => import_chunk_PT5LWMPA.AuthExtension,
+   AuthStatus: () => import_chunk_PT5LWMPA.AuthStatus,
+   AutomergeHost: () => import_chunk_PT5LWMPA.AutomergeHost,
+   CredentialRetrieverExtension: () => import_chunk_PT5LWMPA.CredentialRetrieverExtension,
+   CredentialServerExtension: () => import_chunk_PT5LWMPA.CredentialServerExtension,
+   DataServiceImpl: () => import_chunk_PT5LWMPA.DataServiceImpl,
+   DocumentsSynchronizer: () => import_chunk_PT5LWMPA.DocumentsSynchronizer,
+   LevelDBStorageAdapter: () => import_chunk_PT5LWMPA.LevelDBStorageAdapter,
+   MOCK_AUTH_PROVIDER: () => import_chunk_PT5LWMPA.MOCK_AUTH_PROVIDER,
+   MOCK_AUTH_VERIFIER: () => import_chunk_PT5LWMPA.MOCK_AUTH_VERIFIER,
+   MeshEchoReplicator: () => import_chunk_PT5LWMPA.MeshEchoReplicator,
+   MetadataStore: () => import_chunk_PT5LWMPA.MetadataStore,
+   Pipeline: () => import_chunk_PT5LWMPA.Pipeline,
+   SnapshotManager: () => import_chunk_PT5LWMPA.SnapshotManager,
+   SnapshotStore: () => import_chunk_PT5LWMPA.SnapshotStore,
+   Space: () => import_chunk_PT5LWMPA.Space,
+   SpaceManager: () => import_chunk_PT5LWMPA.SpaceManager,
+   SpaceProtocol: () => import_chunk_PT5LWMPA.SpaceProtocol,
+   SpaceProtocolSession: () => import_chunk_PT5LWMPA.SpaceProtocolSession,
+   TimeframeClock: () => import_chunk_PT5LWMPA.TimeframeClock,
+   codec: () => import_chunk_PT5LWMPA.codec,
+   createIdFromSpaceKey: () => import_chunk_PT5LWMPA.createIdFromSpaceKey,
+   createMappedFeedWriter: () => import_chunk_PT5LWMPA.createMappedFeedWriter,
+   deriveCollectionIdFromSpaceId: () => import_chunk_PT5LWMPA.deriveCollectionIdFromSpaceId,
+   diffCollectionState: () => import_chunk_PT5LWMPA.diffCollectionState,
+   encodingOptions: () => import_chunk_PT5LWMPA.encodingOptions,
+   getSpaceIdFromCollectionId: () => import_chunk_PT5LWMPA.getSpaceIdFromCollectionId,
+   getSpaceKeyFromDoc: () => import_chunk_PT5LWMPA.getSpaceKeyFromDoc,
+   hasInvitationExpired: () => import_chunk_PT5LWMPA.hasInvitationExpired,
+   mapFeedIndexesToTimeframe: () => import_chunk_PT5LWMPA.mapFeedIndexesToTimeframe,
+   mapTimeframeToFeedIndexes: () => import_chunk_PT5LWMPA.mapTimeframeToFeedIndexes,
+   startAfter: () => import_chunk_PT5LWMPA.startAfter,
+   valueEncoding: () => import_chunk_PT5LWMPA.valueEncoding
  });
  module.exports = __toCommonJS(node_exports);
- [1,026 removed lines (old lines 53 through 1078) are not reproduced here. They contain the implementation previously inlined in index.cjs: the supporting @dxos/* requires, the _ts_decorate helpers, and the EchoNetworkAdapter, HeadsStore, LevelDBStorageAdapter, AutomergeHost, MeshReplicatorConnection, and MeshEchoReplicator classes together with their module-local helpers (createEchoPeerMetadata, isEchoPeerMetadata, keyEncoder, encodingOptions, isLevelDbNotFoundError, getSpaceKeyFromDoc, waitForHeads, changeIsPresentInDoc), functionality that the rewritten entry point now pulls from chunk-PT5LWMPA.cjs.]
+ var import_chunk_PT5LWMPA = require("./chunk-PT5LWMPA.cjs");
  // Annotate the CommonJS export names for ESM import in node:
  0 && (module.exports = {
    AuthExtension,
@@ -1101,7 +79,10 @@ var MeshEchoReplicator = class {
    codec,
    createIdFromSpaceKey,
    createMappedFeedWriter,
+   deriveCollectionIdFromSpaceId,
+   diffCollectionState,
    encodingOptions,
+   getSpaceIdFromCollectionId,
    getSpaceKeyFromDoc,
    hasInvitationExpired,
    mapFeedIndexesToTimeframe,
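The Name: () => import_chunk_PT5LWMPA.Name entries in the rewritten export map rely on the bundler's getter-based __export helper, so the entry point forwards live bindings from the chunk instead of copying values at load time. A minimal TypeScript sketch of that pattern, assuming esbuild-style helpers (the helper names mirror the generated output shown above and are not a public API of the package):

// Minimal sketch of the getter-based re-export pattern seen in the hunk above.
// __export defines getters, so reads of node_exports.AutomergeHost always
// resolve to whatever the chunk module currently exports.
const chunk = { AutomergeHost: class {} }; // stand-in for require("./chunk-PT5LWMPA.cjs")
const node_exports: Record<string, unknown> = {};

const __export = (target: object, all: Record<string, () => unknown>): void => {
  for (const name in all) {
    Object.defineProperty(target, name, { get: all[name], enumerable: true });
  }
};

__export(node_exports, { AutomergeHost: () => chunk.AutomergeHost });
console.log(node_exports.AutomergeHost === chunk.AutomergeHost); // true: a forwarded live binding

Because the same export names are forwarded, a downstream import such as import { AutomergeHost } from '@dxos/echo-pipeline' resolves to the same implementation in 0.6.3 as in 0.6.2; it is simply loaded from chunk-PT5LWMPA.cjs rather than defined inside index.cjs itself.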