@dxos/messaging 0.6.12 → 0.6.13-main.548ca8d
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/chunk-BN7UMWB4.mjs +2299 -0
- package/dist/lib/browser/chunk-BN7UMWB4.mjs.map +7 -0
- package/dist/lib/browser/index.mjs +9 -2286
- package/dist/lib/browser/index.mjs.map +4 -4
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +147 -0
- package/dist/lib/browser/testing/index.mjs.map +7 -0
- package/dist/lib/node/chunk-DBUGAS7J.cjs +2276 -0
- package/dist/lib/node/chunk-DBUGAS7J.cjs.map +7 -0
- package/dist/lib/node/index.cjs +9 -2242
- package/dist/lib/node/index.cjs.map +4 -4
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +162 -0
- package/dist/lib/node/testing/index.cjs.map +7 -0
- package/dist/lib/node-esm/chunk-5LCDIQ7T.mjs +2291 -0
- package/dist/lib/node-esm/chunk-5LCDIQ7T.mjs.map +7 -0
- package/dist/lib/node-esm/index.mjs +22 -0
- package/dist/lib/node-esm/index.mjs.map +7 -0
- package/dist/lib/node-esm/meta.json +1 -0
- package/dist/lib/node-esm/testing/index.mjs +146 -0
- package/dist/lib/node-esm/testing/index.mjs.map +7 -0
- package/dist/types/src/messenger.blueprint-test.d.ts +2 -3
- package/dist/types/src/messenger.blueprint-test.d.ts.map +1 -1
- package/dist/types/src/messenger.d.ts.map +1 -1
- package/dist/types/src/messenger.node.test.d.ts +2 -0
- package/dist/types/src/messenger.node.test.d.ts.map +1 -0
- package/dist/types/src/signal-client/signal-client.node.test.d.ts +2 -0
- package/dist/types/src/signal-client/signal-client.node.test.d.ts.map +1 -0
- package/dist/types/src/signal-client/signal-rpc-client.node.test.d.ts +2 -0
- package/dist/types/src/signal-client/signal-rpc-client.node.test.d.ts.map +1 -0
- package/dist/types/src/signal-manager/edge-signal-manager.d.ts.map +1 -1
- package/dist/types/src/signal-manager/edge-signal-manager.node.test.d.ts +2 -0
- package/dist/types/src/signal-manager/edge-signal-manager.node.test.d.ts.map +1 -0
- package/dist/types/src/signal-manager/websocket-signal-manager.node.test.d.ts +2 -0
- package/dist/types/src/signal-manager/websocket-signal-manager.node.test.d.ts.map +1 -0
- package/dist/types/src/testing/test-builder.d.ts +3 -4
- package/dist/types/src/testing/test-builder.d.ts.map +1 -1
- package/dist/types/src/testing/test-peer.d.ts +0 -1
- package/dist/types/src/testing/test-peer.d.ts.map +1 -1
- package/package.json +32 -21
- package/src/messenger.blueprint-test.ts +25 -33
- package/src/{messenger.test.ts → messenger.node.test.ts} +9 -8
- package/src/messenger.ts +1 -1
- package/src/signal-client/{signal-client.test.ts → signal-client.node.test.ts} +9 -8
- package/src/signal-client/{signal-rpc-client.test.ts → signal-rpc-client.node.test.ts} +8 -13
- package/src/signal-manager/{edge-signal-manager.test.ts → edge-signal-manager.node.test.ts} +14 -7
- package/src/signal-manager/edge-signal-manager.ts +5 -6
- package/src/signal-manager/{websocket-signal-manager.test.ts → websocket-signal-manager.node.test.ts} +12 -30
- package/src/testing/test-builder.ts +3 -6
- package/src/testing/test-peer.ts +5 -7
- package/dist/types/src/messenger.test.d.ts +0 -2
- package/dist/types/src/messenger.test.d.ts.map +0 -1
- package/dist/types/src/signal-client/signal-client.test.d.ts +0 -2
- package/dist/types/src/signal-client/signal-client.test.d.ts.map +0 -1
- package/dist/types/src/signal-client/signal-rpc-client.test.d.ts +0 -2
- package/dist/types/src/signal-client/signal-rpc-client.test.d.ts.map +0 -1
- package/dist/types/src/signal-manager/edge-signal-manager.test.d.ts +0 -2
- package/dist/types/src/signal-manager/edge-signal-manager.test.d.ts.map +0 -1
- package/dist/types/src/signal-manager/websocket-signal-manager.test.d.ts +0 -2
- package/dist/types/src/signal-manager/websocket-signal-manager.test.d.ts.map +0 -1
|
@@ -0,0 +1,2299 @@
|
|
|
1
|
+
import "@dxos/node-std/globals";
|
|
2
|
+
|
|
3
|
+
// inject-globals:@inject-globals
|
|
4
|
+
import {
|
|
5
|
+
global,
|
|
6
|
+
Buffer,
|
|
7
|
+
process
|
|
8
|
+
} from "@dxos/node-std/inject-globals";
|
|
9
|
+
|
|
10
|
+
// packages/core/mesh/messaging/src/messenger.ts
|
|
11
|
+
import { TimeoutError, scheduleExponentialBackoffTaskInterval, scheduleTask, scheduleTaskInterval } from "@dxos/async";
|
|
12
|
+
import { Context } from "@dxos/context";
|
|
13
|
+
import { invariant } from "@dxos/invariant";
|
|
14
|
+
import { PublicKey } from "@dxos/keys";
|
|
15
|
+
import { log } from "@dxos/log";
|
|
16
|
+
import { TimeoutError as ProtocolTimeoutError, trace as trace2 } from "@dxos/protocols";
|
|
17
|
+
import { schema } from "@dxos/protocols/proto";
|
|
18
|
+
import { ComplexMap, ComplexSet } from "@dxos/util";
|
|
19
|
+
|
|
20
|
+
// packages/core/mesh/messaging/src/messenger-monitor.ts
|
|
21
|
+
import { trace } from "@dxos/tracing";
|
|
22
|
+
/**
 * Emits tracing metrics for the reliable-messaging layer (see `Messenger`).
 */
var MessengerMonitor = class {
  /** Count one failed attempt to acknowledge an incoming reliable message. */
  recordMessageAckFailed() {
    trace.metrics.increment("dxos.mesh.signal.messenger.failed-ack", 1);
  }
  /**
   * Record the outcome of one reliable send: whether it was acknowledged
   * and how many send attempts were made.
   */
  recordReliableMessage({ sent, sendAttempts }) {
    const tags = { success: sent, attempts: sendAttempts };
    trace.metrics.increment("dxos.mesh.signal.messenger.reliable-send", 1, { tags });
  }
};
|
|
35
|
+
|
|
36
|
+
// packages/core/mesh/messaging/src/timeouts.ts
|
|
37
|
+
// Time (ms) a reliable message may wait for an ACK before it is rejected as
// undelivered (see the timeout task in Messenger.sendMessage).
var MESSAGE_TIMEOUT = 1e4;
|
|
38
|
+
|
|
39
|
+
// packages/core/mesh/messaging/src/messenger.ts
|
|
40
|
+
// Absolute build-machine source path baked in by the bundler; used only as the
// `F` (file) field of structured dxlog metadata in this module.
var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/messenger.ts";
// Protobuf codecs for the reliable-message envelope and its acknowledgement.
var ReliablePayload = schema.getCodecForType("dxos.mesh.messaging.ReliablePayload");
var Acknowledgement = schema.getCodecForType("dxos.mesh.messaging.Acknowledgement");
// How often (ms) Messenger._performGc sweeps the received-message de-duplication set.
var RECEIVED_MESSAGES_GC_INTERVAL = 12e4;
|
|
44
|
+
/**
 * Reliable messaging on top of a SignalManager: every outgoing message is
 * wrapped in a ReliablePayload with a random messageId, retried with
 * exponential backoff until the recipient returns an Acknowledgement, and
 * rejected with a timeout error after MESSAGE_TIMEOUT. Incoming duplicates
 * are suppressed via a periodically garbage-collected set of seen messageIds.
 */
var Messenger = class {
  /**
   * @param signalManager transport used to send/receive signal messages.
   * @param retryDelay initial delay (ms) for the exponential-backoff resend loop.
   * Note: the constructor calls open() immediately, so a new Messenger starts
   * in the open state.
   */
  constructor({ signalManager, retryDelay = 1e3 }) {
    this._monitor = new MessengerMonitor();
    // { peerId, payloadType } => listeners set
    this._listeners = new ComplexMap(({ peerId, payloadType }) => peerId + payloadType);
    // peerId => listeners set
    this._defaultListeners = /* @__PURE__ */ new Map();
    // messageId => callback invoked when the matching ACK arrives.
    this._onAckCallbacks = new ComplexMap(PublicKey.hash);
    // messageIds already processed; used to de-duplicate redelivered messages.
    this._receivedMessages = new ComplexSet(PublicKey.hash);
    /**
     * Keys scheduled to be cleared from _receivedMessages on the next iteration.
     */
    this._toClear = new ComplexSet(PublicKey.hash);
    this._closed = true;
    this._signalManager = signalManager;
    this._retryDelay = retryDelay;
    this.open();
  }
  /**
   * Idempotent: wires the signal-manager message handler and starts the
   * periodic received-message GC. No-op if already open.
   */
  open() {
    if (!this._closed) {
      return;
    }
    const traceId = PublicKey.random().toHex();
    log.trace("dxos.mesh.messenger.open", trace2.begin({
      id: traceId
    }), {
      F: __dxlog_file,
      L: 72,
      S: this,
      C: (f, a) => f(...a)
    });
    this._ctx = new Context({
      onError: (err) => log.catch(err, void 0, {
        F: __dxlog_file,
        L: 74,
        S: this,
        C: (f, a) => f(...a)
      })
    }, {
      F: __dxlog_file,
      L: 73
    });
    // Subscription is registered as a context disposer so close() detaches it.
    this._ctx.onDispose(this._signalManager.onMessage.on(async (message) => {
      log("received message", {
        from: message.author
      }, {
        F: __dxlog_file,
        L: 78,
        S: this,
        C: (f, a) => f(...a)
      });
      await this._handleMessage(message);
    }));
    scheduleTaskInterval(this._ctx, async () => {
      this._performGc();
    }, RECEIVED_MESSAGES_GC_INTERVAL);
    this._closed = false;
    log.trace("dxos.mesh.messenger.open", trace2.end({
      id: traceId
    }), {
      F: __dxlog_file,
      L: 93,
      S: this,
      C: (f, a) => f(...a)
    });
  }
  /** Idempotent: disposes the context (handlers, retry/GC tasks). */
  async close() {
    if (this._closed) {
      return;
    }
    this._closed = true;
    await this._ctx.dispose();
  }
  /**
   * Sends a payload reliably. Resolves when the recipient acknowledges the
   * message; rejects with a ProtocolTimeoutError after MESSAGE_TIMEOUT.
   * Retries are scheduled with exponential backoff starting at _retryDelay;
   * individual resend failures are logged and absorbed by the retry loop.
   */
  async sendMessage({ author, recipient, payload }) {
    invariant(!this._closed, "Closed", {
      F: __dxlog_file,
      L: 105,
      S: this,
      A: [
        "!this._closed",
        "'Closed'"
      ]
    });
    // Per-message context: disposing it cancels the retry and timeout tasks.
    const messageContext = this._ctx.derive();
    const reliablePayload = {
      messageId: PublicKey.random(),
      payload
    };
    invariant(!this._onAckCallbacks.has(reliablePayload.messageId), void 0, {
      F: __dxlog_file,
      L: 112,
      S: this,
      A: [
        "!this._onAckCallbacks.has(reliablePayload.messageId!)",
        ""
      ]
    });
    log("send message", {
      messageId: reliablePayload.messageId,
      author,
      recipient
    }, {
      F: __dxlog_file,
      L: 113,
      S: this,
      C: (f, a) => f(...a)
    });
    let messageReceived;
    let timeoutHit;
    let sendAttempts = 0;
    // Settled either by the ACK callback (resolve) or the timeout task (reject).
    const promise = new Promise((resolve, reject) => {
      messageReceived = resolve;
      timeoutHit = reject;
    });
    scheduleExponentialBackoffTaskInterval(messageContext, async () => {
      log("retrying message", {
        messageId: reliablePayload.messageId
      }, {
        F: __dxlog_file,
        L: 128,
        S: this,
        C: (f, a) => f(...a)
      });
      sendAttempts++;
      await this._encodeAndSend({
        author,
        recipient,
        reliablePayload
      }).catch((err) => log("failed to send message", {
        err
      }, {
        F: __dxlog_file,
        L: 131,
        S: this,
        C: (f, a) => f(...a)
      }));
    }, this._retryDelay);
    scheduleTask(messageContext, () => {
      log("message not delivered", {
        messageId: reliablePayload.messageId
      }, {
        F: __dxlog_file,
        L: 140,
        S: this,
        C: (f, a) => f(...a)
      });
      this._onAckCallbacks.delete(reliablePayload.messageId);
      timeoutHit(new ProtocolTimeoutError("signaling message not delivered", new TimeoutError(MESSAGE_TIMEOUT, "Message not delivered")));
      void messageContext.dispose();
      this._monitor.recordReliableMessage({
        sendAttempts,
        sent: false
      });
    }, MESSAGE_TIMEOUT);
    this._onAckCallbacks.set(reliablePayload.messageId, () => {
      messageReceived();
      this._onAckCallbacks.delete(reliablePayload.messageId);
      void messageContext.dispose();
      this._monitor.recordReliableMessage({
        sendAttempts,
        sent: true
      });
    });
    // First (non-retry) send attempt; failures propagate to the caller.
    await this._encodeAndSend({
      author,
      recipient,
      reliablePayload
    });
    return promise;
  }
  /**
   * Subscribes onMessage function to messages that contains payload with payloadType.
   * @param payloadType if not specified, onMessage will be subscribed to all types of messages.
   * @returns a handle whose unsubscribe() removes the listener (it does not
   *   unsubscribe the peer from the signal manager).
   */
  async listen({ peer, payloadType, onMessage }) {
    invariant(!this._closed, "Closed", {
      F: __dxlog_file,
      L: 178,
      S: this,
      A: [
        "!this._closed",
        "'Closed'"
      ]
    });
    await this._signalManager.subscribeMessages(peer);
    let listeners;
    invariant(peer.peerKey, "Peer key is required", {
      F: __dxlog_file,
      L: 182,
      S: this,
      A: [
        "peer.peerKey",
        "'Peer key is required'"
      ]
    });
    if (!payloadType) {
      // Catch-all listener for this peer, keyed by peerKey only.
      listeners = this._defaultListeners.get(peer.peerKey);
      if (!listeners) {
        listeners = /* @__PURE__ */ new Set();
        this._defaultListeners.set(peer.peerKey, listeners);
      }
    } else {
      // Type-specific listener, keyed by (peerKey, payloadType).
      listeners = this._listeners.get({
        peerId: peer.peerKey,
        payloadType
      });
      if (!listeners) {
        listeners = /* @__PURE__ */ new Set();
        this._listeners.set({
          peerId: peer.peerKey,
          payloadType
        }, listeners);
      }
    }
    listeners.add(onMessage);
    return {
      unsubscribe: async () => {
        listeners.delete(onMessage);
      }
    };
  }
  /** Wraps the reliable payload in a protobuf Any and sends it via the signal manager. */
  async _encodeAndSend({ author, recipient, reliablePayload }) {
    await this._signalManager.sendMessage({
      author,
      recipient,
      payload: {
        type_url: "dxos.mesh.messaging.ReliablePayload",
        value: ReliablePayload.encode(reliablePayload, {
          preserveAny: true
        })
      }
    });
  }
  /**
   * Dispatches an incoming signal message by payload type; messages of any
   * other type are silently ignored.
   */
  async _handleMessage(message) {
    switch (message.payload.type_url) {
      case "dxos.mesh.messaging.ReliablePayload": {
        await this._handleReliablePayload(message);
        break;
      }
      case "dxos.mesh.messaging.Acknowledgement": {
        await this._handleAcknowledgement({
          payload: message.payload
        });
        break;
      }
    }
  }
  /**
   * Handles an incoming reliable message: always ACKs first (so the sender
   * stops retrying), then delivers to listeners unless the messageId was
   * already seen (duplicate delivery).
   */
  async _handleReliablePayload({ author, recipient, payload }) {
    invariant(payload.type_url === "dxos.mesh.messaging.ReliablePayload", void 0, {
      F: __dxlog_file,
      L: 240,
      S: this,
      A: [
        "payload.type_url === 'dxos.mesh.messaging.ReliablePayload'",
        ""
      ]
    });
    const reliablePayload = ReliablePayload.decode(payload.value, {
      preserveAny: true
    });
    log("handling message", {
      messageId: reliablePayload.messageId
    }, {
      F: __dxlog_file,
      L: 243,
      S: this,
      C: (f, a) => f(...a)
    });
    try {
      await this._sendAcknowledgement({
        author,
        recipient,
        messageId: reliablePayload.messageId
      });
    } catch (err) {
      // ACK failure is counted, then rethrown: the message is NOT delivered
      // to listeners in that case.
      this._monitor.recordMessageAckFailed();
      throw err;
    }
    if (this._receivedMessages.has(reliablePayload.messageId)) {
      return;
    }
    this._receivedMessages.add(reliablePayload.messageId);
    await this._callListeners({
      author,
      recipient,
      payload: reliablePayload.payload
    });
  }
  /** Resolves the pending sendMessage promise (if any) for the acknowledged messageId. */
  async _handleAcknowledgement({ payload }) {
    invariant(payload.type_url === "dxos.mesh.messaging.Acknowledgement", void 0, {
      F: __dxlog_file,
      L: 271,
      S: this,
      A: [
        "payload.type_url === 'dxos.mesh.messaging.Acknowledgement'",
        ""
      ]
    });
    this._onAckCallbacks.get(Acknowledgement.decode(payload.value).messageId)?.();
  }
  /** Sends an Acknowledgement back to the original author (roles are swapped). */
  async _sendAcknowledgement({ author, recipient, messageId }) {
    log("sending ACK", {
      messageId,
      from: recipient,
      to: author
    }, {
      F: __dxlog_file,
      L: 284,
      S: this,
      C: (f, a) => f(...a)
    });
    await this._signalManager.sendMessage({
      author: recipient,
      recipient: author,
      payload: {
        type_url: "dxos.mesh.messaging.Acknowledgement",
        value: Acknowledgement.encode({
          messageId
        })
      }
    });
  }
  /**
   * Invokes listeners sequentially: first the recipient's catch-all listeners,
   * then the (recipient, payload-type) specific ones. Each listener is awaited.
   */
  async _callListeners(message) {
    {
      invariant(message.recipient.peerKey, "Peer key is required", {
        F: __dxlog_file,
        L: 298,
        S: this,
        A: [
          "message.recipient.peerKey",
          "'Peer key is required'"
        ]
      });
      const defaultListenerMap = this._defaultListeners.get(message.recipient.peerKey);
      if (defaultListenerMap) {
        for (const listener of defaultListenerMap) {
          await listener(message);
        }
      }
    }
    {
      const listenerMap = this._listeners.get({
        peerId: message.recipient.peerKey,
        payloadType: message.payload.type_url
      });
      if (listenerMap) {
        for (const listener of listenerMap) {
          await listener(message);
        }
      }
    }
  }
  /**
   * Two-phase GC of the de-duplication set: keys staged in _toClear on the
   * previous run are deleted now, and all currently known keys are staged for
   * the next run — so each messageId survives at least one full GC interval.
   */
  _performGc() {
    const start = performance.now();
    for (const key of this._toClear.keys()) {
      this._receivedMessages.delete(key);
    }
    this._toClear.clear();
    for (const key of this._receivedMessages.keys()) {
      this._toClear.add(key);
    }
    const elapsed = performance.now() - start;
    if (elapsed > 100) {
      log.warn("GC took too long", {
        elapsed
      }, {
        F: __dxlog_file,
        L: 333,
        S: this,
        C: (f, a) => f(...a)
      });
    }
  }
};
|
|
418
|
+
|
|
419
|
+
// packages/core/mesh/messaging/src/signal-client/signal-client.ts
|
|
420
|
+
import { DeferredTask, Event as Event2, Trigger as Trigger2, scheduleTask as scheduleTask2, scheduleTaskInterval as scheduleTaskInterval3, sleep } from "@dxos/async";
|
|
421
|
+
import { cancelWithContext as cancelWithContext2, Resource } from "@dxos/context";
|
|
422
|
+
import { invariant as invariant3 } from "@dxos/invariant";
|
|
423
|
+
import { PublicKey as PublicKey4 } from "@dxos/keys";
|
|
424
|
+
import { log as log4 } from "@dxos/log";
|
|
425
|
+
import { trace as trace6 } from "@dxos/protocols";
|
|
426
|
+
import { SignalState } from "@dxos/protocols/proto/dxos/mesh/signal";
|
|
427
|
+
|
|
428
|
+
// packages/core/mesh/messaging/src/signal-client/signal-client-monitor.ts
|
|
429
|
+
import { trace as trace3 } from "@dxos/tracing";
|
|
430
|
+
/**
 * Collects in-memory performance counters and emits tracing metrics for a
 * signal-client connection (sends, receives, reconnects, reconciliations).
 */
var SignalClientMonitor = class {
  constructor() {
    // Plain counters, incremented by the record* methods below.
    this._performance = {
      sentMessages: 0,
      receivedMessages: 0,
      reconnectCounter: 0,
      joinCounter: 0,
      leaveCounter: 0
    };
    /**
     * Timestamp of when the connection attempt was begun.
     */
    this._connectionStarted = /* @__PURE__ */ new Date();
    /**
     * Timestamp of last state change.
     */
    this._lastStateChange = /* @__PURE__ */ new Date();
  }
  /** Returns the recorded connection-start and last-state-change timestamps. */
  getRecordedTimestamps() {
    return {
      connectionStarted: this._connectionStarted,
      lastStateChange: this._lastStateChange
    };
  }
  recordStateChangeTime() {
    this._lastStateChange = /* @__PURE__ */ new Date();
  }
  recordConnectionStartTime() {
    this._connectionStarted = /* @__PURE__ */ new Date();
  }
  /** Count a reconnect attempt, tagged with whether it succeeded. */
  recordReconnect(params) {
    this._performance.reconnectCounter++;
    trace3.metrics.increment("dxos.mesh.signal.signal-client.reconnect", 1, {
      tags: {
        success: params.success
      }
    });
  }
  recordJoin() {
    this._performance.joinCounter++;
  }
  recordLeave() {
    this._performance.leaveCounter++;
  }
  /** Count an incoming message and record its approximate byte size. */
  recordMessageReceived(message) {
    this._performance.receivedMessages++;
    trace3.metrics.increment("dxos.mesh.signal.signal-client.received-total", 1, {
      tags: createIdentityTags(message)
    });
    trace3.metrics.distribution("dxos.mesh.signal.signal-client.bytes-in", getByteCount(message), {
      tags: createIdentityTags(message)
    });
  }
  /**
   * Runs the provided sendMessage thunk, timing it and recording send metrics
   * (duration, bytes out, sent-total tagged with success/failure).
   */
  async recordMessageSending(message, sendMessage) {
    this._performance.sentMessages++;
    const tags = createIdentityTags(message);
    let success = true;
    try {
      const reqStart = Date.now();
      await sendMessage();
      const reqDuration = Date.now() - reqStart;
      trace3.metrics.distribution("dxos.mesh.signal.signal-client.send-duration", reqDuration, {
        tags
      });
      trace3.metrics.distribution("dxos.mesh.signal.signal-client.bytes-out", getByteCount(message), {
        tags
      });
    } catch (err) {
      // NOTE(review): the send error is swallowed here and surfaces only as the
      // `success: false` tag on the sent-total metric — confirm callers do not
      // rely on this method rethrowing send failures.
      success = false;
    }
    trace3.metrics.increment("dxos.mesh.signal.signal-client.sent-total", 1, {
      tags: {
        ...tags,
        success
      }
    });
  }
  /** Count errors that occurred while closing streams. */
  recordStreamCloseErrors(count) {
    trace3.metrics.increment("dxos.mesh.signal.signal-client.stream-close-errors", count);
  }
  /** Count a reconciliation pass, tagged with whether it succeeded. */
  recordReconciliation(params) {
    trace3.metrics.increment("dxos.mesh.signal.signal-client.reconciliation", 1, {
      tags: {
        success: params.success
      }
    });
  }
};
|
|
518
|
+
/**
 * Approximate wire size of a signal message: the two peer-key strings plus the
 * payload type URL and payload body lengths.
 */
var getByteCount = (message) => {
  const { author, recipient, payload } = message;
  return author.peerKey.length + recipient.peerKey.length + payload.type_url.length + payload.value.length;
};
|
|
521
|
+
/**
 * Builds the metric tags identifying the sending peer of a message.
 */
var createIdentityTags = (message) => ({
  peer: message.author.peerKey
});
|
|
526
|
+
|
|
527
|
+
// packages/core/mesh/messaging/src/signal-client/signal-local-state.ts
|
|
528
|
+
import { asyncTimeout, Event } from "@dxos/async";
|
|
529
|
+
import { cancelWithContext } from "@dxos/context";
|
|
530
|
+
import { PublicKey as PublicKey2 } from "@dxos/keys";
|
|
531
|
+
import { log as log2 } from "@dxos/log";
|
|
532
|
+
import { ComplexMap as ComplexMap2, ComplexSet as ComplexSet2, safeAwaitAll } from "@dxos/util";
|
|
533
|
+
// Build-machine source path baked in by the bundler; used as the `F` field of
// structured dxlog metadata in this module.
var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-client/signal-local-state.ts";
|
|
534
|
+
/**
 * Tracks the desired set of swarm memberships and message subscriptions, and
 * reconciles it against the live server streams (e.g. after a reconnect):
 * join/leave/subscribe/unsubscribe mutate the desired state, reconcile() opens
 * and closes the actual streams to match it.
 */
var SignalLocalState = class {
  /**
   * @param _onMessage async callback invoked for every message received on a
   *   subscribed message stream.
   * @param _onSwarmEvent async callback invoked for every peerAvailable /
   *   peerLeft event on a joined swarm stream.
   */
  constructor(_onMessage, _onSwarmEvent) {
    this._onMessage = _onMessage;
    this._onSwarmEvent = _onSwarmEvent;
    // Live swarm streams, keyed by (topic, peerId).
    this._swarmStreams = new ComplexMap2(({ topic, peerId }) => topic.toHex() + peerId.toHex());
    // Desired swarm memberships, keyed by (topic, peerId).
    this._joinedTopics = new ComplexSet2(({ topic, peerId }) => topic.toHex() + peerId.toHex());
    // Desired message subscriptions, keyed by peerId.
    this._subscribedMessages = new ComplexSet2(({ peerId }) => peerId.toHex());
    // Live message streams, keyed by peerId.
    this.messageStreams = new ComplexMap2((key) => key.toHex());
    // Emitted after each successful reconcile() pass.
    this.reconciled = new Event();
  }
  /**
   * Closes all live streams (errors are absorbed by safeAwaitAll) and clears
   * the stream maps; desired state is kept so a later reconcile can reopen.
   * @returns the number of streams whose close() failed.
   */
  async safeCloseStreams() {
    const streams = [
      ...this._swarmStreams.values()
    ].concat([
      ...this.messageStreams.values()
    ]);
    this._swarmStreams.clear();
    this.messageStreams.clear();
    const failureCount = (await safeAwaitAll(streams, (s) => s.close())).length;
    return {
      failureCount
    };
  }
  /** Records the desired membership; the stream is opened on the next reconcile. */
  join({ topic, peerId }) {
    this._joinedTopics.add({
      topic,
      peerId
    });
  }
  /** Drops the desired membership and closes/forgets any live swarm stream. */
  leave({ topic, peerId }) {
    void this._swarmStreams.get({
      topic,
      peerId
    })?.close();
    this._swarmStreams.delete({
      topic,
      peerId
    });
    this._joinedTopics.delete({
      topic,
      peerId
    });
  }
  /** Records the desired subscription; the stream is opened on the next reconcile. */
  subscribeMessages(peerId) {
    this._subscribedMessages.add({
      peerId
    });
  }
  /** Drops the desired subscription and closes/forgets any live message stream. */
  unsubscribeMessages(peerId) {
    log2("unsubscribing from messages", {
      peerId
    }, {
      F: __dxlog_file2,
      L: 79,
      S: this,
      C: (f, a) => f(...a)
    });
    this._subscribedMessages.delete({
      peerId
    });
    void this.messageStreams.get(peerId)?.close();
    this.messageStreams.delete(peerId);
  }
  /** Aligns live streams with desired state, then emits `reconciled`. */
  async reconcile(ctx, client) {
    await this._reconcileSwarmSubscriptions(ctx, client);
    await this._reconcileMessageSubscriptions(ctx, client);
    this.reconciled.emit();
  }
  /**
   * Closes swarm streams that are no longer desired, then opens (with a 5s
   * timeout, cancellable via ctx) streams for joined topics that have none.
   * Raw peer keys in swarm events are normalized to hex before dispatching.
   */
  async _reconcileSwarmSubscriptions(ctx, client) {
    for (const { topic, peerId } of this._swarmStreams.keys()) {
      if (this._joinedTopics.has({
        topic,
        peerId
      })) {
        continue;
      }
      void this._swarmStreams.get({
        topic,
        peerId
      })?.close();
      this._swarmStreams.delete({
        topic,
        peerId
      });
    }
    for (const { topic, peerId } of this._joinedTopics.values()) {
      if (this._swarmStreams.has({
        topic,
        peerId
      })) {
        continue;
      }
      const swarmStream = await asyncTimeout(cancelWithContext(ctx, client.join({
        topic,
        peerId
      })), 5e3);
      swarmStream.subscribe(async (swarmEvent) => {
        // Events arriving after a leave() are dropped by this membership check.
        if (this._joinedTopics.has({
          topic,
          peerId
        })) {
          log2("swarm event", {
            swarmEvent
          }, {
            F: __dxlog_file2,
            L: 115,
            S: this,
            C: (f, a) => f(...a)
          });
          const event = swarmEvent.peerAvailable ? {
            topic,
            peerAvailable: {
              ...swarmEvent.peerAvailable,
              peer: {
                peerKey: PublicKey2.from(swarmEvent.peerAvailable.peer).toHex()
              }
            }
          } : {
            topic,
            peerLeft: {
              ...swarmEvent.peerLeft,
              peer: {
                peerKey: PublicKey2.from(swarmEvent.peerLeft.peer).toHex()
              }
            }
          };
          await this._onSwarmEvent(event);
        }
      });
      this._swarmStreams.set({
        topic,
        peerId
      }, swarmStream);
    }
  }
  /**
   * Same pattern as swarm reconciliation, for per-peer message streams:
   * close undesired streams, open missing ones (5s timeout, cancellable),
   * and forward messages with author/recipient keys normalized to hex.
   */
  async _reconcileMessageSubscriptions(ctx, client) {
    for (const peerId of this.messageStreams.keys()) {
      if (this._subscribedMessages.has({
        peerId
      })) {
        continue;
      }
      void this.messageStreams.get(peerId)?.close();
      this.messageStreams.delete(peerId);
    }
    for (const { peerId } of this._subscribedMessages.values()) {
      if (this.messageStreams.has(peerId)) {
        continue;
      }
      const messageStream = await asyncTimeout(cancelWithContext(ctx, client.receiveMessages(peerId)), 5e3);
      messageStream.subscribe(async (signalMessage) => {
        // Messages arriving after unsubscribeMessages() are dropped here.
        if (this._subscribedMessages.has({
          peerId
        })) {
          const message = {
            author: {
              peerKey: PublicKey2.from(signalMessage.author).toHex()
            },
            recipient: {
              peerKey: PublicKey2.from(signalMessage.recipient).toHex()
            },
            payload: signalMessage.payload
          };
          await this._onMessage(message);
        }
      });
      this.messageStreams.set(peerId, messageStream);
    }
  }
};
|
|
704
|
+
|
|
705
|
+
// packages/core/mesh/messaging/src/signal-client/signal-rpc-client.ts
|
|
706
|
+
import WebSocket from "isomorphic-ws";
|
|
707
|
+
import { scheduleTaskInterval as scheduleTaskInterval2, TimeoutError as TimeoutError2, Trigger } from "@dxos/async";
|
|
708
|
+
import { Context as Context2 } from "@dxos/context";
|
|
709
|
+
import { invariant as invariant2 } from "@dxos/invariant";
|
|
710
|
+
import { PublicKey as PublicKey3 } from "@dxos/keys";
|
|
711
|
+
import { log as log3 } from "@dxos/log";
|
|
712
|
+
import { trace as trace5 } from "@dxos/protocols";
|
|
713
|
+
import { schema as schema2 } from "@dxos/protocols/proto";
|
|
714
|
+
import { createProtoRpcPeer } from "@dxos/rpc";
|
|
715
|
+
|
|
716
|
+
// packages/core/mesh/messaging/src/signal-client/signal-rpc-client-monitor.ts
|
|
717
|
+
import { trace as trace4 } from "@dxos/tracing";
|
|
718
|
+
/**
 * Emits tracing metrics for `SignalRPCClient`.
 */
var SignalRpcClientMonitor = class {
  /** Count a failure while closing the RPC client, tagged with the failure reason. */
  recordClientCloseFailure({ failureReason }) {
    const tags = { reason: failureReason };
    trace4.metrics.increment("dxos.mesh.signal.signal-rpc-client.close-failure", 1, { tags });
  }
};
|
|
727
|
+
|
|
728
|
+
// packages/core/mesh/messaging/src/signal-client/signal-rpc-client.ts
|
|
729
|
+
// Build-machine source path baked in by the bundler; used as the `F` field of
// structured dxlog metadata in this module.
var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-client/signal-rpc-client.ts";
// Interval (ms) for the keepalive task scheduled on _keepaliveCtx after the
// socket opens (presumably a ping cadence — the task body is outside this view).
var SIGNAL_KEEPALIVE_INTERVAL = 1e4;
|
|
731
|
+
var SignalRPCClient = class {
|
|
732
|
+
constructor({ url, callbacks = {} }) {
|
|
733
|
+
this._connectTrigger = new Trigger();
|
|
734
|
+
this._closed = false;
|
|
735
|
+
this._closeComplete = new Trigger();
|
|
736
|
+
this._monitor = new SignalRpcClientMonitor();
|
|
737
|
+
const traceId = PublicKey3.random().toHex();
|
|
738
|
+
log3.trace("dxos.mesh.signal-rpc-client.constructor", trace5.begin({
|
|
739
|
+
id: traceId
|
|
740
|
+
}), {
|
|
741
|
+
F: __dxlog_file3,
|
|
742
|
+
L: 61,
|
|
743
|
+
S: this,
|
|
744
|
+
C: (f, a) => f(...a)
|
|
745
|
+
});
|
|
746
|
+
this._url = url;
|
|
747
|
+
this._callbacks = callbacks;
|
|
748
|
+
this._socket = new WebSocket(this._url);
|
|
749
|
+
this._rpc = createProtoRpcPeer({
|
|
750
|
+
requested: {
|
|
751
|
+
Signal: schema2.getService("dxos.mesh.signal.Signal")
|
|
752
|
+
},
|
|
753
|
+
noHandshake: true,
|
|
754
|
+
port: {
|
|
755
|
+
send: (msg) => {
|
|
756
|
+
if (this._closed) {
|
|
757
|
+
return;
|
|
758
|
+
}
|
|
759
|
+
try {
|
|
760
|
+
this._socket.send(msg);
|
|
761
|
+
} catch (err) {
|
|
762
|
+
log3.warn("send error", err, {
|
|
763
|
+
F: __dxlog_file3,
|
|
764
|
+
L: 80,
|
|
765
|
+
S: this,
|
|
766
|
+
C: (f, a) => f(...a)
|
|
767
|
+
});
|
|
768
|
+
}
|
|
769
|
+
},
|
|
770
|
+
subscribe: (cb) => {
|
|
771
|
+
this._socket.onmessage = async (msg) => {
|
|
772
|
+
if (typeof Blob !== "undefined" && msg.data instanceof Blob) {
|
|
773
|
+
cb(Buffer.from(await msg.data.arrayBuffer()));
|
|
774
|
+
} else {
|
|
775
|
+
cb(msg.data);
|
|
776
|
+
}
|
|
777
|
+
};
|
|
778
|
+
}
|
|
779
|
+
},
|
|
780
|
+
encodingOptions: {
|
|
781
|
+
preserveAny: true
|
|
782
|
+
}
|
|
783
|
+
});
|
|
784
|
+
this._socket.onopen = async () => {
|
|
785
|
+
try {
|
|
786
|
+
await this._rpc.open();
|
|
787
|
+
if (this._closed) {
|
|
788
|
+
await this._safeCloseRpc();
|
|
789
|
+
return;
|
|
790
|
+
}
|
|
791
|
+
log3(`RPC open ${this._url}`, void 0, {
|
|
792
|
+
F: __dxlog_file3,
|
|
793
|
+
L: 105,
|
|
794
|
+
S: this,
|
|
795
|
+
C: (f, a) => f(...a)
|
|
796
|
+
});
|
|
797
|
+
this._callbacks.onConnected?.();
|
|
798
|
+
this._connectTrigger.wake();
|
|
799
|
+
this._keepaliveCtx = new Context2(void 0, {
|
|
800
|
+
F: __dxlog_file3,
|
|
801
|
+
L: 108
|
|
802
|
+
});
|
|
803
|
+
scheduleTaskInterval2(this._keepaliveCtx, async () => {
|
|
804
|
+
this._socket?.send("__ping__");
|
|
805
|
+
}, SIGNAL_KEEPALIVE_INTERVAL);
|
|
806
|
+
} catch (err) {
|
|
807
|
+
this._callbacks.onError?.(err);
|
|
808
|
+
this._socket.close();
|
|
809
|
+
this._closed = true;
|
|
810
|
+
}
|
|
811
|
+
};
|
|
812
|
+
this._socket.onclose = async () => {
|
|
813
|
+
log3(`Disconnected ${this._url}`, void 0, {
|
|
814
|
+
F: __dxlog_file3,
|
|
815
|
+
L: 128,
|
|
816
|
+
S: this,
|
|
817
|
+
C: (f, a) => f(...a)
|
|
818
|
+
});
|
|
819
|
+
this._callbacks.onDisconnected?.();
|
|
820
|
+
this._closeComplete.wake();
|
|
821
|
+
await this.close();
|
|
822
|
+
};
|
|
823
|
+
this._socket.onerror = async (event) => {
|
|
824
|
+
if (this._closed) {
|
|
825
|
+
this._socket.close();
|
|
826
|
+
return;
|
|
827
|
+
}
|
|
828
|
+
this._closed = true;
|
|
829
|
+
this._callbacks.onError?.(event.error ?? new Error(event.message));
|
|
830
|
+
await this._safeCloseRpc();
|
|
831
|
+
log3.warn(`Socket ${event.type ?? "unknown"} error`, {
|
|
832
|
+
message: event.message,
|
|
833
|
+
url: this._url
|
|
834
|
+
}, {
|
|
835
|
+
F: __dxlog_file3,
|
|
836
|
+
L: 144,
|
|
837
|
+
S: this,
|
|
838
|
+
C: (f, a) => f(...a)
|
|
839
|
+
});
|
|
840
|
+
};
|
|
841
|
+
log3.trace("dxos.mesh.signal-rpc-client.constructor", trace5.end({
|
|
842
|
+
id: traceId
|
|
843
|
+
}), {
|
|
844
|
+
F: __dxlog_file3,
|
|
845
|
+
L: 147,
|
|
846
|
+
S: this,
|
|
847
|
+
C: (f, a) => f(...a)
|
|
848
|
+
});
|
|
849
|
+
}
|
|
850
|
+
async close() {
|
|
851
|
+
if (this._closed) {
|
|
852
|
+
return;
|
|
853
|
+
}
|
|
854
|
+
this._closed = true;
|
|
855
|
+
await this._keepaliveCtx?.dispose();
|
|
856
|
+
try {
|
|
857
|
+
await this._safeCloseRpc();
|
|
858
|
+
if (this._socket.readyState === WebSocket.OPEN || this._socket.readyState === WebSocket.CONNECTING) {
|
|
859
|
+
this._socket.close();
|
|
860
|
+
}
|
|
861
|
+
await this._closeComplete.wait({
|
|
862
|
+
timeout: 1e3
|
|
863
|
+
});
|
|
864
|
+
} catch (err) {
|
|
865
|
+
const failureReason = err instanceof TimeoutError2 ? "timeout" : err?.constructor?.name ?? "unknown";
|
|
866
|
+
this._monitor.recordClientCloseFailure({
|
|
867
|
+
failureReason
|
|
868
|
+
});
|
|
869
|
+
}
|
|
870
|
+
}
|
|
871
|
+
async join({ topic, peerId }) {
|
|
872
|
+
log3("join", {
|
|
873
|
+
topic,
|
|
874
|
+
peerId,
|
|
875
|
+
metadata: this._callbacks?.getMetadata?.()
|
|
876
|
+
}, {
|
|
877
|
+
F: __dxlog_file3,
|
|
878
|
+
L: 173,
|
|
879
|
+
S: this,
|
|
880
|
+
C: (f, a) => f(...a)
|
|
881
|
+
});
|
|
882
|
+
invariant2(!this._closed, "SignalRPCClient is closed", {
|
|
883
|
+
F: __dxlog_file3,
|
|
884
|
+
L: 174,
|
|
885
|
+
S: this,
|
|
886
|
+
A: [
|
|
887
|
+
"!this._closed",
|
|
888
|
+
"'SignalRPCClient is closed'"
|
|
889
|
+
]
|
|
890
|
+
});
|
|
891
|
+
await this._connectTrigger.wait();
|
|
892
|
+
const swarmStream = this._rpc.rpc.Signal.join({
|
|
893
|
+
swarm: topic.asUint8Array(),
|
|
894
|
+
peer: peerId.asUint8Array(),
|
|
895
|
+
metadata: this._callbacks?.getMetadata?.()
|
|
896
|
+
});
|
|
897
|
+
await swarmStream.waitUntilReady();
|
|
898
|
+
return swarmStream;
|
|
899
|
+
}
|
|
900
|
+
async receiveMessages(peerId) {
|
|
901
|
+
log3("receiveMessages", {
|
|
902
|
+
peerId
|
|
903
|
+
}, {
|
|
904
|
+
F: __dxlog_file3,
|
|
905
|
+
L: 186,
|
|
906
|
+
S: this,
|
|
907
|
+
C: (f, a) => f(...a)
|
|
908
|
+
});
|
|
909
|
+
invariant2(!this._closed, "SignalRPCClient is closed", {
|
|
910
|
+
F: __dxlog_file3,
|
|
911
|
+
L: 187,
|
|
912
|
+
S: this,
|
|
913
|
+
A: [
|
|
914
|
+
"!this._closed",
|
|
915
|
+
"'SignalRPCClient is closed'"
|
|
916
|
+
]
|
|
917
|
+
});
|
|
918
|
+
await this._connectTrigger.wait();
|
|
919
|
+
const messageStream = this._rpc.rpc.Signal.receiveMessages({
|
|
920
|
+
peer: peerId.asUint8Array()
|
|
921
|
+
});
|
|
922
|
+
await messageStream.waitUntilReady();
|
|
923
|
+
return messageStream;
|
|
924
|
+
}
|
|
925
|
+
async sendMessage({ author, recipient, payload }) {
|
|
926
|
+
log3("sendMessage", {
|
|
927
|
+
author,
|
|
928
|
+
recipient,
|
|
929
|
+
payload,
|
|
930
|
+
metadata: this._callbacks?.getMetadata?.()
|
|
931
|
+
}, {
|
|
932
|
+
F: __dxlog_file3,
|
|
933
|
+
L: 197,
|
|
934
|
+
S: this,
|
|
935
|
+
C: (f, a) => f(...a)
|
|
936
|
+
});
|
|
937
|
+
invariant2(!this._closed, "SignalRPCClient is closed", {
|
|
938
|
+
F: __dxlog_file3,
|
|
939
|
+
L: 198,
|
|
940
|
+
S: this,
|
|
941
|
+
A: [
|
|
942
|
+
"!this._closed",
|
|
943
|
+
"'SignalRPCClient is closed'"
|
|
944
|
+
]
|
|
945
|
+
});
|
|
946
|
+
await this._connectTrigger.wait();
|
|
947
|
+
await this._rpc.rpc.Signal.sendMessage({
|
|
948
|
+
author: author.asUint8Array(),
|
|
949
|
+
recipient: recipient.asUint8Array(),
|
|
950
|
+
payload,
|
|
951
|
+
metadata: this._callbacks?.getMetadata?.()
|
|
952
|
+
});
|
|
953
|
+
}
|
|
954
|
+
async _safeCloseRpc() {
|
|
955
|
+
try {
|
|
956
|
+
this._connectTrigger.reset();
|
|
957
|
+
await this._rpc.close();
|
|
958
|
+
} catch (err) {
|
|
959
|
+
log3.catch(err, void 0, {
|
|
960
|
+
F: __dxlog_file3,
|
|
961
|
+
L: 213,
|
|
962
|
+
S: this,
|
|
963
|
+
C: (f, a) => f(...a)
|
|
964
|
+
});
|
|
965
|
+
}
|
|
966
|
+
}
|
|
967
|
+
};
|
|
968
|
+
|
|
969
|
+
// packages/core/mesh/messaging/src/signal-client/signal-client.ts
|
|
970
|
+
var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-client/signal-client.ts";
|
|
971
|
+
var DEFAULT_RECONNECT_TIMEOUT = 100;
|
|
972
|
+
var MAX_RECONNECT_TIMEOUT = 5e3;
|
|
973
|
+
var ERROR_RECONCILE_DELAY = 1e3;
|
|
974
|
+
var RECONCILE_INTERVAL = 5e3;
|
|
975
|
+
var SignalClient = class extends Resource {
|
|
976
|
+
/**
|
|
977
|
+
* @param _host Signal server websocket URL.
|
|
978
|
+
* @param onMessage called when a new message is received.
|
|
979
|
+
* @param onSwarmEvent called when a new swarm event is received.
|
|
980
|
+
* @param _getMetadata signal-message metadata provider, called for every message.
|
|
981
|
+
*/
|
|
982
|
+
constructor(_host, _getMetadata) {
|
|
983
|
+
super();
|
|
984
|
+
this._host = _host;
|
|
985
|
+
this._getMetadata = _getMetadata;
|
|
986
|
+
this._monitor = new SignalClientMonitor();
|
|
987
|
+
this._state = SignalState.CLOSED;
|
|
988
|
+
this._lastReconciliationFailed = false;
|
|
989
|
+
this._clientReady = new Trigger2();
|
|
990
|
+
this._reconnectAfter = DEFAULT_RECONNECT_TIMEOUT;
|
|
991
|
+
this._instanceId = PublicKey4.random().toHex();
|
|
992
|
+
this.statusChanged = new Event2();
|
|
993
|
+
this.onMessage = new Event2();
|
|
994
|
+
this.swarmEvent = new Event2();
|
|
995
|
+
if (!this._host.startsWith("wss://") && !this._host.startsWith("ws://")) {
|
|
996
|
+
throw new Error(`Signal server requires a websocket URL. Provided: ${this._host}`);
|
|
997
|
+
}
|
|
998
|
+
this.localState = new SignalLocalState(async (message) => {
|
|
999
|
+
this._monitor.recordMessageReceived(message);
|
|
1000
|
+
this.onMessage.emit(message);
|
|
1001
|
+
}, async (event) => this.swarmEvent.emit(event));
|
|
1002
|
+
}
|
|
1003
|
+
async _open() {
|
|
1004
|
+
log4.trace("dxos.mesh.signal-client.open", trace6.begin({
|
|
1005
|
+
id: this._instanceId
|
|
1006
|
+
}), {
|
|
1007
|
+
F: __dxlog_file4,
|
|
1008
|
+
L: 92,
|
|
1009
|
+
S: this,
|
|
1010
|
+
C: (f, a) => f(...a)
|
|
1011
|
+
});
|
|
1012
|
+
if ([
|
|
1013
|
+
SignalState.CONNECTED,
|
|
1014
|
+
SignalState.CONNECTING
|
|
1015
|
+
].includes(this._state)) {
|
|
1016
|
+
return;
|
|
1017
|
+
}
|
|
1018
|
+
this._setState(SignalState.CONNECTING);
|
|
1019
|
+
this._reconcileTask = new DeferredTask(this._ctx, async () => {
|
|
1020
|
+
try {
|
|
1021
|
+
await cancelWithContext2(this._connectionCtx, this._clientReady.wait({
|
|
1022
|
+
timeout: 5e3
|
|
1023
|
+
}));
|
|
1024
|
+
invariant3(this._state === SignalState.CONNECTED, "Not connected to Signal Server", {
|
|
1025
|
+
F: __dxlog_file4,
|
|
1026
|
+
L: 102,
|
|
1027
|
+
S: this,
|
|
1028
|
+
A: [
|
|
1029
|
+
"this._state === SignalState.CONNECTED",
|
|
1030
|
+
"'Not connected to Signal Server'"
|
|
1031
|
+
]
|
|
1032
|
+
});
|
|
1033
|
+
await this.localState.reconcile(this._connectionCtx, this._client);
|
|
1034
|
+
this._monitor.recordReconciliation({
|
|
1035
|
+
success: true
|
|
1036
|
+
});
|
|
1037
|
+
this._lastReconciliationFailed = false;
|
|
1038
|
+
} catch (err) {
|
|
1039
|
+
this._lastReconciliationFailed = true;
|
|
1040
|
+
this._monitor.recordReconciliation({
|
|
1041
|
+
success: false
|
|
1042
|
+
});
|
|
1043
|
+
throw err;
|
|
1044
|
+
}
|
|
1045
|
+
});
|
|
1046
|
+
scheduleTaskInterval3(this._ctx, async () => {
|
|
1047
|
+
if (this._state === SignalState.CONNECTED) {
|
|
1048
|
+
this._reconcileTask.schedule();
|
|
1049
|
+
}
|
|
1050
|
+
}, RECONCILE_INTERVAL);
|
|
1051
|
+
this._reconnectTask = new DeferredTask(this._ctx, async () => {
|
|
1052
|
+
try {
|
|
1053
|
+
await this._reconnect();
|
|
1054
|
+
this._monitor.recordReconnect({
|
|
1055
|
+
success: true
|
|
1056
|
+
});
|
|
1057
|
+
} catch (err) {
|
|
1058
|
+
this._monitor.recordReconnect({
|
|
1059
|
+
success: false
|
|
1060
|
+
});
|
|
1061
|
+
throw err;
|
|
1062
|
+
}
|
|
1063
|
+
});
|
|
1064
|
+
this._createClient();
|
|
1065
|
+
log4.trace("dxos.mesh.signal-client.open", trace6.end({
|
|
1066
|
+
id: this._instanceId
|
|
1067
|
+
}), {
|
|
1068
|
+
F: __dxlog_file4,
|
|
1069
|
+
L: 135,
|
|
1070
|
+
S: this,
|
|
1071
|
+
C: (f, a) => f(...a)
|
|
1072
|
+
});
|
|
1073
|
+
}
|
|
1074
|
+
async _catch(err) {
|
|
1075
|
+
if (this._state === SignalState.CLOSED || this._ctx.disposed) {
|
|
1076
|
+
return;
|
|
1077
|
+
}
|
|
1078
|
+
if (this._state === SignalState.CONNECTED && !this._lastReconciliationFailed) {
|
|
1079
|
+
log4.warn("SignalClient error:", err, {
|
|
1080
|
+
F: __dxlog_file4,
|
|
1081
|
+
L: 144,
|
|
1082
|
+
S: this,
|
|
1083
|
+
C: (f, a) => f(...a)
|
|
1084
|
+
});
|
|
1085
|
+
}
|
|
1086
|
+
this._scheduleReconcileAfterError();
|
|
1087
|
+
}
|
|
1088
|
+
async _close() {
|
|
1089
|
+
log4("closing...", void 0, {
|
|
1090
|
+
F: __dxlog_file4,
|
|
1091
|
+
L: 150,
|
|
1092
|
+
S: this,
|
|
1093
|
+
C: (f, a) => f(...a)
|
|
1094
|
+
});
|
|
1095
|
+
if ([
|
|
1096
|
+
SignalState.CLOSED
|
|
1097
|
+
].includes(this._state)) {
|
|
1098
|
+
return;
|
|
1099
|
+
}
|
|
1100
|
+
this._setState(SignalState.CLOSED);
|
|
1101
|
+
await this._safeResetClient();
|
|
1102
|
+
log4("closed", void 0, {
|
|
1103
|
+
F: __dxlog_file4,
|
|
1104
|
+
L: 158,
|
|
1105
|
+
S: this,
|
|
1106
|
+
C: (f, a) => f(...a)
|
|
1107
|
+
});
|
|
1108
|
+
}
|
|
1109
|
+
getStatus() {
|
|
1110
|
+
return {
|
|
1111
|
+
host: this._host,
|
|
1112
|
+
state: this._state,
|
|
1113
|
+
error: this._lastError?.message,
|
|
1114
|
+
reconnectIn: this._reconnectAfter,
|
|
1115
|
+
...this._monitor.getRecordedTimestamps()
|
|
1116
|
+
};
|
|
1117
|
+
}
|
|
1118
|
+
async join(args) {
|
|
1119
|
+
log4("joining", {
|
|
1120
|
+
topic: args.topic,
|
|
1121
|
+
peerId: args.peer.peerKey
|
|
1122
|
+
}, {
|
|
1123
|
+
F: __dxlog_file4,
|
|
1124
|
+
L: 172,
|
|
1125
|
+
S: this,
|
|
1126
|
+
C: (f, a) => f(...a)
|
|
1127
|
+
});
|
|
1128
|
+
this._monitor.recordJoin();
|
|
1129
|
+
this.localState.join({
|
|
1130
|
+
topic: args.topic,
|
|
1131
|
+
peerId: PublicKey4.from(args.peer.peerKey)
|
|
1132
|
+
});
|
|
1133
|
+
this._reconcileTask?.schedule();
|
|
1134
|
+
}
|
|
1135
|
+
async leave(args) {
|
|
1136
|
+
log4("leaving", {
|
|
1137
|
+
topic: args.topic,
|
|
1138
|
+
peerId: args.peer.peerKey
|
|
1139
|
+
}, {
|
|
1140
|
+
F: __dxlog_file4,
|
|
1141
|
+
L: 179,
|
|
1142
|
+
S: this,
|
|
1143
|
+
C: (f, a) => f(...a)
|
|
1144
|
+
});
|
|
1145
|
+
this._monitor.recordLeave();
|
|
1146
|
+
this.localState.leave({
|
|
1147
|
+
topic: args.topic,
|
|
1148
|
+
peerId: PublicKey4.from(args.peer.peerKey)
|
|
1149
|
+
});
|
|
1150
|
+
}
|
|
1151
|
+
async sendMessage(msg) {
|
|
1152
|
+
return this._monitor.recordMessageSending(msg, async () => {
|
|
1153
|
+
await this._clientReady.wait();
|
|
1154
|
+
invariant3(this._state === SignalState.CONNECTED, "Not connected to Signal Server", {
|
|
1155
|
+
F: __dxlog_file4,
|
|
1156
|
+
L: 187,
|
|
1157
|
+
S: this,
|
|
1158
|
+
A: [
|
|
1159
|
+
"this._state === SignalState.CONNECTED",
|
|
1160
|
+
"'Not connected to Signal Server'"
|
|
1161
|
+
]
|
|
1162
|
+
});
|
|
1163
|
+
invariant3(msg.author.peerKey, "Author key required", {
|
|
1164
|
+
F: __dxlog_file4,
|
|
1165
|
+
L: 188,
|
|
1166
|
+
S: this,
|
|
1167
|
+
A: [
|
|
1168
|
+
"msg.author.peerKey",
|
|
1169
|
+
"'Author key required'"
|
|
1170
|
+
]
|
|
1171
|
+
});
|
|
1172
|
+
invariant3(msg.recipient.peerKey, "Recipient key required", {
|
|
1173
|
+
F: __dxlog_file4,
|
|
1174
|
+
L: 189,
|
|
1175
|
+
S: this,
|
|
1176
|
+
A: [
|
|
1177
|
+
"msg.recipient.peerKey",
|
|
1178
|
+
"'Recipient key required'"
|
|
1179
|
+
]
|
|
1180
|
+
});
|
|
1181
|
+
await this._client.sendMessage({
|
|
1182
|
+
author: PublicKey4.from(msg.author.peerKey),
|
|
1183
|
+
recipient: PublicKey4.from(msg.recipient.peerKey),
|
|
1184
|
+
payload: msg.payload
|
|
1185
|
+
});
|
|
1186
|
+
});
|
|
1187
|
+
}
|
|
1188
|
+
async subscribeMessages(peer) {
|
|
1189
|
+
invariant3(peer.peerKey, "Peer key required", {
|
|
1190
|
+
F: __dxlog_file4,
|
|
1191
|
+
L: 199,
|
|
1192
|
+
S: this,
|
|
1193
|
+
A: [
|
|
1194
|
+
"peer.peerKey",
|
|
1195
|
+
"'Peer key required'"
|
|
1196
|
+
]
|
|
1197
|
+
});
|
|
1198
|
+
log4("subscribing to messages", {
|
|
1199
|
+
peer
|
|
1200
|
+
}, {
|
|
1201
|
+
F: __dxlog_file4,
|
|
1202
|
+
L: 200,
|
|
1203
|
+
S: this,
|
|
1204
|
+
C: (f, a) => f(...a)
|
|
1205
|
+
});
|
|
1206
|
+
this.localState.subscribeMessages(PublicKey4.from(peer.peerKey));
|
|
1207
|
+
this._reconcileTask?.schedule();
|
|
1208
|
+
}
|
|
1209
|
+
async unsubscribeMessages(peer) {
|
|
1210
|
+
invariant3(peer.peerKey, "Peer key required", {
|
|
1211
|
+
F: __dxlog_file4,
|
|
1212
|
+
L: 206,
|
|
1213
|
+
S: this,
|
|
1214
|
+
A: [
|
|
1215
|
+
"peer.peerKey",
|
|
1216
|
+
"'Peer key required'"
|
|
1217
|
+
]
|
|
1218
|
+
});
|
|
1219
|
+
log4("unsubscribing from messages", {
|
|
1220
|
+
peer
|
|
1221
|
+
}, {
|
|
1222
|
+
F: __dxlog_file4,
|
|
1223
|
+
L: 207,
|
|
1224
|
+
S: this,
|
|
1225
|
+
C: (f, a) => f(...a)
|
|
1226
|
+
});
|
|
1227
|
+
this.localState.unsubscribeMessages(PublicKey4.from(peer.peerKey));
|
|
1228
|
+
}
|
|
1229
|
+
_scheduleReconcileAfterError() {
|
|
1230
|
+
scheduleTask2(this._ctx, () => this._reconcileTask.schedule(), ERROR_RECONCILE_DELAY);
|
|
1231
|
+
}
|
|
1232
|
+
_createClient() {
|
|
1233
|
+
log4("creating client", {
|
|
1234
|
+
host: this._host,
|
|
1235
|
+
state: this._state
|
|
1236
|
+
}, {
|
|
1237
|
+
F: __dxlog_file4,
|
|
1238
|
+
L: 216,
|
|
1239
|
+
S: this,
|
|
1240
|
+
C: (f, a) => f(...a)
|
|
1241
|
+
});
|
|
1242
|
+
invariant3(!this._client, "Client already created", {
|
|
1243
|
+
F: __dxlog_file4,
|
|
1244
|
+
L: 217,
|
|
1245
|
+
S: this,
|
|
1246
|
+
A: [
|
|
1247
|
+
"!this._client",
|
|
1248
|
+
"'Client already created'"
|
|
1249
|
+
]
|
|
1250
|
+
});
|
|
1251
|
+
this._monitor.recordConnectionStartTime();
|
|
1252
|
+
this._connectionCtx = this._ctx.derive();
|
|
1253
|
+
this._connectionCtx.onDispose(async () => {
|
|
1254
|
+
log4("connection context disposed", void 0, {
|
|
1255
|
+
F: __dxlog_file4,
|
|
1256
|
+
L: 224,
|
|
1257
|
+
S: this,
|
|
1258
|
+
C: (f, a) => f(...a)
|
|
1259
|
+
});
|
|
1260
|
+
const { failureCount } = await this.localState.safeCloseStreams();
|
|
1261
|
+
this._monitor.recordStreamCloseErrors(failureCount);
|
|
1262
|
+
});
|
|
1263
|
+
try {
|
|
1264
|
+
const client = new SignalRPCClient({
|
|
1265
|
+
url: this._host,
|
|
1266
|
+
callbacks: {
|
|
1267
|
+
onConnected: () => {
|
|
1268
|
+
if (client === this._client) {
|
|
1269
|
+
log4("socket connected", void 0, {
|
|
1270
|
+
F: __dxlog_file4,
|
|
1271
|
+
L: 235,
|
|
1272
|
+
S: this,
|
|
1273
|
+
C: (f, a) => f(...a)
|
|
1274
|
+
});
|
|
1275
|
+
this._onConnected();
|
|
1276
|
+
}
|
|
1277
|
+
},
|
|
1278
|
+
onDisconnected: () => {
|
|
1279
|
+
if (client !== this._client) {
|
|
1280
|
+
return;
|
|
1281
|
+
}
|
|
1282
|
+
log4("socket disconnected", {
|
|
1283
|
+
state: this._state
|
|
1284
|
+
}, {
|
|
1285
|
+
F: __dxlog_file4,
|
|
1286
|
+
L: 244,
|
|
1287
|
+
S: this,
|
|
1288
|
+
C: (f, a) => f(...a)
|
|
1289
|
+
});
|
|
1290
|
+
if (this._state === SignalState.ERROR) {
|
|
1291
|
+
this._setState(SignalState.DISCONNECTED);
|
|
1292
|
+
} else {
|
|
1293
|
+
this._onDisconnected();
|
|
1294
|
+
}
|
|
1295
|
+
},
|
|
1296
|
+
onError: (error) => {
|
|
1297
|
+
if (client === this._client) {
|
|
1298
|
+
log4("socket error", {
|
|
1299
|
+
error,
|
|
1300
|
+
state: this._state
|
|
1301
|
+
}, {
|
|
1302
|
+
F: __dxlog_file4,
|
|
1303
|
+
L: 256,
|
|
1304
|
+
S: this,
|
|
1305
|
+
C: (f, a) => f(...a)
|
|
1306
|
+
});
|
|
1307
|
+
this._onDisconnected({
|
|
1308
|
+
error
|
|
1309
|
+
});
|
|
1310
|
+
}
|
|
1311
|
+
},
|
|
1312
|
+
getMetadata: this._getMetadata
|
|
1313
|
+
}
|
|
1314
|
+
});
|
|
1315
|
+
this._client = client;
|
|
1316
|
+
} catch (error) {
|
|
1317
|
+
this._client = void 0;
|
|
1318
|
+
this._onDisconnected({
|
|
1319
|
+
error
|
|
1320
|
+
});
|
|
1321
|
+
}
|
|
1322
|
+
}
|
|
1323
|
+
async _reconnect() {
|
|
1324
|
+
log4(`reconnecting in ${this._reconnectAfter}ms`, {
|
|
1325
|
+
state: this._state
|
|
1326
|
+
}, {
|
|
1327
|
+
F: __dxlog_file4,
|
|
1328
|
+
L: 271,
|
|
1329
|
+
S: this,
|
|
1330
|
+
C: (f, a) => f(...a)
|
|
1331
|
+
});
|
|
1332
|
+
if (this._state === SignalState.RECONNECTING) {
|
|
1333
|
+
log4.info("Signal api already reconnecting.", void 0, {
|
|
1334
|
+
F: __dxlog_file4,
|
|
1335
|
+
L: 274,
|
|
1336
|
+
S: this,
|
|
1337
|
+
C: (f, a) => f(...a)
|
|
1338
|
+
});
|
|
1339
|
+
return;
|
|
1340
|
+
}
|
|
1341
|
+
if (this._state === SignalState.CLOSED) {
|
|
1342
|
+
return;
|
|
1343
|
+
}
|
|
1344
|
+
this._setState(SignalState.RECONNECTING);
|
|
1345
|
+
await this._safeResetClient();
|
|
1346
|
+
await cancelWithContext2(this._ctx, sleep(this._reconnectAfter));
|
|
1347
|
+
this._createClient();
|
|
1348
|
+
}
|
|
1349
|
+
_onConnected() {
|
|
1350
|
+
this._lastError = void 0;
|
|
1351
|
+
this._lastReconciliationFailed = false;
|
|
1352
|
+
this._reconnectAfter = DEFAULT_RECONNECT_TIMEOUT;
|
|
1353
|
+
this._setState(SignalState.CONNECTED);
|
|
1354
|
+
this._clientReady.wake();
|
|
1355
|
+
this._reconcileTask.schedule();
|
|
1356
|
+
}
|
|
1357
|
+
_onDisconnected(options) {
|
|
1358
|
+
this._updateReconnectTimeout();
|
|
1359
|
+
if (this._state === SignalState.CLOSED) {
|
|
1360
|
+
return;
|
|
1361
|
+
}
|
|
1362
|
+
if (options?.error) {
|
|
1363
|
+
this._lastError = options.error;
|
|
1364
|
+
this._setState(SignalState.ERROR);
|
|
1365
|
+
} else {
|
|
1366
|
+
this._setState(SignalState.DISCONNECTED);
|
|
1367
|
+
}
|
|
1368
|
+
this._reconnectTask.schedule();
|
|
1369
|
+
}
|
|
1370
|
+
_setState(newState) {
|
|
1371
|
+
this._state = newState;
|
|
1372
|
+
this._monitor.recordStateChangeTime();
|
|
1373
|
+
log4("signal state changed", {
|
|
1374
|
+
status: this.getStatus()
|
|
1375
|
+
}, {
|
|
1376
|
+
F: __dxlog_file4,
|
|
1377
|
+
L: 315,
|
|
1378
|
+
S: this,
|
|
1379
|
+
C: (f, a) => f(...a)
|
|
1380
|
+
});
|
|
1381
|
+
this.statusChanged.emit(this.getStatus());
|
|
1382
|
+
}
|
|
1383
|
+
_updateReconnectTimeout() {
|
|
1384
|
+
if (this._state !== SignalState.CONNECTED && this._state !== SignalState.CONNECTING) {
|
|
1385
|
+
this._reconnectAfter *= 2;
|
|
1386
|
+
this._reconnectAfter = Math.min(this._reconnectAfter, MAX_RECONNECT_TIMEOUT);
|
|
1387
|
+
}
|
|
1388
|
+
}
|
|
1389
|
+
async _safeResetClient() {
|
|
1390
|
+
await this._connectionCtx?.dispose();
|
|
1391
|
+
this._connectionCtx = void 0;
|
|
1392
|
+
this._clientReady.reset();
|
|
1393
|
+
await this._client?.close().catch(() => {
|
|
1394
|
+
});
|
|
1395
|
+
this._client = void 0;
|
|
1396
|
+
}
|
|
1397
|
+
};
|
|
1398
|
+
|
|
1399
|
+
// packages/core/mesh/messaging/src/signal-methods.ts
|
|
1400
|
+
var PeerInfoHash = ({ peerKey }) => peerKey;
|
|
1401
|
+
|
|
1402
|
+
// packages/core/mesh/messaging/src/signal-manager/memory-signal-manager.ts
|
|
1403
|
+
import { Event as Event3, Trigger as Trigger3 } from "@dxos/async";
|
|
1404
|
+
import { Context as Context3 } from "@dxos/context";
|
|
1405
|
+
import { invariant as invariant4 } from "@dxos/invariant";
|
|
1406
|
+
import { PublicKey as PublicKey5 } from "@dxos/keys";
|
|
1407
|
+
import { log as log5 } from "@dxos/log";
|
|
1408
|
+
import { schema as schema3 } from "@dxos/protocols/proto";
|
|
1409
|
+
import { ComplexMap as ComplexMap3, ComplexSet as ComplexSet3 } from "@dxos/util";
|
|
1410
|
+
var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-manager/memory-signal-manager.ts";
|
|
1411
|
+
var MemorySignalManagerContext = class {
|
|
1412
|
+
constructor() {
|
|
1413
|
+
// Swarm messages.
|
|
1414
|
+
this.swarmEvent = new Event3();
|
|
1415
|
+
// Mapping from topic to set of peers.
|
|
1416
|
+
this.swarms = new ComplexMap3(PublicKey5.hash);
|
|
1417
|
+
// Map of connections for each peer for signaling.
|
|
1418
|
+
this.connections = new ComplexMap3(PeerInfoHash);
|
|
1419
|
+
}
|
|
1420
|
+
};
|
|
1421
|
+
var MemorySignalManager = class {
|
|
1422
|
+
constructor(_context) {
|
|
1423
|
+
this._context = _context;
|
|
1424
|
+
this.statusChanged = new Event3();
|
|
1425
|
+
this.swarmEvent = new Event3();
|
|
1426
|
+
this.onMessage = new Event3();
|
|
1427
|
+
this._joinedSwarms = new ComplexSet3(({ topic, peer }) => topic.toHex() + peer.peerKey);
|
|
1428
|
+
this._freezeTrigger = new Trigger3().wake();
|
|
1429
|
+
this._ctx = new Context3(void 0, {
|
|
1430
|
+
F: __dxlog_file5,
|
|
1431
|
+
L: 51
|
|
1432
|
+
});
|
|
1433
|
+
this._ctx.onDispose(this._context.swarmEvent.on((data) => this.swarmEvent.emit(data)));
|
|
1434
|
+
}
|
|
1435
|
+
async open() {
|
|
1436
|
+
if (!this._ctx.disposed) {
|
|
1437
|
+
return;
|
|
1438
|
+
}
|
|
1439
|
+
this._ctx = new Context3(void 0, {
|
|
1440
|
+
F: __dxlog_file5,
|
|
1441
|
+
L: 60
|
|
1442
|
+
});
|
|
1443
|
+
this._ctx.onDispose(this._context.swarmEvent.on((data) => this.swarmEvent.emit(data)));
|
|
1444
|
+
await Promise.all([
|
|
1445
|
+
...this._joinedSwarms.values()
|
|
1446
|
+
].map((value) => this.join(value)));
|
|
1447
|
+
}
|
|
1448
|
+
async close() {
|
|
1449
|
+
if (this._ctx.disposed) {
|
|
1450
|
+
return;
|
|
1451
|
+
}
|
|
1452
|
+
const joinedSwarmsCopy = new ComplexSet3(({ topic, peer }) => topic.toHex() + peer.peerKey, [
|
|
1453
|
+
...this._joinedSwarms.values()
|
|
1454
|
+
]);
|
|
1455
|
+
await Promise.all([
|
|
1456
|
+
...this._joinedSwarms.values()
|
|
1457
|
+
].map((value) => this.leave(value)));
|
|
1458
|
+
this._joinedSwarms = joinedSwarmsCopy;
|
|
1459
|
+
await this._ctx.dispose();
|
|
1460
|
+
}
|
|
1461
|
+
getStatus() {
|
|
1462
|
+
return [];
|
|
1463
|
+
}
|
|
1464
|
+
async join({ topic, peer }) {
|
|
1465
|
+
invariant4(!this._ctx.disposed, "Closed", {
|
|
1466
|
+
F: __dxlog_file5,
|
|
1467
|
+
L: 89,
|
|
1468
|
+
S: this,
|
|
1469
|
+
A: [
|
|
1470
|
+
"!this._ctx.disposed",
|
|
1471
|
+
"'Closed'"
|
|
1472
|
+
]
|
|
1473
|
+
});
|
|
1474
|
+
this._joinedSwarms.add({
|
|
1475
|
+
topic,
|
|
1476
|
+
peer
|
|
1477
|
+
});
|
|
1478
|
+
if (!this._context.swarms.has(topic)) {
|
|
1479
|
+
this._context.swarms.set(topic, new ComplexSet3(PeerInfoHash));
|
|
1480
|
+
}
|
|
1481
|
+
this._context.swarms.get(topic).add(peer);
|
|
1482
|
+
this._context.swarmEvent.emit({
|
|
1483
|
+
topic,
|
|
1484
|
+
peerAvailable: {
|
|
1485
|
+
peer,
|
|
1486
|
+
since: /* @__PURE__ */ new Date()
|
|
1487
|
+
}
|
|
1488
|
+
});
|
|
1489
|
+
for (const [topic2, peers] of this._context.swarms) {
|
|
1490
|
+
Array.from(peers).forEach((peer2) => {
|
|
1491
|
+
this.swarmEvent.emit({
|
|
1492
|
+
topic: topic2,
|
|
1493
|
+
peerAvailable: {
|
|
1494
|
+
peer: peer2,
|
|
1495
|
+
since: /* @__PURE__ */ new Date()
|
|
1496
|
+
}
|
|
1497
|
+
});
|
|
1498
|
+
});
|
|
1499
|
+
}
|
|
1500
|
+
}
|
|
1501
|
+
async leave({ topic, peer }) {
|
|
1502
|
+
invariant4(!this._ctx.disposed, "Closed", {
|
|
1503
|
+
F: __dxlog_file5,
|
|
1504
|
+
L: 121,
|
|
1505
|
+
S: this,
|
|
1506
|
+
A: [
|
|
1507
|
+
"!this._ctx.disposed",
|
|
1508
|
+
"'Closed'"
|
|
1509
|
+
]
|
|
1510
|
+
});
|
|
1511
|
+
this._joinedSwarms.delete({
|
|
1512
|
+
topic,
|
|
1513
|
+
peer
|
|
1514
|
+
});
|
|
1515
|
+
if (!this._context.swarms.has(topic)) {
|
|
1516
|
+
this._context.swarms.set(topic, new ComplexSet3(PeerInfoHash));
|
|
1517
|
+
}
|
|
1518
|
+
this._context.swarms.get(topic).delete(peer);
|
|
1519
|
+
const swarmEvent = {
|
|
1520
|
+
topic,
|
|
1521
|
+
peerLeft: {
|
|
1522
|
+
peer
|
|
1523
|
+
}
|
|
1524
|
+
};
|
|
1525
|
+
this._context.swarmEvent.emit(swarmEvent);
|
|
1526
|
+
}
|
|
1527
|
+
async sendMessage({ author, recipient, payload }) {
|
|
1528
|
+
log5("send message", {
|
|
1529
|
+
author,
|
|
1530
|
+
recipient,
|
|
1531
|
+
...dec(payload)
|
|
1532
|
+
}, {
|
|
1533
|
+
F: __dxlog_file5,
|
|
1534
|
+
L: 142,
|
|
1535
|
+
S: this,
|
|
1536
|
+
C: (f, a) => f(...a)
|
|
1537
|
+
});
|
|
1538
|
+
invariant4(recipient, void 0, {
|
|
1539
|
+
F: __dxlog_file5,
|
|
1540
|
+
L: 144,
|
|
1541
|
+
S: this,
|
|
1542
|
+
A: [
|
|
1543
|
+
"recipient",
|
|
1544
|
+
""
|
|
1545
|
+
]
|
|
1546
|
+
});
|
|
1547
|
+
invariant4(!this._ctx.disposed, "Closed", {
|
|
1548
|
+
F: __dxlog_file5,
|
|
1549
|
+
L: 145,
|
|
1550
|
+
S: this,
|
|
1551
|
+
A: [
|
|
1552
|
+
"!this._ctx.disposed",
|
|
1553
|
+
"'Closed'"
|
|
1554
|
+
]
|
|
1555
|
+
});
|
|
1556
|
+
await this._freezeTrigger.wait();
|
|
1557
|
+
const remote = this._context.connections.get(recipient);
|
|
1558
|
+
if (!remote) {
|
|
1559
|
+
log5.warn("recipient is not subscribed for messages", {
|
|
1560
|
+
author,
|
|
1561
|
+
recipient
|
|
1562
|
+
}, {
|
|
1563
|
+
F: __dxlog_file5,
|
|
1564
|
+
L: 151,
|
|
1565
|
+
S: this,
|
|
1566
|
+
C: (f, a) => f(...a)
|
|
1567
|
+
});
|
|
1568
|
+
return;
|
|
1569
|
+
}
|
|
1570
|
+
if (remote._ctx.disposed) {
|
|
1571
|
+
log5.warn("recipient is disposed", {
|
|
1572
|
+
author,
|
|
1573
|
+
recipient
|
|
1574
|
+
}, {
|
|
1575
|
+
F: __dxlog_file5,
|
|
1576
|
+
L: 156,
|
|
1577
|
+
S: this,
|
|
1578
|
+
C: (f, a) => f(...a)
|
|
1579
|
+
});
|
|
1580
|
+
return;
|
|
1581
|
+
}
|
|
1582
|
+
remote._freezeTrigger.wait().then(() => {
|
|
1583
|
+
if (remote._ctx.disposed) {
|
|
1584
|
+
log5.warn("recipient is disposed", {
|
|
1585
|
+
author,
|
|
1586
|
+
recipient
|
|
1587
|
+
}, {
|
|
1588
|
+
F: __dxlog_file5,
|
|
1589
|
+
L: 164,
|
|
1590
|
+
S: this,
|
|
1591
|
+
C: (f, a) => f(...a)
|
|
1592
|
+
});
|
|
1593
|
+
return;
|
|
1594
|
+
}
|
|
1595
|
+
log5("receive message", {
|
|
1596
|
+
author,
|
|
1597
|
+
recipient,
|
|
1598
|
+
...dec(payload)
|
|
1599
|
+
}, {
|
|
1600
|
+
F: __dxlog_file5,
|
|
1601
|
+
L: 168,
|
|
1602
|
+
S: this,
|
|
1603
|
+
C: (f, a) => f(...a)
|
|
1604
|
+
});
|
|
1605
|
+
remote.onMessage.emit({
|
|
1606
|
+
author,
|
|
1607
|
+
recipient,
|
|
1608
|
+
payload
|
|
1609
|
+
});
|
|
1610
|
+
}).catch((err) => {
|
|
1611
|
+
log5.error("error while waiting for freeze", {
|
|
1612
|
+
err
|
|
1613
|
+
}, {
|
|
1614
|
+
F: __dxlog_file5,
|
|
1615
|
+
L: 173,
|
|
1616
|
+
S: this,
|
|
1617
|
+
C: (f, a) => f(...a)
|
|
1618
|
+
});
|
|
1619
|
+
});
|
|
1620
|
+
}
|
|
1621
|
+
async subscribeMessages(peerInfo) {
|
|
1622
|
+
log5("subscribing", {
|
|
1623
|
+
peerInfo
|
|
1624
|
+
}, {
|
|
1625
|
+
F: __dxlog_file5,
|
|
1626
|
+
L: 178,
|
|
1627
|
+
S: this,
|
|
1628
|
+
C: (f, a) => f(...a)
|
|
1629
|
+
});
|
|
1630
|
+
this._context.connections.set(peerInfo, this);
|
|
1631
|
+
}
|
|
1632
|
+
async unsubscribeMessages(peerInfo) {
|
|
1633
|
+
log5("unsubscribing", {
|
|
1634
|
+
peerInfo
|
|
1635
|
+
}, {
|
|
1636
|
+
F: __dxlog_file5,
|
|
1637
|
+
L: 183,
|
|
1638
|
+
S: this,
|
|
1639
|
+
C: (f, a) => f(...a)
|
|
1640
|
+
});
|
|
1641
|
+
this._context.connections.delete(peerInfo);
|
|
1642
|
+
}
|
|
1643
|
+
freeze() {
|
|
1644
|
+
this._freezeTrigger.reset();
|
|
1645
|
+
}
|
|
1646
|
+
unfreeze() {
|
|
1647
|
+
this._freezeTrigger.wake();
|
|
1648
|
+
}
|
|
1649
|
+
};
|
|
1650
|
+
var dec = (payload) => {
|
|
1651
|
+
if (!payload.type_url.endsWith("ReliablePayload")) {
|
|
1652
|
+
return {};
|
|
1653
|
+
}
|
|
1654
|
+
const relPayload = schema3.getCodecForType("dxos.mesh.messaging.ReliablePayload").decode(payload.value);
|
|
1655
|
+
if (typeof relPayload?.payload?.data === "object") {
|
|
1656
|
+
return {
|
|
1657
|
+
payload: Object.keys(relPayload?.payload?.data)[0],
|
|
1658
|
+
sessionId: relPayload?.payload?.sessionId
|
|
1659
|
+
};
|
|
1660
|
+
}
|
|
1661
|
+
return {};
|
|
1662
|
+
};
|
|
1663
|
+
|
|
1664
|
+
// packages/core/mesh/messaging/src/signal-manager/websocket-signal-manager.ts
|
|
1665
|
+
import { Event as Event4, sleep as sleep2, synchronized } from "@dxos/async";
|
|
1666
|
+
import { LifecycleState, Resource as Resource2 } from "@dxos/context";
|
|
1667
|
+
import { invariant as invariant5 } from "@dxos/invariant";
|
|
1668
|
+
import { PublicKey as PublicKey6 } from "@dxos/keys";
|
|
1669
|
+
import { log as log6 } from "@dxos/log";
|
|
1670
|
+
import { RateLimitExceededError, TimeoutError as TimeoutError3, trace as trace8 } from "@dxos/protocols";
|
|
1671
|
+
import { BitField, safeAwaitAll as safeAwaitAll2 } from "@dxos/util";
|
|
1672
|
+
|
|
1673
|
+
// packages/core/mesh/messaging/src/signal-manager/websocket-signal-manager-monitor.ts
|
|
1674
|
+
import { trace as trace7 } from "@dxos/tracing";
|
|
1675
|
+
// Thin metrics facade for the websocket signal manager: forwards rate-limit
// and server-failure events to the tracing metrics backend.
var WebsocketSignalManagerMonitor = class {
  /** Count one WSS rate-limit rejection. */
  recordRateLimitExceeded() {
    trace7.metrics.increment("dxos.mesh.signal.signal-manager.rate-limit-hit", 1);
  }

  /** Count one server failure, tagged with the server name and whether a restart follows. */
  recordServerFailure({ serverName, willRestart }) {
    const tags = {
      server: serverName,
      restarted: willRestart
    };
    trace7.metrics.increment("dxos.mesh.signal.signal-manager.server-failure", 1, { tags });
  }
};
|
|
1688
|
+
|
|
1689
|
+
// packages/core/mesh/messaging/src/signal-manager/websocket-signal-manager.ts
|
|
1690
|
+
// Standard TypeScript `__decorate` helper (emitted by the compiler/bundler).
// Applies `decorators` right-to-left to a class (2 args), a property (3 args),
// or a method descriptor (4 args), then installs the resulting descriptor.
function _ts_decorate(decorators, target, key, desc) {
  const argCount = arguments.length;
  let result =
    argCount < 3
      ? target
      : desc === null
        ? (desc = Object.getOwnPropertyDescriptor(target, key))
        : desc;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") {
    // Runtime with a decorate shim (e.g. reflect-metadata) takes precedence.
    result = Reflect.decorate(decorators, target, key, desc);
  } else {
    for (let i = decorators.length - 1; i >= 0; i--) {
      const decorator = decorators[i];
      if (decorator) {
        const applied =
          argCount < 3
            ? decorator(result)
            : argCount > 3
              ? decorator(target, key, result)
              : decorator(target, key);
        result = applied || result;
      }
    }
  }
  if (argCount > 3 && result) {
    Object.defineProperty(target, key, result);
  }
  return result;
}
|
|
1696
|
+
// Absolute path of the original TS source file, embedded by the @dxos/log
// build transform so runtime log entries can reference their call site.
var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-manager/websocket-signal-manager.ts";
// Failure count per server above which a restart is forced (see checkServerFailure).
var MAX_SERVER_FAILURES = 5;
// Delay (ms) between closing and reopening a failed websocket signal server.
var WSS_SIGNAL_SERVER_REBOOT_DELAY = 3e3;
|
|
1699
|
+
/**
 * Signal manager that fans every operation (join/leave/send/subscribe) out to a
 * set of websocket signal servers (one SignalClient per unique host), tracks
 * per-server failures, and restarts servers that fail too often.
 *
 * NOTE: generated bundler output — the `{ F, L, S, C }` / `{ F, L, S, A }`
 * trailer objects are call-site metadata injected by the @dxos/log transform
 * and must not be altered.
 */
var WebsocketSignalManager = class extends Resource2 {
  constructor(_hosts, _getMetadata) {
    super();
    this._hosts = _hosts;
    this._getMetadata = _getMetadata;
    // server host -> SignalClient (deduplicated by host below).
    this._servers = /* @__PURE__ */ new Map();
    this._monitor = new WebsocketSignalManagerMonitor();
    // server host -> consecutive failure count (reset on success or restart).
    this.failureCount = /* @__PURE__ */ new Map();
    this.statusChanged = new Event4();
    this.swarmEvent = new Event4();
    this.onMessage = new Event4();
    this._instanceId = PublicKey6.random().toHex();
    log6("Created WebsocketSignalManager", {
      hosts: this._hosts
    }, {
      F: __dxlog_file6,
      L: 54,
      S: this,
      C: (f, a) => f(...a)
    });
    for (const host of this._hosts) {
      // Skip duplicate host entries.
      if (this._servers.has(host.server)) {
        continue;
      }
      const server = new SignalClient(host.server, this._getMetadata);
      // Re-emit per-server events on the aggregated manager-level events.
      server.swarmEvent.on((data) => this.swarmEvent.emit(data));
      server.onMessage.on((data) => this.onMessage.emit(data));
      server.statusChanged.on(() => this.statusChanged.emit(this.getStatus()));
      this._servers.set(host.server, server);
      this.failureCount.set(host.server, 0);
    }
    // One "failed" bit per host index; set once per failure episode so the
    // restart warning is only logged on the first failure.
    this._failedServersBitfield = BitField.zeros(this._hosts.length);
  }
  /** Opens all underlying signal clients (errors are collected, not thrown early). */
  async _open() {
    log6("open signal manager", {
      hosts: this._hosts
    }, {
      F: __dxlog_file6,
      L: 74,
      S: this,
      C: (f, a) => f(...a)
    });
    log6.trace("dxos.mesh.websocket-signal-manager.open", trace8.begin({
      id: this._instanceId
    }), {
      F: __dxlog_file6,
      L: 75,
      S: this,
      C: (f, a) => f(...a)
    });
    await safeAwaitAll2(this._servers.values(), (server) => server.open());
    log6.trace("dxos.mesh.websocket-signal-manager.open", trace8.end({
      id: this._instanceId
    }), {
      F: __dxlog_file6,
      L: 79,
      S: this,
      C: (f, a) => f(...a)
    });
  }
  /** Closes all underlying signal clients. */
  async _close() {
    await safeAwaitAll2(this._servers.values(), (server) => server.close());
  }
  /** Close, wait WSS_SIGNAL_SERVER_REBOOT_DELAY ms, then reopen one server. */
  async restartServer(serverName) {
    log6("restarting server", {
      serverName
    }, {
      F: __dxlog_file6,
      L: 87,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 88,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    const server = this._servers.get(serverName);
    invariant5(server, "server not found", {
      F: __dxlog_file6,
      L: 91,
      S: this,
      A: [
        "server",
        "'server not found'"
      ]
    });
    await server.close();
    await sleep2(WSS_SIGNAL_SERVER_REBOOT_DELAY);
    await server.open();
  }
  /** @returns the current status of every managed server. */
  getStatus() {
    return Array.from(this._servers.values()).map((server) => server.getStatus());
  }
  /** Joins the swarm `topic` on every server. Decorated with @synchronized below. */
  async join({ topic, peer }) {
    log6("join", {
      topic,
      peer
    }, {
      F: __dxlog_file6,
      L: 104,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 105,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    await this._forEachServer((server) => server.join({
      topic,
      peer
    }));
  }
  /** Leaves the swarm `topic` on every server. Decorated with @synchronized below. */
  async leave({ topic, peer }) {
    log6("leaving", {
      topic,
      peer
    }, {
      F: __dxlog_file6,
      L: 111,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 112,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    await this._forEachServer((server) => server.leave({
      topic,
      peer
    }));
  }
  /**
   * Sends a message via every server, fire-and-forget: the per-server sends are
   * deliberately not awaited. A successful send clears the server's failed flag;
   * a timeout or unknown error counts toward that server's failure threshold.
   */
  async sendMessage({ author, recipient, payload }) {
    log6("signal", {
      recipient
    }, {
      F: __dxlog_file6,
      L: 117,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 118,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    void this._forEachServer(async (server, serverName, index) => {
      void server.sendMessage({
        author,
        recipient,
        payload
      }).then(() => this._clearServerFailedFlag(serverName, index)).catch((err) => {
        if (err instanceof RateLimitExceededError) {
          // Rate limiting is not a server fault: record the metric only.
          log6.info("WSS rate limit exceeded", {
            err
          }, {
            F: __dxlog_file6,
            L: 126,
            S: this,
            C: (f, a) => f(...a)
          });
          this._monitor.recordRateLimitExceeded();
        } else if (err instanceof TimeoutError3 || err.constructor.name === "TimeoutError") {
          // Name check also catches TimeoutError instances from a different
          // bundled copy of the protocols package.
          log6.info("WSS sendMessage timeout", {
            err
          }, {
            F: __dxlog_file6,
            L: 129,
            S: this,
            C: (f, a) => f(...a)
          });
          void this.checkServerFailure(serverName, index);
        } else {
          log6.warn(`error sending to ${serverName}`, {
            err
          }, {
            F: __dxlog_file6,
            L: 132,
            S: this,
            C: (f, a) => f(...a)
          });
          void this.checkServerFailure(serverName, index);
        }
      });
    });
  }
  /**
   * Registers one failure for `serverName`; once the count exceeds
   * MAX_SERVER_FAILURES the server is restarted and the count reset.
   * Decorated with @synchronized below so concurrent failures serialize.
   */
  async checkServerFailure(serverName, index) {
    const failureCount = this.failureCount.get(serverName) ?? 0;
    const isRestartRequired = failureCount > MAX_SERVER_FAILURES;
    this._monitor.recordServerFailure({
      serverName,
      willRestart: isRestartRequired
    });
    if (isRestartRequired) {
      if (!BitField.get(this._failedServersBitfield, index)) {
        // Warn only on the first failure of an episode (flag gates the log).
        log6.warn("too many failures for ws-server, restarting", {
          serverName,
          failureCount
        }, {
          F: __dxlog_file6,
          L: 146,
          S: this,
          C: (f, a) => f(...a)
        });
        BitField.set(this._failedServersBitfield, index, true);
      }
      await this.restartServer(serverName);
      this.failureCount.set(serverName, 0);
      return;
    }
    this.failureCount.set(serverName, (this.failureCount.get(serverName) ?? 0) + 1);
  }
  /** Clears the failed flag and counter after a successful send. */
  _clearServerFailedFlag(serverName, index) {
    if (BitField.get(this._failedServersBitfield, index)) {
      log6.info("server connection restored", {
        serverName
      }, {
        F: __dxlog_file6,
        L: 159,
        S: this,
        C: (f, a) => f(...a)
      });
      BitField.set(this._failedServersBitfield, index, false);
      this.failureCount.set(serverName, 0);
    }
  }
  /** Subscribes `peer` to its message stream on every server. */
  async subscribeMessages(peer) {
    log6("subscribed for message stream", {
      peer
    }, {
      F: __dxlog_file6,
      L: 166,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 167,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    await this._forEachServer(async (server) => server.subscribeMessages(peer));
  }
  /** Unsubscribes `peer` from its message stream on every server. */
  async unsubscribeMessages(peer) {
    // FIX: was "subscribed for message stream" — a copy-paste of the subscribe
    // log message (the memory signal manager logs "unsubscribing" here).
    log6("unsubscribed from message stream", {
      peer
    }, {
      F: __dxlog_file6,
      L: 173,
      S: this,
      C: (f, a) => f(...a)
    });
    invariant5(this._lifecycleState === LifecycleState.OPEN, void 0, {
      F: __dxlog_file6,
      L: 174,
      S: this,
      A: [
        "this._lifecycleState === LifecycleState.OPEN",
        ""
      ]
    });
    await this._forEachServer(async (server) => server.unsubscribeMessages(peer));
  }
  /** Runs `fn(server, serverName, index)` for every server in parallel. */
  async _forEachServer(fn) {
    return Promise.all(Array.from(this._servers.entries()).map(([serverName, server], idx) => fn(server, serverName, idx)));
  }
};
|
|
1987
|
+
// Apply the @synchronized decorator (compiled away by the bundler into these
// helper calls): join, leave and checkServerFailure are serialized so
// concurrent invocations on the same instance never overlap.
_ts_decorate([
  synchronized
], WebsocketSignalManager.prototype, "join", null);
_ts_decorate([
  synchronized
], WebsocketSignalManager.prototype, "leave", null);
_ts_decorate([
  synchronized
], WebsocketSignalManager.prototype, "checkServerFailure", null);
|
|
1996
|
+
|
|
1997
|
+
// packages/core/mesh/messaging/src/signal-manager/edge-signal-manager.ts
|
|
1998
|
+
import { Event as Event5 } from "@dxos/async";
|
|
1999
|
+
import { Resource as Resource3 } from "@dxos/context";
|
|
2000
|
+
import { protocol } from "@dxos/edge-client";
|
|
2001
|
+
import { invariant as invariant6 } from "@dxos/invariant";
|
|
2002
|
+
import { PublicKey as PublicKey7 } from "@dxos/keys";
|
|
2003
|
+
import { log as log7 } from "@dxos/log";
|
|
2004
|
+
import { EdgeService } from "@dxos/protocols";
|
|
2005
|
+
import { bufWkt } from "@dxos/protocols/buf";
|
|
2006
|
+
import { SwarmRequestSchema, SwarmRequest_Action as SwarmRequestAction, SwarmResponseSchema } from "@dxos/protocols/buf/dxos/edge/messenger_pb";
|
|
2007
|
+
import { ComplexMap as ComplexMap4, ComplexSet as ComplexSet4 } from "@dxos/util";
|
|
2008
|
+
var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-manager/edge-signal-manager.ts";
|
|
2009
|
+
/**
 * Signal manager backed by a single DXOS edge connection: swarm membership is
 * mirrored through the edge SWARM service and point-to-point signaling through
 * the edge SIGNAL service.
 *
 * NOTE: generated bundler output — the `{ F, L, S, ... }` trailer objects are
 * call-site metadata injected by the @dxos/log transform; do not edit them.
 */
var EdgeSignalManager = class extends Resource3 {
  constructor({ edgeConnection }) {
    super();
    this.swarmEvent = new Event5();
    this.onMessage = new Event5();
    /**
     * swarm key -> peerKeys in the swarm
     */
    // TODO(mykola): This class should not contain swarm state. Temporary before network-manager API changes to accept list of peers.
    this._swarmPeers = new ComplexMap4(PublicKey7.hash);
    this._edgeConnection = edgeConnection;
  }
  async _open() {
    // Route incoming edge messages to _onMessage; the listener is removed on dispose.
    this._ctx.onDispose(this._edgeConnection.addListener((message) => this._onMessage(message)));
    // On reconnect the edge server has lost our swarm state — rejoin everything.
    this._edgeConnection.reconnect.on(this._ctx, () => this._rejoinAllSwarms());
    await this._rejoinAllSwarms();
  }
  /**
   * Warning: PeerInfo is inferred from edgeConnection.
   */
  async join({ topic, peer }) {
    // The provided peer info is only validated, not sent: the edge connection's
    // own identity is authoritative. Mismatches are logged and then ignored.
    if (!this._matchSelfPeerInfo(peer)) {
      log7.warn("ignoring peer info on join request", {
        peer,
        expected: {
          peerKey: this._edgeConnection.peerKey,
          identityKey: this._edgeConnection.identityKey
        }
      }, {
        F: __dxlog_file7,
        L: 52,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    // Start with an empty peer set; it is filled by the swarm response.
    this._swarmPeers.set(topic, new ComplexSet4(PeerInfoHash));
    await this._edgeConnection.send(protocol.createMessage(SwarmRequestSchema, {
      serviceId: EdgeService.SWARM_SERVICE_ID,
      payload: {
        action: SwarmRequestAction.JOIN,
        swarmKeys: [
          topic.toHex()
        ]
      }
    }));
  }
  async leave({ topic, peer }) {
    // `peer` is accepted for interface compatibility but not needed: the edge
    // connection identifies this peer.
    this._swarmPeers.delete(topic);
    await this._edgeConnection.send(protocol.createMessage(SwarmRequestSchema, {
      serviceId: EdgeService.SWARM_SERVICE_ID,
      payload: {
        action: SwarmRequestAction.LEAVE,
        swarmKeys: [
          topic.toHex()
        ]
      }
    }));
  }
  async sendMessage(message) {
    // Author mismatches are logged but the message is still sent (see join).
    if (!this._matchSelfPeerInfo(message.author)) {
      log7.warn("ignoring author on send request", {
        author: message.author,
        expected: {
          peerKey: this._edgeConnection.peerKey,
          identityKey: this._edgeConnection.identityKey
        }
      }, {
        F: __dxlog_file7,
        L: 83,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    // Wrap the opaque payload in a protobuf Any and route it via the signal service.
    await this._edgeConnection.send(protocol.createMessage(bufWkt.AnySchema, {
      serviceId: EdgeService.SIGNAL_SERVICE_ID,
      source: message.author,
      target: [
        message.recipient
      ],
      payload: {
        typeUrl: message.payload.type_url,
        value: message.payload.value
      }
    }));
  }
  // No-op: message delivery is implicit in the edge connection (no explicit
  // per-peer subscription is required).
  async subscribeMessages(peerInfo) {
  }
  // No-op: see subscribeMessages.
  async unsubscribeMessages(peerInfo) {
  }
  // Dispatch an incoming edge message by service id.
  _onMessage(message) {
    switch (message.serviceId) {
      case EdgeService.SWARM_SERVICE_ID: {
        this._processSwarmResponse(message);
        break;
      }
      case EdgeService.SIGNAL_SERVICE_ID: {
        this._processMessage(message);
      }
    }
  }
  // Diffs the server's swarm roster against the locally cached set and emits
  // peerAvailable / peerLeft events for the changes.
  _processSwarmResponse(message) {
    invariant6(protocol.getPayloadType(message) === SwarmResponseSchema.typeName, "Wrong payload type", {
      F: __dxlog_file7,
      L: 120,
      S: this,
      A: [
        "protocol.getPayloadType(message) === SwarmResponseSchema.typeName",
        "'Wrong payload type'"
      ]
    });
    const payload = protocol.getPayload(message, SwarmResponseSchema);
    const topic = PublicKey7.from(payload.swarmKey);
    if (!this._swarmPeers.has(topic)) {
      // Stale update for a swarm we already left (or never joined).
      log7.warn("Received message from wrong topic", {
        topic
      }, {
        F: __dxlog_file7,
        L: 124,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    const oldPeers = this._swarmPeers.get(topic);
    const timestamp = message.timestamp ? new Date(Date.parse(message.timestamp)) : /* @__PURE__ */ new Date();
    const newPeers = new ComplexSet4(PeerInfoHash, payload.peers);
    // Peers present now but not before -> peerAvailable.
    for (const peer of newPeers) {
      if (oldPeers.has(peer)) {
        continue;
      }
      this.swarmEvent.emit({
        topic,
        peerAvailable: {
          peer,
          since: timestamp
        }
      });
    }
    // Peers present before but not now -> peerLeft.
    for (const peer of oldPeers) {
      if (newPeers.has(peer)) {
        continue;
      }
      this.swarmEvent.emit({
        topic,
        peerLeft: {
          peer
        }
      });
    }
    this._swarmPeers.set(topic, newPeers);
  }
  // Unwraps a signal-service Any payload and re-emits it as an onMessage event.
  _processMessage(message) {
    invariant6(protocol.getPayloadType(message) === bufWkt.AnySchema.typeName, "Wrong payload type", {
      F: __dxlog_file7,
      L: 157,
      S: this,
      A: [
        "protocol.getPayloadType(message) === bufWkt.AnySchema.typeName",
        "'Wrong payload type'"
      ]
    });
    const payload = protocol.getPayload(message, bufWkt.AnySchema);
    invariant6(message.source, "source is missing", {
      F: __dxlog_file7,
      L: 159,
      S: this,
      A: [
        "message.source",
        "'source is missing'"
      ]
    });
    invariant6(message.target, "target is missing", {
      F: __dxlog_file7,
      L: 160,
      S: this,
      A: [
        "message.target",
        "'target is missing'"
      ]
    });
    invariant6(message.target.length === 1, "target should have exactly one item", {
      F: __dxlog_file7,
      L: 161,
      S: this,
      A: [
        "message.target.length === 1",
        "'target should have exactly one item'"
      ]
    });
    this.onMessage.emit({
      author: message.source,
      recipient: message.target[0],
      payload: {
        type_url: payload.typeUrl,
        value: payload.value
      }
    });
  }
  // True when `peer` matches this connection's peerKey OR identityKey.
  _matchSelfPeerInfo(peer) {
    return peer && (peer.peerKey === this._edgeConnection.peerKey || peer.identityKey === this._edgeConnection.identityKey);
  }
  // Re-issues a join for every cached swarm (used on open and after reconnect).
  async _rejoinAllSwarms() {
    log7("rejoin swarms", {
      swarms: Array.from(this._swarmPeers.keys())
    }, {
      F: __dxlog_file7,
      L: 180,
      S: this,
      C: (f, a) => f(...a)
    });
    for (const topic of this._swarmPeers.keys()) {
      await this.join({
        topic,
        peer: {
          peerKey: this._edgeConnection.peerKey,
          identityKey: this._edgeConnection.identityKey
        }
      });
    }
  }
};
|
|
2230
|
+
|
|
2231
|
+
// packages/core/mesh/messaging/src/signal-manager/utils.ts
|
|
2232
|
+
import { invariant as invariant7 } from "@dxos/invariant";
|
|
2233
|
+
import { log as log8 } from "@dxos/log";
|
|
2234
|
+
import { DeviceKind } from "@dxos/protocols/proto/dxos/client/services";
|
|
2235
|
+
var __dxlog_file8 = "/home/runner/work/dxos/dxos/packages/core/mesh/messaging/src/signal-manager/utils.ts";
|
|
2236
|
+
// Subscribes to the identity and device services and publishes truncated
// identity/device keys via `setTag` (presumably for log/trace tagging —
// TODO confirm against callers). Tags are (re)set on every query update.
var setIdentityTags = ({ identityService, devicesService, setTag }) => {
  identityService.queryIdentity().subscribe((idqr) => {
    // No identity yet (e.g. device not onboarded) — nothing to tag.
    if (!idqr?.identity?.identityKey) {
      log8("empty response from identity service", {
        idqr
      }, {
        F: __dxlog_file8,
        L: 21,
        S: void 0,
        C: (f, a) => f(...a)
      });
      return;
    }
    setTag("identityKey", idqr.identity.identityKey.truncate());
  });
  devicesService.queryDevices().subscribe((dqr) => {
    if (!dqr || !dqr.devices || dqr.devices.length === 0) {
      log8("empty response from device service", {
        device: dqr
      }, {
        F: __dxlog_file8,
        L: 30,
        S: void 0,
        C: (f, a) => f(...a)
      });
      return;
    }
    // NOTE(review): redundant with the guard above (dqr is already truthy here);
    // kept as emitted by the build.
    invariant7(dqr, "empty response from device service", {
      F: __dxlog_file8,
      L: 33,
      S: void 0,
      A: [
        "dqr",
        "'empty response from device service'"
      ]
    });
    // Tag only the device this process is running as.
    const thisDevice = dqr.devices.find((device) => device.kind === DeviceKind.CURRENT);
    if (!thisDevice) {
      log8("no current device", {
        device: dqr
      }, {
        F: __dxlog_file8,
        L: 37,
        S: void 0,
        C: (f, a) => f(...a)
      });
      return;
    }
    setTag("deviceKey", thisDevice.deviceKey.truncate());
  });
};
|
|
2287
|
+
|
|
2288
|
+
export {
|
|
2289
|
+
Buffer,
|
|
2290
|
+
Messenger,
|
|
2291
|
+
SignalClient,
|
|
2292
|
+
PeerInfoHash,
|
|
2293
|
+
MemorySignalManagerContext,
|
|
2294
|
+
MemorySignalManager,
|
|
2295
|
+
WebsocketSignalManager,
|
|
2296
|
+
EdgeSignalManager,
|
|
2297
|
+
setIdentityTags
|
|
2298
|
+
};
|
|
2299
|
+
//# sourceMappingURL=chunk-BN7UMWB4.mjs.map
|