@dxos/teleport 0.6.11 → 0.6.12-main.5cc132e
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/lib/browser/{chunk-ISJQDU2V.mjs → chunk-K7XTPLL4.mjs} +1 -1
- package/dist/lib/browser/{chunk-ISJQDU2V.mjs.map → chunk-K7XTPLL4.mjs.map} +1 -1
- package/dist/lib/browser/index.mjs +1 -1
- package/dist/lib/browser/meta.json +1 -1
- package/dist/lib/browser/testing/index.mjs +1 -1
- package/dist/lib/node/{chunk-TC7PLXKV.cjs → chunk-M63AQWOL.cjs} +4 -4
- package/dist/lib/node/{chunk-TC7PLXKV.cjs.map → chunk-M63AQWOL.cjs.map} +1 -1
- package/dist/lib/node/index.cjs +11 -11
- package/dist/lib/node/meta.json +1 -1
- package/dist/lib/node/testing/index.cjs +6 -6
- package/dist/lib/node/testing/index.cjs.map +1 -1
- package/dist/lib/node-esm/chunk-76FPY3V4.mjs +1966 -0
- package/dist/lib/node-esm/chunk-76FPY3V4.mjs.map +7 -0
- package/dist/lib/node-esm/index.mjs +85 -0
- package/dist/lib/node-esm/index.mjs.map +7 -0
- package/dist/lib/node-esm/meta.json +1 -0
- package/dist/lib/node-esm/testing/index.mjs +15 -0
- package/dist/lib/node-esm/testing/index.mjs.map +7 -0
- package/package.json +16 -16
- package/src/muxing/balancer.test.ts +4 -8
- package/src/muxing/framer.test.ts +6 -10
- package/src/muxing/muxer.test.ts +3 -7
- package/src/muxing/rpc-port.test.ts +2 -1
- package/src/teleport.test.ts +4 -5
- package/src/testing/test-extension-with-streams.ts +2 -2
package/dist/lib/node-esm/chunk-76FPY3V4.mjs
@@ -0,0 +1,1966 @@
// packages/core/mesh/teleport/src/testing/test-builder.ts
import { pipeline } from "node:stream";
import { waitForCondition } from "@dxos/async";
import { invariant as invariant5 } from "@dxos/invariant";
import { PublicKey as PublicKey2 } from "@dxos/keys";
import { log as log6 } from "@dxos/log";

// packages/core/mesh/teleport/src/teleport.ts
import { runInContextAsync, synchronized, scheduleTask } from "@dxos/async";
import { Context as Context3 } from "@dxos/context";
import { failUndefined as failUndefined2 } from "@dxos/debug";
import { invariant as invariant4 } from "@dxos/invariant";
import { PublicKey } from "@dxos/keys";
import { log as log5, logInfo as logInfo2 } from "@dxos/log";
import { RpcClosedError as RpcClosedError2, TimeoutError as TimeoutError2 } from "@dxos/protocols";

// packages/core/mesh/teleport/src/control-extension.ts
import { asyncTimeout, scheduleTaskInterval, TimeoutError as AsyncTimeoutError } from "@dxos/async";
import { Context } from "@dxos/context";
import { log } from "@dxos/log";
import { RpcClosedError } from "@dxos/protocols";
import { schema } from "@dxos/protocols/proto";
import { createProtoRpcPeer } from "@dxos/rpc";
import { Callback } from "@dxos/util";
var __dxlog_file = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/control-extension.ts";
var HEARTBEAT_RTT_WARN_THRESH = 1e4;
var DEBUG_PRINT_HEARTBEAT = false;
var ControlExtension = class {
  constructor(opts, localPeerId, remotePeerId) {
    this.opts = opts;
    this.localPeerId = localPeerId;
    this.remotePeerId = remotePeerId;
    this._ctx = new Context({
      onError: (err) => {
        this._extensionContext.close(err);
      }
    }, {
      F: __dxlog_file,
      L: 31
    });
    this.onExtensionRegistered = new Callback();
  }
  async registerExtension(name) {
    await this._rpc.rpc.Control.registerExtension({
      name
    });
  }
  async onOpen(extensionContext) {
    this._extensionContext = extensionContext;
    this._rpc = createProtoRpcPeer({
      requested: {
        Control: schema.getService("dxos.mesh.teleport.control.ControlService")
      },
      exposed: {
        Control: schema.getService("dxos.mesh.teleport.control.ControlService")
      },
      handlers: {
        Control: {
          registerExtension: async (request) => {
            this.onExtensionRegistered.call(request.name);
          },
          heartbeat: async (request) => {
            if (DEBUG_PRINT_HEARTBEAT) {
              log("received heartbeat request", {
                ts: request.requestTimestamp,
                localPeerId: this.localPeerId.truncate(),
                remotePeerId: this.remotePeerId.truncate()
              }, {
                F: __dxlog_file,
                L: 69,
                S: this,
                C: (f, a) => f(...a)
              });
            }
            return {
              requestTimestamp: request.requestTimestamp
            };
          }
        }
      },
      port: await extensionContext.createPort("rpc", {
        contentType: 'application/x-protobuf; messageType="dxos.rpc.Message"'
      }),
      timeout: this.opts.heartbeatTimeout
    });
    await this._rpc.open();
    scheduleTaskInterval(this._ctx, async () => {
      const reqTS = /* @__PURE__ */ new Date();
      try {
        const resp = await asyncTimeout(this._rpc.rpc.Control.heartbeat({
          requestTimestamp: reqTS
        }), this.opts.heartbeatTimeout);
        const now = Date.now();
        if (resp.requestTimestamp instanceof Date) {
          if (now - resp.requestTimestamp.getTime() > (HEARTBEAT_RTT_WARN_THRESH < this.opts.heartbeatTimeout ? HEARTBEAT_RTT_WARN_THRESH : this.opts.heartbeatTimeout / 2)) {
            log.warn(`heartbeat RTT for Teleport > ${HEARTBEAT_RTT_WARN_THRESH / 1e3}s`, {
              rtt: now - resp.requestTimestamp.getTime(),
              localPeerId: this.localPeerId.truncate(),
              remotePeerId: this.remotePeerId.truncate()
            }, {
              F: __dxlog_file,
              L: 107,
              S: this,
              C: (f, a) => f(...a)
            });
          } else {
            if (DEBUG_PRINT_HEARTBEAT) {
              log("heartbeat RTT", {
                rtt: now - resp.requestTimestamp.getTime(),
                localPeerId: this.localPeerId.truncate(),
                remotePeerId: this.remotePeerId.truncate()
              }, {
                F: __dxlog_file,
                L: 114,
                S: this,
                C: (f, a) => f(...a)
              });
            }
          }
        }
      } catch (err) {
        const now = Date.now();
        if (err instanceof RpcClosedError) {
          log("ignoring RpcClosedError in heartbeat", void 0, {
            F: __dxlog_file,
            L: 126,
            S: this,
            C: (f, a) => f(...a)
          });
          this._extensionContext.close(err);
          return;
        }
        if (err instanceof AsyncTimeoutError) {
          log("timeout waiting for heartbeat response", {
            err,
            delay: now - reqTS.getTime()
          }, {
            F: __dxlog_file,
            L: 131,
            S: this,
            C: (f, a) => f(...a)
          });
          this.opts.onTimeout(err);
        } else {
          log.info("other error waiting for heartbeat response", {
            err,
            delay: now - reqTS.getTime()
          }, {
            F: __dxlog_file,
            L: 134,
            S: this,
            C: (f, a) => f(...a)
          });
          this.opts.onTimeout(err);
        }
      }
    }, this.opts.heartbeatInterval);
  }
  async onClose(err) {
    await this._ctx.dispose();
    await this._rpc.close();
  }
  async onAbort(err) {
    await this._ctx.dispose();
    await this._rpc.abort();
  }
};
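The RTT warning condition in the heartbeat loop above packs its threshold into a dense ternary. Unpacked, it warns at the fixed 10 s mark when the configured timeout is larger than that, and at half the timeout otherwise. A minimal TypeScript sketch of the same rule (the helper name is ours, not the package's):

```ts
const HEARTBEAT_RTT_WARN_THRESH = 10_000; // ms, mirrors the constant above

// Warn at 10 s when the timeout is generous; at half the timeout when it is tight.
const warnThresholdMs = (heartbeatTimeout: number): number =>
  HEARTBEAT_RTT_WARN_THRESH < heartbeatTimeout
    ? HEARTBEAT_RTT_WARN_THRESH
    : heartbeatTimeout / 2;

warnThresholdMs(60_000); // => 10000 (the default CONTROL_HEARTBEAT_TIMEOUT case)
warnThresholdMs(8_000);  // => 4000
```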
// packages/core/mesh/teleport/src/muxing/framer.ts
import { Duplex } from "node:stream";
import { Event } from "@dxos/async";
import { invariant } from "@dxos/invariant";
import { log as log2 } from "@dxos/log";
var __dxlog_file2 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/muxing/framer.ts";
var FRAME_LENGTH_SIZE = 2;
var Framer = class {
  constructor() {
    // private readonly _tagBuffer = Buffer.alloc(4)
    this._messageCb = void 0;
    this._subscribeCb = void 0;
    this._buffer = void 0;
    this._sendCallbacks = [];
    this._bytesSent = 0;
    this._bytesReceived = 0;
    this._writable = true;
    this.drain = new Event();
    // TODO(egorgripasov): Consider using a Transform stream if it provides better backpressure handling.
    this._stream = new Duplex({
      objectMode: false,
      read: () => {
        this._processResponseQueue();
      },
      write: (chunk, encoding, callback) => {
        invariant(!this._subscribeCb, "Internal Framer bug. Concurrent writes detected.", {
          F: __dxlog_file2,
          L: 40,
          S: this,
          A: [
            "!this._subscribeCb",
            "'Internal Framer bug. Concurrent writes detected.'"
          ]
        });
        this._bytesReceived += chunk.length;
        if (this._buffer && this._buffer.length > 0) {
          this._buffer = Buffer.concat([
            this._buffer,
            chunk
          ]);
        } else {
          this._buffer = chunk;
        }
        if (this._messageCb) {
          this._popFrames();
          callback();
        } else {
          this._subscribeCb = () => {
            this._popFrames();
            this._subscribeCb = void 0;
            callback();
          };
        }
      }
    });
    this.port = {
      send: (message) => {
        return new Promise((resolve) => {
          const frame = encodeFrame(message);
          this._bytesSent += frame.length;
          this._writable = this._stream.push(frame);
          if (!this._writable) {
            this._sendCallbacks.push(resolve);
          } else {
            resolve();
          }
        });
      },
      subscribe: (callback) => {
        invariant(!this._messageCb, "Rpc port already has a message listener.", {
          F: __dxlog_file2,
          L: 79,
          S: this,
          A: [
            "!this._messageCb",
            "'Rpc port already has a message listener.'"
          ]
        });
        this._messageCb = callback;
        this._subscribeCb?.();
        return () => {
          this._messageCb = void 0;
        };
      }
    };
  }
  get stream() {
    return this._stream;
  }
  get bytesSent() {
    return this._bytesSent;
  }
  get bytesReceived() {
    return this._bytesReceived;
  }
  get writable() {
    return this._writable;
  }
  _processResponseQueue() {
    const responseQueue = this._sendCallbacks;
    this._sendCallbacks = [];
    this._writable = true;
    this.drain.emit();
    responseQueue.forEach((cb) => cb());
  }
  /**
   * Attempts to pop frames from the buffer and call the message callback.
   */
  _popFrames() {
    let offset = 0;
    while (offset < this._buffer.length) {
      const frame = decodeFrame(this._buffer, offset);
      if (!frame) {
        break;
      }
      offset += frame.bytesConsumed;
      this._messageCb(frame.payload);
    }
    if (offset < this._buffer.length) {
      this._buffer = this._buffer.subarray(offset);
    } else {
      this._buffer = void 0;
    }
  }
  destroy() {
    if (this._stream.readableLength > 0) {
      log2("framer destroyed while there are still read bytes in the buffer.", void 0, {
        F: __dxlog_file2,
        L: 140,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    if (this._stream.writableLength > 0) {
      log2.warn("framer destroyed while there are still write bytes in the buffer.", void 0, {
        F: __dxlog_file2,
        L: 143,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    this._stream.destroy();
  }
};
var decodeFrame = (buffer, offset) => {
  if (buffer.length < offset + FRAME_LENGTH_SIZE) {
    return void 0;
  }
  const frameLength = buffer.readUInt16BE(offset);
  const bytesConsumed = FRAME_LENGTH_SIZE + frameLength;
  if (buffer.length < offset + bytesConsumed) {
    return void 0;
  }
  const payload = buffer.subarray(offset + FRAME_LENGTH_SIZE, offset + bytesConsumed);
  return {
    payload,
    bytesConsumed
  };
};
var encodeFrame = (payload) => {
  const frame = Buffer.allocUnsafe(FRAME_LENGTH_SIZE + payload.length);
  frame.writeUInt16BE(payload.length, 0);
  frame.set(payload, FRAME_LENGTH_SIZE);
  return frame;
};
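The framing above is a plain length-prefix protocol: each frame is a two-byte big-endian payload length followed by the payload, which caps a single frame at 65535 bytes. A standalone round trip of that wire format, mirroring `encodeFrame`/`decodeFrame` (a sketch, not the package API):

```ts
import assert from "node:assert";

const FRAME_LENGTH_SIZE = 2; // mirrors the constant above

// Encode: [u16be length][payload].
const payload = Buffer.from("hello");
const frame = Buffer.allocUnsafe(FRAME_LENGTH_SIZE + payload.length);
frame.writeUInt16BE(payload.length, 0);
frame.set(payload, FRAME_LENGTH_SIZE);

// Like decodeFrame, a partial buffer decodes to nothing yet...
const partial = frame.subarray(0, 3);
assert(partial.length < FRAME_LENGTH_SIZE + partial.readUInt16BE(0));

// ...while a complete buffer yields the payload and the bytes consumed.
const length = frame.readUInt16BE(0);
assert.deepStrictEqual(
  frame.subarray(FRAME_LENGTH_SIZE, FRAME_LENGTH_SIZE + length),
  payload
);
```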
// packages/core/mesh/teleport/src/muxing/muxer.ts
import { Duplex as Duplex2 } from "node:stream";
import { scheduleTaskInterval as scheduleTaskInterval2, Event as Event3, Trigger, asyncTimeout as asyncTimeout2 } from "@dxos/async";
import { Context as Context2 } from "@dxos/context";
import { failUndefined } from "@dxos/debug";
import { invariant as invariant3 } from "@dxos/invariant";
import { log as log4, logInfo } from "@dxos/log";
import { TimeoutError } from "@dxos/protocols";
import { schema as schema2 } from "@dxos/protocols/proto";

// packages/core/mesh/teleport/src/muxing/balancer.ts
import * as varint from "varint";
import { Event as Event2 } from "@dxos/async";
import { invariant as invariant2 } from "@dxos/invariant";
import { log as log3 } from "@dxos/log";
var __dxlog_file3 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/muxing/balancer.ts";
var MAX_CHUNK_SIZE = 8192;
var Balancer = class {
  constructor(_sysChannelId) {
    this._sysChannelId = _sysChannelId;
    this._lastCallerIndex = 0;
    this._channels = [];
    this._framer = new Framer();
    this._sendBuffers = /* @__PURE__ */ new Map();
    this._receiveBuffers = /* @__PURE__ */ new Map();
    this._sending = false;
    this.incomingData = new Event2();
    this.stream = this._framer.stream;
    this._channels.push(_sysChannelId);
    this._framer.port.subscribe(this._processIncomingMessage.bind(this));
  }
  get bytesSent() {
    return this._framer.bytesSent;
  }
  get bytesReceived() {
    return this._framer.bytesReceived;
  }
  get buffersCount() {
    return this._sendBuffers.size;
  }
  addChannel(channel) {
    this._channels.push(channel);
  }
  pushData(data, trigger, channelId) {
    this._enqueueChunk(data, trigger, channelId);
    this._sendChunks().catch((err) => log3.catch(err, void 0, {
      F: __dxlog_file3,
      L: 75,
      S: this,
      C: (f, a) => f(...a)
    }));
  }
  destroy() {
    if (this._sendBuffers.size !== 0) {
      log3.info("destroying balancer with pending calls", void 0, {
        F: __dxlog_file3,
        L: 80,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    this._sendBuffers.clear();
    this._framer.destroy();
  }
  _processIncomingMessage(msg) {
    const { channelId, dataLength, chunk } = decodeChunk(msg, (channelId2) => !this._receiveBuffers.has(channelId2));
    if (!this._receiveBuffers.has(channelId)) {
      if (chunk.length < dataLength) {
        this._receiveBuffers.set(channelId, {
          buffer: Buffer.from(chunk),
          msgLength: dataLength
        });
      } else {
        this.incomingData.emit(chunk);
      }
    } else {
      const channelBuffer = this._receiveBuffers.get(channelId);
      channelBuffer.buffer = Buffer.concat([
        channelBuffer.buffer,
        chunk
      ]);
      if (channelBuffer.buffer.length < channelBuffer.msgLength) {
        return;
      }
      const msg2 = channelBuffer.buffer;
      this._receiveBuffers.delete(channelId);
      this.incomingData.emit(msg2);
    }
  }
  _getNextCallerId() {
    if (this._sendBuffers.has(this._sysChannelId)) {
      return this._sysChannelId;
    }
    const index = this._lastCallerIndex;
    this._lastCallerIndex = (this._lastCallerIndex + 1) % this._channels.length;
    return this._channels[index];
  }
  _enqueueChunk(data, trigger, channelId) {
    if (!this._channels.includes(channelId)) {
      throw new Error(`Unknown channel ${channelId}`);
    }
    if (!this._sendBuffers.has(channelId)) {
      this._sendBuffers.set(channelId, []);
    }
    const sendBuffer = this._sendBuffers.get(channelId);
    const chunks = [];
    for (let idx = 0; idx < data.length; idx += MAX_CHUNK_SIZE) {
      chunks.push(data.subarray(idx, idx + MAX_CHUNK_SIZE));
    }
    chunks.forEach((chunk, index) => {
      const msg = encodeChunk({
        chunk,
        channelId,
        dataLength: index === 0 ? data.length : void 0
      });
      sendBuffer.push({
        msg,
        trigger: index === chunks.length - 1 ? trigger : void 0
      });
    });
  }
  // get the next chunk or null if there are no chunks remaining
  _getNextChunk() {
    let chunk;
    while (this._sendBuffers.size > 0) {
      const channelId = this._getNextCallerId();
      const sendBuffer = this._sendBuffers.get(channelId);
      if (!sendBuffer) {
        continue;
      }
      chunk = sendBuffer.shift();
      if (!chunk) {
        continue;
      }
      if (sendBuffer.length === 0) {
        this._sendBuffers.delete(channelId);
      }
      return chunk;
    }
    return null;
  }
  async _sendChunks() {
    if (this._sending) {
      return;
    }
    this._sending = true;
    let chunk;
    chunk = this._getNextChunk();
    while (chunk) {
      if (!this._framer.writable) {
        log3("PAUSE for drain", void 0, {
          F: __dxlog_file3,
          L: 179,
          S: this,
          C: (f, a) => f(...a)
        });
        await this._framer.drain.waitForCount(1);
        log3("RESUME for drain", void 0, {
          F: __dxlog_file3,
          L: 181,
          S: this,
          C: (f, a) => f(...a)
        });
      }
      try {
        await this._framer.port.send(chunk.msg);
        chunk.trigger?.wake();
      } catch (err) {
        log3("Error sending chunk", {
          err
        }, {
          F: __dxlog_file3,
          L: 187,
          S: this,
          C: (f, a) => f(...a)
        });
        chunk.trigger?.throw(err);
      }
      chunk = this._getNextChunk();
    }
    invariant2(this._sendBuffers.size === 0, "sendBuffers not empty", {
      F: __dxlog_file3,
      L: 192,
      S: this,
      A: [
        "this._sendBuffers.size === 0",
        "'sendBuffers not empty'"
      ]
    });
    this._sending = false;
  }
};
var encodeChunk = ({ channelId, dataLength, chunk }) => {
  const channelTagLength = varint.encodingLength(channelId);
  const dataLengthLength = dataLength ? varint.encodingLength(dataLength) : 0;
  const message = Buffer.allocUnsafe(channelTagLength + dataLengthLength + chunk.length);
  varint.encode(channelId, message);
  if (dataLength) {
    varint.encode(dataLength, message, channelTagLength);
  }
  message.set(chunk, channelTagLength + dataLengthLength);
  return message;
};
var decodeChunk = (data, withLength) => {
  const channelId = varint.decode(data);
  let dataLength;
  let offset = varint.decode.bytes;
  if (withLength(channelId)) {
    dataLength = varint.decode(data, offset);
    offset += varint.decode.bytes;
  }
  const chunk = data.subarray(offset);
  return {
    channelId,
    dataLength,
    chunk
  };
};
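`encodeChunk`/`decodeChunk` give each multiplexed chunk a compact header: a varint channel id, then, only on the first chunk of a message, a varint total length (the receiver knows to expect it because it has no partially assembled buffer for that channel yet, which is what the `withLength` callback tests). A round trip of that header using the same `varint` package, with sample values of our own choosing:

```ts
import * as varint from "varint";

// First chunk of a 20000-byte message on channel 3 (MAX_CHUNK_SIZE is 8192).
const channelId = 3;
const dataLength = 20000;
const chunk = Buffer.alloc(8192);

const idLen = varint.encodingLength(channelId);
const lenLen = varint.encodingLength(dataLength);
const message = Buffer.allocUnsafe(idLen + lenLen + chunk.length);
varint.encode(channelId, message);          // header: channel id
varint.encode(dataLength, message, idLen);  // header: total length (first chunk only)
message.set(chunk, idLen + lenLen);

// Decode, mirroring decodeChunk's use of varint.decode.bytes.
const decodedId = varint.decode(message);             // => 3
let offset = varint.decode.bytes;
const decodedLength = varint.decode(message, offset); // => 20000
offset += varint.decode.bytes;
const decodedChunk = message.subarray(offset);        // => 8192 chunk bytes
```

Continuation chunks omit the length field entirely, so their overhead is just the channel-id varint.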
// packages/core/mesh/teleport/src/muxing/muxer.ts
function _ts_decorate(decorators, target, key, desc) {
  var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
  else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
  return c > 3 && r && Object.defineProperty(target, key, r), r;
}
var __dxlog_file4 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/muxing/muxer.ts";
var Command = schema2.getCodecForType("dxos.mesh.muxer.Command");
var DEFAULT_SEND_COMMAND_TIMEOUT = 6e4;
var DESTROY_COMMAND_SEND_TIMEOUT = 5e3;
var STATS_INTERVAL = 1e3;
var MAX_SAFE_FRAME_SIZE = 1e6;
var SYSTEM_CHANNEL_ID = 0;
var GRACEFUL_CLOSE_TIMEOUT = 3e3;
var Muxer = class {
  constructor() {
    this._balancer = new Balancer(SYSTEM_CHANNEL_ID);
    this._channelsByLocalId = /* @__PURE__ */ new Map();
    this._channelsByTag = /* @__PURE__ */ new Map();
    this._ctx = new Context2(void 0, {
      F: __dxlog_file4,
      L: 108
    });
    this._nextId = 1;
    this._closing = false;
    this._destroying = false;
    this._disposed = false;
    this._lastStats = void 0;
    this._lastChannelStats = /* @__PURE__ */ new Map();
    this.afterClosed = new Event3();
    this.statsUpdated = new Event3();
    this.stream = this._balancer.stream;
    this._balancer.incomingData.on(async (msg) => {
      await this._handleCommand(Command.decode(msg));
    });
  }
  setSessionId(sessionId) {
    this._sessionId = sessionId;
  }
  get sessionIdString() {
    return this._sessionId ? this._sessionId.truncate() : "none";
  }
  /**
   * Creates a duplex Node.js-style stream.
   * The remote peer is expected to call `createStream` with the same tag.
   * The stream is immediately readable and writable.
   * NOTE: The data will be buffered until the stream is opened remotely with the same tag (may cause a memory leak).
   */
  async createStream(tag, opts = {}) {
    const channel = this._getOrCreateStream({
      tag,
      contentType: opts.contentType
    });
    invariant3(!channel.push, `Channel already open: ${tag}`, {
      F: __dxlog_file4,
      L: 152,
      S: this,
      A: [
        "!channel.push",
        "`Channel already open: ${tag}`"
      ]
    });
    const stream = new Duplex2({
      write: (data, encoding, callback) => {
        this._sendData(channel, data).then(() => callback()).catch(callback);
      },
      read: () => {
      }
    });
    channel.push = (data) => {
      channel.stats.bytesReceived += data.length;
      stream.push(data);
    };
    channel.destroy = (err) => {
      if (err) {
        if (stream.listeners("error").length > 0) {
          stream.destroy(err);
        } else {
          stream.destroy();
        }
      } else {
        stream.destroy();
      }
    };
    try {
      await this._sendCommand({
        openChannel: {
          id: channel.id,
          tag: channel.tag,
          contentType: channel.contentType
        }
      }, SYSTEM_CHANNEL_ID);
    } catch (err) {
      this._destroyChannel(channel, err);
      throw err;
    }
    return stream;
  }
  /**
   * Creates an RPC port.
   * The remote peer is expected to call `createPort` with the same tag.
   * The port is immediately usable.
   * NOTE: The data will be buffered until the stream is opened remotely with the same tag (may cause a memory leak).
   */
  async createPort(tag, opts = {}) {
    const channel = this._getOrCreateStream({
      tag,
      contentType: opts.contentType
    });
    invariant3(!channel.push, `Channel already open: ${tag}`, {
      F: __dxlog_file4,
      L: 212,
      S: this,
      A: [
        "!channel.push",
        "`Channel already open: ${tag}`"
      ]
    });
    let inboundBuffer = [];
    let callback;
    channel.push = (data) => {
      channel.stats.bytesReceived += data.length;
      if (callback) {
        callback(data);
      } else {
        inboundBuffer.push(data);
      }
    };
    const port = {
      send: async (data, timeout) => {
        await this._sendData(channel, data, timeout);
      },
      subscribe: (cb) => {
        invariant3(!callback, "Only one subscriber is allowed", {
          F: __dxlog_file4,
          L: 234,
          S: this,
          A: [
            "!callback",
            "'Only one subscriber is allowed'"
          ]
        });
        callback = cb;
        for (const data of inboundBuffer) {
          cb(data);
        }
        inboundBuffer = [];
      }
    };
    try {
      await this._sendCommand({
        openChannel: {
          id: channel.id,
          tag: channel.tag,
          contentType: channel.contentType
        }
      }, SYSTEM_CHANNEL_ID);
    } catch (err) {
      this._destroyChannel(channel, err);
      throw err;
    }
    return port;
  }
  // initiate graceful close
  async close(err) {
    if (this._destroying) {
      log4("already destroying, ignoring graceful close request", void 0, {
        F: __dxlog_file4,
        L: 267,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    if (this._closing) {
      log4("already closing, ignoring graceful close request", void 0, {
        F: __dxlog_file4,
        L: 271,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    this._closing = true;
    await this._sendCommand({
      close: {
        error: err?.message
      }
    }, SYSTEM_CHANNEL_ID, DESTROY_COMMAND_SEND_TIMEOUT).catch(async (err2) => {
      log4("error sending close command", {
        err: err2
      }, {
        F: __dxlog_file4,
        L: 286,
        S: this,
        C: (f, a) => f(...a)
      });
      await this._dispose(err2);
    });
    await asyncTimeout2(this._dispose(err), GRACEFUL_CLOSE_TIMEOUT, new TimeoutError("gracefully closing muxer"));
  }
  // force close without confirmation
  async destroy(err) {
    if (this._destroying) {
      log4("already destroying, ignoring destroy request", void 0, {
        F: __dxlog_file4,
        L: 299,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    this._destroying = true;
    void this._ctx.dispose();
    if (this._closing) {
      log4("destroy cancelling graceful close", void 0, {
        F: __dxlog_file4,
        L: 305,
        S: this,
        C: (f, a) => f(...a)
      });
      this._closing = false;
    } else {
      await this._sendCommand({
        close: {
          error: err?.message
        }
      }, SYSTEM_CHANNEL_ID).catch(async (err2) => {
        log4("error sending courtesy close command", {
          err: err2
        }, {
          F: __dxlog_file4,
          L: 318,
          S: this,
          C: (f, a) => f(...a)
        });
      });
    }
    this._dispose(err).catch((err2) => {
      log4("error disposing after destroy", {
        err: err2
      }, {
        F: __dxlog_file4,
        L: 323,
        S: this,
        C: (f, a) => f(...a)
      });
    });
  }
  // complete the termination, graceful or otherwise
  async _dispose(err) {
    if (this._disposed) {
      log4("already destroyed, ignoring dispose request", void 0, {
        F: __dxlog_file4,
        L: 331,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    void this._ctx.dispose();
    await this._balancer.destroy();
    for (const channel of this._channelsByTag.values()) {
      channel.destroy?.(err);
    }
    this._disposed = true;
    await this._emitStats();
    this.afterClosed.emit(err);
    this._channelsByLocalId.clear();
    this._channelsByTag.clear();
  }
  async _handleCommand(cmd) {
    if (this._disposed) {
      log4.warn("Received command after disposed", {
        cmd
      }, {
        F: __dxlog_file4,
        L: 354,
        S: this,
        C: (f, a) => f(...a)
      });
      return;
    }
    if (cmd.close) {
      if (!this._closing) {
        log4("received peer close, initiating my own graceful close", void 0, {
          F: __dxlog_file4,
          L: 360,
          S: this,
          C: (f, a) => f(...a)
        });
        await this.close(new Error("received peer close"));
      } else {
        log4("received close from peer, already closing", void 0, {
          F: __dxlog_file4,
          L: 363,
          S: this,
          C: (f, a) => f(...a)
        });
      }
      return;
    }
    if (cmd.openChannel) {
      const channel = this._getOrCreateStream({
        tag: cmd.openChannel.tag,
        contentType: cmd.openChannel.contentType
      });
      channel.remoteId = cmd.openChannel.id;
      for (const data of channel.buffer) {
        await this._sendCommand({
          data: {
            channelId: channel.remoteId,
            data
          }
        }, channel.id);
      }
      channel.buffer = [];
    } else if (cmd.data) {
      const stream = this._channelsByLocalId.get(cmd.data.channelId) ?? failUndefined();
      if (!stream.push) {
        log4.warn("Received data for channel before it was opened", {
          tag: stream.tag
        }, {
          F: __dxlog_file4,
          L: 392,
          S: this,
          C: (f, a) => f(...a)
        });
        return;
      }
      stream.push(cmd.data.data);
    }
  }
  async _sendCommand(cmd, channelId = -1, timeout = DEFAULT_SEND_COMMAND_TIMEOUT) {
    if (this._disposed) {
      return;
    }
    try {
      const trigger = new Trigger();
      this._balancer.pushData(Command.encode(cmd), trigger, channelId);
      await trigger.wait({
        timeout
      });
    } catch (err) {
      await this.destroy(err);
    }
  }
  _getOrCreateStream(params) {
    if (this._channelsByTag.size === 0) {
      scheduleTaskInterval2(this._ctx, async () => this._emitStats(), STATS_INTERVAL);
    }
    let channel = this._channelsByTag.get(params.tag);
    if (!channel) {
      channel = {
        id: this._nextId++,
        remoteId: null,
        tag: params.tag,
        contentType: params.contentType,
        buffer: [],
        push: null,
        destroy: null,
        stats: {
          bytesSent: 0,
          bytesReceived: 0
        }
      };
      this._channelsByTag.set(channel.tag, channel);
      this._channelsByLocalId.set(channel.id, channel);
      this._balancer.addChannel(channel.id);
    }
    return channel;
  }
  async _sendData(channel, data, timeout) {
    if (data.length > MAX_SAFE_FRAME_SIZE) {
      log4.warn("frame size exceeds maximum safe value", {
        size: data.length,
        threshold: MAX_SAFE_FRAME_SIZE
      }, {
        F: __dxlog_file4,
        L: 442,
        S: this,
        C: (f, a) => f(...a)
      });
    }
    channel.stats.bytesSent += data.length;
    if (channel.remoteId === null) {
      channel.buffer.push(data);
      return;
    }
    await this._sendCommand({
      data: {
        channelId: channel.remoteId,
        data
      }
    }, channel.id, timeout);
  }
  _destroyChannel(channel, err) {
    if (channel.destroy) {
      channel.destroy(err);
    }
    this._channelsByLocalId.delete(channel.id);
    this._channelsByTag.delete(channel.tag);
  }
  async _emitStats() {
    if (this._disposed || this._destroying) {
      if (!this._lastStats) {
        return;
      }
      const lastStats = this._lastStats;
      this._lastStats = void 0;
      lastStats.readBufferSize = 0;
      lastStats.writeBufferSize = 0;
      for (const c of lastStats.channels) {
        c.writeBufferSize = 0;
      }
      this.statsUpdated.emit(lastStats);
      this._lastChannelStats.clear();
      return;
    }
    const bytesSent = this._balancer.bytesSent;
    const bytesReceived = this._balancer.bytesReceived;
    const now = Date.now();
    const interval = this._lastStats ? (now - this._lastStats.timestamp) / 1e3 : 0;
    const calculateThroughput = (current, last) => last ? {
      bytesSentRate: interval ? (current.bytesSent - last.bytesSent) / interval : void 0,
      bytesReceivedRate: interval ? (current.bytesReceived - last.bytesReceived) / interval : void 0
    } : {};
    this._lastStats = {
      timestamp: now,
      channels: Array.from(this._channelsByTag.values()).map((channel) => {
        const stats = {
          id: channel.id,
          tag: channel.tag,
          contentType: channel.contentType,
          writeBufferSize: channel.buffer.length,
          bytesSent: channel.stats.bytesSent,
          bytesReceived: channel.stats.bytesReceived,
          ...calculateThroughput(channel.stats, this._lastChannelStats.get(channel.id))
        };
        this._lastChannelStats.set(channel.id, stats);
        return stats;
      }),
      bytesSent,
      bytesReceived,
      ...calculateThroughput({
        bytesSent,
        bytesReceived
      }, this._lastStats),
      readBufferSize: this._balancer.stream.readableLength,
      writeBufferSize: this._balancer.stream.writableLength
    };
    this.statsUpdated.emit(this._lastStats);
  }
};
_ts_decorate([
  logInfo
], Muxer.prototype, "sessionIdString", null);
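As the doc comments above note, a channel only starts flowing once both sides have created it with the same tag; until then, outbound writes are buffered on the channel. A minimal sketch of two muxers wired back-to-back (assuming `Muxer` is re-exported from the package root, which the source layout suggests but this diff does not confirm):

```ts
import { Muxer } from "@dxos/teleport"; // assumed export path

const alice = new Muxer();
const bob = new Muxer();
// Connect the two balancer streams directly; in production this would be a
// network transport.
alice.stream.pipe(bob.stream).pipe(alice.stream);

// Both peers must open the channel with the same tag.
const portA = await alice.createPort("demo/rpc");
const portB = await bob.createPort("demo/rpc");

portB.subscribe((msg) => console.log("bob received:", Buffer.from(msg).toString()));
await portA.send(Buffer.from("ping"));

await alice.close(); // graceful close; bob follows on receiving the close command
```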
// packages/core/mesh/teleport/src/teleport.ts
function _ts_decorate2(decorators, target, key, desc) {
  var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
  if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
  else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
  return c > 3 && r && Object.defineProperty(target, key, r), r;
}
var __dxlog_file5 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/teleport.ts";
var CONTROL_HEARTBEAT_INTERVAL = 1e4;
var CONTROL_HEARTBEAT_TIMEOUT = 6e4;
var Teleport = class {
  constructor({ initiator, localPeerId, remotePeerId, ...rest }) {
    this._ctx = new Context3({
      onError: (err) => {
        void this.destroy(err).catch(() => {
          log5.error("Error during destroy", err, {
            F: __dxlog_file5,
            L: 41,
            S: this,
            C: (f, a) => f(...a)
          });
        });
      }
    }, {
      F: __dxlog_file5,
      L: 38
    });
    this._muxer = new Muxer();
    this._extensions = /* @__PURE__ */ new Map();
    this._remoteExtensions = /* @__PURE__ */ new Set();
    this._open = false;
    this._destroying = false;
    this._aborting = false;
    invariant4(typeof initiator === "boolean", void 0, {
      F: __dxlog_file5,
      L: 62,
      S: this,
      A: [
        "typeof initiator === 'boolean'",
        ""
      ]
    });
    invariant4(PublicKey.isPublicKey(localPeerId), void 0, {
      F: __dxlog_file5,
      L: 63,
      S: this,
      A: [
        "PublicKey.isPublicKey(localPeerId)",
        ""
      ]
    });
    invariant4(PublicKey.isPublicKey(remotePeerId), void 0, {
      F: __dxlog_file5,
      L: 64,
      S: this,
      A: [
        "PublicKey.isPublicKey(remotePeerId)",
        ""
      ]
    });
    this.initiator = initiator;
    this.localPeerId = localPeerId;
    this.remotePeerId = remotePeerId;
    this._control = new ControlExtension({
      heartbeatInterval: rest.controlHeartbeatInterval ?? CONTROL_HEARTBEAT_INTERVAL,
      heartbeatTimeout: rest.controlHeartbeatTimeout ?? CONTROL_HEARTBEAT_TIMEOUT,
      onTimeout: () => {
        if (this._destroying || this._aborting) {
          return;
        }
        log5.info("abort teleport due to onTimeout in ControlExtension", void 0, {
          F: __dxlog_file5,
          L: 77,
          S: this,
          C: (f, a) => f(...a)
        });
        this.abort(new TimeoutError2("control extension")).catch((err) => log5.catch(err, void 0, {
          F: __dxlog_file5,
          L: 78,
          S: this,
          C: (f, a) => f(...a)
        }));
      }
    }, this.localPeerId, this.remotePeerId);
    this._control.onExtensionRegistered.set(async (name) => {
      log5("remote extension", {
        name
      }, {
        F: __dxlog_file5,
        L: 86,
        S: this,
        C: (f, a) => f(...a)
      });
      invariant4(!this._remoteExtensions.has(name), "Remote extension already exists", {
        F: __dxlog_file5,
        L: 87,
        S: this,
        A: [
          "!this._remoteExtensions.has(name)",
          "'Remote extension already exists'"
        ]
      });
      this._remoteExtensions.add(name);
      if (this._extensions.has(name)) {
        try {
          await this._openExtension(name);
        } catch (err) {
          await this.destroy(err);
        }
      }
    });
    {
      this._muxer.stream.on("close", async () => {
        if (this._destroying || this._aborting) {
          log5("destroy teleport due to muxer stream close, skipping due to already destroying/aborting", void 0, {
            F: __dxlog_file5,
            L: 103,
            S: this,
            C: (f, a) => f(...a)
          });
          return;
        }
        await this.destroy();
      });
      this._muxer.stream.on("error", async (err) => {
        await this.destroy(err);
      });
    }
    this._muxer.statsUpdated.on((stats) => {
      log5.trace("dxos.mesh.teleport.stats", {
        localPeerId,
        remotePeerId,
        bytesSent: stats.bytesSent,
        bytesSentRate: stats.bytesSentRate,
        bytesReceived: stats.bytesReceived,
        bytesReceivedRate: stats.bytesReceivedRate,
        channels: stats.channels
      }, {
        F: __dxlog_file5,
        L: 116,
        S: this,
        C: (f, a) => f(...a)
      });
    });
  }
  get isOpen() {
    return this._open;
  }
  get sessionIdString() {
    return this._sessionId ? this._sessionId.truncate() : "none";
  }
  get stream() {
    return this._muxer.stream;
  }
  get stats() {
    return this._muxer.statsUpdated;
  }
  /**
   * Blocks until the handshake is complete.
   */
  async open(sessionId = PublicKey.random()) {
    this._sessionId = sessionId;
    log5("open", void 0, {
      F: __dxlog_file5,
      L: 150,
      S: this,
      C: (f, a) => f(...a)
    });
    this._setExtension("dxos.mesh.teleport.control", this._control);
    await this._openExtension("dxos.mesh.teleport.control");
    this._open = true;
    this._muxer.setSessionId(sessionId);
  }
  async close(err) {
    await this.destroy(err);
  }
  async abort(err) {
    if (this._aborting || this._destroying) {
      return;
    }
    this._aborting = true;
    this._open = false;
    if (this._ctx.disposed) {
      return;
    }
    await this._ctx.dispose();
    for (const extension of this._extensions.values()) {
      try {
        await extension.onAbort(err);
      } catch (err2) {
        log5.catch(err2, void 0, {
          F: __dxlog_file5,
          L: 180,
          S: this,
          C: (f, a) => f(...a)
        });
      }
    }
    await this._muxer.destroy(err);
  }
  async destroy(err) {
    if (this._destroying || this._aborting) {
      return;
    }
    log5("destroying teleport...", {
      extensionsCount: this._extensions.size
    }, {
      F: __dxlog_file5,
      L: 193,
      S: this,
      C: (f, a) => f(...a)
    });
    this._destroying = true;
    this._open = false;
    if (this._ctx.disposed) {
      return;
    }
    await this._ctx.dispose();
    for (const extension of this._extensions.values()) {
      try {
        log5("destroying extension", {
          name: extension.constructor.name
        }, {
          F: __dxlog_file5,
          L: 205,
          S: this,
          C: (f, a) => f(...a)
        });
        await extension.onClose(err);
        log5("destroyed extension", {
          name: extension.constructor.name
        }, {
          F: __dxlog_file5,
          L: 207,
          S: this,
          C: (f, a) => f(...a)
        });
      } catch (err2) {
        log5.catch(err2, void 0, {
          F: __dxlog_file5,
          L: 209,
          S: this,
          C: (f, a) => f(...a)
        });
      }
    }
    await this._muxer.close();
    log5("teleport destroyed", void 0, {
      F: __dxlog_file5,
      L: 214,
      S: this,
      C: (f, a) => f(...a)
    });
  }
  addExtension(name, extension) {
    if (!this._open) {
      throw new Error("Not open");
    }
    log5("addExtension", {
      name
    }, {
      F: __dxlog_file5,
      L: 222,
      S: this,
      C: (f, a) => f(...a)
    });
    this._setExtension(name, extension);
    scheduleTask(this._ctx, async () => {
      try {
        await this._control.registerExtension(name);
      } catch (err) {
        if (err instanceof RpcClosedError2) {
          return;
        }
        throw err;
      }
    });
    if (this._remoteExtensions.has(name)) {
      scheduleTask(this._ctx, async () => {
        await this._openExtension(name);
      });
    }
  }
  _setExtension(extensionName, extension) {
    invariant4(!extensionName.includes("/"), "Invalid extension name", {
      F: __dxlog_file5,
      L: 246,
      S: this,
      A: [
        "!extensionName.includes('/')",
        "'Invalid extension name'"
      ]
    });
    invariant4(!this._extensions.has(extensionName), "Extension already exists", {
      F: __dxlog_file5,
      L: 247,
      S: this,
      A: [
        "!this._extensions.has(extensionName)",
        "'Extension already exists'"
      ]
    });
    this._extensions.set(extensionName, extension);
  }
  async _openExtension(extensionName) {
    log5("open extension", {
      extensionName
    }, {
      F: __dxlog_file5,
      L: 252,
      S: this,
      C: (f, a) => f(...a)
    });
    const extension = this._extensions.get(extensionName) ?? failUndefined2();
    const context = {
      initiator: this.initiator,
      localPeerId: this.localPeerId,
      remotePeerId: this.remotePeerId,
      createPort: async (channelName, opts) => {
        invariant4(!channelName.includes("/"), "Invalid channel name", {
          F: __dxlog_file5,
          L: 260,
          S: this,
          A: [
            "!channelName.includes('/')",
            "'Invalid channel name'"
          ]
        });
        return this._muxer.createPort(`${extensionName}/${channelName}`, opts);
      },
      createStream: async (channelName, opts) => {
        invariant4(!channelName.includes("/"), "Invalid channel name", {
          F: __dxlog_file5,
          L: 264,
          S: this,
          A: [
            "!channelName.includes('/')",
            "'Invalid channel name'"
          ]
        });
        return this._muxer.createStream(`${extensionName}/${channelName}`, opts);
      },
      close: (err) => {
        void runInContextAsync(this._ctx, async () => {
          await this.close(err);
        });
      }
    };
    await extension.onOpen(context);
    log5("extension opened", {
      extensionName
    }, {
      F: __dxlog_file5,
      L: 275,
      S: this,
      C: (f, a) => f(...a)
    });
  }
};
_ts_decorate2([
  logInfo2
], Teleport.prototype, "sessionIdString", null);
_ts_decorate2([
  synchronized
], Teleport.prototype, "abort", null);
_ts_decorate2([
  synchronized
], Teleport.prototype, "destroy", null);
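The extension contract Teleport drives above can be read off its call sites: `onOpen` receives a context with scoped `createPort`/`createStream` (channel names are namespaced as `extension/channel`, which is why neither part may contain a slash), `onClose` handles graceful teardown, and `onAbort` handles forced teardown such as a heartbeat timeout. A sketch of those shapes in TypeScript, inferred here rather than copied from the package's published typings:

```ts
import { type Duplex } from "node:stream";
import { type PublicKey } from "@dxos/keys";

interface RpcPort {
  send(data: Uint8Array, timeout?: number): Promise<void>;
  subscribe(cb: (data: Uint8Array) => void): void;
}

interface ExtensionContext {
  initiator: boolean;
  localPeerId: PublicKey;
  remotePeerId: PublicKey;
  // Channel names may not contain "/"; Teleport prefixes them with the
  // extension name before handing them to the Muxer.
  createPort(channelName: string, opts?: { contentType?: string }): Promise<RpcPort>;
  createStream(channelName: string, opts?: { contentType?: string }): Promise<Duplex>;
  close(err?: Error): void;
}

interface TeleportExtension {
  onOpen(context: ExtensionContext): Promise<void>;
  onClose(err?: Error): Promise<void>; // graceful teardown
  onAbort(err?: Error): Promise<void>; // forced teardown (e.g. heartbeat timeout)
}
```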
// packages/core/mesh/teleport/src/testing/test-builder.ts
var __dxlog_file6 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/testing/test-builder.ts";
var TestBuilder = class {
  constructor() {
    this._peers = /* @__PURE__ */ new Set();
  }
  createPeer(opts) {
    const peer = opts.factory();
    this._peers.add(peer);
    return peer;
  }
  *createPeers(opts) {
    while (true) {
      yield this.createPeer(opts);
    }
  }
  async destroy() {
    await Promise.all(Array.from(this._peers).map((agent) => agent.destroy()));
  }
  async connect(peer1, peer2) {
    invariant5(peer1 !== peer2, void 0, {
      F: __dxlog_file6,
      L: 38,
      S: this,
      A: [
        "peer1 !== peer2",
        ""
      ]
    });
    invariant5(this._peers.has(peer1), void 0, {
      F: __dxlog_file6,
      L: 39,
      S: this,
      A: [
        "this._peers.has(peer1)",
        ""
      ]
    });
    invariant5(this._peers.has(peer1), void 0, {
      F: __dxlog_file6,
      L: 40,
      S: this,
      A: [
        "this._peers.has(peer1)",
        ""
      ]
    });
    const connection1 = peer1.createConnection({
      initiator: true,
      remotePeerId: peer2.peerId
    });
    const connection2 = peer2.createConnection({
      initiator: false,
      remotePeerId: peer1.peerId
    });
    pipeStreams(connection1.teleport.stream, connection2.teleport.stream);
    await Promise.all([
      peer1.openConnection(connection1),
      peer2.openConnection(connection2)
    ]);
    return [
      connection1,
      connection2
    ];
  }
  async disconnect(peer1, peer2) {
    invariant5(peer1 !== peer2, void 0, {
      F: __dxlog_file6,
      L: 52,
      S: this,
      A: [
        "peer1 !== peer2",
        ""
      ]
    });
    invariant5(this._peers.has(peer1), void 0, {
      F: __dxlog_file6,
      L: 53,
      S: this,
      A: [
        "this._peers.has(peer1)",
        ""
      ]
    });
    invariant5(this._peers.has(peer1), void 0, {
      F: __dxlog_file6,
      L: 54,
      S: this,
      A: [
        "this._peers.has(peer1)",
        ""
      ]
    });
    const connection1 = Array.from(peer1.connections).find((connection) => connection.remotePeerId.equals(peer2.peerId));
    const connection2 = Array.from(peer2.connections).find((connection) => connection.remotePeerId.equals(peer1.peerId));
    invariant5(connection1, void 0, {
      F: __dxlog_file6,
      L: 63,
      S: this,
      A: [
        "connection1",
        ""
      ]
    });
    invariant5(connection2, void 0, {
      F: __dxlog_file6,
      L: 64,
      S: this,
      A: [
        "connection2",
        ""
      ]
    });
    await Promise.all([
      peer1.closeConnection(connection1),
      peer2.closeConnection(connection2)
    ]);
  }
};
var TestPeer = class {
  constructor(peerId = PublicKey2.random()) {
    this.peerId = peerId;
    this.connections = /* @__PURE__ */ new Set();
  }
  async onOpen(connection) {
  }
  async onClose(connection) {
  }
  createConnection({ initiator, remotePeerId }) {
    const connection = new TestConnection(this.peerId, remotePeerId, initiator);
    this.connections.add(connection);
    return connection;
  }
  async openConnection(connection) {
    invariant5(this.connections.has(connection), void 0, {
      F: __dxlog_file6,
      L: 85,
      S: this,
      A: [
        "this.connections.has(connection)",
        ""
      ]
    });
    await connection.teleport.open(PublicKey2.random());
    await this.onOpen(connection);
  }
  async closeConnection(connection) {
    invariant5(this.connections.has(connection), void 0, {
      F: __dxlog_file6,
      L: 91,
      S: this,
      A: [
        "this.connections.has(connection)",
        ""
      ]
    });
    await this.onClose(connection);
    await connection.teleport.close();
    this.connections.delete(connection);
  }
  async destroy() {
    for (const teleport of this.connections) {
      await this.closeConnection(teleport);
    }
  }
};
var pipeStreams = (stream1, stream2) => {
  pipeline(stream1, stream2, (err) => {
    if (err && err.code !== "ERR_STREAM_PREMATURE_CLOSE") {
      log6.catch(err, void 0, {
        F: __dxlog_file6,
        L: 107,
        S: void 0,
        C: (f, a) => f(...a)
      });
    }
  });
  pipeline(stream2, stream1, (err) => {
    if (err && err.code !== "ERR_STREAM_PREMATURE_CLOSE") {
      log6.catch(err, void 0, {
        F: __dxlog_file6,
        L: 112,
        S: void 0,
        C: (f, a) => f(...a)
      });
    }
  });
};
var TestConnection = class {
  constructor(localPeerId, remotePeerId, initiator) {
    this.localPeerId = localPeerId;
    this.remotePeerId = remotePeerId;
    this.initiator = initiator;
    this.teleport = new Teleport({
      initiator,
      localPeerId,
      remotePeerId
    });
  }
  whenOpen(open) {
    return waitForCondition({
      condition: () => this.teleport.isOpen === open
    });
  }
};
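The package's own tests drive these helpers roughly as follows (a sketch; the `@dxos/teleport/testing` entry point shown in the file list above is assumed to re-export them):

```ts
import { TestBuilder, TestPeer } from "@dxos/teleport/testing"; // assumed export path

const builder = new TestBuilder();
const peer1 = builder.createPeer({ factory: () => new TestPeer() });
const peer2 = builder.createPeer({ factory: () => new TestPeer() });

// Wires the two Teleport streams together and opens both connections.
const [connection1, connection2] = await builder.connect(peer1, peer2);
await connection1.whenOpen(true);

// Tears everything down (closes every connection on every registered peer).
await builder.destroy();
```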
// packages/core/mesh/teleport/src/testing/test-extension.ts
|
1589
|
+
import { asyncTimeout as asyncTimeout3, Trigger as Trigger2 } from "@dxos/async";
|
1590
|
+
import { invariant as invariant6 } from "@dxos/invariant";
|
1591
|
+
import { log as log7 } from "@dxos/log";
|
1592
|
+
import { schema as schema3 } from "@dxos/protocols/proto";
|
1593
|
+
import { createProtoRpcPeer as createProtoRpcPeer2 } from "@dxos/rpc";
|
1594
|
+
var __dxlog_file7 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/testing/test-extension.ts";
|
1595
|
+
var TestExtension = class {
|
1596
|
+
constructor(callbacks = {}) {
|
1597
|
+
this.callbacks = callbacks;
|
1598
|
+
this.open = new Trigger2();
|
1599
|
+
this.closed = new Trigger2();
|
1600
|
+
this.aborted = new Trigger2();
|
1601
|
+
}
|
1602
|
+
get remotePeerId() {
|
1603
|
+
return this.extensionContext?.remotePeerId;
|
1604
|
+
}
|
1605
|
+
async onOpen(context) {
|
1606
|
+
log7("onOpen", {
|
1607
|
+
localPeerId: context.localPeerId,
|
1608
|
+
remotePeerId: context.remotePeerId
|
1609
|
+
}, {
|
1610
|
+
F: __dxlog_file7,
|
1611
|
+
L: 34,
|
1612
|
+
S: this,
|
1613
|
+
C: (f, a) => f(...a)
|
1614
|
+
});
|
1615
|
+
this.extensionContext = context;
|
1616
|
+
this._rpc = createProtoRpcPeer2({
|
1617
|
+
port: await context.createPort("rpc", {
|
1618
|
+
contentType: 'application/x-protobuf; messageType="dxos.rpc.Message"'
|
1619
|
+
}),
|
1620
|
+
requested: {
|
1621
|
+
TestService: schema3.getService("example.testing.rpc.TestService")
|
1622
|
+
},
|
1623
|
+
exposed: {
|
1624
|
+
TestService: schema3.getService("example.testing.rpc.TestService")
|
1625
|
+
},
|
1626
|
+
handlers: {
|
1627
|
+
TestService: {
|
1628
|
+
voidCall: async (request) => {
|
1629
|
+
},
|
1630
|
+
testCall: async (request) => {
|
1631
|
+
return {
|
1632
|
+
data: request.data
|
1633
|
+
};
|
1634
|
+
}
|
1635
|
+
}
|
1636
|
+
},
|
1637
|
+
timeout: 2e3
|
1638
|
+
});
|
1639
|
+
await this._rpc.open();
|
1640
|
+
await this.callbacks.onOpen?.();
|
1641
|
+
this.open.wake();
|
1642
|
+
}
|
1643
|
+
async onClose(err) {
|
1644
|
+
log7("onClose", {
|
1645
|
+
err
|
1646
|
+
}, {
|
1647
|
+
F: __dxlog_file7,
|
1648
|
+
L: 68,
|
1649
|
+
S: this,
|
1650
|
+
C: (f, a) => f(...a)
|
1651
|
+
});
|
1652
|
+
await this.callbacks.onClose?.();
|
1653
|
+
this.closed.wake();
|
1654
|
+
await this._rpc?.close();
|
1655
|
+
}
|
1656
|
+
async onAbort(err) {
|
1657
|
+
log7("onAbort", {
|
1658
|
+
err
|
1659
|
+
}, {
|
1660
|
+
F: __dxlog_file7,
|
1661
|
+
L: 75,
|
1662
|
+
S: this,
|
1663
|
+
C: (f, a) => f(...a)
|
1664
|
+
});
|
1665
|
+
await this.callbacks.onAbort?.();
|
1666
|
+
this.aborted.wake();
|
1667
|
+
await this._rpc?.abort();
|
1668
|
+
}
|
1669
|
+
async test(message = "test") {
|
1670
|
+
await this.open.wait({
|
1671
|
+
timeout: 2e3
|
1672
|
+
});
|
1673
|
+
const res = await asyncTimeout3(this._rpc.rpc.TestService.testCall({
|
1674
|
+
data: message
|
1675
|
+
}), 1500);
|
1676
|
+
invariant6(res.data === message, void 0, {
|
1677
|
+
F: __dxlog_file7,
|
1678
|
+
L: 84,
|
1679
|
+
S: this,
|
1680
|
+
A: [
|
1681
|
+
"res.data === message",
|
1682
|
+
""
|
1683
|
+
]
|
1684
|
+
});
|
1685
|
+
}
|
1686
|
+
/**
|
1687
|
+
* Force-close the connection.
|
1688
|
+
*/
|
1689
|
+
async closeConnection(err) {
|
1690
|
+
this.extensionContext?.close(err);
|
1691
|
+
}
|
1692
|
+
};
|
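Taken together, the class above is exercised roughly like this (a sketch, not the package's verbatim tests; it assumes Teleport.addExtension(name, extension) as used elsewhere in this package, plus an already-connected TestConnection pair):

  const ext1 = new TestExtension();
  const ext2 = new TestExtension();

  // Register one instance on each side of the connection.
  connection1.teleport.addExtension("example.testing.rpc", ext1);
  connection2.teleport.addExtension("example.testing.rpc", ext2);

  // test() waits for onOpen, round-trips the message through
  // TestService.testCall, and asserts the echoed payload matches.
  await ext1.test("hello");
  await ext2.test("hello");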
+
+// packages/core/mesh/teleport/src/testing/test-extension-with-streams.ts
+import { randomBytes } from "node:crypto";
+import { Trigger as Trigger3 } from "@dxos/async";
+import { invariant as invariant7 } from "@dxos/invariant";
+import { log as log8 } from "@dxos/log";
+import { schema as schema4 } from "@dxos/protocols/proto";
+import { createProtoRpcPeer as createProtoRpcPeer3 } from "@dxos/rpc";
+var __dxlog_file8 = "/home/runner/work/dxos/dxos/packages/core/mesh/teleport/src/testing/test-extension-with-streams.ts";
+var TestExtensionWithStreams = class {
+  constructor(callbacks = {}) {
+    this.callbacks = callbacks;
+    this.open = new Trigger3();
+    this.closed = new Trigger3();
+    this.aborted = new Trigger3();
+    this._streams = /* @__PURE__ */ new Map();
+  }
+  get remotePeerId() {
+    return this.extensionContext?.remotePeerId;
+  }
+  async _openStream(streamTag, interval = 5, chunkSize = 2048) {
+    invariant7(!this._streams.has(streamTag), `Stream already exists: ${streamTag}`, {
+      F: __dxlog_file8,
+      L: 39,
+      S: this,
+      A: [
+        "!this._streams.has(streamTag)",
+        "`Stream already exists: ${streamTag}`"
+      ]
+    });
+    const networkStream = await this.extensionContext.createStream(streamTag, {
+      contentType: "application/x-test-stream"
+    });
+    const streamEntry = {
+      networkStream,
+      bytesSent: 0,
+      bytesReceived: 0,
+      sendErrors: 0,
+      receiveErrors: 0,
+      startTimestamp: Date.now()
+    };
+    const pushChunk = () => {
+      streamEntry.timer = setTimeout(() => {
+        const chunk = randomBytes(chunkSize);
+        if (!networkStream.write(chunk, "binary", (err) => {
+          if (!err) {
+            streamEntry.bytesSent += chunk.length;
+          } else {
+            streamEntry.sendErrors += 1;
+          }
+        })) {
+          networkStream.once("drain", pushChunk);
+        } else {
+          process.nextTick(pushChunk);
+        }
+      }, interval);
+    };
+    pushChunk();
+    this._streams.set(streamTag, streamEntry);
+    networkStream.on("data", (data) => {
+      streamEntry.bytesReceived += data.length;
+    });
+    networkStream.on("error", (err) => {
+      streamEntry.receiveErrors += 1;
+    });
+    networkStream.on("close", () => {
+      networkStream.removeAllListeners();
+    });
+    streamEntry.reportingTimer = setInterval(() => {
+      const { bytesSent, bytesReceived, sendErrors, receiveErrors } = streamEntry;
+      log8.trace("dxos.test.stream-stats", {
+        streamTag,
+        bytesSent,
+        bytesReceived,
+        sendErrors,
+        receiveErrors,
+        from: this.extensionContext?.localPeerId,
+        to: this.extensionContext?.remotePeerId
+      }, {
+        F: __dxlog_file8,
+        L: 93,
+        S: this,
+        C: (f, a) => f(...a)
+      });
+    }, 100);
+  }
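  // Note on the write loop in _openStream above: networkStream.write() returns
  // false once the internal buffer is full, so the next chunk is scheduled from
  // the "drain" event rather than immediately via process.nextTick(). bytesSent
  // is only incremented from the write callback when no error occurred, and the
  // 100 ms reportingTimer publishes the running counters through
  // log8.trace("dxos.test.stream-stats", ...).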
+  _closeStream(streamTag) {
+    invariant7(this._streams.has(streamTag), `Stream does not exist: ${streamTag}`, {
+      F: __dxlog_file8,
+      L: 106,
+      S: this,
+      A: [
+        "this._streams.has(streamTag)",
+        "`Stream does not exist: ${streamTag}`"
+      ]
+    });
+    const stream = this._streams.get(streamTag);
+    clearTimeout(stream.timer);
+    clearTimeout(stream.reportingTimer);
+    const { bytesSent, bytesReceived, sendErrors, receiveErrors, startTimestamp } = stream;
+    stream.networkStream.destroy();
+    this._streams.delete(streamTag);
+    return {
+      bytesSent,
+      bytesReceived,
+      sendErrors,
+      receiveErrors,
+      runningTime: Date.now() - (startTimestamp ?? 0)
+    };
+  }
+  async onOpen(context) {
+    log8("onOpen", {
+      localPeerId: context.localPeerId,
+      remotePeerId: context.remotePeerId
+    }, {
+      F: __dxlog_file8,
+      L: 128,
+      S: this,
+      C: (f, a) => f(...a)
+    });
+    this.extensionContext = context;
+    this._rpc = createProtoRpcPeer3({
+      port: await context.createPort("rpc", {
+        contentType: 'application/x-protobuf; messageType="dxos.rpc.Message"'
+      }),
+      requested: {
+        TestServiceWithStreams: schema4.getService("example.testing.rpc.TestServiceWithStreams")
+      },
+      exposed: {
+        TestServiceWithStreams: schema4.getService("example.testing.rpc.TestServiceWithStreams")
+      },
+      handlers: {
+        TestServiceWithStreams: {
+          requestTestStream: async (request) => {
+            const { data: streamTag, streamLoadInterval, streamLoadChunkSize } = request;
+            await this._openStream(streamTag, streamLoadInterval, streamLoadChunkSize);
+            return {
+              data: streamTag
+            };
+          },
+          closeTestStream: async (request) => {
+            const streamTag = request.data;
+            const { bytesSent, bytesReceived, sendErrors, receiveErrors, runningTime } = this._closeStream(streamTag);
+            return {
+              data: streamTag,
+              bytesSent,
+              bytesReceived,
+              sendErrors,
+              receiveErrors,
+              runningTime
+            };
+          }
+        }
+      },
+      timeout: 2e3
+    });
+    await this._rpc.open();
+    await this.callbacks.onOpen?.();
+    this.open.wake();
+  }
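  // The two handlers above form a small symmetric protocol: the caller names a
  // stream tag via requestTestStream, the remote peer opens a matching
  // "application/x-test-stream" stream and starts pushing load, and
  // closeTestStream tears the remote side down and returns its counters so a
  // test can compare local bytesSent against remote bytesReceived (see
  // addNewStream/closeStream below).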
+  async onClose(err) {
+    log8("onClose", {
+      err
+    }, {
+      F: __dxlog_file8,
+      L: 179,
+      S: this,
+      C: (f, a) => f(...a)
+    });
+    await this.callbacks.onClose?.();
+    this.closed.wake();
+    for (const [streamTag, stream] of this._streams.entries()) {
+      log8("closing stream", {
+        streamTag
+      }, {
+        F: __dxlog_file8,
+        L: 183,
+        S: this,
+        C: (f, a) => f(...a)
+      });
+      clearTimeout(stream.timer);
+      clearInterval(stream.reportingTimer);
+      stream.networkStream.destroy();
+    }
+    await this._rpc?.close();
+  }
+  async onAbort(err) {
+    log8("onAbort", {
+      err
+    }, {
+      F: __dxlog_file8,
+      L: 191,
+      S: this,
+      C: (f, a) => f(...a)
+    });
+    await this.callbacks.onAbort?.();
+    this.aborted.wake();
+    await this._rpc?.abort();
+  }
+  async addNewStream(streamLoadInterval, streamLoadChunkSize, streamTag) {
+    await this.open.wait({
+      timeout: 1500
+    });
+    if (!streamTag) {
+      streamTag = `stream-${randomBytes(4).toString("hex")}`;
+    }
+    const { data } = await this._rpc.rpc.TestServiceWithStreams.requestTestStream({
+      data: streamTag,
+      streamLoadInterval,
+      streamLoadChunkSize
+    });
+    invariant7(data === streamTag, void 0, {
+      F: __dxlog_file8,
+      L: 207,
+      S: this,
+      A: [
+        "data === streamTag",
+        ""
+      ]
+    });
+    await this._openStream(streamTag, streamLoadInterval, streamLoadChunkSize);
+    return streamTag;
+  }
+  async closeStream(streamTag) {
+    await this.open.wait({
+      timeout: 1500
+    });
+    const { data, bytesSent, bytesReceived, sendErrors, receiveErrors, runningTime } = await this._rpc.rpc.TestServiceWithStreams.closeTestStream({
+      data: streamTag
+    });
+    invariant7(data === streamTag, void 0, {
+      F: __dxlog_file8,
+      L: 220,
+      S: this,
+      A: [
+        "data === streamTag",
+        ""
+      ]
+    });
+    const local = this._closeStream(streamTag);
+    return {
+      streamTag,
+      stats: {
+        local,
+        remote: {
+          bytesSent,
+          bytesReceived,
+          sendErrors,
+          receiveErrors,
+          runningTime
+        }
+      }
+    };
+  }
+  /**
+   * Force-close the connection.
+   */
+  async closeConnection(err) {
+    this.extensionContext?.close(err);
+  }
+};
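The stream lifecycle implemented by these methods reads roughly as follows (a hypothetical flow; it assumes an extension instance already registered and opened on both peers):

  const extension = new TestExtensionWithStreams();
  // ...register on both sides and wait for onOpen, as with TestExtension...

  // Negotiate a tagged stream pushing a 2048-byte chunk every 5 ms.
  const tag = await extension.addNewStream(5, 2048);

  // Later: close both ends and collect the paired counters.
  const { stats } = await extension.closeStream(tag);
  console.log(stats.local.bytesSent, stats.remote.bytesReceived);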
+
+export {
+  Framer,
+  decodeFrame,
+  encodeFrame,
+  Muxer,
+  Teleport,
+  TestBuilder,
+  TestPeer,
+  TestConnection,
+  TestExtension,
+  TestExtensionWithStreams
+};
+//# sourceMappingURL=chunk-76FPY3V4.mjs.map
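This chunk is the body of the new dist/lib/node-esm build that headlines this release. Assuming package.json wires it up through conditional exports (the exports map itself is not shown in this excerpt), Node ESM consumers would resolve it as:

  // index.mjs re-exports this chunk for ESM consumers.
  import { Teleport, Framer, Muxer } from "@dxos/teleport";

  // Testing helpers, if exposed as a subpath export.
  import { TestBuilder, TestExtension } from "@dxos/teleport/testing";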