@yz-social/webrtc 0.1.3 → 0.1.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +22 -2
- package/index.js +9 -7
- package/package.json +1 -1
- package/spec/portal.js +15 -4
- package/spec/webrtcCapacitySpec.js +32 -9
package/README.md
CHANGED
|
@@ -1,10 +1,30 @@
|
|
|
1
|
+
# @yz-social/webrtc
|
|
1
2
|
|
|
2
|
-
|
|
3
|
+
A wrapper around either the browser's WebRTC, or around @roamhq/wrtc on NodeJS.
|
|
4
|
+
|
|
5
|
+
Installing this package in NodeJS - i.e., with `npm install` in either this package's directory or in some other module that imports this - will install the @roamhq/wrtc dependency. Successfully installing _that_ may require extra C++ tools on the host system.
|
|
6
|
+
|
|
7
|
+
For example, you probably need to have `git bash` or the like installed, and to execute the various `npm` commands within that. During the Windows installation of NodeJS (prior to installing this package), you may be asked whether to install the additional tools for VC++, including Chocolatey. You should check the box to install them, and follow those directions. Installing those tools will occur in a separate window, prompt for proceeding, and may take a very long time to execute.
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
## Some Tweaks
|
|
11
|
+
|
|
12
|
+
See the test cases and spec/ports.js for examples.
|
|
13
|
+
|
|
14
|
+
### Semi-trickled ice
|
|
15
|
+
|
|
16
|
+
RTCPeerConnection generates a bunch of ICE candidates right away, and then more over the next few seconds. It can be a while before it is finished. In this package, we have utilities to collect signals as they occur, and then gather them for sending while accumulating a new set of signals to be sent.
|
|
17
|
+
|
|
18
|
+
### Simultaneous outreach
|
|
19
|
+
|
|
20
|
+
Things can get confused if two nodes try to connect to each other at the same time. There is supposed to be some automatic rollback mechanism, but implementations vary. This code tries to sort that out, if the application can label one of the pair to be "polite" and the other not.
|
|
21
|
+
|
|
22
|
+
### Data channel name event
|
|
3
23
|
|
|
4
24
|
RTCPeerConnection defines a 'datachannel' event, and RTCDataChannel defines an 'open' event, but it is difficult to use them correctly:
|
|
5
25
|
- 'datachannel' fires only for one side of a connection, and only when negotiated:false.
|
|
6
26
|
- To listen for 'open', you must already have the data channel. Not all implementations fire a handler for this when assigned in a 'datachannel' handler, and it can fire multiple times for the same channel name when two sides initiate the channel simultaneously with negotiated:true.
|
|
7
27
|
|
|
8
|
-
|
|
28
|
+
### close event
|
|
9
29
|
|
|
10
30
|
RTCPeerConnection defines a 'signalingstatechange' event in which application handlers can fire code when aPeerConnection.readyState === 'closed', but this is not particularly convenient.
|
package/index.js
CHANGED
|
@@ -80,7 +80,9 @@ export class WebRTC {
|
|
|
80
80
|
// Do not try to close or wait for data channels. It confuses Safari.
|
|
81
81
|
const pc = this.pc;
|
|
82
82
|
if (!pc) return null;
|
|
83
|
-
pc.
|
|
83
|
+
const state = pc.connectionState;
|
|
84
|
+
if (state === 'connected' || state === 'failed') pc.close();
|
|
85
|
+
else this.flog("WebRTC close in unexpected state", state);
|
|
84
86
|
this.closed.resolve(pc); // We do not automatically receive 'connectionstatechange' when our side explicitly closes. (Only if the other does.)
|
|
85
87
|
this.cleanup();
|
|
86
88
|
return this.closed;
|
|
@@ -127,7 +129,7 @@ export class WebRTC {
|
|
|
127
129
|
this.settingRemote = true;
|
|
128
130
|
try {
|
|
129
131
|
await this.pc.setRemoteDescription(description)
|
|
130
|
-
.catch(e => this
|
|
132
|
+
.catch(e => this.log(this.name, 'ignoring error in setRemoteDescription while in state', this.pc.signalingState, e));
|
|
131
133
|
if (offerCollision) this.rolledBack = true;
|
|
132
134
|
} finally {
|
|
133
135
|
this.settingRemote = false;
|
|
@@ -137,7 +139,7 @@ export class WebRTC {
|
|
|
137
139
|
if (description.type === "offer") {
|
|
138
140
|
const answer = await this.pc.createAnswer();
|
|
139
141
|
await this.pc.setLocalDescription(answer)
|
|
140
|
-
.catch(e => this.
|
|
142
|
+
.catch(e => this.log(this.name, 'ignoring error setLocalDescription of answer', e));
|
|
141
143
|
this.signal({ description: this.pc.localDescription });
|
|
142
144
|
}
|
|
143
145
|
|
|
@@ -233,10 +235,11 @@ export class WebRTC {
|
|
|
233
235
|
this.log('setupChannel:', label, dc.id, readyState, 'negotiated:', dc.negotiated);
|
|
234
236
|
const kind = isTheirs ? 'Theirs' : 'Ours';
|
|
235
237
|
dc.webrtc = this;
|
|
236
|
-
dc.onopen =
|
|
238
|
+
dc.onopen = () => { // Idempotent (except for logging), if we do not bash dataChannePromises[label] multiple times.
|
|
239
|
+
dc.onopen = null;
|
|
237
240
|
this.log('channel onopen:', label, dc.id, readyState, 'negotiated:', dc.negotiated);
|
|
238
|
-
this[this.restrictablePromiseKey()][label]?.resolve(dc);
|
|
239
|
-
this[this.restrictablePromiseKey(kind)][label]?.resolve(dc);
|
|
241
|
+
this[this.restrictablePromiseKey()]?.[label]?.resolve(dc);
|
|
242
|
+
this[this.restrictablePromiseKey(kind)]?.[label]?.resolve(dc);
|
|
240
243
|
};
|
|
241
244
|
if (isTheirs) dc.onopen();
|
|
242
245
|
return dc;
|
|
@@ -246,7 +249,6 @@ export class WebRTC {
|
|
|
246
249
|
// This our chance to setupChannel, just as if we had called createChannel
|
|
247
250
|
this.log('ondatachannel:', dc.label, dc.id, dc.readyState, dc.negotiated);
|
|
248
251
|
this.setupChannel(dc);
|
|
249
|
-
dc.onopen(); // It had been opened before we setup, so invoke handler now.
|
|
250
252
|
}
|
|
251
253
|
channelId = 128; // Non-negotiated channel.id get assigned at open by the peer, starting with 0. This avoids conflicts.
|
|
252
254
|
createChannel(name = 'data', {negotiated = false, id = this.channelId++, ...options} = {}) { // Explicitly create channel and set it up.
|
package/package.json
CHANGED
package/spec/portal.js
CHANGED
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
1
2
|
import process from 'node:process';
|
|
2
3
|
import cluster from 'node:cluster';
|
|
3
4
|
import express from 'express';
|
|
@@ -13,6 +14,8 @@ import { WebRTC } from '../index.js';
|
|
|
13
14
|
// For a more complete example, see https://github.com/YZ-social/kdht/blob/main/spec/portal.js
|
|
14
15
|
|
|
15
16
|
const nPortals = parseInt(process.argv[2] || WebRTC.suggestedInstancesLimit);
|
|
17
|
+
const perPortalDelay = parseInt(process.argv[3] || 1e3);
|
|
18
|
+
const port = parseInt(process.argv[4] || 3000);
|
|
16
19
|
|
|
17
20
|
if (cluster.isPrimary) { // Parent process with portal webserver through which clients can bootstrap
|
|
18
21
|
process.title = 'webrtc-test-portal';
|
|
@@ -25,6 +28,7 @@ if (cluster.isPrimary) { // Parent process with portal webserver through which c
|
|
|
25
28
|
worker.on('message', signals => { // Message from a worker, in response to a POST.
|
|
26
29
|
worker.requestResolver?.(signals);
|
|
27
30
|
});
|
|
31
|
+
await new Promise(resolve => setTimeout(resolve, perPortalDelay));
|
|
28
32
|
}
|
|
29
33
|
const workers = Object.values(cluster.workers);
|
|
30
34
|
app.use(logger(':date[iso] :status :method :url :res[content-length] - :response-time ms'));
|
|
@@ -32,7 +36,6 @@ if (cluster.isPrimary) { // Parent process with portal webserver through which c
|
|
|
32
36
|
app.use(express.static(path.resolve(__dirname, '..'))); // Serve files needed for testing browsers.
|
|
33
37
|
app.post('/join/:to', async (req, res, next) => { // Handler for JSON POST requests that provide an array of signals and get signals back.
|
|
34
38
|
const {params, body} = req;
|
|
35
|
-
// Find the specifed worker, or pick one at random. TODO CLEANUP: Remove. We now use as separate /name/:label to pick one.
|
|
36
39
|
const worker = workers[params.to];
|
|
37
40
|
if (!worker) {
|
|
38
41
|
console.warn('no worker', params.to);
|
|
@@ -48,7 +51,8 @@ if (cluster.isPrimary) { // Parent process with portal webserver through which c
|
|
|
48
51
|
|
|
49
52
|
return res.send(response);
|
|
50
53
|
});
|
|
51
|
-
app.listen(
|
|
54
|
+
app.listen(port);
|
|
55
|
+
console.log(new Date(), 'listening on', port);
|
|
52
56
|
} else {
|
|
53
57
|
process.title = 'webrtc-test-bot-' + cluster.worker.id;
|
|
54
58
|
let portal;
|
|
@@ -59,8 +63,15 @@ if (cluster.isPrimary) { // Parent process with portal webserver through which c
|
|
|
59
63
|
function setup() {
|
|
60
64
|
console.log(new Date(), 'launched bot', cluster.worker.id);
|
|
61
65
|
portal = new WebRTC({name: 'portal'});
|
|
62
|
-
portal.getDataChannelPromise('data').then(dc =>
|
|
63
|
-
|
|
66
|
+
portal.getDataChannelPromise('data').then(dc => {
|
|
67
|
+
console.log('connected', cluster.worker.id);
|
|
68
|
+
dc.send('Welcome!');
|
|
69
|
+
});
|
|
70
|
+
portal.closed.then(() => { // Without any explicit message, this is 15 seconds after the other end goes away.
|
|
71
|
+
console.log('disconnected', cluster.worker.id);
|
|
72
|
+
// Not needed for this test, but for other purposes:
|
|
73
|
+
// setup());
|
|
74
|
+
});
|
|
64
75
|
}
|
|
65
76
|
setup();
|
|
66
77
|
}
|
|
@@ -1,18 +1,28 @@
|
|
|
1
1
|
const { describe, it, expect, beforeAll, afterAll, beforeEach, afterEach} = globalThis; // For linters.
|
|
2
2
|
import { WebRTC } from '../index.js';
|
|
3
3
|
|
|
4
|
+
function delay(ms) {
|
|
5
|
+
return new Promise(resolve => setTimeout(resolve, ms));
|
|
6
|
+
}
|
|
7
|
+
|
|
4
8
|
describe("WebRTC capacity", function () {
|
|
5
9
|
let nNodes = 20; // When running all webrtc tests at once, it is important to keep this low. (Memory leak?)
|
|
10
|
+
let perPortalDelay = 1e3;
|
|
11
|
+
let port = 3000;
|
|
12
|
+
let baseURL = `http://localhost:${port}`;
|
|
13
|
+
// Alas, I can't seem to get more than about 150-160 nodes through ngrok, even on a machine that can handle 200 directly.
|
|
14
|
+
//let baseURL = 'https://dorado.ngrok.dev'; // if E.g., node spec/portal.js 200 100; ngrok http 3000 --url https://dorado.ngrok.dev
|
|
6
15
|
|
|
7
16
|
// Uncomment this line if running a stand-alone capacity test.
|
|
8
17
|
// (And also likely comment out the import './webrtcSpec.js' in test.html.)
|
|
9
18
|
// nNodes = WebRTC.suggestedInstancesLimit;
|
|
10
19
|
|
|
11
20
|
const isNodeJS = typeof(globalThis.process) !== 'undefined';
|
|
21
|
+
const portalIsLocal = isNodeJS && baseURL.startsWith('http://localhost');
|
|
12
22
|
let nodes = [];
|
|
13
23
|
beforeAll(async function () {
|
|
14
24
|
|
|
15
|
-
if (
|
|
25
|
+
if (portalIsLocal) {
|
|
16
26
|
const { spawn } = await import('node:child_process');
|
|
17
27
|
const path = await import('path');
|
|
18
28
|
const { fileURLToPath } = await import('url');
|
|
@@ -20,29 +30,42 @@ describe("WebRTC capacity", function () {
|
|
|
20
30
|
const __filename = fileURLToPath(import.meta.url);
|
|
21
31
|
const __dirname = path.dirname(__filename);
|
|
22
32
|
function echo(data) { data = data.slice(0, -1); console.log(data.toString()); }
|
|
23
|
-
const portalProcess = spawn('node', [path.resolve(__dirname, 'portal.js'), nNodes]);
|
|
33
|
+
const portalProcess = spawn('node', [path.resolve(__dirname, 'portal.js'), nNodes, perPortalDelay, port]);
|
|
24
34
|
portalProcess.stdout.on('data', echo);
|
|
25
35
|
portalProcess.stderr.on('data', echo);
|
|
26
|
-
await
|
|
36
|
+
await delay(perPortalDelay * (5 + nNodes));
|
|
27
37
|
}
|
|
28
38
|
|
|
29
39
|
console.log(new Date(), 'creating', nNodes, 'nodes');
|
|
30
40
|
for (let index = 0; index < nNodes; index++) {
|
|
31
|
-
const node = nodes[index] = new WebRTC({name: 'node'});
|
|
32
|
-
|
|
41
|
+
const node = nodes[index] = new WebRTC({name: 'node' + index});
|
|
42
|
+
console.log('connecting', index);
|
|
43
|
+
node.transferSignals = messages => fetch(`${baseURL}/join/${index}`, {
|
|
33
44
|
method: 'POST',
|
|
34
45
|
headers: { 'Content-Type': 'application/json', 'Connection': 'close' },
|
|
35
46
|
body: JSON.stringify(messages)
|
|
36
|
-
}).then(response =>
|
|
47
|
+
}).then(response => {
|
|
48
|
+
if (!response.ok) {
|
|
49
|
+
console.log('fetch', index, 'failed', response.status, response.statusText);
|
|
50
|
+
return null;
|
|
51
|
+
}
|
|
52
|
+
return response.json();
|
|
53
|
+
});
|
|
37
54
|
node.closed.then(() => console.log('closed', index)); // Just for debugging.
|
|
38
55
|
const dataOpened = node.getDataChannelPromise('data')
|
|
39
56
|
.then(dc => node.dataReceived = new Promise(resolve => dc.onmessage = event => resolve(event.data)));
|
|
40
|
-
node.createChannel('data', {negotiated: false});
|
|
57
|
+
node.createChannel('data', {negotiated: false});
|
|
41
58
|
await dataOpened;
|
|
42
59
|
console.log('opened', index);
|
|
60
|
+
if (!portalIsLocal) {
|
|
61
|
+
const maxConnectionsPerNode = 3;
|
|
62
|
+
const maxNgrokConnectionsPerSecond = 120 / 60;
|
|
63
|
+
const secondsPerNode = maxConnectionsPerNode / maxNgrokConnectionsPerSecond;
|
|
64
|
+
await delay(secondsPerNode * 1.5e3); // fudge factor milliseconds/second
|
|
65
|
+
}
|
|
43
66
|
}
|
|
44
67
|
console.log(new Date(), 'finished setup');
|
|
45
|
-
},
|
|
68
|
+
}, nNodes * 4 * perPortalDelay);
|
|
46
69
|
for (let index = 0; index < nNodes; index++) {
|
|
47
70
|
it('opened connection ' + index, function () {
|
|
48
71
|
expect(nodes[index].pc.connectionState).toBe('connected');
|
|
@@ -60,7 +83,7 @@ describe("WebRTC capacity", function () {
|
|
|
60
83
|
expect(pc.connectionState).toBe('closed'));
|
|
61
84
|
delete nodes[index];
|
|
62
85
|
}
|
|
63
|
-
if (
|
|
86
|
+
if (portalIsLocal) {
|
|
64
87
|
const { exec } = await import('node:child_process');
|
|
65
88
|
exec('pkill webrtc-test-');
|
|
66
89
|
}
|