@webex/plugin-meetings 3.10.0-next.9 → 3.10.0-webex-services-ready.1
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries, and is provided for informational purposes only.
- package/dist/breakouts/breakout.js +1 -1
- package/dist/breakouts/index.js +1 -1
- package/dist/constants.js +11 -3
- package/dist/constants.js.map +1 -1
- package/dist/hashTree/constants.js +20 -0
- package/dist/hashTree/constants.js.map +1 -0
- package/dist/hashTree/hashTree.js +515 -0
- package/dist/hashTree/hashTree.js.map +1 -0
- package/dist/hashTree/hashTreeParser.js +1266 -0
- package/dist/hashTree/hashTreeParser.js.map +1 -0
- package/dist/hashTree/types.js +21 -0
- package/dist/hashTree/types.js.map +1 -0
- package/dist/hashTree/utils.js +48 -0
- package/dist/hashTree/utils.js.map +1 -0
- package/dist/interpretation/index.js +1 -1
- package/dist/interpretation/siLanguage.js +1 -1
- package/dist/locus-info/index.js +511 -48
- package/dist/locus-info/index.js.map +1 -1
- package/dist/locus-info/types.js +7 -0
- package/dist/locus-info/types.js.map +1 -0
- package/dist/meeting/index.js +41 -15
- package/dist/meeting/index.js.map +1 -1
- package/dist/meeting/util.js +1 -0
- package/dist/meeting/util.js.map +1 -1
- package/dist/meetings/index.js +112 -70
- package/dist/meetings/index.js.map +1 -1
- package/dist/metrics/constants.js +3 -1
- package/dist/metrics/constants.js.map +1 -1
- package/dist/reachability/clusterReachability.js +44 -358
- package/dist/reachability/clusterReachability.js.map +1 -1
- package/dist/reachability/reachability.types.js +14 -1
- package/dist/reachability/reachability.types.js.map +1 -1
- package/dist/reachability/reachabilityPeerConnection.js +445 -0
- package/dist/reachability/reachabilityPeerConnection.js.map +1 -0
- package/dist/types/constants.d.ts +26 -21
- package/dist/types/hashTree/constants.d.ts +8 -0
- package/dist/types/hashTree/hashTree.d.ts +129 -0
- package/dist/types/hashTree/hashTreeParser.d.ts +260 -0
- package/dist/types/hashTree/types.d.ts +25 -0
- package/dist/types/hashTree/utils.d.ts +9 -0
- package/dist/types/locus-info/index.d.ts +91 -42
- package/dist/types/locus-info/types.d.ts +46 -0
- package/dist/types/meeting/index.d.ts +22 -9
- package/dist/types/meetings/index.d.ts +9 -2
- package/dist/types/metrics/constants.d.ts +2 -0
- package/dist/types/reachability/clusterReachability.d.ts +10 -88
- package/dist/types/reachability/reachability.types.d.ts +12 -1
- package/dist/types/reachability/reachabilityPeerConnection.d.ts +111 -0
- package/dist/webinar/index.js +1 -1
- package/package.json +22 -21
- package/src/constants.ts +13 -1
- package/src/hashTree/constants.ts +9 -0
- package/src/hashTree/hashTree.ts +463 -0
- package/src/hashTree/hashTreeParser.ts +1161 -0
- package/src/hashTree/types.ts +30 -0
- package/src/hashTree/utils.ts +42 -0
- package/src/locus-info/index.ts +556 -85
- package/src/locus-info/types.ts +48 -0
- package/src/meeting/index.ts +58 -26
- package/src/meeting/util.ts +1 -0
- package/src/meetings/index.ts +104 -51
- package/src/metrics/constants.ts +2 -0
- package/src/reachability/clusterReachability.ts +50 -347
- package/src/reachability/reachability.types.ts +15 -1
- package/src/reachability/reachabilityPeerConnection.ts +416 -0
- package/test/unit/spec/hashTree/hashTree.ts +655 -0
- package/test/unit/spec/hashTree/hashTreeParser.ts +1532 -0
- package/test/unit/spec/hashTree/utils.ts +103 -0
- package/test/unit/spec/locus-info/index.js +667 -1
- package/test/unit/spec/meeting/index.js +91 -20
- package/test/unit/spec/meeting/utils.js +77 -0
- package/test/unit/spec/meetings/index.js +71 -26
- package/test/unit/spec/reachability/clusterReachability.ts +281 -138
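The diff rendered below is the unit test file package/test/unit/spec/reachability/clusterReachability.ts (+281 -138 above). Read together with the file list, where src/reachability/clusterReachability.ts shrinks sharply and a new src/reachability/reachabilityPeerConnection.ts appears, the test changes describe a refactor: ClusterReachability keeps its public API but now exposes a reachabilityPeerConnection delegate, relays its resultReady, clientMediaIpsUpdated and natTypeUpdated events, accumulates reached subnets, and forwards start(), abort() and getResult() to it. The TypeScript sketch below is only an inference from those test expectations, not the published implementation; the class shape, the EventEmitter base, and the relay loop are assumptions.

// Illustrative sketch only (inferred from the test expectations in the diff below,
// not taken from the @webex/plugin-meetings source). Names and types are assumptions.
import {EventEmitter} from 'events';

type TransportResult = {result: string; latencyInMilliseconds?: number; clientMediaIPs?: string[]};
type ReachabilityResult = {udp: TransportResult; tcp: TransportResult; xtls: TransportResult};

// Stand-in for the new ReachabilityPeerConnection, which owns the RTCPeerConnection,
// gathers ICE candidates and emits resultReady / clientMediaIpsUpdated / natTypeUpdated.
interface ReachabilityPeerConnectionLike extends EventEmitter {
  start(): Promise<void>;
  abort(): void;
  getResult(): ReachabilityResult;
}

class ClusterReachabilitySketch extends EventEmitter {
  // Subnets seen in candidates; the tests expect a Set that deduplicates addresses.
  readonly reachedSubnets = new Set<string>();

  constructor(
    public readonly name: string,
    public readonly isVideoMesh: boolean,
    public readonly reachabilityPeerConnection: ReachabilityPeerConnectionLike
  ) {
    super();

    // Re-emit the delegate's events so existing listeners on ClusterReachability keep working.
    for (const event of ['resultReady', 'clientMediaIpsUpdated', 'natTypeUpdated']) {
      this.reachabilityPeerConnection.on(event, (data: unknown) => this.emit(event, data));
    }
    // (Subnet collection from candidate events is omitted in this sketch.)
  }

  async start(): Promise<ReachabilityResult> {
    await this.reachabilityPeerConnection.start();

    return this.getResult();
  }

  abort(): void {
    this.reachabilityPeerConnection.abort();
  }

  getResult(): ReachabilityResult {
    return this.reachabilityPeerConnection.getResult();
  }
}

Splitting the probing into a separate ReachabilityPeerConnection would also explain why the old peer-connection-config assertions move under a dedicated '#WebRTC peer connection setup' block and why gatherIceCandidates is now spied on via clusterReachability.reachabilityPeerConnection.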
--- a/package/test/unit/spec/reachability/clusterReachability.ts
+++ b/package/test/unit/spec/reachability/clusterReachability.ts
@@ -1,17 +1,15 @@
 import {assert} from '@webex/test-helper-chai';
-import MockWebex from '@webex/test-helper-mock-webex';
 import sinon from 'sinon';
 import testUtils from '../../../utils/testUtils';
 
-// packages/@webex/plugin-meetings/test/unit/spec/reachability/clusterReachability.ts
 import {
 ClusterReachability,
 ResultEventData,
 Events,
 ClientMediaIpsUpdatedEventData,
 NatTypeUpdatedEventData,
-} from '@webex/plugin-meetings/src/reachability/clusterReachability';
-import {
+} from '@webex/plugin-meetings/src/reachability/clusterReachability';
+import {ReachabilityPeerConnection} from '@webex/plugin-meetings/src/reachability/reachabilityPeerConnection';
 
 describe('ClusterReachability', () => {
 let previousRTCPeerConnection;
@@ -49,7 +47,7 @@ describe('ClusterReachability', () => {
 xtls: ['stun:xtls1.webex.com', 'stun:xtls2.webex.com:443'],
 });
 
-gatherIceCandidatesSpy = sinon.spy(clusterReachability, 'gatherIceCandidates');
+gatherIceCandidatesSpy = sinon.spy(clusterReachability.reachabilityPeerConnection as any, 'gatherIceCandidates');
 
 resetEmittedEvents();
 
@@ -70,60 +68,16 @@ describe('ClusterReachability', () => {
 global.RTCPeerConnection = previousRTCPeerConnection;
 });
 
-it('should create an instance correctly', () => {
+it('should create an instance correctly with provided cluster info', () => {
 assert.instanceOf(clusterReachability, ClusterReachability);
 assert.equal(clusterReachability.name, 'testName');
 assert.equal(clusterReachability.isVideoMesh, false);
-assert.
-assert.equal(clusterReachability.numTcpUrls, 2);
+assert.instanceOf(clusterReachability.reachabilityPeerConnection, ReachabilityPeerConnection);
 });
 
-it('should
-assert.
-
-{username: '', credential: '', urls: ['stun:udp1']},
-{username: '', credential: '', urls: ['stun:udp2']},
-{
-username: 'webexturnreachuser',
-credential: 'webexturnreachpwd',
-urls: ['turn:tcp1.webex.com?transport=tcp'],
-},
-{
-username: 'webexturnreachuser',
-credential: 'webexturnreachpwd',
-urls: ['turn:tcp2.webex.com:5004?transport=tcp'],
-},
-{
-username: 'webexturnreachuser',
-credential: 'webexturnreachpwd',
-urls: ['turns:xtls1.webex.com?transport=tcp'],
-},
-{
-username: 'webexturnreachuser',
-credential: 'webexturnreachpwd',
-urls: ['turns:xtls2.webex.com:443?transport=tcp'],
-},
-],
-iceCandidatePoolSize: 0,
-iceTransportPolicy: 'all',
-});
-});
-
-it('should create a peer connection with the right config even if lists of urls are empty', () => {
-(global.RTCPeerConnection as any).resetHistory();
-
-clusterReachability = new ClusterReachability('testName', {
-isVideoMesh: false,
-udp: [],
-tcp: [],
-xtls: [],
-});
-
-assert.calledOnceWithExactly(global.RTCPeerConnection, {
-iceServers: [],
-iceCandidatePoolSize: 0,
-iceTransportPolicy: 'all',
-});
+it('should initialize reachedSubnets as empty set', () => {
+assert.instanceOf(clusterReachability.reachedSubnets, Set);
+assert.equal(clusterReachability.reachedSubnets.size, 0);
 });
 
 it('returns correct results before start() is called', () => {
@@ -138,7 +92,89 @@ describe('ClusterReachability', () => {
 assert.deepEqual(emittedEvents[Events.clientMediaIpsUpdated], []);
 });
 
-describe('#
+describe('#event relaying', () => {
+let clock;
+
+beforeEach(() => {
+clock = sinon.useFakeTimers();
+});
+
+afterEach(() => {
+clock.restore();
+});
+
+it('relays resultReady event from ReachabilityPeerConnection', async () => {
+const promise = clusterReachability.start();
+
+await testUtils.flushPromises();
+
+// Simulate RPC emitting resultReady
+await clock.tickAsync(50);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1'}});
+
+// ClusterReachability should relay the event
+assert.equal(emittedEvents[Events.resultReady].length, 1);
+assert.deepEqual(emittedEvents[Events.resultReady][0], {
+protocol: 'udp',
+result: 'reachable',
+latencyInMilliseconds: 50,
+clientMediaIPs: ['somePublicIp1'],
+});
+
+clusterReachability.abort();
+await promise;
+});
+
+it('relays clientMediaIpsUpdated event from ReachabilityPeerConnection', async () => {
+const promise = clusterReachability.start();
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1'}});
+
+// First IP found - only resultReady emitted
+assert.equal(emittedEvents[Events.resultReady].length, 1);
+assert.equal(emittedEvents[Events.clientMediaIpsUpdated].length, 0);
+resetEmittedEvents();
+
+// New IP found - should emit clientMediaIpsUpdated
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp2'}});
+
+assert.equal(emittedEvents[Events.resultReady].length, 0);
+assert.equal(emittedEvents[Events.clientMediaIpsUpdated].length, 1);
+assert.deepEqual(emittedEvents[Events.clientMediaIpsUpdated][0], {
+protocol: 'udp',
+clientMediaIPs: ['somePublicIp1', 'somePublicIp2'],
+});
+
+clusterReachability.abort();
+await promise;
+});
+
+it('relays natTypeUpdated event from ReachabilityPeerConnection', async () => {
+const promise = clusterReachability.start();
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1', port: 1000, relatedPort: 3478}});
+
+// No NAT detection yet (only 1 candidate)
+assert.equal(emittedEvents[Events.natTypeUpdated].length, 0);
+
+// Second candidate with same address but different port - indicates symmetric NAT
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1', port: 2000, relatedPort: 3478}});
+
+assert.equal(emittedEvents[Events.natTypeUpdated].length, 1);
+assert.deepEqual(emittedEvents[Events.natTypeUpdated][0], {
+natType: 'symmetric-nat',
+});
+
+clusterReachability.abort();
+await promise;
+});
+});
+
+describe('#subnet collection', () => {
 let clock;
 
 beforeEach(() => {
@@ -149,6 +185,142 @@ describe('ClusterReachability', () => {
 clock.restore();
 });
 
+it('collects reached subnets from ReachabilityPeerConnection events', async () => {
+const promise = clusterReachability.start();
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:192.168.1.1:5004'}});
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:10.0.0.1:5004'}});
+fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: 'relay.server.ip'}});
+
+clusterReachability.abort();
+await promise;
+
+assert.equal(clusterReachability.reachedSubnets.size, 3);
+assert.isTrue(clusterReachability.reachedSubnets.has('192.168.1.1'));
+assert.isTrue(clusterReachability.reachedSubnets.has('10.0.0.1'));
+assert.isTrue(clusterReachability.reachedSubnets.has('relay.server.ip'));
+});
+
+it('stores only unique subnet addresses', async () => {
+const promise = clusterReachability.start();
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:192.168.1.1:5004'}});
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:192.168.1.1:9000'}});
+fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: '192.168.1.1'}});
+
+clusterReachability.abort();
+await promise;
+
+// Should have only 1 unique subnet
+assert.equal(clusterReachability.reachedSubnets.size, 1);
+assert.isTrue(clusterReachability.reachedSubnets.has('192.168.1.1'));
+});
+
+it('accumulates subnets from multiple candidates', async () => {
+const promise = clusterReachability.start();
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:192.168.1.1:5004'}});
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:10.0.0.1:5004'}});
+
+await clock.tickAsync(10);
+fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: '172.16.0.1'}});
+
+clusterReachability.abort();
+await promise;
+
+assert.equal(clusterReachability.reachedSubnets.size, 3);
+assert.deepEqual(Array.from(clusterReachability.reachedSubnets), ['192.168.1.1', '10.0.0.1', '172.16.0.1']);
+});
+});
+
+describe('#delegation', () => {
+it('delegates getResult() to ReachabilityPeerConnection', () => {
+const rpcGetResultStub = sinon.stub(clusterReachability.reachabilityPeerConnection, 'getResult').returns({
+udp: {result: 'reachable', latencyInMilliseconds: 42},
+tcp: {result: 'unreachable'},
+xtls: {result: 'untested'},
+});
+
+const result = clusterReachability.getResult();
+
+assert.calledOnce(rpcGetResultStub);
+assert.equal(result.udp.result, 'reachable');
+assert.equal(result.udp.latencyInMilliseconds, 42);
+});
+
+it('delegates abort() to ReachabilityPeerConnection', () => {
+const rpcAbortStub = sinon.stub(clusterReachability.reachabilityPeerConnection, 'abort');
+
+clusterReachability.abort();
+
+assert.calledOnce(rpcAbortStub);
+});
+
+it('delegates start() to ReachabilityPeerConnection and returns result', async () => {
+const expectedResult = {
+udp: {result: 'reachable'},
+tcp: {result: 'unreachable'},
+xtls: {result: 'unreachable'},
+};
+
+const rpcStartStub = sinon.stub(clusterReachability.reachabilityPeerConnection, 'start').resolves();
+const rpcGetResultStub = sinon.stub(clusterReachability.reachabilityPeerConnection, 'getResult').returns(expectedResult);
+
+const result = await clusterReachability.start();
+
+assert.calledOnce(rpcStartStub);
+assert.calledOnce(rpcGetResultStub);
+assert.deepEqual(result, expectedResult);
+});
+});
+
+describe('#WebRTC peer connection setup', () => {
+let clock;
+
+beforeEach(() => {
+clock = sinon.useFakeTimers();
+});
+
+afterEach(() => {
+clock.restore();
+});
+
+it('should create a peer connection with the right config', () => {
+assert.calledOnceWithExactly(global.RTCPeerConnection, {
+iceServers: [
+{username: '', credential: '', urls: ['stun:udp1']},
+{username: '', credential: '', urls: ['stun:udp2']},
+{
+username: 'webexturnreachuser',
+credential: 'webexturnreachpwd',
+urls: ['turn:tcp1.webex.com?transport=tcp'],
+},
+{
+username: 'webexturnreachuser',
+credential: 'webexturnreachpwd',
+urls: ['turn:tcp2.webex.com:5004?transport=tcp'],
+},
+{
+username: 'webexturnreachuser',
+credential: 'webexturnreachpwd',
+urls: ['turns:xtls1.webex.com?transport=tcp'],
+},
+{
+username: 'webexturnreachuser',
+credential: 'webexturnreachpwd',
+urls: ['turns:xtls2.webex.com:443?transport=tcp'],
+},
+],
+iceCandidatePoolSize: 0,
+iceTransportPolicy: 'all',
+});
+});
+
 it('should initiate the ICE gathering process', async () => {
 const promise = clusterReachability.start();
 
@@ -174,6 +346,40 @@ describe('ClusterReachability', () => {
 assert.deepEqual(emittedEvents[Events.clientMediaIpsUpdated], []);
 });
 
+it('resolves when ICE gathering is completed', async () => {
+const promise = clusterReachability.start();
+
+await testUtils.flushPromises();
+
+fakePeerConnection.iceGatheringState = 'complete';
+fakePeerConnection.onicegatheringstatechange();
+await promise;
+
+assert.deepEqual(clusterReachability.getResult(), {
+udp: {result: 'unreachable'},
+tcp: {result: 'unreachable'},
+xtls: {result: 'unreachable'},
+});
+});
+
+it('resolves with the right result when ICE gathering is completed', async () => {
+const promise = clusterReachability.start();
+
+// send 1 candidate
+await clock.tickAsync(30);
+fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1'}});
+
+fakePeerConnection.iceGatheringState = 'complete';
+fakePeerConnection.onicegatheringstatechange();
+await promise;
+
+assert.deepEqual(clusterReachability.getResult(), {
+udp: {result: 'reachable', latencyInMilliseconds: 30, clientMediaIPs: ['somePublicIp1']},
+tcp: {result: 'unreachable'},
+xtls: {result: 'unreachable'},
+});
+});
+
 it('resolves and returns correct results when aborted before it gets any candidates', async () => {
 const promise = clusterReachability.start();
 
@@ -216,39 +422,17 @@ describe('ClusterReachability', () => {
 xtls: {result: 'unreachable'},
 });
 });
+});
 
-
-
-
-await testUtils.flushPromises();
-
-fakePeerConnection.iceGatheringState = 'complete';
-fakePeerConnection.onicegatheringstatechange();
-await promise;
+describe('#latency and candidate handling', () => {
+let clock;
 
-
-
-tcp: {result: 'unreachable'},
-xtls: {result: 'unreachable'},
-});
+beforeEach(() => {
+clock = sinon.useFakeTimers();
 });
 
-
-
-
-// send 1 candidate
-await clock.tickAsync(30);
-fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1'}});
-
-fakePeerConnection.iceGatheringState = 'complete';
-fakePeerConnection.onicegatheringstatechange();
-await promise;
-
-assert.deepEqual(clusterReachability.getResult(), {
-udp: {result: 'reachable', latencyInMilliseconds: 30, clientMediaIPs: ['somePublicIp1']},
-tcp: {result: 'unreachable'},
-xtls: {result: 'unreachable'},
-});
+afterEach(() => {
+clock.restore();
 });
 
 it('should store latency only for the first srflx candidate, but IPs from all of them', async () => {
@@ -257,17 +441,16 @@ describe('ClusterReachability', () => {
 await clock.tickAsync(10);
 fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp1'}});
 
-//
-await clock.tickAsync(10);
+await clock.tickAsync(50); // total elapsed time: 60
 fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp2'}});
 
-await clock.tickAsync(10);
+await clock.tickAsync(10); // total elapsed time: 70
 fakePeerConnection.onicecandidate({candidate: {type: 'srflx', address: 'somePublicIp3'}});
 
 clusterReachability.abort();
 await promise;
 
-// latency should be from only the first candidates, but the clientMediaIps should be from all UDP candidates
+// latency should be from only the first candidates, but the clientMediaIps should be from all UDP candidates
 assert.deepEqual(clusterReachability.getResult(), {
 udp: {
 result: 'reachable',
@@ -283,19 +466,18 @@ describe('ClusterReachability', () => {
 const promise = clusterReachability.start();
 
 await clock.tickAsync(10);
-fakePeerConnection.onicecandidate({
-
-
-await clock.tickAsync(10);
-fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: 'someTurnRelayIp2'}});
+fakePeerConnection.onicecandidate({
+candidate: {type: 'relay', address: 'relayIp1', port: 3478},
+});
 
-await clock.tickAsync(
-fakePeerConnection.onicecandidate({
+await clock.tickAsync(50); // total elapsed time: 60
+fakePeerConnection.onicecandidate({
+candidate: {type: 'relay', address: 'relayIp2', port: 3478},
+});
 
 clusterReachability.abort();
 await promise;
 
-// latency should be from only the first candidates, but the clientMediaIps should be from only from UDP candidates
 assert.deepEqual(clusterReachability.getResult(), {
 udp: {result: 'unreachable'},
 tcp: {result: 'reachable', latencyInMilliseconds: 10},
@@ -308,24 +490,17 @@ describe('ClusterReachability', () => {
 
 await clock.tickAsync(10);
 fakePeerConnection.onicecandidate({
-candidate: {type: 'relay', address: '
+candidate: {type: 'relay', address: 'relayIp1', port: 443},
 });
 
-//
-await clock.tickAsync(10);
+await clock.tickAsync(50); // total elapsed time: 60
 fakePeerConnection.onicecandidate({
-candidate: {type: 'relay', address: '
-});
-
-await clock.tickAsync(10);
-fakePeerConnection.onicecandidate({
-candidate: {type: 'relay', address: 'someTurnRelayIp3', port: 443},
+candidate: {type: 'relay', address: 'relayIp2', port: 443},
 });
 
 clusterReachability.abort();
 await promise;
 
-// latency should be from only the first candidates, but the clientMediaIps should be from only from UDP candidates
 assert.deepEqual(clusterReachability.getResult(), {
 udp: {result: 'unreachable'},
 tcp: {result: 'unreachable'},
@@ -440,37 +615,5 @@ describe('ClusterReachability', () => {
 xtls: {result: 'reachable', latencyInMilliseconds: 20},
 });
 });
-
-it('should gather correctly reached subnets', async () => {
-const promise = clusterReachability.start();
-
-await clock.tickAsync(10);
-fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:1.2.3.4:5004'}});
-fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:4.3.2.1:5004'}});
-fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: 'someTurnRelayIp'}});
-
-clusterReachability.abort();
-await promise;
-
-assert.deepEqual(Array.from(clusterReachability.reachedSubnets), [
-'1.2.3.4',
-'4.3.2.1',
-'someTurnRelayIp'
-]);
-});
-
-it('should store only unique subnet address', async () => {
-const promise = clusterReachability.start();
-
-await clock.tickAsync(10);
-fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:1.2.3.4:5004'}});
-fakePeerConnection.onicecandidate({candidate: {type: 'srflx', url: 'stun:1.2.3.4:9000'}});
-fakePeerConnection.onicecandidate({candidate: {type: 'relay', address: '1.2.3.4'}});
-
-clusterReachability.abort();
-await promise;
-
-assert.deepEqual(Array.from(clusterReachability.reachedSubnets), ['1.2.3.4']);
-});
 });
 });
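None of the hunks above show the beforeEach that builds fakePeerConnection or stubs global.RTCPeerConnection; the tests only set iceGatheringState and call onicecandidate and onicegatheringstatechange by hand. A minimal harness consistent with that usage might look like the sketch below; it is an assumption, not code from the package, and the close stub in particular is invented.

// Hypothetical test harness sketch; not taken from the package. Only iceGatheringState,
// onicecandidate and onicegatheringstatechange are exercised by the tests above.
import sinon from 'sinon';

function makeFakePeerConnection() {
  return {
    iceGatheringState: 'new',
    // The code under test assigns real handlers to these properties; the tests then
    // invoke them directly to simulate ICE candidates arriving and gathering completing.
    onicecandidate: (_event: {candidate: Record<string, unknown>}) => {},
    onicegatheringstatechange: () => {},
    close: sinon.stub(),
  };
}

// Stubbing the global constructor is what makes
// assert.calledOnceWithExactly(global.RTCPeerConnection, {...}) possible in the
// '#WebRTC peer connection setup' test.
const fakePeerConnection = makeFakePeerConnection();
(global as any).RTCPeerConnection = sinon.stub().returns(fakePeerConnection);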