@agentdance/node-webrtc-sctp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/association.d.ts +167 -0
- package/dist/association.d.ts.map +1 -0
- package/dist/association.js +1394 -0
- package/dist/association.js.map +1 -0
- package/dist/index.d.ts +4 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +4 -0
- package/dist/index.js.map +1 -0
- package/dist/packet.d.ts +45 -0
- package/dist/packet.d.ts.map +1 -0
- package/dist/packet.js +156 -0
- package/dist/packet.js.map +1 -0
- package/dist/types.d.ts +62 -0
- package/dist/types.d.ts.map +1 -0
- package/dist/types.js +5 -0
- package/dist/types.js.map +1 -0
- package/package.json +56 -0
- package/src/association.ts +1621 -0
- package/src/index.ts +3 -0
- package/src/packet.ts +227 -0
- package/src/types.ts +76 -0
|
@@ -0,0 +1,1394 @@
|
|
|
1
|
+
// SCTP association – production-grade reliable delivery over a DTLS transport
|
|
2
|
+
// Implements RFC 4960 / RFC 8832 (DCEP) for WebRTC data channels.
|
|
3
|
+
//
|
|
4
|
+
// Features:
|
|
5
|
+
// - INIT / INIT-ACK / COOKIE-ECHO / COOKIE-ACK handshake
|
|
6
|
+
// - DATA chunk fragmentation & reassembly (RFC 4960 §6.9)
|
|
7
|
+
// - Out-of-order TSN buffering with cumulative delivery
|
|
8
|
+
// - Congestion control: cwnd / ssthresh / slow-start / congestion-avoidance (RFC 4960 §7)
|
|
9
|
+
// - Flow control: peerRwnd tracks advertised receive window from SACK a_rwnd
|
|
10
|
+
// - Send queue: enqueues fragments; pump() drains under window constraints
|
|
11
|
+
// - Retransmission queue with RTO (RFC 4960 §6.3), RTO back-off & smoothing
|
|
12
|
+
// - Fast retransmit after 3 duplicate SACKs (RFC 4960 §7.2.4)
|
|
13
|
+
// - SACK processing with gap ACK blocks
|
|
14
|
+
// - FORWARD-TSN for partial-reliability (RFC 3758)
|
|
15
|
+
// - Stream-level SSN ordering with out-of-order SSN buffering
|
|
16
|
+
// - bufferedAmount tracking per data channel with bufferedamountlow event
|
|
17
|
+
// - Pre-negotiated data channels (negotiated=true, bypass DCEP)
|
|
18
|
+
// - Clean SHUTDOWN sequence (RFC 4960 §9)
|
|
19
|
+
import * as crypto from 'node:crypto';
|
|
20
|
+
import { EventEmitter } from 'node:events';
|
|
21
|
+
import { encodeSctpPacket, decodeSctpPacket, crc32c, encodeDataChunk, decodeDataChunk, encodeDcepOpen, encodeDcepAck, decodeDcep, } from './packet.js';
|
|
22
|
+
// ─── Constants ────────────────────────────────────────────────────────────────
// Tuning knobs for fragmentation, windows, and timers. Values mirror RFC 4960
// recommendations where noted; others follow pion/usrsctp interop behavior.
const PMTU = 1200; // Path MTU for DATA chunk fragmentation (kept small for interop)
const MAX_FRAGMENT_SIZE = PMTU - 12 - 16; // SCTP common header (12) + DATA chunk header (16)
const MAX_BATCH_BYTES = 8340; // Max ~7 DATA chunks per SCTP packet; keeps UDP < 9216B (macOS net.inet.udp.maxdgram)
const MAX_BUFFER = 2 * 1024 * 1024; // Local receive window advertised to peer (2 MiB)
const INITIAL_CWND = 4 * PMTU; // RFC 4960 §7.2.1 initial cwnd
const MAX_CWND = 128 * 1024 * 1024; // 128 MiB soft cap – allows cwnd to grow unconstrained on loopback
const INITIAL_SSTHRESH = MAX_BUFFER; // RFC 4960 §7.2.1: MAY be arbitrarily high; pion uses RWND
const INITIAL_RTO_MS = 1000; // Initial RTO
const MIN_RTO_MS = 200; // RFC 4960 §6.3.1 RTO.Min
const MAX_RTO_MS = 60_000; // RFC 4960 §6.3.1 RTO.Max
const MAX_RETRANSMITS = 10; // Association.Max.Retrans
const SACK_DELAY_MS = 20; // Delayed SACK timer (RFC 4960 §6.2)
const MAX_BURST = 0; // 0 = no burst limit (pion default); window itself is the constraint
// RTT smoothing (RFC 6298)
const ALPHA = 0.125; // SRTT gain (1/8)
const BETA = 0.25; // RTTVAR gain (1/4)
|
|
39
|
+
/**
 * One WebRTC data channel multiplexed over an SctpAssociation stream.
 * Emits: 'open', 'closing', 'close', 'message', 'bufferedamountlow'.
 * All wire I/O is delegated to the owning association; this class only tracks
 * per-channel state (readyState, outgoing SSN, bufferedAmount).
 */
export class SctpDataChannel extends EventEmitter {
    id;                    // SCTP stream identifier for this channel
    label;
    protocol;
    ordered;               // false → DATA chunks sent with the unordered flag
    maxPacketLifeTime;     // PR-SCTP lifetime limit (ms), if set
    maxRetransmits;        // PR-SCTP retransmit limit, if set
    negotiated;            // true → channel was pre-negotiated, no DCEP exchange
    _state;                // 'connecting' | 'open' | 'closing' | 'closed'
    _assoc;                // owning SctpAssociation
    _ssn = 0; // outgoing stream sequence number
    // bufferedAmount
    _bufferedAmount = 0;
    bufferedAmountLowThreshold = 0;
    constructor(assoc, info) {
        super();
        this._assoc = assoc;
        this.id = info.id;
        this.label = info.label;
        this.protocol = info.protocol;
        this.ordered = info.ordered;
        this.maxPacketLifeTime = info.maxPacketLifeTime;
        this.maxRetransmits = info.maxRetransmits;
        this.negotiated = info.negotiated ?? false;
        this._state = info.state;
        // RFC 8832 §6.6: DCEP DATA_CHANNEL_OPEN uses SSN=0.
        // User messages MUST start at SSN=1 to avoid collision with the DCEP message.
        // Negotiated channels skip DCEP, so they start at SSN=0.
        if (!this.negotiated) {
            this._ssn = 1;
        }
    }
    get state() {
        return this._state;
    }
    /** Alias of `state`, matching the RTCDataChannel property name. */
    get readyState() {
        return this._state;
    }
    get bufferedAmount() {
        // When the peer's receive window is closed (peerRwnd=0), return an inflated value
        // so that callers using bufferedAmount for backpressure (e.g. scenario1) don't queue
        // more data, preventing Flutter's usrsctp receive buffer from overflowing further.
        if (this._assoc.peerWindowClosed) {
            return 256 * 1024; // artificially large – keep all callers paused
        }
        return this._bufferedAmount;
    }
    /**
     * Send a message through this data channel.
     * Strings are UTF-8 encoded; the DCEP PPID is chosen from the payload type
     * and emptiness (STRING/STRING_EMPTY/BINARY/BINARY_EMPTY).
     * @throws if the channel is not in the 'open' state
     */
    send(data) {
        if (this._state !== 'open') {
            throw new Error(`DataChannel "${this.label}" is not open (state: ${this._state})`);
        }
        const buf = typeof data === 'string' ? Buffer.from(data, 'utf8') : data;
        const ppid = typeof data === 'string'
            ? buf.length === 0 ? 56 /* Ppid.STRING_EMPTY */ : 51 /* Ppid.STRING */
            : buf.length === 0 ? 57 /* Ppid.BINARY_EMPTY */ : 53 /* Ppid.BINARY */;
        // Track buffered amount before enqueue
        this._bufferedAmount += buf.length;
        const ssn = this._ssn;
        this._ssn = (this._ssn + 1) & 0xffff; // SSN is a 16-bit wrapping counter
        this._assoc._sendData(this.id, ssn, ppid, buf, this.ordered, this);
    }
    /** Bump and return the next outgoing SSN (used by association for ZWP probes) */
    _nextSsn() {
        const ssn = this._ssn;
        this._ssn = (this._ssn + 1) & 0xffff;
        return ssn;
    }
    /** Request channel close; the association finalizes via _close(). */
    close() {
        if (this._state === 'closed' || this._state === 'closing')
            return;
        this._state = 'closing';
        this.emit('closing');
        this._assoc._closeChannel(this.id);
    }
    /** Called by association when a message arrives */
    _deliver(ppid, data) {
        if (this._state !== 'open')
            return;
        if (ppid === 51 /* Ppid.STRING */ || ppid === 56 /* Ppid.STRING_EMPTY */) {
            this.emit('message', data.toString('utf8'));
        }
        else {
            // Copy so listeners cannot mutate the association's internal buffer
            this.emit('message', Buffer.from(data));
        }
    }
    /** Called by association when this channel is fully open */
    _open() {
        this._state = 'open';
        this.emit('open');
    }
    /** Called by association when channel is forcibly closed */
    _close() {
        if (this._state === 'closed')
            return;
        this._state = 'closed';
        this._bufferedAmount = 0;
        this.emit('close');
    }
    /** Called when bytes are acknowledged (reduce bufferedAmount) */
    _onAcked(bytes, peerRwnd = 1) {
        const prev = this._bufferedAmount;
        this._bufferedAmount = Math.max(0, this._bufferedAmount - bytes);
        // Fire bufferedamountlow if threshold crossed downward AND peer window is open.
        // Suppress when peerRwnd=0 to prevent scenario2 from re-flooding Flutter's buffer.
        if (prev > this.bufferedAmountLowThreshold &&
            this._bufferedAmount <= this.bufferedAmountLowThreshold &&
            peerRwnd > 0) {
            this.emit('bufferedamountlow');
        }
    }
}
|
|
151
|
+
/**
 * SCTP association multiplexing data channels over a DTLS transport.
 * Implements the RFC 4960 handshake/ack/retransmit machinery plus
 * RFC 8832 (DCEP) channel setup. Emits 'state' on association state changes;
 * per-channel events fire on the SctpDataChannel objects it manages.
 */
export class SctpAssociation extends EventEmitter {
    _state = 'new'; // 'new' | 'connecting' | 'connected' | 'closed'
    _localPort;
    _remotePort;
    _role; // 'client' initiates the INIT handshake; 'server' waits for the peer's INIT
    // Verification tags
    _localTag = 0;   // our tag, sent in INIT; peer echoes it on every packet
    _remoteTag = 0;  // peer's tag; stamped on every outgoing packet header
    // ─── TSN – outgoing ───────────────────────────────────────────────────────
    _localTsn = 0;
    // ─── TSN – incoming ───────────────────────────────────────────────────────
    _remoteCumulativeTsn = 0;
    /** Set of TSNs received out-of-order but not yet cumulatively acknowledged */
    _receivedTsns = new Set();
    // ─── Congestion & flow control (RFC 4960 §7) ─────────────────────────────
    /** Congestion window (bytes) – controls how many bytes can be in-flight */
    _cwnd = INITIAL_CWND;
    /** Slow-start threshold */
    _ssthresh = INITIAL_SSTHRESH;
    /** Bytes currently in-flight (sent but not yet acknowledged) */
    _flightSize = 0;
    /** Partial bytes acked accumulator for congestion avoidance (RFC 4960 §7.2.2) */
    _partialBytesAcked = 0;
    /** Peer's advertised receive window (from SACK a_rwnd field) */
    _peerRwnd = MAX_BUFFER;
    /** Smoothed RTT estimate (ms) */
    _srtt;
    /** RTT variance (ms) */
    _rttvar;
    // ─── Send queue (fragments waiting for window space) ─────────────────────
    _sendQueue = [];
    _pumping = false; // guards against scheduling more than one pump tick at a time
    // ─── Retransmit queue ─────────────────────────────────────────────────────
    /** TSN → PendingChunk for unacknowledged outgoing chunks */
    _pendingChunks = new Map();
    _retransmitTimer;
    _rto = INITIAL_RTO_MS;
    /** Zero-window probe back-off delay (ms) – doubles on each probe attempt */
    _zwpDelay = 200;
    /** Count of SACKs received without new data being ACKed (for fast retransmit) */
    _dupSackCount = 0;
    _lastCumAcked = 0;
    // ─── Reassembly ───────────────────────────────────────────────────────────
    /** streamId+ssn key → ReassemblyEntry for multi-fragment messages */
    _reassembly = new Map();
    // ─── Channels ─────────────────────────────────────────────────────────────
    _channels = new Map(); // channel id → SctpDataChannel
    _nextChannelId;        // next auto-assigned channel id (client: even, server: odd)
    // ─── Per-stream ordered receive state ─────────────────────────────────────
    _streamReceive = new Map();
    // ─── Transport ────────────────────────────────────────────────────────────
    _sendCallback; // (buf) => void raw-packet sink, registered via setSendCallback()
    // ─── SACK state (RFC 4960 §6.2 every-other-packet) ──────────────────────
    /** DATA packets received since last SACK was sent. Every 2nd packet → immediate SACK. */
    _dataPacketsSinceAck = 0;
    // ─── Timers ───────────────────────────────────────────────────────────────
    _sackTimer;
    // ─── Connect promise ──────────────────────────────────────────────────────
    _connectResolve;
    _connectReject;
|
|
211
|
+
constructor(opts) {
|
|
212
|
+
super();
|
|
213
|
+
this._localPort = opts.localPort;
|
|
214
|
+
this._remotePort = opts.remotePort;
|
|
215
|
+
this._role = opts.role;
|
|
216
|
+
this._nextChannelId = opts.role === 'client' ? 0 : 1;
|
|
217
|
+
this._localTag = crypto.randomBytes(4).readUInt32BE(0);
|
|
218
|
+
this._localTsn = crypto.randomBytes(4).readUInt32BE(0) >>> 0;
|
|
219
|
+
this._lastCumAcked = (this._localTsn - 1) >>> 0;
|
|
220
|
+
}
|
|
221
|
+
// ─── Public API ──────────────────────────────────────────────────────────
|
|
222
|
+
/** Register the transport sink invoked with each fully-encoded SCTP packet. */
setSendCallback(fn) {
    this._sendCallback = fn;
}
|
|
225
|
+
/** True when peer's receive window is closed (peerRwnd=0 due to flutter uint32 wrap or genuine 0) */
get peerWindowClosed() {
    return this._peerRwnd === 0;
}
|
|
229
|
+
async connect(timeoutMs = 30_000) {
|
|
230
|
+
if (this._state === 'connected')
|
|
231
|
+
return;
|
|
232
|
+
if (this._state !== 'new' && this._state !== 'connecting') {
|
|
233
|
+
throw new Error(`SCTP cannot connect from state: ${this._state}`);
|
|
234
|
+
}
|
|
235
|
+
if (this._state === 'new')
|
|
236
|
+
this._setState('connecting');
|
|
237
|
+
return new Promise((resolve, reject) => {
|
|
238
|
+
if (this._state === 'connected') {
|
|
239
|
+
resolve();
|
|
240
|
+
return;
|
|
241
|
+
}
|
|
242
|
+
this._connectResolve = resolve;
|
|
243
|
+
this._connectReject = reject;
|
|
244
|
+
const timer = setTimeout(() => {
|
|
245
|
+
this._connectResolve = undefined;
|
|
246
|
+
this._connectReject = undefined;
|
|
247
|
+
reject(new Error('SCTP connect timeout'));
|
|
248
|
+
}, timeoutMs);
|
|
249
|
+
timer.unref?.();
|
|
250
|
+
if (this._role === 'client')
|
|
251
|
+
this._sendInit();
|
|
252
|
+
// Server waits for INIT
|
|
253
|
+
});
|
|
254
|
+
}
|
|
255
|
+
handleIncoming(buf) {
|
|
256
|
+
let pkt;
|
|
257
|
+
try {
|
|
258
|
+
pkt = decodeSctpPacket(buf);
|
|
259
|
+
}
|
|
260
|
+
catch {
|
|
261
|
+
return;
|
|
262
|
+
}
|
|
263
|
+
// Verify CRC-32c
|
|
264
|
+
// All SCTP stacks (usrsctp/libwebrtc, Linux kernel) store CRC-32c in little-endian.
|
|
265
|
+
const zeroed = Buffer.from(buf);
|
|
266
|
+
zeroed.writeUInt32LE(0, 8);
|
|
267
|
+
const computed = crc32c(zeroed);
|
|
268
|
+
const stored = buf.readUInt32LE(8);
|
|
269
|
+
if (computed !== stored) {
|
|
270
|
+
return;
|
|
271
|
+
}
|
|
272
|
+
for (const chunk of pkt.chunks) {
|
|
273
|
+
this._handleChunk(chunk, pkt.header.verificationTag);
|
|
274
|
+
}
|
|
275
|
+
}
|
|
276
|
+
createDataChannel(opts) {
|
|
277
|
+
if (this._state !== 'connected') {
|
|
278
|
+
throw new Error('SCTP not connected');
|
|
279
|
+
}
|
|
280
|
+
// Respect explicit id for pre-negotiated channels; otherwise auto-assign
|
|
281
|
+
const id = (opts.negotiated && opts.id !== undefined)
|
|
282
|
+
? opts.id
|
|
283
|
+
: this._nextChannelId;
|
|
284
|
+
if (!opts.negotiated || opts.id === undefined) {
|
|
285
|
+
this._nextChannelId += 2; // client: 0,2,4… / server: 1,3,5…
|
|
286
|
+
}
|
|
287
|
+
const info = {
|
|
288
|
+
id,
|
|
289
|
+
label: opts.label,
|
|
290
|
+
protocol: opts.protocol ?? '',
|
|
291
|
+
ordered: opts.ordered !== false,
|
|
292
|
+
maxPacketLifeTime: opts.maxPacketLifeTime,
|
|
293
|
+
maxRetransmits: opts.maxRetransmits,
|
|
294
|
+
state: 'connecting',
|
|
295
|
+
negotiated: opts.negotiated ?? false,
|
|
296
|
+
};
|
|
297
|
+
const channel = new SctpDataChannel(this, info);
|
|
298
|
+
this._channels.set(id, channel);
|
|
299
|
+
if (opts.negotiated) {
|
|
300
|
+
// Pre-negotiated: open immediately, no DCEP exchange
|
|
301
|
+
setImmediate(() => channel._open());
|
|
302
|
+
}
|
|
303
|
+
else {
|
|
304
|
+
// Send DCEP DATA_CHANNEL_OPEN
|
|
305
|
+
this._sendDcepOpen(id, info);
|
|
306
|
+
}
|
|
307
|
+
return channel;
|
|
308
|
+
}
|
|
309
|
+
close() {
|
|
310
|
+
this._clearRetransmitTimer();
|
|
311
|
+
if (this._sackTimer) {
|
|
312
|
+
clearTimeout(this._sackTimer);
|
|
313
|
+
this._sackTimer = undefined;
|
|
314
|
+
}
|
|
315
|
+
this._sendQueue.length = 0;
|
|
316
|
+
if (this._state === 'connected') {
|
|
317
|
+
// Send SHUTDOWN
|
|
318
|
+
const value = Buffer.allocUnsafe(4);
|
|
319
|
+
value.writeUInt32BE(this._remoteCumulativeTsn, 0);
|
|
320
|
+
this._sendChunks([{ type: 7 /* ChunkType.SHUTDOWN */, flags: 0, value }]);
|
|
321
|
+
}
|
|
322
|
+
this._setState('closed');
|
|
323
|
+
for (const ch of this._channels.values())
|
|
324
|
+
ch._close();
|
|
325
|
+
this._channels.clear();
|
|
326
|
+
this._pendingChunks.clear();
|
|
327
|
+
}
|
|
328
|
+
/** Current association state ('new' | 'connecting' | 'connected' | 'closed') */
get state() { return this._state; }
/** Expose congestion/flow state for testing */
get cwnd() { return this._cwnd; }
get flightSize() { return this._flightSize; }
get peerRwnd() { return this._peerRwnd; }
get sendQueueLength() { return this._sendQueue.length; }
|
|
334
|
+
// ─── Internal: called by SctpDataChannel ────────────────────────────────
|
|
335
|
+
_sendData(streamId, ssn, ppid, data, ordered, channel) {
|
|
336
|
+
if (this._state !== 'connected')
|
|
337
|
+
return;
|
|
338
|
+
// Build all fragments for this message
|
|
339
|
+
const frags = [];
|
|
340
|
+
if (data.length === 0) {
|
|
341
|
+
frags.push({ streamId, ssn, ppid, data, ordered, beginning: true, ending: true, channel });
|
|
342
|
+
}
|
|
343
|
+
else {
|
|
344
|
+
const numFragments = Math.ceil(data.length / MAX_FRAGMENT_SIZE);
|
|
345
|
+
for (let i = 0; i < numFragments; i++) {
|
|
346
|
+
const start = i * MAX_FRAGMENT_SIZE;
|
|
347
|
+
const end = Math.min(start + MAX_FRAGMENT_SIZE, data.length);
|
|
348
|
+
frags.push({
|
|
349
|
+
streamId, ssn, ppid,
|
|
350
|
+
data: data.subarray(start, end),
|
|
351
|
+
ordered,
|
|
352
|
+
beginning: i === 0,
|
|
353
|
+
ending: i === numFragments - 1,
|
|
354
|
+
channel,
|
|
355
|
+
});
|
|
356
|
+
}
|
|
357
|
+
}
|
|
358
|
+
// Priority: if this stream has no fragments currently queued, prepend the
|
|
359
|
+
// entire message so that control messages (e.g. large-file META, ACK) are
|
|
360
|
+
// not starved behind bulk data from other high-volume streams.
|
|
361
|
+
const streamAlreadyQueued = this._sendQueue.some(f => f.streamId === streamId);
|
|
362
|
+
if (!streamAlreadyQueued && this._sendQueue.length > 0) {
|
|
363
|
+
this._sendQueue.unshift(...frags);
|
|
364
|
+
}
|
|
365
|
+
else {
|
|
366
|
+
for (const frag of frags)
|
|
367
|
+
this._sendQueue.push(frag);
|
|
368
|
+
}
|
|
369
|
+
// Try to drain queue
|
|
370
|
+
this._pump();
|
|
371
|
+
}
|
|
372
|
+
_closeChannel(id) {
|
|
373
|
+
const channel = this._channels.get(id);
|
|
374
|
+
if (!channel)
|
|
375
|
+
return;
|
|
376
|
+
this._channels.delete(id);
|
|
377
|
+
channel._close();
|
|
378
|
+
}
|
|
379
|
+
// ─── Send queue & congestion pump ────────────────────────────────────────
|
|
380
|
+
/** Append a fragment to the send queue without pumping (caller drives _pump). */
_enqueue(frag) {
    this._sendQueue.push(frag);
}
|
|
383
|
+
/**
|
|
384
|
+
* Drain the send queue under cwnd / peerRwnd constraints.
|
|
385
|
+
* Called after enqueue and after each SACK acknowledgement.
|
|
386
|
+
*
|
|
387
|
+
* We call _doPump via setImmediate so that incoming SACKs (which arrive as
|
|
388
|
+
* UDP packets in separate event-loop ticks) can be processed between pump
|
|
389
|
+
* cycles, keeping the window accurate and preventing stalls.
|
|
390
|
+
*/
|
|
391
|
+
_pump() {
|
|
392
|
+
if (this._pumping)
|
|
393
|
+
return;
|
|
394
|
+
this._pumping = true;
|
|
395
|
+
setImmediate(() => {
|
|
396
|
+
this._pumping = false;
|
|
397
|
+
this._doPump();
|
|
398
|
+
// Note: _doPump may schedule its own setImmediate continuation if the
|
|
399
|
+
// queue is not empty after one batch. _pumping is cleared before _doPump
|
|
400
|
+
// so that continuation setImmediate calls inside _doPump are not blocked.
|
|
401
|
+
});
|
|
402
|
+
}
|
|
403
|
+
/**
|
|
404
|
+
* Purge PR-SCTP messages (maxRetransmits=0 or maxPacketLifeTime expired)
|
|
405
|
+
* from the head of the send queue when the peer window is zero.
|
|
406
|
+
* Since no TSN has been assigned yet, we simply discard and notify bufferedAmount.
|
|
407
|
+
*
|
|
408
|
+
* Returns the number of complete messages dropped.
|
|
409
|
+
*/
|
|
410
|
+
_purgeStalePrSctp() {
|
|
411
|
+
if (this._peerRwnd > 0 && this._sendQueue.length === 0)
|
|
412
|
+
return 0;
|
|
413
|
+
let dropped = 0;
|
|
414
|
+
const now = Date.now();
|
|
415
|
+
let i = 0;
|
|
416
|
+
while (i < this._sendQueue.length) {
|
|
417
|
+
const frag = this._sendQueue[i];
|
|
418
|
+
const ch = frag.channel;
|
|
419
|
+
const isPr = ch.maxRetransmits === 0 ||
|
|
420
|
+
(ch.maxPacketLifeTime !== undefined && ch.maxPacketLifeTime !== null);
|
|
421
|
+
if (!isPr) {
|
|
422
|
+
i++;
|
|
423
|
+
continue;
|
|
424
|
+
}
|
|
425
|
+
// Drop the entire message (beginning → ending)
|
|
426
|
+
// Walk forward to find the start of this message if not at beginning
|
|
427
|
+
// (We only drop complete messages starting at beginning=true)
|
|
428
|
+
if (!frag.beginning) {
|
|
429
|
+
i++;
|
|
430
|
+
continue;
|
|
431
|
+
}
|
|
432
|
+
// Drop from i until ending=true
|
|
433
|
+
let j = i;
|
|
434
|
+
while (j < this._sendQueue.length) {
|
|
435
|
+
const f = this._sendQueue[j];
|
|
436
|
+
if (f.streamId !== frag.streamId || f.ssn !== frag.ssn)
|
|
437
|
+
break;
|
|
438
|
+
ch._onAcked(f.data.length);
|
|
439
|
+
j++;
|
|
440
|
+
if (f.ending)
|
|
441
|
+
break;
|
|
442
|
+
}
|
|
443
|
+
this._sendQueue.splice(i, j - i);
|
|
444
|
+
dropped++;
|
|
445
|
+
}
|
|
446
|
+
return dropped;
|
|
447
|
+
}
|
|
448
|
+
/**
 * Drain one batch of the send queue, bounded by min(cwnd, peerRwnd) and
 * MAX_BATCH_BYTES per tick. Each drained fragment gets the next TSN, is
 * registered in _pendingChunks for retransmission, and the whole batch is
 * flushed through one _sendChunks call.
 */
_doPump() {
    // Drop stale PR-SCTP messages when peer window is zero to prevent unbounded queuing
    if (this._peerRwnd === 0) {
        const dropped = this._purgeStalePrSctp();
        if (dropped > 0) {
            console.log(`[SCTP ${this._role}] _doPump: purged ${dropped} stale PR-SCTP messages, queueLen=${this._sendQueue.length}`);
        }
    }
    // Send data in batches of MAX_BATCH_BYTES per setImmediate tick.
    // This amortises per-record DTLS crypto overhead (createCipheriv + UDP
    // syscall) across ~7 SCTP fragments while still yielding back to the
    // event loop between batches so that incoming SACKs are processed.
    const w = Math.min(this._cwnd, this._peerRwnd);
    if (w === 0) {
        // Window fully closed: rely on the RTO timer to probe/retry later.
        this._armRetransmitTimer();
        return;
    }
    let batchBytes = 0;
    // Collect DATA chunks for the entire batch; send them all in ONE _sendChunks
    // call so they are bundled into a single DTLS record (~1 createCipheriv vs N).
    const batchChunks = [];
    while (this._sendQueue.length > 0) {
        const frag = this._sendQueue[0];
        const chunkSize = frag.data.length;
        // Stop when window is full
        if (this._flightSize + chunkSize > w) {
            this._armRetransmitTimer();
            break;
        }
        // Stop when this batch is full – schedule next batch via _pump()
        // (encoded size: payload + 16B DATA chunk header + 4B padding allowance)
        const encodedSize = chunkSize + 16 + 4;
        if (batchBytes + encodedSize > MAX_BATCH_BYTES && batchBytes > 0) {
            // More data to send but batch is full – _pump will be re-triggered
            // either from SACK (via _processSack → _pump) or we schedule it here
            setImmediate(() => this._pump());
            break;
        }
        this._sendQueue.shift();
        const tsn = this._localTsn;
        this._localTsn = (this._localTsn + 1) >>> 0; // 32-bit wrap
        const payload = {
            tsn, streamId: frag.streamId, ssn: frag.ssn, ppid: frag.ppid,
            userData: frag.data,
            beginning: frag.beginning,
            ending: frag.ending,
            unordered: !frag.ordered,
        };
        const chunk = encodeDataChunk(payload);
        this._flightSize += chunkSize;
        // Register for retransmission until a SACK covers this TSN.
        this._pendingChunks.set(tsn, {
            chunk,
            tsn,
            streamId: frag.streamId,
            dataLen: chunkSize,
            sentAt: Date.now(),
            retransmitCount: 0,
            abandoned: false,
            inFlight: true,
            maxRetransmits: frag.channel.maxRetransmits,
            maxPacketLifeTime: frag.channel.maxPacketLifeTime,
        });
        batchChunks.push(chunk);
        batchBytes += encodedSize;
    }
    // Flush the entire batch as ONE DTLS record → single createCipheriv call
    if (batchChunks.length > 0) {
        this._sendChunks(batchChunks);
    }
    this._armRetransmitTimer();
}
|
|
518
|
+
/**
 * Encode and send a single fragment immediately, bypassing the batching pump
 * (used for zero-window probes), then register it for retransmission.
 * @param frag    queued fragment (carries stream/ssn/ppid/data/flags/channel)
 * @param dataLen payload byte count added to flightSize
 */
_transmitFragment(frag, dataLen) {
    const tsn = this._localTsn;
    this._localTsn = (this._localTsn + 1) >>> 0; // 32-bit wrap
    const payload = {
        tsn, streamId: frag.streamId, ssn: frag.ssn, ppid: frag.ppid,
        userData: frag.data,
        beginning: frag.beginning,
        ending: frag.ending,
        unordered: !frag.ordered,
    };
    const chunk = encodeDataChunk(payload);
    this._sendChunks([chunk]);
    this._flightSize += dataLen;
    // Track for retransmission
    this._pendingChunks.set(tsn, {
        chunk,
        tsn,
        streamId: frag.streamId,
        dataLen,
        sentAt: Date.now(),
        retransmitCount: 0,
        abandoned: false,
        inFlight: true,
        maxRetransmits: frag.channel.maxRetransmits,
        maxPacketLifeTime: frag.channel.maxPacketLifeTime,
    });
    this._armRetransmitTimer();
}
|
|
546
|
+
// ─── Private helpers ─────────────────────────────────────────────────────
|
|
547
|
+
_setState(s) {
|
|
548
|
+
if (this._state === s)
|
|
549
|
+
return;
|
|
550
|
+
this._state = s;
|
|
551
|
+
this.emit('state', s);
|
|
552
|
+
}
|
|
553
|
+
/** Hand a fully-encoded SCTP packet to the transport, if one is registered. */
_send(buf) {
    this._sendCallback?.(buf);
}
|
|
556
|
+
_sendChunks(chunks) {
|
|
557
|
+
// Bundle all chunks into as few DTLS records as possible.
|
|
558
|
+
// We allow packets up to MAX_BATCH_BYTES so that _doPump's pre-assembled
|
|
559
|
+
// batch fits in a single DTLS record (max 16383 B), drastically reducing
|
|
560
|
+
// createCipheriv calls. Control chunks (SACK, INIT, etc.) are still small
|
|
561
|
+
// and bundle naturally. The DTLS layer re-fragments at 16383 B if needed.
|
|
562
|
+
const SCTP_COMMON_HDR = 12;
|
|
563
|
+
const MAX_PKT = MAX_BATCH_BYTES; // single-record limit
|
|
564
|
+
const header = {
|
|
565
|
+
srcPort: this._localPort,
|
|
566
|
+
dstPort: this._remotePort,
|
|
567
|
+
verificationTag: this._remoteTag,
|
|
568
|
+
checksum: 0,
|
|
569
|
+
};
|
|
570
|
+
let batch = [];
|
|
571
|
+
let batchSize = SCTP_COMMON_HDR;
|
|
572
|
+
const flush = () => {
|
|
573
|
+
if (batch.length === 0)
|
|
574
|
+
return;
|
|
575
|
+
const pkt = encodeSctpPacket({ header, chunks: batch });
|
|
576
|
+
this._send(pkt);
|
|
577
|
+
batch = [];
|
|
578
|
+
batchSize = SCTP_COMMON_HDR;
|
|
579
|
+
};
|
|
580
|
+
for (const chunk of chunks) {
|
|
581
|
+
// chunk overhead: 4B type/flags/length + payload (padded to 4B)
|
|
582
|
+
const chunkLen = 4 + chunk.value.length;
|
|
583
|
+
const paddedLen = chunkLen + ((4 - (chunkLen & 3)) & 3);
|
|
584
|
+
if (batch.length > 0 && batchSize + paddedLen > MAX_PKT) {
|
|
585
|
+
flush();
|
|
586
|
+
}
|
|
587
|
+
batch.push(chunk);
|
|
588
|
+
batchSize += paddedLen;
|
|
589
|
+
}
|
|
590
|
+
flush();
|
|
591
|
+
}
|
|
592
|
+
// ─── Retransmission (RFC 4960 §6.3) ──────────────────────────────────────
|
|
593
|
+
_armRetransmitTimer() {
|
|
594
|
+
if (this._retransmitTimer)
|
|
595
|
+
return;
|
|
596
|
+
this._retransmitTimer = setTimeout(() => {
|
|
597
|
+
this._retransmitTimer = undefined;
|
|
598
|
+
this._doRetransmit();
|
|
599
|
+
}, this._rto);
|
|
600
|
+
this._retransmitTimer.unref?.();
|
|
601
|
+
}
|
|
602
|
+
_clearRetransmitTimer() {
|
|
603
|
+
if (this._retransmitTimer) {
|
|
604
|
+
clearTimeout(this._retransmitTimer);
|
|
605
|
+
this._retransmitTimer = undefined;
|
|
606
|
+
}
|
|
607
|
+
}
|
|
608
|
+
/**
 * RTO timer handler. Two regimes:
 *  1. Zero-window (no pending chunks, queue non-empty, peerRwnd=0): send a
 *     probe to elicit a SACK with an updated a_rwnd, with exponential back-off.
 *  2. Normal: abandon PR-SCTP chunks past their limits, retransmit timed-out
 *     chunks, and apply RFC 4960 §7.2.3 congestion response + RTO back-off.
 */
_doRetransmit() {
    // Zero-window state: peer has no receive buffer space.
    // Send a single-fragment probe to elicit a SACK with updated a_rwnd (RFC 4960 §6.1).
    // We probe as long as peerRwnd=0 and there is queued data, regardless of progress count.
    if (this._pendingChunks.size === 0 && this._sendQueue.length > 0 && this._peerRwnd === 0) {
        // Purge stale PR-SCTP messages first
        const purged = this._purgeStalePrSctp();
        if (purged > 0) {
            console.log(`[SCTP ${this._role}] _doRetransmit: purged ${purged} stale PR-SCTP messages, queueLen=${this._sendQueue.length}`);
        }
        if (this._sendQueue.length === 0) {
            return;
        }
        // Zero-window probe: send a single 1-byte message to elicit a SACK with updated a_rwnd.
        // RFC 4960 §6.1: sender SHOULD send a probe of one segment when window is 0.
        //
        // IMPORTANT: We must NOT send fragments from the middle of a partially-sent message.
        // Sending partial/incomplete multi-fragment messages fills Flutter's reassembly buffer
        // without allowing usrsctp to deliver them to the app, so a_rwnd never recovers.
        // Instead, find the first complete single-fragment message in the queue to probe with.
        // If none exists, skip probing for now (ZWP deadlock path handles the empty-queue case).
        let probeIndex = -1;
        for (let i = 0; i < this._sendQueue.length; i++) {
            const f = this._sendQueue[i];
            if (f.beginning && f.ending) {
                probeIndex = i;
                break;
            }
        }
        if (probeIndex === -1) {
            // No single-fragment message available. Send a tiny probe on any open channel
            // to elicit a SACK, without filling Flutter's reassembly buffer.
            const probeChannel = [...this._channels.values()].find(ch => ch.state === 'open');
            if (probeChannel) {
                console.log(`[SCTP ${this._role}] zero-window probe (synthetic 1B) on ch=${probeChannel.id} queueLen=${this._sendQueue.length}`);
                const tsn = this._localTsn;
                this._localTsn = (this._localTsn + 1) >>> 0;
                // NOTE(review): the probe carries a 1-byte payload but PPID 57
                // ("empty binary") – presumably receivers discard probe content;
                // confirm against the peer stack.
                const payload = {
                    tsn,
                    streamId: probeChannel.id,
                    ssn: probeChannel._nextSsn(),
                    ppid: 57 /* Ppid.BINARY_EMPTY */,
                    userData: Buffer.from([0]),
                    beginning: true,
                    ending: true,
                    unordered: !probeChannel.ordered,
                };
                const chunk = encodeDataChunk(payload);
                this._sendChunks([chunk]);
                this._flightSize += 1;
                // No PR limits on the probe: it retries until the window reopens.
                this._pendingChunks.set(tsn, {
                    chunk, tsn, streamId: probeChannel.id, dataLen: 1,
                    sentAt: Date.now(), retransmitCount: 0, abandoned: false, inFlight: true,
                    maxRetransmits: undefined, maxPacketLifeTime: undefined,
                });
            }
        }
        else {
            // Send the complete single-fragment message as the probe
            const frag = this._sendQueue.splice(probeIndex, 1)[0];
            this._transmitFragment(frag, frag.data.length);
            console.log(`[SCTP ${this._role}] zero-window probe: sent 1 frag streamId=${frag.streamId} queueLen=${this._sendQueue.length}`);
        }
        // Arm back-off timer for next probe attempt (doubles up to 60 s)
        this._zwpDelay = Math.min(this._zwpDelay * 2, 60_000);
        this._retransmitTimer = setTimeout(() => {
            this._retransmitTimer = undefined;
            this._doRetransmit();
        }, this._zwpDelay);
        this._retransmitTimer.unref?.();
        return;
    }
    if (this._pendingChunks.size === 0)
        return;
    const now = Date.now();
    const toRetransmit = [];
    for (const [tsn, pending] of this._pendingChunks) {
        if (pending.abandoned)
            continue;
        // Check partial reliability abandonment
        if (pending.maxRetransmits !== undefined &&
            pending.retransmitCount >= pending.maxRetransmits) {
            this._abandonChunk(pending);
            this._pendingChunks.delete(tsn);
            continue;
        }
        if (pending.maxPacketLifeTime !== undefined &&
            (now - pending.sentAt) >= pending.maxPacketLifeTime) {
            this._abandonChunk(pending);
            this._pendingChunks.delete(tsn);
            continue;
        }
        if (now - pending.sentAt >= this._rto) {
            if (pending.retransmitCount >= MAX_RETRANSMITS) {
                // Association.Max.Retrans exceeded: give up on this chunk
                this._abandonChunk(pending);
                this._pendingChunks.delete(tsn);
                continue;
            }
            toRetransmit.push(pending.chunk);
            pending.retransmitCount++;
            pending.sentAt = now;
        }
    }
    if (toRetransmit.length > 0) {
        // RFC 4960 §7.2.3 – on timeout, ssthresh = max(flightSize/2, 4*PMTU); cwnd = PMTU
        this._ssthresh = Math.max(Math.floor(this._flightSize / 2), 4 * PMTU);
        this._cwnd = PMTU;
        this._partialBytesAcked = 0;
        this._sendChunks(toRetransmit);
        // Back off RTO on timeout
        this._rto = Math.min(this._rto * 2, MAX_RTO_MS);
    }
    if (this._pendingChunks.size > 0 || this._sendQueue.length > 0) {
        this._armRetransmitTimer();
    }
}
|
|
724
|
+
_abandonChunk(pending) {
|
|
725
|
+
pending.abandoned = true;
|
|
726
|
+
if (pending.inFlight) {
|
|
727
|
+
this._flightSize = Math.max(0, this._flightSize - pending.dataLen);
|
|
728
|
+
pending.inFlight = false;
|
|
729
|
+
}
|
|
730
|
+
// Notify the channel so bufferedAmount is decremented (prevents stall)
|
|
731
|
+
// Pass peerRwnd=0 to suppress bufferedamountlow event (channel is congested)
|
|
732
|
+
const channel = this._channels.get(pending.streamId);
|
|
733
|
+
if (channel)
|
|
734
|
+
channel._onAcked(pending.dataLen, 0);
|
|
735
|
+
}
|
|
736
|
+
// ─── SACK processing (RFC 4960 §6.2, §7.2) ───────────────────────────────
/**
 * Process the body of an incoming SACK chunk.
 *
 * Steps, in order:
 *  1. Parse cumulative TSN, advertised receive window (a_rwnd) and gap blocks.
 *  2. Update peerRwnd, clamping a suspected uint32 underflow from the peer to 0
 *     and resetting the zero-window-probe back-off when the window reopens.
 *  3. Release acknowledged chunks from the retransmission queue, credit the
 *     owning channels, and feed first-transmission RTT samples to _updateRto.
 *  4. Grow cwnd (slow start or congestion avoidance) on newly acked bytes.
 *  5. Trigger fast retransmit after 3 duplicate SACKs.
 *  6. Re-arm the zero-window-probe timer / send a 1-byte probe when the peer
 *     window is closed, then pump the send queue.
 *
 * @param value - raw SACK chunk value (≥12 bytes: cumTSN, a_rwnd, counts).
 */
_processSack(value) {
    // Malformed SACK (shorter than the fixed header) — ignore.
    if (value.length < 12)
        return;
    const cumTsn = value.readUInt32BE(0);
    const aRwnd = value.readUInt32BE(4); // peer's advertised receive window
    const numGapBlocks = value.readUInt16BE(8);
    const numDupTsns = value.readUInt16BE(10);
    // Update peer receive window.
    // Detect uint32 wrap: if peerRwnd was near-zero and new value is very large,
    // usrsctp (Flutter) may have sent an underflowed value. Clamp to 0 in that case.
    const ZERO_WINDOW_THRESHOLD = 4 * PMTU; // < 4 MTUs = near-zero window
    const WRAP_THRESHOLD = 0x80000000; // > 2^31 = suspiciously large
    if (this._peerRwnd < ZERO_WINDOW_THRESHOLD && aRwnd > WRAP_THRESHOLD) {
        // Looks like a uint32 underflow from the remote side – treat as 0
        console.log(`[SCTP ${this._role}] peerRwnd underflow detected: prev=${this._peerRwnd} aRwnd=${aRwnd} -> clamped to 0`);
        this._peerRwnd = 0;
    }
    else {
        if (this._peerRwnd === 0 && aRwnd > 0) {
            // Window reopened – reset zero-window probe back-off
            this._zwpDelay = 200;
            console.log(`[SCTP ${this._role}] peerRwnd reopened: ${aRwnd} bytes`);
            // Notify all channels below their threshold to resume sending.
            // Deferred via setImmediate so listeners run outside SACK handling.
            setImmediate(() => {
                for (const channel of this._channels.values()) {
                    if (channel.bufferedAmount <= channel.bufferedAmountLowThreshold) {
                        channel.emit('bufferedamountlow');
                    }
                }
            });
        }
        this._peerRwnd = aRwnd;
    }
    // Collect TSNs acknowledged by gap blocks (offsets relative to cumTsn).
    const ackedTsns = new Set();
    let off = 12;
    for (let i = 0; i < numGapBlocks && off + 4 <= value.length; i++) {
        const start = value.readUInt16BE(off);
        const end = value.readUInt16BE(off + 2);
        off += 4;
        for (let g = start; g <= end; g++) {
            ackedTsns.add((cumTsn + g) >>> 0);
        }
    }
    // Duplicate-TSN list is parsed for length but not acted upon.
    void numDupTsns;
    // Detect if cumTsn advanced (new data ACKed); otherwise count a dup SACK.
    const cumAdvanced = _tsnGT(cumTsn, this._lastCumAcked);
    if (cumAdvanced) {
        this._dupSackCount = 0;
    }
    else {
        this._dupSackCount++;
    }
    let bytesAcked = 0;
    // Notify channels and remove acked chunks from the pending (rtx) queue.
    for (const [tsn, pending] of this._pendingChunks) {
        const acked = _tsnLE(tsn, cumTsn) || ackedTsns.has(tsn);
        if (acked) {
            const channel = this._channels.get(pending.streamId);
            if (channel)
                channel._onAcked(pending.dataLen, this._peerRwnd);
            if (pending.inFlight) {
                this._flightSize = Math.max(0, this._flightSize - pending.dataLen);
                pending.inFlight = false;
                bytesAcked += pending.dataLen;
                // Update RTT estimate only for first transmissions (Karn's rule:
                // retransmitted chunks give ambiguous RTT samples).
                if (pending.retransmitCount === 0) {
                    const rtt = Date.now() - pending.sentAt;
                    this._updateRto(rtt);
                }
            }
            this._pendingChunks.delete(tsn);
        }
    }
    // Update cumulative ACK pointer
    if (cumAdvanced) {
        this._lastCumAcked = cumTsn;
    }
    // Update congestion window (RFC 4960 §7.2.1 / §7.2.2)
    if (bytesAcked > 0) {
        if (this._cwnd <= this._ssthresh) {
            // Slow start: double cwnd each RTT.
            // pion: cwnd += min(bytesAcked, cwnd) — exponential growth like TCP.
            // RFC 4960 §7.2.1 says += min(bytesAcked, PMTU) which is much slower;
            // pion departs from RFC for better throughput on high-BDP paths.
            this._cwnd += Math.min(bytesAcked, this._cwnd);
        }
        else {
            // Congestion avoidance: increase by one MTU per RTT (RFC 4960 §7.2.2).
            // partial_bytes_acked accumulates; increment cwnd by MTU when it reaches cwnd.
            this._partialBytesAcked += bytesAcked;
            if (this._partialBytesAcked >= this._cwnd && this._pendingChunks.size > 0) {
                this._partialBytesAcked -= this._cwnd;
                this._cwnd += PMTU;
            }
        }
        // Cap cwnd to soft ceiling
        if (this._cwnd > MAX_CWND)
            this._cwnd = MAX_CWND;
    }
    // Fast retransmit after 3 duplicate SACKs (RFC 4960 §7.2.4)
    if (this._dupSackCount >= 3) {
        this._dupSackCount = 0;
        this._doFastRetransmit();
    }
    // Clear timer if nothing in-flight
    if (this._pendingChunks.size === 0) {
        this._clearRetransmitTimer();
        // When peerRwnd=0 and there is still data queued, re-arm ZWP timer
        // so we continue probing. Do NOT reset _zwpDelay – let backoff grow
        // to give Flutter time to drain its receive buffer between probes.
        if (this._peerRwnd === 0 && this._sendQueue.length > 0) {
            this._retransmitTimer = setTimeout(() => {
                this._retransmitTimer = undefined;
                this._doRetransmit();
            }, this._zwpDelay);
            this._retransmitTimer.unref?.();
        }
    }
    // Detect ZWP deadlock: peerRwnd still wrapped=0, nothing in-flight, nothing queued.
    // Send a minimal DATA probe to elicit a SACK with updated a_rwnd without adding to Flutter's buffer.
    if (this._peerRwnd === 0 && this._sendQueue.length === 0 &&
        this._pendingChunks.size === 0 && this._flightSize === 0 && cumAdvanced) {
        // Find any open channel to send the probe on
        const probeChannel = [...this._channels.values()].find(ch => ch.state === 'open');
        if (probeChannel) {
            // RFC 4960 §3.3.1: DATA chunks must have at least 1 byte; 0-byte DATA is rejected by usrsctp.
            // Send a 1-byte BINARY probe to elicit a SACK with updated a_rwnd.
            console.log(`[SCTP ${this._role}] ZWP deadlock: sending 1-byte probe on ch=${probeChannel.id}`);
            this._sendData(probeChannel.id, probeChannel._nextSsn(), 57 /* Ppid.BINARY_EMPTY */, Buffer.from([0]), probeChannel.ordered, probeChannel);
        }
    }
    // Try to send more queued fragments now that window may have opened
    if (this._sendQueue.length > 0) {
        this._pump();
    }
}
|
|
876
|
+
/** Fast retransmit: resend earliest unacked chunk without waiting for RTO */
|
|
877
|
+
_doFastRetransmit() {
|
|
878
|
+
// RFC 4960 §7.2.4: ssthresh = max(flightSize/2, 4*PMTU); cwnd = ssthresh
|
|
879
|
+
this._ssthresh = Math.max(Math.floor(this._flightSize / 2), 4 * PMTU);
|
|
880
|
+
this._cwnd = this._ssthresh;
|
|
881
|
+
this._partialBytesAcked = 0;
|
|
882
|
+
// Find the lowest-TSN pending chunk and retransmit it
|
|
883
|
+
let lowestTsn = -1;
|
|
884
|
+
let lowestPending;
|
|
885
|
+
for (const [tsn, pending] of this._pendingChunks) {
|
|
886
|
+
if (!pending.abandoned && (lowestTsn === -1 || _tsnLE(tsn, lowestTsn))) {
|
|
887
|
+
lowestTsn = tsn;
|
|
888
|
+
lowestPending = pending;
|
|
889
|
+
}
|
|
890
|
+
}
|
|
891
|
+
if (lowestPending) {
|
|
892
|
+
lowestPending.retransmitCount++;
|
|
893
|
+
lowestPending.sentAt = Date.now();
|
|
894
|
+
this._sendChunks([lowestPending.chunk]);
|
|
895
|
+
}
|
|
896
|
+
}
|
|
897
|
+
/** RFC 6298 RTO update */
|
|
898
|
+
_updateRto(rttMs) {
|
|
899
|
+
if (this._srtt === undefined) {
|
|
900
|
+
this._srtt = rttMs;
|
|
901
|
+
this._rttvar = rttMs / 2;
|
|
902
|
+
}
|
|
903
|
+
else {
|
|
904
|
+
this._rttvar = (1 - BETA) * this._rttvar + BETA * Math.abs(this._srtt - rttMs);
|
|
905
|
+
this._srtt = (1 - ALPHA) * this._srtt + ALPHA * rttMs;
|
|
906
|
+
}
|
|
907
|
+
this._rto = Math.min(Math.max(Math.ceil(this._srtt + 4 * this._rttvar), MIN_RTO_MS), MAX_RTO_MS);
|
|
908
|
+
}
|
|
909
|
+
// ─── INIT / INIT-ACK / COOKIE-ECHO / COOKIE-ACK ─────────────────────────
|
|
910
|
+
_sendInit() {
|
|
911
|
+
const value = Buffer.allocUnsafe(16);
|
|
912
|
+
value.writeUInt32BE(this._localTag, 0);
|
|
913
|
+
value.writeUInt32BE(MAX_BUFFER, 4);
|
|
914
|
+
value.writeUInt16BE(1024, 8); // numOutStreams
|
|
915
|
+
value.writeUInt16BE(1024, 10); // numInStreams
|
|
916
|
+
value.writeUInt32BE(this._localTsn, 12);
|
|
917
|
+
const pkt = encodeSctpPacket({
|
|
918
|
+
header: { srcPort: this._localPort, dstPort: this._remotePort, verificationTag: 0, checksum: 0 },
|
|
919
|
+
chunks: [{ type: 1 /* ChunkType.INIT */, flags: 0, value }],
|
|
920
|
+
});
|
|
921
|
+
this._send(pkt);
|
|
922
|
+
}
|
|
923
|
+
_handleInit(chunk) {
|
|
924
|
+
console.log(`[SCTP server] _handleInit chunk.value.length=${chunk.value.length}`);
|
|
925
|
+
if (chunk.value.length < 16)
|
|
926
|
+
return;
|
|
927
|
+
if (this._state === 'new')
|
|
928
|
+
this._setState('connecting');
|
|
929
|
+
const remoteTag = chunk.value.readUInt32BE(0);
|
|
930
|
+
const remoteTsn = chunk.value.readUInt32BE(12);
|
|
931
|
+
this._remoteTag = remoteTag;
|
|
932
|
+
this._remoteCumulativeTsn = (remoteTsn - 1) >>> 0;
|
|
933
|
+
// Build cookie embedding both tags + remoteTsn
|
|
934
|
+
const cookie = Buffer.allocUnsafe(32);
|
|
935
|
+
crypto.randomBytes(20).copy(cookie, 12);
|
|
936
|
+
cookie.writeUInt32BE(remoteTag, 0);
|
|
937
|
+
cookie.writeUInt32BE(remoteTsn, 4);
|
|
938
|
+
cookie.writeUInt32BE(this._localTag, 8);
|
|
939
|
+
const ackValue = Buffer.allocUnsafe(16);
|
|
940
|
+
ackValue.writeUInt32BE(this._localTag, 0);
|
|
941
|
+
ackValue.writeUInt32BE(MAX_BUFFER, 4);
|
|
942
|
+
ackValue.writeUInt16BE(1024, 8);
|
|
943
|
+
ackValue.writeUInt16BE(1024, 10);
|
|
944
|
+
ackValue.writeUInt32BE(this._localTsn, 12);
|
|
945
|
+
const cookieParam = Buffer.allocUnsafe(4 + 32);
|
|
946
|
+
cookieParam.writeUInt16BE(0x0007, 0);
|
|
947
|
+
cookieParam.writeUInt16BE(4 + 32, 2);
|
|
948
|
+
cookie.copy(cookieParam, 4);
|
|
949
|
+
const pkt = encodeSctpPacket({
|
|
950
|
+
header: { srcPort: this._localPort, dstPort: this._remotePort, verificationTag: remoteTag, checksum: 0 },
|
|
951
|
+
chunks: [{ type: 2 /* ChunkType.INIT_ACK */, flags: 0, value: Buffer.concat([ackValue, cookieParam]) }],
|
|
952
|
+
});
|
|
953
|
+
this._send(pkt);
|
|
954
|
+
}
|
|
955
|
+
_handleInitAck(chunk) {
|
|
956
|
+
if (chunk.value.length < 16)
|
|
957
|
+
return;
|
|
958
|
+
const remoteTag = chunk.value.readUInt32BE(0);
|
|
959
|
+
const remoteTsn = chunk.value.readUInt32BE(12);
|
|
960
|
+
this._remoteTag = remoteTag;
|
|
961
|
+
this._remoteCumulativeTsn = (remoteTsn - 1) >>> 0;
|
|
962
|
+
// Find cookie parameter
|
|
963
|
+
let cookie;
|
|
964
|
+
let off = 16;
|
|
965
|
+
while (off + 4 <= chunk.value.length) {
|
|
966
|
+
const paramType = chunk.value.readUInt16BE(off);
|
|
967
|
+
const paramLen = chunk.value.readUInt16BE(off + 2);
|
|
968
|
+
if (paramLen < 4)
|
|
969
|
+
break;
|
|
970
|
+
if (paramType === 0x0007) {
|
|
971
|
+
cookie = Buffer.from(chunk.value.subarray(off + 4, off + paramLen));
|
|
972
|
+
}
|
|
973
|
+
off += Math.ceil(paramLen / 4) * 4;
|
|
974
|
+
}
|
|
975
|
+
if (!cookie)
|
|
976
|
+
return;
|
|
977
|
+
this._sendChunks([{ type: 10 /* ChunkType.COOKIE_ECHO */, flags: 0, value: cookie }]);
|
|
978
|
+
}
|
|
979
|
+
_handleCookieEcho(_chunk) {
|
|
980
|
+
console.log('[SCTP server] _handleCookieEcho → sending COOKIE_ACK');
|
|
981
|
+
// Accept cookie (in production: verify HMAC)
|
|
982
|
+
this._sendChunks([{ type: 11 /* ChunkType.COOKIE_ACK */, flags: 0, value: Buffer.alloc(0) }]);
|
|
983
|
+
this._onConnected();
|
|
984
|
+
}
|
|
985
|
+
/** Client side: COOKIE-ACK ends the 4-way handshake — association is up. */
_handleCookieAck() {
    this._onConnected();
}
|
|
988
|
+
_onConnected() {
|
|
989
|
+
this._setState('connected');
|
|
990
|
+
const resolve = this._connectResolve;
|
|
991
|
+
this._connectResolve = undefined;
|
|
992
|
+
this._connectReject = undefined;
|
|
993
|
+
resolve?.();
|
|
994
|
+
}
|
|
995
|
+
// ─── DATA / SACK ─────────────────────────────────────────────────────────
|
|
996
|
+
_handleData(chunk) {
|
|
997
|
+
let payload;
|
|
998
|
+
try {
|
|
999
|
+
payload = decodeDataChunk(chunk);
|
|
1000
|
+
}
|
|
1001
|
+
catch {
|
|
1002
|
+
return;
|
|
1003
|
+
}
|
|
1004
|
+
const tsn = payload.tsn;
|
|
1005
|
+
// Duplicate detection
|
|
1006
|
+
if (_tsnLE(tsn, this._remoteCumulativeTsn)) {
|
|
1007
|
+
// Already received — send SACK to acknowledge
|
|
1008
|
+
this._scheduleSack();
|
|
1009
|
+
return;
|
|
1010
|
+
}
|
|
1011
|
+
// Record received TSN
|
|
1012
|
+
this._receivedTsns.add(tsn);
|
|
1013
|
+
// Advance cumulative TSN as far as possible
|
|
1014
|
+
let newCum = this._remoteCumulativeTsn;
|
|
1015
|
+
while (this._receivedTsns.has((newCum + 1) >>> 0)) {
|
|
1016
|
+
newCum = (newCum + 1) >>> 0;
|
|
1017
|
+
this._receivedTsns.delete(newCum);
|
|
1018
|
+
}
|
|
1019
|
+
this._remoteCumulativeTsn = newCum;
|
|
1020
|
+
this._scheduleSack();
|
|
1021
|
+
// Reassemble and deliver
|
|
1022
|
+
this._reassembleAndDeliver(payload);
|
|
1023
|
+
}
|
|
1024
|
+
_reassembleAndDeliver(payload) {
|
|
1025
|
+
const { tsn, streamId, ssn, ppid, userData, beginning, ending, unordered } = payload;
|
|
1026
|
+
// Single-fragment message (most common case)
|
|
1027
|
+
if (beginning && ending) {
|
|
1028
|
+
this._deliverToStream(streamId, ssn, ppid, userData, unordered);
|
|
1029
|
+
return;
|
|
1030
|
+
}
|
|
1031
|
+
// Multi-fragment: accumulate in reassembly buffer
|
|
1032
|
+
const key = `${streamId}:${ssn}`;
|
|
1033
|
+
let entry = this._reassembly.get(key);
|
|
1034
|
+
if (!entry) {
|
|
1035
|
+
entry = {
|
|
1036
|
+
streamId, ssn, ppid, unordered,
|
|
1037
|
+
fragments: new Map(),
|
|
1038
|
+
firstTsn: tsn,
|
|
1039
|
+
lastTsn: undefined,
|
|
1040
|
+
totalSize: 0,
|
|
1041
|
+
};
|
|
1042
|
+
this._reassembly.set(key, entry);
|
|
1043
|
+
}
|
|
1044
|
+
entry.fragments.set(tsn, userData);
|
|
1045
|
+
entry.totalSize += userData.length;
|
|
1046
|
+
if (ending)
|
|
1047
|
+
entry.lastTsn = tsn;
|
|
1048
|
+
// Check if we have all fragments (contiguous TSNs from first to last)
|
|
1049
|
+
if (entry.lastTsn === undefined)
|
|
1050
|
+
return;
|
|
1051
|
+
// Verify all TSNs in range are present
|
|
1052
|
+
let complete = true;
|
|
1053
|
+
let count = 0;
|
|
1054
|
+
for (let t = entry.firstTsn;; t = (t + 1) >>> 0) {
|
|
1055
|
+
if (!entry.fragments.has(t)) {
|
|
1056
|
+
complete = false;
|
|
1057
|
+
break;
|
|
1058
|
+
}
|
|
1059
|
+
if (t === entry.lastTsn)
|
|
1060
|
+
break;
|
|
1061
|
+
if (++count > 1_000_000) {
|
|
1062
|
+
complete = false;
|
|
1063
|
+
break;
|
|
1064
|
+
} // safety: 1M fragments max
|
|
1065
|
+
}
|
|
1066
|
+
if (!complete)
|
|
1067
|
+
return;
|
|
1068
|
+
// Reassemble in TSN order
|
|
1069
|
+
const parts = [];
|
|
1070
|
+
for (let t = entry.firstTsn;; t = (t + 1) >>> 0) {
|
|
1071
|
+
parts.push(entry.fragments.get(t));
|
|
1072
|
+
if (t === entry.lastTsn)
|
|
1073
|
+
break;
|
|
1074
|
+
}
|
|
1075
|
+
this._reassembly.delete(key);
|
|
1076
|
+
const data = Buffer.concat(parts);
|
|
1077
|
+
this._deliverToStream(streamId, ssn, ppid, data, unordered);
|
|
1078
|
+
}
|
|
1079
|
+
_deliverToStream(streamId, ssn, ppid, data, unordered) {
|
|
1080
|
+
// DCEP messages are dispatched immediately.
|
|
1081
|
+
// Per RFC 8832 §6.6, DCEP messages use SSN=0. We must advance the
|
|
1082
|
+
// per-stream receive SSN counter so that subsequent user messages
|
|
1083
|
+
// (which start at SSN=1 after the DCEP exchange) are delivered in order.
|
|
1084
|
+
if (ppid === 50 /* Ppid.DCEP */) {
|
|
1085
|
+
if (!unordered) {
|
|
1086
|
+
let rxState = this._streamReceive.get(streamId);
|
|
1087
|
+
if (!rxState) {
|
|
1088
|
+
rxState = { expectedSsn: 0, buffer: new Map() };
|
|
1089
|
+
this._streamReceive.set(streamId, rxState);
|
|
1090
|
+
}
|
|
1091
|
+
// Advance past SSN=0 used by DCEP_OPEN/ACK so we expect SSN=1 next
|
|
1092
|
+
if (ssn === rxState.expectedSsn) {
|
|
1093
|
+
rxState.expectedSsn = (rxState.expectedSsn + 1) & 0xffff;
|
|
1094
|
+
}
|
|
1095
|
+
}
|
|
1096
|
+
this._handleDcep(streamId, data);
|
|
1097
|
+
return;
|
|
1098
|
+
}
|
|
1099
|
+
const channel = this._channels.get(streamId);
|
|
1100
|
+
if (!channel) {
|
|
1101
|
+
return;
|
|
1102
|
+
}
|
|
1103
|
+
if (unordered) {
|
|
1104
|
+
channel._deliver(ppid, data);
|
|
1105
|
+
return;
|
|
1106
|
+
}
|
|
1107
|
+
// Ordered: enforce SSN ordering
|
|
1108
|
+
let rxState = this._streamReceive.get(streamId);
|
|
1109
|
+
if (!rxState) {
|
|
1110
|
+
rxState = { expectedSsn: 0, buffer: new Map() };
|
|
1111
|
+
this._streamReceive.set(streamId, rxState);
|
|
1112
|
+
}
|
|
1113
|
+
if (ssn === rxState.expectedSsn) {
|
|
1114
|
+
rxState.expectedSsn = (rxState.expectedSsn + 1) & 0xffff;
|
|
1115
|
+
channel._deliver(ppid, data);
|
|
1116
|
+
// Deliver any buffered messages that can now be delivered in order
|
|
1117
|
+
let next = rxState.expectedSsn;
|
|
1118
|
+
while (rxState.buffer.has(next)) {
|
|
1119
|
+
const buffered = rxState.buffer.get(next);
|
|
1120
|
+
rxState.buffer.delete(next);
|
|
1121
|
+
rxState.expectedSsn = (next + 1) & 0xffff;
|
|
1122
|
+
channel._deliver(buffered.ppid, buffered.data);
|
|
1123
|
+
next = rxState.expectedSsn;
|
|
1124
|
+
}
|
|
1125
|
+
}
|
|
1126
|
+
else if (_ssnGT(ssn, rxState.expectedSsn)) {
|
|
1127
|
+
// Buffer out-of-order message
|
|
1128
|
+
rxState.buffer.set(ssn, { ppid, data });
|
|
1129
|
+
}
|
|
1130
|
+
// else: old SSN (duplicate) — discard
|
|
1131
|
+
}
|
|
1132
|
+
_scheduleSack() {
|
|
1133
|
+
this._dataPacketsSinceAck++;
|
|
1134
|
+
// RFC 4960 §6.2: A receiver SHOULD send a SACK for every second
|
|
1135
|
+
// DATA packet received. pion implements this as: if ackState was already
|
|
1136
|
+
// "delay" (i.e. we already deferred once), send immediately.
|
|
1137
|
+
// We mirror that: on odd packets start the delayed timer; on even packets
|
|
1138
|
+
// (or when the timer already fired once) send immediately.
|
|
1139
|
+
if (this._dataPacketsSinceAck >= 2) {
|
|
1140
|
+
// Every-other-packet: send SACK immediately
|
|
1141
|
+
if (this._sackTimer) {
|
|
1142
|
+
clearTimeout(this._sackTimer);
|
|
1143
|
+
this._sackTimer = undefined;
|
|
1144
|
+
}
|
|
1145
|
+
this._sendSack();
|
|
1146
|
+
return;
|
|
1147
|
+
}
|
|
1148
|
+
// First packet since last SACK: arm the delayed timer
|
|
1149
|
+
if (this._sackTimer)
|
|
1150
|
+
return;
|
|
1151
|
+
this._sackTimer = setTimeout(() => {
|
|
1152
|
+
this._sackTimer = undefined;
|
|
1153
|
+
this._sendSack();
|
|
1154
|
+
}, SACK_DELAY_MS);
|
|
1155
|
+
this._sackTimer.unref?.();
|
|
1156
|
+
}
|
|
1157
|
+
_sendSack() {
|
|
1158
|
+
this._dataPacketsSinceAck = 0; // reset every-other-packet counter
|
|
1159
|
+
// Build gap ACK blocks from _receivedTsns
|
|
1160
|
+
const gaps = this._buildGapBlocks();
|
|
1161
|
+
const baseLen = 12;
|
|
1162
|
+
const value = Buffer.allocUnsafe(baseLen + gaps.length * 4);
|
|
1163
|
+
value.writeUInt32BE(this._remoteCumulativeTsn, 0);
|
|
1164
|
+
value.writeUInt32BE(MAX_BUFFER, 4); // advertise our full receive window
|
|
1165
|
+
value.writeUInt16BE(gaps.length, 8);
|
|
1166
|
+
value.writeUInt16BE(0, 10); // no dup TSNs
|
|
1167
|
+
let off = 12;
|
|
1168
|
+
for (const [start, end] of gaps) {
|
|
1169
|
+
value.writeUInt16BE(start, off);
|
|
1170
|
+
value.writeUInt16BE(end, off + 2);
|
|
1171
|
+
off += 4;
|
|
1172
|
+
}
|
|
1173
|
+
this._sendChunks([{ type: 3 /* ChunkType.SACK */, flags: 0, value }]);
|
|
1174
|
+
}
|
|
1175
|
+
_buildGapBlocks() {
|
|
1176
|
+
if (this._receivedTsns.size === 0)
|
|
1177
|
+
return [];
|
|
1178
|
+
const sorted = [...this._receivedTsns].map(tsn => {
|
|
1179
|
+
return _tsnDiff(tsn, this._remoteCumulativeTsn);
|
|
1180
|
+
}).filter(d => d > 0).sort((a, b) => a - b);
|
|
1181
|
+
const blocks = [];
|
|
1182
|
+
let blockStart = -1;
|
|
1183
|
+
let blockEnd = -1;
|
|
1184
|
+
for (const offset of sorted) {
|
|
1185
|
+
if (blockStart === -1) {
|
|
1186
|
+
blockStart = offset;
|
|
1187
|
+
blockEnd = offset;
|
|
1188
|
+
}
|
|
1189
|
+
else if (offset === blockEnd + 1) {
|
|
1190
|
+
blockEnd = offset;
|
|
1191
|
+
}
|
|
1192
|
+
else {
|
|
1193
|
+
blocks.push([blockStart, blockEnd]);
|
|
1194
|
+
blockStart = offset;
|
|
1195
|
+
blockEnd = offset;
|
|
1196
|
+
}
|
|
1197
|
+
}
|
|
1198
|
+
if (blockStart !== -1)
|
|
1199
|
+
blocks.push([blockStart, blockEnd]);
|
|
1200
|
+
return blocks;
|
|
1201
|
+
}
|
|
1202
|
+
// ─── FORWARD-TSN (RFC 3758) ───────────────────────────────────────────────
|
|
1203
|
+
_sendForwardTsn() {
|
|
1204
|
+
const newCumTsn = (this._localTsn - 1) >>> 0;
|
|
1205
|
+
const value = Buffer.allocUnsafe(4);
|
|
1206
|
+
value.writeUInt32BE(newCumTsn, 0);
|
|
1207
|
+
this._sendChunks([{ type: 192 /* ChunkType.FORWARD_TSN */, flags: 0, value }]);
|
|
1208
|
+
}
|
|
1209
|
+
_handleForwardTsn(chunk) {
|
|
1210
|
+
if (chunk.value.length < 4)
|
|
1211
|
+
return;
|
|
1212
|
+
const newCumTsn = chunk.value.readUInt32BE(0);
|
|
1213
|
+
if (_tsnGT(newCumTsn, this._remoteCumulativeTsn)) {
|
|
1214
|
+
for (let t = (this._remoteCumulativeTsn + 1) >>> 0;; t = (t + 1) >>> 0) {
|
|
1215
|
+
this._receivedTsns.delete(t);
|
|
1216
|
+
if (t === newCumTsn)
|
|
1217
|
+
break;
|
|
1218
|
+
if (_tsnGT(t, newCumTsn))
|
|
1219
|
+
break; // safety
|
|
1220
|
+
}
|
|
1221
|
+
this._remoteCumulativeTsn = newCumTsn;
|
|
1222
|
+
}
|
|
1223
|
+
this._scheduleSack();
|
|
1224
|
+
}
|
|
1225
|
+
// ─── DCEP ─────────────────────────────────────────────────────────────────
|
|
1226
|
+
_handleDcep(streamId, data) {
|
|
1227
|
+
let msg;
|
|
1228
|
+
console.log(`[SCTP ${this._role}] _handleDcep streamId=${streamId} data[0]=0x${data[0]?.toString(16)}`);
|
|
1229
|
+
try {
|
|
1230
|
+
msg = decodeDcep(data);
|
|
1231
|
+
}
|
|
1232
|
+
catch {
|
|
1233
|
+
return;
|
|
1234
|
+
}
|
|
1235
|
+
if (msg.type === 3 /* DcepType.DATA_CHANNEL_OPEN */) {
|
|
1236
|
+
this._handleDcepOpen(streamId, msg);
|
|
1237
|
+
}
|
|
1238
|
+
else if (msg.type === 2 /* DcepType.DATA_CHANNEL_ACK */) {
|
|
1239
|
+
this._handleDcepAck(streamId);
|
|
1240
|
+
}
|
|
1241
|
+
}
|
|
1242
|
+
_handleDcepOpen(streamId, msg) {
|
|
1243
|
+
const ordered = msg.channelType === 0 /* DcepChannelType.RELIABLE */ ||
|
|
1244
|
+
msg.channelType === 1 /* DcepChannelType.PARTIAL_RELIABLE_REXMIT */ ||
|
|
1245
|
+
msg.channelType === 2 /* DcepChannelType.PARTIAL_RELIABLE_TIMED */;
|
|
1246
|
+
const info = {
|
|
1247
|
+
id: streamId,
|
|
1248
|
+
label: msg.label,
|
|
1249
|
+
protocol: msg.protocol,
|
|
1250
|
+
ordered,
|
|
1251
|
+
maxPacketLifeTime: msg.channelType === 2 /* DcepChannelType.PARTIAL_RELIABLE_TIMED */
|
|
1252
|
+
? msg.reliabilityParam : undefined,
|
|
1253
|
+
maxRetransmits: msg.channelType === 1 /* DcepChannelType.PARTIAL_RELIABLE_REXMIT */
|
|
1254
|
+
? msg.reliabilityParam : undefined,
|
|
1255
|
+
state: 'open',
|
|
1256
|
+
negotiated: false,
|
|
1257
|
+
};
|
|
1258
|
+
const channel = new SctpDataChannel(this, info);
|
|
1259
|
+
this._channels.set(streamId, channel);
|
|
1260
|
+
this._sendDcepAck(streamId);
|
|
1261
|
+
channel._open();
|
|
1262
|
+
this.emit('datachannel', channel);
|
|
1263
|
+
}
|
|
1264
|
+
_handleDcepAck(streamId) {
|
|
1265
|
+
const channel = this._channels.get(streamId);
|
|
1266
|
+
console.log(`[SCTP ${this._role}] DCEP_ACK on streamId=${streamId} channel=${channel?.label ?? 'NOT FOUND'}`);
|
|
1267
|
+
if (channel)
|
|
1268
|
+
channel._open();
|
|
1269
|
+
}
|
|
1270
|
+
_sendDcepOpen(id, info) {
|
|
1271
|
+
let channelType = 0 /* DcepChannelType.RELIABLE */;
|
|
1272
|
+
let reliabilityParam = 0;
|
|
1273
|
+
if (!info.ordered) {
|
|
1274
|
+
if (info.maxRetransmits !== undefined) {
|
|
1275
|
+
channelType = 129 /* DcepChannelType.PARTIAL_RELIABLE_REXMIT_UNORDERED */;
|
|
1276
|
+
reliabilityParam = info.maxRetransmits;
|
|
1277
|
+
}
|
|
1278
|
+
else if (info.maxPacketLifeTime !== undefined) {
|
|
1279
|
+
channelType = 130 /* DcepChannelType.PARTIAL_RELIABLE_TIMED_UNORDERED */;
|
|
1280
|
+
reliabilityParam = info.maxPacketLifeTime;
|
|
1281
|
+
}
|
|
1282
|
+
else {
|
|
1283
|
+
channelType = 128 /* DcepChannelType.RELIABLE_UNORDERED */;
|
|
1284
|
+
}
|
|
1285
|
+
}
|
|
1286
|
+
else {
|
|
1287
|
+
if (info.maxRetransmits !== undefined) {
|
|
1288
|
+
channelType = 1 /* DcepChannelType.PARTIAL_RELIABLE_REXMIT */;
|
|
1289
|
+
reliabilityParam = info.maxRetransmits;
|
|
1290
|
+
}
|
|
1291
|
+
else if (info.maxPacketLifeTime !== undefined) {
|
|
1292
|
+
channelType = 2 /* DcepChannelType.PARTIAL_RELIABLE_TIMED */;
|
|
1293
|
+
reliabilityParam = info.maxPacketLifeTime;
|
|
1294
|
+
}
|
|
1295
|
+
}
|
|
1296
|
+
const dcepBuf = encodeDcepOpen({
|
|
1297
|
+
type: 3 /* DcepType.DATA_CHANNEL_OPEN */,
|
|
1298
|
+
channelType, priority: 0, reliabilityParam,
|
|
1299
|
+
label: info.label, protocol: info.protocol,
|
|
1300
|
+
});
|
|
1301
|
+
const tsn = this._localTsn;
|
|
1302
|
+
this._localTsn = (this._localTsn + 1) >>> 0;
|
|
1303
|
+
console.log(`[SCTP ${this._role}] sending DCEP_OPEN streamId=${id} label="${info.label}" channelType=${channelType}`);
|
|
1304
|
+
const dataChunk = encodeDataChunk({
|
|
1305
|
+
tsn, streamId: id, ssn: 0, ppid: 50 /* Ppid.DCEP */,
|
|
1306
|
+
userData: dcepBuf, beginning: true, ending: true, unordered: false,
|
|
1307
|
+
});
|
|
1308
|
+
this._sendChunks([dataChunk]);
|
|
1309
|
+
}
|
|
1310
|
+
_sendDcepAck(streamId) {
|
|
1311
|
+
const tsn = this._localTsn;
|
|
1312
|
+
this._localTsn = (this._localTsn + 1) >>> 0;
|
|
1313
|
+
const dataChunk = encodeDataChunk({
|
|
1314
|
+
tsn, streamId, ssn: 0, ppid: 50 /* Ppid.DCEP */,
|
|
1315
|
+
userData: encodeDcepAck(), beginning: true, ending: true, unordered: false,
|
|
1316
|
+
});
|
|
1317
|
+
this._sendChunks([dataChunk]);
|
|
1318
|
+
}
|
|
1319
|
+
// ─── Chunk dispatcher ────────────────────────────────────────────────────
|
|
1320
|
+
/**
 * Dispatch a single decoded SCTP chunk by type.
 *
 * Handshake chunks are gated by role (client vs server); DATA and SACK are
 * only processed once the association is 'connected'. SHUTDOWN / ABORT tear
 * down all channels. Unknown chunk types are silently ignored.
 *
 * @param chunk          - decoded chunk { type, flags, value }.
 * @param incomingVerTag - verification tag from the packet's common header;
 *                         used to validate INIT-ACK against our local tag.
 */
_handleChunk(chunk, incomingVerTag) {
    switch (chunk.type) {
        case 1 /* ChunkType.INIT */:
            // Only the server side answers INIT.
            if (this._role === 'server')
                this._handleInit(chunk);
            break;
        case 2 /* ChunkType.INIT_ACK */:
            // Must be addressed to us: tag in the header equals our local tag.
            if (this._role === 'client' && incomingVerTag === this._localTag) {
                this._handleInitAck(chunk);
            }
            break;
        case 10 /* ChunkType.COOKIE_ECHO */:
            if (this._role === 'server')
                this._handleCookieEcho(chunk);
            break;
        case 11 /* ChunkType.COOKIE_ACK */:
            if (this._role === 'client')
                this._handleCookieAck();
            break;
        case 0 /* ChunkType.DATA */:
            if (this._state === 'connected')
                this._handleData(chunk);
            break;
        case 3 /* ChunkType.SACK */:
            if (this._state === 'connected')
                this._processSack(chunk.value);
            break;
        case 192 /* ChunkType.FORWARD_TSN */:
            this._handleForwardTsn(chunk);
            break;
        case 4 /* ChunkType.HEARTBEAT */:
            // Echo the heartbeat info back in a HEARTBEAT-ACK (RFC 4960 §8.3).
            this._sendChunks([{ type: 5 /* ChunkType.HEARTBEAT_ACK */, flags: 0, value: Buffer.from(chunk.value) }]);
            break;
        case 7 /* ChunkType.SHUTDOWN */:
            // Graceful shutdown: ACK, close every channel, mark association closed.
            this._sendChunks([{ type: 8 /* ChunkType.SHUTDOWN_ACK */, flags: 0, value: Buffer.alloc(0) }]);
            this._setState('closed');
            for (const ch of this._channels.values())
                ch._close();
            this._channels.clear();
            break;
        case 8 /* ChunkType.SHUTDOWN_ACK */:
            this._sendChunks([{ type: 14 /* ChunkType.SHUTDOWN_COMPLETE */, flags: 0, value: Buffer.alloc(0) }]);
            this._setState('closed');
            break;
        case 6 /* ChunkType.ABORT */:
            // Peer aborted the association: fail it and close all channels.
            console.log(`[SCTP ${this._role}] ABORT received! value.length=${chunk.value.length} hex=${chunk.value.slice(0, 16).toString('hex')} channels=${this._channels.size} state=${this._state}`);
            this._setState('failed');
            for (const ch of this._channels.values())
                ch._close();
            this._channels.clear();
            break;
        default:
            break;
    }
}
|
|
1375
|
+
}
|
|
1376
|
+
// ─── TSN arithmetic helpers (RFC 4960 §3.3.3 wrapping comparison) ──────────
|
|
1377
|
+
/** Returns true if a ≤ b in TSN space (32-bit wrapping, RFC 4960 §3.3.3). */
function _tsnLE(a, b) {
    const forwardDistance = (b - a) >>> 0;
    return forwardDistance < 0x80000000;
}
|
|
1381
|
+
/** Returns true if a > b in TSN space (32-bit wrapping). */
function _tsnGT(a, b) {
    if (a === b)
        return false;
    // b ≤ a in wrapping arithmetic: forward distance from b to a < 2^31.
    return ((a - b) >>> 0) < 0x80000000;
}
|
|
1385
|
+
/** Signed distance: how many steps b is ahead of a in wrapping TSN space. */
function _tsnDiff(b, a) {
    const forward = (b - a) >>> 0;
    if (forward < 0x80000000)
        return forward;
    return -(0x100000000 - forward);
}
|
|
1390
|
+
/** Returns true if a > b in SSN space (16-bit wrapping serial arithmetic). */
function _ssnGT(a, b) {
    if (a === b)
        return false;
    return ((a - b) & 0xffff) < 0x8000;
}
|
|
1394
|
+
//# sourceMappingURL=association.js.map
|