aedes 0.51.2 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.github/actions/sticky-pr-comment/action.yml +55 -0
- package/.github/workflows/benchmark-compare-serial.yml +60 -0
- package/.github/workflows/ci.yml +12 -17
- package/.release-it.json +18 -0
- package/.taprc +15 -6
- package/README.md +6 -4
- package/aedes.d.ts +0 -6
- package/aedes.js +270 -238
- package/benchmarks/README.md +33 -0
- package/benchmarks/pingpong.js +94 -25
- package/benchmarks/receiver.js +77 -0
- package/benchmarks/report.js +150 -0
- package/benchmarks/runBenchmarks.js +118 -0
- package/benchmarks/sender.js +86 -0
- package/benchmarks/server.js +19 -18
- package/checkVersion.js +20 -0
- package/docs/Aedes.md +66 -8
- package/docs/Client.md +3 -4
- package/docs/Examples.md +39 -22
- package/docs/MIGRATION.md +50 -0
- package/eslint.config.js +8 -0
- package/example.js +51 -40
- package/examples/clusters/index.js +28 -23
- package/examples/clusters/package.json +10 -6
- package/lib/client.js +405 -306
- package/lib/handlers/connect.js +42 -38
- package/lib/handlers/index.js +9 -11
- package/lib/handlers/ping.js +2 -3
- package/lib/handlers/puback.js +5 -5
- package/lib/handlers/publish.js +29 -14
- package/lib/handlers/pubrec.js +9 -17
- package/lib/handlers/pubrel.js +34 -25
- package/lib/handlers/subscribe.js +54 -43
- package/lib/handlers/unsubscribe.js +16 -19
- package/lib/qos-packet.js +14 -17
- package/lib/utils.js +5 -12
- package/lib/write.js +4 -5
- package/package.json +134 -136
- package/test/auth.js +468 -804
- package/test/basic.js +613 -575
- package/test/bridge.js +44 -40
- package/test/client-pub-sub.js +531 -504
- package/test/close_socket_by_other_party.js +137 -102
- package/test/connect.js +487 -484
- package/test/drain-timeout.js +593 -0
- package/test/drain-toxiproxy.js +620 -0
- package/test/events.js +174 -144
- package/test/helper.js +351 -73
- package/test/keep-alive.js +40 -67
- package/test/meta.js +257 -210
- package/test/not-blocking.js +93 -197
- package/test/qos1.js +464 -554
- package/test/qos2.js +308 -393
- package/test/regr-21.js +39 -21
- package/test/require.cjs +22 -0
- package/test/retain.js +349 -398
- package/test/topics.js +176 -183
- package/test/types/aedes.test-d.ts +4 -8
- package/test/will.js +310 -428
- package/types/instance.d.ts +40 -35
- package/types/packet.d.ts +10 -10
- package/.coveralls.yml +0 -1
- package/benchmarks/bombing.js +0 -34
- package/benchmarks/bombingQoS1.js +0 -36
- package/benchmarks/throughputCounter.js +0 -23
- package/benchmarks/throughputCounterQoS1.js +0 -33
- package/types/.eslintrc.json +0 -47
|
@@ -0,0 +1,593 @@
|
|
|
1
|
+
/**
 * Drain Timeout Tests
 *
 * Tests for the drainTimeout feature that protects against slow/frozen clients
 * blocking message delivery to all other subscribers.
 *
 * Includes:
 * - Unit tests with mocked streams (reliable, deterministic)
 * - E2E tests with real TCP using readStop() (proves the fix works)
 *
 * Run: node --test test/drain-timeout.js
 */

import { test } from 'node:test'
import { createServer } from 'node:net'
import { setTimeout as delay } from 'node:timers/promises'
import mqtt from 'mqtt'
import { Aedes } from '../aedes.js'
import { shouldSkipOnWindowsAndMac } from './helper.js'

// Check if we should skip tests on Windows/macOS (readStop not supported)
const shouldSkip = shouldSkipOnWindowsAndMac()

// Dynamic (top-level await) imports: only used by the mocked-stream helpers below.
const { duplexPair } = await import('node:stream')
const { default: mqttPacket } = await import('mqtt-packet')
|
|
26
|
+
|
|
27
|
+
// ============================================================================
|
|
28
|
+
// UNIT TESTS - Mocked Streams
|
|
29
|
+
// ============================================================================
|
|
30
|
+
|
|
31
|
+
/**
 * Helper: Create a mocked MQTT client connection.
 *
 * Wires one side of a duplex socket pair into the broker and performs the
 * MQTT CONNECT handshake from the other side. The returned handle lets a
 * test block broker-side writes (simulating a full TCP send buffer that
 * never drains), inspect packets the client received, subscribe to a topic,
 * and tear the pair down.
 */
function createMockedClient (broker, clientId) {
  const [clientSide, serverSide] = duplexPair()

  // Allow controlling write behavior
  let writesBlocked = false
  const realWrite = serverSide.write.bind(serverSide)

  serverSide.write = function (...args) {
    // Simulate full buffer - return false and never emit drain
    if (writesBlocked) {
      return false
    }
    return realWrite(...args)
  }

  // Track received packets
  const receivedPackets = []
  const parser = mqttPacket.parser()
  parser.on('packet', (packet) => receivedPackets.push(packet))
  clientSide.on('data', (chunk) => parser.parse(chunk))

  // Track connection state
  let connectionGone = false
  const markGone = () => { connectionGone = true }
  serverSide.on('close', markGone)
  serverSide.on('error', markGone)

  broker.handle(serverSide)

  // Encode and push a packet from the client side of the pair.
  const send = (packet) => clientSide.write(mqttPacket.generate(packet))

  // Send CONNECT
  send({
    cmd: 'connect',
    protocolId: 'MQTT',
    protocolVersion: 4,
    clean: true,
    clientId,
    keepalive: 0
  })

  return {
    clientSide,
    serverSide,
    receivedPackets,
    setBlocked: (blocked) => { writesBlocked = blocked },
    isDestroyed: () => connectionGone,
    subscribe: (topic) => {
      send({
        cmd: 'subscribe',
        messageId: 1,
        subscriptions: [{ topic, qos: 0 }]
      })
    },
    destroy: () => {
      clientSide.destroy()
      serverSide.destroy()
    }
  }
}
|
|
91
|
+
|
|
92
|
+
test('UNIT: write.js blocks indefinitely when stream.write returns false', { skip: shouldSkip }, async (t) => {
  // This test verifies the bug behavior WITHOUT the fix.
  // When stream.write() returns false and drain never fires,
  // the broker blocks indefinitely waiting for drain.

  const broker = await Aedes.createBroker({ drainTimeout: 0 }) // No timeout = buggy behavior
  t.after(() => broker.close())

  const subscriber = createMockedClient(broker, 'blocked-client')
  await delay(50)
  subscriber.subscribe('test/#')
  await delay(50)

  // Block writes to simulate full buffer
  subscriber.setBlocked(true)

  // Publish - should block forever
  let publishDone = false
  const pending = new Promise((resolve) => {
    const packet = { topic: 'test/blocked', payload: Buffer.from('test') }
    broker.publish(packet, () => {
      publishDone = true
      resolve('completed')
    })
  })

  const outcome = await Promise.race([pending, delay(2000, 'timeout')])

  subscriber.destroy()

  // STRICT ASSERTION: Must timeout to prove the bug
  t.assert.strictEqual(outcome, 'timeout', 'broker must block when write returns false')
  t.assert.strictEqual(publishDone, false, 'publish callback must not fire')
})
|
|
128
|
+
|
|
129
|
+
test('UNIT: drainTimeout disconnects slow client instead of blocking', { skip: shouldSkip }, async (t) => {
  // This test verifies that with drainTimeout enabled,
  // the broker disconnects slow clients instead of waiting forever.

  const DRAIN_TIMEOUT_MS = 300
  const broker = await Aedes.createBroker({ drainTimeout: DRAIN_TIMEOUT_MS })
  t.after(() => broker.close())

  const subscriber = createMockedClient(broker, 'drain-fix-test')
  await delay(50)
  subscriber.subscribe('fix/test')
  await delay(50)

  // Verify handshake completed
  const findPacket = (cmd) => subscriber.receivedPackets.find(p => p.cmd === cmd)
  t.assert.ok(findPacket('connack'), 'should receive CONNACK')
  t.assert.ok(findPacket('suback'), 'should receive SUBACK')

  // Block writes to simulate backpressure
  subscriber.setBlocked(true)

  // Publish - should complete after drainTimeout kicks the client
  let publishDone = false
  const pending = new Promise((resolve) => {
    broker.publish({ topic: 'fix/test', payload: Buffer.alloc(1024, 'X') }, () => {
      publishDone = true
      resolve('published')
    })
  })

  const outcome = await Promise.race([
    pending,
    delay(DRAIN_TIMEOUT_MS + 500, 'timeout')
  ])

  subscriber.destroy()

  // STRICT ASSERTIONS: Fix must work
  t.assert.strictEqual(outcome, 'published', 'publish must complete after drain timeout')
  t.assert.strictEqual(publishDone, true, 'publish callback must fire')
  t.assert.ok(subscriber.isDestroyed(), 'slow client connection must be destroyed')
})
|
|
175
|
+
|
|
176
|
+
test('UNIT: single slow client with concurrency 1 causes deadlock', { skip: shouldSkip }, async (t) => {
  // With concurrency: 1, one blocked client = complete deadlock
  // This demonstrates why the fix is critical

  const broker = await Aedes.createBroker({ concurrency: 1, drainTimeout: 0 })
  t.after(() => broker.close())

  const fastClient = createMockedClient(broker, 'fast-1')
  const slowClient = createMockedClient(broker, 'slow-1')
  await delay(50)

  fastClient.subscribe('deadlock/#')
  slowClient.subscribe('deadlock/#')
  await delay(50)

  // Block slow client
  slowClient.setBlocked(true)

  // Publish multiple messages
  const NUM_MESSAGES = 5
  let publishCount = 0

  // Publish one message and resolve once the broker confirms delivery fan-out.
  const publishOne = (i) => new Promise((resolve) => {
    broker.publish({ topic: 'deadlock/test', payload: Buffer.from(`msg-${i}`) }, () => {
      publishCount++
      resolve()
    })
  })

  const publishAll = (async () => {
    for (let i = 0; i < NUM_MESSAGES; i++) {
      await publishOne(i)
    }
    return 'done'
  })()

  const outcome = await Promise.race([publishAll, delay(3000, 'DEADLOCK')])

  // Count messages fast client received
  const fastReceived = fastClient.receivedPackets.filter(p => p.cmd === 'publish').length

  fastClient.destroy()
  slowClient.destroy()

  // STRICT ASSERTIONS: Must deadlock
  t.assert.strictEqual(outcome, 'DEADLOCK', 'must deadlock with concurrency 1 and blocked client')
  t.assert.ok(publishCount < NUM_MESSAGES, `only ${publishCount}/${NUM_MESSAGES} publishes should complete`)
  t.assert.ok(fastReceived < NUM_MESSAGES, `fast client received ${fastReceived}/${NUM_MESSAGES} - blocked by slow client`)
})
|
|
226
|
+
|
|
227
|
+
test('UNIT: drainTimeout allows system to recover from deadlock', { skip: shouldSkip }, async (t) => {
  // With drainTimeout, the slow client gets disconnected and the system recovers

  const DRAIN_TIMEOUT = 200
  const broker = await Aedes.createBroker({ concurrency: 1, drainTimeout: DRAIN_TIMEOUT })
  t.after(() => broker.close())

  const fastClient = createMockedClient(broker, 'recovery-fast')
  const slowClient = createMockedClient(broker, 'recovery-slow')
  await delay(50)

  fastClient.subscribe('recovery/#')
  slowClient.subscribe('recovery/#')
  await delay(50)

  // Block slow client
  slowClient.setBlocked(true)

  // Publish messages - should complete after slow client is kicked
  const NUM_MESSAGES = 5
  let completed = 0
  const startTime = Date.now()

  // Publish one message, resolving when the broker's fan-out callback fires.
  const publishOne = (i) => new Promise((resolve) => {
    broker.publish({ topic: 'recovery/test', payload: Buffer.from(`msg-${i}`) }, () => {
      completed++
      resolve()
    })
  })

  for (let i = 0; i < NUM_MESSAGES; i++) {
    await publishOne(i)
  }

  const elapsed = Date.now() - startTime
  const fastReceived = fastClient.receivedPackets.filter(p => p.cmd === 'publish').length

  fastClient.destroy()
  slowClient.destroy()

  // STRICT ASSERTIONS: Recovery must work
  t.assert.strictEqual(completed, NUM_MESSAGES, 'all publishes must complete after recovery')
  t.assert.ok(slowClient.isDestroyed(), 'slow client must be disconnected')
  t.assert.ok(fastReceived > 0, `fast client received ${fastReceived} messages after recovery`)
  t.assert.ok(elapsed >= DRAIN_TIMEOUT, `should take at least ${DRAIN_TIMEOUT}ms for first timeout`)
})
|
|
274
|
+
|
|
275
|
+
// ============================================================================
|
|
276
|
+
// E2E TESTS - Real TCP with readStop()
|
|
277
|
+
// ============================================================================
|
|
278
|
+
|
|
279
|
+
/**
 * RELIABLE APPROACH: readStop()
 *
 * Calling socket._handle.readStop() stops libuv from reading at the kernel level.
 * This causes TCP receive buffer to fill → TCP flow control → sender's send buffer
 * fills → write() returns false.
 *
 * Other approaches that DON'T reliably work on localhost:
 * - stream.pause() - only pauses Node.js stream, not kernel recv()
 * - Small highWaterMark - can't change after socket construction
 * - Transform wrapper - breaks bidirectional communication
 */

test('E2E: readStop() triggers real TCP backpressure', { skip: shouldSkip }, async (t) => {
  // This test MUST demonstrate backpressure or FAIL
  // It proves that readStop() reliably triggers write() returning false

  const broker = await Aedes.createBroker({ concurrency: 1 })
  const server = createServer(broker.handle)

  let writeReturnedFalse = false
  let backpressureAtMessage = -1

  // Instrument the broker-side socket of the slow client so we can observe
  // the first write() that reports a full send buffer.
  broker.on('client', (client) => {
    if (client.id === 'e2e-slow') {
      const origWrite = client.conn.write.bind(client.conn)
      client.conn.write = function (...args) {
        const result = origWrite(...args)
        if (result === false && !writeReturnedFalse) {
          writeReturnedFalse = true
        }
        return result
      }
    }
  })

  t.after(async () => {
    broker.close()
    server.close()
  })

  await new Promise(resolve => server.listen(0, resolve))
  const port = server.address().port

  // Slow client
  const slowClient = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'e2e-slow'
  })
  await slowClient.subscribeAsync('e2e/#')

  // Stop reading at kernel level - THIS IS THE KEY
  slowClient.stream.pause()
  if (slowClient.stream._handle && slowClient.stream._handle.readStop) {
    slowClient.stream._handle.readStop()
  } else {
    // FIX: close the already-connected client before skipping; previously the
    // open TCP connection leaked on this path and could keep the process alive.
    slowClient.end(true)
    t.skip('readStop() not available on this platform')
    return
  }

  // Publisher
  const publisher = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'e2e-pub'
  })

  const payload = Buffer.alloc(256 * 1024, 'X') // 256KB
  const MAX_MESSAGES = 100
  let sent = 0

  // Publish large payloads one at a time until backpressure is observed on
  // the slow client's broker-side socket (or we run out of messages).
  for (let i = 0; i < MAX_MESSAGES; i++) {
    await new Promise(resolve => {
      publisher.publish('e2e/test', payload, { qos: 0 }, () => {
        sent++
        resolve()
      })
    })

    if (writeReturnedFalse) {
      backpressureAtMessage = sent
      break
    }
  }

  slowClient.end(true)
  publisher.end(true)

  // STRICT ASSERTION: This test MUST achieve backpressure
  t.assert.strictEqual(
    writeReturnedFalse,
    true,
    'readStop() must trigger TCP backpressure'
  )
  t.assert.ok(backpressureAtMessage > 0 && backpressureAtMessage < MAX_MESSAGES,
    `Backpressure should occur before sending all ${MAX_MESSAGES} messages`)
})
|
|
377
|
+
|
|
378
|
+
test('E2E: drainTimeout disconnects slow client after TCP backpressure', { skip: shouldSkip }, async (t) => {
  // This test demonstrates the fix with real TCP:
  // 1. Real backpressure (write() returns false)
  // 2. drainTimeout kicks in and disconnects slow client

  const DRAIN_TIMEOUT = 500 // Short timeout for faster test

  const broker = await Aedes.createBroker({
    concurrency: 1,
    drainTimeout: DRAIN_TIMEOUT
  })
  const server = createServer(broker.handle)

  let writeReturnedFalse = false
  let slowClientDisconnected = false

  // Instrument the slow client's broker-side socket so we can observe the
  // first write() that reports a full send buffer.
  broker.on('client', (client) => {
    if (client.id === 'recovery-slow') {
      const origWrite = client.conn.write.bind(client.conn)
      client.conn.write = function (...args) {
        const result = origWrite(...args)
        if (result === false && !writeReturnedFalse) {
          writeReturnedFalse = true
        }
        return result
      }
    }
  })

  broker.on('clientDisconnect', (client) => {
    if (client.id === 'recovery-slow') {
      slowClientDisconnected = true
    }
  })

  t.after(async () => {
    broker.close()
    server.close()
  })

  await new Promise(resolve => server.listen(0, resolve))
  const port = server.address().port

  // Slow client
  const slowClient = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'recovery-slow'
  })
  await slowClient.subscribeAsync('recovery/#')

  // Stop reading at kernel level
  slowClient.stream.pause()
  if (slowClient.stream._handle && slowClient.stream._handle.readStop) {
    slowClient.stream._handle.readStop()
  } else {
    // FIX: close the already-connected client before skipping; previously the
    // open TCP connection leaked on this path and could keep the process alive.
    slowClient.end(true)
    t.skip('readStop() not available')
    return
  }

  // Publisher
  const publisher = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'recovery-pub'
  })

  const payload = Buffer.alloc(256 * 1024, 'R')
  const NUM_MESSAGES = 20

  // Publish to trigger backpressure
  for (let i = 0; i < NUM_MESSAGES; i++) {
    publisher.publish('recovery/test', payload, { qos: 0 })
    if (writeReturnedFalse) break
  }

  // Wait for drainTimeout to fire + margin
  await delay(DRAIN_TIMEOUT + 200)

  slowClient.end(true)
  publisher.end(true)

  // STRICT ASSERTIONS
  t.assert.strictEqual(writeReturnedFalse, true,
    'Must trigger backpressure for this test to be valid')
  t.assert.strictEqual(slowClientDisconnected, true,
    'drainTimeout must disconnect slow client')
})
|
|
466
|
+
|
|
467
|
+
test('E2E: without drainTimeout, slow client impairs system throughput', { skip: shouldSkip }, async (t) => {
  // This test demonstrates the BUG with real TCP:
  // Without drainTimeout, a slow client severely impairs message delivery

  const broker = await Aedes.createBroker({
    concurrency: 1,
    drainTimeout: 0 // Disabled - original buggy behavior
  })
  const server = createServer(broker.handle)

  let writeReturnedFalse = false

  // Instrument the slow client's broker-side socket so we can observe the
  // first write() that reports a full send buffer.
  broker.on('client', (client) => {
    if (client.id === 'bug-slow') {
      const origWrite = client.conn.write.bind(client.conn)
      client.conn.write = function (...args) {
        const result = origWrite(...args)
        if (result === false && !writeReturnedFalse) {
          writeReturnedFalse = true
        }
        return result
      }
    }
  })

  t.after(async () => {
    broker.close()
    server.close()
  })

  await new Promise(resolve => server.listen(0, resolve))
  const port = server.address().port

  // Slow client
  const slowClient = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'bug-slow'
  })
  await slowClient.subscribeAsync('bug/#')

  // Fast client
  const fastClient = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'bug-fast'
  })
  await fastClient.subscribeAsync('bug/#')

  let fastReceived = 0
  fastClient.on('message', () => { fastReceived++ })

  // Stop slow client from reading
  slowClient.stream.pause()
  if (slowClient.stream._handle && slowClient.stream._handle.readStop) {
    slowClient.stream._handle.readStop()
  } else {
    // FIX: close BOTH already-connected clients before skipping; previously
    // both open TCP connections leaked on this path and could keep the
    // process alive.
    slowClient.end(true)
    fastClient.end(true)
    t.skip('readStop() not available')
    return
  }

  // Publisher
  const publisher = await mqtt.connectAsync({
    port,
    keepalive: 0,
    clientId: 'bug-pub'
  })

  const payload = Buffer.alloc(256 * 1024, 'B')
  const NUM_MESSAGES = 50

  // Send all messages
  for (let i = 0; i < NUM_MESSAGES; i++) {
    publisher.publish('bug/test', payload, { qos: 0 })
  }

  // Wait for delivery attempts
  await delay(1000)

  slowClient.end(true)
  fastClient.end(true)
  publisher.end(true)

  // STRICT ASSERTIONS
  t.assert.strictEqual(writeReturnedFalse, true,
    'Must trigger backpressure for this test to be valid')

  // Fast client should NOT receive all messages because slow client blocks
  t.assert.ok(fastReceived < NUM_MESSAGES,
    `Fast client should NOT receive all ${NUM_MESSAGES} messages (received ${fastReceived})`)

  // Throughput should be severely degraded (<50%)
  t.assert.ok(fastReceived < NUM_MESSAGES * 0.5,
    `Throughput should be severely degraded (<50%), got ${((fastReceived / NUM_MESSAGES) * 100).toFixed(1)}%`)
})
|
|
562
|
+
|
|
563
|
+
test('close() flushes pending drain callbacks with error', { skip: shouldSkip }, async (t) => {
  const [client, server] = duplexPair()
  const broker = new Aedes({ drainTimeout: 5000 })
  const aedesClient = broker.handle(server)

  // FIX: register cleanup up-front so the broker and sockets are released even
  // when an assertion below fails. Previously broker.close() was only called
  // after the last assertion and leaked on any failure.
  t.after(() => {
    client.destroy()
    server.destroy()
    broker.close()
  })

  // Simulate backpressure by pausing the client socket
  client.pause()

  let callbackInvoked = false
  let callbackError = null

  // Register a drain callback
  aedesClient.waitForDrain((err) => {
    callbackInvoked = true
    callbackError = err
  })

  // Close the client connection while drain is pending
  aedesClient.close()

  // Wait for callback to be invoked asynchronously
  await new Promise(resolve => setImmediate(resolve))

  // Verify callback was invoked with connection closed error
  t.assert.strictEqual(callbackInvoked, true, 'Drain callback should be invoked')
  t.assert.ok(callbackError, 'Callback should receive an error')
  t.assert.strictEqual(callbackError.message, 'connection closed',
    'Error should indicate connection closed')
})