aedes 0.51.2 → 1.0.0
This diff reflects the changes between publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/.github/actions/sticky-pr-comment/action.yml +55 -0
- package/.github/workflows/benchmark-compare-serial.yml +60 -0
- package/.github/workflows/ci.yml +12 -17
- package/.release-it.json +18 -0
- package/.taprc +15 -6
- package/README.md +6 -4
- package/aedes.d.ts +0 -6
- package/aedes.js +270 -238
- package/benchmarks/README.md +33 -0
- package/benchmarks/pingpong.js +94 -25
- package/benchmarks/receiver.js +77 -0
- package/benchmarks/report.js +150 -0
- package/benchmarks/runBenchmarks.js +118 -0
- package/benchmarks/sender.js +86 -0
- package/benchmarks/server.js +19 -18
- package/checkVersion.js +20 -0
- package/docs/Aedes.md +66 -8
- package/docs/Client.md +3 -4
- package/docs/Examples.md +39 -22
- package/docs/MIGRATION.md +50 -0
- package/eslint.config.js +8 -0
- package/example.js +51 -40
- package/examples/clusters/index.js +28 -23
- package/examples/clusters/package.json +10 -6
- package/lib/client.js +405 -306
- package/lib/handlers/connect.js +42 -38
- package/lib/handlers/index.js +9 -11
- package/lib/handlers/ping.js +2 -3
- package/lib/handlers/puback.js +5 -5
- package/lib/handlers/publish.js +29 -14
- package/lib/handlers/pubrec.js +9 -17
- package/lib/handlers/pubrel.js +34 -25
- package/lib/handlers/subscribe.js +54 -43
- package/lib/handlers/unsubscribe.js +16 -19
- package/lib/qos-packet.js +14 -17
- package/lib/utils.js +5 -12
- package/lib/write.js +4 -5
- package/package.json +134 -136
- package/test/auth.js +468 -804
- package/test/basic.js +613 -575
- package/test/bridge.js +44 -40
- package/test/client-pub-sub.js +531 -504
- package/test/close_socket_by_other_party.js +137 -102
- package/test/connect.js +487 -484
- package/test/drain-timeout.js +593 -0
- package/test/drain-toxiproxy.js +620 -0
- package/test/events.js +174 -144
- package/test/helper.js +351 -73
- package/test/keep-alive.js +40 -67
- package/test/meta.js +257 -210
- package/test/not-blocking.js +93 -197
- package/test/qos1.js +464 -554
- package/test/qos2.js +308 -393
- package/test/regr-21.js +39 -21
- package/test/require.cjs +22 -0
- package/test/retain.js +349 -398
- package/test/topics.js +176 -183
- package/test/types/aedes.test-d.ts +4 -8
- package/test/will.js +310 -428
- package/types/instance.d.ts +40 -35
- package/types/packet.d.ts +10 -10
- package/.coveralls.yml +0 -1
- package/benchmarks/bombing.js +0 -34
- package/benchmarks/bombingQoS1.js +0 -36
- package/benchmarks/throughputCounter.js +0 -23
- package/benchmarks/throughputCounterQoS1.js +0 -33
- package/types/.eslintrc.json +0 -47
@@ -0,0 +1,620 @@
+/**
+ * ToxiProxy Integration Tests for Drain Timeout
+ *
+ * These tests demonstrate behavior with TRULY SLOW clients (not frozen),
+ * which is different from readStop()-based tests.
+ *
+ * KEY INSIGHT: ToxiProxy simulates slow network, not frozen client.
+ * - Slow client: data IS flowing, just slowly. Messages eventually arrive.
+ * - Frozen client (readStop): data stops entirely. Messages never arrive.
+ *
+ * FINDING: With bandwidth throttling, TCP backpressure occurs after ~1-2MB
+ * (not 25MB as initially suspected). The exact threshold depends on:
+ * - TCP socket buffer sizes (typically 64KB-256KB)
+ * - ToxiProxy's internal buffering
+ * - Bandwidth toxic settings
+ *
+ * What ToxiProxy CAN reveal:
+ * 1. Slow clients DO receive messages (eventually) - no deadlock without backpressure
+ * 2. Large data volumes + bandwidth limits DO trigger TCP backpressure
+ * 3. QoS 1 with latency shows acknowledgment delays
+ * 4. drainTimeout works with proxy-induced backpressure
+ *
+ * These tests require Docker to be running.
+ *
+ * Run: node --test test/drain-toxiproxy.js
+ */
+
+import { test, before, after, describe } from 'node:test'
+import { createServer } from 'node:net'
+import { setTimeout as delay } from 'node:timers/promises'
+import mqtt from 'mqtt'
+import { GenericContainer, Wait } from 'testcontainers'
+import { Toxiproxy } from 'toxiproxy-node-client'
+import { Aedes } from '../aedes.js'
+import { shouldSkipOnWindowsAndMac } from './helper.js'
+
+// Check if we should skip tests on Windows/macOS
+// Issues: "Could not find a working container runtime strategy" (Mac)
+// "invalid volume specification" for Docker socket (Windows)
+const shouldSkip = shouldSkipOnWindowsAndMac()
+
+// ToxiProxy configuration
+const TOXIPROXY_API_PORT = 8474
+const PROXY_LISTEN_PORT = 14883
+
+let toxiproxyContainer
+let toxiproxy
+let proxyHost
+let proxyApiPort
+let proxyMappedPort
+
+describe('ToxiProxy: realistic slow client behavior', { skip: shouldSkip }, async () => {
+  before(async () => {
+    console.log('[Setup] Starting ToxiProxy container...')
+
+    toxiproxyContainer = await new GenericContainer('ghcr.io/shopify/toxiproxy:2.9.0')
+      .withExposedPorts(TOXIPROXY_API_PORT, PROXY_LISTEN_PORT)
+      .withCommand(['-host=0.0.0.0'])
+      .withWaitStrategy(Wait.forHttp('/version', TOXIPROXY_API_PORT))
+      .withResourcesQuota({ cpu: 0.5, memory: 256 * 1024 * 1024 })
+      .start()
+
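+    // Testcontainers publishes exposed container ports on random free host
+    // ports, so both ports are looked up with getMappedPort() after start().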
+    proxyHost = toxiproxyContainer.getHost()
+    proxyApiPort = toxiproxyContainer.getMappedPort(TOXIPROXY_API_PORT)
+    proxyMappedPort = toxiproxyContainer.getMappedPort(PROXY_LISTEN_PORT)
+
+    toxiproxy = new Toxiproxy(`http://${proxyHost}:${proxyApiPort}`)
+
+    console.log(`[Setup] ToxiProxy ready at ${proxyHost}:${proxyApiPort}`)
+    console.log(`[Setup] Proxy port mapped: ${PROXY_LISTEN_PORT} -> ${proxyMappedPort}`)
+  })
+
+  after(async () => {
+    if (toxiproxyContainer) {
+      console.log('[Cleanup] Stopping ToxiProxy container...')
+      await toxiproxyContainer.stop()
+    }
+  })
+
+  test('INSIGHT: slow client (via ToxiProxy) vs frozen client (readStop) behavior', async (t) => {
+    // This test demonstrates the KEY DIFFERENCE between slow and frozen clients:
+    // - SLOW client (ToxiProxy): Data flows slowly. Messages eventually arrive.
+    //   No TCP backpressure until proxy/socket buffers fill (~1-2MB, see FINDING above).
+    // - FROZEN client (readStop): No data flow. Immediate TCP backpressure.
+    //   Messages never arrive.
+    //
+    // This distinction matters for drainTimeout configuration!
+
+    const broker = await Aedes.createBroker({ concurrency: 1 })
+    const server = createServer(broker.handle)
+    await new Promise(resolve => server.listen(0, '0.0.0.0', resolve))
+    const brokerPort = server.address().port
+
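+    // Assumes a default Docker network setup: host.docker.internal resolves on
+    // Docker Desktop (macOS), while 172.17.0.1 is the docker0 bridge gateway
+    // that reaches the host on Linux.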
+    const hostIp = process.platform === 'darwin' ? 'host.docker.internal' : '172.17.0.1'
+
+    const proxy = await toxiproxy.createProxy({
+      name: 'aedes-slow-vs-frozen',
+      listen: `0.0.0.0:${PROXY_LISTEN_PORT}`,
+      upstream: `${hostIp}:${brokerPort}`
+    })
+
+    // Slicer: 500 bytes/20ms = ~25KB/s (slow but functional)
+    await proxy.addToxic({
+      name: 'slicer-slow',
+      type: 'slicer',
+      stream: 'downstream',
+      toxicity: 1,
+      attributes: {
+        average_size: 500,
+        size_variation: 50,
+        delay: 20000 // 20ms per chunk
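+        // (per the Toxiproxy docs, slicer delay is in microseconds: 20000 = 20ms)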
+      }
+    })
+
+    console.log('[Test] Testing SLOW client behavior (~25KB/s throughput)')
+
+    try {
+      const subscriber = await mqtt.connectAsync({
+        host: proxyHost,
+        port: proxyMappedPort,
+        keepalive: 0,
+        clientId: 'slow-subscriber'
+      })
+      await subscriber.subscribeAsync('test/#', { qos: 0 })
+
+      let messagesReceived = 0
+      subscriber.on('message', () => {
+        messagesReceived++
+        console.log(`[Test] Slow client received message ${messagesReceived}`)
+      })
+
+      const publisher = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'publisher'
+      })
+
+      // Send small messages to see them arrive slowly
+      const payload = Buffer.alloc(4 * 1024, 'S') // 4KB - arrives in ~160ms
+      const numMessages = 5
+
+      console.log(`[Test] Publishing ${numMessages} x 4KB messages...`)
+      const startTime = Date.now()
+
+      for (let i = 0; i < numMessages; i++) {
+        publisher.publish('test/topic', payload, { qos: 0 })
+      }
+
+      // Wait for slow delivery (4KB at 25KB/s = 160ms per message, + buffer time)
+      console.log('[Test] Waiting for slow delivery...')
+      await delay(3000) // 3 seconds should be enough for 5 messages
+
+      const elapsed = Date.now() - startTime
+
+      console.log(`[Test] Elapsed: ${elapsed}ms`)
+      console.log(`[Test] Messages received: ${messagesReceived}/${numMessages}`)
+
+      // KEY INSIGHT: Unlike frozen client, slow client DOES receive messages!
+      // This is the crucial difference ToxiProxy reveals.
+      t.assert.ok(messagesReceived > 0, 'Slow client DOES receive messages (unlike frozen client)')
+      t.assert.ok(messagesReceived >= numMessages - 1, 'Most/all messages should arrive given enough time')
+
+      console.log('[Test] SUCCESS: Slow client received messages (no deadlock)')
+      console.log('[Test] This is DIFFERENT from frozen client where messages never arrive!')
+
+      subscriber.end(true)
+      publisher.end(true)
+    } finally {
+      await proxy.remove()
+      broker.close()
+      server.close()
+    }
+  })
+
+  test('limit_data toxic: client freezes mid-stream, drainTimeout disconnects it', async (t) => {
+    // limit_data stops forwarding after N bytes - simulates a client that freezes.
+    // With drainTimeout enabled, the broker should disconnect the frozen client.
+    // Without drainTimeout, the broker would wait forever (but ToxiProxy buffers mask this).
+
+    const DRAIN_TIMEOUT = 1000
+
+    const broker = await Aedes.createBroker({
+      concurrency: 1,
+      drainTimeout: DRAIN_TIMEOUT
+    })
+    const server = createServer(broker.handle)
+    await new Promise(resolve => server.listen(0, '0.0.0.0', resolve))
+    const brokerPort = server.address().port
+
+    const hostIp = process.platform === 'darwin' ? 'host.docker.internal' : '172.17.0.1'
+
+    const proxy = await toxiproxy.createProxy({
+      name: 'aedes-limit-test',
+      listen: `0.0.0.0:${PROXY_LISTEN_PORT}`,
+      upstream: `${hostIp}:${brokerPort}`
+    })
+
+    // Allow MQTT handshake, then freeze after 5KB downstream
+    await proxy.addToxic({
+      name: 'limit-down',
+      type: 'limit_data',
+      stream: 'downstream',
+      toxicity: 1,
+      attributes: {
+        bytes: 5 * 1024 // Freeze after 5KB sent to client
+      }
+    })
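+    // (Per the Toxiproxy docs, limit_data closes the connection once the byte
+    // limit is exceeded, so the disconnect may come from the proxy itself
+    // rather than from drainTimeout - the assertions below accept either.)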
+
+    console.log('[Test] limit_data: connection freezes after 5KB downstream')
+    console.log(`[Test] drainTimeout: ${DRAIN_TIMEOUT}ms`)
+
+    let clientDisconnected = false
+
+    try {
+      const subscriber = await mqtt.connectAsync({
+        host: proxyHost,
+        port: proxyMappedPort,
+        keepalive: 0,
+        clientId: 'frozen-client'
+      })
+      await subscriber.subscribeAsync('freeze/#', { qos: 0 })
+
+      let messagesReceived = 0
+      subscriber.on('message', () => { messagesReceived++ })
+      subscriber.on('close', () => {
+        clientDisconnected = true
+        console.log('[Test] Frozen client disconnected')
+      })
+
+      const publisher = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'freeze-publisher'
+      })
+
+      // Send enough data to exceed the 5KB limit and trigger freeze
+      const payload = Buffer.alloc(32 * 1024, 'F') // 32KB > 5KB limit
+
+      console.log('[Test] Publishing 32KB message (exceeds 5KB limit)...')
+      publisher.publish('freeze/topic', payload, { qos: 0 })
+
+      // Wait for drainTimeout + buffer
+      await delay(DRAIN_TIMEOUT + 1500)
+
+      console.log(`[Test] Client disconnected: ${clientDisconnected}`)
+      console.log(`[Test] Messages received: ${messagesReceived}`)
+
+      // SUCCESS: Client should be disconnected (either by drainTimeout or connection error)
+      // FAILURE: Client stays connected despite being frozen
+      t.assert.strictEqual(clientDisconnected, true,
+        'FAILED: Frozen client should be disconnected by drainTimeout or connection reset')
+      t.assert.strictEqual(messagesReceived, 0,
+        'Frozen client should not receive full message')
+
+      console.log('[Test] SUCCESS: Frozen client was disconnected')
+
+      subscriber.end(true)
+      publisher.end(true)
+    } finally {
+      await proxy.remove()
+      broker.close()
+      server.close()
+    }
+  })
+
+  test('LARGE DATA: filling ToxiProxy buffer triggers real TCP backpressure', async (t) => {
+    // ToxiProxy and the TCP sockets buffer data internally. If we send MORE
+    // than fits (~1-2MB under a tight bandwidth limit - see FINDING above),
+    // the buffers fill and TCP backpressure kicks in.
+    // This test proves the full chain works with realistic proxy conditions.
+
+    const DRAIN_TIMEOUT = 2000
+
+    const broker = await Aedes.createBroker({
+      concurrency: 1,
+      drainTimeout: DRAIN_TIMEOUT
+    })
+    const server = createServer(broker.handle)
+    await new Promise(resolve => server.listen(0, '0.0.0.0', resolve))
+    const brokerPort = server.address().port
+
+    const hostIp = process.platform === 'darwin' ? 'host.docker.internal' : '172.17.0.1'
+
+    const proxy = await toxiproxy.createProxy({
+      name: 'aedes-large-data',
+      listen: `0.0.0.0:${PROXY_LISTEN_PORT}`,
+      upstream: `${hostIp}:${brokerPort}`
+    })
+
+    // Very slow bandwidth - 1KB/s causes TCP backpressure after ~1-2MB
+    await proxy.addToxic({
+      name: 'bandwidth-crawl',
+      type: 'bandwidth',
+      stream: 'downstream',
+      toxicity: 1,
+      attributes: {
+        rate: 1 // 1 KB/s - painfully slow
+      }
+    })
+
+    console.log('[Test] Bandwidth: 1KB/s - backpressure expected at ~1-2MB')
+
+    let clientDisconnected = false
+    let writeReturnedFalse = false
+
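+    // Instrument the subscriber's socket: Node's socket.write() returns false
+    // once its internal buffer exceeds the highWaterMark - that is the TCP
+    // backpressure signal this test watches for.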
+    broker.on('client', (client) => {
+      if (client.id === 'large-data-sub') {
+        const origWrite = client.conn.write.bind(client.conn)
+        client.conn.write = function (...args) {
+          const result = origWrite(...args)
+          if (result === false && !writeReturnedFalse) {
+            writeReturnedFalse = true
+            console.log('[Test] write() returned false - TCP BACKPRESSURE!')
+          }
+          return result
+        }
+      }
+    })
+
+    try {
+      const subscriber = await mqtt.connectAsync({
+        host: proxyHost,
+        port: proxyMappedPort,
+        keepalive: 0,
+        clientId: 'large-data-sub'
+      })
+      await subscriber.subscribeAsync('large/#', { qos: 0 })
+
+      subscriber.on('close', () => {
+        clientDisconnected = true
+        console.log('[Test] Client disconnected')
+      })
+
+      const publisher = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'large-data-pub'
+      })
+
+      // 1MB messages × 5 = 5MB - enough to trigger backpressure at ~1-2MB
+      const payload = Buffer.alloc(1024 * 1024, 'L') // 1MB
+      const numMessages = 5
+      let sent = 0
+
+      console.log(`[Test] Publishing ${numMessages} x 1MB = 5MB total...`)
+      const startTime = Date.now()
+
+      for (let i = 0; i < numMessages; i++) {
+        publisher.publish('large/topic', payload, { qos: 0 })
+        sent++
+        if (i % 5 === 0) {
+          console.log(`[Test] Sent ${sent}MB...`)
+        }
+        // Small delay to let events process
+        await delay(10)
+      }
+
+      // Wait for drainTimeout to potentially fire
+      console.log(`[Test] Waiting for drainTimeout (${DRAIN_TIMEOUT}ms)...`)
+      await delay(DRAIN_TIMEOUT + 1000)
+
+      const elapsed = Date.now() - startTime
+
+      console.log(`[Test] Elapsed: ${elapsed}ms`)
+      console.log(`[Test] Backpressure (write=false): ${writeReturnedFalse}`)
+      console.log(`[Test] Client disconnected: ${clientDisconnected}`)
+
+      // SUCCESS: Either backpressure occurred OR client disconnected
+      // This proves large data volumes DO cause real TCP effects through ToxiProxy
+      const backpressureOccurred = writeReturnedFalse || clientDisconnected
+
+      t.assert.ok(backpressureOccurred,
+        'FAILED: With 5MB at 1KB/s, should trigger backpressure or disconnect')
+
+      if (writeReturnedFalse) {
+        console.log('[Test] SUCCESS: TCP backpressure achieved via ToxiProxy buffer fill!')
+      }
+      if (clientDisconnected) {
+        console.log('[Test] SUCCESS: drainTimeout disconnected slow client!')
+      }
+
+      subscriber.end(true)
+      publisher.end(true)
+    } finally {
+      await proxy.remove()
+      broker.close()
+      server.close()
+    }
+  })
+
+  test('QoS 1: slow client delays acknowledgments, broker tracks pending messages', async (t) => {
+    // QoS 1 requires PUBACK from client. With a slow client:
+    // 1. Broker sends PUBLISH
+    // 2. Slow client receives it slowly
+    // 3. Client sends PUBACK slowly (upstream also affected by proxy)
+    // 4. Broker waits for PUBACK before considering delivery complete
+    //
+    // This tests a DIFFERENT blocking pattern than write-side backpressure.
+
+    const broker = await Aedes.createBroker({ concurrency: 5 })
+    const server = createServer(broker.handle)
+    await new Promise(resolve => server.listen(0, '0.0.0.0', resolve))
+    const brokerPort = server.address().port
+
+    const hostIp = process.platform === 'darwin' ? 'host.docker.internal' : '172.17.0.1'
+
+    const proxy = await toxiproxy.createProxy({
+      name: 'aedes-qos1',
+      listen: `0.0.0.0:${PROXY_LISTEN_PORT}`,
+      upstream: `${hostIp}:${brokerPort}`
+    })
+
+    // Slow BOTH directions - affects PUBLISH downstream and PUBACK upstream
+    await proxy.addToxic({
+      name: 'latency-both',
+      type: 'latency',
+      stream: 'downstream',
+      toxicity: 1,
+      attributes: {
+        latency: 500, // 500ms per packet
+        jitter: 100
+      }
+    })
+    await proxy.addToxic({
+      name: 'latency-upstream',
+      type: 'latency',
+      stream: 'upstream',
+      toxicity: 1,
+      attributes: {
+        latency: 500, // 500ms for PUBACK to return
+        jitter: 100
+      }
+    })
+
+    console.log('[Test] QoS 1 with 500ms latency each direction (1s round-trip)')
+
+    try {
+      // Slow subscriber through proxy
+      const slowSubscriber = await mqtt.connectAsync({
+        host: proxyHost,
+        port: proxyMappedPort,
+        keepalive: 0,
+        clientId: 'qos1-slow'
+      })
+      await slowSubscriber.subscribeAsync('qos1/#', { qos: 1 }) // QoS 1!
+
+      let slowReceived = 0
+      slowSubscriber.on('message', () => {
+        slowReceived++
+        console.log(`[Test] Slow client received message ${slowReceived}`)
+      })
+
+      // Fast subscriber directly connected
+      const fastSubscriber = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'qos1-fast'
+      })
+      await fastSubscriber.subscribeAsync('qos1/#', { qos: 1 })
+
+      let fastReceived = 0
+      fastSubscriber.on('message', () => {
+        fastReceived++
+      })
+
+      const publisher = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'qos1-pub'
+      })
+
+      // Small messages, QoS 1
+      const payload = Buffer.alloc(1024, 'Q') // 1KB
+      const numMessages = 5
+      let publishAcked = 0
+
+      console.log(`[Test] Publishing ${numMessages} x 1KB messages with QoS 1...`)
+      const startTime = Date.now()
+
+      // QoS 1 publish - callback fires when broker ACKs our publish
+      const publishPromises = []
+      for (let i = 0; i < numMessages; i++) {
+        publishPromises.push(new Promise(resolve => {
+          publisher.publish('qos1/topic', payload, { qos: 1 }, (err) => {
+            publishAcked++
+            console.log(`[Test] Publish ${publishAcked} ACKed by broker`)
+            resolve(err)
+          })
+        }))
+      }
+
+      await Promise.all(publishPromises)
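+      // Note: the broker PUBACKs the publisher as soon as it has the message;
+      // QoS 1 is hop-by-hop, not end-to-end, so these resolve independently
+      // of the slow subscriber's delivery.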
+
+      // Wait for slow client to receive
+      await delay(5000) // 5 messages × 1s round-trip ≈ 5s
+
+      const elapsed = Date.now() - startTime
+
+      console.log(`[Test] Elapsed: ${elapsed}ms`)
+      console.log(`[Test] Fast subscriber received: ${fastReceived}/${numMessages}`)
+      console.log(`[Test] Slow subscriber received: ${slowReceived}/${numMessages}`)
+
+      // SUCCESS criteria:
+      // - Fast subscriber should receive all messages quickly
+      // - Slow subscriber should receive some messages (eventually all)
+      // - Total time should reflect the latency overhead
+      t.assert.strictEqual(fastReceived, numMessages,
+        'Fast subscriber must receive all QoS 1 messages')
+      t.assert.ok(slowReceived > 0,
+        'Slow subscriber should receive some QoS 1 messages')
+      t.assert.ok(elapsed > 2000,
+        'QoS 1 with 500ms×2 latency should take noticeable time')
+
+      console.log('[Test] SUCCESS: QoS 1 works with slow client (latency affects delivery)')
+
+      slowSubscriber.end(true)
+      fastSubscriber.end(true)
+      publisher.end(true)
+    } finally {
+      await proxy.remove()
+      broker.close()
+      server.close()
+    }
+  })
+
+  test('fast subscriber still receives messages when slow subscriber via proxy', async (t) => {
+    // This tests that a fast client (direct connection) continues receiving
+    // even when slow clients (through proxy) are degrading throughput.
+    // Unlike frozen clients, slow clients don't cause complete deadlock.
+
+    const broker = await Aedes.createBroker({ concurrency: 5 })
+    const server = createServer(broker.handle)
+    await new Promise(resolve => server.listen(0, '0.0.0.0', resolve))
+    const brokerPort = server.address().port
+
+    const hostIp = process.platform === 'darwin' ? 'host.docker.internal' : '172.17.0.1'
+
+    const proxy = await toxiproxy.createProxy({
+      name: 'aedes-fast-slow-test',
+      listen: `0.0.0.0:${PROXY_LISTEN_PORT}`,
+      upstream: `${hostIp}:${brokerPort}`
+    })
+
+    // Moderate bandwidth limit - slow but functional
+    await proxy.addToxic({
+      name: 'bandwidth-slow',
+      type: 'bandwidth',
+      stream: 'downstream',
+      toxicity: 1,
+      attributes: {
+        rate: 10 // 10 KB/s
+      }
+    })
+
+    console.log('[Test] Slow subscriber at 10KB/s, fast subscriber direct')
+
+    try {
+      // Slow subscriber through proxy
+      const slowSubscriber = await mqtt.connectAsync({
+        host: proxyHost,
+        port: proxyMappedPort,
+        keepalive: 0,
+        clientId: 'slow-sub'
+      })
+      await slowSubscriber.subscribeAsync('mixed/#', { qos: 0 })
+
+      let slowReceived = 0
+      slowSubscriber.on('message', () => { slowReceived++ })
+
+      // Fast subscriber directly connected
+      const fastSubscriber = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'fast-sub'
+      })
+      await fastSubscriber.subscribeAsync('mixed/#', { qos: 0 })
+
+      let fastReceived = 0
+      fastSubscriber.on('message', () => { fastReceived++ })
+
+      const publisher = await mqtt.connectAsync({
+        port: brokerPort,
+        keepalive: 0,
+        clientId: 'mixed-publisher'
+      })
+
+      // Small messages so slow client can actually receive some
+      const payload = Buffer.alloc(1024, 'M') // 1KB
+      const numMessages = 10
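+      // 10 x 1KB = 10KB total; at the 10KB/s cap the slow subscriber drains
+      // in roughly a second, well within the 3s wait below.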
+
+      console.log(`[Test] Publishing ${numMessages} x 1KB messages...`)
+
+      for (let i = 0; i < numMessages; i++) {
+        publisher.publish('mixed/topic', payload, { qos: 0 })
+      }
+
+      // Wait for delivery
+      await delay(3000)
+
+      console.log(`[Test] Fast subscriber received: ${fastReceived}/${numMessages}`)
+      console.log(`[Test] Slow subscriber received: ${slowReceived}/${numMessages}`)
+
+      // SUCCESS criteria:
+      // - Fast subscriber should receive ALL messages (direct connection)
+      // - Slow subscriber should receive SOME messages (slow but not frozen)
+      t.assert.strictEqual(fastReceived, numMessages,
+        'Fast subscriber must receive all messages')
+      t.assert.ok(slowReceived > 0,
+        'Slow subscriber should receive some messages (not frozen)')
+
+      console.log('[Test] SUCCESS: Slow client does not block fast client delivery')
+
+      slowSubscriber.end(true)
+      fastSubscriber.end(true)
+      publisher.end(true)
+    } finally {
+      await proxy.remove()
+      broker.close()
+      server.close()
+    }
+  })
+})