@aikidosec/broker-client 1.0.6 → 1.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/app/client.js +103 -26
- package/app/log.js +10 -0
- package/app/streaming/cleanup.js +76 -0
- package/app/streaming/flowControl.js +39 -0
- package/app/streaming/handlers.js +150 -0
- package/app/streaming/initStreamingResponse.js +84 -0
- package/app/streaming/state.js +82 -0
- package/app/streamingHandler.js +15 -0
- package/package.json +15 -3
package/app/client.js
CHANGED

@@ -13,14 +13,11 @@ import dns from 'native-dns';
 import { ResourceManager } from './resourceManager.js';
 import { HttpsProxyAgent } from 'https-proxy-agent';
 import { getClientId, setClientIdCache, getServerUrl, getClientSecret } from './config.js';
-
-
-
-
-
-  error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
-  debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
-};
+import { STREAMING_THRESHOLD } from './streaming/state.js';
+import { registerStreamingHandlers } from './streaming/handlers.js';
+import { initStreamingResponse } from './streaming/initStreamingResponse.js';
+import { startStaleStreamCleanup, cleanupAllStreams } from './streaming/cleanup.js';
+import { log } from './log.js';
 
 // Broker Server Configuration
 const CLIENT_SECRET = getClientSecret();

@@ -107,6 +104,32 @@ async function resolveInternalHostname(hostname) {
   });
 }
 
+/**
+ * Get Content-Length via HEAD request to determine if streaming should be used.
+ * @param {string} url - URL to check
+ * @param {object} headers - Headers to send with the request
+ * @returns {Promise<number|null>} Content-Length in bytes, or null if unavailable
+ */
+async function getContentLengthViaHead(url, headers) {
+  try {
+    const headResponse = await internalHttpClient.head(url, {
+      headers,
+      validateStatus: () => true,
+      timeout: 5000
+    });
+    // Headers can be any case (Content-Length, content-length, CONTENT-LENGTH)
+    const contentLengthHeader = Object.keys(headResponse.headers)
+      .find(key => key.toLowerCase() === 'content-length');
+    const contentLength = contentLengthHeader
+      ? parseInt(headResponse.headers[contentLengthHeader])
+      : NaN;
+    return isNaN(contentLength) ? null : contentLength;
+  } catch (e) {
+    log.warn(`HEAD request failed, will use buffered approach: ${e.message}`);
+    return null;
+  }
+}
+
 /**
  * Check if URL points to internal resource
  */

@@ -210,7 +233,9 @@ const socket = io(SERVER_URL, {
   randomizationFactor: 0.5,
   tryAllTransports: true, // if we don't, it won't try to fallback from websocket to polling
   autoConnect: false, // Don't connect until after registration
-  withCredentials: true // make sure cookies work for sticky sessions
+  withCredentials: true, // make sure cookies work for sticky sessions
+  // Increase timeouts for heavy streaming workloads
+  pingTimeout: 60000, // 60s (default 20s) - time to wait for pong before considering connection dead
 });
 
 // Socket.IO event handlers

@@ -230,8 +255,10 @@ socket.on('connect', async () => {
   }
 });
 
-socket.on('disconnect', () => {
-  log.warn(
+socket.on('disconnect', (reason) => {
+  log.warn(`Disconnected from broker server: ${reason}`);
+  // Clean up all active streams - they can't recover after reconnect
+  cleanupAllStreams();
 });
 
 socket.on('connect_error', (error) => {

@@ -258,6 +285,12 @@ socket.io.on('reconnect_failed', () => {
   log.error(`Socket.IO reconnection failed after all attempts`);
 });
 
+// Register streaming handlers (handle next chunk and abort stream on client disconnect)
+registerStreamingHandlers(socket);
+
+// Start cleanup interval for stale streams
+startStaleStreamCleanup();
+
 socket.on('forward_request', async (data, callback) => {
   /**
    * Receive request from broker server and forward to internal resource

@@ -309,29 +342,54 @@ socket.on('forward_request', async (data, callback) => {
       }
     }
 
-    //
+    // Check Content-Length first with HEAD request to decide streaming vs buffered
+    let useStreaming = false;
+    let contentLength = null;
+
+    if (method === 'GET') {
+      contentLength = await getContentLengthViaHead(resolvedUrl, headers);
+      if (contentLength !== null && contentLength > STREAMING_THRESHOLD) {
+        useStreaming = true;
+        log.info(`Large response detected (${(contentLength / (1024 * 1024)).toFixed(2)} MB) - using streaming`);
+      }
+    }
+
+    // Make the request with appropriate response type
     const response = await internalHttpClient.request({
       method,
      url: resolvedUrl,
      headers,
      data: body,
      validateStatus: () => true, // Accept any status code
-      responseType: 'arraybuffer',
+      responseType: useStreaming ? 'stream' : 'arraybuffer',
    });
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if (useStreaming) {
+      // Initialize streaming - server will pull chunks via get_next_chunk
+      initStreamingResponse({
+        requestId,
+        statusCode: response.status,
+        headers: response.headers,
+        stream: response.data,
+        callback
+      });
+    } else {
+      const responseSizeBytes = response.data ? response.data.length : 0;
+      const responseSizeMB = (responseSizeBytes / (1024 * 1024)).toFixed(2);
+
+      log.info(`Successfully forwarded request ${requestId} to ${targetUrl}, status: ${response.status}, response size: ${responseSizeMB} MB`);
+
+      // Send direct response
+      sendDirectResponse(
+        requestId,
+        response.status,
+        response.headers,
+        response.data,
+        callback
+      );
+
+      log.info(`Response sent for request ${requestId}`);
+    }
 
   } catch (error) {
     log.error(`Error forwarding request ${requestId} to ${targetUrl}: ${error?.response?.status || error.message}`);

@@ -346,6 +404,25 @@ socket.on('forward_request', async (data, callback) => {
   }
 });
 
+/**
+ * Send a direct response (for small files under streaming threshold)
+ * @param {string} requestId - Request ID for tracking
+ * @param {number} statusCode - HTTP status code
+ * @param {object} headers - Response headers
+ * @param {Buffer|null} responseData - Raw response data (before base64)
+ * @param {function} callback - Socket.IO callback
+ */
+function sendDirectResponse(requestId, statusCode, headers, responseData, callback) {
+  const responseBody = responseData ? responseData.toString('base64') : null;
+  callback({
+    request_id: requestId,
+    status_code: statusCode,
+    headers: headers,
+    body: responseBody,
+    version: 2
+  });
+}
+
 function formatMessageBody(message) {
   return Buffer.from(message, 'utf-8').toString('base64');
 }
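client.js now has two response paths for forward_request: responses at or under STREAMING_THRESHOLD are acked once through sendDirectResponse, while larger ones ack only status and headers (body: null, streaming: true) and leave the body to be pulled chunk by chunk. A minimal sketch of what the server side of that pull loop could look like, inferred from the events this client registers (get_next_chunk, abort_stream) and the ack payloads it sends; the actual broker-server implementation is not part of this package, and pullStream/writable are illustrative names:

// Hypothetical broker-server pull loop (socket.io >= 4.6 for emitWithAck)
async function pullStream(serverSocket, requestId, writable) {
  for (;;) {
    // Ask the client for the next chunk; resolves with the client's callback payload
    const chunk = await serverSocket.emitWithAck('get_next_chunk', { request_id: requestId });
    if (chunk.error) throw new Error(`Stream ${requestId} failed: ${chunk.error}`); // e.g. 'no_stream'
    writable.write(Buffer.from(chunk.data, 'base64')); // chunks arrive base64-encoded
    if (chunk.complete) break; // client has already deleted its stream state
  }
  writable.end();
}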
package/app/log.js
ADDED

@@ -0,0 +1,10 @@
+/**
+ * Logging utility for streaming module
+ */
+
+export const log = {
+  info: (msg) => console.log(`[INFO] ${new Date().toISOString()} - ${msg}`),
+  warn: (msg) => console.warn(`[WARN] ${new Date().toISOString()} - ${msg}`),
+  error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
+  debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
+};
package/app/streaming/cleanup.js
ADDED

@@ -0,0 +1,76 @@
+/**
+ * Stream cleanup utilities - manages cleanup of streaming state
+ */
+
+import { activeStreams, recalculateTotalBufferSize } from './state.js';
+import { log } from '../log.js';
+import { resumePausedStreams } from './flowControl.js';
+
+// Stale stream timeout (3 minutes)
+const STALE_STREAM_TIMEOUT_MS = 3 * 60 * 1000;
+const STALE_STREAM_INTERVAL_MS = 2 * 60 * 1000; // Check every 2 minutes
+
+// Cleanup interval reference
+let cleanupInterval = null;
+
+/**
+ * Start the stale stream cleanup interval
+ * Runs every 2 minutes to check for streams inactive for 3+ minutes
+ */
+export function startStaleStreamCleanup() {
+  if (cleanupInterval) return; // Already running
+
+  cleanupInterval = setInterval(() => {
+    const now = Date.now();
+
+    for (const [requestId, state] of activeStreams.entries()) {
+      const inactiveMs = now - state.lastActivity;
+      if (inactiveMs > STALE_STREAM_TIMEOUT_MS) {
+        try {
+          log.warn(`Cleaning up stale stream ${requestId} (inactive for ${(inactiveMs / 1000 / 60).toFixed(1)} minutes)`);
+          cleanupStream(requestId);
+        } catch (err) {
+          log.error(`Error cleaning up stale stream ${requestId}: ${err.message}`);
+        }
+      }
+    }
+
+  }, STALE_STREAM_INTERVAL_MS);
+
+  log.info('Started stale stream cleanup interval');
+}
+
+/**
+ * Clean up a streaming request (e.g., on error or disconnect)
+ * @param {string} requestId - Request ID to clean up
+ */
+export function cleanupStream(requestId) {
+  const state = activeStreams.get(requestId);
+  if (state) {
+    // state.stream is the Node.js Readable stream from the axios HTTP response
+    // Destroying it releases the TCP connection and stops buffering data
+    state.stream.destroy();
+    activeStreams.delete(requestId);
+
+    // Recalculate total buffer size to fix any drift
+    recalculateTotalBufferSize();
+
+    // Resume any paused streams now that buffer space is freed
+    resumePausedStreams();
+  }
+}
+
+/**
+ * Clean up all active streams (e.g., on socket disconnect)
+ * Streams cannot recover after socket reconnect since server-side state is lost
+ */
+export function cleanupAllStreams() {
+  const count = activeStreams.size;
+  if (count === 0) return;
+
+  for (const requestId of activeStreams.keys()) {
+    cleanupStream(requestId);
+  }
+
+  log.info(`Cleaned up all ${count} streams after disconnect`);
+}
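The two constants above bound how long an orphaned stream can pin buffer memory. A worked example of the sweep timing:

// Values from cleanup.js:
const STALE_STREAM_TIMEOUT_MS = 3 * 60 * 1000;   // stale after 3 min without a pull
const STALE_STREAM_INTERVAL_MS = 2 * 60 * 1000;  // sweep every 2 min
// A stream that goes idle just after a sweep is still fresh at the next sweep
// (2 min idle < 3 min timeout) and is reaped at the one after, so the
// worst-case cleanup latency is roughly TIMEOUT + INTERVAL = 5 minutes.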
package/app/streaming/flowControl.js
ADDED

@@ -0,0 +1,39 @@
+/**
+ * Stream flow control - manages pausing/resuming streams based on buffer limits
+ */
+
+import { activeStreams, totalBufferSize, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+import { log } from '../log.js';
+
+/**
+ * Pause a stream if global or per-stream buffer limits are exceeded
+ * @param {string} requestId - Request ID
+ * @param {Readable} stream - The readable stream to pause
+ */
+export function pauseStreamIfOverLimit(requestId, stream) {
+  const state = activeStreams.get(requestId);
+  if (!state || state.paused) return;
+
+  const shouldPause = totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE || state.buffer.length >= PER_STREAM_MAX_BUFFER_SIZE;
+  if (shouldPause) {
+    state.paused = true;
+    stream.pause();
+    log.info(`Stream paused for ${requestId}, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+  }
+}
+
+/**
+ * Resume any paused streams if both global and per-stream buffers are below thresholds
+ * Called after cleanup frees up buffer space
+ */
+export function resumePausedStreams() {
+  if (totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE) return;
+
+  for (const [requestId, state] of activeStreams.entries()) {
+    if (state.paused && !state.complete && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE) {
+      state.paused = false;
+      state.stream.resume();
+      log.info(`Stream resumed for ${requestId} after cleanup, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+    }
+  }
+}
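Backpressure triggers on whichever limit is crossed first; a quick illustration using the limits imported from state.js:

const MB = 1024 * 1024;
const PER_STREAM_MAX_BUFFER_SIZE = 15 * MB;  // 1.5 x STREAM_CHUNK_SIZE
const GLOBAL_MAX_BUFFER_SIZE = 300 * MB;     // 30 x STREAM_CHUNK_SIZE
// A single fast producer is paused once its own buffer hits 15 MB, long before
// the global cap matters. Conversely, 25 streams holding 12 MB each (300 MB
// total) trip the global cap even though every stream is under its own limit;
// they resume one by one as get_next_chunk pulls drain the buffers.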
package/app/streaming/handlers.js
ADDED

@@ -0,0 +1,150 @@
+/**
+ * Socket.IO event handlers for streaming
+ */
+
+import { activeStreams, totalBufferSize, subtractFromTotalBuffer, STREAM_CHUNK_SIZE, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+import { cleanupStream } from './cleanup.js';
+import { log } from '../log.js';
+
+/**
+ * Register streaming event handlers on a Socket.IO socket
+ * @param {Socket} socket - Socket.IO socket instance
+ */
+export function registerStreamingHandlers(socket) {
+  socket.on('get_next_chunk', handleGetNextChunk);
+  socket.on('abort_stream', handleAbortStream);
+}
+
+/**
+ * Handle get_next_chunk request from server (pull-based streaming)
+ * Waits for data if buffer is empty but stream isn't complete
+ * @param {object} data - Request data containing request_id
+ * @param {function} callback - Socket.IO callback
+ */
+export async function handleGetNextChunk(data, callback) {
+  const requestId = data.request_id;
+  const state = activeStreams.get(requestId);
+
+  if (!state) {
+    log.warn(`get_next_chunk: No active stream for request ${requestId}`);
+    callback({ error: 'no_stream', request_id: requestId });
+    return;
+  }
+
+  state.lastActivity = Date.now();
+
+  // Check for stream error
+  if (state.error) {
+    cleanupStreamWithError(requestId, state.error, callback);
+    return;
+  }
+
+  // Wait for data if buffer doesn't have enough for a full chunk
+  await waitForData(state);
+
+  // Check for error again after waiting
+  if (state.error) {
+    cleanupStreamWithError(requestId, state.error, callback);
+    return;
+  }
+
+  // Extract chunk from buffer
+  const { chunkData, isComplete } = extractChunkFromBuffer(state, requestId);
+
+  state.totalBytesSent += chunkData.length;
+  state.chunkIndex++;
+
+  log.info(`get_next_chunk ${state.chunkIndex} for ${requestId}: ${chunkData.length} bytes, complete=${isComplete}, total=${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB`);
+
+  // Clean up if complete
+  if (isComplete) {
+    activeStreams.delete(requestId);
+    log.info(`Streaming complete for ${requestId}: ${state.chunkIndex} chunks, ${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB total`);
+  }
+
+  callback({
+    request_id: requestId,
+    data: chunkData.toString('base64'),
+    complete: isComplete,
+    chunk_index: state.chunkIndex
+  });
+}
+
+/**
+ * Wait for buffer to have enough data or stream to complete
+ */
+async function waitForData(state) {
+  const maxWaitMs = 60000;
+  const checkIntervalMs = 100;
+  let waited = 0;
+
+  const hasEnoughData = () => state.buffer.length >= STREAM_CHUNK_SIZE;
+  const isStreamDone = () => state.complete || state.error;
+  const hasTimedOut = () => waited >= maxWaitMs;
+
+  while (!hasEnoughData() && !isStreamDone() && !hasTimedOut()) {
+    await new Promise(resolve => setTimeout(resolve, checkIntervalMs));
+    waited += checkIntervalMs;
+  }
+}
+
+/**
+ * Extract a chunk from the buffer and manage buffer state
+ */
+function extractChunkFromBuffer(state, requestId) {
+  let chunkData;
+  let isComplete = false;
+
+  if (state.buffer.length >= STREAM_CHUNK_SIZE) {
+    // Have enough data for a full chunk
+    chunkData = state.buffer.slice(0, STREAM_CHUNK_SIZE);
+    state.buffer = state.buffer.slice(STREAM_CHUNK_SIZE);
+    subtractFromTotalBuffer(chunkData.length);
+
+    // Resume stream if it was paused and both limits are now satisfied
+    resumeStreamIfBelowBufferLimits(state);
+  } else if (state.complete) {
+    // Stream is done, send remaining buffer
+    chunkData = state.buffer;
+    subtractFromTotalBuffer(chunkData.length);
+    state.buffer = Buffer.alloc(0);
+    isComplete = true;
+  } else {
+    // Timeout waiting for data - send what we have
+    log.warn(`get_next_chunk: Timeout waiting for data for ${requestId}, sending ${state.buffer.length} bytes`);
+    chunkData = state.buffer;
+    subtractFromTotalBuffer(chunkData.length);
+    state.buffer = Buffer.alloc(0);
+  }
+
+  return { chunkData, isComplete };
+}
+
+/**
+ * Resume a paused stream if buffer limits allow
+ */
+function resumeStreamIfBelowBufferLimits(state) {
+  const canResume = totalBufferSize() < GLOBAL_MAX_BUFFER_SIZE && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE;
+  if (state.paused && canResume) {
+    state.paused = false;
+    state.stream.resume();
+  }
+}
+
+/**
+ * Clean up stream and send error callback
+ */
+function cleanupStreamWithError(requestId, error, callback) {
+  activeStreams.delete(requestId);
+  callback({ error, request_id: requestId });
+}
+
+/**
+ * Handle abort_stream request from server (caller (aikido service) disconnected)
+ * @param {object} data - Request data containing request_id
+ */
+function handleAbortStream(data) {
+  const requestId = data.request_id;
+  log.warn(`Received abort_stream for ${requestId} - caller disconnected`);
+  cleanupStream(requestId);
+}
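Each get_next_chunk ack carries the chunk base64-encoded, so the payload on the wire is about a third larger than the raw chunk; a quick check with STREAM_CHUNK_SIZE from state.js:

const STREAM_CHUNK_SIZE = 10 * 1024 * 1024;              // 10 MB raw
const wireBytes = Math.ceil(STREAM_CHUNK_SIZE / 3) * 4;  // base64 expansion
console.log((wireBytes / (1024 * 1024)).toFixed(2));     // "13.33" MB per full chunk

Shipping roughly 13 MB acks is presumably also what motivates the raised pingTimeout in client.js ("Increase timeouts for heavy streaming workloads").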
package/app/streaming/initStreamingResponse.js
ADDED

@@ -0,0 +1,84 @@
+/**
+ * Initialize streaming response - stores stream state for pull-based chunk retrieval
+ */
+
+import { activeStreams, addToTotalBuffer } from './state.js';
+import { log } from '../log.js';
+import { pauseStreamIfOverLimit } from './flowControl.js';
+
+/**
+ * Initialize streaming response - stores stream state for pull-based chunk retrieval
+ * Server will call get_next_chunk to pull chunks
+ * @param {object} options
+ * @param {string} options.requestId - Request ID for tracking
+ * @param {number} options.statusCode - HTTP status code
+ * @param {object} options.headers - Response headers
+ * @param {ReadableStream} options.stream - Response stream from axios
+ * @param {function} options.callback - Socket.IO callback for initial response
+ */
+export function initStreamingResponse({ requestId, statusCode, headers, stream, callback }) {
+  log.info(`Initializing streaming response for request ${requestId}`);
+
+  // Store stream state for pull-based retrieval
+  const streamState = {
+    stream,
+    buffer: Buffer.alloc(0),
+    complete: false,
+    error: null,
+    totalBytesSent: 0,
+    chunkIndex: 0,
+    paused: false,
+    lastActivity: Date.now()
+  };
+
+  activeStreams.set(requestId, streamState);
+
+  // Buffer data as it arrives from the internal resource
+  // Use bounded buffering with global and per-stream limits to control memory usage
+  stream.on('data', (chunk) => {
+    // Yield to event loop periodically to allow ping/pong processing
+    // This prevents stream data from starving Socket.IO heartbeats
+    setImmediate(() => {
+      const state = activeStreams.get(requestId);
+      if (!state) {
+        // Stream was cleaned up, ignore late data
+        return;
+      }
+
+      state.buffer = Buffer.concat([state.buffer, chunk]);
+      addToTotalBuffer(chunk.length);
+
+      // Pause stream if buffer limits exceeded (backpressure)
+      pauseStreamIfOverLimit(requestId, stream);
+    });
+  });
+
+  stream.on('end', () => {
+    const state = activeStreams.get(requestId);
+    if (state) {
+      state.complete = true;
+      log.info(`Stream ended for request ${requestId}, ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB remaining in buffer`);
+    }
+  });
+
+  stream.on('error', (error) => {
+    const state = activeStreams.get(requestId);
+    if (state) {
+      state.error = error.message;
+      state.complete = true;
+      log.error(`Stream error for request ${requestId}: ${error.message}`);
+    }
+  });
+
+  // Send initial response - server will start pulling chunks
+  callback({
+    request_id: requestId,
+    status_code: statusCode,
+    headers: headers,
+    body: null,
+    version: 2,
+    streaming: true
+  });
+
+  log.info(`Streaming initialized for request ${requestId}, server will pull chunks`);
+}
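Both response paths ack with version: 2; the streaming ack differs only in body: null plus streaming: true. A hypothetical server-side dispatch on that flag (beginPulling and finishResponse are illustrative names, not part of this package):

function onForwardResponse(ack) {
  if (ack.streaming) {
    // Status and headers are known now; the body follows via get_next_chunk pulls
    beginPulling(ack.request_id, ack.status_code, ack.headers);
  } else {
    // Direct path: the full body arrives base64-encoded (or null)
    const body = ack.body === null ? null : Buffer.from(ack.body, 'base64');
    finishResponse(ack.request_id, ack.status_code, ack.headers, body);
  }
}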
package/app/streaming/state.js
ADDED

@@ -0,0 +1,82 @@
+/**
+ * Shared state for streaming - buffer tracking and active streams
+ */
+
+// Chunk size for streaming responses (10MB before base64 encoding)
+export const STREAM_CHUNK_SIZE = 10 * 1024 * 1024;
+
+// Global maximum buffer size across ALL streams
+export const GLOBAL_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 30; // 300MB
+
+// Per-stream maximum buffer size
+export const PER_STREAM_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 1.5; // 15MB
+
+// Threshold for using streaming vs direct response
+export const STREAMING_THRESHOLD = STREAM_CHUNK_SIZE;
+
+// Active streams for pull-based streaming (server pulls chunks from client)
+// Key: request_id, Value: { stream, buffer, complete, error, totalBytesSent, chunkIndex, paused, lastActivity }
+export const activeStreams = new Map();
+
+// Track total buffer size across all streams
+let _totalBufferSize = 0;
+
+/**
+ * Get the current total buffer size across all streams
+ * @returns {number} Total bytes buffered
+ */
+export function totalBufferSize() {
+  return _totalBufferSize;
+}
+
+/**
+ * Set the total buffer size (used for reset)
+ * @param {number} value - New value
+ */
+export function setTotalBufferSize(value) {
+  _totalBufferSize = value;
+}
+
+/**
+ * Add to the total buffer size
+ * @param {number} bytes - Bytes to add
+ */
+export function addToTotalBuffer(bytes) {
+  _totalBufferSize += bytes;
+}
+
+/**
+ * Subtract from the total buffer size
+ * @param {number} bytes - Bytes to subtract
+ */
+export function subtractFromTotalBuffer(bytes) {
+  _totalBufferSize -= bytes;
+}
+
+/**
+ * Recalculate total buffer size from all active streams
+ * Used to fix any drift in the counter
+ * @returns {number} Actual total bytes buffered
+ */
+export function recalculateTotalBufferSize() {
+  let actual = 0;
+  for (const [, state] of activeStreams.entries()) {
+    if (state.buffer) {
+      actual += state.buffer.length;
+    }
+  }
+
+  if (actual !== _totalBufferSize) {
+    _totalBufferSize = actual;
+  }
+
+  return actual;
+}
+
+/**
+ * Get count of active streams (for monitoring)
+ * @returns {number} Number of active streams
+ */
+export function getActiveStreamCount() {
+  return activeStreams.size;
+}
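A worked example of how these constants play out for a single GET:

const MB = 1024 * 1024;
// HEAD reports 9 MB: not above STREAMING_THRESHOLD (10 MB), so the response
// is buffered whole and acked as one base64 body.
// HEAD reports 11 MB: above the threshold, so the client streams it - one
// full 10 MB chunk, then the final 1 MB with complete=true on the second pull.
const pulls = Math.ceil((11 * MB) / (10 * MB)); // 2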
package/app/streamingHandler.js
ADDED

@@ -0,0 +1,15 @@
+/**
+ * Streaming Handler - handles large file streaming with pull-based chunk retrieval
+ * Server pulls chunks via get_next_chunk calls
+ *
+ * Uses bounded buffering to limit memory usage - pauses stream when buffer is full,
+ * resumes when buffer is drained.
+ *
+ * This file re-exports from the streaming/ module for backwards compatibility.
+ */
+
+export { STREAM_CHUNK_SIZE, STREAMING_THRESHOLD, totalBufferSize as getTotalBufferSize, getActiveStreamCount } from './streaming/state.js';
+export { registerStreamingHandlers } from './streaming/handlers.js';
+export { initStreamingResponse } from './streaming/initStreamingResponse.js';
+export { startStaleStreamCleanup, cleanupStream, cleanupAllStreams } from './streaming/cleanup.js';
+
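A sketch of how a caller might use the re-exported monitoring helpers; the periodic log line is illustrative, not something the package does itself:

import { getTotalBufferSize, getActiveStreamCount } from './streamingHandler.js';

// Log streaming pressure every 30s
setInterval(() => {
  const bufferedMB = (getTotalBufferSize() / (1024 * 1024)).toFixed(1);
  console.log(`[streams] active=${getActiveStreamCount()} buffered=${bufferedMB} MB`);
}, 30000);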
package/package.json
CHANGED

@@ -1,12 +1,14 @@
 {
   "name": "@aikidosec/broker-client",
-  "version": "1.0.6",
+  "version": "1.0.7",
   "description": "Aikido Broker Client - Runs in customer network to forward requests to internal resources",
   "main": "app/client.js",
   "type": "module",
   "scripts": {
     "start": "node app/client.js",
-    "dev": "node --watch app/client.js"
+    "dev": "node --watch app/client.js",
+    "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js",
+    "test:watch": "node --experimental-vm-modules node_modules/jest/bin/jest.js --watch"
   },
   "keywords": [
     "broker",

@@ -38,9 +40,19 @@
     "native-dns": "0.7.0",
     "socket.io-client": "4.8.1"
   },
+  "devDependencies": {
+    "jest": "30.2.0"
+  },
   "files": [
     "app/",
     "README.md",
     "LICENSE"
-  ]
+  ],
+  "jest": {
+    "testEnvironment": "node",
+    "testMatch": [
+      "**/tests/**/*.test.js"
+    ],
+    "transform": {}
+  }
 }
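The test scripts go through node --experimental-vm-modules because the package is "type": "module" and Jest needs the VM-modules flag for native ESM. A minimal sketch of the kind of test the testMatch pattern would pick up; the file path and assertions are illustrative (no tests ship in the published package, whose files list covers only app/, README.md and LICENSE):

// tests/state.test.js
import { setTotalBufferSize, addToTotalBuffer, subtractFromTotalBuffer, totalBufferSize } from '../app/streaming/state.js';

test('total buffer counter tracks adds and subtracts', () => {
  setTotalBufferSize(0);     // reset the module-level counter
  addToTotalBuffer(1024);    // simulate a 1 KB data event
  subtractFromTotalBuffer(512); // simulate draining half of it
  expect(totalBufferSize()).toBe(512);
});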