@aikidosec/broker-client 1.0.6 → 1.0.8

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -21,7 +21,7 @@ docker compose up -d
 ## Quick Start
 
 1. **Generate CLIENT_SECRET in Aikido UI**:
-   - Navigate to: Settings → Broker Clients → Add New Client
+   - Navigate to: Settings → [Broker Clients](https://app.aikido.dev/settings/integrations/broker/clients) → Add New Client
    - Copy the generated `CLIENT_SECRET`
 
 2. **Configure environment** (`.env`):
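The README hunk ends at step 2. As a minimal sketch of that step (only `CLIENT_SECRET` and the optional `DNS_SERVERS` variable are visible in the client code in this diff; the exact set of supported variables and the `DNS_SERVERS` value format are assumptions, not taken from the published README):

```ini
# .env - illustrative sketch only
# Secret generated in the Aikido UI in step 1
CLIENT_SECRET=paste-generated-secret-here

# Optional: custom DNS servers for resolving internal hostnames
# (read from process.env.DNS_SERVERS in app/client.js; value format not shown in this diff)
DNS_SERVERS=10.0.0.2
```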
package/app/client.js CHANGED
@@ -13,14 +13,10 @@ import dns from 'native-dns';
 import { ResourceManager } from './resourceManager.js';
 import { HttpsProxyAgent } from 'https-proxy-agent';
 import { getClientId, setClientIdCache, getServerUrl, getClientSecret } from './config.js';
-
-// Configure logging
-const log = {
-  info: (msg) => console.log(`[INFO] ${new Date().toISOString()} - ${msg}`),
-  warn: (msg) => console.warn(`[WARN] ${new Date().toISOString()} - ${msg}`),
-  error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
-  debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
-};
+import { registerStreamingHandlers } from './streaming/handlers.js';
+import { initStreamingResponse } from './streaming/initStreamingResponse.js';
+import { startStaleStreamCleanup, cleanupAllStreams } from './streaming/cleanup.js';
+import { log } from './log.js';
 
 // Broker Server Configuration
 const CLIENT_SECRET = getClientSecret();
@@ -36,7 +32,7 @@ const DNS_SERVERS = process.env.DNS_SERVERS
   : null;
 
 // Configure axios defaults
-const MAX_RESPONSE_SIZE = 100 * 1024 * 1024; // 100 MB
+const MAX_RESPONSE_SIZE = 50 * 1024 * 1024; // 50 MB (falls back to streaming if exceeded)
 const axiosConfig = {
   timeout: 300000,
   maxRedirects: 5,
@@ -107,6 +103,48 @@ async function resolveInternalHostname(hostname) {
   });
 }
 
+/**
+ * Make an internal HTTP request with automatic fallback to streaming if the buffered response is too large.
+ * @param {object} options - Request options
+ * @param {string} options.method - HTTP method
+ * @param {string} options.url - Target URL
+ * @param {object} options.headers - Request headers
+ * @param {*} options.body - Request body
+ * @param {string} options.requestId - Request ID for logging
+ * @returns {Promise<{response: object, useStreaming: boolean}>} Response and whether streaming was used
+ */
+async function forwardRequestToInternalResource({ method, url, headers, body, requestId }) {
+  const makeRequest = async (streaming) => {
+    let requestData = {
+      method,
+      url,
+      headers,
+      data: body,
+      validateStatus: () => true, // Accept all status codes
+      responseType: streaming ? 'stream' : 'arraybuffer'
+    }
+    if (streaming) {
+      requestData.maxContentLength = Infinity;
+    }
+    // If streaming, this returns a stream immediately; it does not wait for the full response
+    return await internalHttpClient.request(requestData);
+  };
+
+  // Try a buffered request first, fall back to streaming if the response is too large
+  try {
+    return { response: await makeRequest(false), useStreaming: false };
+  } catch (reqError) {
+    // Fall back to streaming if the buffered request exceeded maxContentLength (axios ERR_BAD_RESPONSE)
+    if (axios.isAxiosError(reqError) && // axios has no specific error code for maxContentLength, so the message is checked (this refers to MAX_RESPONSE_SIZE above)
+        reqError.code === 'ERR_BAD_RESPONSE' &&
+        reqError.message?.includes('maxContentLength')) {
+      log.warn(`Buffered request exceeded maxContentLength for ${requestId}, falling back to streaming`);
+      return { response: await makeRequest(true), useStreaming: true };
+    }
+    throw reqError;
+  }
+}
+
 /**
  * Check if URL points to internal resource
  */
@@ -210,7 +248,9 @@ const socket = io(SERVER_URL, {
   randomizationFactor: 0.5,
   tryAllTransports: true, // if we don't, it won't try to fallback from websocket to polling
   autoConnect: false, // Don't connect until after registration
-  withCredentials: true // make sure cookies work for sticky sessions
+  withCredentials: true, // make sure cookies work for sticky sessions
+  // Increase timeouts for heavy streaming workloads
+  pingTimeout: 60000, // 60s (default 20s) - time to wait for pong before considering connection dead
 });
 
 // Socket.IO event handlers
@@ -230,8 +270,10 @@ socket.on('connect', async () => {
   }
 });
 
-socket.on('disconnect', () => {
-  log.warn("Disconnected from broker server");
+socket.on('disconnect', (reason) => {
+  log.warn(`Disconnected from broker server: ${reason}`);
+  // Clean up all active streams - they can't recover after reconnect
+  cleanupAllStreams();
 });
 
 socket.on('connect_error', (error) => {
@@ -258,6 +300,12 @@ socket.io.on('reconnect_failed', () => {
   log.error(`Socket.IO reconnection failed after all attempts`);
 });
 
+// Register streaming handlers (handle next chunk and abort stream on client disconnect)
+registerStreamingHandlers(socket);
+
+// Start cleanup interval for stale streams
+startStaleStreamCleanup();
+
 socket.on('forward_request', async (data, callback) => {
   /**
    * Receive request from broker server and forward to internal resource
@@ -309,29 +357,41 @@ socket.on('forward_request', async (data, callback) => {
     }
   }
 
-    // Forward the request to the internal resource
-    const response = await internalHttpClient.request({
+    // Make the request (with automatic fallback to streaming if the buffered response is too large)
+    const { response, useStreaming } = await forwardRequestToInternalResource({
       method,
       url: resolvedUrl,
       headers,
-      data: body,
-      validateStatus: () => true, // Accept any status code
-      responseType: 'arraybuffer', // Get raw bytes, don't parse JSON
+      body,
+      requestId
     });
 
-    log.info(`Successfully forwarded request ${requestId} to ${targetUrl}, status: ${response.status}`);
-
-    // Return response via acknowledgement
-    // Send body as base64 to preserve binary data byte-for-byte (critical for Docker registry digests)
-    const responseBody = response.data ? Buffer.from(response.data).toString('base64') : null;
-
-    callback({
-      request_id: requestId,
-      status_code: response.status,
-      headers: response.headers,
-      body: responseBody,
-      version: 2
-    });
+    if (useStreaming) {
+      // Initialize streaming - server will pull chunks via get_next_chunk
+      initStreamingResponse({
+        requestId,
+        statusCode: response.status,
+        headers: response.headers,
+        stream: response.data,
+        callback
+      });
+    } else {
+      const responseSizeBytes = response.data ? response.data.length : 0;
+      const responseSizeMB = (responseSizeBytes / (1024 * 1024)).toFixed(2);
+
+      log.info(`Successfully forwarded request ${requestId} to ${targetUrl}, status: ${response.status}, response size: ${responseSizeMB} MB`);
+
+      // Send direct response
+      sendDirectResponse(
+        requestId,
+        response.status,
+        response.headers,
+        response.data,
+        callback
+      );
+
+      log.info(`Response sent for request ${requestId}`);
+    }
 
   } catch (error) {
     log.error(`Error forwarding request ${requestId} to ${targetUrl}: ${error?.response?.status || error.message}`);
@@ -346,6 +406,25 @@ socket.on('forward_request', async (data, callback) => {
   }
 });
 
+/**
+ * Send a direct response (for small files under the streaming threshold)
+ * @param {string} requestId - Request ID for tracking
+ * @param {number} statusCode - HTTP status code
+ * @param {object} headers - Response headers
+ * @param {Buffer|null} responseData - Raw response data (before base64 encoding)
+ * @param {function} callback - Socket.IO callback
+ */
+function sendDirectResponse(requestId, statusCode, headers, responseData, callback) {
+  const responseBody = responseData ? responseData.toString('base64') : null;
+  callback({
+    request_id: requestId,
+    status_code: statusCode,
+    headers: headers,
+    body: responseBody,
+    version: 2
+  });
+}
+
 function formatMessageBody(message) {
   return Buffer.from(message, 'utf-8').toString('base64');
 }
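Taken together, the client.js changes implement a pull-based streaming protocol: when a buffered response would exceed `MAX_RESPONSE_SIZE`, the acknowledgement carries `streaming: true` with an empty body, and the broker server is expected to fetch the payload via `get_next_chunk` until `complete` is returned, or send `abort_stream` to drop it. The server side is not part of this package; the following is only a sketch of how a counterpart could drive the protocol, assuming a connected server-side Socket.IO socket (`clientSocket`) and reusing the event names and payload fields defined in `streaming/handlers.js` (the 70 s ack timeout is an assumption chosen to exceed the client's 60 s `waitForData` limit):

```js
// Illustrative only - not part of the published package.
// Pulls base64-encoded chunks from the broker client until `complete` is true.
async function pullStreamedResponse(clientSocket, requestId) {
  const chunks = [];
  while (true) {
    // Ask the broker client for the next chunk and wait for its acknowledgement
    const reply = await clientSocket
      .timeout(70000)
      .emitWithAck('get_next_chunk', { request_id: requestId });

    if (reply.error) {
      throw new Error(`Stream failed for ${requestId}: ${reply.error}`);
    }
    chunks.push(Buffer.from(reply.data, 'base64')); // chunks arrive base64-encoded
    if (reply.complete) break;
  }
  return Buffer.concat(chunks); // reassembled response body
}

// If the original caller disconnects, tell the broker client to drop its stream state
function abortStreamedResponse(clientSocket, requestId) {
  clientSocket.emit('abort_stream', { request_id: requestId });
}
```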
package/app/log.js ADDED
@@ -0,0 +1,10 @@
+/**
+ * Logging utility for streaming module
+ */
+
+export const log = {
+  info: (msg) => console.log(`[INFO] ${new Date().toISOString()} - ${msg}`),
+  warn: (msg) => console.warn(`[WARN] ${new Date().toISOString()} - ${msg}`),
+  error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
+  debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
+};
package/app/streaming/cleanup.js ADDED
@@ -0,0 +1,76 @@
+/**
+ * Stream cleanup utilities - manages cleanup of streaming state
+ */
+
+import { activeStreams, recalculateTotalBufferSize } from './state.js';
+import { log } from '../log.js';
+import { resumePausedStreams } from './flowControl.js';
+
+// Stale stream timeout (3 minutes)
+const STALE_STREAM_TIMEOUT_MS = 3 * 60 * 1000;
+const STALE_STREAM_INTERVAL_MS = 2 * 60 * 1000; // Check every 2 minutes
+
+// Cleanup interval reference
+let cleanupInterval = null;
+
+/**
+ * Start the stale stream cleanup interval
+ * Runs every 2 minutes to check for streams inactive for 3+ minutes
+ */
+export function startStaleStreamCleanup() {
+  if (cleanupInterval) return; // Already running
+
+  cleanupInterval = setInterval(() => {
+    const now = Date.now();
+
+    for (const [requestId, state] of activeStreams.entries()) {
+      const inactiveMs = now - state.lastActivity;
+      if (inactiveMs > STALE_STREAM_TIMEOUT_MS) {
+        try {
+          log.warn(`Cleaning up stale stream ${requestId} (inactive for ${(inactiveMs / 1000 / 60).toFixed(1)} minutes)`);
+          cleanupStream(requestId);
+        } catch (err) {
+          log.error(`Error cleaning up stale stream ${requestId}: ${err.message}`);
+        }
+      }
+    }
+
+  }, STALE_STREAM_INTERVAL_MS);
+
+  log.info('Started stale stream cleanup interval');
+}
+
+/**
+ * Clean up a streaming request (e.g., on error or disconnect)
+ * @param {string} requestId - Request ID to clean up
+ */
+export function cleanupStream(requestId) {
+  const state = activeStreams.get(requestId);
+  if (state) {
+    // state.stream is the Node.js Readable stream from the axios HTTP response
+    // Destroying it releases the TCP connection and stops buffering data
+    state.stream.destroy();
+    activeStreams.delete(requestId);
+
+    // Recalculate total buffer size to fix any drift
+    recalculateTotalBufferSize();
+
+    // Resume any paused streams now that buffer space is freed
+    resumePausedStreams();
+  }
+}
+
+/**
+ * Clean up all active streams (e.g., on socket disconnect)
+ * Streams cannot recover after socket reconnect since server-side state is lost
+ */
+export function cleanupAllStreams() {
+  const count = activeStreams.size;
+  if (count === 0) return;
+
+  for (const requestId of activeStreams.keys()) {
+    cleanupStream(requestId);
+  }
+
+  log.info(`Cleaned up all ${count} streams after disconnect`);
+}
package/app/streaming/flowControl.js ADDED
@@ -0,0 +1,39 @@
+/**
+ * Stream flow control - manages pausing/resuming streams based on buffer limits
+ */
+
+import { activeStreams, totalBufferSize, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+import { log } from '../log.js';
+
+/**
+ * Pause a stream if global or per-stream buffer limits are exceeded
+ * @param {string} requestId - Request ID
+ * @param {Readable} stream - The readable stream to pause
+ */
+export function pauseStreamIfOverLimit(requestId, stream) {
+  const state = activeStreams.get(requestId);
+  if (!state || state.paused) return;
+
+  const shouldPause = totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE || state.buffer.length >= PER_STREAM_MAX_BUFFER_SIZE;
+  if (shouldPause) {
+    state.paused = true;
+    stream.pause();
+    log.info(`Stream paused for ${requestId}, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+  }
+}
+
+/**
+ * Resume any paused streams if both global and per-stream buffers are below thresholds
+ * Called after cleanup frees up buffer space
+ */
+export function resumePausedStreams() {
+  if (totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE) return;
+
+  for (const [requestId, state] of activeStreams.entries()) {
+    if (state.paused && !state.complete && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE) {
+      state.paused = false;
+      state.stream.resume();
+      log.info(`Stream resumed for ${requestId} after cleanup, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+    }
+  }
+}
package/app/streaming/handlers.js ADDED
@@ -0,0 +1,150 @@
+/**
+ * Socket.IO event handlers for streaming
+ */
+
+import { activeStreams, totalBufferSize, subtractFromTotalBuffer, STREAM_CHUNK_SIZE, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+import { cleanupStream } from './cleanup.js';
+import { log } from '../log.js';
+
+/**
+ * Register streaming event handlers on a Socket.IO socket
+ * @param {Socket} socket - Socket.IO socket instance
+ */
+export function registerStreamingHandlers(socket) {
+  socket.on('get_next_chunk', handleGetNextChunk);
+  socket.on('abort_stream', handleAbortStream);
+}
+
+/**
+ * Handle get_next_chunk request from server (pull-based streaming)
+ * Waits for data if buffer is empty but stream isn't complete
+ * @param {object} data - Request data containing request_id
+ * @param {function} callback - Socket.IO callback
+ */
+export async function handleGetNextChunk(data, callback) {
+  const requestId = data.request_id;
+  const state = activeStreams.get(requestId);
+
+  if (!state) {
+    log.warn(`get_next_chunk: No active stream for request ${requestId}`);
+    callback({ error: 'no_stream', request_id: requestId });
+    return;
+  }
+
+  state.lastActivity = Date.now();
+
+  // Check for stream error
+  if (state.error) {
+    cleanupStreamWithError(requestId, state.error, callback);
+    return;
+  }
+
+  // Wait for data if buffer doesn't have enough for a full chunk
+  await waitForData(state);
+
+  // Check for error again after waiting
+  if (state.error) {
+    cleanupStreamWithError(requestId, state.error, callback);
+    return;
+  }
+
+  // Extract chunk from buffer
+  const { chunkData, isComplete } = extractChunkFromBuffer(state, requestId);
+
+  state.totalBytesSent += chunkData.length;
+  state.chunkIndex++;
+
+  log.info(`get_next_chunk ${state.chunkIndex} for ${requestId}: ${chunkData.length} bytes, complete=${isComplete}, total=${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB`);
+
+  // Clean up if complete
+  if (isComplete) {
+    activeStreams.delete(requestId);
+    log.info(`Streaming complete for ${requestId}: ${state.chunkIndex} chunks, ${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB total`);
+  }
+
+  callback({
+    request_id: requestId,
+    data: chunkData.toString('base64'),
+    complete: isComplete,
+    chunk_index: state.chunkIndex
+  });
+}
+
+/**
+ * Wait for buffer to have enough data or stream to complete
+ */
+async function waitForData(state) {
+  const maxWaitMs = 60000;
+  const checkIntervalMs = 100;
+  let waited = 0;
+
+  const hasEnoughData = () => state.buffer.length >= STREAM_CHUNK_SIZE;
+  const isStreamDone = () => state.complete || state.error;
+  const hasTimedOut = () => waited >= maxWaitMs;
+
+  while (!hasEnoughData() && !isStreamDone() && !hasTimedOut()) {
+    await new Promise(resolve => setTimeout(resolve, checkIntervalMs));
+    waited += checkIntervalMs;
+  }
+}
+
+/**
+ * Extract a chunk from the buffer and manage buffer state
+ */
+function extractChunkFromBuffer(state, requestId) {
+  let chunkData;
+  let isComplete = false;
+
+  if (state.buffer.length >= STREAM_CHUNK_SIZE) {
+    // Have enough data for a full chunk
+    chunkData = state.buffer.slice(0, STREAM_CHUNK_SIZE);
+    state.buffer = state.buffer.slice(STREAM_CHUNK_SIZE);
+    subtractFromTotalBuffer(chunkData.length);
+
+    // Resume stream if it was paused and both limits are now satisfied
+    resumeStreamIfBelowBufferLimits(state);
+  } else if (state.complete) {
+    // Stream is done, send remaining buffer
+    chunkData = state.buffer;
+    subtractFromTotalBuffer(chunkData.length);
+    state.buffer = Buffer.alloc(0);
+    isComplete = true;
+  } else {
+    // Timeout waiting for data - send what we have
+    log.warn(`get_next_chunk: Timeout waiting for data for ${requestId}, sending ${state.buffer.length} bytes`);
+    chunkData = state.buffer;
+    subtractFromTotalBuffer(chunkData.length);
+    state.buffer = Buffer.alloc(0);
+  }
+
+  return { chunkData, isComplete };
+}
+
+/**
+ * Resume a paused stream if buffer limits allow
+ */
+function resumeStreamIfBelowBufferLimits(state) {
+  const canResume = totalBufferSize() < GLOBAL_MAX_BUFFER_SIZE && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE;
+  if (state.paused && canResume) {
+    state.paused = false;
+    state.stream.resume();
+  }
+}
+
+/**
+ * Clean up stream and send error callback
+ */
+function cleanupStreamWithError(requestId, error, callback) {
+  activeStreams.delete(requestId);
+  callback({ error, request_id: requestId });
+}
+
+/**
+ * Handle abort_stream request from server (the caller, i.e. the Aikido service, disconnected)
+ * @param {object} data - Request data containing request_id
+ */
+function handleAbortStream(data) {
+  const requestId = data.request_id;
+  log.warn(`Received abort_stream for ${requestId} - caller disconnected`);
+  cleanupStream(requestId);
+}
package/app/streaming/initStreamingResponse.js ADDED
@@ -0,0 +1,84 @@
+/**
+ * Initialize streaming response - stores stream state for pull-based chunk retrieval
+ */
+
+import { activeStreams, addToTotalBuffer } from './state.js';
+import { log } from '../log.js';
+import { pauseStreamIfOverLimit } from './flowControl.js';
+
+/**
+ * Initialize streaming response - stores stream state for pull-based chunk retrieval
+ * Server will call get_next_chunk to pull chunks
+ * @param {object} options
+ * @param {string} options.requestId - Request ID for tracking
+ * @param {number} options.statusCode - HTTP status code
+ * @param {object} options.headers - Response headers
+ * @param {ReadableStream} options.stream - Response stream from axios
+ * @param {function} options.callback - Socket.IO callback for initial response
+ */
+export function initStreamingResponse({ requestId, statusCode, headers, stream, callback }) {
+  log.info(`Initializing streaming response for request ${requestId}`);
+
+  // Store stream state for pull-based retrieval
+  const streamState = {
+    stream,
+    buffer: Buffer.alloc(0),
+    complete: false,
+    error: null,
+    totalBytesSent: 0,
+    chunkIndex: 0,
+    paused: false,
+    lastActivity: Date.now()
+  };
+
+  activeStreams.set(requestId, streamState);
+
+  // Buffer data as it arrives from the internal resource
+  // Use bounded buffering with global and per-stream limits to control memory usage
+  stream.on('data', (chunk) => {
+    // Yield to event loop periodically to allow ping/pong processing
+    // This prevents stream data from starving Socket.IO heartbeats
+    setImmediate(() => {
+      const state = activeStreams.get(requestId);
+      if (!state) {
+        // Stream was cleaned up, ignore late data
+        return;
+      }
+
+      state.buffer = Buffer.concat([state.buffer, chunk]);
+      addToTotalBuffer(chunk.length);
+
+      // Pause stream if buffer limits exceeded (backpressure)
+      pauseStreamIfOverLimit(requestId, stream);
+    });
+  });
+
+  stream.on('end', () => {
+    const state = activeStreams.get(requestId);
+    if (state) {
+      state.complete = true;
+      log.info(`Stream ended for request ${requestId}, ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB remaining in buffer`);
+    }
+  });
+
+  stream.on('error', (error) => {
+    const state = activeStreams.get(requestId);
+    if (state) {
+      state.error = error.message;
+      state.complete = true;
+      log.error(`Stream error for request ${requestId}: ${error.message}`);
+    }
+  });
+
+  // Send initial response - server will start pulling chunks
+  callback({
+    request_id: requestId,
+    status_code: statusCode,
+    headers: headers,
+    body: null,
+    version: 2,
+    streaming: true
+  });
+
+  log.info(`Streaming initialized for request ${requestId}, server will pull chunks`);
+}
package/app/streaming/state.js ADDED
@@ -0,0 +1,82 @@
+/**
+ * Shared state for streaming - buffer tracking and active streams
+ */
+
+// Chunk size for streaming responses (10MB before base64 encoding)
+export const STREAM_CHUNK_SIZE = 10 * 1024 * 1024;
+
+// Global maximum buffer size across ALL streams
+export const GLOBAL_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 30; // 300MB
+
+// Per-stream maximum buffer size
+export const PER_STREAM_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 1.5; // 15MB
+
+// Threshold for using streaming vs direct response
+export const STREAMING_THRESHOLD = STREAM_CHUNK_SIZE;
+
+// Active streams for pull-based streaming (server pulls chunks from client)
+// Key: request_id, Value: { stream, buffer, complete, error, totalBytesSent, chunkIndex, paused, lastActivity }
+export const activeStreams = new Map();
+
+// Track total buffer size across all streams
+let _totalBufferSize = 0;
+
+/**
+ * Get the current total buffer size across all streams
+ * @returns {number} Total bytes buffered
+ */
+export function totalBufferSize() {
+  return _totalBufferSize;
+}
+
+/**
+ * Set the total buffer size (used for reset)
+ * @param {number} value - New value
+ */
+export function setTotalBufferSize(value) {
+  _totalBufferSize = value;
+}
+
+/**
+ * Add to the total buffer size
+ * @param {number} bytes - Bytes to add
+ */
+export function addToTotalBuffer(bytes) {
+  _totalBufferSize += bytes;
+}
+
+/**
+ * Subtract from the total buffer size
+ * @param {number} bytes - Bytes to subtract
+ */
+export function subtractFromTotalBuffer(bytes) {
+  _totalBufferSize -= bytes;
+}
+
+/**
+ * Recalculate total buffer size from all active streams
+ * Used to fix any drift in the counter
+ * @returns {number} Actual total bytes buffered
+ */
+export function recalculateTotalBufferSize() {
+  let actual = 0;
+  for (const [, state] of activeStreams.entries()) {
+    if (state.buffer) {
+      actual += state.buffer.length;
+    }
+  }
+
+  if (actual !== _totalBufferSize) {
+    _totalBufferSize = actual;
+  }
+
+  return actual;
+}
+
+/**
+ * Get count of active streams (for monitoring)
+ * @returns {number} Number of active streams
+ */
+export function getActiveStreamCount() {
+  return activeStreams.size;
+}
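For orientation, a rough sketch of what these limits work out to per stream and per socket emit; the base64 expansion factor is standard encoding overhead, not something stated in this package:

```js
// Illustrative arithmetic only
const MiB = 1024 * 1024;
const STREAM_CHUNK_SIZE = 10 * MiB;                          // 10 MiB raw per chunk
const PER_STREAM_MAX_BUFFER_SIZE = 1.5 * STREAM_CHUNK_SIZE;  // 15 MiB buffered per stream before pausing
const GLOBAL_MAX_BUFFER_SIZE = 30 * STREAM_CHUNK_SIZE;       // 300 MiB buffered across all streams
const base64BytesPerChunk = Math.ceil(STREAM_CHUNK_SIZE / 3) * 4; // ~13.3 MiB per chunk on the socket
```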
@@ -0,0 +1,15 @@
+/**
+ * Streaming Handler - handles large file streaming with pull-based chunk retrieval
+ * Server pulls chunks via get_next_chunk calls
+ *
+ * Uses bounded buffering to limit memory usage - pauses stream when buffer is full,
+ * resumes when buffer is drained.
+ *
+ * This file re-exports from the streaming/ module for backwards compatibility.
+ */
+
+export { STREAM_CHUNK_SIZE, STREAMING_THRESHOLD, totalBufferSize as getTotalBufferSize, getActiveStreamCount } from './streaming/state.js';
+export { registerStreamingHandlers } from './streaming/handlers.js';
+export { initStreamingResponse } from './streaming/initStreamingResponse.js';
+export { startStaleStreamCleanup, cleanupStream, cleanupAllStreams } from './streaming/cleanup.js';
+
package/package.json CHANGED
@@ -1,12 +1,14 @@
 {
   "name": "@aikidosec/broker-client",
-  "version": "1.0.6",
+  "version": "1.0.8",
   "description": "Aikido Broker Client - Runs in customer network to forward requests to internal resources",
   "main": "app/client.js",
   "type": "module",
   "scripts": {
     "start": "node app/client.js",
-    "dev": "node --watch app/client.js"
+    "dev": "node --watch app/client.js",
+    "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js",
+    "test:watch": "node --experimental-vm-modules node_modules/jest/bin/jest.js --watch"
   },
   "keywords": [
     "broker",
@@ -38,9 +40,19 @@
     "native-dns": "0.7.0",
     "socket.io-client": "4.8.1"
   },
+  "devDependencies": {
+    "jest": "30.2.0"
+  },
   "files": [
     "app/",
     "README.md",
     "LICENSE"
-  ]
+  ],
+  "jest": {
+    "testEnvironment": "node",
+    "testMatch": [
+      "**/tests/**/*.test.js"
+    ],
+    "transform": {}
+  }
 }
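The new `test` script runs Jest with `--experimental-vm-modules` so the ESM sources can be imported directly, and the inline config picks up files matching `**/tests/**/*.test.js`. No tests are included in the published `files` list; the sketch below is only an illustration of a test that would match that pattern, exercising the buffer counters exported from `app/streaming/state.js` (the `tests/` location and the test cases themselves are assumptions):

```js
// tests/state.test.js - illustrative sketch, not shipped with the package
import {
  activeStreams,
  addToTotalBuffer,
  subtractFromTotalBuffer,
  setTotalBufferSize,
  totalBufferSize,
  recalculateTotalBufferSize
} from '../app/streaming/state.js';

describe('streaming buffer accounting', () => {
  beforeEach(() => {
    // Reset module-level state between tests
    activeStreams.clear();
    setTotalBufferSize(0);
  });

  test('add and subtract keep a running total', () => {
    addToTotalBuffer(1024);
    addToTotalBuffer(2048);
    subtractFromTotalBuffer(1024);
    expect(totalBufferSize()).toBe(2048);
  });

  test('recalculateTotalBufferSize corrects drift against actual stream buffers', () => {
    activeStreams.set('req-1', { buffer: Buffer.alloc(512) });
    setTotalBufferSize(9999); // simulate counter drift
    expect(recalculateTotalBufferSize()).toBe(512);
    expect(totalBufferSize()).toBe(512);
  });
});
```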