@aikidosec/broker-client 1.0.5 → 1.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -60,6 +60,7 @@ Resource IDs are displayed in the Aikido UI when you register them.
  - `HTTP_PROXY` - Proxy server for HTTP requests (e.g., `http://proxy.company.local:8080`)
  - `HTTPS_PROXY` - Proxy server for HTTPS requests (e.g., `http://proxy.company.local:8080`)
  - `ALL_PROXY` - Universal proxy fallback for all protocols if protocol-specific proxy is not set
+ - `BROKER_TARGET_URL` - Override the broker server URL (defaults to `https://broker.aikidobroker.com`)

  ## How It Works

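The new `BROKER_TARGET_URL` override is read once at startup. A minimal sketch of the resolution order, mirroring the `getServerUrl()` helper added in `app/config.js` below:

```js
// Env override wins; otherwise fall back to the production broker URL.
const serverUrl = process.env.BROKER_TARGET_URL || 'https://broker.aikidobroker.com';
```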
package/app/client.js CHANGED
@@ -10,23 +10,19 @@ import axios from 'axios';
  import { URL } from 'url';
  import { Address4, Address6 } from 'ip-address';
  import dns from 'native-dns';
- import fs from 'fs';
  import { ResourceManager } from './resourceManager.js';
  import { HttpsProxyAgent } from 'https-proxy-agent';
-
- // Configure logging
- const log = {
-   info: (msg) => console.log(`[INFO] ${new Date().toISOString()} - ${msg}`),
-   warn: (msg) => console.warn(`[WARN] ${new Date().toISOString()} - ${msg}`),
-   error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
-   debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
- };
+ import { getClientId, setClientIdCache, getServerUrl, getClientSecret } from './config.js';
+ import { STREAMING_THRESHOLD } from './streaming/state.js';
+ import { registerStreamingHandlers } from './streaming/handlers.js';
+ import { initStreamingResponse } from './streaming/initStreamingResponse.js';
+ import { startStaleStreamCleanup, cleanupAllStreams } from './streaming/cleanup.js';
+ import { log } from './log.js';

  // Broker Server Configuration
- const SERVER_URL = "https://broker.aikidobroker.com";
+ const CLIENT_SECRET = getClientSecret();
+ const SERVER_URL = getServerUrl();

- // Client Configuration (from environment)
- const CLIENT_SECRET = process.env.CLIENT_SECRET;
  const ALLOWED_SUBNETS = process.env.ALLOWED_INTERNAL_SUBNETS
    ? process.env.ALLOWED_INTERNAL_SUBNETS.split(',').map(s => s.trim()).filter(s => s)
    : [];
@@ -39,7 +35,7 @@ const DNS_SERVERS = process.env.DNS_SERVERS
  // Configure axios defaults
  const MAX_RESPONSE_SIZE = 100 * 1024 * 1024; // 100 MB
  const axiosConfig = {
-   timeout: 30000,
+   timeout: 300000,
    maxRedirects: 5,
    maxContentLength: MAX_RESPONSE_SIZE,
    maxBodyLength: MAX_RESPONSE_SIZE
@@ -51,31 +47,6 @@ const internalHttpClient = axios.create(axiosConfig);
  // Initialize ResourceManager
  const resourceManager = new ResourceManager();

- // Cache for client_id
- let _clientIdCache = null;
-
- /**
-  * Get the client ID from cache or read from file.
-  * Returns null if not registered yet.
-  */
- function getClientId() {
-   if (_clientIdCache !== null) {
-     return _clientIdCache;
-   }
-
-   const clientIdPath = '/config/client_id';
-   if (fs.existsSync(clientIdPath)) {
-     try {
-       _clientIdCache = fs.readFileSync(clientIdPath, 'utf8').trim();
-       return _clientIdCache;
-     } catch (e) {
-       log.error(`Failed to read client_id: ${e.message}`);
-     }
-   }
-
-   return null;
- }
-
  /**
   * Resolve hostname using custom DNS servers if configured.
   * Falls back to system DNS if DNS_SERVERS not set.
@@ -133,6 +104,32 @@ async function resolveInternalHostname(hostname) {
  });
  }

+ /**
+  * Get Content-Length via HEAD request to determine if streaming should be used.
+  * @param {string} url - URL to check
+  * @param {object} headers - Headers to send with the request
+  * @returns {Promise<number|null>} Content-Length in bytes, or null if unavailable
+  */
+ async function getContentLengthViaHead(url, headers) {
+   try {
+     const headResponse = await internalHttpClient.head(url, {
+       headers,
+       validateStatus: () => true,
+       timeout: 5000
+     });
+     // Headers can be any case (Content-Length, content-length, CONTENT-LENGTH)
+     const contentLengthHeader = Object.keys(headResponse.headers)
+       .find(key => key.toLowerCase() === 'content-length');
+     const contentLength = contentLengthHeader
+       ? parseInt(headResponse.headers[contentLengthHeader])
+       : NaN;
+     return isNaN(contentLength) ? null : contentLength;
+   } catch (e) {
+     log.warn(`HEAD request failed, will use buffered approach: ${e.message}`);
+     return null;
+   }
+ }
+
  /**
   * Check if URL points to internal resource
   */
@@ -236,7 +233,9 @@ const socket = io(SERVER_URL, {
    randomizationFactor: 0.5,
    tryAllTransports: true, // if we don't, it won't try to fallback from websocket to polling
    autoConnect: false, // Don't connect until after registration
-   withCredentials: true // make sure cookies work for sticky sessions
+   withCredentials: true, // make sure cookies work for sticky sessions
+   // Increase timeouts for heavy streaming workloads
+   pingTimeout: 60000, // 60s (default 20s) - time to wait for pong before considering connection dead
  });

  // Socket.IO event handlers
@@ -256,8 +255,10 @@ socket.on('connect', async () => {
    }
  });

- socket.on('disconnect', () => {
-   log.warn("Disconnected from broker server");
+ socket.on('disconnect', (reason) => {
+   log.warn(`Disconnected from broker server: ${reason}`);
+   // Clean up all active streams - they can't recover after reconnect
+   cleanupAllStreams();
  });

  socket.on('connect_error', (error) => {
@@ -284,6 +285,12 @@ socket.io.on('reconnect_failed', () => {
    log.error(`Socket.IO reconnection failed after all attempts`);
  });

+ // Register streaming handlers (handle next chunk and abort stream on client disconnect)
+ registerStreamingHandlers(socket);
+
+ // Start cleanup interval for stale streams
+ startStaleStreamCleanup();
+
  socket.on('forward_request', async (data, callback) => {
    /**
     * Receive request from broker server and forward to internal resource
@@ -335,29 +342,54 @@ socket.on('forward_request', async (data, callback) => {
      }
    }

-   // Forward the request to the internal resource
+   // Check Content-Length first with HEAD request to decide streaming vs buffered
+   let useStreaming = false;
+   let contentLength = null;
+
+   if (method === 'GET') {
+     contentLength = await getContentLengthViaHead(resolvedUrl, headers);
+     if (contentLength !== null && contentLength > STREAMING_THRESHOLD) {
+       useStreaming = true;
+       log.info(`Large response detected (${(contentLength / (1024 * 1024)).toFixed(2)} MB) - using streaming`);
+     }
+   }
+
+   // Make the request with appropriate response type
    const response = await internalHttpClient.request({
      method,
      url: resolvedUrl,
      headers,
      data: body,
      validateStatus: () => true, // Accept any status code
-     responseType: 'arraybuffer', // Get raw bytes, don't parse JSON
+     responseType: useStreaming ? 'stream' : 'arraybuffer',
    });

-   log.info(`Successfully forwarded request ${requestId} to ${targetUrl}, status: ${response.status}`);
-
-   // Return response via acknowledgement
-   // Send body as base64 to preserve binary data byte-for-byte (critical for Docker registry digests)
-   const responseBody = response.data ? Buffer.from(response.data).toString('base64') : null;
-
-   callback({
-     request_id: requestId,
-     status_code: response.status,
-     headers: response.headers,
-     body: responseBody,
-     version: 2
-   });
+   if (useStreaming) {
+     // Initialize streaming - server will pull chunks via get_next_chunk
+     initStreamingResponse({
+       requestId,
+       statusCode: response.status,
+       headers: response.headers,
+       stream: response.data,
+       callback
+     });
+   } else {
+     const responseSizeBytes = response.data ? response.data.length : 0;
+     const responseSizeMB = (responseSizeBytes / (1024 * 1024)).toFixed(2);
+
+     log.info(`Successfully forwarded request ${requestId} to ${targetUrl}, status: ${response.status}, response size: ${responseSizeMB} MB`);
+
+     // Send direct response
+     sendDirectResponse(
+       requestId,
+       response.status,
+       response.headers,
+       response.data,
+       callback
+     );
+
+     log.info(`Response sent for request ${requestId}`);
+   }

  } catch (error) {
    log.error(`Error forwarding request ${requestId} to ${targetUrl}: ${error?.response?.status || error.message}`);
@@ -372,6 +404,25 @@ socket.on('forward_request', async (data, callback) => {
    }
  });

+ /**
+  * Send a direct response (for small files under streaming threshold)
+  * @param {string} requestId - Request ID for tracking
+  * @param {number} statusCode - HTTP status code
+  * @param {object} headers - Response headers
+  * @param {Buffer|null} responseData - Raw response data (before base64)
+  * @param {function} callback - Socket.IO callback
+  */
+ function sendDirectResponse(requestId, statusCode, headers, responseData, callback) {
+   const responseBody = responseData ? responseData.toString('base64') : null;
+   callback({
+     request_id: requestId,
+     status_code: statusCode,
+     headers: headers,
+     body: responseBody,
+     version: 2
+   });
+ }
+
  function formatMessageBody(message) {
    return Buffer.from(message, 'utf-8').toString('base64');
  }
@@ -402,14 +453,7 @@ async function registerWithServer() {
    log.info(`✓ Successfully registered with broker server as ${clientId}`);

    // Save client_id to file and cache
-   _clientIdCache = clientId;
-   try {
-     fs.mkdirSync('/config', { recursive: true });
-     fs.writeFileSync('/config/client_id', clientId);
-     log.info("💾 Saved client_id to /config/client_id");
-   } catch (e) {
-     log.warn(`Could not save client_id file: ${e.message}`);
-   }
+   setClientIdCache(clientId);

    log.info("Waiting for server to propagate configuration...");
    await new Promise(resolve => setTimeout(resolve, 5000));
@@ -417,17 +461,7 @@ async function registerWithServer() {
  } else if (response.status === 409) {
    log.info("✓ Client already registered with server (this is OK)");
    // Try to extract client_id from response if available
-   try {
-     const clientId = response.data.client_id;
-     if (clientId) {
-       _clientIdCache = clientId;
-       fs.mkdirSync('/config', { recursive: true });
-       fs.writeFileSync('/config/client_id', clientId);
-       log.info("💾 Saved client_id to /config/client_id");
-     }
-   } catch (e) {
-     log.warn(`Could not save client_id file: ${e.message}`);
-   }
+   setClientIdCache(response.data.client_id);
    return;
  } else {
    log.warn(`Registration attempt ${attempt + 1} failed: ${response.status} - ${response.data}`);
package/app/config.js ADDED
@@ -0,0 +1,65 @@
+ import fs from 'fs';
+
+ // Client ID file path
+ const CLIENT_ID_PATH = '/config/client_id';
+
+ // Cache for client_id
+ let _clientIdCache = null;
+
+ // Client secret from environment
+ const CLIENT_SECRET = process.env.CLIENT_SECRET;
+
+ /**
+  * Get the server URL.
+  * Uses BROKER_TARGET_URL env var, defaults to https://broker.aikidobroker.com
+  */
+ export function getServerUrl() {
+   return process.env.BROKER_TARGET_URL || 'https://broker.aikidobroker.com';
+ }
+
+ /**
+  * Get the client secret from environment.
+  */
+ export function getClientSecret() {
+   return CLIENT_SECRET;
+ }
+
+ /**
+  * Get the client ID from cache or read from file.
+  * Returns null if not registered yet.
+  */
+ export function getClientId() {
+   if (_clientIdCache !== null) {
+     return _clientIdCache;
+   }
+
+   if (fs.existsSync(CLIENT_ID_PATH)) {
+     try {
+       _clientIdCache = fs.readFileSync(CLIENT_ID_PATH, 'utf8').trim();
+       return _clientIdCache;
+     } catch (e) {
+       console.error(`[ERROR] ${new Date().toISOString()} - Failed to read client_id: ${e.message}`);
+     }
+   }
+
+   return null;
+ }
+
+ /**
+  * Set the client ID cache and persist to file.
+  * Does nothing if clientId is null/undefined.
+  */
+ export function setClientIdCache(clientId) {
+   if (!clientId) {
+     return;
+   }
+   _clientIdCache = clientId;
+   try {
+     fs.mkdirSync('/config', { recursive: true });
+     fs.writeFileSync(CLIENT_ID_PATH, clientId);
+     console.log(`[INFO] ${new Date().toISOString()} - 💾 Saved client_id to ${CLIENT_ID_PATH}`);
+   } catch (e) {
+     console.error(`[ERROR] ${new Date().toISOString()} - Failed to write client_id: ${e.message}`);
+   }
+ }
+
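A sketch of the intended call pattern, as `app/client.js` now uses these helpers (the id value is hypothetical):

```js
import { getClientId, setClientIdCache } from './config.js';

// On successful registration, cache the id and persist it to /config/client_id.
// Falsy ids (e.g., a 409 response without a body) are ignored.
setClientIdCache('client-123');

// Later reads hit the in-memory cache first, then fall back to the file;
// null means the client has never registered.
const clientId = getClientId();
```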
package/app/log.js ADDED
@@ -0,0 +1,10 @@
+ /**
+  * Logging utility for streaming module
+  */
+
+ export const log = {
+   info: (msg) => console.log(`[INFO] ${new Date().toISOString()} - ${msg}`),
+   warn: (msg) => console.warn(`[WARN] ${new Date().toISOString()} - ${msg}`),
+   error: (msg) => console.error(`[ERROR] ${new Date().toISOString()} - ${msg}`),
+   debug: (msg) => console.log(`[DEBUG] ${new Date().toISOString()} - ${msg}`)
+ };
package/app/streaming/cleanup.js ADDED
@@ -0,0 +1,76 @@
+ /**
+  * Stream cleanup utilities - manages cleanup of streaming state
+  */
+
+ import { activeStreams, recalculateTotalBufferSize } from './state.js';
+ import { log } from '../log.js';
+ import { resumePausedStreams } from './flowControl.js';
+
+ // Stale stream timeout (3 minutes)
+ const STALE_STREAM_TIMEOUT_MS = 3 * 60 * 1000;
+ const STALE_STREAM_INTERVAL_MS = 2 * 60 * 1000; // Check every 2 minutes
+
+ // Cleanup interval reference
+ let cleanupInterval = null;
+
+ /**
+  * Start the stale stream cleanup interval
+  * Runs every 2 minutes to check for streams inactive for 3+ minutes
+  */
+ export function startStaleStreamCleanup() {
+   if (cleanupInterval) return; // Already running
+
+   cleanupInterval = setInterval(() => {
+     const now = Date.now();
+
+     for (const [requestId, state] of activeStreams.entries()) {
+       const inactiveMs = now - state.lastActivity;
+       if (inactiveMs > STALE_STREAM_TIMEOUT_MS) {
+         try {
+           log.warn(`Cleaning up stale stream ${requestId} (inactive for ${(inactiveMs / 1000 / 60).toFixed(1)} minutes)`);
+           cleanupStream(requestId);
+         } catch (err) {
+           log.error(`Error cleaning up stale stream ${requestId}: ${err.message}`);
+         }
+       }
+     }
+
+   }, STALE_STREAM_INTERVAL_MS);
+
+   log.info('Started stale stream cleanup interval');
+ }
+
+ /**
+  * Clean up a streaming request (e.g., on error or disconnect)
+  * @param {string} requestId - Request ID to clean up
+  */
+ export function cleanupStream(requestId) {
+   const state = activeStreams.get(requestId);
+   if (state) {
+     // state.stream is the Node.js Readable stream from the axios HTTP response
+     // Destroying it releases the TCP connection and stops buffering data
+     state.stream.destroy();
+     activeStreams.delete(requestId);
+
+     // Recalculate total buffer size to fix any drift
+     recalculateTotalBufferSize();
+
+     // Resume any paused streams now that buffer space is freed
+     resumePausedStreams();
+   }
+ }
+
+ /**
+  * Clean up all active streams (e.g., on socket disconnect)
+  * Streams cannot recover after socket reconnect since server-side state is lost
+  */
+ export function cleanupAllStreams() {
+   const count = activeStreams.size;
+   if (count === 0) return;
+
+   for (const requestId of activeStreams.keys()) {
+     cleanupStream(requestId);
+   }
+
+   log.info(`Cleaned up all ${count} streams after disconnect`);
+ }
package/app/streaming/flowControl.js ADDED
@@ -0,0 +1,39 @@
+ /**
+  * Stream flow control - manages pausing/resuming streams based on buffer limits
+  */
+
+ import { activeStreams, totalBufferSize, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+ import { log } from '../log.js';
+
+ /**
+  * Pause a stream if global or per-stream buffer limits are exceeded
+  * @param {string} requestId - Request ID
+  * @param {Readable} stream - The readable stream to pause
+  */
+ export function pauseStreamIfOverLimit(requestId, stream) {
+   const state = activeStreams.get(requestId);
+   if (!state || state.paused) return;
+
+   const shouldPause = totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE || state.buffer.length >= PER_STREAM_MAX_BUFFER_SIZE;
+   if (shouldPause) {
+     state.paused = true;
+     stream.pause();
+     log.info(`Stream paused for ${requestId}, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+   }
+ }
+
+ /**
+  * Resume any paused streams if both global and per-stream buffers are below thresholds
+  * Called after cleanup frees up buffer space
+  */
+ export function resumePausedStreams() {
+   if (totalBufferSize() >= GLOBAL_MAX_BUFFER_SIZE) return;
+
+   for (const [requestId, state] of activeStreams.entries()) {
+     if (state.paused && !state.complete && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE) {
+       state.paused = false;
+       state.stream.resume();
+       log.info(`Stream resumed for ${requestId} after cleanup, stream buffer: ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB, global: ${(totalBufferSize() / (1024 * 1024)).toFixed(2)} MB`);
+     }
+   }
+ }
package/app/streaming/handlers.js ADDED
@@ -0,0 +1,150 @@
+ /**
+  * Socket.IO event handlers for streaming
+  */
+
+ import { activeStreams, totalBufferSize, subtractFromTotalBuffer, STREAM_CHUNK_SIZE, GLOBAL_MAX_BUFFER_SIZE, PER_STREAM_MAX_BUFFER_SIZE } from './state.js';
+ import { cleanupStream } from './cleanup.js';
+ import { log } from '../log.js';
+
+ /**
+  * Register streaming event handlers on a Socket.IO socket
+  * @param {Socket} socket - Socket.IO socket instance
+  */
+ export function registerStreamingHandlers(socket) {
+   socket.on('get_next_chunk', handleGetNextChunk);
+   socket.on('abort_stream', handleAbortStream);
+ }
+
+ /**
+  * Handle get_next_chunk request from server (pull-based streaming)
+  * Waits for data if buffer is empty but stream isn't complete
+  * @param {object} data - Request data containing request_id
+  * @param {function} callback - Socket.IO callback
+  */
+ export async function handleGetNextChunk(data, callback) {
+   const requestId = data.request_id;
+   const state = activeStreams.get(requestId);
+
+   if (!state) {
+     log.warn(`get_next_chunk: No active stream for request ${requestId}`);
+     callback({ error: 'no_stream', request_id: requestId });
+     return;
+   }
+
+   state.lastActivity = Date.now();
+
+   // Check for stream error
+   if (state.error) {
+     cleanupStreamWithError(requestId, state.error, callback);
+     return;
+   }
+
+   // Wait for data if buffer doesn't have enough for a full chunk
+   await waitForData(state);
+
+   // Check for error again after waiting
+   if (state.error) {
+     cleanupStreamWithError(requestId, state.error, callback);
+     return;
+   }
+
+   // Extract chunk from buffer
+   const { chunkData, isComplete } = extractChunkFromBuffer(state, requestId);
+
+   state.totalBytesSent += chunkData.length;
+   state.chunkIndex++;
+
+   log.info(`get_next_chunk ${state.chunkIndex} for ${requestId}: ${chunkData.length} bytes, complete=${isComplete}, total=${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB`);
+
+   // Clean up if complete
+   if (isComplete) {
+     activeStreams.delete(requestId);
+     log.info(`Streaming complete for ${requestId}: ${state.chunkIndex} chunks, ${(state.totalBytesSent / (1024 * 1024)).toFixed(2)} MB total`);
+   }
+
+   callback({
+     request_id: requestId,
+     data: chunkData.toString('base64'),
+     complete: isComplete,
+     chunk_index: state.chunkIndex
+   });
+ }
+
+ /**
+  * Wait for buffer to have enough data or stream to complete
+  */
+ async function waitForData(state) {
+   const maxWaitMs = 60000;
+   const checkIntervalMs = 100;
+   let waited = 0;
+
+   const hasEnoughData = () => state.buffer.length >= STREAM_CHUNK_SIZE;
+   const isStreamDone = () => state.complete || state.error;
+   const hasTimedOut = () => waited >= maxWaitMs;
+
+   while (!hasEnoughData() && !isStreamDone() && !hasTimedOut()) {
+     await new Promise(resolve => setTimeout(resolve, checkIntervalMs));
+     waited += checkIntervalMs;
+   }
+ }
+
+ /**
+  * Extract a chunk from the buffer and manage buffer state
+  */
+ function extractChunkFromBuffer(state, requestId) {
+   let chunkData;
+   let isComplete = false;
+
+   if (state.buffer.length >= STREAM_CHUNK_SIZE) {
+     // Have enough data for a full chunk
+     chunkData = state.buffer.slice(0, STREAM_CHUNK_SIZE);
+     state.buffer = state.buffer.slice(STREAM_CHUNK_SIZE);
+     subtractFromTotalBuffer(chunkData.length);
+
+     // Resume stream if it was paused and both limits are now satisfied
+     resumeStreamIfBelowBufferLimits(state);
+   } else if (state.complete) {
+     // Stream is done, send remaining buffer
+     chunkData = state.buffer;
+     subtractFromTotalBuffer(chunkData.length);
+     state.buffer = Buffer.alloc(0);
+     isComplete = true;
+   } else {
+     // Timeout waiting for data - send what we have
+     log.warn(`get_next_chunk: Timeout waiting for data for ${requestId}, sending ${state.buffer.length} bytes`);
+     chunkData = state.buffer;
+     subtractFromTotalBuffer(chunkData.length);
+     state.buffer = Buffer.alloc(0);
+   }
+
+   return { chunkData, isComplete };
+ }
+
+ /**
+  * Resume a paused stream if buffer limits allow
+  */
+ function resumeStreamIfBelowBufferLimits(state) {
+   const canResume = totalBufferSize() < GLOBAL_MAX_BUFFER_SIZE && state.buffer.length < PER_STREAM_MAX_BUFFER_SIZE;
+   if (state.paused && canResume) {
+     state.paused = false;
+     state.stream.resume();
+   }
+ }
+
+ /**
+  * Clean up stream and send error callback
+  */
+ function cleanupStreamWithError(requestId, error, callback) {
+   activeStreams.delete(requestId);
+   callback({ error, request_id: requestId });
+ }
+
+ /**
+  * Handle abort_stream request from server (caller (aikido service) disconnected)
+  * @param {object} data - Request data containing request_id
+  */
+ function handleAbortStream(data) {
+   const requestId = data.request_id;
+   log.warn(`Received abort_stream for ${requestId} - caller disconnected`);
+   cleanupStream(requestId);
+ }
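The broker server side of this pull protocol ships separately and is not part of this package. Assuming it uses Socket.IO v4's `emitWithAck`, a consumer loop would look roughly like this sketch (`forwardToCaller` and the 90s timeout are illustrative, not taken from Aikido's server):

```js
// Hypothetical server-side consumer: pull base64 chunks until `complete`.
async function pullStreamedResponse(socket, requestId, forwardToCaller) {
  let complete = false;
  while (!complete) {
    // Rejects if the client does not acknowledge within the timeout.
    const reply = await socket.timeout(90000).emitWithAck('get_next_chunk', {
      request_id: requestId
    });
    if (reply.error) {
      throw new Error(`Stream ${requestId} failed: ${reply.error}`);
    }
    // Chunks arrive base64-encoded; decode before forwarding to the caller.
    forwardToCaller(Buffer.from(reply.data, 'base64'));
    complete = reply.complete;
  }
}
```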
package/app/streaming/initStreamingResponse.js ADDED
@@ -0,0 +1,84 @@
+ /**
+  * Initialize streaming response - stores stream state for pull-based chunk retrieval
+  */
+
+ import { activeStreams, addToTotalBuffer } from './state.js';
+ import { log } from '../log.js';
+ import { pauseStreamIfOverLimit } from './flowControl.js';
+
+ /**
+  * Initialize streaming response - stores stream state for pull-based chunk retrieval
+  * Server will call get_next_chunk to pull chunks
+  * @param {object} options
+  * @param {string} options.requestId - Request ID for tracking
+  * @param {number} options.statusCode - HTTP status code
+  * @param {object} options.headers - Response headers
+  * @param {ReadableStream} options.stream - Response stream from axios
+  * @param {function} options.callback - Socket.IO callback for initial response
+  */
+ export function initStreamingResponse({ requestId, statusCode, headers, stream, callback }) {
+   log.info(`Initializing streaming response for request ${requestId}`);
+
+   // Store stream state for pull-based retrieval
+   const streamState = {
+     stream,
+     buffer: Buffer.alloc(0),
+     complete: false,
+     error: null,
+     totalBytesSent: 0,
+     chunkIndex: 0,
+     paused: false,
+     lastActivity: Date.now()
+   };
+
+   activeStreams.set(requestId, streamState);
+
+   // Buffer data as it arrives from the internal resource
+   // Use bounded buffering with global and per-stream limits to control memory usage
+   stream.on('data', (chunk) => {
+     // Yield to event loop periodically to allow ping/pong processing
+     // This prevents stream data from starving Socket.IO heartbeats
+     setImmediate(() => {
+       const state = activeStreams.get(requestId);
+       if (!state) {
+         // Stream was cleaned up, ignore late data
+         return;
+       }
+
+       state.buffer = Buffer.concat([state.buffer, chunk]);
+       addToTotalBuffer(chunk.length);
+
+       // Pause stream if buffer limits exceeded (backpressure)
+       pauseStreamIfOverLimit(requestId, stream);
+     });
+   });
+
+   stream.on('end', () => {
+     const state = activeStreams.get(requestId);
+     if (state) {
+       state.complete = true;
+       log.info(`Stream ended for request ${requestId}, ${(state.buffer.length / (1024 * 1024)).toFixed(2)} MB remaining in buffer`);
+     }
+   });
+
+   stream.on('error', (error) => {
+     const state = activeStreams.get(requestId);
+     if (state) {
+       state.error = error.message;
+       state.complete = true;
+       log.error(`Stream error for request ${requestId}: ${error.message}`);
+     }
+   });
+
+   // Send initial response - server will start pulling chunks
+   callback({
+     request_id: requestId,
+     status_code: statusCode,
+     headers: headers,
+     body: null,
+     version: 2,
+     streaming: true
+   });
+
+   log.info(`Streaming initialized for request ${requestId}, server will pull chunks`);
+ }
package/app/streaming/state.js ADDED
@@ -0,0 +1,82 @@
+ /**
+  * Shared state for streaming - buffer tracking and active streams
+  */
+
+ // Chunk size for streaming responses (10MB before base64 encoding)
+ export const STREAM_CHUNK_SIZE = 10 * 1024 * 1024;
+
+ // Global maximum buffer size across ALL streams
+ export const GLOBAL_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 30; // 300MB
+
+ // Per-stream maximum buffer size
+ export const PER_STREAM_MAX_BUFFER_SIZE = STREAM_CHUNK_SIZE * 1.5; // 15MB
+
+ // Threshold for using streaming vs direct response
+ export const STREAMING_THRESHOLD = STREAM_CHUNK_SIZE;
+
+ // Active streams for pull-based streaming (server pulls chunks from client)
+ // Key: request_id, Value: { stream, buffer, complete, error, totalBytesSent, chunkIndex, paused, lastActivity }
+ export const activeStreams = new Map();
+
+ // Track total buffer size across all streams
+ let _totalBufferSize = 0;
+
+ /**
+  * Get the current total buffer size across all streams
+  * @returns {number} Total bytes buffered
+  */
+ export function totalBufferSize() {
+   return _totalBufferSize;
+ }
+
+ /**
+  * Set the total buffer size (used for reset)
+  * @param {number} value - New value
+  */
+ export function setTotalBufferSize(value) {
+   _totalBufferSize = value;
+ }
+
+ /**
+  * Add to the total buffer size
+  * @param {number} bytes - Bytes to add
+  */
+ export function addToTotalBuffer(bytes) {
+   _totalBufferSize += bytes;
+ }
+
+ /**
+  * Subtract from the total buffer size
+  * @param {number} bytes - Bytes to subtract
+  */
+ export function subtractFromTotalBuffer(bytes) {
+   _totalBufferSize -= bytes;
+ }
+
+ /**
+  * Recalculate total buffer size from all active streams
+  * Used to fix any drift in the counter
+  * @returns {number} Actual total bytes buffered
+  */
+ export function recalculateTotalBufferSize() {
+   let actual = 0;
+   for (const [, state] of activeStreams.entries()) {
+     if (state.buffer) {
+       actual += state.buffer.length;
+     }
+   }
+
+   if (actual !== _totalBufferSize) {
+     _totalBufferSize = actual;
+   }
+
+   return actual;
+ }
+
+ /**
+  * Get count of active streams (for monitoring)
+  * @returns {number} Number of active streams
+  */
+ export function getActiveStreamCount() {
+   return activeStreams.size;
+ }
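Note that these limits count raw bytes, while chunks cross the wire base64-encoded (`handlers.js` calls `chunkData.toString('base64')`), so each full 10 MB chunk becomes roughly 13.33 MB of Socket.IO payload. A quick check in plain Node:

```js
// base64 encodes every 3 raw bytes as 4 ASCII characters (~33% overhead).
const STREAM_CHUNK_SIZE = 10 * 1024 * 1024;
const encoded = Buffer.alloc(STREAM_CHUNK_SIZE).toString('base64');
console.log(`${(encoded.length / (1024 * 1024)).toFixed(2)} MB on the wire`); // ≈ 13.33 MB
```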
package/app/streamingHandler.js ADDED
@@ -0,0 +1,15 @@
+ /**
+  * Streaming Handler - handles large file streaming with pull-based chunk retrieval
+  * Server pulls chunks via get_next_chunk calls
+  *
+  * Uses bounded buffering to limit memory usage - pauses stream when buffer is full,
+  * resumes when buffer is drained.
+  *
+  * This file re-exports from the streaming/ module for backwards compatibility.
+  */
+
+ export { STREAM_CHUNK_SIZE, STREAMING_THRESHOLD, totalBufferSize as getTotalBufferSize, getActiveStreamCount } from './streaming/state.js';
+ export { registerStreamingHandlers } from './streaming/handlers.js';
+ export { initStreamingResponse } from './streaming/initStreamingResponse.js';
+ export { startStaleStreamCleanup, cleanupStream, cleanupAllStreams } from './streaming/cleanup.js';
+
package/package.json CHANGED
@@ -1,12 +1,14 @@
  {
    "name": "@aikidosec/broker-client",
-   "version": "1.0.5",
+   "version": "1.0.7",
    "description": "Aikido Broker Client - Runs in customer network to forward requests to internal resources",
    "main": "app/client.js",
    "type": "module",
    "scripts": {
      "start": "node app/client.js",
-     "dev": "node --watch app/client.js"
+     "dev": "node --watch app/client.js",
+     "test": "node --experimental-vm-modules node_modules/jest/bin/jest.js",
+     "test:watch": "node --experimental-vm-modules node_modules/jest/bin/jest.js --watch"
    },
    "keywords": [
      "broker",
@@ -38,9 +40,19 @@
      "native-dns": "0.7.0",
      "socket.io-client": "4.8.1"
    },
+   "devDependencies": {
+     "jest": "30.2.0"
+   },
    "files": [
      "app/",
      "README.md",
      "LICENSE"
-   ]
+   ],
+   "jest": {
+     "testEnvironment": "node",
+     "testMatch": [
+       "**/tests/**/*.test.js"
+     ],
+     "transform": {}
+   }
  }
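The `--experimental-vm-modules` flag in the test scripts is needed because the package is ESM (`"type": "module"`) and Jest loads ES modules through Node's vm API. A minimal sketch of a test the `testMatch` pattern would pick up; the file and its assertions are illustrative, not shipped with the package:

```js
// tests/config.test.js (hypothetical) - run with `npm test`
import { getServerUrl } from '../app/config.js';

test('falls back to the default broker URL', () => {
  delete process.env.BROKER_TARGET_URL;
  expect(getServerUrl()).toBe('https://broker.aikidobroker.com');
});

test('honours the BROKER_TARGET_URL override', () => {
  process.env.BROKER_TARGET_URL = 'https://broker.example.internal';
  expect(getServerUrl()).toBe('https://broker.example.internal');
});
```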