@remotelinker/reverse-ws-tunnel 1.0.9 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,7 +30,46 @@ Reverse WebSocket Tunnel is a library that enables you to expose local services
30
30
 
31
31
  ---
32
32
 
33
- ## ✨ v1.0.9 - What's New
33
+ ## ✨ v1.0.11 - What's New
34
+
35
+ ### ✨ New Features
36
+ - **stopWebSocketServer(port)**: Added new function to properly stop and cleanup the WebSocket server
37
+ - Closes all active WebSocket connections (triggering cleanup of heartbeat intervals)
38
+ - Closes all TCP servers registered in state
39
+ - Cleans up state for the specified port
40
+ - Gracefully handles already-stopped servers (no errors)
41
+
42
+ ### 🐛 Bug Fixes
43
+ - **Heartbeat cleanup**: Fixed issue where setInterval for heartbeat was not properly cleaned up when server stopped
44
+ - **Node-RED integration**: Added cleanup on startup to handle cases where previous deployment didn't cleanup properly
45
+ - **TCP server connection hang**: Fixed critical issue where TCP connections would hang indefinitely
46
+ - Removed `pauseOnConnect: true` option from TCP server configuration
47
+ - This option was added in error - it pauses sockets on connect and requires manual `socket.resume()`
48
+ - The fix restores proper connection flow while keeping `reuseAddr: true` for port reuse on restart
49
+ - **TCP server port reuse**: Fixed "EADDRINUSE" error when client reconnects after Node-RED restart
50
+ - Now checks if TCP server is actually listening (`server.listening`) before skipping creation
51
+ - Previously only checked if state entry existed, not if server was active
52
+ - **TCP server global registry**: Added global tcpServers registry to track TCP servers even when not in state
53
+ - When stopWebSocketServer is called, now closes ALL TCP servers in global registry
54
+ - When creating new TCP server, checks global registry and closes stale servers before creating new one
55
+
56
+ ### 🔧 Improvements
57
+ - **Graceful shutdown**: Server now properly releases all resources (ports, memory, intervals) on shutdown
58
+ - **State management**: Improved state cleanup to prevent stale entries after server restart
59
+
60
+ ---
61
+
62
+ ## ✨ v1.0.10 - Previous Release
63
+
64
+ ### 🔧 Code Quality & Developer Experience
65
+ - **Code Cleanup**: Removed unused constants and redundant variables
66
+ - **Input Validation**: Added tunnelId validation for incoming messages
67
+ - **Code Formatting**: Added Prettier configuration for consistent code style
68
+ - **Test Suite**: Reorganized tests, removed obsolete files, added new test coverage
69
+
70
+ ---
71
+
72
+ ## ✨ v1.0.9 - Previous Release
34
73
 
35
74
  ### 🐛 Bug Fixes
36
75
  - **Message Format Standardization**: Fixed inconsistent message formats between server components
@@ -332,6 +371,29 @@ Set the log level via:
332
371
  - Environment variable: `LOG_LEVEL=debug`
333
372
  - TOML config: `logLevel = "debug"`
334
373
 
374
+ ### Logger API
375
+
376
+ The library exports logger functions for advanced control:
377
+
378
+ ```javascript
379
+ const { setLogLevel, getLogLevel, setLogContext, getLogContext, logger } = require('@remotelinker/reverse-ws-tunnel/utils');
380
+
381
+ // Set log level programmatically
382
+ setLogLevel('debug');
383
+
384
+ // Get current log level
385
+ const currentLevel = getLogLevel();
386
+
387
+ // Set context for all log messages (useful for Node-RED)
388
+ setLogContext({ nodeId: 'my-node', session: 'abc123' });
389
+
390
+ // Get current context
391
+ const context = getLogContext();
392
+
393
+ // Use logger directly
394
+ logger.info('Custom log message', { custom: 'data' });
395
+ ```
396
+
335
397
  ---
336
398
 
337
399
  ## 🔧 Advanced Usage
package/client/index.js CHANGED
@@ -2,7 +2,17 @@ const { startHttpProxyServer } = require('./proxyServer');
2
2
  const { connectWebSocket } = require('./tunnelClient');
3
3
  const { setLogContext } = require('../utils/logger');
4
4
 
5
- function startClient({ targetUrl, allowInsicureCerts, wsUrl, tunnelId, tunnelEntryUrl, tunnelEntryPort, headers, environment, autoReconnect }) {
5
+ function startClient({
6
+ targetUrl,
7
+ allowInsicureCerts,
8
+ wsUrl,
9
+ tunnelId,
10
+ tunnelEntryUrl,
11
+ tunnelEntryPort,
12
+ headers,
13
+ environment,
14
+ autoReconnect,
15
+ }) {
6
16
  setLogContext('CLIENT');
7
17
  environment = environment || 'production';
8
18
  const proxy = startHttpProxyServer(targetUrl, allowInsicureCerts);
package/client/index.mjs CHANGED
@@ -1,2 +1,2 @@
1
1
  import cjsModule from './index.js';
2
- export const { startClient } = cjsModule;
2
+ export const { startClient } = cjsModule;
@@ -24,23 +24,34 @@ function startHttpProxyServer(targetUrl, allowInsecureCerts = false) {
24
24
  return res.end('Missing TARGET_URL');
25
25
  }
26
26
 
27
- proxy.web(req, res, { target: targetUrl, changeOrigin: true, secure: !allowInsecureCerts }, (err) => {
28
- logger.error('Proxy web error:', err);
29
- if (!res.headersSent) {
30
- res.writeHead(502);
31
- res.end('Bad gateway');
32
- } else {
33
- res.end();
27
+ proxy.web(
28
+ req,
29
+ res,
30
+ { target: targetUrl, changeOrigin: true, secure: !allowInsecureCerts },
31
+ err => {
32
+ logger.error('Proxy web error:', err);
33
+ if (!res.headersSent) {
34
+ res.writeHead(502);
35
+ res.end('Bad gateway');
36
+ } else {
37
+ res.end();
38
+ }
34
39
  }
35
- });
40
+ );
36
41
  });
37
42
 
38
43
  server.on('upgrade', (req, socket, head) => {
39
44
  logger.trace(`Incoming WebSocket upgrade: ${req.url}`);
40
- proxy.ws(req, socket, head, { target: targetUrl, changeOrigin: false, secure: !allowInsecureCerts }, (err) => {
41
- logger.error('Proxy WS upgrade error:', err);
42
- socket.end();
43
- });
45
+ proxy.ws(
46
+ req,
47
+ socket,
48
+ head,
49
+ { target: targetUrl, changeOrigin: false, secure: !allowInsecureCerts },
50
+ err => {
51
+ logger.error('Proxy WS upgrade error:', err);
52
+ socket.end();
53
+ }
54
+ );
44
55
  });
45
56
 
46
57
  proxy.on('error', (err, req, res) => {
@@ -6,7 +6,6 @@ const { buildMessageBuffer } = require('./utils');
6
6
  const { logger } = require('../utils/logger');
7
7
  const packageJson = require('../package.json');
8
8
 
9
- const RECONNECT_INTERVAL = 5000;
10
9
  const MESSAGE_TYPE_CONFIG = 0x01;
11
10
  const MESSAGE_TYPE_DATA = 0x02;
12
11
  const MESSAGE_TYPE_APP_PING = 0x03;
@@ -23,7 +22,17 @@ const RECONNECT_BACKOFF = [1000, 2000, 5000, 10000, 30000]; // Backoff progressi
23
22
  * @param {Object} config - Configuration for tunnel.
24
23
  */
25
24
  function connectWebSocket(config) {
26
- const { wsUrl, tunnelId, targetUrl, targetPort, tunnelEntryUrl, tunnelEntryPort, headers, environment, autoReconnect = true } = config;
25
+ const {
26
+ wsUrl,
27
+ tunnelId,
28
+ targetUrl,
29
+ targetPort,
30
+ tunnelEntryUrl,
31
+ tunnelEntryPort,
32
+ headers,
33
+ environment,
34
+ autoReconnect = true,
35
+ } = config;
27
36
 
28
37
  const eventEmitter = new EventEmitter();
29
38
  let ws;
@@ -32,8 +41,6 @@ function connectWebSocket(config) {
32
41
  let healthMonitor;
33
42
  let isClosed = false;
34
43
  let reconnectAttempt = 0;
35
- let pingSeq = 0;
36
- let lastPongTs = Date.now();
37
44
 
38
45
  if (!tunnelId) {
39
46
  throw new Error(`Missing mandatory tunnelId`);
@@ -43,25 +50,59 @@ function connectWebSocket(config) {
43
50
  if (isClosed) return;
44
51
 
45
52
  try {
46
- const headersParsed = headers || '{}';
53
+ // Parse headers - handle both string and object formats
54
+ let headersParsed = {};
55
+ if (headers) {
56
+ if (typeof headers === 'string') {
57
+ try {
58
+ headersParsed = JSON.parse(headers);
59
+ } catch (e) {
60
+ logger.warn(`Failed to parse headers string: ${headers}`);
61
+ }
62
+ } else if (typeof headers === 'object') {
63
+ headersParsed = headers;
64
+ }
65
+ }
47
66
  logger.debug(`Parsed headers: ${JSON.stringify(headersParsed)}`);
48
67
  logger.debug(`Try to connect to: ${wsUrl}`);
49
68
  ws = new WebSocket(wsUrl, { headers: headersParsed });
50
69
  logger.debug(`Connection: ${wsUrl}`);
51
70
  } catch (error) {
52
- logger.error('Malformed headers:', error);
71
+ logger.error('Failed to create WebSocket connection:', error);
53
72
  return;
54
73
  }
55
74
 
75
+ // PingState condiviso tra heartbeat e message handler
76
+ // Reset completo dello stato per ogni connessione
77
+ const pingState = {
78
+ pingSeq: 0,
79
+ lastPongTs: Date.now(),
80
+ };
81
+ const pingStateCallbacks = {
82
+ pingSeq: () => pingState.pingSeq,
83
+ incPingSeq: () => pingState.pingSeq++,
84
+ lastPongTs: () => pingState.lastPongTs,
85
+ setLastPongTs: ts => (pingState.lastPongTs = ts),
86
+ };
87
+
56
88
  ws.on('open', () => {
57
89
  logger.info(`Connected to WebSocket server ${wsUrl}`);
58
- logger.warn(`WS tunnel config sent: TARGET_PORT=${targetPort}, ENTRY_PORT=${tunnelEntryPort}`);
90
+ logger.warn(
91
+ `WS tunnel config sent: TARGET_PORT=${targetPort}, ENTRY_PORT=${tunnelEntryPort}`
92
+ );
93
+
94
+ // Reset reconnect attempt on successful connection
95
+ reconnectAttempt = 0;
96
+
59
97
  eventEmitter.emit('connected');
60
98
  ({ pingInterval } = heartBeat(ws));
61
99
 
62
100
  // Avviare heartbeat applicativo
63
- appPingInterval = startAppHeartbeat(ws, tunnelId, { pingSeq: () => pingSeq, incPingSeq: () => pingSeq++ });
64
- healthMonitor = startHealthMonitor(ws, tunnelId, { lastPongTs: () => lastPongTs, setLastPongTs: (ts) => lastPongTs = ts });
101
+ appPingInterval = startAppHeartbeat(ws, tunnelId, pingStateCallbacks);
102
+ healthMonitor = startHealthMonitor(ws, tunnelId, {
103
+ lastPongTs: () => pingState.lastPongTs,
104
+ setLastPongTs: ts => (pingState.lastPongTs = ts),
105
+ });
65
106
 
66
107
  const uuid = uuidv4();
67
108
  const payload = {
@@ -73,33 +114,50 @@ function connectWebSocket(config) {
73
114
  agentVersion: packageJson.version,
74
115
  };
75
116
 
76
- const message = buildMessageBuffer(tunnelId, uuid, MESSAGE_TYPE_CONFIG, JSON.stringify(payload));
117
+ const message = buildMessageBuffer(
118
+ tunnelId,
119
+ uuid,
120
+ MESSAGE_TYPE_CONFIG,
121
+ JSON.stringify(payload)
122
+ );
77
123
  logger.debug(`Sending tunnel config [uuid=${uuid}]`);
78
124
  ws.send(message);
79
125
  });
80
126
 
81
127
  let messageBuffer = Buffer.alloc(0);
82
-
83
- ws.on('message', (data) => {
128
+
129
+ ws.on('message', data => {
84
130
  logger.trace(`Received message chunk: ${data.length} bytes`);
85
131
  messageBuffer = Buffer.concat([messageBuffer, data]);
86
132
 
87
133
  while (messageBuffer.length >= 4) {
88
134
  const length = messageBuffer.readUInt32BE(0);
89
135
  if (messageBuffer.length < 4 + length) {
90
- logger.trace(`Waiting for more data: need ${4 + length} bytes, have ${messageBuffer.length}`);
136
+ logger.trace(
137
+ `Waiting for more data: need ${4 + length} bytes, have ${messageBuffer.length}`
138
+ );
91
139
  break;
92
140
  }
93
141
 
94
142
  const message = messageBuffer.slice(4, 4 + length);
95
143
  messageBuffer = messageBuffer.slice(4 + length);
96
144
 
97
- const messageTunnelId = message.slice(0, 36).toString();
145
+ const messageTunnelId = message.slice(0, 36).toString().trim();
98
146
  const uuid = message.slice(36, 72).toString();
99
147
  const type = message.readUInt8(72);
100
148
  const payload = message.slice(73);
101
149
 
102
- logger.trace(`Received WS message for uuid=${uuid}, type=${type}, length=${payload.length}`);
150
+ // Validate tunnelId matches expected tunnel
151
+ if (messageTunnelId !== tunnelId) {
152
+ logger.warn(
153
+ `Received message for wrong tunnel: ${messageTunnelId} (expected: ${tunnelId})`
154
+ );
155
+ return;
156
+ }
157
+
158
+ logger.trace(
159
+ `Received WS message for uuid=${uuid}, type=${type}, length=${payload.length}`
160
+ );
103
161
 
104
162
  if (type === MESSAGE_TYPE_DATA) {
105
163
  if (payload.toString() === 'CLOSE') {
@@ -110,7 +168,8 @@ function connectWebSocket(config) {
110
168
  return;
111
169
  }
112
170
 
113
- const client = clients[uuid] || createTcpClient(targetUrl, targetPort, ws, tunnelId, uuid);
171
+ const client =
172
+ clients[uuid] || createTcpClient(targetUrl, targetPort, ws, tunnelId, uuid);
114
173
 
115
174
  if (!client.write(payload)) {
116
175
  logger.debug(`Backpressure on TCP socket for uuid=${uuid}`);
@@ -119,13 +178,13 @@ function connectWebSocket(config) {
119
178
  });
120
179
  }
121
180
  return;
122
-
123
181
  } else if (type === MESSAGE_TYPE_APP_PONG) {
124
182
  try {
125
183
  const pongData = JSON.parse(payload.toString());
126
184
  // Accetta solo pong con seq >= pingSeq - 10 (finestra di 10 ping)
127
- if (pongData.seq >= (pingSeq - 10)) {
128
- lastPongTs = Date.now();
185
+ if (pongData.seq >= pingStateCallbacks.pingSeq() - 10) {
186
+ // Aggiorna lastPongTs usando il callback
187
+ pingStateCallbacks.setLastPongTs(Date.now());
129
188
  logger.trace(`App pong received: seq=${pongData.seq}`);
130
189
  } else {
131
190
  logger.debug(`Ignoring old pong: seq=${pongData.seq}`);
@@ -152,8 +211,12 @@ function connectWebSocket(config) {
152
211
  delete clients[uuid];
153
212
  }
154
213
 
214
+ // Reset message buffer on close for proper reconnection
215
+ messageBuffer = Buffer.alloc(0);
216
+
155
217
  if (!isClosed && autoReconnect) {
156
- const delay = RECONNECT_BACKOFF[reconnectAttempt] || RECONNECT_BACKOFF[RECONNECT_BACKOFF.length - 1];
218
+ const delay =
219
+ RECONNECT_BACKOFF[reconnectAttempt] || RECONNECT_BACKOFF[RECONNECT_BACKOFF.length - 1];
157
220
  logger.info(`Reconnecting in ${delay / 1000}s (attempt ${reconnectAttempt + 1})`);
158
221
  setTimeout(() => {
159
222
  reconnectAttempt = Math.min(reconnectAttempt + 1, RECONNECT_BACKOFF.length);
@@ -162,7 +225,7 @@ function connectWebSocket(config) {
162
225
  }
163
226
  });
164
227
 
165
- ws.on('error', (err) => {
228
+ ws.on('error', err => {
166
229
  logger.error('WebSocket error:', err);
167
230
  });
168
231
  };
@@ -217,13 +280,13 @@ function createTcpClient(targetUrl, targetPort, ws, tunnelId, uuid) {
217
280
  logger.info(`TCP connection established for uuid=${uuid}`);
218
281
  });
219
282
 
220
- client.on('data', (data) => {
283
+ client.on('data', data => {
221
284
  logger.trace(`TCP data received for uuid=${uuid}, length=${data.length}`);
222
285
  const message = buildMessageBuffer(tunnelId, uuid, MESSAGE_TYPE_DATA, data);
223
286
  ws.send(message);
224
287
  });
225
288
 
226
- client.on('error', (err) => {
289
+ client.on('error', err => {
227
290
  logger.error(`TCP error for uuid=${uuid}:`, err);
228
291
  client.destroy();
229
292
  delete clients[uuid];
@@ -248,7 +311,7 @@ function startAppHeartbeat(ws, tunnelId, pingState) {
248
311
  const pingData = JSON.stringify({
249
312
  type: 'ping',
250
313
  seq: currentPingSeq,
251
- ts: Date.now()
314
+ ts: Date.now(),
252
315
  });
253
316
 
254
317
  const message = buildMessageBuffer(tunnelId, uuidv4(), MESSAGE_TYPE_APP_PING, pingData);
@@ -273,7 +336,8 @@ function startHealthMonitor(ws, tunnelId, pongState) {
273
336
  }, 5000); // Check every 5 seconds
274
337
  }
275
338
 
276
- function resetClients() { // for testing
339
+ function resetClients() {
340
+ // for testing
277
341
  for (const key in clients) {
278
342
  delete clients[key];
279
343
  }
@@ -282,4 +346,4 @@ function resetClients() { // for testing
282
346
  module.exports = {
283
347
  connectWebSocket,
284
348
  resetClients, // for testing
285
- };
349
+ };
package/client/utils.js CHANGED
@@ -12,7 +12,8 @@ function buildMessageBuffer(tunnelId, uuid, type, payload) {
12
12
  const typeBuffer = Buffer.from([type]);
13
13
  const payloadBuffer = Buffer.isBuffer(payload) ? payload : Buffer.from(payload, 'utf8');
14
14
 
15
- const totalLength = tunnelBuffer.length + uuidBuffer.length + typeBuffer.length + payloadBuffer.length;
15
+ const totalLength =
16
+ tunnelBuffer.length + uuidBuffer.length + typeBuffer.length + payloadBuffer.length;
16
17
  const lengthBuffer = Buffer.alloc(4);
17
18
  lengthBuffer.writeUInt32BE(totalLength);
18
19
 
package/index.cjs CHANGED
@@ -5,5 +5,5 @@ const utils = require('./utils');
5
5
  module.exports = {
6
6
  ...server,
7
7
  ...client,
8
- ...utils
9
- };
8
+ ...utils,
9
+ };
package/index.mjs CHANGED
@@ -1,3 +1,3 @@
1
1
  export * from './server/index.mjs';
2
2
  export * from './client/index.mjs';
3
- export * from './utils/index.mjs';
3
+ export * from './utils/index.mjs';
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@remotelinker/reverse-ws-tunnel",
3
- "version": "1.0.9",
3
+ "version": "1.0.11",
4
4
  "description": "A Node.js library for creating secure reverse tunnels over WebSocket connections",
5
5
  "main": "index.cjs",
6
6
  "types": "types/index.d.ts",
@@ -41,7 +41,9 @@
41
41
  "example:server:esm": "node examples/server/server-example.mjs",
42
42
  "example:client": "node examples/client/client-example.js",
43
43
  "example:client:esm": "node examples/client/client-example.mjs",
44
- "example:webserver": "node examples/webserver/webserver-example.js"
44
+ "example:webserver": "node examples/webserver/webserver-example.js",
45
+ "format": "prettier --write \"**/*.js\" \"**/*.mjs\" \"**/*.cjs\"",
46
+ "format:check": "prettier --check \"**/*.js\" \"**/*.mjs\" \"**/*.cjs\""
45
47
  },
46
48
  "exports": {
47
49
  ".": {
@@ -78,6 +80,7 @@
78
80
  },
79
81
  "devDependencies": {
80
82
  "jest": "^29.7.0",
83
+ "prettier": "^3.8.1",
81
84
  "typescript": "^5.9.3"
82
85
  }
83
86
  }
package/server/index.js CHANGED
@@ -1,8 +1,9 @@
1
1
  // require('dotenv').config();
2
- const { startWebSocketServer } = require('./websocketServer');
2
+ const { startWebSocketServer, stopWebSocketServer } = require('./websocketServer');
3
3
  const { setLogContext } = require('../utils/logger');
4
4
 
5
5
  module.exports = {
6
6
  startWebSocketServer,
7
+ stopWebSocketServer,
7
8
  setLogContext,
8
9
  };
package/server/index.mjs CHANGED
@@ -1,2 +1,2 @@
1
1
  import cjsModule from './index.js';
2
- export const { startWebSocketServer, setLogContext } = cjsModule;
2
+ export const { startWebSocketServer, setLogContext } = cjsModule;
@@ -1,6 +1,11 @@
1
1
  const state = require('./state');
2
- const { MESSAGE_TYPE_CONFIG, MESSAGE_TYPE_DATA, MESSAGE_TYPE_APP_PING, MESSAGE_TYPE_APP_PONG } = require('./constants');
3
- const { startTCPServer } = require('./tcpServer');
2
+ const {
3
+ MESSAGE_TYPE_CONFIG,
4
+ MESSAGE_TYPE_DATA,
5
+ MESSAGE_TYPE_APP_PING,
6
+ MESSAGE_TYPE_APP_PONG,
7
+ } = require('./constants');
8
+ const { ensureTCPServer } = require('./tcpServer');
4
9
  const { logger } = require('../utils/logger');
5
10
  const { buildMessageBuffer } = require('../client/utils');
6
11
 
@@ -14,7 +19,7 @@ const { buildMessageBuffer } = require('../client/utils');
14
19
  * @param {string} tunnelIdHeaderName - Header name to identify the tunnel.
15
20
  * @param {number} port - Listening port for state grouping.
16
21
  */
17
- function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderName, port) {
22
+ async function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderName, port) {
18
23
  logger.trace(`handleParsedMessage called. type=${type}, tunnelId=${tunnelId}, uuid=${uuid}`);
19
24
 
20
25
  if (type === MESSAGE_TYPE_CONFIG) {
@@ -48,19 +53,52 @@ function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderNa
48
53
  };
49
54
 
50
55
  const portKey = String(TUNNEL_ENTRY_PORT);
51
- if (!state[port][portKey]) {
52
- logger.info(`Starting new TCP server on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`);
53
- state[port][portKey] = {};
54
- state[port][portKey] = {
55
- tcpServer: startTCPServer(TUNNEL_ENTRY_PORT, tunnelIdHeaderName, port),
56
- };
56
+ // Check both state and global tcpServers registry
57
+ logger.debug(`[TCP] Checking existing servers for portKey=${portKey}`);
58
+ const existingServerInState = state[port]?.[portKey]?.tcpServer;
59
+ const existingServerInGlobal = state.tcpServers[portKey];
60
+ logger.debug(`[TCP] existingServerInState=${!!existingServerInState}, existingServerInGlobal=${!!existingServerInGlobal}`);
61
+
62
+ const isServerListening = (existingServerInState && existingServerInState.listening) ||
63
+ (existingServerInGlobal && existingServerInGlobal.listening);
64
+ logger.debug(`[TCP] isServerListening=${isServerListening}`);
65
+
66
+ if (!isServerListening) {
67
+ // Close any stale TCP server in global registry before creating new one
68
+ if (state.tcpServers[portKey] && state.tcpServers[portKey].listening) {
69
+ logger.warn(`[TCP] Closing stale TCP server on port ${TUNNEL_ENTRY_PORT} before creating new one`);
70
+ state.tcpServers[portKey].close();
71
+ }
72
+
73
+ // Use ensureTCPServer to handle port cleanup (EADDRINUSE after Node-RED restart)
74
+ logger.info(`[TCP] Starting new TCP server on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`);
75
+
76
+ try {
77
+ logger.info(`[TCP] >>> Calling ensureTCPServer for port ${TUNNEL_ENTRY_PORT} <<<`);
78
+ const tcpServer = await ensureTCPServer(TUNNEL_ENTRY_PORT, tunnelIdHeaderName, port);
79
+ logger.info(`[TCP] >>> ensureTCPServer returned for port ${TUNNEL_ENTRY_PORT} <<<`);
80
+
81
+ // Store in state per port
82
+ state[port][portKey] = { tcpServer };
83
+ logger.debug(`[TCP] Stored in state[${port}][${portKey}]`);
84
+
85
+ // Also register in global tcpServers registry for tracking
86
+ state.tcpServers[portKey] = tcpServer;
87
+ logger.info(`[TCP] >>> REGISTERED in global state.tcpServers: port ${portKey}, listening=${tcpServer.listening} <<<`);
88
+
89
+ logger.info(`[TCP] TCP server ready on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`);
90
+ } catch (err) {
91
+ logger.error(`[TCP] Failed to create TCP server on port ${TUNNEL_ENTRY_PORT}: ${err.message}`);
92
+ }
57
93
  } else {
58
- logger.debug(`TCP server already exists on port ${TUNNEL_ENTRY_PORT}`);
94
+ logger.debug(`[TCP] TCP server already exists and listening on port ${TUNNEL_ENTRY_PORT}`);
59
95
  }
60
96
 
61
97
  logger.info(`Tunnel [${tunnelId}] established successfully`);
62
98
  } catch (error) {
63
- logger.error(`Failed to process MESSAGE_TYPE_CONFIG for tunnelId=${tunnelId}: ${error.message}`);
99
+ logger.error(
100
+ `Failed to process MESSAGE_TYPE_CONFIG for tunnelId=${tunnelId}: ${error.message}`
101
+ );
64
102
  }
65
103
 
66
104
  return;
@@ -72,7 +110,7 @@ function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderNa
72
110
  const pingData = JSON.parse(payload.toString());
73
111
  const pongData = JSON.stringify({
74
112
  type: 'pong',
75
- seq: pingData.seq
113
+ seq: pingData.seq,
76
114
  });
77
115
 
78
116
  const pongMessage = buildMessageBuffer(tunnelId, uuid, MESSAGE_TYPE_APP_PONG, pongData);
package/server/state.js CHANGED
@@ -1 +1,5 @@
1
- module.exports = {};
1
+ module.exports = {
2
+ // Global registry of all TCP servers created (keyed by TCP port)
3
+ // This is used to track and close TCP servers that may not be in the main state yet
4
+ tcpServers: {}
5
+ };
@@ -11,7 +11,11 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
11
11
  const wsPortKey = String(websocketPort);
12
12
  const tcpPortKey = String(port);
13
13
 
14
- const server = net.createServer((socket) => {
14
+ const server = net.createServer({
15
+ // Allow reusing the port quickly after server closes (SO_REUSEADDR)
16
+ // This helps with Node-RED restarts where old server might be in TIME_WAIT
17
+ // Note: On some OS, you may also need to handle EADDRINUSE by waiting a bit
18
+ }, socket => {
15
19
  const uuid = uuidv4();
16
20
  const uuidBuffer = Buffer.from(uuid);
17
21
  let currentTunnelId = null;
@@ -22,7 +26,7 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
22
26
  function createParser() {
23
27
  const parser = new HTTPParser(HTTPParser.REQUEST);
24
28
 
25
- parser[HTTPParser.kOnHeadersComplete] = (info) => {
29
+ parser[HTTPParser.kOnHeadersComplete] = info => {
26
30
  const headers = info.headers.reduce((acc, val, i, arr) => {
27
31
  if (i % 2 === 0) acc[val.toLowerCase()] = arr[i + 1];
28
32
  return acc;
@@ -60,7 +64,9 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
60
64
 
61
65
  isWebSocket = headers['upgrade']?.toLowerCase() === 'websocket';
62
66
 
63
- logger.trace(`Sending initial headers (${rawHeaders.length} bytes) to tunnel [${currentTunnelId}]`);
67
+ logger.trace(
68
+ `Sending initial headers (${rawHeaders.length} bytes) to tunnel [${currentTunnelId}]`
69
+ );
64
70
  const message = buildMessageBuffer(currentTunnelId, uuid, MESSAGE_TYPE_DATA, rawHeaders);
65
71
  tunnel.ws.send(message);
66
72
 
@@ -89,11 +95,13 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
89
95
 
90
96
  let currentParser = createParser();
91
97
 
92
- socket.on('data', (chunk) => {
98
+ socket.on('data', chunk => {
93
99
  const tunnel = state[wsPortKey]?.websocketTunnels?.[currentTunnelId];
94
100
  if (isWebSocket) {
95
101
  if (tunnel?.ws) {
96
- logger.trace(`Forwarding WebSocket TCP data (${chunk.length} bytes) for tunnel [${currentTunnelId}]`);
102
+ logger.trace(
103
+ `Forwarding WebSocket TCP data (${chunk.length} bytes) for tunnel [${currentTunnelId}]`
104
+ );
97
105
  const message = buildMessageBuffer(currentTunnelId, uuid, MESSAGE_TYPE_DATA, chunk);
98
106
  tunnel.ws.send(message);
99
107
  }
@@ -117,26 +125,135 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
117
125
  });
118
126
 
119
127
  socket.on('close', () => {
120
- const deleted = delete state[wsPortKey]?.websocketTunnels?.[currentTunnelId]?.tcpConnections?.[uuid];
121
- logger.debug(`TCP socket closed [${uuid}] for tunnel [${currentTunnelId}], connection ${deleted ? 'removed' : 'not found'}`);
128
+ const deleted =
129
+ delete state[wsPortKey]?.websocketTunnels?.[currentTunnelId]?.tcpConnections?.[uuid];
130
+ logger.debug(
131
+ `TCP socket closed [${uuid}] for tunnel [${currentTunnelId}], connection ${deleted ? 'removed' : 'not found'}`
132
+ );
122
133
  });
123
134
 
124
- socket.on('error', (err) => {
135
+ socket.on('error', err => {
125
136
  logger.error(`Socket error on tunnel [${currentTunnelId}], uuid [${uuid}]:`, err);
126
137
  delete state[wsPortKey]?.websocketTunnels?.[currentTunnelId]?.tcpConnections?.[uuid];
127
138
  });
128
139
  });
129
140
 
130
- // Store reference
131
- state[wsPortKey][tcpPortKey].tcpServer = server;
141
+ // Note: We don't store the server reference here because the state structure
142
+ // doesn't exist yet (it's created in messageHandler.js AFTER this function returns).
143
+ // The caller (messageHandler.js) is responsible for storing the server reference.
132
144
 
133
- server.listen(port, () => {
134
- logger.info(`TCP server listening on port ${port} for websocketPort ${websocketPort}`);
145
+ // Return a promise that resolves when listening or rejects on error
146
+ return new Promise((resolve, reject) => {
147
+ server.on('listening', () => {
148
+ logger.info(`TCP server listening on port ${port} for websocketPort ${websocketPort}`);
149
+ resolve(server);
150
+ });
151
+
152
+ server.on('error', err => {
153
+ logger.error(`TCP server error on port ${port}:`, err);
154
+ reject(err);
155
+ });
156
+
157
+ // Use reuseAddr to allow quick port reuse after server restart (TIME_WAIT)
158
+ // This helps with Node-RED restarts where the old server might be in TIME_WAIT state
159
+ server.listen({
160
+ port: port,
161
+ host: '0.0.0.0',
162
+ reuseAddr: true,
163
+ }, () => {
164
+ // The server.address() returns the actual port bound (handles port === 0 case)
165
+ const addr = server.address();
166
+ logger.info(`TCP server listening on port ${addr.port} for websocketPort ${websocketPort}`);
167
+ resolve(server);
168
+ });
135
169
  });
170
+ }
136
171
 
137
- server.on('error', (err) => {
138
- logger.error(`TCP server error on port ${port}:`, err);
172
+ /**
173
+ * Forcefully kills any process using the specified port by connecting to it
174
+ * and keeping the connection open briefly, then attempts to bind the port.
175
+ * This helps release the port from a previous process.
176
+ *
177
+ * @param {number} port - The port to clear
178
+ * @returns {Promise<boolean>} True if we successfully cleared the port
179
+ */
180
+ async function forceClosePort(port) {
181
+ return new Promise((resolve) => {
182
+ const socket = new net.Socket();
183
+
184
+ socket.setTimeout(500);
185
+
186
+ socket.on('connect', () => {
187
+ // Connected to existing server - destroy it and try to take over
188
+ socket.destroy();
189
+
190
+ // Now try to bind to the port - this should cause the OS to close the old server
191
+ const takeover = net.createServer();
192
+ takeover.on('error', (err) => {
193
+ if (err.code === 'EADDRINUSE') {
194
+ // Try again with a different approach - wait a bit
195
+ setTimeout(() => resolve(true), 100);
196
+ } else {
197
+ resolve(false);
198
+ }
199
+ });
200
+
201
+ takeover.on('listening', () => {
202
+ // We got the port! Close our temp server
203
+ takeover.close(() => resolve(true));
204
+ });
205
+
206
+ takeover.listen(port);
207
+ });
208
+
209
+ socket.on('timeout', () => {
210
+ socket.destroy();
211
+ resolve(false);
212
+ });
213
+
214
+ socket.on('error', () => {
215
+ resolve(false);
216
+ });
217
+
218
+ socket.connect(port, '127.0.0.1');
139
219
  });
140
220
  }
141
221
 
142
- module.exports = { startTCPServer };
222
+ /**
223
+ * Ensures a TCP server is available on the specified port.
224
+ * If a server is already listening on the port (from a previous process),
225
+ * it will be forcefully closed before creating a new one.
226
+ *
227
+ * @param {number} port - The TCP port to bind to
228
+ * @param {string} tunnelIdHeaderName - Header name for tunnel identification
229
+ * @param {number} websocketPort - The WebSocket port (used as state key)
230
+ * @returns {Promise<net.Server>} The TCP server instance
231
+ */
232
+ async function ensureTCPServer(port, tunnelIdHeaderName, websocketPort) {
233
+ const maxRetries = 5;
234
+ const retryDelay = 300;
235
+
236
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
237
+ try {
238
+ // Try to start the TCP server
239
+ return await startTCPServer(port, tunnelIdHeaderName, websocketPort);
240
+ } catch (err) {
241
+ if (err.code === 'EADDRINUSE') {
242
+ logger.warn(`Port ${port} in use (attempt ${attempt}/${maxRetries}), attempting to force close...`);
243
+
244
+ // Try to force close the port
245
+ await forceClosePort(port);
246
+
247
+ // Wait before retrying
248
+ await new Promise(resolve => setTimeout(resolve, retryDelay * attempt));
249
+ } else {
250
+ throw err;
251
+ }
252
+ }
253
+ }
254
+
255
+ // Last attempt - don't catch, let it fail
256
+ return startTCPServer(port, tunnelIdHeaderName, websocketPort);
257
+ }
258
+
259
+ module.exports = { startTCPServer, ensureTCPServer, forceClosePort };
@@ -22,7 +22,9 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
22
22
  state[portKey].webSocketServer = new WebSocket.Server({ port, host, path });
23
23
 
24
24
  state[portKey].webSocketServer.on('listening', () => {
25
- logger.info(`WebSocket server listening on port ${port}${host ? ` (host: ${host})` : ''}${path ? `, path: ${path}` : ''}`);
25
+ logger.info(
26
+ `WebSocket server listening on port ${port}${host ? ` (host: ${host})` : ''}${path ? `, path: ${path}` : ''}`
27
+ );
26
28
  });
27
29
 
28
30
  state[portKey].webSocketServer.on('connection', (ws, req) => {
@@ -41,7 +43,9 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
41
43
 
42
44
  const interval = setInterval(() => {
43
45
  if (!ws.isAlive) {
44
- logger.warn(`No pong received from client on tunnel [${tunnelId || 'unknown'}], terminating.`);
46
+ logger.warn(
47
+ `No pong received from client on tunnel [${tunnelId || 'unknown'}], terminating.`
48
+ );
45
49
  return ws.terminate();
46
50
  }
47
51
  ws.isAlive = false;
@@ -51,7 +55,7 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
51
55
  }
52
56
  }, PING_INTERVAL);
53
57
 
54
- ws.on('message', (chunk) => {
58
+ ws.on('message', chunk => {
55
59
  logger.trace(`Received message chunk: ${chunk.length} bytes`);
56
60
  buffer = Buffer.concat([buffer, chunk]);
57
61
 
@@ -67,15 +71,22 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
67
71
  const type = message.readUInt8(72);
68
72
  const payload = message.slice(73);
69
73
 
70
- logger.trace(`Parsed message - tunnelId: ${messageTunnelId}, uuid: ${uuid}, type: ${type}, payload length: ${payload.length}`);
74
+ logger.trace(
75
+ `Parsed message - tunnelId: ${messageTunnelId}, uuid: ${uuid}, type: ${type}, payload length: ${payload.length}`
76
+ );
71
77
 
72
78
  // Check for duplicate tunnelId on first message (when tunnelId is not yet set)
73
79
  if (!tunnelId && messageTunnelId) {
74
80
  const existingTunnel = state[portKey]?.websocketTunnels?.[messageTunnelId];
75
81
  if (existingTunnel && existingTunnel.ws && existingTunnel.ws !== ws) {
76
82
  // Check if the existing WebSocket is still open
77
- if (existingTunnel.ws.readyState === WebSocket.OPEN || existingTunnel.ws.readyState === WebSocket.CONNECTING) {
78
- logger.error(`Tunnel [${messageTunnelId}] already exists with an active connection. Rejecting new connection.`);
83
+ if (
84
+ existingTunnel.ws.readyState === WebSocket.OPEN ||
85
+ existingTunnel.ws.readyState === WebSocket.CONNECTING
86
+ ) {
87
+ logger.error(
88
+ `Tunnel [${messageTunnelId}] already exists with an active connection. Rejecting new connection.`
89
+ );
79
90
 
80
91
  // Assign tunnelId before closing so cleanup logs the correct value
81
92
  tunnelId = messageTunnelId;
@@ -84,7 +95,9 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
84
95
  ws.close(1008, `Duplicate tunnelId: ${messageTunnelId}`);
85
96
  return;
86
97
  } else {
87
- logger.info(`Existing tunnel [${messageTunnelId}] has a closed connection. Allowing new connection.`);
98
+ logger.info(
99
+ `Existing tunnel [${messageTunnelId}] has a closed connection. Allowing new connection.`
100
+ );
88
101
  }
89
102
  }
90
103
  tunnelId = messageTunnelId;
@@ -104,7 +117,9 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
104
117
  delete state[portKey].websocketTunnels[tunnelId];
105
118
  logger.debug(`Removed tunnel [${tunnelId}] from state`);
106
119
  } else {
107
- logger.debug(`Tunnel [${tunnelId}] not removed - this was a duplicate/rejected connection`);
120
+ logger.debug(
121
+ `Tunnel [${tunnelId}] not removed - this was a duplicate/rejected connection`
122
+ );
108
123
  }
109
124
  } else {
110
125
  logger.debug(`No tunnelId assigned yet, nothing to remove from state`);
@@ -126,17 +141,101 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
126
141
  cleanup('close');
127
142
  });
128
143
 
129
- ws.on('error', (err) => {
144
+ ws.on('error', err => {
130
145
  logger.error(`WebSocket error on tunnel [${tunnelId || 'unknown'}]:`, err);
131
146
  cleanup('error');
132
147
  });
133
148
  });
134
149
 
135
- state[portKey].webSocketServer.on('error', (err) => {
150
+ state[portKey].webSocketServer.on('error', err => {
136
151
  logger.error('WebSocket server error:', err);
137
152
  });
138
153
 
139
154
  return state;
140
155
  }
141
156
 
142
- module.exports = { startWebSocketServer };
157
/**
 * Stops the WebSocket tunnel server and cleans up all resources.
 *
 * Closes every open tunnel WebSocket (which triggers the per-tunnel
 * cleanup, including heartbeat intervals), shuts down every TCP server
 * known through the per-port state and through the global
 * `state.tcpServers` registry, closes the WebSocket server itself and
 * finally removes the port entry from state. Gracefully handles an
 * already-stopped server (no errors, no-op).
 *
 * @param {number} port - Port of the WebSocket server to stop.
 * @returns {Promise<void>} Resolves when cleanup is complete.
 */
async function stopWebSocketServer(port) {
  // Promisify `server.close(cb)` — shared by net.Server and ws servers.
  const closeServer = (server) =>
    new Promise((resolve) => {
      server.close(() => resolve());
    });

  const portKey = String(port);
  const serverState = state[portKey];

  if (!serverState) {
    logger.debug(`No server found on port ${port}, nothing to stop`);
    return;
  }

  logger.info(`Stopping WebSocket server on port ${port}...`);

  // 1. Close all active WebSocket connections (triggers cleanup for each tunnel)
  if (serverState.websocketTunnels) {
    for (const tunnel of Object.values(serverState.websocketTunnels)) {
      if (tunnel.ws && tunnel.ws.readyState === WebSocket.OPEN) {
        tunnel.ws.close(1000, 'Server shutting down');
      }
    }
  }

  // 2. Close all TCP servers in per-port state. Any key other than the two
  // well-known ones is a TCP-port entry.
  logger.debug(`[CLEANUP] Checking per-port state for TCP servers. Keys: ${Object.keys(serverState).join(', ')}`);
  for (const [tcpPort, tcpState] of Object.entries(serverState)) {
    if (tcpPort === 'webSocketServer' || tcpPort === 'websocketTunnels') continue;
    if (!tcpState?.tcpServer) continue;
    logger.info(`[CLEANUP] Closing TCP server in per-port state on port ${tcpPort}`);
    await closeServer(tcpState.tcpServer);
    logger.info(`[CLEANUP] Closed TCP server on port ${tcpPort}`);
  }

  // 2b. Close all TCP servers in global tcpServers registry.
  // This handles TCP servers that may not be in state yet (client not
  // reconnected). We visit ALL servers in the registry, not just listening
  // ones, because they may have been closed in per-port cleanup above but
  // still exist here.
  const globalTcpServerCount = Object.keys(state.tcpServers || {}).length;
  logger.info(`[CLEANUP] Global tcpServers registry has ${globalTcpServerCount} entries: ${Object.keys(state.tcpServers || {}).join(', ')}`);
  if (state.tcpServers && globalTcpServerCount > 0) {
    for (const [tcpPort, tcpServer] of Object.entries(state.tcpServers)) {
      logger.info(`[CLEANUP] Checking global TCP server on port ${tcpPort}: exists=${!!tcpServer}, listening=${tcpServer?.listening}`);
      if (!tcpServer) continue;
      if (tcpServer.listening) {
        logger.info(`[CLEANUP] Closing global TCP server on port ${tcpPort} (listening)...`);
        await closeServer(tcpServer);
        logger.info(`[CLEANUP] Closed global TCP server on port ${tcpPort}`);
      } else {
        // Already closed during per-port cleanup; the registry is wiped below.
        logger.info(`[CLEANUP] Global TCP server on port ${tcpPort} already closed (listening=false), clearing from registry`);
      }
    }
    // Clear the global tcpServers registry
    state.tcpServers = {};
  }

  // 3. Close the main WebSocket server
  if (serverState.webSocketServer) {
    await closeServer(serverState.webSocketServer);
    logger.debug(`Closed WebSocket server on port ${port}`);
  }

  // 4. Clean up state
  delete state[portKey];
  logger.info(`WebSocket server on port ${port} stopped and state cleaned`);
}
+ }
240
+
241
+ module.exports = { startWebSocketServer, stopWebSocketServer };
package/utils/index.js CHANGED
@@ -1,8 +1,11 @@
1
- const { setLogLevel, getLogLevel } = require('./logger.js');
1
+ const { setLogLevel, getLogLevel, setLogContext, getLogContext, logger } = require('./logger.js');
2
2
  const { loadConfig } = require('./loadConfig.js');
3
3
 
4
4
  module.exports = {
5
5
  setLogLevel,
6
6
  getLogLevel,
7
+ setLogContext,
8
+ getLogContext,
9
+ logger,
7
10
  loadConfig,
8
11
  };
package/utils/index.mjs CHANGED
// ESM wrapper around the CommonJS utils entry point. Must mirror the named
// exports of ./index.js: setLogContext, getLogContext and logger were added
// there in v1.0.11 but were missing from this re-export, making them
// unreachable for ESM consumers.
import cjsModule from './index.js';
export const { setLogLevel, getLogLevel, setLogContext, getLogContext, logger, loadConfig } = cjsModule;
@@ -7,7 +7,9 @@ const TOML = require('@iarna/toml');
7
7
 
8
8
  function loadConfig(customPath) {
9
9
  const callerDir = require.main?.path || process.cwd();
10
- const configPath = customPath ? path.join(customPath, FILE_CONFIG_NAME) : path.join(callerDir, FILE_CONFIG_NAME);
10
+ const configPath = customPath
11
+ ? path.join(customPath, FILE_CONFIG_NAME)
12
+ : path.join(callerDir, FILE_CONFIG_NAME);
11
13
 
12
14
  console.log({ configPath });
13
15
 
@@ -25,7 +27,9 @@ function loadConfig(customPath) {
25
27
  logger.warn(`⚠️ Failed to parse config.toml at ${configPath}: ${err.message}`);
26
28
  }
27
29
  } else {
28
- logger.info(`ℹ️ No config.toml found at: ${configPath}, falling back to environment variables.`);
30
+ logger.info(
31
+ `ℹ️ No config.toml found at: ${configPath}, falling back to environment variables.`
32
+ );
29
33
  }
30
34
 
31
35
  const envConfig = {
@@ -33,7 +37,9 @@ function loadConfig(customPath) {
33
37
  wsUrl: process.env.WS_URL,
34
38
  targetUrl: process.env.TARGET_URL,
35
39
  tunnelEntryUrl: process.env.TUNNEL_ENTRY_URL,
36
- tunnelEntryPort: process.env.TUNNEL_ENTRY_PORT ? Number(process.env.TUNNEL_ENTRY_PORT) : undefined,
40
+ tunnelEntryPort: process.env.TUNNEL_ENTRY_PORT
41
+ ? Number(process.env.TUNNEL_ENTRY_PORT)
42
+ : undefined,
37
43
  headers: process.env.HEADERS,
38
44
  allowInsicureCerts: process.env.ALLOW_INSICURE_CERTS === 'true',
39
45
  logLevel: process.env.LOG_LEVEL || 'info',
package/utils/logger.js CHANGED
@@ -34,12 +34,12 @@ const logger = winston.createLogger({
34
34
  new winston.transports.Console({
35
35
  format: winston.format.combine(
36
36
  winston.format.timestamp(),
37
- winston.format.printf((info) => {
37
+ winston.format.printf(info => {
38
38
  const contextPrefix = logContext ? `${logContext} | ` : '';
39
39
  // Get the raw level (before colorization)
40
40
  const rawLevel = info[Symbol.for('level')] || info.level || 'info';
41
41
  // Apply color to level and message separately
42
- const colorizer = winston.format.colorize({ colors: customLevels.colors });
42
+ const colorizer = winston.format.colorize({ colors: customLevels.colors });
43
43
  const coloredLevel = colorizer.colorize(rawLevel, rawLevel);
44
44
  const coloredMessage = colorizer.colorize(rawLevel, `${contextPrefix}${info.message}`);
45
45
  return `[${info.timestamp}] ${coloredLevel}: ${coloredMessage}`;