@remotelinker/reverse-ws-tunnel 1.0.10 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -30,7 +30,36 @@ Reverse WebSocket Tunnel is a library that enables you to expose local services
 
  ---
 
- ## ✨ v1.0.10 - What's New
+ ## ✨ v1.0.11 - What's New
+
+ ### ✨ New Features
+ - **stopWebSocketServer(port)**: Added new function to properly stop and cleanup the WebSocket server
+ - Closes all active WebSocket connections (triggering cleanup of heartbeat intervals)
+ - Closes all TCP servers registered in state
+ - Cleans up state for the specified port
+ - Gracefully handles already-stopped servers (no errors)
+
+ ### 🐛 Bug Fixes
+ - **Heartbeat cleanup**: Fixed issue where setInterval for heartbeat was not properly cleaned up when server stopped
+ - **Node-RED integration**: Added cleanup on startup to handle cases where previous deployment didn't cleanup properly
+ - **TCP server connection hang**: Fixed critical issue where TCP connections would hang indefinitely
+ - Removed `pauseOnConnect: true` option from TCP server configuration
+ - This option was added in error - it pauses sockets on connect and requires manual `socket.resume()`
+ - The fix restores proper connection flow while keeping `reuseAddr: true` for port reuse on restart
+ - **TCP server port reuse**: Fixed "EADDRINUSE" error when client reconnects after Node-RED restart
+ - Now checks if TCP server is actually listening (`server.listening`) before skipping creation
+ - Previously only checked if state entry existed, not if server was active
+ - **TCP server global registry**: Added global tcpServers registry to track TCP servers even when not in state
+ - When stopWebSocketServer is called, now closes ALL TCP servers in global registry
+ - When creating new TCP server, checks global registry and closes stale servers before creating new one
+
+ ### 🔧 Improvements
+ - **Graceful shutdown**: Server now properly releases all resources (ports, memory, intervals) on shutdown
+ - **State management**: Improved state cleanup to prevent stale entries after server restart
+
+ ---
+
+ ## ✨ v1.0.10 - Previous Release
 
  ### 🔧 Code Quality & Developer Experience
  - **Code Cleanup**: Removed unused constants and redundant variables
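
For context, a minimal usage sketch of the new shutdown API described in the changelog above. The `/server` require path and all option/port values are assumptions for illustration (not taken from the package README); the `startWebSocketServer({ port, host, path, tunnelIdHeaderName })` and `stopWebSocketServer(port)` signatures match the code shown later in this diff.

```javascript
// Sketch only: require path and values are assumed, signatures come from this diff.
const { startWebSocketServer, stopWebSocketServer } = require('@remotelinker/reverse-ws-tunnel/server');

startWebSocketServer({
  port: 8080,                       // WebSocket listening port
  host: '0.0.0.0',
  path: '/tunnel',
  tunnelIdHeaderName: 'x-tunnel-id',
});

// Later, e.g. on shutdown or redeploy: closes tunnels, TCP servers, heartbeat
// intervals and state for this port; resolves quietly if already stopped.
process.on('SIGTERM', async () => {
  await stopWebSocketServer(8080);
  process.exit(0);
});
```
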
@@ -342,6 +371,29 @@ Set the log level via:
  - Environment variable: `LOG_LEVEL=debug`
  - TOML config: `logLevel = "debug"`
 
+ ### Logger API
+
+ The library exports logger functions for advanced control:
+
+ ```javascript
+ const { setLogLevel, getLogLevel, setLogContext, getLogContext, logger } = require('@remotelinker/reverse-ws-tunnel/utils');
+
+ // Set log level programmatically
+ setLogLevel('debug');
+
+ // Get current log level
+ const currentLevel = getLogLevel();
+
+ // Set context for all log messages (useful for Node-RED)
+ setLogContext({ nodeId: 'my-node', session: 'abc123' });
+
+ // Get current context
+ const context = getLogContext();
+
+ // Use logger directly
+ logger.info('Custom log message', { custom: 'data' });
+ ```
+
  ---
 
  ## 🔧 Advanced Usage
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@remotelinker/reverse-ws-tunnel",
- "version": "1.0.10",
+ "version": "1.0.11",
  "description": "A Node.js library for creating secure reverse tunnels over WebSocket connections",
  "main": "index.cjs",
  "types": "types/index.d.ts",
package/server/index.js CHANGED
@@ -1,8 +1,9 @@
  // require('dotenv').config();
- const { startWebSocketServer } = require('./websocketServer');
+ const { startWebSocketServer, stopWebSocketServer } = require('./websocketServer');
  const { setLogContext } = require('../utils/logger');
 
  module.exports = {
  startWebSocketServer,
+ stopWebSocketServer,
  setLogContext,
  };
package/server/messageHandler.js CHANGED
@@ -5,7 +5,7 @@ const {
  MESSAGE_TYPE_APP_PING,
  MESSAGE_TYPE_APP_PONG,
  } = require('./constants');
- const { startTCPServer } = require('./tcpServer');
+ const { ensureTCPServer } = require('./tcpServer');
  const { logger } = require('../utils/logger');
  const { buildMessageBuffer } = require('../client/utils');
 
@@ -19,7 +19,7 @@ const { buildMessageBuffer } = require('../client/utils');
  * @param {string} tunnelIdHeaderName - Header name to identify the tunnel.
  * @param {number} port - Listening port for state grouping.
  */
- function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderName, port) {
+ async function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderName, port) {
  logger.trace(`handleParsedMessage called. type=${type}, tunnelId=${tunnelId}, uuid=${uuid}`);
 
  if (type === MESSAGE_TYPE_CONFIG) {
@@ -53,16 +53,45 @@ function handleParsedMessage(ws, tunnelId, uuid, type, payload, tunnelIdHeaderNa
  };
 
  const portKey = String(TUNNEL_ENTRY_PORT);
- if (!state[port][portKey]) {
- logger.info(
- `Starting new TCP server on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`
- );
- state[port][portKey] = {};
- state[port][portKey] = {
- tcpServer: startTCPServer(TUNNEL_ENTRY_PORT, tunnelIdHeaderName, port),
- };
+ // Check both state and global tcpServers registry
+ logger.debug(`[TCP] Checking existing servers for portKey=${portKey}`);
+ const existingServerInState = state[port]?.[portKey]?.tcpServer;
+ const existingServerInGlobal = state.tcpServers[portKey];
+ logger.debug(`[TCP] existingServerInState=${!!existingServerInState}, existingServerInGlobal=${!!existingServerInGlobal}`);
+
+ const isServerListening = (existingServerInState && existingServerInState.listening) ||
+ (existingServerInGlobal && existingServerInGlobal.listening);
+ logger.debug(`[TCP] isServerListening=${isServerListening}`);
+
+ if (!isServerListening) {
+ // Close any stale TCP server in global registry before creating new one
+ if (state.tcpServers[portKey] && state.tcpServers[portKey].listening) {
+ logger.warn(`[TCP] Closing stale TCP server on port ${TUNNEL_ENTRY_PORT} before creating new one`);
+ state.tcpServers[portKey].close();
+ }
+
+ // Use ensureTCPServer to handle port cleanup (EADDRINUSE after Node-RED restart)
+ logger.info(`[TCP] Starting new TCP server on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`);
+
+ try {
+ logger.info(`[TCP] >>> Calling ensureTCPServer for port ${TUNNEL_ENTRY_PORT} <<<`);
+ const tcpServer = await ensureTCPServer(TUNNEL_ENTRY_PORT, tunnelIdHeaderName, port);
+ logger.info(`[TCP] >>> ensureTCPServer returned for port ${TUNNEL_ENTRY_PORT} <<<`);
+
+ // Store in state per port
+ state[port][portKey] = { tcpServer };
+ logger.debug(`[TCP] Stored in state[${port}][${portKey}]`);
+
+ // Also register in global tcpServers registry for tracking
+ state.tcpServers[portKey] = tcpServer;
+ logger.info(`[TCP] >>> REGISTERED in global state.tcpServers: port ${portKey}, listening=${tcpServer.listening} <<<`);
+
+ logger.info(`[TCP] TCP server ready on port ${TUNNEL_ENTRY_PORT} for tunnelId=${tunnelId}`);
+ } catch (err) {
+ logger.error(`[TCP] Failed to create TCP server on port ${TUNNEL_ENTRY_PORT}: ${err.message}`);
+ }
  } else {
- logger.debug(`TCP server already exists on port ${TUNNEL_ENTRY_PORT}`);
+ logger.debug(`[TCP] TCP server already exists and listening on port ${TUNNEL_ENTRY_PORT}`);
  }
 
  logger.info(`Tunnel [${tunnelId}] established successfully`);
package/server/state.js CHANGED
@@ -1 +1,5 @@
- module.exports = {};
+ module.exports = {
+ // Global registry of all TCP servers created (keyed by TCP port)
+ // This is used to track and close TCP servers that may not be in the main state yet
+ tcpServers: {}
+ };
package/server/tcpServer.js CHANGED
@@ -11,7 +11,11 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
  const wsPortKey = String(websocketPort);
  const tcpPortKey = String(port);
 
- const server = net.createServer(socket => {
+ const server = net.createServer({
+ // Allow reusing the port quickly after server closes (SO_REUSEADDR)
+ // This helps with Node-RED restarts where old server might be in TIME_WAIT
+ // Note: On some OS, you may also need to handle EADDRINUSE by waiting a bit
+ }, socket => {
  const uuid = uuidv4();
  const uuidBuffer = Buffer.from(uuid);
  let currentTunnelId = null;
@@ -134,16 +138,122 @@ function startTCPServer(port, tunnelIdHeaderName, websocketPort) {
  });
  });
 
- // Store reference
- state[wsPortKey][tcpPortKey].tcpServer = server;
+ // Note: We don't store the server reference here because the state structure
+ // doesn't exist yet (it's created in messageHandler.js AFTER this function returns).
+ // The caller (messageHandler.js) is responsible for storing the server reference.
 
- server.listen(port, () => {
- logger.info(`TCP server listening on port ${port} for websocketPort ${websocketPort}`);
+ // Return a promise that resolves when listening or rejects on error
+ return new Promise((resolve, reject) => {
+ server.on('listening', () => {
+ logger.info(`TCP server listening on port ${port} for websocketPort ${websocketPort}`);
+ resolve(server);
+ });
+
+ server.on('error', err => {
+ logger.error(`TCP server error on port ${port}:`, err);
+ reject(err);
+ });
+
+ // Use reuseAddr to allow quick port reuse after server restart (TIME_WAIT)
+ // This helps with Node-RED restarts where the old server might be in TIME_WAIT state
+ server.listen({
+ port: port,
+ host: '0.0.0.0',
+ reuseAddr: true,
+ }, () => {
+ // The server.address() returns the actual port bound (handles port === 0 case)
+ const addr = server.address();
+ logger.info(`TCP server listening on port ${addr.port} for websocketPort ${websocketPort}`);
+ resolve(server);
+ });
  });
+ }
 
- server.on('error', err => {
- logger.error(`TCP server error on port ${port}:`, err);
+ /**
+ * Forcefully kills any process using the specified port by connecting to it
+ * and keeping the connection open briefly, then attempts to bind the port.
+ * This helps release the port from a previous process.
+ *
+ * @param {number} port - The port to clear
+ * @returns {Promise<boolean>} True if we successfully cleared the port
+ */
+ async function forceClosePort(port) {
+ return new Promise((resolve) => {
+ const socket = new net.Socket();
+
+ socket.setTimeout(500);
+
+ socket.on('connect', () => {
+ // Connected to existing server - destroy it and try to take over
+ socket.destroy();
+
+ // Now try to bind to the port - this should cause the OS to close the old server
+ const takeover = net.createServer();
+ takeover.on('error', (err) => {
+ if (err.code === 'EADDRINUSE') {
+ // Try again with a different approach - wait a bit
+ setTimeout(() => resolve(true), 100);
+ } else {
+ resolve(false);
+ }
+ });
+
+ takeover.on('listening', () => {
+ // We got the port! Close our temp server
+ takeover.close(() => resolve(true));
+ });
+
+ takeover.listen(port);
+ });
+
+ socket.on('timeout', () => {
+ socket.destroy();
+ resolve(false);
+ });
+
+ socket.on('error', () => {
+ resolve(false);
+ });
+
+ socket.connect(port, '127.0.0.1');
  });
  }
 
- module.exports = { startTCPServer };
+ /**
+ * Ensures a TCP server is available on the specified port.
+ * If a server is already listening on the port (from a previous process),
+ * it will be forcefully closed before creating a new one.
+ *
+ * @param {number} port - The TCP port to bind to
+ * @param {string} tunnelIdHeaderName - Header name for tunnel identification
+ * @param {number} websocketPort - The WebSocket port (used as state key)
+ * @returns {Promise<net.Server>} The TCP server instance
+ */
+ async function ensureTCPServer(port, tunnelIdHeaderName, websocketPort) {
+ const maxRetries = 5;
+ const retryDelay = 300;
+
+ for (let attempt = 1; attempt <= maxRetries; attempt++) {
+ try {
+ // Try to start the TCP server
+ return await startTCPServer(port, tunnelIdHeaderName, websocketPort);
+ } catch (err) {
+ if (err.code === 'EADDRINUSE') {
+ logger.warn(`Port ${port} in use (attempt ${attempt}/${maxRetries}), attempting to force close...`);
+
+ // Try to force close the port
+ await forceClosePort(port);
+
+ // Wait before retrying
+ await new Promise(resolve => setTimeout(resolve, retryDelay * attempt));
+ } else {
+ throw err;
+ }
+ }
+ }
+
+ // Last attempt - don't catch, let it fail
+ return startTCPServer(port, tunnelIdHeaderName, websocketPort);
+ }
+
+ module.exports = { startTCPServer, ensureTCPServer, forceClosePort };
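
A condensed sketch of how the new `ensureTCPServer` helper is consumed, mirroring the call in the message handler shown earlier in this diff. The port numbers and header name below are placeholders; the signature `ensureTCPServer(port, tunnelIdHeaderName, websocketPort)` and the 5-attempt EADDRINUSE retry behaviour come from the code above.

```javascript
// Inside the package's server code (relative path as in messageHandler.js);
// the numeric values and header name are illustrative only.
const { ensureTCPServer } = require('./tcpServer');

async function openTunnelEntry() {
  try {
    // Retries startTCPServer up to 5 times, calling forceClosePort and backing
    // off (300 ms * attempt) whenever the bind fails with EADDRINUSE.
    const tcpServer = await ensureTCPServer(9000, 'x-tunnel-id', 8080);
    console.log('TCP entry listening:', tcpServer.listening); // true once bound
    return tcpServer;
  } catch (err) {
    console.error('Could not bind TCP entry port:', err.message);
    throw err;
  }
}
```
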
package/server/websocketServer.js CHANGED
@@ -154,4 +154,88 @@ function startWebSocketServer({ port, host, path, tunnelIdHeaderName }) {
  return state;
  }
 
- module.exports = { startWebSocketServer };
+ /**
+ * Stops the WebSocket tunnel server and cleans up all resources.
+ * @param {number} port - Port of the WebSocket server to stop.
+ * @returns {Promise<void>} Resolves when cleanup is complete.
+ */
+ async function stopWebSocketServer(port) {
+ const portKey = String(port);
+ const serverState = state[portKey];
+
+ if (!serverState) {
+ logger.debug(`No server found on port ${port}, nothing to stop`);
+ return;
+ }
+
+ logger.info(`Stopping WebSocket server on port ${port}...`);
+
+ // 1. Close all active WebSocket connections (triggers cleanup for each tunnel)
+ if (serverState.websocketTunnels) {
+ for (const [tunnelId, tunnel] of Object.entries(serverState.websocketTunnels)) {
+ if (tunnel.ws && tunnel.ws.readyState === WebSocket.OPEN) {
+ tunnel.ws.close(1000, 'Server shutting down');
+ }
+ }
+ }
+
+ // 2. Close all TCP servers in per-port state
+ logger.debug(`[CLEANUP] Checking per-port state for TCP servers. Keys: ${Object.keys(serverState).join(', ')}`);
+ for (const [tcpPort, tcpState] of Object.entries(serverState)) {
+ if (tcpPort !== 'webSocketServer' && tcpPort !== 'websocketTunnels' && tcpState?.tcpServer) {
+ logger.info(`[CLEANUP] Closing TCP server in per-port state on port ${tcpPort}`);
+ await new Promise((resolve) => {
+ tcpState.tcpServer.close(() => {
+ logger.info(`[CLEANUP] Closed TCP server on port ${tcpPort}`);
+ resolve();
+ });
+ });
+ }
+ }
+
+ // 2b. Close all TCP servers in global tcpServers registry
+ // This handles TCP servers that may not be in state yet (client not reconnected)
+ // Note: We close ALL servers in registry, not just listening ones, because they may have been
+ // closed in per-port cleanup but still exist in global registry
+ const globalTcpServerCount = Object.keys(state.tcpServers || {}).length;
+ logger.info(`[CLEANUP] Global tcpServers registry has ${globalTcpServerCount} entries: ${Object.keys(state.tcpServers || {}).join(', ')}`);
+ if (state.tcpServers && globalTcpServerCount > 0) {
+ for (const [tcpPort, tcpServer] of Object.entries(state.tcpServers)) {
+ logger.info(`[CLEANUP] Checking global TCP server on port ${tcpPort}: exists=${!!tcpServer}, listening=${tcpServer?.listening}`);
+ // Close any server that exists, regardless of listening state (it may have been closed in per-port cleanup)
+ if (tcpServer) {
+ if (tcpServer.listening) {
+ logger.info(`[CLEANUP] Closing global TCP server on port ${tcpPort} (listening)...`);
+ await new Promise((resolve) => {
+ tcpServer.close(() => {
+ logger.info(`[CLEANUP] Closed global TCP server on port ${tcpPort}`);
+ resolve();
+ });
+ });
+ } else {
+ // Server exists but not listening - it was already closed in per-port cleanup
+ // Just log and clear from registry
+ logger.info(`[CLEANUP] Global TCP server on port ${tcpPort} already closed (listening=false), clearing from registry`);
+ }
+ }
+ }
+ // Clear the global tcpServers registry
+ state.tcpServers = {};
+ }
+
+ // 3. Close the main WebSocket server
+ if (serverState.webSocketServer) {
+ await new Promise((resolve) => {
+ serverState.webSocketServer.close(() => {
+ logger.debug(`Closed WebSocket server on port ${port}`);
+ resolve();
+ });
+ });
+ }
+
+ // 4. Clean up state
+ delete state[portKey];
+ logger.info(`WebSocket server on port ${port} stopped and state cleaned`);
+ }
+
+ module.exports = { startWebSocketServer, stopWebSocketServer };
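
The changelog's Node-RED fix (cleanup on startup and on close) would typically be wired roughly as sketched below. The node name, config fields, and `/server` require path are illustrative assumptions; only `startWebSocketServer` and `stopWebSocketServer` come from this diff.

```javascript
// Hypothetical Node-RED wrapper node; everything except the two exported
// package functions is assumed for illustration.
module.exports = function (RED) {
  const { startWebSocketServer, stopWebSocketServer } = require('@remotelinker/reverse-ws-tunnel/server');

  function TunnelServerNode(config) {
    RED.nodes.createNode(this, config);

    (async () => {
      // Defensive cleanup in case a previous deploy did not shut down cleanly.
      await stopWebSocketServer(config.port);
      startWebSocketServer({
        port: config.port,
        host: config.host,
        path: config.path,
        tunnelIdHeaderName: config.tunnelIdHeaderName,
      });
    })();

    this.on('close', async (done) => {
      // Releases the port, TCP servers, and heartbeat intervals on redeploy.
      await stopWebSocketServer(config.port);
      done();
    });
  }

  RED.nodes.registerType('reverse-ws-tunnel-server', TunnelServerNode);
};
```
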
package/utils/index.js CHANGED
@@ -1,8 +1,11 @@
- const { setLogLevel, getLogLevel } = require('./logger.js');
+ const { setLogLevel, getLogLevel, setLogContext, getLogContext, logger } = require('./logger.js');
  const { loadConfig } = require('./loadConfig.js');
 
  module.exports = {
  setLogLevel,
  getLogLevel,
+ setLogContext,
+ getLogContext,
+ logger,
  loadConfig,
  };