threadforge 0.1.1 → 0.2.1
This diff reflects the changes between publicly released versions of the package as they appear in their public registry, and is provided for informational purposes only.
- package/README.md +52 -20
- package/bin/forge.js +2 -1058
- package/bin/host-commands.d.ts +2 -0
- package/bin/host-commands.d.ts.map +1 -0
- package/bin/host-commands.js +7 -8
- package/bin/platform-commands.d.ts +2 -0
- package/bin/platform-commands.d.ts.map +1 -0
- package/bin/platform-commands.js +118 -36
- package/dist/cli/base-command.d.ts +12 -0
- package/dist/cli/base-command.d.ts.map +1 -0
- package/dist/cli/base-command.js +25 -0
- package/dist/cli/base-command.js.map +1 -0
- package/dist/cli/commands/build.d.ts +10 -0
- package/dist/cli/commands/build.d.ts.map +1 -0
- package/dist/cli/commands/build.js +110 -0
- package/dist/cli/commands/build.js.map +1 -0
- package/dist/cli/commands/deploy.d.ts +12 -0
- package/dist/cli/commands/deploy.d.ts.map +1 -0
- package/dist/cli/commands/deploy.js +143 -0
- package/dist/cli/commands/deploy.js.map +1 -0
- package/dist/cli/commands/dev.d.ts +10 -0
- package/dist/cli/commands/dev.d.ts.map +1 -0
- package/dist/cli/commands/dev.js +138 -0
- package/dist/cli/commands/dev.js.map +1 -0
- package/dist/cli/commands/generate.d.ts +10 -0
- package/dist/cli/commands/generate.d.ts.map +1 -0
- package/dist/cli/commands/generate.js +76 -0
- package/dist/cli/commands/generate.js.map +1 -0
- package/dist/cli/commands/host.d.ts +8 -0
- package/dist/cli/commands/host.d.ts.map +1 -0
- package/dist/cli/commands/host.js +20 -0
- package/dist/cli/commands/host.js.map +1 -0
- package/dist/cli/commands/init.d.ts +16 -0
- package/dist/cli/commands/init.d.ts.map +1 -0
- package/dist/cli/commands/init.js +246 -0
- package/dist/cli/commands/init.js.map +1 -0
- package/dist/cli/commands/platform.d.ts +8 -0
- package/dist/cli/commands/platform.d.ts.map +1 -0
- package/dist/cli/commands/platform.js +20 -0
- package/dist/cli/commands/platform.js.map +1 -0
- package/dist/cli/commands/restart.d.ts +8 -0
- package/dist/cli/commands/restart.d.ts.map +1 -0
- package/dist/cli/commands/restart.js +13 -0
- package/dist/cli/commands/restart.js.map +1 -0
- package/dist/cli/commands/scaffold/frontend.d.ts +10 -0
- package/dist/cli/commands/scaffold/frontend.d.ts.map +1 -0
- package/dist/cli/commands/scaffold/frontend.js +130 -0
- package/dist/cli/commands/scaffold/frontend.js.map +1 -0
- package/dist/cli/commands/scaffold/react.d.ts +7 -0
- package/dist/cli/commands/scaffold/react.d.ts.map +1 -0
- package/dist/cli/commands/scaffold/react.js +12 -0
- package/dist/cli/commands/scaffold/react.js.map +1 -0
- package/dist/cli/commands/scale.d.ts +8 -0
- package/dist/cli/commands/scale.d.ts.map +1 -0
- package/dist/cli/commands/scale.js +13 -0
- package/dist/cli/commands/scale.js.map +1 -0
- package/dist/cli/commands/start.d.ts +10 -0
- package/dist/cli/commands/start.d.ts.map +1 -0
- package/dist/cli/commands/start.js +71 -0
- package/dist/cli/commands/start.js.map +1 -0
- package/dist/cli/commands/status.d.ts +11 -0
- package/dist/cli/commands/status.d.ts.map +1 -0
- package/dist/cli/commands/status.js +60 -0
- package/dist/cli/commands/status.js.map +1 -0
- package/dist/cli/commands/stop.d.ts +10 -0
- package/dist/cli/commands/stop.d.ts.map +1 -0
- package/dist/cli/commands/stop.js +89 -0
- package/dist/cli/commands/stop.js.map +1 -0
- package/dist/cli/util/config-discovery.d.ts +8 -0
- package/dist/cli/util/config-discovery.d.ts.map +1 -0
- package/dist/cli/util/config-discovery.js +70 -0
- package/dist/cli/util/config-discovery.js.map +1 -0
- package/dist/cli/util/config-patcher.d.ts +17 -0
- package/dist/cli/util/config-patcher.d.ts.map +1 -0
- package/dist/cli/util/config-patcher.js +439 -0
- package/dist/cli/util/config-patcher.js.map +1 -0
- package/dist/cli/util/frontend-dev.d.ts +8 -0
- package/dist/cli/util/frontend-dev.d.ts.map +1 -0
- package/dist/cli/util/frontend-dev.js +117 -0
- package/dist/cli/util/frontend-dev.js.map +1 -0
- package/dist/cli/util/process.d.ts +5 -0
- package/dist/cli/util/process.d.ts.map +1 -0
- package/dist/cli/util/process.js +17 -0
- package/dist/cli/util/process.js.map +1 -0
- package/dist/cli/util/templates.d.ts +10 -0
- package/dist/cli/util/templates.d.ts.map +1 -0
- package/dist/cli/util/templates.js +157 -0
- package/dist/cli/util/templates.js.map +1 -0
- package/dist/core/AlertSink.d.ts +83 -0
- package/dist/core/AlertSink.d.ts.map +1 -0
- package/dist/core/AlertSink.js +126 -0
- package/dist/core/AlertSink.js.map +1 -0
- package/dist/core/DirectMessageBus.d.ts +88 -0
- package/dist/core/DirectMessageBus.d.ts.map +1 -0
- package/dist/core/DirectMessageBus.js +352 -0
- package/dist/core/DirectMessageBus.js.map +1 -0
- package/dist/core/EndpointResolver.d.ts +111 -0
- package/dist/core/EndpointResolver.d.ts.map +1 -0
- package/dist/core/EndpointResolver.js +336 -0
- package/dist/core/EndpointResolver.js.map +1 -0
- package/dist/core/ForgeContext.d.ts +221 -0
- package/dist/core/ForgeContext.d.ts.map +1 -0
- package/dist/core/ForgeContext.js +1169 -0
- package/dist/core/ForgeContext.js.map +1 -0
- package/dist/core/ForgeEndpoints.d.ts +71 -0
- package/dist/core/ForgeEndpoints.d.ts.map +1 -0
- package/dist/core/ForgeEndpoints.js +442 -0
- package/dist/core/ForgeEndpoints.js.map +1 -0
- package/dist/core/ForgeHost.d.ts +82 -0
- package/dist/core/ForgeHost.d.ts.map +1 -0
- package/dist/core/ForgeHost.js +107 -0
- package/dist/core/ForgeHost.js.map +1 -0
- package/dist/core/ForgePlatform.d.ts +96 -0
- package/dist/core/ForgePlatform.d.ts.map +1 -0
- package/dist/core/ForgePlatform.js +136 -0
- package/dist/core/ForgePlatform.js.map +1 -0
- package/dist/core/ForgeWebSocket.d.ts +56 -0
- package/dist/core/ForgeWebSocket.d.ts.map +1 -0
- package/dist/core/ForgeWebSocket.js +415 -0
- package/dist/core/ForgeWebSocket.js.map +1 -0
- package/dist/core/Ingress.d.ts +329 -0
- package/dist/core/Ingress.d.ts.map +1 -0
- package/dist/core/Ingress.js +694 -0
- package/dist/core/Ingress.js.map +1 -0
- package/dist/core/Interceptors.d.ts +134 -0
- package/dist/core/Interceptors.d.ts.map +1 -0
- package/dist/core/Interceptors.js +416 -0
- package/dist/core/Interceptors.js.map +1 -0
- package/dist/core/Logger.d.ts +20 -0
- package/dist/core/Logger.d.ts.map +1 -0
- package/dist/core/Logger.js +77 -0
- package/dist/core/Logger.js.map +1 -0
- package/dist/core/MessageBus.d.ts +15 -0
- package/dist/core/MessageBus.d.ts.map +1 -0
- package/dist/core/MessageBus.js +18 -0
- package/dist/core/MessageBus.js.map +1 -0
- package/dist/core/Prometheus.d.ts +80 -0
- package/dist/core/Prometheus.d.ts.map +1 -0
- package/dist/core/Prometheus.js +332 -0
- package/dist/core/Prometheus.js.map +1 -0
- package/dist/core/RequestContext.d.ts +214 -0
- package/dist/core/RequestContext.d.ts.map +1 -0
- package/dist/core/RequestContext.js +556 -0
- package/dist/core/RequestContext.js.map +1 -0
- package/dist/core/Router.d.ts +45 -0
- package/dist/core/Router.d.ts.map +1 -0
- package/dist/core/Router.js +285 -0
- package/dist/core/Router.js.map +1 -0
- package/dist/core/RoutingStrategy.d.ts +116 -0
- package/dist/core/RoutingStrategy.d.ts.map +1 -0
- package/dist/core/RoutingStrategy.js +306 -0
- package/dist/core/RoutingStrategy.js.map +1 -0
- package/dist/core/RpcConfig.d.ts +72 -0
- package/dist/core/RpcConfig.d.ts.map +1 -0
- package/dist/core/RpcConfig.js +127 -0
- package/dist/core/RpcConfig.js.map +1 -0
- package/dist/core/SignatureCache.d.ts +81 -0
- package/dist/core/SignatureCache.d.ts.map +1 -0
- package/dist/core/SignatureCache.js +172 -0
- package/dist/core/SignatureCache.js.map +1 -0
- package/dist/core/StaticFileServer.d.ts +34 -0
- package/dist/core/StaticFileServer.d.ts.map +1 -0
- package/dist/core/StaticFileServer.js +497 -0
- package/dist/core/StaticFileServer.js.map +1 -0
- package/dist/core/Supervisor.d.ts +198 -0
- package/dist/core/Supervisor.d.ts.map +1 -0
- package/dist/core/Supervisor.js +1418 -0
- package/dist/core/Supervisor.js.map +1 -0
- package/dist/core/ThreadAllocator.d.ts +52 -0
- package/dist/core/ThreadAllocator.d.ts.map +1 -0
- package/dist/core/ThreadAllocator.js +174 -0
- package/dist/core/ThreadAllocator.js.map +1 -0
- package/dist/core/WorkerChannelManager.d.ts +130 -0
- package/dist/core/WorkerChannelManager.d.ts.map +1 -0
- package/dist/core/WorkerChannelManager.js +956 -0
- package/dist/core/WorkerChannelManager.js.map +1 -0
- package/dist/core/config-enums.d.ts +41 -0
- package/dist/core/config-enums.d.ts.map +1 -0
- package/dist/core/config-enums.js +59 -0
- package/dist/core/config-enums.js.map +1 -0
- package/dist/core/config.d.ts +159 -0
- package/dist/core/config.d.ts.map +1 -0
- package/dist/core/config.js +694 -0
- package/dist/core/config.js.map +1 -0
- package/dist/core/host-config.d.ts +146 -0
- package/dist/core/host-config.d.ts.map +1 -0
- package/dist/core/host-config.js +312 -0
- package/dist/core/host-config.js.map +1 -0
- package/dist/core/ipc-errors.d.ts +27 -0
- package/dist/core/ipc-errors.d.ts.map +1 -0
- package/dist/core/ipc-errors.js +36 -0
- package/dist/core/ipc-errors.js.map +1 -0
- package/dist/core/network-utils.d.ts +35 -0
- package/dist/core/network-utils.d.ts.map +1 -0
- package/dist/core/network-utils.js +145 -0
- package/dist/core/network-utils.js.map +1 -0
- package/dist/core/platform-config.d.ts +142 -0
- package/dist/core/platform-config.d.ts.map +1 -0
- package/dist/core/platform-config.js +299 -0
- package/dist/core/platform-config.js.map +1 -0
- package/dist/decorators/ServiceProxy.d.ts +175 -0
- package/dist/decorators/ServiceProxy.d.ts.map +1 -0
- package/dist/decorators/ServiceProxy.js +969 -0
- package/dist/decorators/ServiceProxy.js.map +1 -0
- package/dist/decorators/index.d.ts +146 -0
- package/dist/decorators/index.d.ts.map +1 -0
- package/dist/decorators/index.js +545 -0
- package/dist/decorators/index.js.map +1 -0
- package/dist/deploy/NginxGenerator.d.ts +165 -0
- package/dist/deploy/NginxGenerator.d.ts.map +1 -0
- package/dist/deploy/NginxGenerator.js +781 -0
- package/dist/deploy/NginxGenerator.js.map +1 -0
- package/dist/deploy/PlatformManifestGenerator.d.ts +43 -0
- package/dist/deploy/PlatformManifestGenerator.d.ts.map +1 -0
- package/dist/deploy/PlatformManifestGenerator.js +80 -0
- package/dist/deploy/PlatformManifestGenerator.js.map +1 -0
- package/dist/deploy/RouteManifestGenerator.d.ts +42 -0
- package/dist/deploy/RouteManifestGenerator.d.ts.map +1 -0
- package/dist/deploy/RouteManifestGenerator.js +105 -0
- package/dist/deploy/RouteManifestGenerator.js.map +1 -0
- package/dist/deploy/index.d.ts +210 -0
- package/dist/deploy/index.d.ts.map +1 -0
- package/dist/deploy/index.js +918 -0
- package/dist/deploy/index.js.map +1 -0
- package/dist/frontend/FrontendDevLifecycle.d.ts +26 -0
- package/dist/frontend/FrontendDevLifecycle.d.ts.map +1 -0
- package/dist/frontend/FrontendDevLifecycle.js +60 -0
- package/dist/frontend/FrontendDevLifecycle.js.map +1 -0
- package/dist/frontend/FrontendPluginOrchestrator.d.ts +64 -0
- package/dist/frontend/FrontendPluginOrchestrator.d.ts.map +1 -0
- package/dist/frontend/FrontendPluginOrchestrator.js +167 -0
- package/dist/frontend/FrontendPluginOrchestrator.js.map +1 -0
- package/dist/frontend/SiteResolver.d.ts +33 -0
- package/dist/frontend/SiteResolver.d.ts.map +1 -0
- package/dist/frontend/SiteResolver.js +53 -0
- package/dist/frontend/SiteResolver.js.map +1 -0
- package/dist/frontend/StaticMountRegistry.d.ts +36 -0
- package/dist/frontend/StaticMountRegistry.d.ts.map +1 -0
- package/dist/frontend/StaticMountRegistry.js +94 -0
- package/dist/frontend/StaticMountRegistry.js.map +1 -0
- package/dist/frontend/index.d.ts +7 -0
- package/dist/frontend/index.d.ts.map +1 -0
- package/{src → dist}/frontend/index.js +4 -2
- package/dist/frontend/index.js.map +1 -0
- package/dist/frontend/pathUtils.d.ts +8 -0
- package/dist/frontend/pathUtils.d.ts.map +1 -0
- package/dist/frontend/pathUtils.js +17 -0
- package/dist/frontend/pathUtils.js.map +1 -0
- package/dist/frontend/plugins/index.d.ts +2 -0
- package/dist/frontend/plugins/index.d.ts.map +1 -0
- package/{src → dist}/frontend/plugins/index.js +1 -1
- package/dist/frontend/plugins/index.js.map +1 -0
- package/dist/frontend/plugins/viteFrontend.d.ts +51 -0
- package/dist/frontend/plugins/viteFrontend.d.ts.map +1 -0
- package/dist/frontend/plugins/viteFrontend.js +134 -0
- package/dist/frontend/plugins/viteFrontend.js.map +1 -0
- package/dist/frontend/types.d.ts +25 -0
- package/dist/frontend/types.d.ts.map +1 -0
- package/dist/frontend/types.js +2 -0
- package/dist/frontend/types.js.map +1 -0
- package/dist/index.d.ts +17 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +32 -0
- package/dist/index.js.map +1 -0
- package/dist/internals.d.ts +21 -0
- package/dist/internals.d.ts.map +1 -0
- package/{src → dist}/internals.js +12 -14
- package/dist/internals.js.map +1 -0
- package/dist/plugins/PluginManager.d.ts +209 -0
- package/dist/plugins/PluginManager.d.ts.map +1 -0
- package/dist/plugins/PluginManager.js +365 -0
- package/dist/plugins/PluginManager.js.map +1 -0
- package/dist/plugins/ScopedPostgres.d.ts +78 -0
- package/dist/plugins/ScopedPostgres.d.ts.map +1 -0
- package/dist/plugins/ScopedPostgres.js +190 -0
- package/dist/plugins/ScopedPostgres.js.map +1 -0
- package/dist/plugins/ScopedRedis.d.ts +88 -0
- package/dist/plugins/ScopedRedis.d.ts.map +1 -0
- package/dist/plugins/ScopedRedis.js +169 -0
- package/dist/plugins/ScopedRedis.js.map +1 -0
- package/dist/plugins/index.d.ts +289 -0
- package/dist/plugins/index.d.ts.map +1 -0
- package/dist/plugins/index.js +1942 -0
- package/dist/plugins/index.js.map +1 -0
- package/dist/plugins/types.d.ts +59 -0
- package/dist/plugins/types.d.ts.map +1 -0
- package/dist/plugins/types.js +2 -0
- package/dist/plugins/types.js.map +1 -0
- package/dist/registry/ServiceRegistry.d.ts +305 -0
- package/dist/registry/ServiceRegistry.d.ts.map +1 -0
- package/dist/registry/ServiceRegistry.js +735 -0
- package/dist/registry/ServiceRegistry.js.map +1 -0
- package/dist/scaling/ScaleAdvisor.d.ts +214 -0
- package/dist/scaling/ScaleAdvisor.d.ts.map +1 -0
- package/dist/scaling/ScaleAdvisor.js +526 -0
- package/dist/scaling/ScaleAdvisor.js.map +1 -0
- package/dist/services/Service.d.ts +164 -0
- package/dist/services/Service.d.ts.map +1 -0
- package/dist/services/Service.js +106 -0
- package/dist/services/Service.js.map +1 -0
- package/dist/services/worker-bootstrap.d.ts +15 -0
- package/dist/services/worker-bootstrap.d.ts.map +1 -0
- package/dist/services/worker-bootstrap.js +744 -0
- package/dist/services/worker-bootstrap.js.map +1 -0
- package/dist/templates/auth-service.d.ts +42 -0
- package/dist/templates/auth-service.d.ts.map +1 -0
- package/dist/templates/auth-service.js +54 -0
- package/dist/templates/auth-service.js.map +1 -0
- package/dist/templates/identity-service.d.ts +50 -0
- package/dist/templates/identity-service.d.ts.map +1 -0
- package/dist/templates/identity-service.js +62 -0
- package/dist/templates/identity-service.js.map +1 -0
- package/dist/types/contract.d.ts +120 -0
- package/dist/types/contract.d.ts.map +1 -0
- package/dist/types/contract.js +69 -0
- package/dist/types/contract.js.map +1 -0
- package/package.json +78 -20
- package/src/core/DirectMessageBus.js +0 -364
- package/src/core/EndpointResolver.js +0 -259
- package/src/core/ForgeContext.js +0 -2236
- package/src/core/ForgeHost.js +0 -122
- package/src/core/ForgePlatform.js +0 -145
- package/src/core/Ingress.js +0 -768
- package/src/core/Interceptors.js +0 -420
- package/src/core/MessageBus.js +0 -321
- package/src/core/Prometheus.js +0 -305
- package/src/core/RequestContext.js +0 -413
- package/src/core/RoutingStrategy.js +0 -330
- package/src/core/Supervisor.js +0 -1349
- package/src/core/ThreadAllocator.js +0 -196
- package/src/core/WorkerChannelManager.js +0 -879
- package/src/core/config.js +0 -637
- package/src/core/host-config.js +0 -311
- package/src/core/network-utils.js +0 -166
- package/src/core/platform-config.js +0 -308
- package/src/decorators/ServiceProxy.js +0 -904
- package/src/decorators/index.js +0 -571
- package/src/deploy/NginxGenerator.js +0 -865
- package/src/deploy/PlatformManifestGenerator.js +0 -96
- package/src/deploy/RouteManifestGenerator.js +0 -112
- package/src/deploy/index.js +0 -984
- package/src/frontend/FrontendDevLifecycle.js +0 -65
- package/src/frontend/FrontendPluginOrchestrator.js +0 -187
- package/src/frontend/SiteResolver.js +0 -63
- package/src/frontend/StaticMountRegistry.js +0 -90
- package/src/frontend/plugins/viteFrontend.js +0 -79
- package/src/frontend/types.js +0 -35
- package/src/index.js +0 -58
- package/src/plugins/PluginManager.js +0 -537
- package/src/plugins/ScopedPostgres.js +0 -192
- package/src/plugins/ScopedRedis.js +0 -142
- package/src/plugins/index.js +0 -1756
- package/src/registry/ServiceRegistry.js +0 -797
- package/src/scaling/ScaleAdvisor.js +0 -442
- package/src/services/Service.js +0 -195
- package/src/services/worker-bootstrap.js +0 -679
- package/src/templates/auth-service.js +0 -65
- package/src/templates/identity-service.js +0 -75
package/dist/core/WorkerChannelManager.js (new file)
@@ -0,0 +1,956 @@

```js
import { createHmac, randomUUID, timingSafeEqual } from "node:crypto";
import fs from "node:fs";
import net from "node:net";
import path from "node:path";
import { IPC_PROTOCOL_VERSION } from "./ipc-errors.js";
/**
 * WorkerChannelManager — Unix Domain Socket Mesh (Worker Side)
 *
 * Each worker runs a UDS server. When it receives the socket registry
 * from the supervisor, it connects directly to other workers' UDS servers.
 *
 * Protocol: Length-prefixed JSON frames
 * [4 bytes: message length (UInt32BE)][JSON payload]
 *
 * This gives us:
 * - Direct worker-to-worker communication (no supervisor in the path)
 * - Full duplex (both sides can send at any time)
 * - No serialization bottleneck at the supervisor
 * - Automatic reconnection on socket errors
 */
/** Maximum UDS message size: 16 MB */
const MAX_UDS_MESSAGE = 16 * 1024 * 1024;
/** Maximum number of pending requests before rejecting new ones */
const MAX_PENDING_REQUESTS = 10000;
/** Maximum socket write buffer size before refusing new writes (4 MB) */
const MAX_WRITE_BUFFER = 4 * 1024 * 1024;
/** Maximum parse errors before destroying a socket */
const MAX_PARSE_ERRORS = 3;
/** Handshake timeout for server-side inbound connections (ms) */
const HANDSHAKE_TIMEOUT = 5000;
/** Maximum reconnection attempts before giving up on a peer */
const MAX_RECONNECT_ATTEMPTS = 10;
/** IPC-H2: Maximum queued messages per service during reconnection */
const MAX_RECONNECT_QUEUE = 1000;
/**
 * Create a reusable frame parser for length-prefixed JSON frames.
 * Encapsulates the buffer-list pattern to avoid O(n^2) Buffer.concat on every chunk.
 */
function createFrameParser(onFrame, serviceName, socket) {
  const chunks = [];
  let totalLength = 0;
  let buffer = Buffer.alloc(0);
  let parseErrorCount = 0;
  let skipRemaining = 0; // bytes left to discard from an oversized message
  return (chunk) => {
    // If we're discarding bytes from an oversized message, consume from this chunk first
    if (skipRemaining > 0) {
      if (chunk.length <= skipRemaining) {
        skipRemaining -= chunk.length;
        return; // entire chunk consumed by skip
      }
      chunk = chunk.subarray(skipRemaining);
      skipRemaining = 0;
    }
    // Reject oversized individual chunks before any allocation
    if (chunk.length > MAX_UDS_MESSAGE + 4) {
      parseErrorCount++;
      if (parseErrorCount >= MAX_PARSE_ERRORS) {
        console.error(`[${serviceName}] UDS chunk too large (${chunk.length} bytes), ${parseErrorCount} parse errors — destroying connection`);
        chunks.length = 0;
        totalLength = 0;
        buffer = Buffer.alloc(0);
        socket.destroy();
      } else {
        console.warn(`[${serviceName}] UDS chunk too large (${chunk.length} bytes), skipping (error ${parseErrorCount}/${MAX_PARSE_ERRORS})`);
      }
      return;
    }
    // Guard against buffer overflow (buffer + pending chunks + new chunk)
    if (buffer.length + totalLength + chunk.length > MAX_UDS_MESSAGE + 4) {
      parseErrorCount++;
      if (parseErrorCount >= MAX_PARSE_ERRORS) {
        console.error(`[${serviceName}] UDS buffer overflow detected, ${parseErrorCount} parse errors — destroying connection`);
        chunks.length = 0;
        totalLength = 0;
        buffer = Buffer.alloc(0);
        socket.destroy();
      } else {
        console.warn(`[${serviceName}] UDS buffer overflow detected, skipping frame (error ${parseErrorCount}/${MAX_PARSE_ERRORS})`);
        // IPC-M1: Drain any complete valid frames from the existing buffer before discarding
        if (chunks.length > 0) {
          buffer = Buffer.concat([buffer, ...chunks]);
          chunks.length = 0;
          totalLength = 0;
        }
        while (buffer.length >= 4) {
          const frameLen = buffer.readUInt32BE(0);
          if (frameLen > MAX_UDS_MESSAGE) {
            // This frame is oversized — discard the rest of the buffer
            break;
          }
          if (buffer.length < 4 + frameLen) {
            // Incomplete frame — can't recover it without the new chunk
            break;
          }
          const frameBuf = buffer.subarray(4, 4 + frameLen);
          buffer = buffer.subarray(4 + frameLen);
          try {
            const msg = JSON.parse(frameBuf.toString(), (key, value) => {
              if (key === "__proto__" || key === "constructor" || key === "prototype")
                return undefined;
              return value;
            });
            onFrame(msg);
          } catch {
            // skip unparseable frame
          }
        }
        // Discard whatever remains (partial or oversized)
        buffer = Buffer.alloc(0);
      }
      return;
    }
    chunks.push(chunk);
    totalLength += chunk.length;
    // Only concatenate when we might have a complete frame
    if (buffer.length + totalLength < 4)
      return;
    // Merge chunks into buffer for frame parsing
    if (chunks.length > 0) {
      buffer = Buffer.concat([buffer, ...chunks]);
      chunks.length = 0;
      totalLength = 0;
    }
    // Parse length-prefixed frames
    while (buffer.length >= 4) {
      const msgLen = buffer.readUInt32BE(0);
      if (msgLen > MAX_UDS_MESSAGE) {
        parseErrorCount++;
        if (parseErrorCount >= MAX_PARSE_ERRORS) {
          console.error(`[${serviceName}] UDS message too large (${msgLen} bytes), ${parseErrorCount} parse errors — destroying socket`);
          buffer = Buffer.alloc(0);
          socket.destroy();
          return;
        }
        console.warn(`[${serviceName}] UDS message too large (${msgLen} bytes), skipping full frame (error ${parseErrorCount}/${MAX_PARSE_ERRORS})`);
        // Skip the entire frame: 4-byte length prefix + msgLen body
        const totalFrameSize = 4 + msgLen;
        if (buffer.length >= totalFrameSize) {
          // Full oversized frame is in buffer — skip it entirely
          buffer = buffer.subarray(totalFrameSize);
        } else {
          // Frame body hasn't fully arrived — track remaining bytes to discard
          skipRemaining = totalFrameSize - buffer.length;
          buffer = Buffer.alloc(0);
        }
        continue;
      }
      if (buffer.length < 4 + msgLen)
        break; // wait for more data
      const msgBuf = buffer.subarray(4, 4 + msgLen);
      buffer = buffer.subarray(4 + msgLen);
      try {
        // COR-C2: Reviver prevents prototype pollution via __proto__/constructor/prototype keys
        const msg = JSON.parse(msgBuf.toString(), (key, value) => {
          if (key === "__proto__" || key === "constructor" || key === "prototype")
            return undefined;
          return value;
        });
        onFrame(msg);
      } catch (err) {
        const message = err instanceof Error ? err.message : String(err);
        console.error(`[${serviceName}] Failed to parse UDS message:`, message);
      }
    }
  };
}
/**
 * IPC-C2: Safe JSON serializer that handles circular references, BigInt, and Buffer.
 */
function safeJsonStringify(obj) {
  const seen = new WeakSet();
  return JSON.stringify(obj, (_key, value) => {
    // Handle BigInt
    if (typeof value === "bigint") {
      return { __bigint: value.toString() };
    }
    // Handle Buffer
    if (Buffer.isBuffer(value)) {
      return { __buffer: value.toString("base64") };
    }
    // Detect circular references
    if (value !== null && typeof value === "object") {
      if (seen.has(value)) {
        throw new TypeError("Cannot serialize IPC message: circular reference detected");
      }
      seen.add(value);
    }
    return value;
  });
}
export class WorkerChannelManager {
  serviceName;
  workerId;
  _dependencies;
  _onChannelDead;
  _server;
  _socketPath;
  outbound;
  inbound;
  serviceConnections;
  rrIndex;
  pendingRequests;
  requestCounter;
  _workerId;
  onMessage;
  onRequest;
  _supervisorSend;
  _registry;
  _reconnectAttempts;
  _reconnectTimers;
  _socketKeyMap;
  backpressureEvents;
  _reconnectQueue;
  constructor(serviceName, workerId, options = {}) {
    this.serviceName = serviceName;
    this.workerId = workerId;
    this._dependencies = options.channels ? new Set(options.channels) : null;
    this._onChannelDead = options.onChannelDead ?? null;
    this._server = null;
    this._socketPath = null;
    this.outbound = new Map();
    this.inbound = new Map();
    this.serviceConnections = new Map();
    this.rrIndex = new Map();
    this.pendingRequests = new Map();
    this.requestCounter = 0;
    /** Worker ID for unique request IDs; falls back to process.pid */
    this._workerId = workerId ?? process.pid;
    this.onMessage = null;
    this.onRequest = null;
    this._supervisorSend = null;
    this._registry = {};
    this._reconnectAttempts = new Map();
    this._reconnectTimers = new Map();
    /** P19: Reverse map from socket → key for O(1) lookup */
    this._socketKeyMap = new Map();
    this.backpressureEvents = 0;
    /** IPC-M5: Bounded queue for messages during reconnection */
    this._reconnectQueue = new Map();
  }
  /**
   * Initialize — set up supervisor IPC listener.
   */
  init(supervisorSend) {
    this._supervisorSend = supervisorSend;
    process.on("message", (msg) => {
      if (!msg || !msg.type)
        return;
      switch (msg.type) {
        case "forge:init-socket":
          this._startServer(msg.socketDir, msg.serviceName, msg.workerId);
          break;
        case "forge:socket-registry":
          this._updateRegistry(msg.registry);
          break;
        case "forge:health-check":
          supervisorSend({
            type: "forge:health-response",
            timestamp: msg.timestamp,
            uptime: process.uptime(),
            memory: process.memoryUsage(),
            pid: process.pid,
            directConnections: this.outbound.size,
          });
          break;
      }
    });
  }
  /**
   * Start our UDS server so other workers can connect to us.
   */
  _startServer(socketDir, serviceName, workerId) {
    // One channel manager exists per service in a colocated worker process.
    // Ignore init messages for sibling services; each sibling manager will
    // start its own socket server when it receives its matching message.
    if (serviceName !== this.serviceName)
      return;
    if (this._server)
      return; // already started
    this._socketPath = path.join(socketDir, `${serviceName}-${workerId}.sock`);
    this._server = net.createServer((socket) => {
      // A7: Start handshake timeout — close if no handshake within HANDSHAKE_TIMEOUT
      let handshakeCompleted = false;
      const handshakeTimer = setTimeout(() => {
        if (!handshakeCompleted) {
          // DX-14: Only log handshake timeouts when this service has configured connections.
          // Single-service setups with no `connects` targets will never receive inbound
          // handshakes, so the timeout is meaningless noise.
          if (this._dependencies === null || this._dependencies.size > 0) {
            console.warn(`[${this.serviceName}] Inbound handshake timeout — closing socket`);
          }
          socket.destroy();
        }
      }, HANDSHAKE_TIMEOUT);
      handshakeTimer.unref();
      // Inbound connection from another worker — use shared frame parser
      const onData = createFrameParser((msg) => {
        // A7: Clear handshake timer on handshake message
        if (msg.type === "forge:handshake" && !handshakeCompleted) {
          handshakeCompleted = true;
          clearTimeout(handshakeTimer);
        }
        this._handleIncomingMessage(socket, msg);
      }, this.serviceName, socket);
      socket.on("data", onData);
      socket.on("error", () => {
        clearTimeout(handshakeTimer);
      });
      socket.on("close", () => {
        clearTimeout(handshakeTimer);
        // Remove from inbound
        for (const [key, s] of this.inbound) {
          if (s === socket) {
            this.inbound.delete(key);
            break;
          }
        }
      });
    });
    this._server.listen(this._socketPath, () => {
      // Tell supervisor we're ready
      this._supervisorSend({
        type: "forge:socket-ready",
        socketPath: this._socketPath,
        serviceName,
        workerId,
      });
    });
    this._server.on("error", (err) => {
      if (err.code === "EADDRINUSE") {
        // Stale socket file — unlink and retry once
        try {
          fs.unlinkSync(this._socketPath);
        } catch {
          // ignore
        }
        this._server.listen(this._socketPath, () => {
          this._supervisorSend({
            type: "forge:socket-ready",
            socketPath: this._socketPath,
            serviceName,
            workerId,
          });
        });
      } else {
        console.error(`[${this.serviceName}] UDS server error:`, err.message);
      }
    });
  }
  /**
   * Update our knowledge of the socket registry and connect to new peers.
   */
  _updateRegistry(registry) {
    this._registry = registry;
    const myKey = `${this.serviceName}:${this.workerId}`;
    for (const [key, socketPath] of Object.entries(registry)) {
      if (key === myKey)
        continue; // don't connect to ourselves
      if (this.outbound.has(key))
        continue; // already connected
      // P1: Only connect to services in our dependency list (if specified)
      if (this._dependencies) {
        const [svcName] = key.split(":");
        if (!this._dependencies.has(svcName)) {
          continue;
        }
      }
      this._connectTo(key, socketPath);
    }
  }
  /**
   * Establish an outbound connection to another worker's UDS server.
   */
  _connectTo(key, socketPath) {
    const socket = net.createConnection(socketPath);
    let handshakeTimer = null;
    let dataReceived = false;
    socket.on("connect", () => {
      this.outbound.set(key, socket);
      this._socketKeyMap.set(socket, key); // P19: O(1) reverse lookup
      this._reconnectAttempts.set(key, 0);
      // CR-IPC-13: Start handshake timeout — destroy if no handshake ack within 5s
      handshakeTimer = setTimeout(() => {
        if (!dataReceived) {
          // DX-14: Suppress noisy timeout warnings for services with no connections configured
          if (this._dependencies === null || this._dependencies.size > 0) {
            console.warn(`[${this.serviceName}] Handshake timeout for ${key}, destroying socket`);
          }
          socket.destroy();
          this.outbound.delete(key);
        }
      }, 5000);
      handshakeTimer.unref();
      // Track service → connection keys for round-robin
      const [svcName] = key.split(":");
      if (!this.serviceConnections.has(svcName)) {
        this.serviceConnections.set(svcName, []);
      }
      const keys = this.serviceConnections.get(svcName);
      if (!keys.includes(key))
        keys.push(key);
      // Handshake: tell the other side who we are
      // IPC-M3: Include timestamp in handshake to prevent replay attacks
      const handshakeTs = Date.now();
      const handshake = {
        type: "forge:handshake",
        from: this.serviceName,
        fromWorkerId: this.workerId,
        ts: handshakeTs,
        ipcVersion: IPC_PROTOCOL_VERSION,
      };
      const clusterSecret = process.env.FORGE_CLUSTER_SECRET;
      if (clusterSecret) {
        handshake.hmac = createHmac("sha256", clusterSecret)
          .update(`${this.serviceName}:${this.workerId}:${handshakeTs}`)
          .digest("hex");
      }
      this._sendFrame(socket, handshake);
      // IPC-M5: Drain queued messages after reconnect
      const [svcNameForQueue] = key.split(":");
      const queued = this._reconnectQueue.get(svcNameForQueue);
      if (queued && queued.length > 0) {
        this._reconnectQueue.delete(svcNameForQueue);
        for (const qMsg of queued) {
          try {
            this._sendFrame(socket, qMsg);
          } catch {
            // Best-effort: drop remaining queued messages on backpressure
            break;
          }
        }
      }
    });
    // Outbound connection — use shared frame parser
    const onData = createFrameParser((msg) => this._handleIncomingMessage(socket, msg), this.serviceName, socket);
    socket.on("data", (chunk) => {
      // CR-IPC-13: Clear handshake timeout on first data
      if (!dataReceived) {
        dataReceived = true;
        if (handshakeTimer) {
          clearTimeout(handshakeTimer);
          handshakeTimer = null;
        }
      }
      onData(chunk);
    });
    socket.on("error", () => {
      if (handshakeTimer) {
        clearTimeout(handshakeTimer);
        handshakeTimer = null;
      }
      this.outbound.delete(key);
      this._socketKeyMap.delete(socket); // P19
      const [svcName] = key.split(":");
      const keys = this.serviceConnections.get(svcName);
      if (keys) {
        const idx = keys.indexOf(key);
        if (idx !== -1)
          keys.splice(idx, 1);
      }
      // C2: Reject orphaned pending requests for this dead socket
      this._rejectPendingForSocket(key);
    });
    socket.on("close", () => {
      this.outbound.delete(key);
      this._socketKeyMap.delete(socket); // P19
      const [svcName] = key.split(":");
      const keys = this.serviceConnections.get(svcName);
      if (keys) {
        const idx = keys.indexOf(key);
        if (idx !== -1)
          keys.splice(idx, 1);
      }
      // C2: Reject orphaned pending requests for this dead socket
      this._rejectPendingForSocket(key);
      // A13: Stop retrying after MAX_RECONNECT_ATTEMPTS
      const existingTimer = this._reconnectTimers.get(key);
      if (existingTimer)
        clearTimeout(existingTimer);
      const attempts = this._reconnectAttempts.get(key) ?? 0;
      if (attempts >= MAX_RECONNECT_ATTEMPTS) {
        console.error(`[${this.serviceName}] Channel to ${key} permanently failed after ${attempts} reconnection attempts`);
        this._reconnectAttempts.delete(key);
        this._reconnectTimers.delete(key);
        if (this._onChannelDead) {
          this._onChannelDead(key, attempts);
        }
        return;
      }
      // Attempt reconnect with exponential backoff
      this._reconnectAttempts.set(key, attempts + 1);
      const baseDelay = Math.min(60000, 1000 * 2 ** attempts);
      const delay = baseDelay + Math.random() * 1000;
      const timer = setTimeout(() => {
        this._reconnectTimers.delete(key);
        if (this._registry[key] && !this.outbound.has(key)) {
          this._connectTo(key, this._registry[key]);
        }
      }, delay);
      timer.unref();
      this._reconnectTimers.set(key, timer);
    });
  }
  /**
   * Send a length-prefixed JSON frame over a socket.
   * P6: Buffer.from(json) avoids double-scan of the string.
   * @returns true if the write was accepted into the kernel buffer
   */
  _sendFrame(socket, msg) {
    // CR-IPC-4: Reject when buffer full — throw so callers can back off
    // H2: Error code allows HTTP layer to return 503 with Retry-After
    if (socket.writableLength > MAX_WRITE_BUFFER) {
      const key = this._socketKey(socket);
      const err = new Error(`IPC write buffer full for ${key} — receiver too slow`);
      err.code = "ERR_BACKPRESSURE";
      throw err;
    }
    // P6: Buffer.from gives us the buffer directly — byteLength = buf.length, no re-scan
    // IPC-C2: Use safe serializer to handle circular refs, BigInt, Buffer
    const json = safeJsonStringify(msg);
    const body = Buffer.from(json);
    const frame = Buffer.allocUnsafe(4 + body.length);
    frame.writeUInt32BE(body.length, 0);
    body.copy(frame, 4);
    const ok = socket.write(frame);
    if (!ok && !socket._drainWarned) {
      socket._drainWarned = true;
      socket.once("drain", () => {
        socket._drainWarned = false;
      });
      console.warn(`[${this.serviceName}] UDS write buffer full, waiting for drain`);
    }
    return ok;
  }
  /**
   * Send a pre-built frame buffer over a socket (for broadcast optimization).
   * P6: Serialize once, send to all recipients.
   */
  _sendRawFrame(socket, frameBuffer) {
    // H2: Error code allows HTTP layer to return 503 with Retry-After
    if (socket.writableLength > MAX_WRITE_BUFFER) {
      const key = this._socketKey(socket);
      const err = new Error(`IPC write buffer full for ${key} — receiver too slow`);
      err.code = "ERR_BACKPRESSURE";
      throw err;
    }
    const ok = socket.write(frameBuffer);
    if (!ok && !socket._drainWarned) {
      socket._drainWarned = true;
      socket.once("drain", () => {
        socket._drainWarned = false;
      });
      console.warn(`[${this.serviceName}] UDS write buffer full, waiting for drain`);
    }
    return ok;
  }
  /**
   * Build a length-prefixed frame buffer from a message object.
   * P6: Used by broadcast to serialize once and send to all.
   */
  _buildFrame(msg) {
    // IPC-C2: Use safe serializer to handle circular refs, BigInt, Buffer
    const json = safeJsonStringify(msg);
    const body = Buffer.from(json);
    const frame = Buffer.allocUnsafe(4 + body.length);
    frame.writeUInt32BE(body.length, 0);
    body.copy(frame, 4);
    return frame;
  }
  /**
   * Handle a message from another worker (inbound or outbound socket).
   */
  _handleIncomingMessage(socket, msg) {
    switch (msg.type) {
      case "forge:handshake": {
        if (!msg.from || typeof msg.from !== "string") {
          console.warn(`[${this.serviceName}] Invalid handshake: missing 'from' field`);
          break;
        }
        // IPC version check: fail fast on protocol mismatch
        const peerVersion = msg.ipcVersion;
        if (peerVersion !== undefined && peerVersion !== IPC_PROTOCOL_VERSION) {
          console.error(`[${this.serviceName}] IPC protocol version mismatch from ${msg.from}:${msg.fromWorkerId}: ` +
            `peer=${peerVersion}, local=${IPC_PROTOCOL_VERSION}. ` +
            `This usually means mismatched ThreadForge versions. Destroying connection.`);
          socket.destroy();
          break;
        }
        // S-IPC-1: Verify HMAC if cluster secret is configured
        const clusterSecret = process.env.FORGE_CLUSTER_SECRET;
        if (clusterSecret) {
          // IPC-M3: Reject handshakes with stale timestamps (30s window)
          const HANDSHAKE_MAX_AGE_MS = 30_000;
          const ts = typeof msg.ts === "number" ? msg.ts : 0;
          if (Math.abs(Date.now() - ts) > HANDSHAKE_MAX_AGE_MS) {
            console.error(`[${this.serviceName}] Handshake rejected: timestamp too old or missing for ${msg.from}:${msg.fromWorkerId}`);
            socket.destroy();
            break;
          }
          const expected = createHmac("sha256", clusterSecret).update(`${msg.from}:${msg.fromWorkerId}:${ts}`).digest("hex");
          // Wrap in try/catch: if msg.hmac is not valid hex, Buffer.from
          // produces a different-length buffer and timingSafeEqual throws
          try {
            const expectedBuf = Buffer.from(expected, "hex");
            const hmacBuf = msg.hmac ? Buffer.from(String(msg.hmac), "hex") : Buffer.alloc(0);
            if (hmacBuf.length !== expectedBuf.length || !timingSafeEqual(expectedBuf, hmacBuf)) {
              throw new Error("HMAC mismatch");
            }
          } catch {
            console.error(`[${this.serviceName}] Handshake HMAC verification failed for ${msg.from}:${msg.fromWorkerId}`);
            socket.destroy();
            break;
          }
        }
        const key = `${msg.from}:${msg.fromWorkerId}`;
        this.inbound.set(key, socket);
        try {
          this._sendFrame(socket, {
            type: "forge:handshake-ack",
            from: this.serviceName,
            fromWorkerId: this.workerId,
          });
        } catch (err) {
          const message = err instanceof Error ? err.message : String(err);
          console.warn(`[${this.serviceName}] Failed to send handshake ack: ${message}`);
          socket.destroy();
        }
        break;
      }
      case "forge:handshake-ack": {
        break;
      }
      case "forge:message": {
        if (this.onMessage) {
          this.onMessage(msg.from, msg.payload);
        }
        break;
      }
      case "forge:request": {
        if (this.onRequest) {
          Promise.resolve(this.onRequest(msg.from, msg.payload))
            .then((result) => {
              try {
                this._sendFrame(socket, {
                  type: "forge:response",
                  requestId: msg.requestId,
                  payload: result,
                  error: null,
                });
              } catch (sendErr) {
                const message = sendErr instanceof Error ? sendErr.message : String(sendErr);
                console.error(`[${this.serviceName}] Failed to send response: ${message}`);
              }
            })
            .catch((err) => {
              try {
                this._sendFrame(socket, {
                  type: "forge:response",
                  requestId: msg.requestId,
                  payload: null,
                  error: { message: err.message, code: err.code, statusCode: err.statusCode },
                });
              } catch (sendErr) {
                const message = sendErr instanceof Error ? sendErr.message : String(sendErr);
                console.error(`[${this.serviceName}] Failed to send error response: ${message}`);
              }
            });
        }
        break;
      }
      case "forge:response": {
        const pending = this.pendingRequests.get(msg.requestId);
        if (pending && !pending.settled) {
          pending.settled = true;
          clearTimeout(pending.timer);
          this.pendingRequests.delete(msg.requestId);
          if (msg.error) {
            const errObj = typeof msg.error === "object" ? msg.error : { message: msg.error };
            const err = new Error(errObj.message);
            if ("code" in errObj && errObj.code)
              err.code = errObj.code;
            if ("statusCode" in errObj && errObj.statusCode)
              err.statusCode = errObj.statusCode;
            pending.reject(err);
          } else {
            pending.resolve(msg.payload);
          }
        }
        break;
      }
    }
  }
  // -- Public API (called by ForgeContext) --
  /**
   * Send a fire-and-forget message. Direct UDS path if available.
   */
  send(target, payload) {
    const socket = this._pickSocket(target);
    if (socket) {
      try {
        const ok = this._sendFrame(socket, {
          type: "forge:message",
          from: this.serviceName,
          payload,
        });
        if (!ok) {
          this.backpressureEvents++;
          console.warn(`[${this.serviceName}] Backpressure on send() to "${target}" (total: ${this.backpressureEvents})`);
        }
      } catch (err) {
        this.backpressureEvents++;
        throw err;
      }
    } else {
      // IPC-H2: Queue messages during reconnection instead of throwing immediately
      const isReconnecting = this._reconnectTimers.has(target) ||
        [...this._reconnectTimers.keys()].some((k) => k.startsWith(`${target}:`));
      if (isReconnecting) {
        const queue = this._reconnectQueue.get(target) ?? [];
        if (queue.length >= MAX_RECONNECT_QUEUE) {
          // IPC-H2: Drop oldest message to make room, preserving ordering for newer messages
          queue.shift();
          console.warn(`[${this.serviceName}] Reconnect queue full for "${target}" (max ${MAX_RECONNECT_QUEUE}) — dropping oldest message`);
        }
        queue.push({ type: "forge:message", from: this.serviceName, payload });
        this._reconnectQueue.set(target, queue);
        return;
      }
      // IPC-C1: Supervisor has no handler for forge:send — fail loudly instead of silently dropping
      throw new Error(`[${this.serviceName}] No UDS connection to "${target}" — cannot send message (no supervisor fallback)`);
    }
  }
  /**
   * Broadcast to all workers of a target service.
   */
  broadcast(target, payload) {
    const keys = this.serviceConnections.get(target) ?? [];
    if (keys.length > 0) {
      // P6: Serialize once, send the same buffer to all recipients
      const frame = this._buildFrame({
        type: "forge:message",
        from: this.serviceName,
        payload,
      });
      // IPC-M4: Copy keys array to avoid mutation during iteration
      // (socket error handlers may splice from the original array)
      for (const key of [...keys]) {
        const socket = this.outbound.get(key);
        if (socket) {
          try {
            this._sendRawFrame(socket, frame);
          } catch (err) {
            const message = err instanceof Error ? err.message : String(err);
            console.warn(`[${this.serviceName}] Broadcast to ${key} failed: ${message}`);
          }
        }
      }
    } else {
      // IPC-C1: Supervisor has no handler for forge:broadcast — warn instead of silently dropping
      console.warn(`[${this.serviceName}] No UDS connections to "${target}" — broadcast dropped (no supervisor fallback)`);
    }
  }
  /**
   * Request/response over direct UDS.
   */
  request(target, payload, timeoutMs = 5000) {
    if (this.pendingRequests.size >= MAX_PENDING_REQUESTS) {
      return Promise.reject(new Error("Too many pending requests"));
    }
    const socket = this._pickSocket(target);
    this.requestCounter = (this.requestCounter + 1) % 1_000_000_000;
    const requestId = `req_${this._workerId}_${randomUUID()}`;
    return new Promise((resolve, reject) => {
      const timer = setTimeout(() => {
        const entry = this.pendingRequests.get(requestId);
        if (entry?.settled)
          return;
        if (entry)
          entry.settled = true;
        this.pendingRequests.delete(requestId);
        reject(new Error(`Request to "${target}" timed out after ${timeoutMs}ms`));
      }, timeoutMs);
      const sKey = socket ? this._socketKey(socket) : null;
      this.pendingRequests.set(requestId, { resolve, reject, timer, socketKey: sKey, settled: false });
      try {
        if (socket) {
          this._sendFrame(socket, {
            type: "forge:request",
            requestId,
            from: this.serviceName,
            payload,
          });
        } else {
          // IPC-C1: Supervisor has no handler for forge:request — fail loudly instead of silently dropping
          throw new Error(`[${this.serviceName}] No UDS connection to "${target}" — cannot send request (no supervisor fallback)`);
        }
      } catch (err) {
        const entry = this.pendingRequests.get(requestId);
        if (entry?.settled)
          return;
        if (entry)
          entry.settled = true;
        clearTimeout(timer);
        this.pendingRequests.delete(requestId);
        reject(err);
      }
    });
  }
  /**
   * Pick a socket to a target service (round-robin).
   */
  _pickSocket(target) {
    const keys = this.serviceConnections.get(target);
    if (!keys || keys.length === 0)
      return null;
    const startIdx = (this.rrIndex.get(target) ?? 0) % keys.length;
    this.rrIndex.set(target, (startIdx + 1) % 1_000_000_000);
    // Try from startIdx, skip dead sockets
    const dead = [];
    let found = null;
    for (let attempt = 0; attempt < keys.length; attempt++) {
      const idx = (startIdx + attempt) % keys.length;
      const socket = this.outbound.get(keys[idx]);
      if (socket && !socket.destroyed) {
        found = socket;
        break;
      }
      dead.push(idx);
    }
    // Remove dead entries after the loop (reverse order to preserve indices)
    for (let i = dead.length - 1; i >= 0; i--) {
      keys.splice(dead[i], 1);
    }
    return found;
  }
  /**
   * P19: O(1) socket → key lookup via reverse Map (replaces linear scan).
   */
  _socketKey(socket) {
    return this._socketKeyMap.get(socket) ?? "unknown";
  }
  /**
   * Reject all pending requests that were sent over a specific socket.
   */
  _rejectPendingForSocket(deadKey) {
    for (const [id, entry] of this.pendingRequests) {
      if (entry.socketKey === deadKey) {
        if (entry.settled)
          continue;
        entry.settled = true;
        clearTimeout(entry.timer);
        this.pendingRequests.delete(id);
        entry.reject(new Error("Connection lost to peer"));
      }
    }
  }
  hasDirectConnection(target) {
    const keys = this.serviceConnections.get(target);
    return keys !== undefined && keys.length > 0;
  }
  topology() {
    const result = {};
    for (const [service, keys] of this.serviceConnections) {
      result[service] = {
        connections: keys.length,
        keys,
      };
    }
    return result;
  }
  destroy() {
    // Clear reconnect timers to prevent firing after shutdown
    for (const timer of this._reconnectTimers.values())
      clearTimeout(timer);
    this._reconnectTimers.clear();
    // IPC-M5: Drop queued messages
    this._reconnectQueue.clear();
    // Clear all pending request timers and reject pending requests
    for (const [, entry] of this.pendingRequests) {
      if (entry.settled)
        continue;
      entry.settled = true;
      if (entry.timer)
        clearTimeout(entry.timer);
      entry.reject(new Error("Channel destroyed"));
    }
    this.pendingRequests.clear();
    for (const [, socket] of this.outbound) {
      try {
        socket.destroy();
      } catch {
        // ignore
      }
    }
    for (const [, socket] of this.inbound) {
      try {
        socket.destroy();
      } catch {
        // ignore
      }
    }
    const cleanup = () => {
      this.outbound.clear();
      this.inbound.clear();
      this.serviceConnections.clear();
      this._socketKeyMap.clear();
    };
    if (this._server) {
      return Promise.race([
        new Promise((resolve) => {
          this._server.close(() => {
            cleanup();
            resolve();
          });
        }),
        new Promise((resolve) => {
          setTimeout(() => {
            try {
              this._server.close();
            } catch {
              // ignore
            }
            cleanup();
            resolve();
          }, 5000).unref();
        }),
      ]);
    }
    cleanup();
    return Promise.resolve();
  }
}
//# sourceMappingURL=WorkerChannelManager.js.map
```
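For reference, the wire format the new transport documents is a 4-byte UInt32BE length prefix followed by a JSON payload. The sketch below is a standalone illustration of that framing and is not part of the package: the service name and payloads are made up, and the real `createFrameParser` additionally copes with partial chunks, oversized frames, a parse-error limit, and a reviver that strips `__proto__`/`constructor`/`prototype` keys.

```js
// Standalone sketch of the length-prefixed JSON frame format used above:
// [4 bytes: message length (UInt32BE)][JSON payload]. Names here are illustrative only.
import assert from "node:assert";

// Encode one message the way _buildFrame()/_sendFrame() lay it out on the wire.
function encodeFrame(msg) {
  const body = Buffer.from(JSON.stringify(msg));
  const frame = Buffer.allocUnsafe(4 + body.length);
  frame.writeUInt32BE(body.length, 0);
  body.copy(frame, 4);
  return frame;
}

// Decode every complete frame from an in-memory stream; a simplified, stateless
// cousin of createFrameParser() without its error handling and size limits.
function decodeFrames(stream) {
  const messages = [];
  let buffer = stream;
  while (buffer.length >= 4) {
    const len = buffer.readUInt32BE(0);
    if (buffer.length < 4 + len)
      break; // incomplete frame: wait for more data
    messages.push(JSON.parse(buffer.subarray(4, 4 + len).toString()));
    buffer = buffer.subarray(4 + len);
  }
  return messages;
}

// Two frames written back-to-back, as they would appear on the UDS stream
// (hypothetical "auth" service, worker 0).
const stream = Buffer.concat([
  encodeFrame({ type: "forge:handshake", from: "auth", fromWorkerId: 0, ts: Date.now() }),
  encodeFrame({ type: "forge:message", from: "auth", payload: { hello: "world" } }),
]);
assert.deepStrictEqual(
  decodeFrames(stream).map((m) => m.type),
  ["forge:handshake", "forge:message"],
);
```

On the write side, `_sendFrame` and `_buildFrame` produce the same layout, and `_sendFrame` raises an `ERR_BACKPRESSURE` error once `socket.writableLength` exceeds the 4 MB `MAX_WRITE_BUFFER`, so callers can back off instead of queueing unbounded data.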