threadforge 0.1.1 → 0.2.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +52 -20
- package/bin/forge.js +2 -1058
- package/bin/host-commands.d.ts +2 -0
- package/bin/host-commands.d.ts.map +1 -0
- package/bin/host-commands.js +7 -8
- package/bin/platform-commands.d.ts +2 -0
- package/bin/platform-commands.d.ts.map +1 -0
- package/bin/platform-commands.js +118 -36
- package/dist/cli/base-command.d.ts +12 -0
- package/dist/cli/base-command.d.ts.map +1 -0
- package/dist/cli/base-command.js +25 -0
- package/dist/cli/base-command.js.map +1 -0
- package/dist/cli/commands/build.d.ts +10 -0
- package/dist/cli/commands/build.d.ts.map +1 -0
- package/dist/cli/commands/build.js +110 -0
- package/dist/cli/commands/build.js.map +1 -0
- package/dist/cli/commands/deploy.d.ts +12 -0
- package/dist/cli/commands/deploy.d.ts.map +1 -0
- package/dist/cli/commands/deploy.js +143 -0
- package/dist/cli/commands/deploy.js.map +1 -0
- package/dist/cli/commands/dev.d.ts +10 -0
- package/dist/cli/commands/dev.d.ts.map +1 -0
- package/dist/cli/commands/dev.js +138 -0
- package/dist/cli/commands/dev.js.map +1 -0
- package/dist/cli/commands/generate.d.ts +10 -0
- package/dist/cli/commands/generate.d.ts.map +1 -0
- package/dist/cli/commands/generate.js +76 -0
- package/dist/cli/commands/generate.js.map +1 -0
- package/dist/cli/commands/host.d.ts +8 -0
- package/dist/cli/commands/host.d.ts.map +1 -0
- package/dist/cli/commands/host.js +20 -0
- package/dist/cli/commands/host.js.map +1 -0
- package/dist/cli/commands/init.d.ts +16 -0
- package/dist/cli/commands/init.d.ts.map +1 -0
- package/dist/cli/commands/init.js +246 -0
- package/dist/cli/commands/init.js.map +1 -0
- package/dist/cli/commands/platform.d.ts +8 -0
- package/dist/cli/commands/platform.d.ts.map +1 -0
- package/dist/cli/commands/platform.js +20 -0
- package/dist/cli/commands/platform.js.map +1 -0
- package/dist/cli/commands/restart.d.ts +8 -0
- package/dist/cli/commands/restart.d.ts.map +1 -0
- package/dist/cli/commands/restart.js +13 -0
- package/dist/cli/commands/restart.js.map +1 -0
- package/dist/cli/commands/scaffold/frontend.d.ts +10 -0
- package/dist/cli/commands/scaffold/frontend.d.ts.map +1 -0
- package/dist/cli/commands/scaffold/frontend.js +130 -0
- package/dist/cli/commands/scaffold/frontend.js.map +1 -0
- package/dist/cli/commands/scaffold/react.d.ts +7 -0
- package/dist/cli/commands/scaffold/react.d.ts.map +1 -0
- package/dist/cli/commands/scaffold/react.js +12 -0
- package/dist/cli/commands/scaffold/react.js.map +1 -0
- package/dist/cli/commands/scale.d.ts +8 -0
- package/dist/cli/commands/scale.d.ts.map +1 -0
- package/dist/cli/commands/scale.js +13 -0
- package/dist/cli/commands/scale.js.map +1 -0
- package/dist/cli/commands/start.d.ts +10 -0
- package/dist/cli/commands/start.d.ts.map +1 -0
- package/dist/cli/commands/start.js +71 -0
- package/dist/cli/commands/start.js.map +1 -0
- package/dist/cli/commands/status.d.ts +11 -0
- package/dist/cli/commands/status.d.ts.map +1 -0
- package/dist/cli/commands/status.js +60 -0
- package/dist/cli/commands/status.js.map +1 -0
- package/dist/cli/commands/stop.d.ts +10 -0
- package/dist/cli/commands/stop.d.ts.map +1 -0
- package/dist/cli/commands/stop.js +89 -0
- package/dist/cli/commands/stop.js.map +1 -0
- package/dist/cli/util/config-discovery.d.ts +8 -0
- package/dist/cli/util/config-discovery.d.ts.map +1 -0
- package/dist/cli/util/config-discovery.js +70 -0
- package/dist/cli/util/config-discovery.js.map +1 -0
- package/dist/cli/util/config-patcher.d.ts +17 -0
- package/dist/cli/util/config-patcher.d.ts.map +1 -0
- package/dist/cli/util/config-patcher.js +439 -0
- package/dist/cli/util/config-patcher.js.map +1 -0
- package/dist/cli/util/frontend-dev.d.ts +8 -0
- package/dist/cli/util/frontend-dev.d.ts.map +1 -0
- package/dist/cli/util/frontend-dev.js +117 -0
- package/dist/cli/util/frontend-dev.js.map +1 -0
- package/dist/cli/util/process.d.ts +5 -0
- package/dist/cli/util/process.d.ts.map +1 -0
- package/dist/cli/util/process.js +17 -0
- package/dist/cli/util/process.js.map +1 -0
- package/dist/cli/util/templates.d.ts +10 -0
- package/dist/cli/util/templates.d.ts.map +1 -0
- package/dist/cli/util/templates.js +157 -0
- package/dist/cli/util/templates.js.map +1 -0
- package/dist/core/AlertSink.d.ts +83 -0
- package/dist/core/AlertSink.d.ts.map +1 -0
- package/dist/core/AlertSink.js +126 -0
- package/dist/core/AlertSink.js.map +1 -0
- package/dist/core/DirectMessageBus.d.ts +88 -0
- package/dist/core/DirectMessageBus.d.ts.map +1 -0
- package/dist/core/DirectMessageBus.js +352 -0
- package/dist/core/DirectMessageBus.js.map +1 -0
- package/dist/core/EndpointResolver.d.ts +111 -0
- package/dist/core/EndpointResolver.d.ts.map +1 -0
- package/dist/core/EndpointResolver.js +336 -0
- package/dist/core/EndpointResolver.js.map +1 -0
- package/dist/core/ForgeContext.d.ts +221 -0
- package/dist/core/ForgeContext.d.ts.map +1 -0
- package/dist/core/ForgeContext.js +1169 -0
- package/dist/core/ForgeContext.js.map +1 -0
- package/dist/core/ForgeEndpoints.d.ts +71 -0
- package/dist/core/ForgeEndpoints.d.ts.map +1 -0
- package/dist/core/ForgeEndpoints.js +442 -0
- package/dist/core/ForgeEndpoints.js.map +1 -0
- package/dist/core/ForgeHost.d.ts +82 -0
- package/dist/core/ForgeHost.d.ts.map +1 -0
- package/dist/core/ForgeHost.js +107 -0
- package/dist/core/ForgeHost.js.map +1 -0
- package/dist/core/ForgePlatform.d.ts +96 -0
- package/dist/core/ForgePlatform.d.ts.map +1 -0
- package/dist/core/ForgePlatform.js +136 -0
- package/dist/core/ForgePlatform.js.map +1 -0
- package/dist/core/ForgeWebSocket.d.ts +56 -0
- package/dist/core/ForgeWebSocket.d.ts.map +1 -0
- package/dist/core/ForgeWebSocket.js +415 -0
- package/dist/core/ForgeWebSocket.js.map +1 -0
- package/dist/core/Ingress.d.ts +329 -0
- package/dist/core/Ingress.d.ts.map +1 -0
- package/dist/core/Ingress.js +694 -0
- package/dist/core/Ingress.js.map +1 -0
- package/dist/core/Interceptors.d.ts +134 -0
- package/dist/core/Interceptors.d.ts.map +1 -0
- package/dist/core/Interceptors.js +416 -0
- package/dist/core/Interceptors.js.map +1 -0
- package/dist/core/Logger.d.ts +20 -0
- package/dist/core/Logger.d.ts.map +1 -0
- package/dist/core/Logger.js +77 -0
- package/dist/core/Logger.js.map +1 -0
- package/dist/core/MessageBus.d.ts +15 -0
- package/dist/core/MessageBus.d.ts.map +1 -0
- package/dist/core/MessageBus.js +18 -0
- package/dist/core/MessageBus.js.map +1 -0
- package/dist/core/Prometheus.d.ts +80 -0
- package/dist/core/Prometheus.d.ts.map +1 -0
- package/dist/core/Prometheus.js +332 -0
- package/dist/core/Prometheus.js.map +1 -0
- package/dist/core/RequestContext.d.ts +214 -0
- package/dist/core/RequestContext.d.ts.map +1 -0
- package/dist/core/RequestContext.js +556 -0
- package/dist/core/RequestContext.js.map +1 -0
- package/dist/core/Router.d.ts +45 -0
- package/dist/core/Router.d.ts.map +1 -0
- package/dist/core/Router.js +285 -0
- package/dist/core/Router.js.map +1 -0
- package/dist/core/RoutingStrategy.d.ts +116 -0
- package/dist/core/RoutingStrategy.d.ts.map +1 -0
- package/dist/core/RoutingStrategy.js +306 -0
- package/dist/core/RoutingStrategy.js.map +1 -0
- package/dist/core/RpcConfig.d.ts +72 -0
- package/dist/core/RpcConfig.d.ts.map +1 -0
- package/dist/core/RpcConfig.js +127 -0
- package/dist/core/RpcConfig.js.map +1 -0
- package/dist/core/SignatureCache.d.ts +81 -0
- package/dist/core/SignatureCache.d.ts.map +1 -0
- package/dist/core/SignatureCache.js +172 -0
- package/dist/core/SignatureCache.js.map +1 -0
- package/dist/core/StaticFileServer.d.ts +34 -0
- package/dist/core/StaticFileServer.d.ts.map +1 -0
- package/dist/core/StaticFileServer.js +497 -0
- package/dist/core/StaticFileServer.js.map +1 -0
- package/dist/core/Supervisor.d.ts +198 -0
- package/dist/core/Supervisor.d.ts.map +1 -0
- package/dist/core/Supervisor.js +1418 -0
- package/dist/core/Supervisor.js.map +1 -0
- package/dist/core/ThreadAllocator.d.ts +52 -0
- package/dist/core/ThreadAllocator.d.ts.map +1 -0
- package/dist/core/ThreadAllocator.js +174 -0
- package/dist/core/ThreadAllocator.js.map +1 -0
- package/dist/core/WorkerChannelManager.d.ts +130 -0
- package/dist/core/WorkerChannelManager.d.ts.map +1 -0
- package/dist/core/WorkerChannelManager.js +956 -0
- package/dist/core/WorkerChannelManager.js.map +1 -0
- package/dist/core/config-enums.d.ts +41 -0
- package/dist/core/config-enums.d.ts.map +1 -0
- package/dist/core/config-enums.js +59 -0
- package/dist/core/config-enums.js.map +1 -0
- package/dist/core/config.d.ts +159 -0
- package/dist/core/config.d.ts.map +1 -0
- package/dist/core/config.js +694 -0
- package/dist/core/config.js.map +1 -0
- package/dist/core/host-config.d.ts +146 -0
- package/dist/core/host-config.d.ts.map +1 -0
- package/dist/core/host-config.js +312 -0
- package/dist/core/host-config.js.map +1 -0
- package/dist/core/ipc-errors.d.ts +27 -0
- package/dist/core/ipc-errors.d.ts.map +1 -0
- package/dist/core/ipc-errors.js +36 -0
- package/dist/core/ipc-errors.js.map +1 -0
- package/dist/core/network-utils.d.ts +35 -0
- package/dist/core/network-utils.d.ts.map +1 -0
- package/dist/core/network-utils.js +145 -0
- package/dist/core/network-utils.js.map +1 -0
- package/dist/core/platform-config.d.ts +142 -0
- package/dist/core/platform-config.d.ts.map +1 -0
- package/dist/core/platform-config.js +299 -0
- package/dist/core/platform-config.js.map +1 -0
- package/dist/decorators/ServiceProxy.d.ts +175 -0
- package/dist/decorators/ServiceProxy.d.ts.map +1 -0
- package/dist/decorators/ServiceProxy.js +969 -0
- package/dist/decorators/ServiceProxy.js.map +1 -0
- package/dist/decorators/index.d.ts +146 -0
- package/dist/decorators/index.d.ts.map +1 -0
- package/dist/decorators/index.js +545 -0
- package/dist/decorators/index.js.map +1 -0
- package/dist/deploy/NginxGenerator.d.ts +165 -0
- package/dist/deploy/NginxGenerator.d.ts.map +1 -0
- package/dist/deploy/NginxGenerator.js +781 -0
- package/dist/deploy/NginxGenerator.js.map +1 -0
- package/dist/deploy/PlatformManifestGenerator.d.ts +43 -0
- package/dist/deploy/PlatformManifestGenerator.d.ts.map +1 -0
- package/dist/deploy/PlatformManifestGenerator.js +80 -0
- package/dist/deploy/PlatformManifestGenerator.js.map +1 -0
- package/dist/deploy/RouteManifestGenerator.d.ts +42 -0
- package/dist/deploy/RouteManifestGenerator.d.ts.map +1 -0
- package/dist/deploy/RouteManifestGenerator.js +105 -0
- package/dist/deploy/RouteManifestGenerator.js.map +1 -0
- package/dist/deploy/index.d.ts +210 -0
- package/dist/deploy/index.d.ts.map +1 -0
- package/dist/deploy/index.js +918 -0
- package/dist/deploy/index.js.map +1 -0
- package/dist/frontend/FrontendDevLifecycle.d.ts +26 -0
- package/dist/frontend/FrontendDevLifecycle.d.ts.map +1 -0
- package/dist/frontend/FrontendDevLifecycle.js +60 -0
- package/dist/frontend/FrontendDevLifecycle.js.map +1 -0
- package/dist/frontend/FrontendPluginOrchestrator.d.ts +64 -0
- package/dist/frontend/FrontendPluginOrchestrator.d.ts.map +1 -0
- package/dist/frontend/FrontendPluginOrchestrator.js +167 -0
- package/dist/frontend/FrontendPluginOrchestrator.js.map +1 -0
- package/dist/frontend/SiteResolver.d.ts +33 -0
- package/dist/frontend/SiteResolver.d.ts.map +1 -0
- package/dist/frontend/SiteResolver.js +53 -0
- package/dist/frontend/SiteResolver.js.map +1 -0
- package/dist/frontend/StaticMountRegistry.d.ts +36 -0
- package/dist/frontend/StaticMountRegistry.d.ts.map +1 -0
- package/dist/frontend/StaticMountRegistry.js +94 -0
- package/dist/frontend/StaticMountRegistry.js.map +1 -0
- package/dist/frontend/index.d.ts +7 -0
- package/dist/frontend/index.d.ts.map +1 -0
- package/{src → dist}/frontend/index.js +4 -2
- package/dist/frontend/index.js.map +1 -0
- package/dist/frontend/pathUtils.d.ts +8 -0
- package/dist/frontend/pathUtils.d.ts.map +1 -0
- package/dist/frontend/pathUtils.js +17 -0
- package/dist/frontend/pathUtils.js.map +1 -0
- package/dist/frontend/plugins/index.d.ts +2 -0
- package/dist/frontend/plugins/index.d.ts.map +1 -0
- package/{src → dist}/frontend/plugins/index.js +1 -1
- package/dist/frontend/plugins/index.js.map +1 -0
- package/dist/frontend/plugins/viteFrontend.d.ts +51 -0
- package/dist/frontend/plugins/viteFrontend.d.ts.map +1 -0
- package/dist/frontend/plugins/viteFrontend.js +134 -0
- package/dist/frontend/plugins/viteFrontend.js.map +1 -0
- package/dist/frontend/types.d.ts +25 -0
- package/dist/frontend/types.d.ts.map +1 -0
- package/dist/frontend/types.js +2 -0
- package/dist/frontend/types.js.map +1 -0
- package/dist/index.d.ts +17 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +32 -0
- package/dist/index.js.map +1 -0
- package/dist/internals.d.ts +21 -0
- package/dist/internals.d.ts.map +1 -0
- package/{src → dist}/internals.js +12 -14
- package/dist/internals.js.map +1 -0
- package/dist/plugins/PluginManager.d.ts +209 -0
- package/dist/plugins/PluginManager.d.ts.map +1 -0
- package/dist/plugins/PluginManager.js +365 -0
- package/dist/plugins/PluginManager.js.map +1 -0
- package/dist/plugins/ScopedPostgres.d.ts +78 -0
- package/dist/plugins/ScopedPostgres.d.ts.map +1 -0
- package/dist/plugins/ScopedPostgres.js +190 -0
- package/dist/plugins/ScopedPostgres.js.map +1 -0
- package/dist/plugins/ScopedRedis.d.ts +88 -0
- package/dist/plugins/ScopedRedis.d.ts.map +1 -0
- package/dist/plugins/ScopedRedis.js +169 -0
- package/dist/plugins/ScopedRedis.js.map +1 -0
- package/dist/plugins/index.d.ts +289 -0
- package/dist/plugins/index.d.ts.map +1 -0
- package/dist/plugins/index.js +1942 -0
- package/dist/plugins/index.js.map +1 -0
- package/dist/plugins/types.d.ts +59 -0
- package/dist/plugins/types.d.ts.map +1 -0
- package/dist/plugins/types.js +2 -0
- package/dist/plugins/types.js.map +1 -0
- package/dist/registry/ServiceRegistry.d.ts +305 -0
- package/dist/registry/ServiceRegistry.d.ts.map +1 -0
- package/dist/registry/ServiceRegistry.js +735 -0
- package/dist/registry/ServiceRegistry.js.map +1 -0
- package/dist/scaling/ScaleAdvisor.d.ts +214 -0
- package/dist/scaling/ScaleAdvisor.d.ts.map +1 -0
- package/dist/scaling/ScaleAdvisor.js +526 -0
- package/dist/scaling/ScaleAdvisor.js.map +1 -0
- package/dist/services/Service.d.ts +164 -0
- package/dist/services/Service.d.ts.map +1 -0
- package/dist/services/Service.js +106 -0
- package/dist/services/Service.js.map +1 -0
- package/dist/services/worker-bootstrap.d.ts +15 -0
- package/dist/services/worker-bootstrap.d.ts.map +1 -0
- package/dist/services/worker-bootstrap.js +744 -0
- package/dist/services/worker-bootstrap.js.map +1 -0
- package/dist/templates/auth-service.d.ts +42 -0
- package/dist/templates/auth-service.d.ts.map +1 -0
- package/dist/templates/auth-service.js +54 -0
- package/dist/templates/auth-service.js.map +1 -0
- package/dist/templates/identity-service.d.ts +50 -0
- package/dist/templates/identity-service.d.ts.map +1 -0
- package/dist/templates/identity-service.js +62 -0
- package/dist/templates/identity-service.js.map +1 -0
- package/dist/types/contract.d.ts +120 -0
- package/dist/types/contract.d.ts.map +1 -0
- package/dist/types/contract.js +69 -0
- package/dist/types/contract.js.map +1 -0
- package/package.json +78 -20
- package/src/core/DirectMessageBus.js +0 -364
- package/src/core/EndpointResolver.js +0 -259
- package/src/core/ForgeContext.js +0 -2236
- package/src/core/ForgeHost.js +0 -122
- package/src/core/ForgePlatform.js +0 -145
- package/src/core/Ingress.js +0 -768
- package/src/core/Interceptors.js +0 -420
- package/src/core/MessageBus.js +0 -321
- package/src/core/Prometheus.js +0 -305
- package/src/core/RequestContext.js +0 -413
- package/src/core/RoutingStrategy.js +0 -330
- package/src/core/Supervisor.js +0 -1349
- package/src/core/ThreadAllocator.js +0 -196
- package/src/core/WorkerChannelManager.js +0 -879
- package/src/core/config.js +0 -637
- package/src/core/host-config.js +0 -311
- package/src/core/network-utils.js +0 -166
- package/src/core/platform-config.js +0 -308
- package/src/decorators/ServiceProxy.js +0 -904
- package/src/decorators/index.js +0 -571
- package/src/deploy/NginxGenerator.js +0 -865
- package/src/deploy/PlatformManifestGenerator.js +0 -96
- package/src/deploy/RouteManifestGenerator.js +0 -112
- package/src/deploy/index.js +0 -984
- package/src/frontend/FrontendDevLifecycle.js +0 -65
- package/src/frontend/FrontendPluginOrchestrator.js +0 -187
- package/src/frontend/SiteResolver.js +0 -63
- package/src/frontend/StaticMountRegistry.js +0 -90
- package/src/frontend/plugins/viteFrontend.js +0 -79
- package/src/frontend/types.js +0 -35
- package/src/index.js +0 -58
- package/src/plugins/PluginManager.js +0 -537
- package/src/plugins/ScopedPostgres.js +0 -192
- package/src/plugins/ScopedRedis.js +0 -142
- package/src/plugins/index.js +0 -1756
- package/src/registry/ServiceRegistry.js +0 -797
- package/src/scaling/ScaleAdvisor.js +0 -442
- package/src/services/Service.js +0 -195
- package/src/services/worker-bootstrap.js +0 -679
- package/src/templates/auth-service.js +0 -65
- package/src/templates/identity-service.js +0 -75
|
@@ -0,0 +1,744 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Worker Bootstrap v3
|
|
3
|
+
*
|
|
4
|
+
* Loads services, builds proxy clients, and injects them.
|
|
5
|
+
*
|
|
6
|
+
* After bootstrap, a service can call:
|
|
7
|
+
* await this.users.getUser('123')
|
|
8
|
+
*
|
|
9
|
+
* Which transparently routes through:
|
|
10
|
+
* - Direct function call (colocated services, same process)
|
|
11
|
+
* - UDS (different process, same machine)
|
|
12
|
+
* - Supervisor IPC fallback (startup race)
|
|
13
|
+
*/
|
|
14
|
+
import { createHmac } from "node:crypto";
|
|
15
|
+
import fs from "node:fs";
|
|
16
|
+
import path from "node:path";
|
|
17
|
+
import { pathToFileURL } from "node:url";
|
|
18
|
+
import { EndpointResolver } from "../core/EndpointResolver.js";
|
|
19
|
+
import { ForgeContext, NOT_HANDLED } from "../core/ForgeContext.js";
|
|
20
|
+
import { RequestContext } from "../core/RequestContext.js";
|
|
21
|
+
import { buildServiceProxies, createServiceProxy } from "../decorators/ServiceProxy.js";
|
|
22
|
+
import { applyStaticContractWrappers, getContract } from "../decorators/index.js";
|
|
23
|
+
import { resolveRpcOptionsForTarget } from "../core/RpcConfig.js";
|
|
24
|
+
import { IPC_PROTOCOL_VERSION } from "../core/ipc-errors.js";
|
|
25
|
+
import { resolveStaticMountsForService } from "../frontend/SiteResolver.js";
|
|
26
|
+
import { REGISTRY_MODES, RegistryMode, ServiceMode, SERVICE_TYPES, ServiceType, } from "../core/config-enums.js";
|
|
27
|
+
// ─── Configuration ──────────────────────────────────────────
|
|
28
|
+
// A6: Consolidate all env var reads into a single structured config object
|
|
29
|
+
function parseWorkerConfig() {
|
|
30
|
+
const mode = process.env.FORGE_MODE;
|
|
31
|
+
return {
|
|
32
|
+
groupName: process.env.FORGE_GROUP_NAME,
|
|
33
|
+
serviceEntries: process.env.FORGE_SERVICE_ENTRIES,
|
|
34
|
+
serviceNames: process.env.FORGE_SERVICE_NAMES,
|
|
35
|
+
port: process.env.FORGE_PORT,
|
|
36
|
+
workerId: process.env.FORGE_WORKER_ID,
|
|
37
|
+
threadCount: process.env.FORGE_THREAD_COUNT,
|
|
38
|
+
mode: mode === ServiceMode.CLUSTER ? ServiceMode.CLUSTER : ServiceMode.CLUSTER,
|
|
39
|
+
serviceTypes: process.env.FORGE_SERVICE_TYPES,
|
|
40
|
+
channels: process.env.FORGE_CHANNELS,
|
|
41
|
+
hostMeta: process.env.FORGE_HOST_META,
|
|
42
|
+
registryMode: process.env.FORGE_REGISTRY_MODE ?? RegistryMode.EMBEDDED,
|
|
43
|
+
registryHost: process.env.FORGE_HOST,
|
|
44
|
+
servicePorts: process.env.FORGE_SERVICE_PORTS,
|
|
45
|
+
plugins: process.env.FORGE_PLUGINS,
|
|
46
|
+
servicePlugins: process.env.FORGE_SERVICE_PLUGINS,
|
|
47
|
+
configPath: process.env.FORGE_CONFIG_PATH,
|
|
48
|
+
serviceEndpoints: process.env.FORGE_SERVICE_ENDPOINTS,
|
|
49
|
+
sites: process.env.FORGE_SITES,
|
|
50
|
+
sitesFile: process.env.FORGE_SITES_FILE,
|
|
51
|
+
ingress: process.env.FORGE_INGRESS,
|
|
52
|
+
serviceRpc: process.env.FORGE_SERVICE_RPC,
|
|
53
|
+
};
|
|
54
|
+
}
|
|
55
|
+
const workerConfig = parseWorkerConfig();
|
|
56
|
+
const hostMeta = workerConfig.hostMeta ? JSON.parse(workerConfig.hostMeta) : null;
|
|
57
|
+
let ingressConfig = {};
|
|
58
|
+
if (workerConfig.ingress) {
|
|
59
|
+
try {
|
|
60
|
+
ingressConfig = JSON.parse(workerConfig.ingress);
|
|
61
|
+
}
|
|
62
|
+
catch (err) {
|
|
63
|
+
console.warn(`[ThreadForge] Invalid FORGE_INGRESS payload, using defaults: ${err.message}`);
|
|
64
|
+
}
|
|
65
|
+
}
|
|
66
|
+
let sites = null;
|
|
67
|
+
let sitesPayload = workerConfig.sites;
|
|
68
|
+
if (!sitesPayload && workerConfig.sitesFile) {
|
|
69
|
+
try {
|
|
70
|
+
sitesPayload = fs.readFileSync(workerConfig.sitesFile, "utf8");
|
|
71
|
+
}
|
|
72
|
+
catch (err) {
|
|
73
|
+
console.warn(`[ThreadForge] Could not read FORGE_SITES_FILE, static mounts disabled: ${err.message}`);
|
|
74
|
+
}
|
|
75
|
+
}
|
|
76
|
+
if (sitesPayload) {
|
|
77
|
+
try {
|
|
78
|
+
sites = JSON.parse(sitesPayload);
|
|
79
|
+
}
|
|
80
|
+
catch (err) {
|
|
81
|
+
console.warn(`[ThreadForge] Invalid FORGE_SITES payload, static mounts disabled: ${err.message}`);
|
|
82
|
+
}
|
|
83
|
+
}
|
|
84
|
+
async function _deliverRemoteEvent(url, body, headers, serviceName, maxRetries = 3) {
|
|
85
|
+
for (let attempt = 0; attempt < maxRetries; attempt++) {
|
|
86
|
+
try {
|
|
87
|
+
const resp = await fetch(url, {
|
|
88
|
+
method: "POST",
|
|
89
|
+
headers,
|
|
90
|
+
body: JSON.stringify(body),
|
|
91
|
+
signal: AbortSignal.timeout(5000),
|
|
92
|
+
});
|
|
93
|
+
if (resp.ok)
|
|
94
|
+
return;
|
|
95
|
+
// Non-retryable status codes
|
|
96
|
+
if (resp.status >= 400 && resp.status < 500)
|
|
97
|
+
return;
|
|
98
|
+
}
|
|
99
|
+
catch (err) {
|
|
100
|
+
if (attempt === maxRetries - 1) {
|
|
101
|
+
console.error(`[ThreadForge] Remote event delivery to ${serviceName} failed after ${maxRetries} attempts: ${err.message}`);
|
|
102
|
+
return;
|
|
103
|
+
}
|
|
104
|
+
}
|
|
105
|
+
// Exponential backoff: 100ms, 200ms, 400ms
|
|
106
|
+
await new Promise((resolve) => setTimeout(resolve, 100 * 2 ** attempt));
|
|
107
|
+
}
|
|
108
|
+
}
|
|
109
|
+
/**
|
|
110
|
+
* Look up which project owns a given service name.
|
|
111
|
+
* Returns { projectId, schema, keyPrefix } or null.
|
|
112
|
+
*/
|
|
113
|
+
function resolveProjectForService(serviceName) {
|
|
114
|
+
if (!hostMeta)
|
|
115
|
+
return null;
|
|
116
|
+
for (const [projectId, meta] of Object.entries(hostMeta)) {
|
|
117
|
+
if (meta.services.includes(serviceName)) {
|
|
118
|
+
return { projectId, schema: meta.schema, keyPrefix: meta.keyPrefix };
|
|
119
|
+
}
|
|
120
|
+
}
|
|
121
|
+
return null;
|
|
122
|
+
}
|
|
123
|
+
// Hoisted reference so emergency shutdown handlers can access it
|
|
124
|
+
let _localServices = null;
|
|
125
|
+
let emergencyShutdownPromise = null;
|
|
126
|
+
// M-CORE-4: Module-level flag so emergencyShutdown can check if normal shutdown is running
|
|
127
|
+
let shutdownInProgress = false;
|
|
128
|
+
// Track unhandled rejections — exit only if 5+ within 60 seconds
|
|
129
|
+
let _rejectionCount = 0;
|
|
130
|
+
let _rejectionWindowStart = Date.now();
|
|
131
|
+
const REJECTION_THRESHOLD = 5;
|
|
132
|
+
const REJECTION_WINDOW_MS = 60_000;
|
|
133
|
+
async function emergencyShutdown(reason, err) {
|
|
134
|
+
if (emergencyShutdownPromise) {
|
|
135
|
+
await emergencyShutdownPromise.catch(() => { });
|
|
136
|
+
process.exit(1);
|
|
137
|
+
return;
|
|
138
|
+
}
|
|
139
|
+
// M-CORE-4: Skip service._stop() if normal shutdown is already running
|
|
140
|
+
// to prevent double-cleanup of services
|
|
141
|
+
const skipServiceStop = shutdownInProgress;
|
|
142
|
+
emergencyShutdownPromise = (async () => {
|
|
143
|
+
console.error(`[ThreadForge] ${reason}:`, err);
|
|
144
|
+
try {
|
|
145
|
+
if (_localServices && !skipServiceStop) {
|
|
146
|
+
for (const [, { service }] of _localServices) {
|
|
147
|
+
try {
|
|
148
|
+
await Promise.race([
|
|
149
|
+
service._stop?.(),
|
|
150
|
+
new Promise((_, reject) => setTimeout(() => reject(new Error("Stop timed out")), 5000)),
|
|
151
|
+
]);
|
|
152
|
+
}
|
|
153
|
+
catch {
|
|
154
|
+
// Swallow errors during emergency shutdown
|
|
155
|
+
}
|
|
156
|
+
}
|
|
157
|
+
}
|
|
158
|
+
}
|
|
159
|
+
catch {
|
|
160
|
+
// Swallow errors during emergency shutdown
|
|
161
|
+
}
|
|
162
|
+
process.exit(1);
|
|
163
|
+
})();
|
|
164
|
+
await emergencyShutdownPromise;
|
|
165
|
+
}
|
|
166
|
+
function handleUnhandledRejection(err) {
|
|
167
|
+
const now = Date.now();
|
|
168
|
+
// Reset window if it has elapsed
|
|
169
|
+
if (now - _rejectionWindowStart > REJECTION_WINDOW_MS) {
|
|
170
|
+
_rejectionCount = 0;
|
|
171
|
+
_rejectionWindowStart = now;
|
|
172
|
+
}
|
|
173
|
+
_rejectionCount++;
|
|
174
|
+
console.error(`[ThreadForge] SEVERE: Unhandled rejection (${_rejectionCount}/${REJECTION_THRESHOLD} in window):`, err);
|
|
175
|
+
if (err?.stack)
|
|
176
|
+
console.error(err.stack);
|
|
177
|
+
if (_rejectionCount >= REJECTION_THRESHOLD) {
|
|
178
|
+
console.error(`[ThreadForge] ${REJECTION_THRESHOLD}+ unhandled rejections within ${REJECTION_WINDOW_MS / 1000}s — exiting`);
|
|
179
|
+
emergencyShutdown("Repeated unhandled rejections", err);
|
|
180
|
+
}
|
|
181
|
+
}
|
|
182
|
+
async function bootstrap() {
|
|
183
|
+
// RT-H2: Register emergency handlers early so exceptions during any phase get cleanup
|
|
184
|
+
process.on("uncaughtException", (err) => emergencyShutdown("Uncaught exception", err));
|
|
185
|
+
process.on("unhandledRejection", (err) => handleUnhandledRejection(err));
|
|
186
|
+
// RT-C2: Re-entrancy guard for graceful shutdown (uses module-level `shutdownInProgress`)
|
|
187
|
+
// H-RT-4: Register signal handlers early so SIGTERM/SIGINT during any phase triggers cleanup
|
|
188
|
+
process.once("SIGTERM", () => shutdown("SIGTERM"));
|
|
189
|
+
process.once("SIGINT", () => shutdown("SIGINT"));
|
|
190
|
+
const entries = workerConfig
|
|
191
|
+
.serviceEntries.split(",")
|
|
192
|
+
.filter((e) => e.trim())
|
|
193
|
+
.map((e) => {
|
|
194
|
+
const eqIdx = e.indexOf("=");
|
|
195
|
+
if (eqIdx === -1) {
|
|
196
|
+
throw new Error(`Invalid FORGE_SERVICE_ENTRIES format: "${e}". Expected "name=path".`);
|
|
197
|
+
}
|
|
198
|
+
const name = e.slice(0, eqIdx).trim();
|
|
199
|
+
const entryPath = e.slice(eqIdx + 1).trim();
|
|
200
|
+
if (!name || !entryPath) {
|
|
201
|
+
throw new Error(`Invalid FORGE_SERVICE_ENTRIES entry: "${e}". Name and path are both required.`);
|
|
202
|
+
}
|
|
203
|
+
return { name, entry: entryPath };
|
|
204
|
+
});
|
|
205
|
+
if (entries.length === 0) {
|
|
206
|
+
throw new Error("FORGE_SERVICE_ENTRIES is empty or contains no valid entries");
|
|
207
|
+
}
|
|
208
|
+
const typeMap = {};
|
|
209
|
+
if (workerConfig.serviceTypes) {
|
|
210
|
+
for (const pair of workerConfig.serviceTypes.split(",")) {
|
|
211
|
+
const [name, type] = pair.split("=");
|
|
212
|
+
if (SERVICE_TYPES.includes(type)) {
|
|
213
|
+
typeMap[name] = type;
|
|
214
|
+
}
|
|
215
|
+
else {
|
|
216
|
+
typeMap[name] = ServiceType.INTERNAL;
|
|
217
|
+
}
|
|
218
|
+
}
|
|
219
|
+
}
|
|
220
|
+
// Parse declared channels to know which services we might talk to
|
|
221
|
+
let declaredChannels = [];
|
|
222
|
+
try {
|
|
223
|
+
declaredChannels = JSON.parse(workerConfig.channels || "[]");
|
|
224
|
+
}
|
|
225
|
+
catch {
|
|
226
|
+
// Swallow parse errors — use empty channels
|
|
227
|
+
}
|
|
228
|
+
const port = parseInt(workerConfig.port, 10);
|
|
229
|
+
const workerId = parseInt(workerConfig.workerId, 10);
|
|
230
|
+
const threadCount = parseInt(workerConfig.threadCount, 10);
|
|
231
|
+
// Phase 1: Load all service modules (P13: parallel imports)
|
|
232
|
+
const loaded = new Map();
|
|
233
|
+
/** service name → class (for proxy building) */
|
|
234
|
+
const serviceClasses = new Map();
|
|
235
|
+
const resolvedEntries = entries.map(({ name, entry }) => ({
|
|
236
|
+
name,
|
|
237
|
+
entry,
|
|
238
|
+
url: pathToFileURL(path.resolve(process.cwd(), entry)).href,
|
|
239
|
+
}));
|
|
240
|
+
const importResults = await Promise.all(resolvedEntries.map(async ({ name, entry, url }) => {
|
|
241
|
+
try {
|
|
242
|
+
const mod = (await import(url));
|
|
243
|
+
return { name, entry, mod, error: null };
|
|
244
|
+
}
|
|
245
|
+
catch (err) {
|
|
246
|
+
return { name, entry, mod: null, error: err };
|
|
247
|
+
}
|
|
248
|
+
}));
|
|
249
|
+
for (const { name, entry, mod, error } of importResults) {
|
|
250
|
+
if (error) {
|
|
251
|
+
console.error(`[ThreadForge] Failed to load service "${name}" from ${entry}: ${error.message}`);
|
|
252
|
+
if (error.stack)
|
|
253
|
+
console.error(error.stack);
|
|
254
|
+
throw new Error(`Service "${name}" failed to load from "${entry}": ${error.message}`);
|
|
255
|
+
}
|
|
256
|
+
const ServiceClass = (mod.default ?? mod);
|
|
257
|
+
if (typeof ServiceClass !== "function") {
|
|
258
|
+
throw new Error(`Service "${entry}" must export a class. Got: ${typeof ServiceClass}`);
|
|
259
|
+
}
|
|
260
|
+
const instance = new ServiceClass();
|
|
261
|
+
loaded.set(name, { ServiceClass, instance });
|
|
262
|
+
serviceClasses.set(name, ServiceClass);
|
|
263
|
+
}
|
|
264
|
+
// Also register remote service classes if we know about them
|
|
265
|
+
// (from channels config). For remote services we don't have the
|
|
266
|
+
// class, so proxies will use dynamic dispatch.
|
|
267
|
+
const allConnectedServices = new Set();
|
|
268
|
+
for (const ch of declaredChannels) {
|
|
269
|
+
allConnectedServices.add(ch.from);
|
|
270
|
+
allConnectedServices.add(ch.to);
|
|
271
|
+
}
|
|
272
|
+
// Phase 2: Create contexts and local service registry
|
|
273
|
+
const localServices = new Map();
|
|
274
|
+
_localServices = localServices;
|
|
275
|
+
function localSend(fromName, target, payload) {
|
|
276
|
+
const local = localServices.get(target);
|
|
277
|
+
if (!local)
|
|
278
|
+
return false;
|
|
279
|
+
Promise.resolve(local.service.onMessage(fromName, payload)).catch((err) => {
|
|
280
|
+
local.ctx?.logger?.error?.("onMessage error", { from: fromName, error: err.message });
|
|
281
|
+
local.ctx?.metrics?.increment?.("forge_local_send_errors_total", { target });
|
|
282
|
+
});
|
|
283
|
+
return true;
|
|
284
|
+
}
|
|
285
|
+
async function localRequest(fromName, target, payload) {
|
|
286
|
+
const local = localServices.get(target);
|
|
287
|
+
if (local) {
|
|
288
|
+
return local.service.onRequest(fromName, payload);
|
|
289
|
+
}
|
|
290
|
+
return NOT_HANDLED;
|
|
291
|
+
}
|
|
292
|
+
// Create EndpointResolver from env — shared by all contexts in this worker
const endpointResolver = EndpointResolver.fromEnv();
// Phase 2: build one ForgeContext per loaded service, wire its messaging hooks,
// run the instance's _init, and register it in localServices.
for (const [name, { instance }] of loaded) {
    const serviceType = typeMap[name] ?? ServiceType.INTERNAL;
    const isEdge = serviceType === ServiceType.EDGE;
    const ctx = new ForgeContext({
        serviceName: name,
        // Only EDGE services receive the group's HTTP port; others get 0
        // (presumably meaning "no listener" — confirm in ForgeContext).
        port: isEdge ? port : 0,
        workerId,
        threadCount,
        mode: workerConfig.mode,
        serviceType,
        ingress: ingressConfig,
        // IPC to the supervisor is best-effort: process.send is absent when not forked.
        sendIPC: (msg) => {
            if (process.send)
                process.send(msg);
        },
        // Same-worker fast paths: bind this service's name as the sender so
        // callers only supply target + payload.
        localSend: (target, payload) => localSend(name, target, payload),
        localRequest: (target, payload) => localRequest(name, target, payload),
        staticMounts: resolveStaticMountsForService(name, sites),
    });
    ctx._endpointResolver = endpointResolver;
    // Attach project identity (multi-tenant scoping) when this service maps to a project.
    const projectInfo = resolveProjectForService(name);
    if (projectInfo) {
        ctx._projectId = projectInfo.projectId;
        ctx._projectSchema = projectInfo.schema;
        ctx._projectKeyPrefix = projectInfo.keyPrefix;
    }
    // Event fan-out: for each declared channel touching this service, deliver to
    // the peer via (1) same-worker direct call, else (2) signed HTTP POST to a
    // known remote endpoint, else (3) the context's own send transport.
    ctx._emitEvent = (eventName, data) => {
        const eventPayload = { __forge_event: eventName, __forge_data: data };
        for (const ch of declaredChannels) {
            // Channels are bidirectional: the peer is whichever end is not this service.
            let target;
            if (ch.from === name)
                target = ch.to;
            else if (ch.to === name)
                target = ch.from;
            else
                continue;
            if (localSend(name, target, eventPayload))
                continue;
            const endpoint = endpointResolver.resolve(target);
            if (endpoint?.remote) {
                // Propagate RequestContext so events can be traced back to originating requests
                const rctx = RequestContext.current();
                const headers = { "Content-Type": "application/json" };
                if (rctx)
                    Object.assign(headers, rctx.toHeaders());
                // Sign event delivery with HMAC when FORGE_INTERNAL_SECRET is set
                const internalSecret = process.env.FORGE_INTERNAL_SECRET;
                if (internalSecret) {
                    // HMAC over method+path+timestamp; the receiver presumably
                    // verifies freshness via the ts header — confirm server-side.
                    const ts = String(Date.now());
                    const sig = createHmac("sha256", internalSecret).update(`POST:/__forge/event:${ts}`).digest("hex");
                    headers["x-forge-internal-sig"] = sig;
                    headers["x-forge-internal-ts"] = ts;
                }
                _deliverRemoteEvent(`http://${endpoint.host}:${endpoint.port}/__forge/event`, { from: name, event: eventName, data }, headers, target);
            }
            else {
                // No remote endpoint known — fall back to the context transport.
                ctx.send(target, eventPayload);
            }
        }
    };
    await instance._init(ctx);
    ctx._serviceInstance = instance; // for /__forge/invoke endpoint
    localServices.set(name, { service: instance, ctx });
}
|
|
358
|
+
// Phase 2b: Dynamic registry discovery
// Skipped in EMBEDDED mode, where the static endpoint map from env is authoritative.
if (workerConfig.registryMode !== RegistryMode.EMBEDDED) {
    try {
        // Lazy import keeps registry code out of workers that never use it.
        const registryModule = await import("../registry/ServiceRegistry.js");
        const ServiceRegistry = registryModule.ServiceRegistry;
        const workerRegistry = new ServiceRegistry({
            // Guard against unknown mode strings by falling back to EMBEDDED.
            mode: REGISTRY_MODES.includes(workerConfig.registryMode)
                ? workerConfig.registryMode
                : RegistryMode.EMBEDDED,
            host: workerConfig.registryHost || undefined,
        });
        // Mirror registry announcements into the endpoint resolver; only
        // HTTP-capable registrations are routable from this worker.
        workerRegistry.on("discovered", (reg) => {
            if (reg.ports?.http) {
                endpointResolver.set(reg.name, {
                    host: reg.host,
                    port: reg.ports.http,
                    remote: true,
                });
            }
        });
        workerRegistry.on("removed", (reg) => {
            if (reg.ports?.http) {
                endpointResolver.remove(reg.name, reg.host, reg.ports.http);
            }
        });
        await workerRegistry.start();
    }
    catch (err) {
        // Don't crash on registry errors — static endpoints still work
        console.error(`[ThreadForge] Worker registry init failed: ${err.message}`);
    }
}
|
|
390
|
+
// Phase 3: Build and inject proxy clients
// A2: all retry/circuit-breaker behaviour lives in ServiceProxy — nothing is
// reimplemented inline here.
const _servicePorts = JSON.parse(workerConfig.servicePorts || "{}");
const serviceRpcMap = JSON.parse(workerConfig.serviceRpc || "{}");
for (const [name, { service, ctx }] of localServices) {
    const rpcConfig = serviceRpcMap[name] ?? {};
    // Proxies for services whose classes are known in this worker.
    const proxies = buildServiceProxies(ctx, serviceClasses, localServices, { rpcConfig });
    // Fill the gaps: connected services without a local class (remote/external)
    // get plain remote proxies with per-target RPC options.
    for (const peer of allConnectedServices) {
        if (peer === name || proxies[peer]) {
            continue;
        }
        const targetOpts = resolveRpcOptionsForTarget(rpcConfig, peer);
        proxies[peer] = createServiceProxy(ctx, peer, null, null, targetOpts);
    }
    service._setProxies(proxies);
}
|
|
409
|
+
// Phase 3b (DX-M2): cross-check each service's contract against its declared
// channel connections so misconfigured @On subscriptions fail fast at startup.
// Index every contract-declared event by the service that emits it.
const emittedEvents = new Map();
for (const [emitterName, { ServiceClass }] of loaded) {
    const contract = getContract(ServiceClass);
    if (!contract) {
        continue;
    }
    for (const [, eventName] of contract.events) {
        emittedEvents.set(eventName, emitterName);
    }
}
for (const [svcName, { ServiceClass }] of loaded) {
    const contract = getContract(ServiceClass);
    if (!contract) {
        continue;
    }
    // Services reachable from this one via declared channels (either direction).
    const neighbours = new Set(declaredChannels.flatMap((ch) => {
        if (ch.from === svcName) {
            return [ch.to];
        }
        if (ch.to === svcName) {
            return [ch.from];
        }
        return [];
    }));
    const subs = contract.subscriptions;
    if (subs) {
        // Hard failure: subscribing to a service that is not a declared connection.
        for (const sub of subs) {
            if (!neighbours.has(sub.service)) {
                throw new Error(`Service "${svcName}" subscribes to event "${sub.event}" from "${sub.service}" (handler: ${sub.handlerName}), but "${sub.service}" is not in its connects array. Add connects: ['${sub.service}'] to the "${svcName}" service config.`);
            }
        }
        // Soft warning: the subscribed event is actually emitted by a different service.
        for (const sub of subs) {
            const emitter = emittedEvents.get(sub.event);
            if (emitter && emitter !== sub.service) {
                console.warn(`[ThreadForge] Warning: Service "${svcName}" subscribes to event "${sub.event}" from "${sub.service}", but that event is emitted by "${emitter}". Check the service name in your @On decorator.`);
            }
        }
    }
}
|
|
454
|
+
// Phase 4: Connect plugins and inject into services
// pluginManager stays in scope for the /health route and shutdown() below.
let pluginManager = null;
try {
    // Plugin configuration arrives serialized through the worker env/config.
    const pluginNames = JSON.parse(workerConfig.plugins || "[]");
    const servicePluginMap = JSON.parse(workerConfig.servicePlugins || "{}");
    if (pluginNames.length > 0) {
        const { PluginManager } = (await import("../plugins/PluginManager.js"));
        pluginManager = new PluginManager();
        if (workerConfig.configPath) {
            // Re-import the user's config module to get the live plugin instances.
            const configMod = (await import(workerConfig.configPath));
            const config = (configMod.default ?? configMod);
            if (config.plugins) {
                let plugins = config.plugins;
                if (hostMeta) {
                    // Hosted mode: swap raw postgres/redis plugins for tenant-scoped
                    // variants, preserving the user's original plugin options.
                    const scopedPostgresModule = await import("../plugins/ScopedPostgres.js");
                    const scopedPostgres = scopedPostgresModule.scopedPostgres;
                    const scopedRedisModule = await import("../plugins/ScopedRedis.js");
                    const scopedRedis = scopedRedisModule.scopedRedis;
                    plugins = plugins.map((p) => {
                        if (p.name === "postgres")
                            return scopedPostgres(p._options ?? {});
                        if (p.name === "redis")
                            return scopedRedis(p._options ?? {});
                        return p;
                    });
                }
                pluginManager.register(plugins);
            }
        }
        // P14: Connect plugins for all services in parallel
        const pluginEntries = [...localServices.entries()];
        const pluginResults = await Promise.all(pluginEntries.map(async ([svcName, { service, ctx }]) => {
            const svcPlugins = servicePluginMap[svcName];
            const clients = await pluginManager.connectForService(svcPlugins, ctx);
            return { svcName, service, ctx, clients, svcPlugins };
        }));
        for (const { service, ctx, clients, svcPlugins } of pluginResults) {
            // Inject clients as properties on the service
            for (const [injectName, client] of clients) {
                if (injectName.startsWith("_"))
                    continue; // skip internal plugins (cors, etc.)
                service[injectName] = client;
            }
            // Apply plugin middleware
            const middleware = pluginManager.getMiddleware(svcPlugins);
            for (const mw of middleware) {
                ctx.router.use(mw);
            }
            // Attach websocket lifecycle hooks for this service
            ctx._wsPluginHooks = pluginManager.getWebSocketHooks(svcPlugins);
        }
    }
}
catch (err) {
    // Don't crash on plugin errors — log and continue
    console.error(`[ThreadForge] Plugin init failed for ${workerConfig.groupName}: ${err.message}`);
    if (err.stack)
        console.error(err.stack);
    // Also surface the failure through each service's own logger when possible.
    for (const [, { ctx }] of localServices) {
        try {
            ctx?.logger?.error(`Plugin init failed: ${err.message}`);
        }
        catch {
            // Swallow logging errors
        }
    }
}
|
|
521
|
+
// O4: Correct shutdown order — stop accepting connections first, then drain, then stop services, then disconnect plugins
// RT-C1: shutdown is a function declaration (hoisted) to avoid TDZ when called from IPC handler
// RT-C2: re-entrancy guard prevents double shutdown from SIGTERM + IPC race
/**
 * Gracefully tear down every service hosted by this worker.
 * @param {string} signal - label used only in the shutdown log line; it does
 *   not change the teardown behaviour.
 */
async function shutdown(signal) {
    // Re-entrancy guard (RT-C2): first caller wins, later calls are no-ops.
    if (shutdownInProgress)
        return;
    shutdownInProgress = true;
    for (const [name, { ctx }] of localServices) {
        ctx.logger.info(`Received ${signal}, shutting down ${name}...`);
    }
    // Step 1: Stop accepting new connections (server.close())
    const serverClosePromises = [];
    for (const [, { ctx }] of localServices) {
        if (ctx._server) {
            serverClosePromises.push(new Promise((resolve) => {
                // close() invokes its callback once existing connections finish.
                ctx._server.close(() => resolve());
            }));
        }
    }
    // Step 2: Wait for in-flight requests to drain (up to 5s)
    if (serverClosePromises.length > 0) {
        // Race against a 5s timer so slow clients cannot stall shutdown indefinitely.
        await Promise.race([Promise.all(serverClosePromises), new Promise((resolve) => setTimeout(resolve, 5000))]);
    }
    // Step 3: Stop services (onStop hooks, with 5s timeout per service)
    for (const [name, { service, ctx }] of localServices) {
        try {
            await Promise.race([
                service._stop(),
                new Promise((_, reject) => setTimeout(() => reject(new Error(`Service "${name}" stop timed out after 5s`)), 5000)),
            ]);
        }
        catch (err) {
            // A failed or timed-out stop never blocks the remaining services.
            ctx.logger.error("Shutdown error", { error: err.message });
        }
    }
    // Step 4: Disconnect all plugins
    if (pluginManager) {
        try {
            // Borrow any live context's logger for plugin teardown diagnostics.
            const errors = await pluginManager.disconnectAll(localServices.values().next().value?.ctx?.logger);
            if (errors.length > 0) {
                console.warn(`[ThreadForge] ${errors.length} plugins failed to disconnect cleanly`);
            }
        }
        catch (err) {
            console.warn(`[ThreadForge] Plugin disconnect error: ${err.message}`);
        }
    }
    // SUP-H4: Remove IPC message handler during shutdown to prevent
    // messages from reaching partially torn-down contexts.
    process.removeListener("message", ipcMessageHandler);
    // F-3: Allow event loop to drain naturally instead of process.exit(0)
    // so plugin disconnect and I/O flushes complete. The supervisor manages
    // worker lifecycle via cluster 'exit' events.
    if (process.connected && process.disconnect)
        process.disconnect();
}
|
|
577
|
+
// Wire IPC before services start so no supervisor messages are lost during startup.
// SUP-H4: keep a named reference so shutdown() can detach this exact listener.
const ipcMessageHandler = (msg, _handle) => {
    // Ignore anything that is not a typed message object.
    if (typeof msg !== "object" || msg === null || !msg.type)
        return;
    const ipcMsg = msg;
    switch (ipcMsg.type) {
        case "forge:shutdown":
            // Stop accepting new requests, let in-flight complete.
            shutdown("forge:shutdown");
            return;
        case "forge:init-socket":
        case "forge:socket-registry":
            // IPC-C3: socket messages handled only by WorkerChannelManager — skip here to avoid duplicates.
            return;
        case "forge:endpoint-update": {
            // REG-H1: topology updates pushed from Supervisor.
            const endpoints = ipcMsg.endpoints;
            if (endpoints && typeof endpoints === "object") {
                endpointResolver.applyEndpointUpdate(endpoints);
            }
            return;
        }
        case "forge:health-update": {
            // REG-H2: health status updates pushed from Supervisor.
            const { host, port, status } = ipcMsg;
            if (typeof host === "string" && typeof port === "number" && typeof status === "string") {
                endpointResolver.setHealthStatus(host, port, status);
            }
            return;
        }
        case "forge:health-check":
            for (const [, { ctx }] of localServices) {
                ctx._handleIPCMessage(ipcMsg);
            }
            return;
        case "forge:metrics-snapshot": {
            if (!process.send)
                return;
            try {
                // Concatenate each local context's metrics exposition text.
                const chunks = [];
                for (const [, { ctx }] of localServices) {
                    if (ctx?.metrics?.expose) {
                        chunks.push(ctx.metrics.expose());
                    }
                }
                process.send({
                    type: "forge:metrics-snapshot-response",
                    requestId: ipcMsg.requestId,
                    metrics: chunks.join("\n"),
                });
            }
            catch (err) {
                process.send({
                    type: "forge:metrics-snapshot-response",
                    requestId: ipcMsg.requestId,
                    error: err.message,
                });
            }
            return;
        }
        case "forge:message":
        case "forge:request":
        case "forge:response":
            // Fan point-to-point traffic out to every local context; each decides relevance.
            for (const [, { ctx }] of localServices) {
                ctx._handleIPCMessage(ipcMsg);
            }
            return;
        default:
            return;
    }
};
process.on("message", ipcMessageHandler);
// Announce readiness so the supervisor can (re)send any socket setup it may
// have dispatched before this listener was attached.
if (process.send) {
    process.send({ type: "forge:worker-ready", group: workerConfig.groupName });
}
|
|
649
|
+
// Phase 5: Start all services — track started services for cleanup on failure (RT-H1)
const startedServices = [];
try {
    for (const [name, { service, ctx }] of localServices) {
        // DEC-M2: Apply deferred validation wrappers at startup, not during contract inspection
        const entry = loaded.get(name);
        if (entry)
            applyStaticContractWrappers(entry.ServiceClass);
        await service._start();
        // Record successful starts so the catch block can roll back in reverse order.
        startedServices.push({ name, service, ctx });
        // Collect injected proxy property names ($name / $isLocal markers) for the log line.
        const proxyNames = Object.keys(Object.fromEntries(Object.entries(service).filter(([_k, v]) => v?.$name ||
            v?.$isLocal !== undefined)));
        // Reduce startup noise: emit framework startup metadata once per group.
        if (workerId === 0) {
            ctx.logger.info("Service started", {
                group: workerConfig.groupName,
                service: name,
                type: typeMap[name] ?? ServiceType.INTERNAL,
                port: ctx.port || null,
                worker: workerId,
                pid: process.pid,
                colocated: entries.length > 1 ? entries.map((e) => e.name) : undefined,
                proxies: proxyNames.length > 0 ? proxyNames : undefined,
            });
        }
    }
}
catch (startErr) {
    const typedStartErr = startErr;
    // Check if this is a fatal bind error (EPERM, EACCES, EADDRNOTAVAIL, EADDRINUSE)
    if (typedStartErr.fatalBindError) {
        // Log clear error message and exit without triggering restart loop
        console.error(`\n[ThreadForge] FATAL: ${typedStartErr.userMessage || typedStartErr.message}`);
        console.error(`[ThreadForge] Service group "${workerConfig.groupName}" cannot start. Worker will not restart.\n`);
        // Notify supervisor this is a fatal error (already sent via IPC from ForgeContext)
        // Exit with code 100 to signal fatal configuration error (supervisor checks this)
        process.exit(100);
    }
    // RT-H1: Stop already-started services in reverse order before re-throwing
    for (let i = startedServices.length - 1; i >= 0; i--) {
        const { name, service, ctx } = startedServices[i];
        try {
            ctx.logger.warn(`Rolling back service start for ${name} due to Phase 5 failure`);
            await service._stop();
        }
        catch (stopErr) {
            ctx.logger.error(`Rollback stop failed for ${name}`, { error: stopErr.message });
        }
        // Close any HTTP listener the service had already bound.
        try {
            if (ctx._server) {
                await new Promise((resolve) => ctx._server.close(() => resolve()));
            }
        }
        catch {
            // Swallow server close errors during rollback
        }
    }
    // Re-throw so bootstrap()'s caller sees the original failure.
    throw typedStartErr;
}
|
|
708
|
+
// Phase 6: ensure every service answers GET /health, even when no plugins are loaded.
for (const [, { ctx }] of localServices) {
    // Respect a user-defined /health route if the service already registered one.
    const hasHealthRoute = [...ctx.router.routes.values()].some((bucket) => bucket.some((route) => route.pattern === "/health"));
    if (hasHealthRoute) {
        continue;
    }
    ctx.router.get("/health", async (_req, res) => {
        const payload = {
            status: "ok",
            service: ctx.serviceName,
            pid: process.pid,
        };
        if (pluginManager) {
            payload.plugins = await pluginManager.healthCheck();
        }
        res.json(payload);
    });
}
|
|
725
|
+
// Report startup completion to the supervisor for worker-readiness aggregation.
if (process.send) {
    // A non-finite or non-positive port means this group bound no HTTP listener.
    const boundPort = Number.isFinite(port) && port > 0 ? port : null;
    process.send({
        type: "forge:group-ready",
        group: workerConfig.groupName,
        workerId,
        pid: process.pid,
        services: entries.map((svc) => svc.name),
        port: boundPort,
        ipcVersion: IPC_PROTOCOL_VERSION,
    });
}
|
|
737
|
+
// Signal handlers moved to early in bootstrap() — see below uncaughtException handler
|
|
738
|
+
}
|
|
739
|
+
// Launch the worker; a bootstrap failure is unrecoverable, so log context and exit(1).
bootstrap().catch((err) => {
    const services = workerConfig.serviceNames || "unknown";
    console.error(`[ThreadForge] Worker bootstrap failed for group "${workerConfig.groupName}" (services: ${services}):`, err);
    process.exit(1);
});
|
|
744
|
+
//# sourceMappingURL=worker-bootstrap.js.map
|