threadforge 0.1.0 → 0.1.1

This diff shows the changes between publicly released versions of the package as they appear in a supported public registry. It is provided for informational purposes only.
package/bin/forge.js CHANGED
@@ -26,8 +26,24 @@ const args = process.argv.slice(2);
  const command = args[0];
 
  const FORGE_CONFIG_NAMES = ["forge.config.js", "forge.config.mjs", "threadforge.config.js", "threadforge.config.mjs"];
+ const FORGE_PLATFORM_CONFIG_NAMES = ["forge.platform.js", "forge.platform.mjs", "forge.host.js", "forge.host.mjs"];
 
- async function findConfig() {
+ function resolveConfigSearchNames(commandName = command) {
+   if (["dev", "start", "build", "status", "stop"].includes(commandName)) {
+     return [...FORGE_CONFIG_NAMES, ...FORGE_PLATFORM_CONFIG_NAMES];
+   }
+   return [...FORGE_CONFIG_NAMES];
+ }
+
+ function printNoConfigError(commandName = command) {
+   const names = resolveConfigSearchNames(commandName);
+   console.error(" Error: No config file found in current directory.");
+   console.error(` Searched for: ${names.join(", ")}`);
+   console.error(" Run `forge init` to create one, or use `--config <path>` to specify manually.");
+ }
+
+ async function findConfig(commandName = command) {
+   const searchNames = resolveConfigSearchNames(commandName);
    const cwd = process.cwd();
 
    // Check for --config flag
@@ -41,7 +57,7 @@ async function findConfig() {
      return resolved;
    }
 
-   for (const name of FORGE_CONFIG_NAMES) {
+   for (const name of searchNames) {
      const fullPath = path.join(cwd, name);
      try {
        await fs.promises.access(fullPath);
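
Note: config lookup is now command-aware. Lifecycle commands ("dev", "start", "build", "status", "stop") also search the platform/host config names; every other command keeps the original list. A minimal illustration of the resulting behavior, assuming a directory that contains only a forge.platform.mjs:

    await findConfig("start");  // → ./forge.platform.mjs (platform names are searched)
    await findConfig("deploy"); // → null ("deploy" searches FORGE_CONFIG_NAMES only)
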
@@ -132,11 +148,9 @@ async function startFrontendDevSessions(config) {
  }
 
  async function cmdStart(isDev = false) {
-   const configPath = await findConfig();
+   const configPath = await findConfig(isDev ? "dev" : "start");
    if (!configPath) {
-     console.error(" Error: No config file found in current directory.");
-     console.error(` Searched for: ${FORGE_CONFIG_NAMES.join(", ")}`);
-     console.error(" Run `forge init` to create one, or use `--config <path>` to specify manually.");
+     printNoConfigError(isDev ? "dev" : "start");
      process.exit(1);
    }
 
@@ -261,7 +275,8 @@ function detectLocalThreadforgeDependency(cwd) {
 
  function resolveInitDependency(cwd, source = "auto") {
    if (source === "npm") {
-     return { spec: "threadforge", label: "npm" };
+     // Use npm dist-tag so generated package.json is directly installable.
+     return { spec: "latest", label: "npm" };
    }
 
    if (source === "github") {
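
Note: the comment suggests `spec` now feeds the dependency version in a generated package.json, which this hunk does not show; under that assumption, `forge init` with the npm source would emit a dist-tag that npm resolves on install:

    // Hypothetical generated package.json fragment:
    // "dependencies": { "threadforge": "latest" }
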
@@ -478,7 +493,7 @@ export default class ApiService extends Service {
  }
 
  async function resolveMetricsPort() {
-   const config = await findConfig().then(async (p) => {
+   const config = await findConfig(command).then(async (p) => {
      if (!p) return null;
      try { return await loadConfig(pathToFileURL(p).href); } catch { return null; }
    });
@@ -663,11 +678,9 @@ function printHelp() {
  }
 
  async function cmdBuild() {
-   const configPath = await findConfig();
+   const configPath = await findConfig("build");
    if (!configPath) {
-     console.error(" Error: No config file found in current directory.");
-     console.error(` Searched for: ${FORGE_CONFIG_NAMES.join(", ")}`);
-     console.error(" Run `forge init` to create one, or use `--config <path>` to specify manually.");
+     printNoConfigError("build");
      process.exit(1);
    }
 
@@ -759,11 +772,9 @@ async function cmdBuild() {
  }
 
  async function cmdDeploy() {
-   const configPath = await findConfig();
+   const configPath = await findConfig("deploy");
    if (!configPath) {
-     console.error(" Error: No config file found in current directory.");
-     console.error(` Searched for: ${FORGE_CONFIG_NAMES.join(", ")}`);
-     console.error(" Run `forge init` to create one, or use `--config <path>` to specify manually.");
+     printNoConfigError("deploy");
      process.exit(1);
    }
 
@@ -898,11 +909,9 @@ export default {
  }
 
  async function cmdGenerate() {
-   const configPath = await findConfig();
+   const configPath = await findConfig("generate");
    if (!configPath) {
-     console.error(" Error: No config file found in current directory.");
-     console.error(` Searched for: ${FORGE_CONFIG_NAMES.join(", ")}`);
-     console.error(" Run `forge init` to create one, or use `--config <path>` to specify manually.");
+     printNoConfigError("generate");
      process.exit(1);
    }
 
@@ -12,11 +12,9 @@
 
  import fs from "node:fs";
  import path from "node:path";
- import { fileURLToPath, pathToFileURL } from "node:url";
+ import { pathToFileURL } from "node:url";
 
  const PLATFORM_CONFIG_NAMES = ["forge.platform.js", "forge.platform.mjs"];
- const __dirname = path.dirname(fileURLToPath(import.meta.url));
- const LOCAL_PKG_PATH = path.resolve(__dirname, "..", "package.json");
 
  async function findPlatformConfig() {
    const cwd = process.cwd();
@@ -39,21 +37,50 @@ async function findPlatformConfig() {
   * @returns {string}
   */
  function computeImportPath(fromDir) {
-   try {
-     if (fs.existsSync(LOCAL_PKG_PATH)) {
-       const pkg = JSON.parse(fs.readFileSync(LOCAL_PKG_PATH, "utf8"));
-       if (pkg.name === "threadforge") {
-         const indexAbsolute = fs.realpathSync(path.resolve(__dirname, "..", "src", "index.js"));
-         const realFrom = fs.realpathSync(fromDir);
-         let rel = path.relative(realFrom, indexAbsolute);
-         if (!rel.startsWith(".")) rel = `./${rel}`;
-         return rel;
-       }
-     }
-   } catch {}
+   void fromDir;
    return "threadforge";
  }
 
+ function ensurePlatformPackageJson(cwd) {
+   const pkgPath = path.join(cwd, "package.json");
+   const defaultName = path.basename(cwd);
+   const defaultScripts = {
+     start: "forge platform start",
+     build: "forge build",
+     generate: "forge platform generate",
+   };
+
+   let pkg = {};
+   if (fs.existsSync(pkgPath)) {
+     try {
+       pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
+     } catch (err) {
+       throw new Error(`Invalid package.json: ${err.message}`);
+     }
+   } else {
+     pkg = {
+       name: defaultName,
+       version: "1.0.0",
+     };
+   }
+
+   if (!pkg.type) {
+     pkg.type = "module";
+   }
+
+   if (!pkg.scripts || typeof pkg.scripts !== "object" || Array.isArray(pkg.scripts)) {
+     pkg.scripts = {};
+   }
+
+   for (const [name, command] of Object.entries(defaultScripts)) {
+     if (!pkg.scripts[name]) {
+       pkg.scripts[name] = command;
+     }
+   }
+
+   fs.writeFileSync(pkgPath, `${JSON.stringify(pkg, null, 2)}\n`);
+ }
+
  async function loadPlatformConfig() {
    const configPath = await findPlatformConfig();
    if (!configPath) {
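
For reference, ensurePlatformPackageJson run in a fresh directory named my-platform (hypothetical name) writes exactly this file; when a package.json already exists, its fields are preserved and only missing type/scripts entries are filled in:

    {
      "name": "my-platform",
      "version": "1.0.0",
      "type": "module",
      "scripts": {
        "start": "forge platform start",
        "build": "forge build",
        "generate": "forge platform generate"
      }
    }
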
@@ -92,6 +119,8 @@ async function cmdPlatformInit() {
    const importPath = computeImportPath(cwd);
    const serviceImportPath = computeImportPath(path.join(cwd, "shared"));
 
+   ensurePlatformPackageJson(cwd);
+
    // Create directories
    for (const dir of ["shared", "apps"]) {
      fs.mkdirSync(path.join(cwd, dir), { recursive: true });
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "threadforge",
-   "version": "0.1.0",
+   "version": "0.1.1",
    "description": "Multi-threaded Node.js service runtime framework",
    "type": "module",
    "bin": {
@@ -62,8 +62,12 @@
      "pg": "^8.11.0"
    },
    "peerDependenciesMeta": {
-     "ioredis": { "optional": true },
-     "pg": { "optional": true }
+     "ioredis": {
+       "optional": true
+     },
+     "pg": {
+       "optional": true
+     }
    },
    "devDependencies": {
      "@biomejs/biome": "^2.3.15"
@@ -0,0 +1,26 @@
+ import { Service } from 'threadforge';
+
+ export default class IdentityService extends Service {
+   static contract = {
+     expose: ['getUser', 'getUserByEmail', 'createUser', 'listMembers'],
+     routes: [
+       { method: 'POST', path: '/login', handler: 'login' },
+       { method: 'POST', path: '/register', handler: 'register' },
+       { method: 'GET', path: '/users/:id', handler: 'getUserRoute' },
+       { method: 'GET', path: '/users', handler: 'listUsersRoute' },
+     ],
+   };
+
+   async onStart(ctx) {
+     ctx.logger.info('Identity service ready');
+   }
+
+   async login(_body) { return { error: 'Not implemented' }; }
+   async register(_body) { return { error: 'Not implemented' }; }
+   async getUserRoute(_body, params) { return this.getUser(params.id); }
+   async listUsersRoute() { return { error: 'Not implemented' }; }
+   async getUser(_userId) { return { error: 'Not implemented' }; }
+   async getUserByEmail(_email) { return { error: 'Not implemented' }; }
+   async createUser(_data) { return { error: 'Not implemented' }; }
+   async listMembers(_appId) { return { error: 'Not implemented' }; }
+ }
@@ -29,6 +29,10 @@ export class EndpointResolver {
 
    /** @type {Map<string, Object>} per-service routing strategies (optional override) */
    this._strategies = new Map();
+
+   // P-10: Cache wrapped endpoint entries to avoid per-resolve() allocation
+   /** @type {Map<string, Array<{key: string, host: string, port: number, remote: boolean}>>} */
+   this._wrappedCache = new Map();
  }
 
  /**
@@ -123,13 +127,17 @@
    // Delegate to configured strategy if present
    const strategy = this._strategies.get(serviceName);
    if (strategy) {
-     // Wrap endpoints as WorkerEntry-compatible objects for RoutingStrategy
-     const entries = endpoints.map(ep => ({
-       key: `${ep.host}:${ep.port}`,
-       host: ep.host,
-       port: ep.port,
-       remote: ep.remote,
-     }));
+     // P-10: Use cached wrapped entries to avoid per-call allocation
+     let entries = this._wrappedCache.get(serviceName);
+     if (!entries) {
+       entries = endpoints.map(ep => ({
+         key: `${ep.host}:${ep.port}`,
+         host: ep.host,
+         port: ep.port,
+         remote: ep.remote,
+       }));
+       this._wrappedCache.set(serviceName, entries);
+     }
      const picked = strategy.pick(entries, callContext);
      if (picked) {
        // Handle broadcast (returns array) — return first for resolve(), use all() for broadcast
@@ -180,6 +188,8 @@
    }
 
    this._endpoints.set(serviceName, existing);
+   // P-10: Invalidate wrapped cache when endpoints change
+   this._wrappedCache.delete(serviceName);
  }
 
  /**
@@ -195,6 +205,8 @@
    if (!existing) return;
 
    const filtered = existing.filter((e) => !(e.host === host && e.port === port));
+   // P-10: Invalidate wrapped cache when endpoints change
+   this._wrappedCache.delete(serviceName);
    if (filtered.length === 0) {
      this._endpoints.delete(serviceName);
      // CR-IPC-7: Clean up counter when all endpoints removed
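
The P-10 change is a memoize-plus-invalidate pairing: the wrapped array is rebuilt at most once per endpoint-set change instead of on every resolve(). The invariant distilled (names are illustrative, not the package API):

    const wrapped = new Map();
    function getEntries(service, endpoints) {
      let e = wrapped.get(service);
      if (!e) {
        e = endpoints.map((ep) => ({ key: `${ep.host}:${ep.port}`, host: ep.host, port: ep.port, remote: ep.remote }));
        wrapped.set(service, e);
      }
      return e;
    }
    // Every mutation of the endpoint set must drop the memo, or pick() sees stale workers:
    function onEndpointsChanged(service) { wrapped.delete(service); }
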
@@ -69,8 +69,13 @@ const BIND_ERROR_MESSAGES = {
  function validateInternalSignature(req, method, path) {
    const secret = process.env.FORGE_INTERNAL_SECRET;
    if (!secret) {
-     // In development, allow without signature; in production, reject
-     return process.env.NODE_ENV !== 'production';
+     if (process.env.NODE_ENV === 'production') return false;
+     // M-7: Warn in development when internal endpoints are unsigned
+     if (!validateInternalSignature._warned) {
+       console.warn('[ThreadForge] WARNING: FORGE_INTERNAL_SECRET is not set. Internal endpoints (/__forge/*) accept unsigned requests. Set FORGE_INTERNAL_SECRET or FORGE_ALLOW_UNSIGNED_INTERNAL=true to suppress.');
+       validateInternalSignature._warned = true;
+     }
+     return true;
    }
    const sig = req.headers['x-forge-internal-sig'];
    const ts = req.headers['x-forge-internal-ts'];
@@ -118,7 +123,7 @@ function verifyJwt(token) {
    const header = JSON.parse(Buffer.from(headerB64, 'base64url').toString());
    if (!header || typeof header !== 'object') return null;
    const alg = (header.alg ?? '').toUpperCase();
-   if (alg === 'NONE' || (alg !== 'HS256' && alg !== '')) return null;
+   if (alg !== 'HS256') return null;
  } catch {
    return null;
  }
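
The old predicate rejected "NONE" and anything that was neither "HS256" nor the empty string, so an empty alg slipped through; the new check is a strict allowlist. Side by side:

    const oldCheck = (alg) => !(alg === 'NONE' || (alg !== 'HS256' && alg !== '')); // true = accepted
    const newCheck = (alg) => alg === 'HS256';
    oldCheck('');      // true  (empty alg was accepted)
    newCheck('');      // false (only HS256 passes now)
    newCheck('HS256'); // true
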
@@ -1140,6 +1145,11 @@ export class ForgeContext {
   * Gracefully shut down.
   */
  async stop() {
+   // M-13: Stop IngressProtection timers if active
+   if (this.ingress) {
+     this.ingress.stop();
+   }
+
    // Close direct channels
    this.channels.destroy();
 
@@ -1674,14 +1684,13 @@ class Router {
  }
 
  _matchUncached(method, url) {
-   const parsed = new URL(url, "http://localhost");
-   const pathname = parsed.pathname;
-
    // Reject null bytes — path traversal / request smuggling vector
    if (url.includes("\0")) return null;
 
-   // Normalize path to collapse encoded traversals (e.g. ..%2F)
-   let normalized = new URL(pathname, "http://localhost").pathname;
+   const parsed = new URL(url, "http://localhost");
+   // P-3: Reuse the already-parsed pathname instead of constructing a second URL.
+   // URL constructor already normalizes encoded traversals (e.g. ..%2F).
+   let normalized = parsed.pathname;
    // Strip trailing slashes (except root "/")
    if (normalized.length > 1 && normalized.endsWith('/')) {
      normalized = normalized.slice(0, -1);
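
The single parse suffices because the WHATWG URL parser collapses dot segments, including percent-encoded dots, while parsing (Node.js behavior, to the best of our reading of the URL Standard):

    new URL('/a/../b', 'http://localhost').pathname;     // '/b'
    new URL('/a/%2e%2e/b', 'http://localhost').pathname; // '/b' (%2e is treated as '.')
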
@@ -89,6 +89,16 @@ export class MessageBus extends EventEmitter {
    channel.splice(idx, 1);
  }
 
+ // F-6: Reject pending requests dispatched to the dead worker immediately
+ // instead of making callers wait for the full timeout.
+ for (const [reqId, entry] of this.pendingRequests) {
+   if (entry.targetPid === pidOrThreadId) {
+     clearTimeout(entry.timer);
+     this.pendingRequests.delete(reqId);
+     entry.reject(new Error(`Worker ${pidOrThreadId} for "${serviceName}" exited while handling request`));
+   }
+ }
+
  if (channel.length === 0) {
    this.channels.delete(serviceName);
    this.rrIndex.delete(serviceName);
@@ -193,10 +203,11 @@ export class MessageBus extends EventEmitter {
      reject(new Error(`[MessageBus] Request to "${target}" timed out after ${timeoutMs}ms`));
    }, timeoutMs);
 
-   this.pendingRequests.set(requestId, { resolve, reject, timer });
-
    const idx = this.rrIndex.get(target);
    const worker = channel[idx % channel.length];
+
+   // F-6: Track target PID so pending requests can be rejected on worker death
+   this.pendingRequests.set(requestId, { resolve, reject, timer, targetPid: worker.pid });
    this.rrIndex.set(target, (idx + 1) % 1_000_000_000);
 
    try {
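
Behavioral difference in brief: before F-6, a request routed to a worker that then died sat in pendingRequests until its timer fired (up to timeoutMs); now callers whose entry matches the dead worker's PID are rejected immediately. A minimal standalone model of the fail-fast path (not the package API):

    const pending = new Map();
    function onWorkerExit(deadPid) {
      for (const [id, entry] of pending) {
        if (entry.targetPid !== deadPid) continue;
        clearTimeout(entry.timer);       // cancel the slow timeout path
        pending.delete(id);
        entry.reject(new Error(`Worker ${deadPid} exited while handling request`));
      }
    }
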
@@ -32,6 +32,17 @@ import { createHmac, randomBytes } from "node:crypto";
 
  const HASH_SALT = randomBytes(16).toString("hex");
 
+ // P-1: Fast non-cryptographic FNV-1a hash for runtime routing.
+ // HMAC-SHA256 is ~100x slower and unnecessary for load distribution.
+ function fnv1a(str) {
+   let hash = 0x811c9dc5 | 0; // FNV offset basis
+   for (let i = 0; i < str.length; i++) {
+     hash ^= str.charCodeAt(i);
+     hash = (hash * 0x01000193) | 0; // FNV prime
+   }
+   return hash >>> 0; // Convert to unsigned 32-bit
+ }
+
  /**
   * @typedef {Object} WorkerEntry
   * @property {string} key - "serviceName:workerId"
@@ -178,10 +189,11 @@ export class HashStrategy {
  }
 
  /**
-  * Keyed SHA-256 hash collision-resistant, prevents hash flooding attacks.
+  * P-1: Fast FNV-1a hash for runtime routing decisions.
+  * Salted to prevent predictable distribution from external input.
   */
  _simpleHash(str) {
-   return createHmac("sha256", HASH_SALT).update(str).digest().readUInt32BE(0);
+   return fnv1a(HASH_SALT + str);
  }
 
  get name() {
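
Callers are unaffected: _simpleHash still maps a routing key to an unsigned 32-bit integer. Assuming the usual hash-mod-N selection (the pick() body is not shown in this diff), sticky routing looks like:

    const workers = [{ key: 'api:0' }, { key: 'api:1' }, { key: 'api:2' }]; // hypothetical set
    const pick = (routingKey) => workers[fnv1a(HASH_SALT + routingKey) % workers.length];
    pick('user-42'); // deterministic per key while the worker set is stable
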
@@ -195,6 +207,7 @@ export class LeastPendingStrategy {
  constructor() {
    /** @type {Map<string, number>} key → pending count */
    this._pending = new Map();
+   this._lastWorkerCount = 0;
  }
 
  /**
@@ -204,8 +217,8 @@
    if (workers.length === 0) return null;
    if (workers.length === 1) return workers[0];
 
-   // Prune stale entries for workers no longer in the current set
-   if (this._pending.size > 0) {
+   // P-9: Only prune when worker set changes, not on every pick()
+   if (this._pending.size > 0 && workers.length !== this._lastWorkerCount) {
      const activeKeys = new Set(workers.map(w => w.key));
      for (const key of this._pending.keys()) {
        if (!activeKeys.has(key)) {
@@ -213,6 +226,7 @@
        }
      }
    }
+   this._lastWorkerCount = workers.length;
 
    let best = workers[0];
    let bestPending = this._pending.get(best.key) ?? 0;
@@ -483,7 +483,9 @@ export class Supervisor extends EventEmitter {
  }
 
  // H-5: Track heartbeat responses from workers
- if (msg?.type === "forge:heartbeat-response") {
+ // Workers respond to forge:health-check with forge:health-response,
+ // so we accept both message types for heartbeat tracking.
+ if (msg?.type === "forge:heartbeat-response" || msg?.type === "forge:health-response") {
    this._lastHeartbeat.set(worker.id, Date.now());
    return;
  }
@@ -1152,9 +1154,16 @@
 
  _status() {
    const groups = [];
+   const liveWorkersByService = {};
+
    for (const [groupName, workerIds] of this.groupWorkers) {
      const group = this.groups[groupName];
      const pids = workerIds.map((wid) => cluster.workers[wid]?.process?.pid).filter(Boolean);
+     const liveWorkers = workerIds.length;
+
+     for (const svc of group.services) {
+       liveWorkersByService[svc.name] = liveWorkers;
+     }
 
      groups.push({
        group: groupName,
@@ -1163,11 +1172,45 @@
          type: s.type,
          port: s.port,
        })),
-       workers: workerIds.length,
+       workers: liveWorkers,
        pids,
      });
    }
 
+   const topology = this.registry.topology();
+   for (const [serviceName, liveWorkers] of Object.entries(liveWorkersByService)) {
+     const existing = Array.isArray(topology[serviceName]) ? topology[serviceName] : [];
+     let hasLocalEntry = false;
+
+     const updated = existing.map((entry) => {
+       const isLocalEntry = entry?.nodeId === this.registry.nodeId
+         || entry?.transport === "local"
+         || entry?.transport === "colocated";
+
+       if (!isLocalEntry) return entry;
+
+       hasLocalEntry = true;
+       return {
+         ...entry,
+         workers: liveWorkers,
+         status: liveWorkers > 0 ? (entry.status ?? "healthy") : "unhealthy",
+       };
+     });
+
+     if (!hasLocalEntry) {
+       updated.unshift({
+         nodeId: this.registry.nodeId,
+         host: this.registry.host,
+         transport: "local",
+         status: liveWorkers > 0 ? "healthy" : "unhealthy",
+         cpu: 0,
+         workers: liveWorkers,
+       });
+     }
+
+     topology[serviceName] = updated;
+   }
+
    return {
      supervisorPid: process.pid,
      uptime: process.uptime(),
@@ -1184,7 +1227,7 @@
        .filter((s) => s.port)
        .map((s) => s.port),
      messageBus: this.messageBus.stats(),
-     topology: this.registry.topology(),
+     topology,
      scalingRecommendations: this.scaleAdvisor.recommendations,
    };
  }
@@ -523,6 +523,19 @@ export function defineServices(servicesMap, options = {}) {
  // M3: Detect group-level circular dependencies
  detectGroupCycles(services, groups);
 
+ // M-14: Warn on unknown option keys to catch typos early
+ const KNOWN_OPTIONS = new Set([
+   'metricsPort', 'logging', 'watch', 'registryMode', 'host', 'httpBasePort',
+   'ingress', 'plugins', 'frontendPlugins', 'sites',
+   // Internal keys used by platform/host modes
+   '_configUrl', '_isHostMode', '_isPlatformMode', '_hostMeta', '_hostMetaJSON',
+ ]);
+ for (const key of Object.keys(options)) {
+   if (!KNOWN_OPTIONS.has(key) && !key.startsWith('_')) {
+     console.warn(`[ThreadForge] Unknown option "${key}" in defineServices(). Known options: ${[...KNOWN_OPTIONS].filter(k => !k.startsWith('_')).join(', ')}`);
+   }
+ }
+
  // Only pass through known option keys to prevent typos polluting the config
  // A12: Namespace internal fields into _internal sub-object
  const result = {
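
Example of the new warning, triggered by a hypothetical typo:

    defineServices(services, { metricPort: 9090 }); // typo for metricsPort
    // → [ThreadForge] Unknown option "metricPort" in defineServices(). Known options: metricsPort, logging, watch, ...
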
@@ -590,6 +590,11 @@ function createProxiedMethod(
    signal: AbortSignal.timeout(effectiveTimeout),
  });
 
+ // M-11: Enforce response body size limit to prevent OOM from oversized responses
+ const contentLength = resp.headers.get('content-length');
+ if (contentLength && parseInt(contentLength, 10) > 10 * 1024 * 1024) {
+   throw new Error(`Response from ${targetHost}:${targetPort} exceeds 10MB limit`);
+ }
  const data = await resp.json();
  if (data.error) {
    const err = new Error(data.error);
package/src/index.js CHANGED
@@ -6,6 +6,8 @@
 
  // Core
  export { defineServices } from "./core/config.js";
+ export { defineHost } from "./core/host-config.js";
+ export { definePlatform } from "./core/platform-config.js";
  export { ForgeContext } from "./core/ForgeContext.js";
  export { RequestContext } from "./core/RequestContext.js";
 
@@ -98,7 +98,8 @@ export async function _mkRedis(url, ctx) {
  let socket,
    connected = false,
    rq = [],
-   buf = Buffer.alloc(0);
+   bufChunks = [],
+   bufTotalLen = 0;
 
  const CRLF = Buffer.from("\r\n");
 
@@ -137,11 +138,13 @@
    ctx.logger.warn(`Redis socket error: ${err.message}`);
  });
  socket.on("data", (d) => {
-   if (buf.length + d.length > MAX_REDIS_BUFFER) {
+   if (bufTotalLen + d.length > MAX_REDIS_BUFFER) {
      socket.destroy(new Error('Redis response buffer overflow'));
      return;
    }
-   buf = Buffer.concat([buf, d]);
+   // P-7: Accumulate chunks in array, concat only when parsing needs contiguous buffer
+   bufChunks.push(d);
+   bufTotalLen += d.length;
    flush();
  });
  socket.on("close", () => {
@@ -150,7 +153,8 @@
    // Reject pending requests
    const pending = rq.splice(0);
    for (const r of pending) r.no(new Error("Redis connection closed"));
-   buf = Buffer.alloc(0);
+   bufChunks = [];
+   bufTotalLen = 0;
 
    if (intentionalClose) return;
    scheduleReconnect();
@@ -195,13 +199,17 @@
  const MAX_RESP_BULK = 16 * 1024 * 1024; // 16MB
 
  function flush() {
+   // P-7: Concat accumulated chunks into a single buffer for parsing
+   if (bufChunks.length === 0 || rq.length === 0) return;
+   let buf = bufChunks.length === 1 ? bufChunks[0] : Buffer.concat(bufChunks, bufTotalLen);
+   bufChunks = [];
+   bufTotalLen = 0;
    while (buf.length && rq.length) {
      let r;
      try {
        r = parse(buf, 0);
      } catch (err) {
        // Unknown RESP type or parse error — reject current request and reset buffer
-       buf = Buffer.alloc(0);
        rq.shift().no(err);
        return;
      }
@@ -209,6 +217,11 @@ export async function _mkRedis(url, ctx) {
      buf = r.rem;
      rq.shift().ok(r.val);
    }
+   // Keep remaining unparsed data for next flush
+   if (buf.length > 0) {
+     bufChunks.push(buf);
+     bufTotalLen = buf.length;
+   }
  }
  function parse(d, depth) {
    if (!d.length) return null;
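
The chunk-list pattern in isolation: pushing each 'data' chunk is O(1), and one Buffer.concat with a precomputed total length runs per flush, instead of re-copying the entire accumulated buffer on every chunk (quadratic overall). A standalone sketch of the same accumulator, independent of the Redis client above:

    class ChunkBuffer {
      constructor() { this.chunks = []; this.total = 0; }
      push(d) { this.chunks.push(d); this.total += d.length; } // O(1) per chunk
      take() { // single concat per flush
        const buf = this.chunks.length === 1 ? this.chunks[0] : Buffer.concat(this.chunks, this.total);
        this.chunks = []; this.total = 0;
        return buf;
      }
      putBack(rest) { if (rest.length) { this.chunks.push(rest); this.total = rest.length; } } // unparsed tail
    }
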
@@ -342,7 +355,8 @@
  // ─── Subscription support (lazy separate connection) ───
  _subSocket: null,
  _subConnected: false,
- _subBuf: Buffer.alloc(0),
+ _subBufChunks: [],
+ _subBufTotalLen: 0,
  _subCallbacks: new Map(), // channel → Set<callback>
  _psubCallbacks: new Map(), // pattern → Set<callback>
  _subSubscribedChannels: new Set(),
@@ -369,11 +383,13 @@
  });
  self._subSocket.on("data", (d) => {
    // REL-C1: Guard against unbounded sub buffer growth (same limit as main connection)
-   if (self._subBuf.length + d.length > MAX_REDIS_BUFFER) {
+   if (self._subBufTotalLen + d.length > MAX_REDIS_BUFFER) {
      self._subSocket.destroy(new Error('Redis sub response buffer overflow'));
      return;
    }
-   self._subBuf = Buffer.concat([self._subBuf, d]);
+   // P-8: Chunk-list pattern to avoid O(n^2) Buffer.concat
+   self._subBufChunks.push(d);
+   self._subBufTotalLen += d.length;
    self._flushSub();
  });
  self._subSocket.on("close", () => {
@@ -382,7 +398,8 @@
    self._subSubscribedPatterns.clear();
    const pending = self._subRq.splice(0);
    for (const r of pending) r.no(new Error("Redis sub connection closed"));
-   self._subBuf = Buffer.alloc(0);
+   self._subBufChunks = [];
+   self._subBufTotalLen = 0;
    self._scheduleSubReconnect();
  });
  });
@@ -461,16 +478,20 @@
  },
 
  _flushSub() {
-   while (this._subBuf.length) {
+   // P-8: Concat accumulated chunks for parsing
+   if (this._subBufChunks.length === 0) return;
+   let buf = this._subBufChunks.length === 1 ? this._subBufChunks[0] : Buffer.concat(this._subBufChunks, this._subBufTotalLen);
+   this._subBufChunks = [];
+   this._subBufTotalLen = 0;
+   while (buf.length) {
      let r;
      try {
-       r = parse(this._subBuf, 0);
+       r = parse(buf, 0);
      } catch {
-       this._subBuf = Buffer.alloc(0);
        return;
      }
      if (!r) break;
-     this._subBuf = r.rem;
+     buf = r.rem;
      const val = r.val;
      // Messages are arrays: ["message", channel, data] or ["pmessage", pattern, channel, data]
      if (Array.isArray(val)) {
@@ -504,6 +525,11 @@
      // Other responses (OK from AUTH/SELECT)
      if (this._subRq.length) this._subRq.shift().ok(val);
    }
+   // Keep remaining unparsed data for next flush
+   if (buf.length > 0) {
+     this._subBufChunks.push(buf);
+     this._subBufTotalLen = buf.length;
+   }
  },
 
  async subscribe(channel, callback) {
@@ -542,7 +568,8 @@
    clientRef._psubCallbacks.clear();
    clientRef._subSubscribedChannels.clear();
    clientRef._subSubscribedPatterns.clear();
-   clientRef._subBuf = Buffer.alloc(0);
+   clientRef._subBufChunks = [];
+   clientRef._subBufTotalLen = 0;
    clientRef._subRq.length = 0;
  }
  return new Promise((resolve) => {
@@ -195,10 +195,11 @@ export class ServiceRegistry extends EventEmitter {
   * In multicast/external mode, connects to peers.
   */
  async start() {
-   // Start heartbeat
+   // Start heartbeat with jitter to prevent thundering herd across nodes
+   const jitter = Math.floor(Math.random() * this.heartbeatIntervalMs * 0.2);
    this._heartbeatTimer = setInterval(() => {
      this._sendHeartbeats();
-   }, this.heartbeatIntervalMs);
+   }, this.heartbeatIntervalMs + jitter);
    this._heartbeatTimer.unref();
 
    // Start reaper (remove stale registrations)
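
Worked example: the jitter is drawn once per process, so each node settles on a fixed period. With a 5000 ms interval (value assumed purely for illustration):

    const heartbeatIntervalMs = 5000;                                     // assumed
    const jitter = Math.floor(Math.random() * heartbeatIntervalMs * 0.2); // 0..999
    // effective period: 5000..5999 ms, constant for this node's lifetime,
    // so nodes started together drift apart instead of heartbeating in lockstep
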
@@ -521,7 +521,10 @@ async function bootstrap() {
    }
  }
 
- process.exit(0);
+ // F-3: Allow event loop to drain naturally instead of process.exit(0)
+ // so plugin disconnect and I/O flushes complete. The supervisor manages
+ // worker lifecycle via cluster 'exit' events.
+ if (process.connected) process.disconnect();
 }
 
 // Wire IPC before starting so no messages are lost during startup
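
process.disconnect() and process.connected are the standard Node.js IPC APIs on cluster/child workers; once the channel is closed and no servers, sockets, or timers remain, the worker's event loop drains and the process exits with code 0 on its own. The guard matters because disconnect() throws when the channel is already closed. Rough shutdown ordering this enables (server and plugins are hypothetical stand-ins, not the package API):

    server.close();                                  // stop accepting new work
    for (const p of plugins) await p.disconnect?.(); // let plugin I/O flush
    if (process.connected) process.disconnect();     // detach IPC; loop drains, clean exit
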