threadforge 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/LICENSE +21 -0
  2. package/README.md +152 -0
  3. package/bin/forge.js +1050 -0
  4. package/bin/host-commands.js +344 -0
  5. package/bin/platform-commands.js +570 -0
  6. package/package.json +71 -0
  7. package/shared/auth.js +475 -0
  8. package/src/core/DirectMessageBus.js +364 -0
  9. package/src/core/EndpointResolver.js +247 -0
  10. package/src/core/ForgeContext.js +2227 -0
  11. package/src/core/ForgeHost.js +122 -0
  12. package/src/core/ForgePlatform.js +145 -0
  13. package/src/core/Ingress.js +768 -0
  14. package/src/core/Interceptors.js +420 -0
  15. package/src/core/MessageBus.js +310 -0
  16. package/src/core/Prometheus.js +305 -0
  17. package/src/core/RequestContext.js +413 -0
  18. package/src/core/RoutingStrategy.js +316 -0
  19. package/src/core/Supervisor.js +1306 -0
  20. package/src/core/ThreadAllocator.js +196 -0
  21. package/src/core/WorkerChannelManager.js +879 -0
  22. package/src/core/config.js +624 -0
  23. package/src/core/host-config.js +311 -0
  24. package/src/core/network-utils.js +166 -0
  25. package/src/core/platform-config.js +308 -0
  26. package/src/decorators/ServiceProxy.js +899 -0
  27. package/src/decorators/index.js +571 -0
  28. package/src/deploy/NginxGenerator.js +865 -0
  29. package/src/deploy/PlatformManifestGenerator.js +96 -0
  30. package/src/deploy/RouteManifestGenerator.js +112 -0
  31. package/src/deploy/index.js +984 -0
  32. package/src/frontend/FrontendDevLifecycle.js +65 -0
  33. package/src/frontend/FrontendPluginOrchestrator.js +187 -0
  34. package/src/frontend/SiteResolver.js +63 -0
  35. package/src/frontend/StaticMountRegistry.js +90 -0
  36. package/src/frontend/index.js +5 -0
  37. package/src/frontend/plugins/index.js +2 -0
  38. package/src/frontend/plugins/viteFrontend.js +79 -0
  39. package/src/frontend/types.js +35 -0
  40. package/src/index.js +56 -0
  41. package/src/internals.js +31 -0
  42. package/src/plugins/PluginManager.js +537 -0
  43. package/src/plugins/ScopedPostgres.js +192 -0
  44. package/src/plugins/ScopedRedis.js +142 -0
  45. package/src/plugins/index.js +1729 -0
  46. package/src/registry/ServiceRegistry.js +796 -0
  47. package/src/scaling/ScaleAdvisor.js +442 -0
  48. package/src/services/Service.js +195 -0
  49. package/src/services/worker-bootstrap.js +676 -0
  50. package/src/templates/auth-service.js +65 -0
  51. package/src/templates/identity-service.js +75 -0
@@ -0,0 +1,364 @@
1
+ import { EventEmitter } from "node:events";
2
+ import crypto from "node:crypto";
3
+ import fs from "node:fs";
4
+ import os from "node:os";
5
+ import path from "node:path";
6
+
7
// Socket directories still owned by live DirectMessageBus instances;
// swept by the process "exit" hook so stale UDS files don't accumulate.
const EXIT_SOCKET_DIRS = new Set();
// Guard so the process "exit" cleanup hook is installed at most once.
let EXIT_CLEANUP_INSTALLED = false;
9
+
10
/**
 * Best-effort removal of a socket directory and everything inside it.
 * Never throws: files already deleted, permission races, or a vanished
 * directory during shutdown are all silently tolerated.
 *
 * @param {string} socketDir - Absolute path of the directory to remove.
 */
function cleanupSocketDir(socketDir) {
  try {
    if (!fs.existsSync(socketDir)) return;
    const entries = fs.readdirSync(socketDir);
    for (const entry of entries) {
      try {
        fs.unlinkSync(path.join(socketDir, entry));
      } catch {
        // ignore — the file may already be gone
      }
    }
    try {
      fs.rmdirSync(socketDir);
    } catch {
      // ignore — directory may be non-empty or already removed
    }
  } catch {
    // ignore — existsSync/readdirSync failures are non-fatal at shutdown
  }
}
20
+
21
/**
 * Install a one-time process "exit" hook that sweeps every socket
 * directory still registered in EXIT_SOCKET_DIRS.
 * Idempotent: repeated calls install the hook only once.
 */
function installExitCleanupHook() {
  if (EXIT_CLEANUP_INSTALLED) {
    return;
  }
  EXIT_CLEANUP_INSTALLED = true;
  const sweep = () => {
    EXIT_SOCKET_DIRS.forEach(cleanupSocketDir);
    EXIT_SOCKET_DIRS.clear();
  };
  process.once("exit", sweep);
}
31
+
32
/**
 * Classify an error as an expected IPC/socket teardown (peer exited,
 * channel already closed) rather than a genuine fault worth logging.
 * Checks well-known errno / Node error codes first, then falls back to
 * message substrings for errors that carry no code.
 *
 * @param {Error|object|null|undefined} err
 * @returns {boolean} true when the error is a benign channel-close.
 */
function isExpectedChannelCloseError(err) {
  if (!err) return false;

  const expectedCodes = [
    "EPIPE",
    "ECONNRESET",
    "ERR_IPC_CHANNEL_CLOSED",
    "ERR_IPC_DISCONNECTED",
  ];
  if (expectedCodes.includes(err.code)) return true;

  const message = String(err.message ?? "").toLowerCase();
  const expectedFragments = [
    "channel closed",
    "ipc channel is already disconnected",
    "broken pipe",
  ];
  return expectedFragments.some((fragment) => message.includes(fragment));
}
50
+
51
/**
 * Report whether worker.send() can still be called safely on a handle.
 * Accepts cluster.Worker-style handles (isDead/isConnected probes) as
 * well as handles exposing only process.connected; any probe the handle
 * lacks is treated as "still sendable".
 *
 * @param {object|null|undefined} worker
 * @returns {boolean}
 */
function isWorkerSendable(worker) {
  if (!worker) return false;
  const dead = typeof worker.isDead === "function" && worker.isDead();
  if (dead) return false;
  const disconnected =
    typeof worker.isConnected === "function" && !worker.isConnected();
  if (disconnected) return false;
  return worker.process?.connected !== false;
}
58
+
59
+ /**
60
+ * DirectMessageBus — Unix Domain Socket Mesh (Supervisor Side)
61
+ *
62
+ * The fundamental problem: cluster.Worker.send() can only transfer
63
+ * sockets/servers, not MessagePort objects. So we can't use MessageChannel
64
+ * for direct worker-to-worker communication in cluster mode.
65
+ *
66
+ * Solution: Each worker opens a Unix domain socket server. Workers connect
67
+ * directly to each other via these sockets. The supervisor only tells
68
+ * workers WHERE to connect (socket paths), then gets out of the way.
69
+ *
70
+ * Architecture:
71
+ *
72
+ * SETUP (supervisor involved briefly):
73
+ * 1. Each worker starts a UDS server at /tmp/forge-{pid}/{service}-{worker}.sock
74
+ * 2. Worker reports its socket path to supervisor via IPC
75
+ * 3. Supervisor broadcasts the full socket registry to all workers
76
+ * 4. Workers establish direct connections to each other
77
+ *
78
+ * RUNTIME (supervisor NOT involved in message routing):
79
+ * Worker A ──UDS──► Worker B (direct, length-prefixed JSON)
80
+ * Worker B ──UDS──► Worker A
81
+ *
82
+ * Supervisor only handles:
83
+ * - Socket path registry distribution (one-time + on new workers)
84
+ * - Health checks (periodic pull, not per-message)
85
+ * - Worker lifecycle (restart, scale)
86
+ */
87
export class DirectMessageBus extends EventEmitter {
  /**
   * Create the bus: allocate a unique per-process socket directory and
   * register it for cleanup on process exit.
   */
  constructor() {
    super();

    /** @type {Map<string, Array<{id: number, worker: object, mode: string, socketPath?: string}>>} */
    this.workers = new Map();

    /** @type {Map<string, string>} "serviceName:workerId" → socket path */
    this.socketRegistry = new Map();

    /** @type {Set<number>} Track registered worker IDs to prevent duplicate listeners */
    this._registeredWorkerIds = new Set();

    /** @type {WeakMap<object, Function>} Shared per-worker error handlers */
    this._workerErrorHandlers = new WeakMap();

    /** @type {Map<string, Set<string>>} CR-IPC-9: worker key → set of connected peer keys */
    this._connections = new Map();

    // S-IPC-2: Random suffix prevents socket path prediction
    this._socketDir = path.join(os.tmpdir(), `forge-${process.pid}-${crypto.randomBytes(4).toString('hex')}`);
    // Debounce timer handle for registry broadcasts (see _scheduleBroadcast).
    this._broadcastTimer = null;
    try {
      fs.mkdirSync(this._socketDir, { recursive: true });
    } catch (e) {
      // recursive mkdir shouldn't raise EEXIST; anything else is logged but non-fatal
      if (e.code !== "EEXIST") console.error("[DirectMessageBus] mkdirSync failed:", e.message);
    }
    EXIT_SOCKET_DIRS.add(this._socketDir);
    installExitCleanupHook();
  }

  /** @returns {string} Absolute path of this bus's UDS directory. */
  get socketDir() {
    return this._socketDir;
  }

  /**
   * Register a worker. Tell it to start a UDS server, then
   * broadcast the updated registry to all workers.
   *
   * @param {string} serviceName - Service the worker belongs to.
   * @param {object} worker - cluster.Worker ("cluster" mode) or worker_threads
   *   Worker (any other mode); only `id`/`threadId`, `on`, `off`, and `send`
   *   are used here.
   * @param {string} [mode="cluster"] - Selects which id field identifies the worker.
   */
  registerWorker(serviceName, worker, mode = "cluster") {
    if (!this.workers.has(serviceName)) {
      this.workers.set(serviceName, []);
    }

    // cluster workers expose `id`; thread workers expose `threadId`
    const workerId = mode === "cluster" ? worker.id : worker.threadId;

    const entry = {
      id: workerId,
      worker,
      mode,
      socketPath: null,
    };

    this.workers.get(serviceName).push(entry);

    // Attach one shared error listener per worker so late IPC channel errors
    // during churn don't surface as unhandled EventEmitter errors.
    // NOTE(review): this "error" listener is never removed in unregisterWorker;
    // presumably harmless because the worker object dies with its listeners — confirm.
    if (!this._workerErrorHandlers.has(worker)) {
      const errorHandler = (err) => {
        if (!isExpectedChannelCloseError(err)) {
          console.error("[DirectMessageBus] Worker error:", err?.message ?? err);
        }
      };
      this._workerErrorHandlers.set(worker, errorHandler);
      worker.on("error", errorHandler);
    }
    entry._errorHandler = this._workerErrorHandlers.get(worker);

    // Prevent duplicate message listener registration
    // If workerId is already registered (e.g., crashed worker's ID reused), clean up stale listeners
    if (this._registeredWorkerIds.has(workerId)) {
      // Find and clean up stale entry with this workerId across all services
      for (const [svcName, entries] of this.workers) {
        // Only entries backed by a DIFFERENT worker object are stale.
        const staleIdx = entries.findIndex((e) => e.id === workerId && e.worker !== worker);
        if (staleIdx !== -1) {
          const stale = entries[staleIdx];
          if (stale._messageHandler) stale.worker.off("message", stale._messageHandler);
          if (stale.socketPath) {
            try { fs.unlinkSync(stale.socketPath); } catch {}
            this.socketRegistry.delete(`${svcName}:${workerId}`);
          }
          entries.splice(staleIdx, 1);
          // Deleting during Map iteration is safe in JS.
          if (entries.length === 0) this.workers.delete(svcName);
        }
      }
      this._registeredWorkerIds.delete(workerId);
    }
    this._registeredWorkerIds.add(workerId);

    // Listen for supervisor-level IPC from this worker
    const messageHandler = (msg) => {
      if (!msg || !msg.type) return;

      switch (msg.type) {
        case "forge:socket-ready":
          // In colocated groups a worker may host multiple services, each with its
          // own socket. Ignore ready messages for sibling services on this entry.
          if (msg.serviceName && msg.serviceName !== serviceName) break;
          entry.socketPath = msg.socketPath;
          this.socketRegistry.set(`${serviceName}:${msg.workerId}`, msg.socketPath);
          this._scheduleBroadcast();
          break;

        case "forge:worker-ready":
          // Worker (re)announced itself — resend socket-dir instructions and
          // the current registry so it can (re)build its mesh connections.
          this._sendInitSocket(worker, serviceName, entry.id);
          this._sendRegistryTo(worker);
          break;

        case "forge:channel-ready": {
          // CR-IPC-9: Track established connections
          const workerKey = `${serviceName}:${entry.id}`;
          if (!this._connections.has(workerKey)) {
            this._connections.set(workerKey, new Set());
          }
          if (msg.peerKey) {
            this._connections.get(workerKey).add(msg.peerKey);
          }
          break;
        }

        case "forge:metric":
        case "forge:log":
        case "forge:health-response": {
          // Re-emit as local events ("metric" / "log" / "health-response")
          // tagged with the originating service.
          const eventName = msg.type.replace("forge:", "");
          this.emit(eventName, { service: serviceName, ...msg });
          break;
        }
      }
    };
    entry._messageHandler = messageHandler;
    worker.on("message", messageHandler);

    // Tell the new worker to start its UDS server
    this._sendInitSocket(worker, serviceName, entry.id);
    this._sendRegistryTo(worker);
  }

  /**
   * Debounced registry broadcast: coalesces bursts of socket-ready
   * messages (e.g. many workers starting at once) into one broadcast.
   * The 50–100 ms jittered delay spreads out supervisor work.
   */
  _scheduleBroadcast() {
    // CR-IPC-5: Don't cancel existing timer — let the first registration broadcast on schedule
    if (this._broadcastTimer) return;
    this._broadcastTimer = setTimeout(() => {
      this._broadcastTimer = null;
      this._broadcastRegistry();
    }, 50 + Math.floor(Math.random() * 50));
  }

  /**
   * Instruct a worker to start its UDS server inside this bus's socket dir.
   * Silently skips workers whose IPC channel is already gone.
   */
  _sendInitSocket(worker, serviceName, workerId) {
    if (!isWorkerSendable(worker)) return;
    try {
      worker.send({
        type: "forge:init-socket",
        socketDir: this._socketDir,
        serviceName,
        workerId,
      });
    } catch (e) {
      if (!isExpectedChannelCloseError(e)) {
        console.error("[DirectMessageBus] Failed to send init-socket:", e.message);
      }
    }
  }

  /**
   * Send the full socket registry (service:workerId → socket path) to one
   * worker so it can connect directly to its peers.
   */
  _sendRegistryTo(worker) {
    if (!isWorkerSendable(worker)) return;
    try {
      const msg = {
        type: "forge:socket-registry",
        registry: Object.fromEntries(this.socketRegistry),
      };
      // S-IPC-1: Include cluster secret for handshake authentication
      const secret = process.env.FORGE_CLUSTER_SECRET;
      if (secret) {
        msg.secret = secret;
      }
      worker.send(msg);
    } catch (e) {
      if (!isExpectedChannelCloseError(e)) {
        console.error("[DirectMessageBus] Failed to send registry:", e.message);
      }
    }
  }

  /** Push the current socket registry to every registered worker. */
  _broadcastRegistry() {
    for (const [, workers] of this.workers) {
      for (const entry of workers) {
        this._sendRegistryTo(entry.worker);
      }
    }
  }

  /**
   * Remove a worker: detach its message listener, delete its socket file
   * and registry entry, and (unless suppressed) broadcast the shrunken
   * registry to the remaining workers.
   *
   * @param {string} serviceName
   * @param {number} workerId
   * @param {{suppressBroadcast?: boolean}} [options] - Set suppressBroadcast
   *   when tearing down many workers at once (see unregisterService callers).
   */
  unregisterWorker(serviceName, workerId, options = {}) {
    const workers = this.workers.get(serviceName);
    if (!workers) return;

    const idx = workers.findIndex((w) => w.id === workerId);
    if (idx === -1) return;

    const entry = workers[idx];
    if (entry._messageHandler) {
      entry.worker.off("message", entry._messageHandler);
    }
    if (entry.socketPath) {
      try {
        fs.unlinkSync(entry.socketPath);
      } catch (e) {
        // ENOENT is fine — the worker may have cleaned up its own socket
        if (e.code !== "ENOENT") console.error("[DirectMessageBus] unlink failed:", e.message);
      }
      this.socketRegistry.delete(`${serviceName}:${workerId}`);
    }
    workers.splice(idx, 1);

    // Clean up the registered worker ID tracking
    this._registeredWorkerIds.delete(workerId);

    if (workers.length === 0) {
      this.workers.delete(serviceName);
    }

    if (!options.suppressBroadcast && this.workers.size > 0) {
      this._broadcastRegistry();
    }
  }

  /**
   * Remove every worker of a service. Iterates over a snapshot because
   * unregisterWorker mutates the underlying array.
   */
  unregisterService(serviceName) {
    const workers = [...(this.workers.get(serviceName) ?? [])];
    for (const w of workers) {
      this.unregisterWorker(serviceName, w.id);
    }
  }

  /**
   * Ask every sendable worker for a health report. Responses arrive
   * asynchronously as "forge:health-response" IPC messages, re-emitted
   * here as "health-response" events.
   */
  requestHealthChecks() {
    for (const [, workers] of this.workers) {
      for (const entry of workers) {
        if (!isWorkerSendable(entry.worker)) continue;
        try {
          entry.worker.send({ type: "forge:health-check", timestamp: Date.now() });
        } catch (err) {
          if (!isExpectedChannelCloseError(err)) {
            console.error("[DirectMessageBus] Failed to send health-check:", err.message);
          }
        }
      }
    }
  }

  /**
   * Per-service summary: worker count, worker ids, and how many workers
   * have reported a direct socket.
   * @returns {Object<string, {workerCount: number, ids: number[], directSockets: number}>}
   */
  stats() {
    const result = {};
    for (const [name, workers] of this.workers) {
      result[name] = {
        workerCount: workers.length,
        ids: workers.map((w) => w.id),
        directSockets: workers.filter((w) => w.socketPath).length,
      };
    }
    return result;
  }

  /**
   * CR-IPC-9: Get the connection matrix for debugging.
   * @returns {Object<string, string[]>} worker key → array of connected peer keys
   */
  getConnectionMatrix() {
    const result = {};
    for (const [key, peers] of this._connections) {
      result[key] = [...peers];
    }
    return result;
  }

  /**
   * Tear down this bus: cancel the pending broadcast and remove the socket
   * directory. NOTE(review): does not detach worker message/error listeners —
   * presumably callers unregister workers first; verify at call sites.
   */
  cleanup() {
    if (this._broadcastTimer) {
      clearTimeout(this._broadcastTimer);
      this._broadcastTimer = null;
    }
    cleanupSocketDir(this._socketDir);
    EXIT_SOCKET_DIRS.delete(this._socketDir);
  }
}
@@ -0,0 +1,247 @@
1
+ /**
2
+ * EndpointResolver
3
+ *
4
+ * Resolves service names to { host, port, remote } endpoints.
5
+ * Supports:
6
+ * - Single endpoints (one instance per service)
7
+ * - Multi-instance arrays (round-robin selection)
8
+ * - Dynamic updates via set/remove (used by ServiceRegistry)
9
+ * - Fallback from FORGE_SERVICE_ENDPOINTS to FORGE_SERVICE_PORTS
10
+ * - C3: File fallback via FORGE_SERVICE_ENDPOINTS_FILE for large maps
11
+ */
12
+ import { readFileSync } from "node:fs";
13
+
14
/**
 * Structural check for an endpoint record.
 * Requires a non-empty string `host`; `port`, when present, must be a
 * number in [1, 65535] (fractional values are deliberately accepted,
 * matching the original contract).
 *
 * @param {unknown} ep
 * @returns {boolean}
 */
function validateEndpoint(ep) {
  if (ep === null || typeof ep !== 'object') return false;
  const { host, port } = ep;
  if (typeof host !== 'string' || host.length === 0) return false;
  if (port === undefined) return true;
  return typeof port === 'number' && port >= 1 && port <= 65535;
}
21
+
22
/**
 * Resolves service names to { host, port, remote } endpoint records.
 * Supports single endpoints, multi-instance round-robin, pluggable
 * routing strategies, dynamic set/remove updates, and environment-based
 * bootstrap (FORGE_SERVICE_ENDPOINTS with FORGE_SERVICE_PORTS fallback,
 * plus FORGE_SERVICE_ENDPOINTS_FILE for large maps — C3).
 */
export class EndpointResolver {
  constructor() {
    /** @type {Map<string, Array<{host: string, port: number, remote: boolean}>>} */
    this._endpoints = new Map();

    /** @type {Map<string, number>} round-robin cursor per service */
    this._counters = new Map();

    /** @type {Map<string, Object>} optional per-service routing strategy overrides */
    this._strategies = new Map();
  }

  /**
   * Install a routing strategy for one service. When present, resolve()
   * delegates instance selection to strategy.pick(entries, callContext),
   * where each entry is { key: "host:port", host, port, remote }.
   *
   * @param {string} serviceName
   * @param {Object} strategy - Must expose a pick(workers, callContext) method.
   * @throws {Error} when the strategy has no callable pick().
   */
  setStrategy(serviceName, strategy) {
    const usable = strategy && typeof strategy.pick === 'function';
    if (!usable) {
      throw new Error(`Strategy for "${serviceName}" must have a pick() method`);
    }
    this._strategies.set(serviceName, strategy);
  }

  /**
   * Build a resolver from environment variables.
   * FORGE_SERVICE_ENDPOINTS (or, failing that, the file named by
   * FORGE_SERVICE_ENDPOINTS_FILE — C3) supplies full endpoint records;
   * FORGE_SERVICE_PORTS then fills in localhost endpoints for any
   * services not already covered.
   *
   * @returns {EndpointResolver}
   */
  static fromEnv() {
    const resolver = new EndpointResolver();

    // Prefer the inline env var; fall back to the file the Supervisor
    // writes when the endpoint map is too large for the environment.
    let endpointsJson = process.env.FORGE_SERVICE_ENDPOINTS;
    if (!endpointsJson && process.env.FORGE_SERVICE_ENDPOINTS_FILE) {
      try {
        endpointsJson = readFileSync(process.env.FORGE_SERVICE_ENDPOINTS_FILE, "utf8");
      } catch (e) {
        console.error("[EndpointResolver] Failed to read FORGE_SERVICE_ENDPOINTS_FILE:", e.message);
      }
    }

    if (endpointsJson) {
      try {
        const parsed = JSON.parse(endpointsJson);
        for (const [name, value] of Object.entries(parsed)) {
          // Normalize scalar and array entries to one validated list.
          const candidates = Array.isArray(value) ? value : [value];
          const valid = candidates.filter(validateEndpoint);
          if (valid.length > 0) resolver._endpoints.set(name, valid);
        }
      } catch (e) {
        console.error("[EndpointResolver] Failed to parse FORGE_SERVICE_ENDPOINTS:", e.message);
      }
    }

    // Localhost port map as fallback for services missing from endpoints.
    const portsJson = process.env.FORGE_SERVICE_PORTS;
    if (portsJson) {
      try {
        for (const [name, rawPort] of Object.entries(JSON.parse(portsJson))) {
          if (resolver._endpoints.has(name)) continue;
          const port = Number(rawPort);
          if (Number.isInteger(port) && port >= 1 && port <= 65535) {
            resolver._endpoints.set(name, [{ host: "127.0.0.1", port, remote: false }]);
          }
        }
      } catch (e) {
        console.error("[EndpointResolver] Failed to parse FORGE_SERVICE_PORTS:", e.message);
      }
    }

    return resolver;
  }

  /**
   * Pick an endpoint for a service. A single instance is returned
   * directly; with multiple instances, a configured strategy is
   * consulted first, and round-robin is the fallback.
   *
   * @param {string} serviceName
   * @param {Object} [callContext] - Forwarded to strategy.pick().
   * @returns {{ host: string, port: number, remote: boolean } | null}
   */
  resolve(serviceName, callContext) {
    const pool = this._endpoints.get(serviceName);
    if (!pool || pool.length === 0) return null;
    if (pool.length === 1) return pool[0];

    const strategy = this._strategies.get(serviceName);
    if (strategy) {
      // Present endpoints to the strategy as WorkerEntry-compatible objects.
      const candidates = pool.map((ep) => ({
        key: `${ep.host}:${ep.port}`,
        host: ep.host,
        port: ep.port,
        remote: ep.remote,
      }));
      const choice = strategy.pick(candidates, callContext);
      // Broadcast strategies return arrays; resolve() takes the first hit.
      if (Array.isArray(choice)) return choice[0] ?? null;
      if (choice) return { host: choice.host, port: choice.port, remote: choice.remote };
      // Strategy declined — fall through to round-robin.
    }

    // Round-robin; read-then-write is safe in single-threaded Node.js.
    const cursor = this._counters.get(serviceName) ?? 0;
    this._counters.set(serviceName, (cursor + 1) % 1_000_000_000);
    return pool[cursor % pool.length];
  }

  /**
   * All endpoints of a service as shallow copies (broadcast delivery).
   *
   * @param {string} serviceName
   * @returns {Array<{ host: string, port: number, remote: boolean }>}
   */
  all(serviceName) {
    const pool = this._endpoints.get(serviceName) ?? [];
    return pool.map((ep) => ({ ...ep }));
  }

  /**
   * Add or replace an endpoint (dynamic registry discovery).
   * An existing host:port pair is updated in place; `remote` defaults
   * to true for dynamically discovered endpoints.
   *
   * @param {string} serviceName
   * @param {{ host: string, port: number, remote?: boolean }} endpoint
   * @throws {Error} when a supplied port is not an integer in [1, 65535].
   */
  set(serviceName, endpoint) {
    const { port } = endpoint;
    if (port !== undefined && (!Number.isInteger(port) || port < 1 || port > 65535)) {
      throw new Error(`Invalid port for service "${serviceName}": ${endpoint.port}`);
    }
    const record = { remote: true, ...endpoint };
    const pool = this._endpoints.get(serviceName) ?? [];

    const slot = pool.findIndex((e) => e.host === record.host && e.port === record.port);
    if (slot === -1) {
      pool.push(record);
    } else {
      pool[slot] = record;
    }

    this._endpoints.set(serviceName, pool);
  }

  /**
   * Drop one instance of a service (e.g. a remote node went down).
   * Removing the last instance also clears the round-robin counter (CR-IPC-7).
   *
   * @param {string} serviceName
   * @param {string} host
   * @param {number} port
   */
  remove(serviceName, host, port) {
    const pool = this._endpoints.get(serviceName);
    if (!pool) return;

    const remaining = pool.filter((e) => e.host !== host || e.port !== port);
    if (remaining.length > 0) {
      this._endpoints.set(serviceName, remaining);
      // Counter wraps naturally via the modulo in resolve().
    } else {
      this._endpoints.delete(serviceName);
      this._counters.delete(serviceName);
    }
  }

  /**
   * COR-C1: Acquire a pending slot for a resolved endpoint; delegates to
   * the strategy's acquire() when available. Pair with releaseEndpoint()
   * in a finally block.
   *
   * @param {string} serviceName
   * @param {string} endpointKey - "host:port" key from resolve()
   */
  acquireEndpoint(serviceName, endpointKey) {
    const strategy = this._strategies.get(serviceName);
    if (typeof strategy?.acquire === 'function') {
      strategy.acquire(endpointKey);
    }
  }

  /**
   * COR-C1: Release a pending slot for a resolved endpoint; delegates to
   * the strategy's release() when available.
   *
   * @param {string} serviceName
   * @param {string} endpointKey - "host:port" key from resolve()
   */
  releaseEndpoint(serviceName, endpointKey) {
    const strategy = this._strategies.get(serviceName);
    if (typeof strategy?.release === 'function') {
      strategy.release(endpointKey);
    }
  }

  /**
   * Does this service have at least one known endpoint?
   *
   * @param {string} serviceName
   * @returns {boolean}
   */
  has(serviceName) {
    const pool = this._endpoints.get(serviceName);
    return pool != null && pool.length > 0;
  }
}