threadforge 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. package/LICENSE +21 -0
  2. package/README.md +152 -0
  3. package/bin/forge.js +1050 -0
  4. package/bin/host-commands.js +344 -0
  5. package/bin/platform-commands.js +570 -0
  6. package/package.json +71 -0
  7. package/shared/auth.js +475 -0
  8. package/src/core/DirectMessageBus.js +364 -0
  9. package/src/core/EndpointResolver.js +247 -0
  10. package/src/core/ForgeContext.js +2227 -0
  11. package/src/core/ForgeHost.js +122 -0
  12. package/src/core/ForgePlatform.js +145 -0
  13. package/src/core/Ingress.js +768 -0
  14. package/src/core/Interceptors.js +420 -0
  15. package/src/core/MessageBus.js +310 -0
  16. package/src/core/Prometheus.js +305 -0
  17. package/src/core/RequestContext.js +413 -0
  18. package/src/core/RoutingStrategy.js +316 -0
  19. package/src/core/Supervisor.js +1306 -0
  20. package/src/core/ThreadAllocator.js +196 -0
  21. package/src/core/WorkerChannelManager.js +879 -0
  22. package/src/core/config.js +624 -0
  23. package/src/core/host-config.js +311 -0
  24. package/src/core/network-utils.js +166 -0
  25. package/src/core/platform-config.js +308 -0
  26. package/src/decorators/ServiceProxy.js +899 -0
  27. package/src/decorators/index.js +571 -0
  28. package/src/deploy/NginxGenerator.js +865 -0
  29. package/src/deploy/PlatformManifestGenerator.js +96 -0
  30. package/src/deploy/RouteManifestGenerator.js +112 -0
  31. package/src/deploy/index.js +984 -0
  32. package/src/frontend/FrontendDevLifecycle.js +65 -0
  33. package/src/frontend/FrontendPluginOrchestrator.js +187 -0
  34. package/src/frontend/SiteResolver.js +63 -0
  35. package/src/frontend/StaticMountRegistry.js +90 -0
  36. package/src/frontend/index.js +5 -0
  37. package/src/frontend/plugins/index.js +2 -0
  38. package/src/frontend/plugins/viteFrontend.js +79 -0
  39. package/src/frontend/types.js +35 -0
  40. package/src/index.js +56 -0
  41. package/src/internals.js +31 -0
  42. package/src/plugins/PluginManager.js +537 -0
  43. package/src/plugins/ScopedPostgres.js +192 -0
  44. package/src/plugins/ScopedRedis.js +142 -0
  45. package/src/plugins/index.js +1729 -0
  46. package/src/registry/ServiceRegistry.js +796 -0
  47. package/src/scaling/ScaleAdvisor.js +442 -0
  48. package/src/services/Service.js +195 -0
  49. package/src/services/worker-bootstrap.js +676 -0
  50. package/src/templates/auth-service.js +65 -0
  51. package/src/templates/identity-service.js +75 -0
@@ -0,0 +1,442 @@
+ /**
+  * ScaleAdvisor
+  *
+  * Monitors service health across the cluster and produces
+  * actionable scaling recommendations.
+  *
+  * This is the "automatic" part of horizontal scaling. Instead of
+  * a human watching dashboards and deciding "billing needs its own
+  * machine", the ScaleAdvisor detects pressure and recommends:
+  *
+  * 1. SCALE UP — add more workers on the same machine
+  * 2. SPLIT OUT — move a service to its own dedicated process
+  * 3. MIGRATE — move a service to another machine
+  * 4. SCALE DOWN — reduce workers or colocate back
+  *
+  * ═══════════════════════════════════════════════════════════════
+  * SCALING SIGNALS
+  * ═══════════════════════════════════════════════════════════════
+  *
+  * The advisor watches these signals per service:
+  *
+  * CPU saturation — are the service's processes maxing out?
+  * Latency drift — is P99 latency increasing over time?
+  * Queue pressure — are pending requests growing?
+  *
+  * Thresholds:
+  *
+  * ┌──────────────────┬────────────┬─────────────┬──────────────┐
+  * │ Signal           │ Normal     │ Warning     │ Critical     │
+  * ├──────────────────┼────────────┼─────────────┼──────────────┤
+  * │ CPU per worker   │ < 60%      │ 60-85%      │ > 85%        │
+  * │ P99 latency      │ < 100ms    │ 100-500ms   │ > 500ms      │
+  * │ Pending requests │ < 50       │ 50-200      │ > 200        │
+  * │ Memory per proc  │ < 512MB    │ 512MB-1GB   │ > 1GB        │
+  * └──────────────────┴────────────┴─────────────┴──────────────┘
+  *
+  * ═══════════════════════════════════════════════════════════════
+  * DECISION TREE
+  * ═══════════════════════════════════════════════════════════════
+  *
+  * Service under pressure?
+  * │
+  * ├─ Is it colocated (sharing a process with others)?
+  * │   └─ YES → Recommendation: SPLIT OUT to its own process
+  * │            (colocation was causing resource contention)
+  * │
+  * ├─ Does the machine have idle cores?
+  * │   └─ YES → Recommendation: SCALE UP (add more workers)
+  * │
+  * ├─ Is the MACHINE saturated (all cores busy)?
+  * │   └─ YES → Recommendation: MIGRATE to a new machine
+  * │            (generate deployment command + config change)
+  * │
+  * └─ Is this a latency problem, not a CPU problem?
+  *     └─ YES → Recommendation: check for slow I/O,
+  *              add connection pooling, or add caching
+  *
+  * Service underutilized?
+  * │
+  * ├─ Has it been under 20% CPU for 30+ minutes?
+  * │   └─ Recommendation: SCALE DOWN (reduce workers)
+  * │
+  * └─ Is it running alone and barely used?
+  *     └─ Recommendation: COLOCATE with other light services
+  */
+
+ import { EventEmitter } from "node:events";
+ import os from "node:os";
+
+ /**
+  * @typedef {Object} ScaleRecommendation
+  * @property {string} service
+  * @property {'scale_up'|'split_out'|'migrate'|'scale_down'|'colocate'|'investigate'} action
+  * @property {string} reason
+  * @property {Object} details
+  * @property {number} confidence - 0 to 1
+  * @property {number} timestamp
+  */
+
+ export class ScaleAdvisor extends EventEmitter {
+   /**
+    * @param {import('../registry/ServiceRegistry.js').ServiceRegistry} registry
+    * @param {Object} [options]
+    * @param {number} [options.evaluationIntervalMs=30000]
+    * @param {Object} [options.thresholds]
+    * @param {number} [options.cooldownMs=120000]
+    * @param {{autoExecute?: boolean}} [options.scaling]
+    */
+   constructor(registry, options = {}) {
+     super();
+
+     this.registry = registry;
+     this.evaluationIntervalMs = options.evaluationIntervalMs ?? 30000;
+
+     const ot = options.thresholds ?? {};
+     this.thresholds = {
+       cpu: { warning: 60, critical: 85, scaleDown: 40, ...ot.cpu },
+       latencyP99: { warning: 100, critical: 500, ...ot.latencyP99 },
+       pending: { warning: 50, critical: 200, ...ot.pending },
+       memory: { warning: 512, critical: 1024, ...ot.memory },
+       underutilized: { cpu: 20, duration: 30 * 60 * 1000, ...ot.underutilized },
+     };
+
+     this.cooldownMs = options.cooldownMs ?? 120000;
+
+     /** O3: Whether to auto-execute scaling recommendations */
+     this._autoExecute = options.scaling?.autoExecute === true;
+
+     /**
+      * Historical snapshots for trend detection.
+      * Key: service name, Value: array of health snapshots
+      * @type {Map<string, Array<{timestamp: number, health: Object}>>}
+      */
+     this.history = new Map();
+     this.maxHistory = 60; // keep 60 snapshots (30 min at 30s interval)
+
+     /** @type {ScaleRecommendation[]} */
+     this.recommendations = [];
+
+     /**
+      * Tracks the last scaling recommendation timestamp per service.
+      * @type {Map<string, number>}
+      */
+     this._lastScaleAction = new Map();
+
+     /** @type {NodeJS.Timeout|null} */
+     this._timer = null;
+
+     // Delta-based CPU measurement state
+     this._lastCpuMeasurement = null;
+     this._lastCpuUsage = 0;
+
+     /** O3: Executor function for auto-scaling, set via setExecutor() */
+     this._executor = null;
+
+     /** O11: Worker-reported CPU values, keyed by service name */
+     this._workerCpuReports = new Map();
+   }
+
+   /**
+    * O3: Set an executor function that actually performs scaling actions.
+    * Called by the Supervisor to provide a callback for auto-scaling.
+    *
+    * @param {function(ScaleRecommendation): Promise<void>} fn
+    */
+   setExecutor(fn) {
+     if (typeof fn !== 'function') {
+       throw new Error('ScaleAdvisor.setExecutor() requires a function');
+     }
+     this._executor = fn;
+   }
+
+   /**
+    * O11: Receive a CPU measurement reported by a worker via IPC.
+    * Workers should report their own CPU usage periodically.
+    *
+    * @param {string} serviceName
+    * @param {number} cpu - CPU percentage (0-100)
+    */
+   reportWorkerCpu(serviceName, cpu) {
+     this._workerCpuReports.set(serviceName, { cpu, timestamp: Date.now() });
+   }
+
+   start() {
+     this._timer = setInterval(() => {
+       this.evaluate();
+     }, this.evaluationIntervalMs);
+     this._timer.unref();
+
+     // Run initial evaluation
+     this.evaluate();
+     return this;
+   }
+
+   stop() {
+     if (this._timer) {
+       clearInterval(this._timer);
+       this._timer = null;
+     }
+   }
+
+   /**
+    * Evaluate all services and generate recommendations.
+    */
+   evaluate() {
+     const topology = this.registry.topology();
+     const newRecs = [];
+
+     for (const [serviceName, instances] of Object.entries(topology)) {
+       this._recordSnapshot(serviceName, instances);
+
+       for (const instance of instances) {
+         if (instance.nodeId !== this.registry.nodeId) continue;
+
+         const recs = this._evaluateService(serviceName, instance, instances, topology);
+         newRecs.push(...recs);
+       }
+     }
+
+     // Only emit if recommendations changed
+     const changed =
+       JSON.stringify(newRecs.map((r) => r.action + r.service)) !==
+       JSON.stringify(this.recommendations.map((r) => r.action + r.service));
+
+     this.recommendations = newRecs;
+
+     if (changed && newRecs.length > 0) {
+       this.emit("recommendations", newRecs);
+       for (const rec of newRecs) {
+         this.emit("recommendation", rec);
+
+         // O3: Auto-execute if configured and executor is available.
+         // The executor may be async, so surface both synchronous throws
+         // and promise rejections as 'error' events.
+         if (this._executor && this._autoExecute) {
+           try {
+             Promise.resolve(this._executor(rec)).catch((err) => this.emit('error', err));
+           } catch (err) {
+             this.emit('error', err);
+           }
+         }
+       }
+     }
+   }
+
+   /**
+    * Evaluate a single service instance.
+    */
+   _evaluateService(serviceName, instance, _allInstances, topology) {
+     const recs = [];
+     const health = instance;
+
+     // Check cooldown — suppress recommendations during cooldown period
+     const lastAction = this._lastScaleAction.get(serviceName) ?? 0;
+     if (Date.now() - lastAction < this.cooldownMs) {
+       return recs;
+     }
+
+     // O11: Use worker-reported CPU if available (more accurate than supervisor CPU).
+     // Workers report their own CPU via reportWorkerCpu(), which is aggregated here.
+     // Falls back to registry health data, then to topology data.
+     const workerReport = this._workerCpuReports.get(serviceName);
+     const workerCpuFresh = workerReport && (Date.now() - workerReport.timestamp < this.evaluationIntervalMs * 2);
+     const regHealth = this.registry.localRegistrations.get(serviceName)?.health;
+     const cpu = workerCpuFresh ? workerReport.cpu : (regHealth?.cpu ?? health.cpu);
+
+     // CPU check — use critical threshold for scale-up decisions
+     if (cpu >= this.thresholds.cpu.critical) {
+       const healthWithCpu = { ...health, cpu };
+       const rec = this._cpuRecommendation(serviceName, healthWithCpu, topology);
+       recs.push(rec);
+       this._lastScaleAction.set(serviceName, Date.now());
+     }
+
+     // Underutilization check — use scaleDown threshold (hysteresis).
+     // Only recommend scale_down when CPU is below the lower hysteresis threshold.
+     if (cpu < this.thresholds.underutilized.cpu && cpu < this.thresholds.cpu.scaleDown) {
+       const duration = this._lowCpuDuration(serviceName);
+       if (duration >= this.thresholds.underutilized.duration) {
+         recs.push({
+           service: serviceName,
+           action: "scale_down",
+           reason: `CPU below ${this.thresholds.underutilized.cpu}% for ${Math.round(duration / 60000)} minutes`,
+           details: {
+             currentCpu: cpu,
+             currentWorkers: health.workers,
+             suggestedWorkers: Math.max(1, Math.ceil(health.workers / 2)),
+           },
+           confidence: 0.7,
+           timestamp: Date.now(),
+         });
+         this._lastScaleAction.set(serviceName, Date.now());
+       }
+     }
+
+     return recs;
+   }
+
+   _cpuRecommendation(serviceName, health, topology) {
+     const machineCpu = this._getMachineCpu();
+
+     // Is this service colocated (sharing a process group with other services)?
+     const localRegs = [...this.registry.localRegistrations.values()];
+     const thisGroup = localRegs.find((lr) => lr.name === serviceName)?.metadata?.group;
+     // Isolated services (_isolated:name) are not truly colocated
+     const isColocated = thisGroup && !thisGroup.startsWith('_isolated:') &&
+       localRegs.some((r) => r.name !== serviceName && r.metadata?.group === thisGroup);
+
+     if (isColocated) {
+       return {
+         service: serviceName,
+         action: "split_out",
+         reason: `CPU at ${health.cpu}% while colocated — resource contention likely`,
+         details: {
+           currentCpu: health.cpu,
+           suggestion: `Move ${serviceName} to its own process group`,
+           configChange: {
+             [serviceName]: { group: undefined }, // remove from colocation group
+           },
+         },
+         confidence: 0.85,
+         timestamp: Date.now(),
+       };
+     }
+
+     if (machineCpu < 80) {
+       // Machine has headroom — add workers
+       return {
+         service: serviceName,
+         action: "scale_up",
+         reason: `CPU at ${health.cpu}% but machine has idle capacity (${machineCpu}% total)`,
+         details: {
+           currentCpu: health.cpu,
+           machineCpu,
+           currentWorkers: health.workers,
+           suggestedWorkers: health.workers + Math.ceil(health.workers * 0.5),
+           command: `forge scale ${serviceName} ${health.workers + Math.ceil(health.workers * 0.5)}`,
+         },
+         confidence: 0.8,
+         timestamp: Date.now(),
+       };
+     }
+
+     // Machine is saturated — migrate
+     return {
+       service: serviceName,
+       action: "migrate",
+       reason: `CPU at ${health.cpu}% and machine at ${machineCpu}% — needs dedicated hardware`,
+       details: {
+         currentCpu: health.cpu,
+         machineCpu,
+         steps: [
+           `1. Provision a new machine`,
+           `2. Deploy: forge start --http ${this.registry.httpBasePort}`,
+           `3. Update config: ${serviceName}: { type: 'remote', address: 'http://<new-host>:${this.registry.httpBasePort}' }`,
+           `4. Restart this node — traffic routes automatically`,
+         ],
+         generatedConfig: {
+           [serviceName]: {
+             type: "remote",
+             address: `http://<new-host>:${this.registry.httpBasePort}`,
+           },
+         },
+       },
+       confidence: 0.9,
+       timestamp: Date.now(),
+     };
+   }
+
+   _recordSnapshot(serviceName, instances) {
+     if (!this.history.has(serviceName)) {
+       this.history.set(serviceName, []);
+     }
+
+     const snapshots = this.history.get(serviceName);
+     snapshots.push({
+       timestamp: Date.now(),
+       health: instances.map((i) => ({ cpu: i.cpu, status: i.status, workers: i.workers })),
+     });
+
+     // Trim history
+     while (snapshots.length > this.maxHistory) {
+       snapshots.shift();
+     }
+   }
+
+   _lowCpuDuration(serviceName) {
+     const snapshots = this.history.get(serviceName) ?? [];
+     if (snapshots.length === 0) return 0;
+
+     // Walk backwards to find how long CPU has been low
+     let duration = 0;
+     for (let i = snapshots.length - 1; i >= 0; i--) {
+       const allLow = snapshots[i].health.every((h) => h.cpu < this.thresholds.underutilized.cpu);
+       if (!allLow) break;
+       duration = Date.now() - snapshots[i].timestamp;
+     }
+
+     return duration;
+   }
+
+   _getMachineCpu() {
+     const now = Date.now();
+     const currentCpus = os.cpus().map(cpu => ({ ...cpu.times }));
+
+     if (!this._lastCpuMeasurement) {
+       this._lastCpuMeasurement = { timestamp: now, cpus: currentCpus };
+       return this._lastCpuUsage ?? 0;
+     }
+
+     const prev = this._lastCpuMeasurement.cpus;
+     let totalDelta = 0;
+     let idleDelta = 0;
+
+     for (let i = 0; i < currentCpus.length && i < prev.length; i++) {
+       const c = currentCpus[i];
+       const p = prev[i];
+       // Node's os.cpus() times report user/nice/sys/idle/irq; `steal` is
+       // guarded with ?? 0 for environments that expose it.
+       const delta = (c.user - p.user) + (c.nice - p.nice) + (c.sys - p.sys) +
+         (c.idle - p.idle) + (c.irq - p.irq) + ((c.steal ?? 0) - (p.steal ?? 0));
+       totalDelta += delta;
+       idleDelta += (c.idle - p.idle);
+     }
+
+     this._lastCpuMeasurement = { timestamp: now, cpus: currentCpus };
+     const usage = totalDelta > 0 ? Math.round(100 - (100 * idleDelta / totalDelta)) : 0;
+     this._lastCpuUsage = usage;
+     return usage;
+   }
+
+   /**
+    * Get current recommendations as a formatted report.
+    */
+   report() {
+     if (this.recommendations.length === 0) {
+       return " All services nominal. No scaling actions needed.";
+     }
+
+     let report = "";
+     for (const rec of this.recommendations) {
+       const icon =
+         {
+           scale_up: "\u2191",
+           scale_down: "\u2193",
+           split_out: "\u229E",
+           migrate: "\u2192",
+           colocate: "\u229F",
+           investigate: "?",
+         }[rec.action] ?? "\u2022";
+
+       report += ` ${icon} ${rec.service}: ${rec.action.toUpperCase()}\n`;
+       report += ` ${rec.reason}\n`;
+
+       if (rec.details.command) {
+         report += ` Run: ${rec.details.command}\n`;
+       }
+       if (rec.details.steps) {
+         for (const step of rec.details.steps) {
+           report += ` ${step}\n`;
+         }
+       }
+       report += "\n";
+     }
+
+     return report;
+   }
+ }
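
To make the advisor's API concrete, here is a minimal usage sketch. It is not taken from the package: the deep import path and the stub registry are assumptions for illustration; the stub only provides the members (`topology()`, `nodeId`, `localRegistrations`, `httpBasePort`) that the advisor's own code reads.

```js
// Hypothetical wiring sketch, assuming the class is importable from its
// source path and that a real ServiceRegistry is normally passed instead
// of this stub.
import { ScaleAdvisor } from "threadforge/src/scaling/ScaleAdvisor.js";

// Minimal stand-in exposing only what the advisor reads.
const registry = {
  nodeId: "node-1",
  httpBasePort: 8080,
  localRegistrations: new Map(),
  topology: () => ({}), // service name -> array of instance health objects
};

const advisor = new ScaleAdvisor(registry, {
  evaluationIntervalMs: 30_000, // evaluate every 30s (the default)
  cooldownMs: 120_000,          // suppress repeat advice per service for 2 min
  scaling: { autoExecute: false }, // advisory only; no executor calls
});

// Each recommendation carries action, reason, details, and confidence.
advisor.on("recommendation", (rec) => {
  console.log(`[scale] ${rec.service}: ${rec.action} (${rec.reason})`);
});
advisor.on("error", (err) => console.error("[scale] executor failed:", err));

advisor.start(); // runs an initial evaluate(), then repeats on an unref'd timer
```

With `scaling.autoExecute: true` and a callback registered via `setExecutor()`, the same recommendation events would additionally invoke the executor, which is how a supervisor can act on the advice automatically.
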
@@ -0,0 +1,195 @@
+ import { autoRegisterRoutes, autoWireSubscriptions, handleProxyRequest, NOT_HANDLED } from "../decorators/ServiceProxy.js";
+
+ /**
+  * Service Base Class v2
+  *
+  * Developers extend this to create ThreadForge services.
+  *
+  * NEW in v2:
+  * - Auto-generated proxy clients for other services
+  *   (this.users.getUser(id) instead of this.request('users', {...}))
+  * - @Expose, @Route, @Emit, @On decorators for contracts
+  * - Plain JS fallback via static `contract` property
+  * - Automatic HTTP route registration from @Route
+  * - Automatic event subscription wiring from @On
+  *
+  * @example
+  * ```js
+  * import { Service, Expose, Route, Emit } from 'threadforge';
+  *
+  * export default class UserService extends Service {
+  *
+  *   @Expose()
+  *   async getUser(userId) {
+  *     return this.db.findUser(userId);
+  *   }
+  *
+  *   @Expose()
+  *   @Route('POST', '/users')
+  *   @Emit('user.created')
+  *   async createUser(data) {
+  *     const user = await this.db.insert(data);
+  *     await this.notifications.userCreated(user); // auto-generated proxy
+  *     return user;
+  *   }
+  * }
+  * ```
+  *
+  * Plain JS equivalent (no decorators):
+  * ```js
+  * export default class UserService extends Service {
+  *   static contract = {
+  *     expose: ['getUser', 'createUser'],
+  *     routes: [
+  *       { method: 'POST', path: '/users', handler: 'createUser' },
+  *     ],
+  *     emits: { createUser: 'user.created' },
+  *   };
+  *
+  *   async getUser(userId) { ... }
+  *   async createUser(data) { ... }
+  * }
+  * ```
+  *
+  * **Route handler signatures — two approaches:**
+  *
+  * 1. Contract routes (`@Route` / `static contract.routes`):
+  *    Handler receives `(body, params, query)` and returns a value that is
+  *    auto-serialized as JSON (201 for POST, 200 otherwise).
+  *
+  * 2. Manual routes (registered in `onStart` via `ctx.router`):
+  *    Handler receives the raw `(req, res)` and must call `res.json()` or
+  *    `res.end()` itself.
+  *
+  * See `autoRegisterRoutes` in `ServiceProxy.js` for full details.
+  */
+ export class Service {
+   constructor() {
+     /** @type {import('../core/ForgeContext.js').ForgeContext | null} */
+     this.ctx = null;
+
+     /**
+      * Auto-generated proxy clients for connected services.
+      * Populated by the framework during initialization.
+      *
+      * Usage: this.users.getUser('123')
+      *        this.notifications.sendAlert({ ... })
+      *
+      * @type {Object}
+      */
+     // Proxies are set as direct properties on the instance,
+     // e.g. this.users, this.billing, this.notifications
+   }
+
+   // ── Lifecycle hooks (override in subclass) ──
+
+   /**
+    * Called when the service starts. Initialize resources here.
+    * Routes from @Route decorators are auto-registered BEFORE this is called,
+    * so you can add additional routes in onStart if needed.
+    */
+   async onStart(_ctx) {}
+
+   /**
+    * Called when a message arrives from another service.
+    * For proxy-style calls, this is handled automatically.
+    * Override for custom fire-and-forget message handling.
+    */
+   async onMessage(_from, _payload) {}
+
+   /**
+    * Called when a request arrives from another service.
+    *
+    * For proxy-style calls (@Expose methods), this is dispatched
+    * automatically — you don't need to implement a switch/case.
+    *
+    * Override only for custom non-proxy request handling.
+    */
+   async onRequest(from, payload) {
+     // Try proxy-style dispatch first
+     const result = handleProxyRequest(this, from, payload);
+     if (result !== NOT_HANDLED) return result;
+
+     // Subclass can handle other request formats
+     return null;
+   }
+
+   /**
+    * Called when the service is shutting down.
+    */
+   async onStop() {}
+
+   // ── Convenience methods ──
+
+   /**
+    * Send a fire-and-forget message to another service.
+    * Prefer using proxy clients (this.serviceName.method()) for
+    * request/response patterns.
+    */
+   async send(target, payload) {
+     if (!this.ctx) throw new Error("Service not initialized");
+     return this.ctx.send(target, payload);
+   }
+
+   /**
+    * Send a request to another service.
+    * Prefer using proxy clients instead of this low-level API.
+    */
+   async request(target, payload, timeoutMs) {
+     if (!this.ctx) throw new Error("Service not initialized");
+     return this.ctx.request(target, payload, timeoutMs);
+   }
+
+   /** Broadcast to all workers of a target service. */
+   async broadcast(target, payload) {
+     if (!this.ctx) throw new Error("Service not initialized");
+     return this.ctx.broadcast(target, payload);
+   }
+
+   // ── Internal lifecycle (called by worker bootstrap) ──
+
+   /** @internal */
+   async _init(ctx) {
+     this.ctx = ctx;
+
+     // Wire IPC handlers
+     ctx._onMessage = (from, payload) => this.onMessage(from, payload);
+     ctx._onRequest = (from, payload) => this.onRequest(from, payload);
+     ctx._wireMessageHandlers();
+
+     // Auto-register HTTP routes from @Route decorators
+     autoRegisterRoutes(this, ctx);
+
+     // Auto-wire event subscriptions from @On decorators
+     autoWireSubscriptions(this, ctx);
+   }
+
+   /**
+    * @internal
+    * Set proxy clients on the service instance.
+    * Called by the worker bootstrap after all services are loaded.
+    */
+   _setProxies(proxies) {
+     for (const [name, proxy] of Object.entries(proxies)) {
+       this[name] = proxy;
+     }
+   }
+
+   /** @internal */
+   async _start() {
+     await this.onStart(this.ctx);
+
+     if (this.ctx._needsHttpServer) {
+       await this.ctx.startServer();
+     }
+   }
+
+   /** @internal */
+   async _stop() {
+     try {
+       await this.onStop();
+     } finally {
+       await this.ctx.stop();
+     }
+   }
+ }
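
The two route styles described in the Service docstring can be contrasted in one sketch. This is illustrative, not package code: the handler bodies are invented, and `ctx.router.get()` is an assumed registration method name (the docstring only says manual routes are "registered in onStart via ctx.router" and must respond via `res.json()` or `res.end()`).

```js
// Illustrative only: handler bodies are placeholders, and ctx.router.get()
// is an assumed method name for manual route registration.
import { Service } from "threadforge";

export default class UserService extends Service {
  // Contract route: the handler receives (body, params, query) and its
  // return value is auto-serialized as JSON (201 for POST, 200 otherwise).
  static contract = {
    expose: ["getUser", "createUser"],
    routes: [{ method: "POST", path: "/users", handler: "createUser" }],
    emits: { createUser: "user.created" },
  };

  async onStart(ctx) {
    // Manual route: the handler gets the raw (req, res) and must respond
    // itself.
    ctx.router.get("/users/health", (_req, res) => {
      res.json({ ok: true });
    });
  }

  async getUser(userId) {
    return { id: userId }; // placeholder lookup
  }

  async createUser(data) {
    return { id: "u-1", ...data }; // emits 'user.created' per the contract
  }
}
```

Either style ends up wired the same way: `autoRegisterRoutes()` reads the contract during `_init()`, while manual registrations run inside `onStart()`, which `_start()` invokes afterwards.
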