threadforge 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +152 -0
- package/bin/forge.js +1050 -0
- package/bin/host-commands.js +344 -0
- package/bin/platform-commands.js +570 -0
- package/package.json +71 -0
- package/shared/auth.js +475 -0
- package/src/core/DirectMessageBus.js +364 -0
- package/src/core/EndpointResolver.js +247 -0
- package/src/core/ForgeContext.js +2227 -0
- package/src/core/ForgeHost.js +122 -0
- package/src/core/ForgePlatform.js +145 -0
- package/src/core/Ingress.js +768 -0
- package/src/core/Interceptors.js +420 -0
- package/src/core/MessageBus.js +310 -0
- package/src/core/Prometheus.js +305 -0
- package/src/core/RequestContext.js +413 -0
- package/src/core/RoutingStrategy.js +316 -0
- package/src/core/Supervisor.js +1306 -0
- package/src/core/ThreadAllocator.js +196 -0
- package/src/core/WorkerChannelManager.js +879 -0
- package/src/core/config.js +624 -0
- package/src/core/host-config.js +311 -0
- package/src/core/network-utils.js +166 -0
- package/src/core/platform-config.js +308 -0
- package/src/decorators/ServiceProxy.js +899 -0
- package/src/decorators/index.js +571 -0
- package/src/deploy/NginxGenerator.js +865 -0
- package/src/deploy/PlatformManifestGenerator.js +96 -0
- package/src/deploy/RouteManifestGenerator.js +112 -0
- package/src/deploy/index.js +984 -0
- package/src/frontend/FrontendDevLifecycle.js +65 -0
- package/src/frontend/FrontendPluginOrchestrator.js +187 -0
- package/src/frontend/SiteResolver.js +63 -0
- package/src/frontend/StaticMountRegistry.js +90 -0
- package/src/frontend/index.js +5 -0
- package/src/frontend/plugins/index.js +2 -0
- package/src/frontend/plugins/viteFrontend.js +79 -0
- package/src/frontend/types.js +35 -0
- package/src/index.js +56 -0
- package/src/internals.js +31 -0
- package/src/plugins/PluginManager.js +537 -0
- package/src/plugins/ScopedPostgres.js +192 -0
- package/src/plugins/ScopedRedis.js +142 -0
- package/src/plugins/index.js +1729 -0
- package/src/registry/ServiceRegistry.js +796 -0
- package/src/scaling/ScaleAdvisor.js +442 -0
- package/src/services/Service.js +195 -0
- package/src/services/worker-bootstrap.js +676 -0
- package/src/templates/auth-service.js +65 -0
- package/src/templates/identity-service.js +75 -0
|
@@ -0,0 +1,899 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* ServiceProxy v2
|
|
3
|
+
*
|
|
4
|
+
* Auto-generates proxy clients with:
|
|
5
|
+
* - Pluggable routing strategies (round-robin, hash, least-pending)
|
|
6
|
+
* - Interceptor chains (deadline, logging, circuit breaker, retry)
|
|
7
|
+
* - Pending request tracking (for least-pending routing)
|
|
8
|
+
* - Contract-based method validation
|
|
9
|
+
*/
|
|
10
|
+
|
|
11
|
+
import { isPrivateNetwork, isTrustedProxy } from "../core/ForgeContext.js";
|
|
12
|
+
import { AdaptiveConcurrencyLimiter } from "../core/Ingress.js";
|
|
13
|
+
import {
|
|
14
|
+
bulkheadInterceptor,
|
|
15
|
+
CircuitBreaker,
|
|
16
|
+
deadlineInterceptor,
|
|
17
|
+
isRetryable,
|
|
18
|
+
loggingInterceptor,
|
|
19
|
+
metricsInterceptor,
|
|
20
|
+
runInterceptorChain,
|
|
21
|
+
} from "../core/Interceptors.js";
|
|
22
|
+
import { RequestContext } from "../core/RequestContext.js";
|
|
23
|
+
import { getContract } from "./index.js";
|
|
24
|
+
|
|
25
|
+
// Tracks services whose proxies have already been wired (WeakSet so entries
// are GC'd along with the service object).
// NOTE(review): not referenced anywhere else in this chunk — presumably used
// by code outside this view; confirm before removing.
const _WIRED_SERVICES = new WeakSet();
|
|
26
|
+
|
|
27
|
+
// ─── Bounded Map cache: drop the oldest 20% once over the cap ────────
const MAX_CACHE_SIZE = 1000;
const EVICT_COUNT = Math.floor(MAX_CACHE_SIZE * 0.2); // 200

/**
 * Trim a cache Map back toward its cap.
 *
 * No-op while `cache.size <= MAX_CACHE_SIZE`. Once over the cap, deletes the
 * first EVICT_COUNT keys yielded by iteration — Map iteration follows
 * insertion order, so these are the oldest inserts (FIFO eviction, not true
 * LRU: reads do not refresh recency).
 *
 * @param {Map<*, *>} cache - Any Map used as an insertion-ordered cache.
 */
function evictOldest(cache) {
  if (cache.size <= MAX_CACHE_SIZE) return;
  // Collect the oldest EVICT_COUNT keys up front, then delete them.
  const oldestKeys = [];
  for (const key of cache.keys()) {
    if (oldestKeys.length >= EVICT_COUNT) break;
    oldestKeys.push(key);
  }
  for (const key of oldestKeys) {
    cache.delete(key);
  }
}
|
|
46
|
+
|
|
47
|
+
// ─── Fixed-window request counter (per-route, per-IP) ────────────────
// (Counts requests per window; the count resets when a new window starts.)
const _rateLimitBuckets = new Map();
const MAX_RATE_LIMIT_BUCKETS = 100_000;

// Background sweep: every 60s, drop windows idle for more than 2 minutes.
const _rateLimitCleanupTimer = setInterval(() => {
  const now = Date.now();
  for (const [key, bucket] of _rateLimitBuckets) {
    if (now - bucket.windowStart > 120_000) {
      _rateLimitBuckets.delete(key);
    }
  }
}, 60_000);
_rateLimitCleanupTimer.unref?.(); // never keep the process alive

/**
 * Parse a rate-limit spec such as "100/min" (case-insensitive).
 *
 * @param {string} spec - e.g. "10/sec", "100/min", "1000/hour"
 * @returns {{limit: number, windowMs: number}|null} null when malformed
 */
function parseRateLimit(spec) {
  const match = /^(\d+)\/(sec|min|hour)$/i.exec(spec);
  if (!match) return null;
  const windowMs = { sec: 1000, min: 60_000, hour: 3_600_000 }[match[2].toLowerCase()];
  return { limit: parseInt(match[1], 10), windowMs };
}

let _lastEvictionCheck = 0;

/**
 * Count one request against the `${routeKey}:${ip}` window and report whether
 * it stays within `config.limit` for the current `config.windowMs` window.
 * The first request of a (new or expired) window always passes and resets
 * the count.
 *
 * When the bucket map has grown past MAX_RATE_LIMIT_BUCKETS, an off-tick
 * sweep of stale (> 2 min idle) windows is scheduled, throttled to at most
 * once every 10 seconds.
 *
 * @param {string} routeKey - Route identity (method + path, typically).
 * @param {string} ip - Client IP.
 * @param {{limit: number, windowMs: number}} config - From parseRateLimit().
 * @returns {boolean} true when the request is allowed.
 */
function checkRateLimit(routeKey, ip, config) {
  const now = Date.now();
  const overCap = _rateLimitBuckets.size > MAX_RATE_LIMIT_BUCKETS;
  if (overCap && now - _lastEvictionCheck >= 10_000) {
    _lastEvictionCheck = now;
    // Sweep off the hot path so the caller isn't blocked by a large map scan.
    setImmediate(() => {
      const cutoff = Date.now();
      for (const [key, bucket] of _rateLimitBuckets) {
        if (cutoff - bucket.windowStart > 120_000) _rateLimitBuckets.delete(key);
      }
    });
  }

  const bucketKey = `${routeKey}:${ip}`;
  const existing = _rateLimitBuckets.get(bucketKey);
  const windowExpired = !existing || now - existing.windowStart >= config.windowMs;
  if (windowExpired) {
    _rateLimitBuckets.set(bucketKey, { count: 1, windowStart: now });
    return true;
  }
  existing.count++;
  return existing.count <= config.limit;
}
|
|
92
|
+
|
|
93
|
+
/** Sentinel returned by handleProxyRequest when the message isn't a proxy request. */
export const NOT_HANDLED = Symbol("NOT_HANDLED");

/**
 * Per-target concurrency limiters, shared across all proxies on this node.
 * @type {Map<string, AdaptiveConcurrencyLimiter>}
 */
const _concurrencyLimiters = new Map();

/**
 * Fetch the limiter for `targetName`, lazily creating it on first use.
 * `config` is only applied at creation time; later calls reuse the cached
 * limiter regardless of the config passed.
 */
function getConcurrencyLimiter(targetName, config = {}) {
  let limiter = _concurrencyLimiters.get(targetName);
  if (limiter === undefined) {
    limiter = new AdaptiveConcurrencyLimiter(targetName, config);
    _concurrencyLimiters.set(targetName, limiter);
  }
  return limiter;
}

/** Snapshot of every limiter's stats, keyed by target name (for status endpoints). */
export function getAllConcurrencyStats() {
  return Object.fromEntries(
    [..._concurrencyLimiters].map(([name, limiter]) => [name, limiter.stats]),
  );
}
|
|
117
|
+
|
|
118
|
+
/**
 * Build proxy clients for every service this service talks to.
 *
 * Skips the calling service itself; all other entries in `serviceClasses`
 * get a proxy built from their contract (when declared) and an optional
 * colocated instance.
 *
 * @param {ForgeContext} ctx - The calling service's context
 * @param {Map<string, Function>} serviceClasses - service name → ServiceClass
 * @param {Map<string, {service, ctx}>} localServices - Colocated instances
 * @param {Object} [options]
 * @param {Object} [options.interceptors] - Default interceptors for all proxies
 * @returns {Object} Map of service name → proxy client
 */
export function buildServiceProxies(ctx, serviceClasses, localServices = new Map(), options = {}) {
  const proxies = {};
  for (const [serviceName, ServiceClass] of serviceClasses) {
    // Never proxy a service to itself.
    if (serviceName === ctx.serviceName) {
      continue;
    }
    proxies[serviceName] = createServiceProxy(
      ctx,
      serviceName,
      getContract(ServiceClass),
      localServices.get(serviceName) ?? null,
      options,
    );
  }
  return proxies;
}
|
|
146
|
+
|
|
147
|
+
/**
 * Create a proxy client for a single target service.
 *
 * @param {ForgeContext} ctx - The calling service's context.
 * @param {string} targetName - Name of the service being proxied.
 * @param {Object|null} contract - Contract metadata from getContract(), or null.
 * @param {{service, ctx}|null} localInstance - Colocated instance when the target runs in-process.
 * @param {Object} options - Proxy-wide defaults (routingStrategy, retry, circuitBreaker, ...).
 * @returns {Object} Contract proxy (plain object) when a contract exists,
 *   otherwise a dynamic ES Proxy that accepts any method name.
 */
export function createServiceProxy(ctx, targetName, contract, localInstance, options) {
  // Wire routing strategy to EndpointResolver if configured
  // (contract-level strategy wins over the options-level default).
  const routingStrategy = contract?.routingStrategy ?? options.routingStrategy;
  if (routingStrategy && ctx._endpointResolver) {
    ctx._endpointResolver.setStrategy(targetName, routingStrategy);
  }

  // Build interceptor chain (retry is handled as an outer loop, not in the chain)
  const interceptors = buildInterceptors(ctx, targetName, contract, options);

  // Retry config (service-level default; per-method overrides in createProxiedMethod)
  const retryConfig = contract?.retry ?? options.retry;

  // Circuit breakers are per-endpoint (host:port), so different endpoints
  // can trip independently. Colocated calls share a single CB per service.
  const cbOptions = contract?.circuitBreaker ?? options.circuitBreaker ?? {};
  const MAX_ENDPOINT_CBS = 100;
  /** @type {Map<string, CircuitBreaker>} endpoint string → CB instance */
  const endpointCircuitBreakers = new Map();
  // Shared CB for colocated (non-network) calls
  const colocatedCircuitBreaker = new CircuitBreaker(cbOptions);

  /**
   * Get or create a circuit breaker for a specific endpoint.
   * @param {string|null} endpointKey - e.g. "192.168.1.5:4001", or null for colocated
   * @returns {CircuitBreaker}
   */
  function getCircuitBreaker(endpointKey) {
    if (!endpointKey) return colocatedCircuitBreaker;
    let cb = endpointCircuitBreakers.get(endpointKey);
    if (!cb) {
      // Cap to prevent unbounded growth
      if (endpointCircuitBreakers.size >= MAX_ENDPOINT_CBS) {
        // Evict the oldest entry (Map iteration is insertion order).
        // NOTE(review): the evicted endpoint loses its breaker state; a later
        // call to the same endpoint starts with a fresh (closed) breaker.
        const firstKey = endpointCircuitBreakers.keys().next().value;
        endpointCircuitBreakers.delete(firstKey);
      }
      cb = new CircuitBreaker(cbOptions);
      endpointCircuitBreakers.set(endpointKey, cb);
    }
    return cb;
  }

  /** Remove a stale endpoint's CB (e.g. after the endpoint is deregistered). */
  function removeEndpointCB(endpointKey) {
    endpointCircuitBreakers.delete(endpointKey);
  }

  // Contract present → plain-object proxy with one method per contract entry.
  if (contract) {
    return buildContractProxy(
      ctx,
      targetName,
      contract,
      localInstance,
      interceptors,
      { getCircuitBreaker, removeEndpointCB, colocatedCircuitBreaker, endpointCircuitBreakers },
      retryConfig,
    );
  }

  // No contract → dynamic ES Proxy that materializes methods on first access.
  return buildDynamicProxy(ctx, targetName, localInstance, interceptors, { getCircuitBreaker, removeEndpointCB, colocatedCircuitBreaker, endpointCircuitBreakers }, retryConfig);
}
|
|
212
|
+
|
|
213
|
+
/**
 * Build the interceptor chain template for a proxy.
 *
 * Slot [1] is a null placeholder for the per-attempt circuit-breaker
 * interceptor; the caller copies the array and swaps that slot in per
 * attempt rather than rebuilding the whole chain.
 *
 * Layout: [deadline, CB_SLOT, bulkhead, concurrency, metrics?, logging]
 * (metrics is only present when ctx.metrics exists).
 */
function buildInterceptors(ctx, targetName, contract, options) {
  // Contract-level settings win over options-level defaults.
  const timeout = contract?.defaultTimeout ?? options.defaultTimeout ?? 5000;
  const bulkheadConfig = contract?.bulkhead ?? options.bulkhead ?? {};
  const concurrencyConfig = contract?.concurrency ?? options.concurrency ?? {};
  const limiter = getConcurrencyLimiter(targetName, concurrencyConfig);

  return [
    // Deadline propagation (always on)
    deadlineInterceptor(timeout),
    // Slot [1] reserved for the circuit breaker (swapped per-attempt)
    null,
    // Bulkhead: limits concurrent outgoing calls per service
    bulkheadInterceptor(bulkheadConfig),
    // Adaptive concurrency limiting (always on for non-local services)
    concurrencyInterceptor(limiter),
    // Metrics (only when available)
    ...(ctx.metrics ? [metricsInterceptor(ctx.metrics)] : []),
    // Logging (debug level)
    loggingInterceptor(ctx.logger),
  ];
}
|
|
251
|
+
|
|
252
|
+
/**
 * Adaptive concurrency interceptor.
 *
 * Wraps every proxy call with the target's concurrency limiter: a slot is
 * acquired before invoking `next()` and released afterwards with a
 * success/failure flag. When no slot is available the call is rejected
 * immediately with a 503-style CONCURRENCY_LIMIT error.
 */
function concurrencyInterceptor(limiter) {
  return async function adaptiveConcurrency(callCtx, next) {
    const slot = limiter.tryAcquire();
    if (!slot.acquired) {
      const err = new Error(
        `Service "${callCtx.target}" at concurrency limit (${limiter.limit}). ${limiter.inFlight} in flight. Rejecting ${callCtx.method}.`,
      );
      err.code = "CONCURRENCY_LIMIT";
      err.statusCode = 503;
      throw err;
    }

    // Release exactly once, reporting whether the downstream call succeeded
    // (the limiter uses the flag to adapt its limit).
    let succeeded = false;
    try {
      const result = await next();
      succeeded = true;
      return result;
    } finally {
      slot.release(succeeded);
    }
  };
}
|
|
281
|
+
|
|
282
|
+
/**
 * Build a contract-based proxy: a plain object with one proxied method per
 * contract entry, plus `$`-prefixed utilities ($call, $invalidate, metadata).
 *
 * Fix: `$call` previously built its proxied method with an empty `{}` meta,
 * silently dropping the contract's per-method options (timeout, retry,
 * idempotent, localOnly). It now looks the real meta up from the contract.
 *
 * @param {ForgeContext} ctx - Calling service's context.
 * @param {string} targetName - Service being proxied.
 * @param {Object} contract - Contract metadata (always non-null here).
 * @param {{service, ctx}|null} localInstance - Colocated instance, if any.
 * @param {Array} interceptors - Interceptor chain template (slot [1] is the CB placeholder).
 * @param {Object} cbBag - Circuit-breaker accessors from createServiceProxy.
 * @param {Object|false|undefined} retryConfig - Service-level retry default.
 * @returns {Object} The proxy client.
 */
function buildContractProxy(
  ctx,
  targetName,
  contract,
  localInstance,
  interceptors,
  cbBag,
  retryConfig,
) {
  const proxy = {};

  for (const [methodName, meta] of contract.methods) {
    proxy[methodName] = createProxiedMethod(
      ctx,
      targetName,
      methodName,
      meta,
      localInstance,
      contract,
      interceptors,
      cbBag,
      retryConfig,
    );
  }

  // Generic call method with private method guard, contract validation, and caching
  const $callCache = new Map();
  proxy.$call = (methodName, ...args) => {
    if (typeof methodName !== 'string' || methodName.startsWith('_')) {
      throw new Error(`Cannot call private method "${methodName}" via $call`);
    }
    if (contract && !contract.methods.has(methodName)) {
      throw new Error(`Method "${methodName}" is not exposed on ${targetName}`);
    }
    evictOldest($callCache);
    let fn = $callCache.get(methodName);
    if (!fn) {
      fn = createProxiedMethod(
        ctx,
        targetName,
        methodName,
        // Use the contract's per-method meta so $call honors the method's
        // timeout/retry/idempotent/localOnly options (was `{}`, which
        // silently dropped them).
        contract.methods.get(methodName) ?? {},
        localInstance,
        contract,
        interceptors,
        cbBag,
        retryConfig,
      );
      $callCache.set(methodName, fn);
    }
    return fn(...args);
  };

  // Invalidate method cache for hot-reload scenarios
  proxy.$invalidate = () => {
    $callCache.clear();
    // Drop every non-$ method, then rebuild from the (possibly updated) contract.
    for (const key of Object.keys(proxy)) {
      if (key.startsWith('$')) continue;
      delete proxy[key];
    }
    // Re-create proxied methods from contract
    for (const [methodName, meta] of contract.methods) {
      proxy[methodName] = createProxiedMethod(
        ctx, targetName, methodName, meta, localInstance, contract,
        interceptors, cbBag, retryConfig,
      );
    }
  };

  // Metadata
  proxy.$name = targetName;
  proxy.$methods = [...contract.methods.keys()];
  proxy.$isLocal = !!localInstance;
  proxy.$circuitBreaker = cbBag.colocatedCircuitBreaker;
  proxy.$endpointCircuitBreakers = cbBag.endpointCircuitBreakers;

  return proxy;
}
|
|
363
|
+
|
|
364
|
+
/**
 * Build a dynamic proxy (no contract — any method name accepted).
 *
 * `$`-prefixed properties expose metadata; `then` is explicitly undefined so
 * the proxy is not mistaken for a thenable. Any other property access lazily
 * creates (and caches) a proxied method of that name.
 */
function buildDynamicProxy(ctx, targetName, localInstance, interceptors, cbBag, retryConfig) {
  const methodCache = new Map();

  const handler = {
    get(_target, methodName) {
      switch (methodName) {
        case "$name":
          return targetName;
        case "$isLocal":
          return !!localInstance;
        case "$circuitBreaker":
          return cbBag.colocatedCircuitBreaker;
        case "$endpointCircuitBreakers":
          return cbBag.endpointCircuitBreakers;
        case "$invalidate":
          return () => { methodCache.clear(); };
        case "then":
          // Not a thenable: `await proxy` must resolve to the proxy itself.
          return undefined;
        default:
          break;
      }

      evictOldest(methodCache);
      let fn = methodCache.get(methodName);
      if (fn === undefined) {
        fn = createProxiedMethod(
          ctx, targetName, methodName, {}, localInstance, null,
          interceptors, cbBag, retryConfig,
        );
        methodCache.set(methodName, fn);
      }
      return fn;
    },
  };

  return new Proxy({}, handler);
}
|
|
399
|
+
|
|
400
|
+
/**
 * Create a single proxied method with full interceptor + routing support.
 *
 * Retry is implemented as an outer loop around the interceptor chain so that
 * each attempt gets a fresh chain execution (deadline, circuit breaker,
 * metrics, logging all run on every attempt).
 *
 * Fix: the circuit-breaker "open" short-circuit is now checked BEFORE the
 * endpoint pending-slot is acquired. Previously the early `throw` fired
 * after acquireEndpoint() but outside the try/finally that calls
 * releaseEndpoint(), permanently leaking a pending count and skewing
 * LeastPendingStrategy. (Also removes the unused `startTime` local.)
 *
 * @returns {Function} async (...args) => result
 */
function createProxiedMethod(
  ctx,
  targetName,
  methodName,
  meta,
  localInstance,
  contract,
  interceptors,
  cbBag,
  retryConfig,
) {
  // Resolve retry settings: per-method overrides service-level.
  // `retry: false` disables retries entirely (single attempt).
  const methodRetry = meta.options?.retry ?? retryConfig;
  const retryDisabled = methodRetry === false;
  const maxAttempts = retryDisabled ? 1 : (methodRetry?.maxAttempts ?? 3);
  const baseDelayMs = retryDisabled ? 0 : (methodRetry?.baseDelayMs ?? 100);
  const maxDelayMs = retryDisabled ? 0 : (methodRetry?.maxDelayMs ?? 2000);
  const idempotent = meta.options?.idempotent;

  return async (...args) => {
    // Enforce localOnly: reject remote calls for methods marked localOnly
    if (meta.options?.localOnly && !localInstance) {
      throw new Error(`Method '${methodName}' on service '${targetName}' is marked localOnly and cannot be called remotely`);
    }

    // Capture the current RequestContext (from the calling service's request)
    const rctx = RequestContext.current();

    /** @type {CallContext} */
    const callCtx = {
      from: ctx.serviceName,
      target: targetName,
      method: methodName,
      args,
      deadline: rctx?.deadline ?? null,
      timeout: meta.options?.timeout ?? 5000,
      metadata: Object.create(null),
      attempt: 0,
      // Propagated context
      correlationId: rctx?.correlationId ?? null,
      traceId: rctx?.traceId ?? null,
      auth: rctx?.auth ?? null,
    };

    let lastError;

    for (let attempt = 0; attempt < maxAttempts; attempt++) {
      callCtx.attempt = attempt;

      // Resolve endpoint ONCE per attempt — used for both circuit breaker
      // selection and the actual HTTP call. This avoids round-robin advancing
      // twice, which would cause circuit breaker state to accumulate against
      // a different endpoint than the one actually called.
      let endpointKey = null;
      let resolvedEndpoint = null;
      if (!localInstance) {
        resolvedEndpoint = ctx._endpointResolver?.resolve(targetName) ?? null;
        if (resolvedEndpoint) {
          endpointKey = `${resolvedEndpoint.host}:${resolvedEndpoint.port}`;
        } else {
          const port = ctx._servicePorts?.[targetName];
          if (port) endpointKey = `127.0.0.1:${port}`;
        }
      }
      callCtx.resolvedEndpoint = resolvedEndpoint;
      const circuitBreaker = cbBag.getCircuitBreaker(endpointKey);

      // Abort early if circuit breaker is open and not yet ready for a probe.
      // Checked BEFORE acquiring the pending slot below, so this throw cannot
      // leak a pending count (the release happens in the loop's `finally`,
      // which only guards the acquired path).
      if (circuitBreaker.state === 'open' && Date.now() < circuitBreaker.nextAttemptTime) {
        const err = new Error(
          `Circuit breaker OPEN for ${targetName}.${methodName}` +
          ` — service appears unavailable. Retry after ${new Date(circuitBreaker.nextAttemptTime).toISOString()}`,
        );
        err.code = 'CIRCUIT_OPEN';
        throw err;
      }

      // COR-C1: Notify strategy of pending request (enables LeastPendingStrategy).
      // Balanced by releaseEndpoint() in the `finally` below.
      if (!localInstance && endpointKey && ctx._endpointResolver) {
        ctx._endpointResolver.acquireEndpoint(targetName, endpointKey);
      }

      // Build per-attempt interceptor chain with the correct circuit breaker.
      // Must copy because concurrent calls share the `interceptors` template —
      // mutating slot [1] in-place would race with other in-flight chains.
      const attemptChain = interceptors.slice();
      attemptChain[1] = circuitBreaker.createInterceptor();

      try {
        const result = await runInterceptorChain(attemptChain, callCtx, async (finalCtx) => {
          // === Colocated: direct function call with context propagation ===
          if (localInstance) {
            // Validate method is exposed in the contract (prevents calling private methods via colocated dispatch)
            if (contract && !contract.methods.has(methodName)) {
              throw new Error(`Method '${methodName}' is not exposed on service '${targetName}'`);
            }

            const method = localInstance.service[methodName];
            if (typeof method === "function") {
              // Run the target method inside a child RequestContext so the
              // callee sees correlation/trace/deadline as if called remotely.
              const callFn = () => {
                if (rctx) {
                  const childCtx = rctx.child(targetName, methodName);
                  if (finalCtx.deadline) childCtx.deadline = finalCtx.deadline;
                  return RequestContext.run(childCtx, () => method.apply(localInstance.service, args));
                }
                return method.apply(localInstance.service, args);
              };

              if (finalCtx.deadline) {
                const timeLeft = finalCtx.deadline - Date.now();
                if (timeLeft <= 0) throw new Error(`Deadline exceeded for ${targetName}.${methodName}`);
                let timer = null;
                const timeoutPromise = new Promise((_, reject) => {
                  timer = setTimeout(() => reject(new Error(`Deadline exceeded for colocated call ${targetName}.${methodName}`)), timeLeft);
                });
                return Promise.race([callFn(), timeoutPromise]).finally(() => { if (timer) clearTimeout(timer); });
              }
              return callFn();
            }
            throw new Error(`${targetName} has no method "${methodName}"`);
          }

          // === Remote: HTTP invoke when endpoint is known, otherwise IPC request fallback ===
          // Calculate effective timeout respecting deadline.
          let effectiveTimeout = finalCtx.timeout ?? 5000;
          if (finalCtx.deadline) {
            const remaining = finalCtx.deadline - Date.now();
            if (remaining <= 0) throw new Error(`Deadline exceeded for ${targetName}.${methodName}`);
            effectiveTimeout = Math.min(effectiveTimeout, remaining);
          }

          let targetHost = "127.0.0.1";
          let targetPort;

          // Use the endpoint resolved once at the top of this attempt
          // (stored in callCtx) to avoid double round-robin advancement.
          const endpoint = finalCtx.resolvedEndpoint;
          if (endpoint) {
            targetHost = endpoint.host;
            targetPort = endpoint.port;
          } else {
            targetPort = ctx._servicePorts?.[targetName];
          }

          if (!targetPort) {
            // Internal/background services may not expose HTTP ports. Use IPC request path.
            if (typeof ctx.request === "function") {
              const payload = {
                __forge_method: methodName,
                __forge_args: args,
              };
              if (finalCtx.deadline) {
                payload.__forge_deadline = finalCtx.deadline;
              }
              return ctx.request(targetName, payload, effectiveTimeout);
            }
            throw new Error(
              `No endpoint known for service "${targetName}". ` +
              `Check that it's listed in forge.config and has a port.`,
            );
          }

          const headers = { "Content-Type": "application/json" };

          if (rctx) {
            Object.assign(headers, rctx.toHeaders());
            // Override deadline with remaining time from callCtx (shrinks on retry)
            if (finalCtx.deadline) {
              const remaining = finalCtx.deadline - Date.now();
              if (remaining <= 0) {
                throw new Error(`Deadline exceeded for ${targetName}.${methodName}`);
              }
              headers["x-forge-deadline"] = String(remaining);
            }
          }

          const resp = await fetch(`http://${targetHost}:${targetPort}/__forge/invoke`, {
            method: "POST",
            headers,
            body: JSON.stringify({
              method: methodName,
              args,
            }),
            signal: AbortSignal.timeout(effectiveTimeout),
          });

          // NOTE(review): resp.json() is called without checking resp.ok — a
          // non-JSON error body would surface as a parse error; confirm the
          // invoke endpoint always responds with JSON.
          const data = await resp.json();
          if (data.error) {
            const err = new Error(data.error);
            err.statusCode = resp.status;
            throw err;
          }

          return data.result;
        });

        return result;
      } catch (err) {
        lastError = err;

        // Never retry circuit-open errors — the breaker is tripped
        if (err.code === 'CIRCUIT_OPEN') {
          throw err;
        }

        if (!isRetryable(err, idempotent) || attempt >= maxAttempts - 1) {
          throw err;
        }

        // Check deadline before retrying
        if (callCtx.deadline && Date.now() >= callCtx.deadline) {
          throw new Error(`Deadline exceeded after ${attempt + 1} attempts to ${callCtx.target}.${callCtx.method}`);
        }

        // Exponential backoff with jitter, capped to remaining deadline
        let delay = Math.min(maxDelayMs, baseDelayMs * 2 ** attempt + Math.random() * 100);
        if (callCtx.deadline) {
          const timeLeft = callCtx.deadline - Date.now();
          if (timeLeft <= 0) {
            throw new Error(`Deadline exceeded after ${attempt + 1} attempts to ${callCtx.target}.${callCtx.method}`);
          }
          delay = Math.min(delay, timeLeft);
        }

        await new Promise((resolve) => setTimeout(resolve, delay));
      } finally {
        // COR-C1: Release pending slot so LeastPendingStrategy sees accurate counts
        if (!localInstance && endpointKey && ctx._endpointResolver) {
          ctx._endpointResolver.releaseEndpoint(targetName, endpointKey);
        }
      }
    }

    throw lastError;
  };
}
|
|
643
|
+
|
|
644
|
+
// ─── Request Handler (receiving side) ───────────────────────

/**
 * Handle an incoming proxy-style RPC request on the receiving service.
 *
 * Detects the __forge_method convention, checks deadlines,
 * validates the method is exposed, and dispatches.
 *
 * @param {Object} service - The receiving service instance.
 * @param {string} from - Name of the calling service (for error messages).
 * @param {Object} payload - Incoming message; handled only when it carries
 *   `__forge_method` (with optional `__forge_args` and `__forge_deadline`).
 * @returns {*} The method's result (a Promise when a deadline race is armed),
 *   or the NOT_HANDLED sentinel when the payload isn't a proxy request.
 * @throws {Error} On expired deadline, unexposed/private method, or unknown method.
 */
export function handleProxyRequest(service, from, payload) {
  if (payload?.__forge_method) {
    const methodName = payload.__forge_method;
    const args = payload.__forge_args ?? [];

    // Check deadline propagation (value is an absolute timestamp in ms).
    // NOTE(review): a falsy deadline (0) is treated as "no deadline".
    let absoluteDeadline = null;
    if (payload.__forge_deadline) {
      absoluteDeadline = Number(payload.__forge_deadline);
      const remaining = absoluteDeadline - Date.now();
      if (remaining <= 0) {
        throw new Error(`Deadline already exceeded for ${service.ctx?.serviceName}.${methodName}` + ` (caller: ${from})`);
      }
    }

    // Contract/whitelist check first — reject unexposed methods before probing internals
    const contract = getContract(service.constructor);
    if (contract && !contract.methods.has(methodName)) {
      throw new Error(`Method "${methodName}" is not exposed on this service`);
    }

    // Reject private methods when there is no contract to gate access
    if (!contract && methodName.startsWith('_')) {
      throw new Error(`Method "${methodName}" is private and cannot be invoked remotely`);
    }

    const method = service[methodName];
    if (typeof method !== "function") {
      throw new Error(
        `Service "${service.ctx?.serviceName}" has no method "${methodName}". ` +
        `Available: ${getExposedMethods(service).join(", ") || "none"}`,
      );
    }

    // Enforce deadline during execution if set: race the method against a
    // timer for the remaining budget, always clearing the timer afterwards.
    if (absoluteDeadline) {
      // `remaining` is re-read because validation above may have taken time.
      const remaining = absoluteDeadline - Date.now();
      if (remaining <= 0) {
        throw new Error(`Deadline already exceeded for ${service.ctx?.serviceName}.${methodName} (caller: ${from})`);
      }
      let timer = null;
      const timeoutPromise = new Promise((_, reject) => {
        // unref so a pending deadline timer never keeps the process alive
        timer = setTimeout(() => reject(new Error('Deadline exceeded during execution')), remaining);
        if (timer) timer.unref();
      });
      // Re-check deadline immediately before race to close the window
      const currentRemaining = absoluteDeadline - Date.now();
      if (currentRemaining <= 0) {
        clearTimeout(timer);
        throw new Error('Deadline exceeded before execution start');
      }
      return Promise.race([method.apply(service, args), timeoutPromise]).finally(() => { if (timer) clearTimeout(timer); });
    }

    // No deadline: plain dispatch (may return a value or a Promise).
    return method.apply(service, args);
  }

  // Not a proxy-style message — let other handlers process it.
  return NOT_HANDLED;
}
|
|
711
|
+
|
|
712
|
+
/**
 * List the method names that may be invoked remotely on a service.
 *
 * When the service class declares a contract, the contract's method
 * whitelist is authoritative. Otherwise fall back to the public methods
 * found on the service's prototype (excluding the constructor and any
 * underscore-prefixed names, which are treated as private).
 *
 * @param {Service} service - The service instance to inspect
 * @returns {string[]} Names of remotely invokable methods
 */
function getExposedMethods(service) {
  const contract = getContract(service.constructor);
  if (contract) {
    return [...contract.methods.keys()];
  }

  // No contract: enumerate public functions on the prototype.
  const proto = Object.getPrototypeOf(service);
  const exposed = [];
  for (const name of Object.getOwnPropertyNames(proto)) {
    if (name === "constructor") continue;
    if (name.startsWith("_")) continue;
    if (typeof service[name] !== "function") continue;
    exposed.push(name);
  }
  return exposed;
}
|
|
720
|
+
|
|
721
|
+
/**
|
|
722
|
+
* Auto-register HTTP routes from @Route decorator / contract metadata.
|
|
723
|
+
*
|
|
724
|
+
* For every route declared via `@Route(method, path)` or the plain-JS
|
|
725
|
+
* `static contract = { routes: [...] }`, this function registers the
|
|
726
|
+
* corresponding handler on the service's HTTP router.
|
|
727
|
+
*
|
|
728
|
+
* **Contract route handler signature:**
|
|
729
|
+
*
|
|
730
|
+
* async handler(body, params, query) -> result
|
|
731
|
+
*
|
|
732
|
+
* - `body` — parsed request body (`req.body`)
|
|
733
|
+
* - `params` — URL path parameters (`req.params`), e.g. `{ id: '42' }` for `/users/:id`
|
|
734
|
+
* - `query` — query-string parameters (`req.query`), e.g. `{ page: '2' }`
|
|
735
|
+
* - Return value is automatically serialized as JSON via `res.json(result)`.
|
|
736
|
+
* POST routes respond with 201; all other methods respond with 200.
|
|
737
|
+
* Errors are caught and returned as `{ error: message }` with 404 (if the
|
|
738
|
+
* message contains "not found") or 500.
|
|
739
|
+
*
|
|
740
|
+
* **How this differs from manual `ctx.router` routes:**
|
|
741
|
+
*
|
|
742
|
+
* When you register routes manually in `onStart(ctx)`, the handler receives
|
|
743
|
+
* the raw `(req, res)` pair and you are responsible for sending the response:
|
|
744
|
+
*
|
|
745
|
+
* ctx.router.get('/health', (req, res) => {
|
|
746
|
+
* res.json({ ok: true });
|
|
747
|
+
* });
|
|
748
|
+
*
|
|
749
|
+
* Contract-based handlers are higher-level: the framework supplies the
|
|
750
|
+
* individual pieces of the request as arguments and handles serialization
|
|
751
|
+
* and status codes for you.
|
|
752
|
+
*
|
|
753
|
+
* @example
|
|
754
|
+
* // ── Contract / decorator approach ──────────────────────
|
|
755
|
+
* // Handler receives (body, params, query) and returns a value.
|
|
756
|
+
*
|
|
757
|
+
* class UserService extends Service {
|
|
758
|
+
* @Expose()
|
|
759
|
+
* @Route('GET', '/users/:id')
|
|
760
|
+
* async getUser(body, params, query) {
|
|
761
|
+
* return this.db.findUser(params.id); // auto-serialized, 200 OK
|
|
762
|
+
* }
|
|
763
|
+
*
|
|
764
|
+
* @Expose()
|
|
765
|
+
* @Route('POST', '/users')
|
|
766
|
+
* async createUser(body, params, query) {
|
|
767
|
+
* return this.db.insert(body); // auto-serialized, 201 Created
|
|
768
|
+
* }
|
|
769
|
+
* }
|
|
770
|
+
*
|
|
771
|
+
* // ── Manual approach (in onStart) ──────────────────────
|
|
772
|
+
* // Handler receives (req, res) and must call res.json() itself.
|
|
773
|
+
*
|
|
774
|
+
* async onStart(ctx) {
|
|
775
|
+
* ctx.router.get('/users/:id', async (req, res) => {
|
|
776
|
+
* const user = await this.db.findUser(req.params.id);
|
|
777
|
+
* res.json(user); // you choose the status code
|
|
778
|
+
* });
|
|
779
|
+
* }
|
|
780
|
+
*
|
|
781
|
+
* @param {Service} service - The service instance whose routes to register
|
|
782
|
+
* @param {ForgeContext} ctx - The service's ForgeContext (must be type 'edge')
|
|
783
|
+
*/
|
|
784
|
+
export function autoRegisterRoutes(service, ctx) {
  const contract = getContract(service.constructor);
  if (!contract || contract.routes.length === 0) return;

  for (const route of contract.routes) {
    const handler = service[route.handlerName];
    if (typeof handler !== "function") {
      throw new Error(`Route handler "${route.handlerName}" not found on service`);
    }

    // Only edge services own an HTTP router; other service types skip registration.
    if (ctx.serviceType === "edge") {
      const method = route.httpMethod.toLowerCase();
      const routeRateLimit = route.rateLimit ? parseRateLimit(route.rateLimit) : null;
      const routeKey = `${method}:${route.path}`;

      ctx.router[method](route.path, async (req, res) => {
        // Reuse a context attached by upstream middleware, otherwise rebuild it
        // from propagation headers (or start fresh when no headers exist).
        const rctx = req.ctx ?? (req.headers ? RequestContext.fromPropagation(req.headers) : new RequestContext());

        // @Auth enforcement -- defense-in-depth even without ForgeProxy
        if (route.auth) {
          if (!rctx.auth) {
            res.json({ error: "Authentication required" }, 401);
            return;
          }
          if (Array.isArray(route.auth.roles) && route.auth.roles.length > 0) {
            if (!route.auth.roles.includes(rctx.auth.role)) {
              res.json({ error: "Insufficient permissions" }, 403);
              return;
            }
          }
        }

        // @RateLimit enforcement -- per-route, per-IP token bucket
        if (routeRateLimit) {
          const rawAddr = req.socket?.remoteAddress ?? "";
          let ip;
          // Trust X-Forwarded-For only from explicitly trusted proxies (FORGE_TRUSTED_PROXIES),
          // or fall back to any private network if FORGE_TRUSTED_PROXIES is not configured.
          const trustXff = process.env.FORGE_TRUSTED_PROXIES
            ? isTrustedProxy(rawAddr)
            : isPrivateNetwork(rawAddr);
          if (trustXff) {
            ip = req.headers?.["x-forwarded-for"]?.split(",")[0]?.trim() || rawAddr;
          } else {
            ip = rawAddr || "unknown";
          }
          if (!checkRateLimit(routeKey, ip, routeRateLimit)) {
            res.json({ error: "Rate limit exceeded" }, 429);
            return;
          }
        }

        await RequestContext.run(rctx, async () => {
          try {
            const result = await handler.call(service, req.body, req.params, req.query);
            // Contract routes auto-serialize: POST creates -> 201, everything else -> 200.
            const statusCode = route.httpMethod === "POST" ? 201 : 200;
            res.json(result, statusCode);
          } catch (err) {
            // Normalize whatever was thrown into (message, code).
            // FIX: the documented "message contains 'not found' -> 404" rule
            // now also applies to string throws; previously a thrown string
            // was hard-coded to 500, contradicting the contract described in
            // the JSDoc above.
            let message;
            let code = null;
            if (err == null) {
              message = "Unknown error";
            } else if (typeof err === 'string') {
              message = err;
            } else {
              message = err.message ?? "Unknown error";
              if (err.statusCode) {
                code = err.statusCode;
              }
            }
            if (code === null) {
              code = message.toLowerCase().includes("not found") ? 404 : 500;
            }

            // Don't leak internals on 5xx errors
            if (code >= 500) {
              res.json({ error: "Internal server error" }, code);
            } else {
              res.json({ error: message }, code);
            }
          }
        });
      });
    }
  }
}
|
|
874
|
+
|
|
875
|
+
/**
|
|
876
|
+
* Auto-wire event subscriptions from @On decorators.
|
|
877
|
+
*/
|
|
878
|
+
export function autoWireSubscriptions(service, _ctx) {
  // Prevent double-wrapping on hot reload
  if (_WIRED_SERVICES.has(service)) return;

  const contract = getContract(service.constructor);
  if (!contract || contract.subscriptions.length === 0) return;

  _WIRED_SERVICES.add(service);
  const delegate = service.onMessage.bind(service);

  service.onMessage = async (sender, payload) => {
    const eventName = payload?.__forge_event;
    if (!eventName) {
      // Not a forge event envelope -- hand off to the service's own handler.
      return delegate(sender, payload);
    }

    // Dispatch to every subscription matching this (sender, event) pair.
    for (const subscription of contract.subscriptions) {
      if (subscription.service !== sender) continue;
      if (subscription.event !== eventName) continue;
      const eventHandler = service[subscription.handlerName];
      if (eventHandler) await eventHandler.call(service, payload.__forge_data);
    }
    // Event envelopes are consumed here; nothing is forwarded downstream.
    return;
  };
}
|