threadforge 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +152 -0
- package/bin/forge.js +1050 -0
- package/bin/host-commands.js +344 -0
- package/bin/platform-commands.js +570 -0
- package/package.json +71 -0
- package/shared/auth.js +475 -0
- package/src/core/DirectMessageBus.js +364 -0
- package/src/core/EndpointResolver.js +247 -0
- package/src/core/ForgeContext.js +2227 -0
- package/src/core/ForgeHost.js +122 -0
- package/src/core/ForgePlatform.js +145 -0
- package/src/core/Ingress.js +768 -0
- package/src/core/Interceptors.js +420 -0
- package/src/core/MessageBus.js +310 -0
- package/src/core/Prometheus.js +305 -0
- package/src/core/RequestContext.js +413 -0
- package/src/core/RoutingStrategy.js +316 -0
- package/src/core/Supervisor.js +1306 -0
- package/src/core/ThreadAllocator.js +196 -0
- package/src/core/WorkerChannelManager.js +879 -0
- package/src/core/config.js +624 -0
- package/src/core/host-config.js +311 -0
- package/src/core/network-utils.js +166 -0
- package/src/core/platform-config.js +308 -0
- package/src/decorators/ServiceProxy.js +899 -0
- package/src/decorators/index.js +571 -0
- package/src/deploy/NginxGenerator.js +865 -0
- package/src/deploy/PlatformManifestGenerator.js +96 -0
- package/src/deploy/RouteManifestGenerator.js +112 -0
- package/src/deploy/index.js +984 -0
- package/src/frontend/FrontendDevLifecycle.js +65 -0
- package/src/frontend/FrontendPluginOrchestrator.js +187 -0
- package/src/frontend/SiteResolver.js +63 -0
- package/src/frontend/StaticMountRegistry.js +90 -0
- package/src/frontend/index.js +5 -0
- package/src/frontend/plugins/index.js +2 -0
- package/src/frontend/plugins/viteFrontend.js +79 -0
- package/src/frontend/types.js +35 -0
- package/src/index.js +56 -0
- package/src/internals.js +31 -0
- package/src/plugins/PluginManager.js +537 -0
- package/src/plugins/ScopedPostgres.js +192 -0
- package/src/plugins/ScopedRedis.js +142 -0
- package/src/plugins/index.js +1729 -0
- package/src/registry/ServiceRegistry.js +796 -0
- package/src/scaling/ScaleAdvisor.js +442 -0
- package/src/services/Service.js +195 -0
- package/src/services/worker-bootstrap.js +676 -0
- package/src/templates/auth-service.js +65 -0
- package/src/templates/identity-service.js +75 -0
package/src/plugins/index.js
@@ -0,0 +1,1729 @@
/**
 * ThreadForge Built-in Plugins
 *
 * import { redis, postgres, s3, cors, cron, realtime } from 'threadforge/plugins';
 */

// ─── Redis ─────────────────────────────────────────────────

export function redis(options = {}) {
  const url = options.url ?? `redis://${options.host ?? "127.0.0.1"}:${options.port ?? 6379}/${options.db ?? 0}`;
  const poolSize = Math.max(1, Math.min(options.poolSize ?? 2, 8));

  return {
    name: "redis",
    version: "1.0.0",
    inject: "redis",
    validate() {
      try {
        new URL(url);
      } catch {
        throw new Error(`Invalid Redis URL: ${url}`);
      }
    },
    env() {
      // REL-C2: Sanitize URL before propagating to env — strip credentials
      // to prevent exposure via /proc/<pid>/environ. The connect() function
      // uses the original `url` variable from closure scope.
      try {
        const parsed = new URL(url);
        parsed.username = '';
        parsed.password = '';
        return { FORGE_REDIS_URL: parsed.toString() };
      } catch {
        return { FORGE_REDIS_URL: url };
      }
    },

    async connect(ctx) {
      let RedisClient;
      try {
        RedisClient = (await import("ioredis")).default;
      } catch {
        // P16: Pool built-in Redis connections to avoid head-of-line blocking
        if (poolSize <= 1) {
          return _mkRedis(url, ctx);
        }
        return _mkRedisPool(url, ctx, poolSize);
      }
      // ioredis is available — connection errors should propagate
      const c = new RedisClient(url, { keyPrefix: options.keyPrefix ?? "", maxRetriesPerRequest: 3, lazyConnect: true });
      await c.connect();
      return c;
    },

    async healthCheck(c) {
      try {
        return { status: (await c.ping()) === "PONG" ? "ok" : "degraded" };
      } catch (e) {
        return { status: "error", error: e.message };
      }
    },
    async disconnect(c) {
      await c.quit?.();
    },
    metrics(c) {
      return { connected: c.status === "ready" ? 1 : 0 };
    },
    nginx() {
      return options.commanderUI
        ? {
            locations: [
              { path: "/admin/redis", config: "proxy_pass http://127.0.0.1:8081;\nproxy_set_header Host $host;" },
            ],
          }
        : {};
    },
  };
}
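
// Usage sketch: driving the lifecycle hooks directly, the way the package's
// PluginManager presumably does. The `ctx` shape (a console-style logger) is
// an illustrative assumption, not part of this file.
//
//   const plugin = redis({ host: "127.0.0.1", port: 6379, db: 0, poolSize: 4 });
//   plugin.validate();                 // throws on a malformed URL
//   plugin.env();                      // { FORGE_REDIS_URL: "redis://127.0.0.1:6379/0" } — credentials stripped
//   const client = await plugin.connect({ logger: console });
//   await client.set("greeting", "hello");
//   await client.get("greeting");      // "hello"
//   await plugin.healthCheck(client);  // { status: "ok" } when PING returns PONG
//   await plugin.disconnect(client);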

/**
 * Convert a RESP value to a string if it's a Buffer.
 * Returns null unchanged. For ioredis API compatibility.
 * @internal
 */
function _bufToStr(v) {
  if (v === null || v === undefined) return v;
  if (Buffer.isBuffer(v)) return v.toString('utf8');
  return v;
}

/** @internal Exported for testing only */
export async function _mkRedis(url, ctx) {
  const net = await import("node:net");
  const p = new URL(url);
  const host = p.hostname || "127.0.0.1";
  const port = parseInt(p.port || "6379", 10);
  const MAX_REDIS_BUFFER = 16 * 1024 * 1024; // 16 MB
  let socket,
    connected = false,
    rq = [],
    buf = Buffer.alloc(0);

  const CRLF = Buffer.from("\r\n");

  // Reconnection state
  let intentionalClose = false;
  let reconnectAttempts = 0;
  let reconnectTimer = null;
  let subReconnectAttempts = 0;
  let subReconnectTimer = null;
  let subConnecting = null;
  const MAX_RECONNECT_RETRIES = 10;
  const BASE_RECONNECT_DELAY = 1000;
  const MAX_RECONNECT_DELAY = 30000;

  // C-PLUGIN-5: Helper to cleanly destroy socket and prevent leaks
  function destroySocket() {
    if (socket) {
      socket.removeAllListeners();
      socket.destroy();
      socket = null;
    }
  }

  function conn() {
    return new Promise((ok, no) => {
      // C-PLUGIN-5: Destroy previous socket before creating new one
      destroySocket();
      socket = net.connect({ host, port });
      socket.on("connect", () => {
        connected = true;
        reconnectAttempts = 0;
        ok();
      });
      socket.on("error", (err) => {
        if (!connected) return no(err);
        ctx.logger.warn(`Redis socket error: ${err.message}`);
      });
      socket.on("data", (d) => {
        if (buf.length + d.length > MAX_REDIS_BUFFER) {
          socket.destroy(new Error('Redis response buffer overflow'));
          return;
        }
        buf = Buffer.concat([buf, d]);
        flush();
      });
      socket.on("close", () => {
        connected = false;
        clientRef.status = "disconnected";
        // Reject pending requests
        const pending = rq.splice(0);
        for (const r of pending) r.no(new Error("Redis connection closed"));
        buf = Buffer.alloc(0);

        if (intentionalClose) return;
        scheduleReconnect();
      });
    });
  }

  function scheduleReconnect() {
    if (reconnectTimer !== null) return; // Already reconnecting
    if (reconnectAttempts >= MAX_RECONNECT_RETRIES) {
      ctx.logger.error(`Redis reconnect failed after ${MAX_RECONNECT_RETRIES} attempts, giving up`);
      clientRef.status = 'disconnected';
      // M-PLUGIN-4: Reject all pending requests on reconnect exhaustion
      const pending = rq.splice(0);
      for (const r of pending) r.no(new Error('Redis reconnect failed'));
      // C-PLUGIN-5: Clean up socket
      destroySocket();
      return;
    }
    const delay = Math.min(BASE_RECONNECT_DELAY * 2 ** reconnectAttempts, MAX_RECONNECT_DELAY);
    reconnectAttempts++;
    ctx.logger.warn(`Redis reconnecting in ${delay}ms (attempt ${reconnectAttempts}/${MAX_RECONNECT_RETRIES})`);
    reconnectTimer = setTimeout(async () => {
      reconnectTimer = null;
      try {
        await conn();
        // Re-authenticate and select DB after reconnect
        if (p.password) await cmd("AUTH", p.password);
        const db = parseInt(p.pathname?.slice(1) || "0", 10);
        if (db > 0) await cmd("SELECT", String(db));
        clientRef.status = "ready";
        ctx.logger.info("Redis reconnected (built-in client)", { host, port });
      } catch (err) {
        ctx.logger.warn(`Redis reconnect attempt ${reconnectAttempts} failed: ${err.message}`);
        scheduleReconnect();
      }
    }, delay);
  }

  const MAX_NEST_DEPTH = 32;
  const MAX_RESP_ARRAY = 10_000;
  const MAX_RESP_BULK = 16 * 1024 * 1024; // 16MB

  function flush() {
    while (buf.length && rq.length) {
      let r;
      try {
        r = parse(buf, 0);
      } catch (err) {
        // Unknown RESP type or parse error — reject current request and reset buffer
        buf = Buffer.alloc(0);
        rq.shift().no(err);
        return;
      }
      if (!r) break;
      buf = r.rem;
      rq.shift().ok(r.val);
    }
  }
  function parse(d, depth) {
    if (!d.length) return null;
    if (depth > MAX_NEST_DEPTH) {
      throw new Error('RESP array nesting too deep');
    }
    const t = d[0]; // byte value: '+' = 43, '-' = 45, ':' = 58, '$' = 36, '*' = 42
    const nl = d.indexOf(CRLF[0]); // find \r
    if (nl === -1 || nl + 1 >= d.length || d[nl + 1] !== CRLF[1]) return null;
    const line = d.slice(1, nl).toString("utf8");
    const afterCrlf = d.slice(nl + 2);
    // Simple string
    if (t === 0x2b) return { val: line, rem: afterCrlf };
    // Error
    if (t === 0x2d) return { val: new Error(line), rem: afterCrlf };
    // Integer
    if (t === 0x3a) return { val: parseInt(line, 10), rem: afterCrlf };
    // Bulk string — payload is raw Buffer
    if (t === 0x24) {
      const len = parseInt(line, 10);
      if (len < -1 || len > MAX_RESP_BULK) {
        throw new Error(`RESP bulk string length out of bounds: ${len}`);
      }
      if (len === -1) return { val: null, rem: afterCrlf };
      // Need len bytes + \r\n after the data
      if (afterCrlf.length < len + 2) return null;
      const val = Buffer.from(afterCrlf.subarray(0, len));
      return { val, rem: afterCrlf.subarray(len + 2) };
    }
    // Array
    if (t === 0x2a) {
      const cnt = parseInt(line, 10);
      if (cnt < -1 || cnt > MAX_RESP_ARRAY) {
        throw new Error(`RESP array count out of bounds: ${cnt}`);
      }
      if (cnt === -1) return { val: null, rem: afterCrlf };
      let rm = afterCrlf;
      const a = [];
      let totalArraySize = 0;
      for (let i = 0; i < cnt; i++) {
        const it = parse(rm, depth + 1);
        if (!it) return null;
        // Track total array element sizes
        if (Buffer.isBuffer(it.val)) {
          totalArraySize += it.val.length;
          if (totalArraySize > MAX_RESP_BULK) {
            throw new Error(`RESP array total size exceeds limit: ${totalArraySize}`);
          }
        }
        a.push(it.val);
        rm = it.rem;
      }
      return { val: a, rem: rm };
    }
    const err = new Error(`Unknown RESP type: '${String.fromCharCode(t)}' (0x${t.toString(16)})`);
    err.code = 'RESP_PARSE_ERROR';
    throw err;
  }
  function cmd(...args) {
    return new Promise((ok, no) => {
      if (!connected) return no(new Error("Redis not connected"));
      // H-PLUGIN-4: Reject arguments containing CR/LF (command injection)
      for (const a of args) {
        if (!Buffer.isBuffer(a) && typeof a === 'string' && (/\r|\n/).test(a)) {
          return no(new Error('Redis command argument contains invalid characters (CR/LF)'));
        }
      }
      const header = `*${args.length}\r\n`;
      const parts = [header];
      for (const a of args) {
        if (Buffer.isBuffer(a)) {
          parts.push(`$${a.length}\r\n`);
          parts.push(a);
          parts.push('\r\n');
        } else {
          const s = String(a);
          parts.push(`$${Buffer.byteLength(s)}\r\n${s}\r\n`);
        }
      }
      rq.push({ ok, no });
      socket.cork();
      for (const part of parts) {
        socket.write(part);
      }
      socket.uncork();
    });
  }
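
  // Wire-format example: cmd("SET", "greet", "hi") writes the RESP array
  //   *3\r\n$3\r\nSET\r\n$5\r\ngreet\r\n$2\r\nhi\r\n
  // and parse() later consumes the server's reply ("+OK\r\n" → "OK").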

  await conn();
  if (p.password) await cmd("AUTH", p.password);
  const db = parseInt(p.pathname?.slice(1) || "0", 10);
  if (db > 0) await cmd("SELECT", String(db));
  ctx.logger.info("Redis connected (built-in client)", { host, port });

  const clientRef = {
    status: "ready",
    get: async (k) => _bufToStr(await cmd("GET", k)),
    set: (...a) => cmd("SET", ...a),
    del: (...k) => cmd("DEL", ...k),
    hget: async (k, f) => _bufToStr(await cmd("HGET", k, f)),
    hset: (k, f, v) => cmd("HSET", k, f, v),
    hgetall: async (k) => {
      const a = await cmd("HGETALL", k);
      if (!Array.isArray(a)) return {};
      const o = {};
      for (let i = 0; i < a.length; i += 2) o[_bufToStr(a[i])] = _bufToStr(a[i + 1]);
      return o;
    },
    keys: async (pat) => {
      const a = await cmd("KEYS", pat);
      return Array.isArray(a) ? a.map(_bufToStr) : a;
    },
    expire: (k, s) => cmd("EXPIRE", k, s),
    ttl: (k) => cmd("TTL", k),
    ping: () => cmd("PING"),
    incr: (k) => cmd("INCR", k),
    decr: (k) => cmd("DECR", k),
    lpush: (k, ...v) => cmd("LPUSH", k, ...v),
    rpush: (k, ...v) => cmd("RPUSH", k, ...v),
    lrange: async (k, s, e) => {
      const a = await cmd("LRANGE", k, String(s), String(e));
      return Array.isArray(a) ? a.map(_bufToStr) : a;
    },
    sadd: (k, ...m) => cmd("SADD", k, ...m),
    smembers: async (k) => {
      const a = await cmd("SMEMBERS", k);
      return Array.isArray(a) ? a.map(_bufToStr) : a;
    },
    publish: (ch, m) => cmd("PUBLISH", ch, m),

    // ─── Subscription support (lazy separate connection) ───
    _subSocket: null,
    _subConnected: false,
    _subBuf: Buffer.alloc(0),
    _subCallbacks: new Map(), // channel → Set<callback>
    _psubCallbacks: new Map(), // pattern → Set<callback>
    _subSubscribedChannels: new Set(),
    _subSubscribedPatterns: new Set(),
    _subRq: [],

    async _ensureSubConnection() {
      if (this._subConnected) return;
      if (subConnecting) return subConnecting;
      const self = this;
      const subNet = await import("node:net");
      subConnecting = new Promise((ok, no) => {
        self._subSocket = subNet.connect({ host, port });
        self._subSocket.on("connect", () => {
          self._subConnected = true;
          subReconnectAttempts = 0;
          self._subSubscribedChannels.clear();
          self._subSubscribedPatterns.clear();
          ok();
        });
        self._subSocket.on("error", (err) => {
          if (!self._subConnected) return no(err);
          ctx.logger.warn(`Redis sub socket error: ${err.message}`);
        });
        self._subSocket.on("data", (d) => {
          // REL-C1: Guard against unbounded sub buffer growth (same limit as main connection)
          if (self._subBuf.length + d.length > MAX_REDIS_BUFFER) {
            self._subSocket.destroy(new Error('Redis sub response buffer overflow'));
            return;
          }
          self._subBuf = Buffer.concat([self._subBuf, d]);
          self._flushSub();
        });
        self._subSocket.on("close", () => {
          self._subConnected = false;
          self._subSubscribedChannels.clear();
          self._subSubscribedPatterns.clear();
          const pending = self._subRq.splice(0);
          for (const r of pending) r.no(new Error("Redis sub connection closed"));
          self._subBuf = Buffer.alloc(0);
          self._scheduleSubReconnect();
        });
      });
      let connectError = null;
      try {
        await subConnecting;
        // Auth and DB select on sub connection
        if (p.password) {
          await self._subCmd("AUTH", p.password);
        }
        const subDb = parseInt(p.pathname?.slice(1) || "0", 10);
        if (subDb > 0) {
          await self._subCmd("SELECT", String(subDb));
        }
        // Re-subscribe desired channels/patterns after reconnect.
        for (const channel of self._subCallbacks.keys()) {
          if (self._subSubscribedChannels.has(channel)) continue;
          await self._subCmd("SUBSCRIBE", channel);
          self._subSubscribedChannels.add(channel);
        }
        for (const pattern of self._psubCallbacks.keys()) {
          if (self._subSubscribedPatterns.has(pattern)) continue;
          await self._subCmd("PSUBSCRIBE", pattern);
          self._subSubscribedPatterns.add(pattern);
        }
      } catch (err) {
        connectError = err;
      } finally {
        subConnecting = null;
      }
      if (connectError) {
        this._scheduleSubReconnect();
        throw connectError;
      }
    },

    _scheduleSubReconnect() {
      if (intentionalClose) return;
      if (subReconnectTimer !== null || this._subConnected || subConnecting) return;
      if (subReconnectAttempts >= MAX_RECONNECT_RETRIES) {
        ctx.logger.error(`Redis sub reconnect failed after ${MAX_RECONNECT_RETRIES} attempts, giving up`);
        return;
      }
      const delay = Math.min(BASE_RECONNECT_DELAY * 2 ** subReconnectAttempts, MAX_RECONNECT_DELAY);
      subReconnectAttempts++;
      ctx.logger.warn(
        `Redis sub reconnecting in ${delay}ms (attempt ${subReconnectAttempts}/${MAX_RECONNECT_RETRIES})`,
      );
      subReconnectTimer = setTimeout(async () => {
        subReconnectTimer = null;
        try {
          await this._ensureSubConnection();
          ctx.logger.info("Redis sub reconnected (built-in client)", { host, port });
        } catch (err) {
          ctx.logger.warn(`Redis sub reconnect attempt ${subReconnectAttempts} failed: ${err.message}`);
          this._scheduleSubReconnect();
        }
      }, delay);
      subReconnectTimer.unref?.();
    },

    _subCmd(...args) {
      return new Promise((ok, no) => {
        if (!this._subConnected) return no(new Error("Redis sub not connected"));
        const header = `*${args.length}\r\n`;
        const parts = [header];
        for (const a of args) {
          const s = String(a);
          parts.push(`$${Buffer.byteLength(s)}\r\n${s}\r\n`);
        }
        this._subRq.push({ ok, no });
        this._subSocket.cork();
        for (const part of parts) this._subSocket.write(part);
        this._subSocket.uncork();
      });
    },

    _flushSub() {
      while (this._subBuf.length) {
        let r;
        try {
          r = parse(this._subBuf, 0);
        } catch {
          this._subBuf = Buffer.alloc(0);
          return;
        }
        if (!r) break;
        this._subBuf = r.rem;
        const val = r.val;
        // Messages are arrays: ["message", channel, data] or ["pmessage", pattern, channel, data]
        if (Array.isArray(val)) {
          const type = _bufToStr(val[0]);
          if (type === 'message') {
            const ch = _bufToStr(val[1]);
            const data = _bufToStr(val[2]);
            const cbs = this._subCallbacks.get(ch);
            if (cbs) for (const cb of cbs) cb(data, ch);
            continue;
          }
          if (type === 'pmessage') {
            const pat = _bufToStr(val[1]);
            const ch = _bufToStr(val[2]);
            const data = _bufToStr(val[3]);
            const cbs = this._psubCallbacks.get(pat);
            if (cbs) for (const cb of cbs) cb(data, ch, pat);
            continue;
          }
          // subscribe/psubscribe confirmations resolve pending _subRq
          if (type === 'subscribe' || type === 'psubscribe') {
            if (this._subRq.length) this._subRq.shift().ok(val);
            continue;
          }
          // unsubscribe/punsubscribe
          if (type === 'unsubscribe' || type === 'punsubscribe') {
            if (this._subRq.length) this._subRq.shift().ok(val);
            continue;
          }
        }
        // Other responses (OK from AUTH/SELECT)
        if (this._subRq.length) this._subRq.shift().ok(val);
      }
    },

    async subscribe(channel, callback) {
      if (!this._subCallbacks.has(channel)) {
        this._subCallbacks.set(channel, new Set());
      }
      this._subCallbacks.get(channel).add(callback);
      await this._ensureSubConnection();
      if (!this._subSubscribedChannels.has(channel)) {
        await this._subCmd("SUBSCRIBE", channel);
        this._subSubscribedChannels.add(channel);
      }
    },

    async psubscribe(pattern, callback) {
      if (!this._psubCallbacks.has(pattern)) {
        this._psubCallbacks.set(pattern, new Set());
      }
      this._psubCallbacks.get(pattern).add(callback);
      await this._ensureSubConnection();
      if (!this._subSubscribedPatterns.has(pattern)) {
        await this._subCmd("PSUBSCRIBE", pattern);
        this._subSubscribedPatterns.add(pattern);
      }
    },

    quit: () => {
      if (intentionalClose) return Promise.resolve(); // Guard against double-call
      // Clean up subscription connection
      if (clientRef._subSocket) {
        clientRef._subSocket.removeAllListeners();
        clientRef._subSocket.destroy();
        clientRef._subSocket = null;
        clientRef._subConnected = false;
        clientRef._subCallbacks.clear();
        clientRef._psubCallbacks.clear();
        clientRef._subSubscribedChannels.clear();
        clientRef._subSubscribedPatterns.clear();
        clientRef._subBuf = Buffer.alloc(0);
        clientRef._subRq.length = 0;
      }
      return new Promise((resolve) => {
        intentionalClose = true;
        if (reconnectTimer) {
          clearTimeout(reconnectTimer);
          reconnectTimer = null;
        }
        if (subReconnectTimer) {
          clearTimeout(subReconnectTimer);
          subReconnectTimer = null;
        }
        if (rq.length === 0) {
          connected = false;
          // C-PLUGIN-5: Clean socket shutdown on intentional close
          destroySocket();
          resolve();
          return;
        }
        // Event-based drain: resolve when response queue empties
        const origFlush = flush;
        flush = function drainFlush() {
          origFlush();
          if (rq.length === 0) {
            flush = origFlush;
            connected = false;
            destroySocket();
            resolve();
          }
        };
        // Safety timeout in case drain never completes
        setTimeout(() => {
          flush = origFlush;
          // Reject all pending requests
          const pending = rq.splice(0);
          for (const r of pending) {
            r.no(new Error('Redis quit timeout - connection forcibly closed'));
          }
          connected = false;
          destroySocket();
          resolve();
        }, 5000).unref();
      });
    },
    disconnect: () => {
      intentionalClose = true;
      connected = false;
      if (reconnectTimer) {
        clearTimeout(reconnectTimer);
        reconnectTimer = null;
      }
      if (subReconnectTimer) {
        clearTimeout(subReconnectTimer);
        subReconnectTimer = null;
      }
      // C-PLUGIN-5: Use destroySocket helper
      destroySocket();
    },
  };
  return clientRef;
}
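
// Usage sketch for the built-in client (illustrative; assumes a local Redis
// and a console-style logger on ctx):
//
//   const ctx = { logger: console };
//   const sub = await _mkRedis("redis://127.0.0.1:6379/0", ctx);
//   const pub = await _mkRedis("redis://127.0.0.1:6379/0", ctx);
//   await sub.subscribe("events", (data, channel) => console.log(channel, data));
//   await pub.publish("events", "hello");  // subscriber callback fires with "hello"
//   await pub.quit();
//   await sub.quit();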

/**
 * P16: Redis connection pool for the built-in RESP client.
 * Maintains multiple connections and round-robins commands across them
 * to avoid head-of-line blocking on a single connection.
 * @internal
 */
async function _mkRedisPool(url, ctx, size) {
  const connections = [];
  for (let i = 0; i < size; i++) {
    connections.push(await _mkRedis(url, ctx));
  }
  let rrIndex = 0;

  function nextConn() {
    const conn = connections[rrIndex % connections.length];
    rrIndex = (rrIndex + 1) % 1_000_000_000;
    return conn;
  }

  // Build a pooled client that delegates to underlying connections round-robin
  const pooled = {
    status: "ready",
    get _poolConnections() { return connections; },
    get: (k) => nextConn().get(k),
    set: (...a) => nextConn().set(...a),
    del: (...k) => nextConn().del(...k),
    hget: (k, f) => nextConn().hget(k, f),
    hset: (k, f, v) => nextConn().hset(k, f, v),
    hgetall: (k) => nextConn().hgetall(k),
    keys: (pat) => nextConn().keys(pat),
    expire: (k, s) => nextConn().expire(k, s),
    ttl: (k) => nextConn().ttl(k),
    ping: () => nextConn().ping(),
    incr: (k) => nextConn().incr(k),
    decr: (k) => nextConn().decr(k),
    lpush: (k, ...v) => nextConn().lpush(k, ...v),
    rpush: (k, ...v) => nextConn().rpush(k, ...v),
    lrange: (k, s, e) => nextConn().lrange(k, s, e),
    sadd: (k, ...m) => nextConn().sadd(k, ...m),
    smembers: (k) => nextConn().smembers(k),
    publish: (ch, m) => nextConn().publish(ch, m),
    // Subscriptions use the first connection's sub support
    subscribe: (ch, cb) => connections[0].subscribe(ch, cb),
    psubscribe: (pat, cb) => connections[0].psubscribe(pat, cb),
    quit: async () => {
      pooled.status = "disconnected";
      await Promise.all(connections.map(c => c.quit()));
    },
    disconnect: () => {
      pooled.status = "disconnected";
      for (const c of connections) c.disconnect();
    },
  };
  ctx.logger.info(`Redis pool created (${size} connections)`, { host: new URL(url).hostname });
  return pooled;
}
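
// Rotation example (illustrative): with size 3, consecutive commands land on
// connection 0, 1, 2, 0, … so a slow reply only delays commands queued behind
// it on its own socket, not the whole pool.
//
//   const pool = await _mkRedisPool("redis://127.0.0.1:6379/0", { logger: console }, 3);
//   await Promise.all([pool.incr("a"), pool.incr("b"), pool.incr("c")]); // one per connection
//   await pool.quit();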

// ─── PostgreSQL ────────────────────────────────────────────

export function postgres(options = {}) {
  const url = options.url ?? process.env.DATABASE_URL;
  return {
    name: "postgres",
    version: "1.0.0",
    inject: "pg",
    validate() {
      if (!url && !process.env.DATABASE_URL) throw new Error("PostgreSQL plugin requires url or DATABASE_URL");
    },
    env() {
      return url ? { FORGE_PG_URL: url } : {};
    },
    async connect(ctx) {
      const pg = await import("pg");
      const Pool = pg.default?.Pool ?? pg.Pool;
      const poolMax = options.poolSize ?? 10;
      const pool = new Pool({
        connectionString: url ?? process.env.FORGE_PG_URL,
        max: poolMax,
        idleTimeoutMillis: options.idleTimeout ?? 30000,
        connectionTimeoutMillis: 10000,
        statement_timeout: 30000,
      });
      const c = await pool.connect();
      c.release();
      ctx.logger.info("PostgreSQL connected", { pool: poolMax });

      // H-PLUGIN-1: Pool saturation logging (max once per 60s)
      let _lastSaturationWarn = 0;
      const origQuery = pool.query.bind(pool);
      pool.query = function (...args) {
        const now = Date.now();
        if (now - _lastSaturationWarn > 60000) {
          if (pool.waitingCount > 0 || pool.idleCount < poolMax * 0.2) {
            _lastSaturationWarn = now;
            ctx.logger.warn('PostgreSQL pool saturation warning', {
              waiting: pool.waitingCount,
              idle: pool.idleCount,
              total: pool.totalCount,
              max: poolMax,
            });
          }
        }
        return origQuery(...args);
      };

      return pool;
    },
    async healthCheck(pool) {
      try {
        await pool.query("SELECT 1");
        return { status: "ok", total: pool.totalCount, idle: pool.idleCount };
      } catch (e) {
        return { status: "error", error: e.message };
      }
    },
    async disconnect(pool) {
      await pool.end();
    },
    metrics(pool) {
      return { pool_total: pool.totalCount ?? 0, pool_idle: pool.idleCount ?? 0, pool_waiting: pool.waitingCount ?? 0 };
    },
    nginx() {
      return options.pgAdminUI
        ? {
            locations: [
              {
                path: "/admin/pgadmin",
                config: `proxy_pass http://127.0.0.1:${options.pgAdminPort ?? 5050};\nproxy_set_header Host $host;`,
              },
            ],
          }
        : {};
    },
  };
}
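
// Usage sketch (illustrative; assumes a reachable database and a console-style
// logger on ctx — the value returned by connect() is a regular pg.Pool):
//
//   const plugin = postgres({ url: "postgres://localhost:5432/app", poolSize: 5 });
//   plugin.validate();
//   const pool = await plugin.connect({ logger: console });
//   const { rows } = await pool.query("SELECT 1 AS ok"); // saturation warnings piggyback on query()
//   await plugin.disconnect(pool);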

// ─── S3 ────────────────────────────────────────────────────

// H-PLUGIN-8: S3 key validation helper
function validateS3Key(key) {
  if (typeof key !== 'string') throw new Error('S3 key must be a string');
  if (key.length < 1 || key.length > 1024) throw new Error(`S3 key length out of bounds: ${key.length}`);
  if (key.includes('..')) throw new Error('S3 key must not contain ".."');
  if (key.startsWith('/')) throw new Error('S3 key must not start with "/"');
  if (/[\x00-\x1F\x7F]/.test(key)) throw new Error('S3 key must not contain control characters');
}

const MAX_STUB_SIZE = 100 * 1024 * 1024; // 100MB total
const MAX_FILE_SIZE = 10 * 1024 * 1024; // 10MB per file

export function s3(options = {}) {
  return {
    name: "s3",
    version: "1.0.0",
    inject: "storage",
    validate() {
      if (!options.bucket) throw new Error("S3 plugin requires bucket");
    },
    env() {
      const e = {};
      if (options.endpoint) e.FORGE_S3_ENDPOINT = options.endpoint;
      if (options.region) e.AWS_REGION = options.region;
      return e;
    },
    async connect(ctx) {
      try {
        const { S3Client, GetObjectCommand, PutObjectCommand, DeleteObjectCommand, HeadBucketCommand } = await import(
          "@aws-sdk/client-s3"
        );
        const cfg = { region: options.region ?? "us-east-1" };
        if (options.endpoint) {
          cfg.endpoint = options.endpoint;
          cfg.forcePathStyle = true;
        }
        const raw = new S3Client(cfg);
        const bucket = options.bucket;
        ctx.logger.info("S3 connected", { bucket, region: cfg.region });
        return {
          _isStub: false,
          _raw: raw,
          _HeadBucketCommand: HeadBucketCommand,
          put: (k, b, ct) => { validateS3Key(k); return raw.send(new PutObjectCommand({ Bucket: bucket, Key: k, Body: b, ContentType: ct })); },
          get: async (k) => { validateS3Key(k); return (await raw.send(new GetObjectCommand({ Bucket: bucket, Key: k }))).Body; },
          del: (k) => { validateS3Key(k); return raw.send(new DeleteObjectCommand({ Bucket: bucket, Key: k })); },
          url: (k) => {
            validateS3Key(k);
            return options.endpoint
              ? `${options.endpoint}/${bucket}/${k}`
              : `https://${bucket}.s3.${cfg.region}.amazonaws.com/${k}`;
          },
          bucket,
        };
      } catch {
        const store = new Map();
        let totalSize = 0;
        ctx.logger.warn("S3 stub active (install @aws-sdk/client-s3)");
        return {
          _isStub: true,
          put: async (k, b) => {
            validateS3Key(k);
            const size = Buffer.isBuffer(b) ? b.length : (typeof b === 'string' ? Buffer.byteLength(b) : 0);
            if (size > MAX_FILE_SIZE) throw new Error(`S3 stub: file size ${size} exceeds limit of ${MAX_FILE_SIZE}`);
            // Subtract old value size if overwriting
            const old = store.get(k);
            if (old !== undefined) {
              const oldSize = Buffer.isBuffer(old) ? old.length : (typeof old === 'string' ? Buffer.byteLength(old) : 0);
              totalSize -= oldSize;
            }
            if (totalSize + size > MAX_STUB_SIZE) throw new Error(`S3 stub: total size would exceed limit of ${MAX_STUB_SIZE}`);
            totalSize += size;
            store.set(k, b);
          },
          get: async (k) => { validateS3Key(k); return store.get(k) ?? null; },
          del: async (k) => {
            validateS3Key(k);
            const old = store.get(k);
            if (old !== undefined) {
              const oldSize = Buffer.isBuffer(old) ? old.length : (typeof old === 'string' ? Buffer.byteLength(old) : 0);
              totalSize -= oldSize;
            }
            store.delete(k);
          },
          url: (k) => { validateS3Key(k); return `mem://${options.bucket}/${k}`; },
          bucket: options.bucket,
        };
      }
    },
    async healthCheck(s) {
      if (s._isStub === true) return { status: "ok", bucket: s.bucket, note: "in-memory stub" };
      if (s._isStub === false && s._raw && s._HeadBucketCommand) {
        try {
          await s._raw.send(new s._HeadBucketCommand({ Bucket: s.bucket }));
          return { status: "ok", bucket: s.bucket };
        } catch (err) {
          return { status: "error", bucket: s.bucket, error: err.message };
        }
      }
      // Fallback for clients not created by connect() (e.g., in tests)
      return { status: "ok", bucket: s.bucket };
    },
    async disconnect() {},
  };
}
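
// Usage sketch (illustrative): the same call sites work against real S3 and
// the in-memory stub; only url() output differs. Assumes a console logger.
//
//   const plugin = s3({ bucket: "uploads" });
//   const storage = await plugin.connect({ logger: console });
//   await storage.put("avatars/u1.png", Buffer.from("…"), "image/png");
//   await storage.get("avatars/u1.png");   // stored Buffer (stub) or response Body stream (real S3)
//   storage.url("avatars/u1.png");         // "mem://uploads/avatars/u1.png" when the stub is active
//   await storage.del("avatars/u1.png");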

// ─── CORS ──────────────────────────────────────────────────

export function cors(options = {}) {
  const origins = options.origins ?? ["*"];
  const methods = options.methods ?? ["GET", "POST", "PUT", "PATCH", "DELETE", "OPTIONS"];
  const headers = options.headers ?? ["Content-Type", "Authorization", "X-Correlation-ID"];
  const creds = options.credentials ?? false;
  const maxAge = options.maxAge ?? 86400;

  if (origins.includes("*") && creds) {
    throw new Error(
      'CORS misconfiguration: credentials: true is incompatible with origins: ["*"]. ' +
      "Specify explicit origins or disable credentials."
    );
  }

  return {
    name: "cors",
    version: "1.0.0",
    inject: "_cors",
    async connect() {
      return { origins, methods, headers };
    },
    middleware() {
      return function corsMiddleware(req, res, next) {
        const origin = req.headers.origin;
        let matched = false;

        if (origins.includes("*") && !creds) {
          res.setHeader("Access-Control-Allow-Origin", "*");
          matched = true;
        } else if (origin) {
          // Check exact match first
          if (origins.includes(origin)) {
            matched = true;
          } else {
            // Check wildcard subdomain patterns (e.g., *.example.com)
            for (const allowed of origins) {
              if (allowed.startsWith("*.")) {
                const suffix = allowed.slice(1); // e.g., ".example.com"
                try {
                  const originHost = new URL(origin).hostname.toLowerCase();
                  // REL-C3: *.example.com should NOT match bare example.com — wildcard
                  // requires at least one subdomain level. This is the standard interpretation
                  // per RFC 6125 and browser SAN matching.
                  if (originHost.endsWith(suffix)) {
                    // Verify the part before suffix is one or more valid dot-separated
                    // subdomain labels, each starting with an alphanumeric character
                    const beforeSuffix = originHost.slice(0, -suffix.length);
                    if (beforeSuffix && /^([a-z0-9][a-z0-9-]*\.)*[a-z0-9][a-z0-9-]*$/.test(beforeSuffix)) {
                      matched = true;
                      break;
                    }
                  }
                } catch {
                  // Invalid origin URL, skip
                }
              }
            }
          }
          if (matched) {
            res.setHeader("Access-Control-Allow-Origin", origin);
            if (creds) res.setHeader("Access-Control-Allow-Credentials", "true");
          }
        }

        if (matched) {
          res.setHeader("Access-Control-Allow-Methods", methods.join(", "));
          res.setHeader("Access-Control-Allow-Headers", headers.join(", "));
          // M-PLUGIN-3: Expose correlation and custom response headers to frontend JS
          res.setHeader("Access-Control-Expose-Headers", "X-Correlation-ID, X-Request-ID, X-Trace-ID");
          res.setHeader("Access-Control-Max-Age", String(maxAge));
          res.setHeader("Vary", "Origin");
        }

        if (req.method === "OPTIONS") {
          if (!matched) {
            res.writeHead(403);
            res.end();
            return;
          }
          res.writeHead(204);
          res.end();
          return;
        }
        next();
      };
    },
    async disconnect() {},
  };
}
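
// Matching examples for origins: ["*.example.com"]:
//   https://api.example.com    → allowed  (one subdomain level)
//   https://a.b.example.com    → allowed  (nested subdomains)
//   https://example.com        → rejected (bare apex; wildcard requires a subdomain)
//   https://evil-example.com   → rejected (".example.com" suffix not present)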

// ─── Realtime (WebSocket Utilities) ────────────────────────

const REALTIME_CHANNEL_RE = /^[a-zA-Z0-9:_-]{1,128}$/;
const REALTIME_BACKPRESSURE_STRATEGIES = new Set(["drop", "close"]);

function _assertRealtimeChannel(channel, allowedChannels) {
  if (typeof channel !== "string" || channel.trim() === "") {
    throw new Error("Realtime channel must be a non-empty string");
  }
  if (!REALTIME_CHANNEL_RE.test(channel)) {
    throw new Error(`Invalid realtime channel "${channel}". Use [a-zA-Z0-9:_-] (max 128 chars)`);
  }
  if (allowedChannels && allowedChannels.size > 0 && !allowedChannels.has(channel)) {
    throw new Error(`Realtime channel "${channel}" is not in allowedChannels`);
  }
}

function _encodeRealtimeEnvelope(channel, payload, senderId, maxPayloadBytes) {
  let type = "text";
  let body = "";

  if (Buffer.isBuffer(payload)) {
    type = "base64";
    body = payload.toString("base64");
  } else if (typeof payload === "string") {
    type = "text";
    body = payload;
  } else {
    type = "json";
    body = JSON.stringify(payload);
  }

  const size = Buffer.byteLength(body);
  if (size > maxPayloadBytes) {
    throw new Error(`Realtime payload exceeds maxPayloadBytes (${maxPayloadBytes})`);
  }

  return {
    v: 1,
    senderId,
    channel,
    type,
    body,
    ts: Date.now(),
  };
}

function _decodeRealtimeEnvelope(envelope) {
  if (envelope.type === "base64") {
    return Buffer.from(envelope.body, "base64");
  }
  if (envelope.type === "json") {
    return JSON.parse(envelope.body);
  }
  return envelope.body;
}

function _sendRealtimePayload(ws, payload) {
  if (!ws || ws._closed) return false;
  if (Buffer.isBuffer(payload)) {
    ws.sendBinary(payload);
  } else if (typeof payload === "string") {
    ws.send(payload);
  } else {
    ws.send(JSON.stringify(payload));
  }
  return true;
}

function _normalizeRealtimeDecision(decision, fallback = {}) {
  if (decision === undefined || decision === null || decision === true) {
    return { allow: true, ...fallback };
  }
  if (decision === false) {
    return { allow: false, ...fallback };
  }
  if (typeof decision === "object") {
    return {
      ...fallback,
      ...decision,
      allow: decision.allow !== false,
    };
  }
  return { allow: Boolean(decision), ...fallback };
}

function _socketBufferedBytes(ws) {
  const sock = ws?.socket;
  if (!sock) return 0;
  const bytes = sock.writableLength ?? sock.bufferSize ?? 0;
  return Number.isFinite(bytes) ? bytes : 0;
}
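
// Usage sketch for the client returned by realtime().connect() below
// (illustrative; `ws` stands for a server-side socket wrapper with send()/close(),
// and ctx is assumed to carry serviceName, workerId, and a logger):
//
//   const rt = await realtime({ allowedChannels: ["room:1"] }).connect(ctx);
//   const handle = rt.attach(ws, { channels: ["room:1"] });
//   await rt.publish("room:1", { hello: true });  // fans out locally, then over the bus
//   handle.detach();                              // drops all of this socket's subscriptions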

export function realtime(options = {}) {
  const adapter = options.adapter ?? (options.redisUrl ? "redis" : "memory");
  const busChannel = options.busChannel ?? "forge:realtime";
  const maxPayloadBytes = options.maxPayloadBytes ?? 256 * 1024;
  const maxConnections = options.maxConnections ?? 10_000;
  const maxSocketBufferBytes = options.maxSocketBufferBytes ?? 512 * 1024;
  const backpressureStrategy = options.backpressureStrategy ?? "drop";
  const redisSubHealthcheckMs = options.redisSubHealthcheckMs ?? 2000;
  const strictBusPublish = options.strictBusPublish ?? false;
  const authorize = options.authorize;
  const authorizeUpgrade = options.authorizeUpgrade;
  const authorizeConnect = options.authorizeConnect;
  const allowedChannels = options.allowedChannels ? new Set(options.allowedChannels) : null;

  return {
    name: "realtime",
    version: "1.0.0",
    inject: "realtime",

    validate() {
      if (!["memory", "redis"].includes(adapter)) {
        throw new Error(`Realtime adapter must be "memory" or "redis", got "${adapter}"`);
      }
      if (!REALTIME_CHANNEL_RE.test(busChannel)) {
        throw new Error(`Invalid realtime busChannel "${busChannel}"`);
      }
      if (!Number.isInteger(maxPayloadBytes) || maxPayloadBytes <= 0) {
        throw new Error(`Realtime maxPayloadBytes must be a positive integer, got ${maxPayloadBytes}`);
      }
      if (!Number.isInteger(maxConnections) || maxConnections <= 0) {
        throw new Error(`Realtime maxConnections must be a positive integer, got ${maxConnections}`);
      }
      if (!Number.isInteger(maxSocketBufferBytes) || maxSocketBufferBytes <= 0) {
        throw new Error(
          `Realtime maxSocketBufferBytes must be a positive integer, got ${maxSocketBufferBytes}`,
        );
      }
      if (!REALTIME_BACKPRESSURE_STRATEGIES.has(backpressureStrategy)) {
        throw new Error(
          `Realtime backpressureStrategy must be "drop" or "close", got "${backpressureStrategy}"`,
        );
      }
      if (!Number.isInteger(redisSubHealthcheckMs) || redisSubHealthcheckMs <= 0) {
        throw new Error(
          `Realtime redisSubHealthcheckMs must be a positive integer, got ${redisSubHealthcheckMs}`,
        );
      }
      if (authorize !== undefined && typeof authorize !== "function") {
        throw new Error(`Realtime authorize must be a function`);
      }
      if (authorizeUpgrade !== undefined && typeof authorizeUpgrade !== "function") {
        throw new Error(`Realtime authorizeUpgrade must be a function`);
      }
      if (authorizeConnect !== undefined && typeof authorizeConnect !== "function") {
        throw new Error(`Realtime authorizeConnect must be a function`);
      }
      if (allowedChannels) {
        for (const ch of allowedChannels) _assertRealtimeChannel(ch, null);
      }
      if (adapter === "redis") {
        const url = options.redisUrl ?? process.env.FORGE_REALTIME_REDIS_URL ?? process.env.FORGE_REDIS_URL;
        if (!url) {
          throw new Error('Realtime adapter "redis" requires redisUrl or FORGE_REALTIME_REDIS_URL/FORGE_REDIS_URL');
        }
      }
    },

    env() {
      if (adapter !== "redis") return {};
      const url = options.redisUrl ?? process.env.FORGE_REALTIME_REDIS_URL ?? process.env.FORGE_REDIS_URL;
      return url ? { FORGE_REALTIME_REDIS_URL: url } : {};
    },

    async connect(ctx) {
      const connections = new Set();
      const socketsByChannel = new Map(); // channel -> Set<ws>
      const channelsBySocket = new Map(); // ws -> Set<channel>
      const socketMeta = new WeakMap(); // ws -> { req, meta }
      const senderId = `${ctx.serviceName}:${ctx.workerId}:${process.pid}`;
      const metrics = {
        published: 0,
        delivered: 0,
        dropped: 0,
        backpressureDropped: 0,
        busPublishErrors: 0,
        redisReconnects: 0,
      };

      let redisPub = null;
      let redisSub = null;
      let usingIoRedis = false;
      let builtInSubWatchdog = null;
      let builtInSubProbeRunning = false;
      let lastBackpressureLogAt = 0;

      const shouldLogBackpressure = () => {
        const now = Date.now();
        if (now - lastBackpressureLogAt < 30_000) return false;
        lastBackpressureLogAt = now;
        return true;
      };

      const runAuthorizeHook = async (stage, payload) => {
        const defaultDenied = stage === "upgrade"
          ? { statusCode: 403, reason: "Forbidden" }
          : { closeCode: 1008, closeReason: "Policy violation" };
        const stageHook = stage === "upgrade" ? authorizeUpgrade : stage === "connect" ? authorizeConnect : null;
        if (typeof stageHook === "function") {
          const result = await stageHook({ stage, ...payload });
          return _normalizeRealtimeDecision(result, defaultDenied);
        }
        if (typeof authorize === "function") {
          const result = await authorize({ stage, ...payload });
          return _normalizeRealtimeDecision(result, defaultDenied);
        }
        return { allow: true };
      };

      const ensureSocketTracked = (ws, req = null, meta = null) => {
        if (!connections.has(ws)) {
          connections.add(ws);
          channelsBySocket.set(ws, new Set());
        }
        const prev = socketMeta.get(ws) ?? {};
        socketMeta.set(ws, {
          req: req ?? prev.req ?? null,
          meta: meta ?? prev.meta ?? null,
        });
      };

      const cleanupSocket = (ws) => {
        const socketChannels = channelsBySocket.get(ws);
        if (socketChannels) {
          for (const channel of socketChannels) {
            const members = socketsByChannel.get(channel);
            if (members) {
              members.delete(ws);
              if (members.size === 0) socketsByChannel.delete(channel);
            }
          }
        }
        channelsBySocket.delete(ws);
        connections.delete(ws);
        socketMeta.delete(ws);
      };

      const handleBackpressure = (ws, channel) => {
        metrics.backpressureDropped++;
        metrics.dropped++;
        if (backpressureStrategy === "close") {
          try {
            ws.close?.(1013, "Backpressure");
          } catch {}
          cleanupSocket(ws);
        }
        if (shouldLogBackpressure()) {
          ctx.logger.warn("Realtime dropping message due to socket backpressure", {
            channel,
            strategy: backpressureStrategy,
            maxSocketBufferBytes,
          });
        }
      };

      const deliverEnvelope = (envelope, { excludeWs = null } = {}) => {
        const members = socketsByChannel.get(envelope.channel);
        if (!members || members.size === 0) return 0;
        let delivered = 0;
        let payload;
        try {
          payload = _decodeRealtimeEnvelope(envelope);
        } catch (err) {
          ctx.logger.warn(`Realtime decode failed`, { error: err.message, channel: envelope.channel });
          return 0;
        }

        for (const ws of members) {
          if (excludeWs && ws === excludeWs) continue;
          if (_socketBufferedBytes(ws) > maxSocketBufferBytes) {
            handleBackpressure(ws, envelope.channel);
            continue;
          }
          try {
            if (_sendRealtimePayload(ws, payload)) {
              delivered++;
            } else {
              metrics.dropped++;
            }
          } catch {
            metrics.dropped++;
          }
        }
        metrics.delivered += delivered;
        return delivered;
      };

      const handleBusMessage = (raw) => {
        try {
          const envelope = JSON.parse(raw);
          if (!envelope || envelope.v !== 1 || typeof envelope.channel !== "string") return;
          if (envelope.senderId === senderId) return; // already delivered locally
          deliverEnvelope(envelope);
        } catch (err) {
          ctx.logger.warn(`Realtime bus message parse failed`, { error: err.message });
        }
      };

      const publishToBus = async (envelope) => {
        if (adapter !== "redis") return { ok: true, skipped: true };
        if (!redisPub) return { ok: false, skipped: true, error: new Error("Redis publisher unavailable") };
        try {
          await redisPub.publish(busChannel, JSON.stringify(envelope));
          return { ok: true };
        } catch (err) {
          metrics.busPublishErrors++;
          ctx.logger.warn("Realtime publish to bus failed", { error: err.message, busChannel });
          return { ok: false, error: err };
        }
      };

      if (adapter === "redis") {
        const redisUrl = options.redisUrl ?? process.env.FORGE_REALTIME_REDIS_URL ?? process.env.FORGE_REDIS_URL;
        try {
          const Redis = (await import("ioredis")).default;
          const retryStrategy = (times) => Math.min(100 * 2 ** Math.min(times, 8), 5000);
          redisPub = new Redis(redisUrl, {
            lazyConnect: true,
            maxRetriesPerRequest: null,
            retryStrategy,
          });
          redisSub = new Redis(redisUrl, {
            lazyConnect: true,
            maxRetriesPerRequest: null,
            autoResubscribe: true,
            retryStrategy,
          });
          await redisPub.connect();
          await redisSub.connect();
          await redisSub.subscribe(busChannel);
          redisSub.on("reconnecting", () => {
            metrics.redisReconnects++;
          });
          redisSub.on("message", (_channel, message) => {
            handleBusMessage(message);
          });
          redisPub.on("error", (err) => {
            ctx.logger.warn("Realtime redis publisher error", { error: err.message });
          });
          redisSub.on("error", (err) => {
            ctx.logger.warn("Realtime redis subscriber error", { error: err.message });
          });
          usingIoRedis = true;
          ctx.logger.info("Realtime connected (redis adapter)", { busChannel });
        } catch {
          redisPub = await _mkRedis(redisUrl, ctx);
          redisSub = await _mkRedis(redisUrl, ctx);
          await redisSub.subscribe(busChannel, (message) => handleBusMessage(message));
          // Built-in client doesn't expose reconnect events; monitor sub connection health.
          builtInSubWatchdog = setInterval(async () => {
            if (builtInSubProbeRunning) return;
            if (!redisSub || redisSub._subConnected !== false) return;
            builtInSubProbeRunning = true;
            try {
              metrics.redisReconnects++;
              await redisSub._ensureSubConnection?.();
            } catch (err) {
              ctx.logger.warn("Realtime built-in redis subscriber reconnect failed", {
                error: err.message,
              });
            } finally {
              builtInSubProbeRunning = false;
            }
          }, redisSubHealthcheckMs);
          builtInSubWatchdog.unref?.();
          ctx.logger.info("Realtime connected (redis adapter, built-in client)", { busChannel });
        }
      } else {
        ctx.logger.info("Realtime connected (memory adapter)");
      }

      const client = {
        adapter,
        busChannel,

        join(ws, channel) {
          _assertRealtimeChannel(channel, allowedChannels);
          ensureSocketTracked(ws);
          const socketChannels = channelsBySocket.get(ws);
          socketChannels.add(channel);
          if (!socketsByChannel.has(channel)) socketsByChannel.set(channel, new Set());
          socketsByChannel.get(channel).add(ws);
          return true;
        },

        leave(ws, channel) {
          const socketChannels = channelsBySocket.get(ws);
          if (!socketChannels) return false;
          socketChannels.delete(channel);
          const members = socketsByChannel.get(channel);
          if (members) {
            members.delete(ws);
            if (members.size === 0) socketsByChannel.delete(channel);
          }
          return true;
        },

        async publish(channel, payload, opts = {}) {
          _assertRealtimeChannel(channel, allowedChannels);
          const envelope = _encodeRealtimeEnvelope(channel, payload, senderId, maxPayloadBytes);
          const localDelivered = deliverEnvelope(envelope, { excludeWs: opts.excludeWs ?? null });
          metrics.published++;
          const busResult = await publishToBus(envelope);
          if (!busResult.ok && strictBusPublish) {
            throw new Error(`Realtime bus publish failed: ${busResult.error?.message ?? "unknown error"}`);
          }
          return {
            delivered: localDelivered,
            busPublished: busResult.ok === true || busResult.skipped === true,
          };
        },

        broadcast(payload, opts = {}) {
          const envelope = _encodeRealtimeEnvelope("__broadcast__", payload, senderId, maxPayloadBytes);
          const decoded = _decodeRealtimeEnvelope(envelope);
          let delivered = 0;
          for (const ws of connections) {
            if (opts.excludeWs && ws === opts.excludeWs) continue;
            if (_socketBufferedBytes(ws) > maxSocketBufferBytes) {
              handleBackpressure(ws, "__broadcast__");
              continue;
            }
            try {
              if (_sendRealtimePayload(ws, decoded)) {
                delivered++;
              } else {
                metrics.dropped++;
              }
            } catch {
              metrics.dropped++;
            }
          }
          metrics.delivered += delivered;
          metrics.published++;
          return { delivered };
        },

        attach(ws, attachOptions = {}) {
          ensureSocketTracked(ws, attachOptions.req ?? null, attachOptions.meta ?? null);
          for (const ch of attachOptions.channels ?? []) {
            this.join(ws, ch);
          }
          return {
            join: (channel) => this.join(ws, channel),
            leave: (channel) => this.leave(ws, channel),
            publish: (channel, payload) => this.publish(channel, payload, { excludeWs: ws }),
            detach: () => cleanupSocket(ws),
          };
        },

        channels() {
          return [...socketsByChannel.keys()];
        },

        connectionCount() {
          return connections.size;
        },

        async _onWsUpgrade(hookCtx) {
          if (connections.size >= maxConnections) {
            return { allow: false, statusCode: 503, reason: "WebSocket capacity reached" };
          }
          const decision = await runAuthorizeHook("upgrade", hookCtx);
          if (!decision.allow) {
            return {
              allow: false,
              statusCode: decision.statusCode ?? 403,
              reason: decision.reason ?? "Forbidden",
              headers: decision.headers ?? {},
1414
|
+
};
|
|
1415
|
+
}
|
|
1416
|
+
return { allow: true };
|
|
1417
|
+
},
|
|
1418
|
+
|
|
1419
|
+
async _onWsConnect(hookCtx) {
|
|
1420
|
+
const { ws, req, meta } = hookCtx ?? {};
|
|
1421
|
+
ensureSocketTracked(ws, req, meta);
|
|
1422
|
+
const decision = await runAuthorizeHook("connect", hookCtx);
|
|
1423
|
+
if (!decision.allow) {
|
|
1424
|
+
try {
|
|
1425
|
+
ws?.close?.(decision.closeCode ?? 1008, decision.closeReason ?? "Policy violation");
|
|
1426
|
+
} catch {}
|
|
1427
|
+
cleanupSocket(ws);
|
|
1428
|
+
return {
|
|
1429
|
+
allow: false,
|
|
1430
|
+
closeCode: decision.closeCode ?? 1008,
|
|
1431
|
+
closeReason: decision.closeReason ?? "Policy violation",
|
|
1432
|
+
};
|
|
1433
|
+
}
|
|
1434
|
+
return { allow: true };
|
|
1435
|
+
},
|
|
1436
|
+
|
|
1437
|
+
_registerConnection(ws, req, meta) {
|
|
1438
|
+
ensureSocketTracked(ws, req, meta);
|
|
1439
|
+
},
|
|
1440
|
+
|
|
1441
|
+
_cleanupConnection(ws) {
|
|
1442
|
+
cleanupSocket(ws);
|
|
1443
|
+
},
|
|
1444
|
+
|
|
1445
|
+
_metrics() {
|
|
1446
|
+
return {
|
|
1447
|
+
connections: connections.size,
|
|
1448
|
+
channels: socketsByChannel.size,
|
|
1449
|
+
published: metrics.published,
|
|
1450
|
+
delivered: metrics.delivered,
|
|
1451
|
+
dropped: metrics.dropped,
|
|
1452
|
+
backpressureDropped: metrics.backpressureDropped,
|
|
1453
|
+
busPublishErrors: metrics.busPublishErrors,
|
|
1454
|
+
redisReconnects: metrics.redisReconnects,
|
|
1455
|
+
};
|
|
1456
|
+
},
|
|
1457
|
+
|
|
1458
|
+
async _shutdown() {
|
|
1459
|
+
for (const ws of connections) {
|
|
1460
|
+
cleanupSocket(ws);
|
|
1461
|
+
}
|
|
1462
|
+
if (builtInSubWatchdog) {
|
|
1463
|
+
clearInterval(builtInSubWatchdog);
|
|
1464
|
+
builtInSubWatchdog = null;
|
|
1465
|
+
}
|
|
1466
|
+
if (redisSub) {
|
|
1467
|
+
if (usingIoRedis) {
|
|
1468
|
+
redisSub.removeAllListeners("message");
|
|
1469
|
+
}
|
|
1470
|
+
await redisSub.quit?.();
|
|
1471
|
+
redisSub = null;
|
|
1472
|
+
}
|
|
1473
|
+
if (redisPub) {
|
|
1474
|
+
await redisPub.quit?.();
|
|
1475
|
+
redisPub = null;
|
|
1476
|
+
}
|
|
1477
|
+
},
|
|
1478
|
+
};
|
|
1479
|
+
|
|
1480
|
+
return client;
|
|
1481
|
+
},
|
|
1482
|
+
|
|
1483
|
+
async disconnect(client) {
|
|
1484
|
+
await client._shutdown?.();
|
|
1485
|
+
},
|
|
1486
|
+
|
|
1487
|
+
metrics(client) {
|
|
1488
|
+
return client._metrics?.() ?? {};
|
|
1489
|
+
},
|
|
1490
|
+
|
|
1491
|
+
onWsUpgrade(client, ctx) {
|
|
1492
|
+
return client._onWsUpgrade?.(ctx);
|
|
1493
|
+
},
|
|
1494
|
+
|
|
1495
|
+
async onWsConnect(client, ctx) {
|
|
1496
|
+
return client._onWsConnect?.(ctx);
|
|
1497
|
+
},
|
|
1498
|
+
|
|
1499
|
+
onWsClose(client, ctx) {
|
|
1500
|
+
client._cleanupConnection?.(ctx.ws);
|
|
1501
|
+
},
|
|
1502
|
+
};
|
|
1503
|
+
}
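
/*
 * Usage sketch (illustrative only): wiring the realtime client into a WebSocket
 * server. The `wss` server and the direct `.connect(ctx)` call below are
 * assumptions for the example, not part of this module; `attach`, `publish`,
 * and `detach` are the client methods defined above.
 *
 *   const rt = await realtime({ adapter: "redis" }).connect(ctx);
 *
 *   wss.on("connection", (ws, req) => {
 *     const session = rt.attach(ws, { req, channels: ["chat:lobby"] });
 *     ws.on("message", (msg) => session.publish("chat:lobby", msg.toString()));
 *     ws.on("close", () => session.detach());
 *   });
 */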

// ─── Cron ──────────────────────────────────────────────────

/**
 * Parse a single cron field into a Set of matching values.
 * Supports: `*`, `*\/N`, `N`, `N-M`, `N-M/S`, and comma-separated lists.
 * @param {string} field - cron field string
 * @param {number} min - minimum valid value
 * @param {number} max - maximum valid value
 * @returns {Set<number>}
 */
function _parseCronField(field, min, max) {
  const values = new Set();
  for (const part of field.split(',')) {
    const trimmed = part.trim();
    // */N or N-M/S step patterns
    const stepMatch = trimmed.match(/^(\*|(\d+)-(\d+))\/(\d+)$/);
    if (stepMatch) {
      const step = parseInt(stepMatch[4], 10);
      if (step <= 0) throw new Error(`Invalid cron step: ${trimmed}`);
      let start = min, end = max;
      if (stepMatch[2] !== undefined) {
        start = parseInt(stepMatch[2], 10);
        end = parseInt(stepMatch[3], 10);
        // Validate bounds so out-of-range values fail loudly instead of never matching.
        if (start < min || end > max || start > end) {
          throw new Error(`Invalid cron range "${trimmed}" (valid range: ${min}-${max})`);
        }
      }
      for (let i = start; i <= end; i += step) values.add(i);
      continue;
    }
    // *
    if (trimmed === '*') {
      for (let i = min; i <= max; i++) values.add(i);
      continue;
    }
    // N-M range
    const rangeMatch = trimmed.match(/^(\d+)-(\d+)$/);
    if (rangeMatch) {
      const s = parseInt(rangeMatch[1], 10);
      const e = parseInt(rangeMatch[2], 10);
      // Validate bounds, matching the single-value branch below.
      if (s < min || e > max || s > e) {
        throw new Error(`Invalid cron range "${trimmed}" (valid range: ${min}-${max})`);
      }
      for (let i = s; i <= e; i++) values.add(i);
      continue;
    }
    // Single value
    const num = parseInt(trimmed, 10);
    if (Number.isNaN(num) || num < min || num > max) {
      throw new Error(`Invalid cron value "${trimmed}" (valid range: ${min}-${max})`);
    }
    values.add(num);
  }
  return values;
}
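
// Illustrative results of _parseCronField, derived from the rules above:
//   _parseCronField("*/15", 0, 59)     -> Set { 0, 15, 30, 45 }
//   _parseCronField("1,5-7", 0, 23)    -> Set { 1, 5, 6, 7 }
//   _parseCronField("10-30/10", 0, 59) -> Set { 10, 20, 30 }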

/**
 * Parse a 5-field cron expression and return either:
 * - { type: 'interval', ms } for simple periodic patterns (e.g. "*\/5 * * * *")
 * - { type: 'complex', minutes, hours, daysOfMonth, months, daysOfWeek } otherwise
 *
 * Note: interval mode runs on a fixed period from startup, not aligned to
 * wall-clock boundaries.
 */
function parseCronExpression(expr) {
  const parts = expr.trim().split(/\s+/);
  if (parts.length !== 5) {
    throw new Error(`Invalid cron expression "${expr}": expected 5 fields, got ${parts.length}`);
  }
  const [minF, hourF, domF, monF, dowF] = parts;

  // Detect simple periodic patterns that can be driven by setInterval.
  if (hourF === '*' && domF === '*' && monF === '*' && dowF === '*') {
    if (minF === '*') return { type: 'interval', ms: 60_000 };
    const stepMatch = minF.match(/^\*\/(\d+)$/);
    if (stepMatch) return { type: 'interval', ms: parseInt(stepMatch[1], 10) * 60_000 };
  }
  if (minF === '0' && domF === '*' && monF === '*' && dowF === '*') {
    if (hourF === '*') return { type: 'interval', ms: 3_600_000 };
    const stepMatch = hourF.match(/^\*\/(\d+)$/);
    if (stepMatch) return { type: 'interval', ms: parseInt(stepMatch[1], 10) * 3_600_000 };
  }
  if (minF === '0' && hourF === '0' && domF === '*' && monF === '*' && dowF === '*') {
    return { type: 'interval', ms: 86_400_000 };
  }

  // Complex pattern: parse all five fields.
  return {
    type: 'complex',
    minutes: _parseCronField(minF, 0, 59),
    hours: _parseCronField(hourF, 0, 23),
    daysOfMonth: _parseCronField(domF, 1, 31),
    months: _parseCronField(monF, 1, 12),
    daysOfWeek: _parseCronField(dowF, 0, 6),
  };
}
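
// Illustrative results of parseCronExpression, derived from the branches above:
//   parseCronExpression("*/5 * * * *")  -> { type: 'interval', ms: 300_000 }    (every 5 minutes)
//   parseCronExpression("0 */2 * * *")  -> { type: 'interval', ms: 7_200_000 }  (every 2 hours)
//   parseCronExpression("30 2 * * 1-5") -> { type: 'complex', minutes: Set {30}, hours: Set {2}, ... }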

/**
 * Calculate milliseconds until the next matching time for a complex cron schedule.
 * Scans minute-by-minute, up to 366 days ahead.
 *
 * Note: day-of-month and day-of-week must both match (AND), unlike classic
 * cron's OR semantics when both fields are restricted.
 */
function _nextCronTimeout(parsed) {
  const now = new Date();
  const check = new Date(now.getFullYear(), now.getMonth(), now.getDate(), now.getHours(), now.getMinutes() + 1, 0, 0);
  const limit = 366 * 24 * 60; // max minutes to scan
  for (let i = 0; i < limit; i++) {
    const m = check.getMinutes();
    const h = check.getHours();
    const dom = check.getDate();
    const mon = check.getMonth() + 1;
    const dow = check.getDay();
    if (parsed.minutes.has(m) && parsed.hours.has(h) &&
        parsed.daysOfMonth.has(dom) && parsed.months.has(mon) &&
        parsed.daysOfWeek.has(dow)) {
      return check.getTime() - now.getTime();
    }
    check.setMinutes(check.getMinutes() + 1);
  }
  throw new Error('Could not find next cron run time within 366 days');
}
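
// Illustrative timing, derived from the scan above: with parsed "30 2 * * *"
// and a current time of exactly 01:00:00, the scan stops at 02:30:00 the same
// day and returns 5_400_000 ms (90 minutes).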

export function cron(options = {}) {
  const leaderOnly = options.leaderOnly !== false;
  return {
    name: "cron",
    version: "1.0.0",
    inject: "cron",
    async connect(ctx) {
      // Leader election: workerId === 0 is the default leader.
      // The supervisor can explicitly designate a leader via ctx._isCronLeader.
      const isLeader = ctx.workerId === 0 || ctx._isCronLeader === true;
      const jobs = new Map();
      const timers = [];

      if (leaderOnly && !isLeader) {
        ctx.logger.info(`Cron: worker ${ctx.workerId} is not the cron leader; cron jobs will not run on this worker`);
      }

      return {
        jobs,
        schedule(name, expr, fn, options = {}) {
          if (leaderOnly && !isLeader) return;

          const parsed = parseCronExpression(expr);

          // Run immediately unless explicitly disabled
          if (options.immediate !== false) {
            (async () => {
              try {
                ctx.logger.info(`Cron: ${name} (initial run)`);
                await fn();
              } catch (e) {
                ctx.logger.error(`Cron "${name}" initial run failed: ${e.message}`);
              }
            })();
          }

          let _running = false;
          let _lastSuccess = Date.now();

          const runJob = async () => {
            const intervalMs = parsed.type === 'interval' ? parsed.ms : 60_000;
            const timeoutMs = Math.floor(intervalMs * 0.9);
            if (_running) {
              // Overlap protection: skip this tick, but warn if the job looks wedged.
              if (Date.now() - _lastSuccess > intervalMs * 2) {
                ctx.logger.warn(`Cron "${name}" appears stuck: no success for ${Math.round((Date.now() - _lastSuccess) / 1000)}s`);
              }
              return;
            }
            _running = true;
            let timeoutHandle;
            try {
              ctx.logger.info(`Cron: ${name}`);
              await Promise.race([
                fn(),
                new Promise((_, reject) => {
                  timeoutHandle = setTimeout(() => reject(new Error(`Cron "${name}" timed out after ${timeoutMs}ms`)), timeoutMs);
                }),
              ]);
              _lastSuccess = Date.now();
            } catch (e) {
              ctx.logger.error(`Cron "${name}" failed: ${e.message}`);
            } finally {
              clearTimeout(timeoutHandle); // don't leave the watchdog timer pending after fn() settles
              _running = false;
            }
          };

          let t;
          if (parsed.type === 'interval') {
            t = setInterval(runJob, parsed.ms);
            jobs.set(name, { expr, timer: t });
            timers.push(t);
            ctx.logger.info(`Cron: "${name}" every ${parsed.ms / 1000}s`);
          } else {
            // Complex pattern: setTimeout, recalculating the delay after each run.
            let active = true;
            const scheduleNext = () => {
              if (!active) return;
              const delayMs = _nextCronTimeout(parsed);
              const prev = t;
              t = setTimeout(async () => {
                await runJob();
                scheduleNext();
              }, delayMs);
              jobs.set(name, { expr, timer: t, _cancelComplex() { active = false; } });
              // Replace the fired timer handle so `timers` doesn't accumulate dead entries.
              const prevIdx = timers.indexOf(prev);
              if (prevIdx !== -1) timers.splice(prevIdx, 1, t);
              else timers.push(t);
            };
            scheduleNext();
            ctx.logger.info(`Cron: "${name}" scheduled (complex expression: ${expr})`);
          }
        },
        cancel(name) {
          const j = jobs.get(name);
          if (j) {
            clearInterval(j.timer);
            clearTimeout(j.timer);
            if (j._cancelComplex) j._cancelComplex();
            const idx = timers.indexOf(j.timer);
            if (idx !== -1) timers.splice(idx, 1);
            jobs.delete(name);
          }
        },
        listJobs: () => [...jobs.keys()],
        _timers: timers,
      };
    },
    async disconnect(c) {
      c._timers.forEach((t) => { clearInterval(t); clearTimeout(t); });
      for (const j of c.jobs.values()) {
        if (j._cancelComplex) j._cancelComplex();
      }
      c._timers.length = 0;
      c.jobs.clear();
    },
  };
}
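
/*
 * Usage sketch (illustrative only): `ctx.cron` is the handle returned by
 * connect() and exposed via `inject: "cron"`; the job bodies below are
 * assumptions for the example.
 *
 *   ctx.cron.schedule("cleanup", "0 3 * * *", async () => {
 *     await purgeExpiredSessions(); // hypothetical job body
 *   }, { immediate: false });       // skip the initial run at startup
 *
 *   ctx.cron.schedule("poll-upstream", "*\/5 * * * *", pollUpstream);
 *   ctx.cron.listJobs();            // -> ["cleanup", "poll-upstream"]
 *   ctx.cron.cancel("poll-upstream");
 */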