@spfn/core 0.2.0-beta.2 → 0.2.0-beta.21
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +262 -1092
- package/dist/{boss-D-fGtVgM.d.ts → boss-DI1r4kTS.d.ts} +68 -11
- package/dist/codegen/index.d.ts +55 -8
- package/dist/codegen/index.js +179 -5
- package/dist/codegen/index.js.map +1 -1
- package/dist/config/index.d.ts +204 -6
- package/dist/config/index.js +44 -11
- package/dist/config/index.js.map +1 -1
- package/dist/db/index.d.ts +13 -0
- package/dist/db/index.js +92 -33
- package/dist/db/index.js.map +1 -1
- package/dist/env/index.d.ts +83 -3
- package/dist/env/index.js +83 -15
- package/dist/env/index.js.map +1 -1
- package/dist/env/loader.d.ts +95 -0
- package/dist/env/loader.js +78 -0
- package/dist/env/loader.js.map +1 -0
- package/dist/event/index.d.ts +29 -70
- package/dist/event/index.js +15 -1
- package/dist/event/index.js.map +1 -1
- package/dist/event/sse/client.d.ts +157 -0
- package/dist/event/sse/client.js +169 -0
- package/dist/event/sse/client.js.map +1 -0
- package/dist/event/sse/index.d.ts +46 -0
- package/dist/event/sse/index.js +205 -0
- package/dist/event/sse/index.js.map +1 -0
- package/dist/job/index.d.ts +54 -8
- package/dist/job/index.js +61 -12
- package/dist/job/index.js.map +1 -1
- package/dist/middleware/index.d.ts +124 -11
- package/dist/middleware/index.js +41 -7
- package/dist/middleware/index.js.map +1 -1
- package/dist/nextjs/index.d.ts +2 -2
- package/dist/nextjs/index.js +37 -5
- package/dist/nextjs/index.js.map +1 -1
- package/dist/nextjs/server.d.ts +45 -24
- package/dist/nextjs/server.js +87 -66
- package/dist/nextjs/server.js.map +1 -1
- package/dist/route/index.d.ts +207 -14
- package/dist/route/index.js +304 -31
- package/dist/route/index.js.map +1 -1
- package/dist/route/types.d.ts +2 -31
- package/dist/router-Di7ENoah.d.ts +151 -0
- package/dist/server/index.d.ts +321 -10
- package/dist/server/index.js +798 -189
- package/dist/server/index.js.map +1 -1
- package/dist/{types-DRG2XMTR.d.ts → types-7Mhoxnnt.d.ts} +97 -4
- package/dist/types-DHQMQlcb.d.ts +305 -0
- package/docs/cache.md +133 -0
- package/docs/codegen.md +74 -0
- package/docs/database.md +346 -0
- package/docs/entity.md +539 -0
- package/docs/env.md +499 -0
- package/docs/errors.md +319 -0
- package/docs/event.md +432 -0
- package/docs/file-upload.md +717 -0
- package/docs/job.md +131 -0
- package/docs/logger.md +108 -0
- package/docs/middleware.md +337 -0
- package/docs/nextjs.md +247 -0
- package/docs/repository.md +496 -0
- package/docs/route.md +497 -0
- package/docs/server.md +429 -0
- package/package.json +19 -3
package/dist/server/index.js
CHANGED
|
@@ -1,14 +1,17 @@
|
|
|
1
1
|
import { env } from '@spfn/core/config';
|
|
2
|
-
import {
|
|
3
|
-
import { existsSync } from 'fs';
|
|
2
|
+
import { existsSync, readFileSync } from 'fs';
|
|
4
3
|
import { resolve, join } from 'path';
|
|
4
|
+
import { parse } from 'dotenv';
|
|
5
|
+
import { logger } from '@spfn/core/logger';
|
|
5
6
|
import { Hono } from 'hono';
|
|
6
7
|
import { cors } from 'hono/cors';
|
|
7
8
|
import { registerRoutes } from '@spfn/core/route';
|
|
8
9
|
import { ErrorHandler, RequestLogger } from '@spfn/core/middleware';
|
|
10
|
+
import { streamSSE } from 'hono/streaming';
|
|
11
|
+
import { randomBytes } from 'crypto';
|
|
12
|
+
import { Agent, setGlobalDispatcher } from 'undici';
|
|
9
13
|
import { initDatabase, getDatabase, closeDatabase } from '@spfn/core/db';
|
|
10
14
|
import { initCache, getCache, closeCache } from '@spfn/core/cache';
|
|
11
|
-
import { logger } from '@spfn/core/logger';
|
|
12
15
|
import { serve } from '@hono/node-server';
|
|
13
16
|
import PgBoss from 'pg-boss';
|
|
14
17
|
import { networkInterfaces } from 'os';
|
|
@@ -313,24 +316,496 @@ var init_formatters = __esm({
|
|
|
313
316
|
};
|
|
314
317
|
}
|
|
315
318
|
});
|
|
319
|
+
var envLogger = logger.child("@spfn/core:env-loader");
|
|
320
|
+
function getEnvFiles(nodeEnv, server) {
|
|
321
|
+
const files = [
|
|
322
|
+
".env",
|
|
323
|
+
`.env.${nodeEnv}`
|
|
324
|
+
];
|
|
325
|
+
if (nodeEnv !== "test") {
|
|
326
|
+
files.push(".env.local");
|
|
327
|
+
}
|
|
328
|
+
files.push(`.env.${nodeEnv}.local`);
|
|
329
|
+
if (server) {
|
|
330
|
+
files.push(".env.server");
|
|
331
|
+
files.push(".env.server.local");
|
|
332
|
+
}
|
|
333
|
+
return files;
|
|
334
|
+
}
|
|
335
|
+
/**
 * Read and parse a single dotenv file.
 * Returns the parsed key/value map, or null when the file does not exist.
 */
function parseEnvFile(filePath) {
  return existsSync(filePath)
    ? parse(readFileSync(filePath, "utf-8"))
    : null;
}
|
|
341
|
+
/**
 * Load dotenv files into process.env.
 *
 * Files from getEnvFiles() are merged in order (later files win on key
 * collisions); by default keys already present in process.env are NOT
 * overwritten — pass `override: true` to replace them.
 *
 * @param options.cwd      directory to resolve env files against (default: process.cwd())
 * @param options.nodeEnv  environment name (default: NODE_ENV or "local")
 * @param options.server   include .env.server / .env.server.local (default: true)
 * @param options.debug    emit debug logs about what was loaded (default: false)
 * @param options.override overwrite pre-existing process.env keys (default: false)
 * @returns { loadedFiles, loadedKeys } — the files found and the keys applied
 */
function loadEnv(options = {}) {
  const {
    cwd = process.cwd(),
    nodeEnv = process.env.NODE_ENV || "local",
    server = true,
    debug = false,
    override = false
  } = options;
  // Merge every readable file; later files shadow earlier ones.
  const loadedFiles = [];
  const merged = {};
  for (const fileName of getEnvFiles(nodeEnv, server)) {
    const parsed = parseEnvFile(resolve(cwd, fileName));
    if (parsed !== null) {
      loadedFiles.push(fileName);
      Object.assign(merged, parsed);
    }
  }
  // Apply to process.env; snapshot existing keys so values set earlier in
  // this same call never count as "pre-existing".
  const existingKeys = new Set(Object.keys(process.env));
  const loadedKeys = [];
  for (const [key, value] of Object.entries(merged)) {
    if (override || !existingKeys.has(key)) {
      process.env[key] = value;
      loadedKeys.push(key);
    }
  }
  if (debug && loadedFiles.length > 0) {
    envLogger.debug(`Loaded env files: ${loadedFiles.join(", ")}`);
    envLogger.debug(`Loaded ${loadedKeys.length} environment variables`);
  }
  return { loadedFiles, loadedKeys };
}
|
|
376
|
+
|
|
377
|
+
// src/server/dotenv-loader.ts
|
|
378
|
+
// One-time deprecation flag for loadEnvFiles().
var warned = false;
/**
 * @deprecated Use loadEnv() from "@spfn/core/env/loader" instead.
 * Emits a single deprecation warning per process, then delegates to
 * loadEnv() with default options.
 */
function loadEnvFiles() {
  if (warned === false) {
    warned = true;
    console.warn(
      '[SPFN] loadEnvFiles() is deprecated. Use loadEnv() from "@spfn/core/env/loader" instead.'
    );
  }
  loadEnv();
}
|
|
388
|
+
var sseLogger = logger.child("@spfn/core:sse");
/**
 * Create a Hono request handler that streams router events over SSE.
 *
 * Request flow: authenticate the one-time token (when a tokenManager is
 * configured), validate the requested event names against the router,
 * run the optional authorize hook, then open an SSE stream that forwards
 * each subscribed event, sends an initial "connected" frame, and pings
 * every `pingInterval` ms until the client aborts.
 *
 * @param router       event router exposing `eventNames` and `events[name].subscribe`
 * @param config       { pingInterval?, auth? } — pingInterval defaults to 30s
 * @param tokenManager optional SSETokenManager; when absent, auth is skipped
 */
function createSSEHandler(router, config = {}, tokenManager) {
  const {
    pingInterval = 3e4,
    auth: authConfig
  } = config;
  return async (c) => {
    // false = token param missing, null = token invalid/expired,
    // undefined = auth disabled, string = authenticated subject.
    const subject = await authenticateToken(c, tokenManager);
    if (subject === false) {
      return c.json({ error: "Missing token parameter" }, 401);
    }
    if (subject === null) {
      return c.json({ error: "Invalid or expired token" }, 401);
    }
    if (subject) {
      c.set("sseSubject", subject);
    }
    const requestedEvents = parseRequestedEvents(c);
    if (!requestedEvents) {
      return c.json({ error: "Missing events parameter" }, 400);
    }
    // Reject any event name the router does not know about.
    const validEventNames = router.eventNames;
    const invalidEvents = requestedEvents.filter((e) => !validEventNames.includes(e));
    if (invalidEvents.length > 0) {
      return c.json({
        error: "Invalid event names",
        invalidEvents,
        validEvents: validEventNames
      }, 400);
    }
    // Narrow to events this subject may see; null means none allowed.
    const allowedEvents = await authorizeEvents(subject, requestedEvents, authConfig);
    if (allowedEvents === null) {
      return c.json({ error: "Not authorized for any requested events" }, 403);
    }
    sseLogger.debug("SSE connection requested", {
      events: allowedEvents,
      subject: subject || void 0,
      clientIp: c.req.header("x-forwarded-for") || c.req.header("x-real-ip")
    });
    return streamSSE(c, async (stream) => {
      const unsubscribes = [];
      let messageId = 0;
      // Subscribe to each allowed event and forward payloads as SSE frames.
      for (const eventName of allowedEvents) {
        const eventDef = router.events[eventName];
        if (!eventDef) {
          continue;
        }
        const unsubscribe = eventDef.subscribe((payload) => {
          // Per-event filter hook may suppress individual payloads
          // for an authenticated subject.
          if (subject && authConfig?.filter?.[eventName]) {
            if (!authConfig.filter[eventName](subject, payload)) {
              return;
            }
          }
          messageId++;
          const message = {
            event: eventName,
            data: payload
          };
          sseLogger.debug("SSE sending event", {
            event: eventName,
            messageId
          });
          // Fire-and-forget write; backpressure is handled by the stream.
          void stream.writeSSE({
            id: String(messageId),
            event: eventName,
            data: JSON.stringify(message)
          });
        });
        unsubscribes.push(unsubscribe);
      }
      sseLogger.info("SSE connection established", {
        events: allowedEvents,
        subscriptionCount: unsubscribes.length
      });
      // Initial handshake frame so clients know which events are active.
      await stream.writeSSE({
        event: "connected",
        data: JSON.stringify({
          subscribedEvents: allowedEvents,
          timestamp: Date.now()
        })
      });
      // Keep-alive pings so intermediaries don't drop the idle connection.
      const pingTimer = setInterval(() => {
        void stream.writeSSE({
          event: "ping",
          data: JSON.stringify({ timestamp: Date.now() })
        });
      }, pingInterval);
      // Hold the stream open until the client disconnects.
      const abortSignal = c.req.raw.signal;
      while (!abortSignal.aborted) {
        await stream.sleep(pingInterval);
      }
      // Teardown: stop pings and detach every event subscription.
      clearInterval(pingTimer);
      unsubscribes.forEach((fn) => fn());
      sseLogger.info("SSE connection closed", {
        events: allowedEvents
      });
    }, async (err) => {
      sseLogger.error("SSE stream error", {
        error: err.message
      });
    });
  };
}
|
|
491
|
+
/**
 * Resolve the SSE subject from the request's one-time `token` query param.
 * Returns undefined when auth is disabled (no token manager), false when
 * the token parameter is absent, and otherwise the verification result
 * (the subject string, or null for an invalid/expired/consumed token).
 */
async function authenticateToken(c, tokenManager) {
  if (!tokenManager) {
    return undefined;
  }
  const token = c.req.query("token");
  return token ? tokenManager.verify(token) : false;
}
|
|
501
|
+
/**
 * Parse the comma-separated `events` query parameter into trimmed names.
 * Returns null when the parameter is missing or empty.
 */
function parseRequestedEvents(c) {
  const eventsParam = c.req.query("events");
  if (!eventsParam) {
    return null;
  }
  const names = [];
  for (const raw of eventsParam.split(",")) {
    names.push(raw.trim());
  }
  return names;
}
|
|
508
|
+
/**
 * Filter the requested events through the optional authorize callback.
 * With no subject or no authorize hook, every requested event is allowed.
 * Returns null when the subject is authorized for none of them.
 */
async function authorizeEvents(subject, requestedEvents, authConfig) {
  if (!subject || !authConfig?.authorize) {
    return requestedEvents;
  }
  const allowed = await authConfig.authorize(subject, requestedEvents);
  return allowed.length > 0 ? allowed : null;
}
|
|
518
|
+
/**
 * Default SSE token store backed by a process-local Map.
 * Tokens are single-use: consume() removes the entry it returns.
 */
var InMemoryTokenStore = class {
  tokens = /* @__PURE__ */ new Map();
  /** Store token data under its token string. */
  async set(token, data) {
    this.tokens.set(token, data);
  }
  /** Fetch-and-delete: yields the data exactly once, null afterwards. */
  async consume(token) {
    const data = this.tokens.get(token);
    if (!data) {
      return null;
    }
    this.tokens.delete(token);
    return data;
  }
  /** Drop every entry whose expiresAt is at or before now. */
  async cleanup() {
    const now = Date.now();
    for (const [token, data] of this.tokens) {
      if (data.expiresAt <= now) {
        this.tokens.delete(token);
      }
    }
  }
};
|
|
540
|
+
/**
 * Issues and verifies short-lived, one-time-use SSE auth tokens.
 * Tokens default to a 30s TTL and are consumed on first verification;
 * a background timer periodically purges expired entries from the store.
 */
var SSETokenManager = class {
  store;
  ttl;
  cleanupTimer = null;
  constructor(config) {
    this.ttl = config?.ttl ?? 3e4;
    this.store = config?.store ?? new InMemoryTokenStore();
    // Purge expired tokens on an interval; unref() so this timer alone
    // never keeps the process alive.
    const cleanupInterval = config?.cleanupInterval ?? 6e4;
    this.cleanupTimer = setInterval(() => void this.store.cleanup(), cleanupInterval);
    this.cleanupTimer.unref();
  }
  /**
   * Issue a new one-time-use token for the given subject
   */
  async issue(subject) {
    // 32 random bytes → 64 hex chars.
    const token = randomBytes(32).toString("hex");
    const expiresAt = Date.now() + this.ttl;
    await this.store.set(token, { token, subject, expiresAt });
    return token;
  }
  /**
   * Verify and consume a token
   * @returns subject string if valid, null if invalid/expired/already consumed
   */
  async verify(token) {
    const data = await this.store.consume(token);
    if (!data || data.expiresAt <= Date.now()) {
      return null;
    }
    return data.subject;
  }
  /**
   * Cleanup timer and resources
   */
  destroy() {
    if (this.cleanupTimer !== null) {
      clearInterval(this.cleanupTimer);
      this.cleanupTimer = null;
    }
  }
};
|
|
584
|
+
var serverLogger = logger.child("@spfn/core:server");

// src/server/shutdown-manager.ts
// Default per-hook timeout (ms) and ordering when not specified.
var DEFAULT_HOOK_TIMEOUT = 1e4;
var DEFAULT_HOOK_ORDER = 100;
// How often (ms) drain() re-checks/logs outstanding operations.
var DRAIN_POLL_INTERVAL = 500;
/**
 * Coordinates graceful shutdown: tracks in-flight operations, drains them,
 * then runs registered hooks in order. State moves
 * "running" -> "draining" -> "closed".
 */
var ShutdownManager = class {
  // "running" | "draining" | "closed"
  state = "running";
  // Registered hooks, kept sorted ascending by `order`.
  hooks = [];
  // id -> { name, startedAt } for in-flight tracked operations.
  operations = /* @__PURE__ */ new Map();
  // Monotonic counter used to build unique operation ids.
  operationCounter = 0;
  /**
   * Register a shutdown hook
   *
   * Hooks run in order during shutdown, after all tracked operations drain.
   * Each hook has its own timeout — failure does not block subsequent hooks.
   *
   * @example
   * shutdown.onShutdown('ai-service', async () => {
   *   await aiService.cancelPending();
   * }, { timeout: 30000, order: 10 });
   */
  onShutdown(name, handler, options) {
    this.hooks.push({
      name,
      handler,
      timeout: options?.timeout ?? DEFAULT_HOOK_TIMEOUT,
      order: options?.order ?? DEFAULT_HOOK_ORDER
    });
    // Re-sort on every registration so execution order is always correct.
    this.hooks.sort((a, b) => a.order - b.order);
    serverLogger.debug(`Shutdown hook registered: ${name}`, {
      order: options?.order ?? DEFAULT_HOOK_ORDER,
      timeout: `${options?.timeout ?? DEFAULT_HOOK_TIMEOUT}ms`
    });
  }
  /**
   * Track a long-running operation
   *
   * During shutdown (drain phase), the process waits for ALL tracked
   * operations to complete before proceeding with cleanup.
   *
   * If shutdown has already started, the operation is rejected immediately.
   *
   * @returns The operation result (pass-through)
   *
   * @example
   * const result = await shutdown.trackOperation(
   *   'ai-generate',
   *   aiService.generate(prompt)
   * );
   */
  async trackOperation(name, operation) {
    if (this.state !== "running") {
      throw new Error(`Cannot start operation '${name}': server is shutting down`);
    }
    const id = `${name}-${++this.operationCounter}`;
    this.operations.set(id, {
      name,
      startedAt: Date.now()
    });
    serverLogger.debug(`Operation tracked: ${id}`, {
      activeOperations: this.operations.size
    });
    try {
      return await operation;
    } finally {
      // Always untrack, whether the operation resolved or rejected.
      this.operations.delete(id);
      serverLogger.debug(`Operation completed: ${id}`, {
        activeOperations: this.operations.size
      });
    }
  }
  /**
   * Whether the server is shutting down
   *
   * Use this to reject new work early (e.g., return 503 in route handlers).
   */
  isShuttingDown() {
    return this.state !== "running";
  }
  /**
   * Number of currently active tracked operations
   */
  getActiveOperationCount() {
    return this.operations.size;
  }
  /**
   * Mark shutdown as started immediately
   *
   * Call this at the very beginning of the shutdown sequence so that:
   * - Health check returns 503 right away
   * - trackOperation() rejects new work
   * - isShuttingDown() returns true
   */
  beginShutdown() {
    if (this.state !== "running") {
      return;
    }
    this.state = "draining";
    serverLogger.info("Shutdown manager: state set to draining");
  }
  /**
   * Execute the full shutdown sequence
   *
   * 1. State → draining (reject new operations)
   * 2. Wait for all tracked operations to complete (drain)
   * 3. Run shutdown hooks in order
   * 4. State → closed
   *
   * @param drainTimeout - Max time to wait for operations to drain (ms)
   */
  async execute(drainTimeout) {
    if (this.state === "closed") {
      serverLogger.warn("ShutdownManager.execute() called but already closed");
      return;
    }
    this.state = "draining";
    serverLogger.info("Shutdown manager: draining started", {
      activeOperations: this.operations.size,
      registeredHooks: this.hooks.length,
      drainTimeout: `${drainTimeout}ms`
    });
    await this.drain(drainTimeout);
    await this.executeHooks();
    this.state = "closed";
    serverLogger.info("Shutdown manager: all hooks executed");
  }
  // ========================================================================
  // Private
  // ========================================================================
  /**
   * Wait for all tracked operations to complete, up to drainTimeout
   */
  async drain(drainTimeout) {
    if (this.operations.size === 0) {
      serverLogger.info("Shutdown manager: no active operations, drain skipped");
      return;
    }
    serverLogger.info(`Shutdown manager: waiting for ${this.operations.size} operations to drain...`);
    const deadline = Date.now() + drainTimeout;
    // Poll until everything finishes or the deadline passes, logging
    // outstanding operations each cycle.
    while (this.operations.size > 0 && Date.now() < deadline) {
      const remaining = deadline - Date.now();
      const ops = Array.from(this.operations.values()).map((op) => ({
        name: op.name,
        elapsed: `${Math.round((Date.now() - op.startedAt) / 1e3)}s`
      }));
      serverLogger.info("Shutdown manager: drain in progress", {
        activeOperations: this.operations.size,
        remainingTimeout: `${Math.round(remaining / 1e3)}s`,
        operations: ops
      });
      // Never sleep past the deadline.
      await sleep(Math.min(DRAIN_POLL_INTERVAL, remaining));
    }
    if (this.operations.size > 0) {
      const abandoned = Array.from(this.operations.values()).map((op) => op.name);
      serverLogger.warn("Shutdown manager: drain timeout \u2014 abandoning operations", {
        abandoned
      });
    } else {
      serverLogger.info("Shutdown manager: all operations drained successfully");
    }
  }
  /**
   * Execute registered shutdown hooks in order
   */
  async executeHooks() {
    if (this.hooks.length === 0) {
      return;
    }
    serverLogger.info(`Shutdown manager: executing ${this.hooks.length} hooks...`);
    for (const hook of this.hooks) {
      serverLogger.debug(`Shutdown hook [${hook.name}] starting (timeout: ${hook.timeout}ms)`);
      try {
        // Each hook is bounded by its own timeout; a slow or failing hook
        // is logged but never blocks the remaining hooks.
        await withTimeout(
          hook.handler(),
          hook.timeout,
          `Shutdown hook '${hook.name}' timeout after ${hook.timeout}ms`
        );
        serverLogger.info(`Shutdown hook [${hook.name}] completed`);
      } catch (error) {
        serverLogger.error(
          `Shutdown hook [${hook.name}] failed`,
          error
        );
      }
    }
  }
};
|
|
772
|
+
// Lazily-created process-wide ShutdownManager singleton.
var instance = null;
/** Return the shared ShutdownManager, creating it on first use. */
function getShutdownManager() {
  instance ??= new ShutdownManager();
  return instance;
}
/** Drop the singleton so the next getShutdownManager() builds a fresh one (for tests). */
function resetShutdownManager() {
  instance = null;
}
|
|
782
|
+
/** Promise-based delay: resolves after `ms` milliseconds. */
function sleep(ms) {
  return new Promise((done) => {
    setTimeout(done, ms);
  });
}
|
|
785
|
+
/**
 * Race a promise against a timeout.
 * Rejects with Error(message) if the promise does not settle within
 * `timeout` ms; the timer is cleared as soon as the promise settles.
 */
async function withTimeout(promise, timeout, message) {
  let timeoutId;
  const guarded = promise.finally(() => {
    if (timeoutId) clearTimeout(timeoutId);
  });
  const timer = new Promise((_, reject) => {
    timeoutId = setTimeout(() => reject(new Error(message)), timeout);
  });
  return Promise.race([guarded, timer]);
}
|
|
798
|
+
|
|
799
|
+
// src/server/helpers.ts
|
|
332
800
|
function createHealthCheckHandler(detailed) {
|
|
333
801
|
return async (c) => {
|
|
802
|
+
const shutdownManager = getShutdownManager();
|
|
803
|
+
if (shutdownManager.isShuttingDown()) {
|
|
804
|
+
return c.json({
|
|
805
|
+
status: "shutting_down",
|
|
806
|
+
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
807
|
+
}, 503);
|
|
808
|
+
}
|
|
334
809
|
const response = {
|
|
335
810
|
status: "ok",
|
|
336
811
|
timestamp: (/* @__PURE__ */ new Date()).toISOString()
|
|
@@ -387,34 +862,49 @@ function applyServerTimeouts(server, timeouts) {
|
|
|
387
862
|
server.headersTimeout = timeouts.headers;
|
|
388
863
|
}
|
|
389
864
|
}
|
|
390
|
-
function getTimeoutConfig(
|
|
865
|
+
/**
 * Resolve server socket timeouts (ms), preferring explicit config values
 * over the env-provided defaults.
 */
function getTimeoutConfig(config) {
  const request = config?.request ?? env.SERVER_TIMEOUT;
  const keepAlive = config?.keepAlive ?? env.SERVER_KEEPALIVE_TIMEOUT;
  const headers = config?.headers ?? env.SERVER_HEADERS_TIMEOUT;
  return { request, keepAlive, headers };
}
|
|
397
|
-
function getShutdownTimeout(
|
|
398
|
-
return
|
|
872
|
+
/** Resolve the graceful-shutdown timeout (ms): explicit config first, env default second. */
function getShutdownTimeout(config) {
  const explicit = config?.timeout;
  return explicit ?? env.SHUTDOWN_TIMEOUT;
}
|
|
875
|
+
/**
 * Resolve outbound fetch (undici) timeouts (ms), preferring explicit
 * config values over the env-provided defaults.
 */
function getFetchTimeoutConfig(config) {
  const connect = config?.connect ?? env.FETCH_CONNECT_TIMEOUT;
  const headers = config?.headers ?? env.FETCH_HEADERS_TIMEOUT;
  const body = config?.body ?? env.FETCH_BODY_TIMEOUT;
  return { connect, headers, body };
}
|
|
882
|
+
/**
 * Install a global undici dispatcher so every fetch() in this process
 * uses the given connect/headers/body timeouts.
 */
function applyGlobalFetchTimeouts(timeouts) {
  const dispatcher = new Agent({
    connect: { timeout: timeouts.connect },
    headersTimeout: timeouts.headers,
    bodyTimeout: timeouts.body
  });
  setGlobalDispatcher(dispatcher);
}
|
|
400
|
-
function buildMiddlewareOrder(
|
|
890
|
+
/**
 * Compute the human-readable middleware execution order for startup
 * logging. Built-in middleware (logger/CORS/error handler) is included
 * unless explicitly disabled with `false` in config.middleware.
 */
function buildMiddlewareOrder(config) {
  const middlewareConfig = config.middleware ?? {};
  const order = [];
  if (middlewareConfig.logger !== false) {
    order.push("RequestLogger");
  }
  if (middlewareConfig.cors !== false) {
    order.push("CORS");
  }
  (config.use ?? []).forEach((_, i) => order.push(`Custom[${i}]`));
  if (config.beforeRoutes) {
    order.push("beforeRoutes hook");
  }
  order.push("Routes");
  if (config.afterRoutes) {
    order.push("afterRoutes hook");
  }
  if (middlewareConfig.errorHandler !== false) {
    order.push("ErrorHandler");
  }
  return order;
}
|
|
415
|
-
function buildStartupConfig(
|
|
416
|
-
const middlewareConfig =
|
|
417
|
-
const healthCheckConfig =
|
|
905
|
+
function buildStartupConfig(config, timeouts) {
|
|
906
|
+
const middlewareConfig = config.middleware ?? {};
|
|
907
|
+
const healthCheckConfig = config.healthCheck ?? {};
|
|
418
908
|
const healthCheckEnabled = healthCheckConfig.enabled !== false;
|
|
419
909
|
const healthCheckPath = healthCheckConfig.path ?? "/health";
|
|
420
910
|
const healthCheckDetailed = healthCheckConfig.detailed ?? env.NODE_ENV === "development";
|
|
@@ -423,7 +913,7 @@ function buildStartupConfig(config2, timeouts) {
|
|
|
423
913
|
logger: middlewareConfig.logger !== false,
|
|
424
914
|
cors: middlewareConfig.cors !== false,
|
|
425
915
|
errorHandler: middlewareConfig.errorHandler !== false,
|
|
426
|
-
custom:
|
|
916
|
+
custom: config.use?.length ?? 0
|
|
427
917
|
},
|
|
428
918
|
healthCheck: healthCheckEnabled ? {
|
|
429
919
|
enabled: true,
|
|
@@ -431,8 +921,8 @@ function buildStartupConfig(config2, timeouts) {
|
|
|
431
921
|
detailed: healthCheckDetailed
|
|
432
922
|
} : { enabled: false },
|
|
433
923
|
hooks: {
|
|
434
|
-
beforeRoutes: !!
|
|
435
|
-
afterRoutes: !!
|
|
924
|
+
beforeRoutes: !!config.beforeRoutes,
|
|
925
|
+
afterRoutes: !!config.afterRoutes
|
|
436
926
|
},
|
|
437
927
|
timeout: {
|
|
438
928
|
request: `${timeouts.request}ms`,
|
|
@@ -440,23 +930,22 @@ function buildStartupConfig(config2, timeouts) {
|
|
|
440
930
|
headers: `${timeouts.headers}ms`
|
|
441
931
|
},
|
|
442
932
|
shutdown: {
|
|
443
|
-
timeout: `${
|
|
933
|
+
timeout: `${config.shutdown?.timeout ?? env.SHUTDOWN_TIMEOUT}ms`
|
|
444
934
|
}
|
|
445
935
|
};
|
|
446
936
|
}
|
|
447
|
-
var serverLogger = logger.child("@spfn/core:server");
|
|
448
937
|
|
|
449
938
|
// src/server/create-server.ts
|
|
450
|
-
async function createServer(
|
|
939
|
+
/**
 * Create the Hono app for this server.
 * If the project ships src/server/app.ts (or a compiled src/server/app),
 * that custom app factory wins; otherwise an auto-configured app is built
 * from the given config.
 */
async function createServer(config) {
  const cwd = process.cwd();
  const appPath = join(cwd, "src", "server", "app.ts");
  const appJsPath = join(cwd, "src", "server", "app");
  const hasCustomApp = existsSync(appPath) || existsSync(appJsPath);
  return hasCustomApp
    ? loadCustomApp(appPath, appJsPath, config)
    : createAutoConfiguredApp(config);
}
|
|
459
|
-
async function loadCustomApp(appPath, appJsPath,
|
|
948
|
+
async function loadCustomApp(appPath, appJsPath, config) {
|
|
460
949
|
const actualPath = existsSync(appPath) ? appPath : appJsPath;
|
|
461
950
|
const appModule = await import(actualPath);
|
|
462
951
|
const appFactory = appModule.default;
|
|
@@ -464,14 +953,15 @@ async function loadCustomApp(appPath, appJsPath, config2) {
|
|
|
464
953
|
throw new Error("app.ts must export a default function that returns a Hono app");
|
|
465
954
|
}
|
|
466
955
|
const app = await appFactory();
|
|
467
|
-
if (
|
|
468
|
-
registerRoutes(app,
|
|
956
|
+
if (config?.routes) {
|
|
957
|
+
const routes = registerRoutes(app, config.routes, config.middlewares);
|
|
958
|
+
logRegisteredRoutes(routes, config?.debug ?? false);
|
|
469
959
|
}
|
|
470
960
|
return app;
|
|
471
961
|
}
|
|
472
|
-
async function createAutoConfiguredApp(
|
|
962
|
+
async function createAutoConfiguredApp(config) {
|
|
473
963
|
const app = new Hono();
|
|
474
|
-
const middlewareConfig =
|
|
964
|
+
const middlewareConfig = config?.middleware ?? {};
|
|
475
965
|
const enableLogger = middlewareConfig.logger !== false;
|
|
476
966
|
const enableCors = middlewareConfig.cors !== false;
|
|
477
967
|
const enableErrorHandler = middlewareConfig.errorHandler !== false;
|
|
@@ -481,30 +971,31 @@ async function createAutoConfiguredApp(config2) {
|
|
|
481
971
|
await next();
|
|
482
972
|
});
|
|
483
973
|
}
|
|
484
|
-
applyDefaultMiddleware(app,
|
|
485
|
-
if (Array.isArray(
|
|
486
|
-
|
|
974
|
+
applyDefaultMiddleware(app, config, enableLogger, enableCors);
|
|
975
|
+
if (Array.isArray(config?.use)) {
|
|
976
|
+
config.use.forEach((mw) => app.use("*", mw));
|
|
487
977
|
}
|
|
488
|
-
registerHealthCheckEndpoint(app,
|
|
489
|
-
await executeBeforeRoutesHook(app,
|
|
490
|
-
await loadAppRoutes(app,
|
|
491
|
-
|
|
978
|
+
registerHealthCheckEndpoint(app, config);
|
|
979
|
+
await executeBeforeRoutesHook(app, config);
|
|
980
|
+
await loadAppRoutes(app, config);
|
|
981
|
+
registerSSEEndpoint(app, config);
|
|
982
|
+
await executeAfterRoutesHook(app, config);
|
|
492
983
|
if (enableErrorHandler) {
|
|
493
|
-
app.onError(ErrorHandler());
|
|
984
|
+
app.onError(ErrorHandler({ onError: config?.middleware?.onError }));
|
|
494
985
|
}
|
|
495
986
|
return app;
|
|
496
987
|
}
|
|
497
|
-
function applyDefaultMiddleware(app,
|
|
988
|
+
/**
 * Mount the built-in request logger and CORS middleware when enabled.
 * CORS options come from config.cors unless it is the literal false
 * (in which case the hono cors() defaults are used).
 */
function applyDefaultMiddleware(app, config, enableLogger, enableCors) {
  if (enableLogger) {
    app.use("*", RequestLogger());
  }
  if (!enableCors) {
    return;
  }
  const corsOptions = config?.cors !== false ? config?.cors : void 0;
  app.use("*", cors(corsOptions));
}
|
|
506
|
-
function registerHealthCheckEndpoint(app,
|
|
507
|
-
const healthCheckConfig =
|
|
997
|
+
function registerHealthCheckEndpoint(app, config) {
|
|
998
|
+
const healthCheckConfig = config?.healthCheck ?? {};
|
|
508
999
|
const healthCheckEnabled = healthCheckConfig.enabled !== false;
|
|
509
1000
|
const healthCheckPath = healthCheckConfig.path ?? "/health";
|
|
510
1001
|
const healthCheckDetailed = healthCheckConfig.detailed ?? process.env.NODE_ENV === "development";
|
|
@@ -513,46 +1004,100 @@ function registerHealthCheckEndpoint(app, config2) {
|
|
|
513
1004
|
serverLogger.debug(`Health check endpoint enabled at ${healthCheckPath}`);
|
|
514
1005
|
}
|
|
515
1006
|
}
|
|
516
|
-
async function executeBeforeRoutesHook(app,
|
|
517
|
-
if (
|
|
518
|
-
await
|
|
1007
|
+
/** Run the optional lifecycle.beforeRoutes hook with the app instance. */
async function executeBeforeRoutesHook(app, config) {
  const lifecycle = config?.lifecycle;
  if (lifecycle?.beforeRoutes) {
    await lifecycle.beforeRoutes(app);
  }
}
|
|
521
|
-
async function loadAppRoutes(app,
|
|
522
|
-
const debug = isDebugMode(
|
|
523
|
-
if (
|
|
524
|
-
registerRoutes(app,
|
|
525
|
-
|
|
526
|
-
serverLogger.info("\u2713 Routes registered");
|
|
527
|
-
}
|
|
1012
|
+
/**
 * Register configured routes on the app and log the result.
 * When no routes are configured, warns in debug mode and otherwise
 * stays silent.
 */
async function loadAppRoutes(app, config) {
  const debug = isDebugMode(config);
  if (!config?.routes) {
    if (debug) {
      serverLogger.warn("\u26A0\uFE0F No routes configured. Use defineServerConfig().routes() to register routes.");
    }
    return;
  }
  const routes = registerRoutes(app, config.routes, config.middlewares);
  logRegisteredRoutes(routes, debug);
}
|
|
532
|
-
|
|
533
|
-
if (
|
|
534
|
-
|
|
1021
|
+
/**
 * Log registered routes as an aligned method/path table, sorted by path.
 * An empty route list produces a warning in debug mode and nothing
 * otherwise.
 */
function logRegisteredRoutes(routes, debug) {
  if (routes.length === 0) {
    if (debug) {
      serverLogger.warn("\u26A0\uFE0F No routes registered");
    }
    return;
  }
  // Sort a copy (never mutate the caller's array) and pad methods so
  // the paths line up in the log output.
  const sortedRoutes = [...routes].sort((a, b) => a.path.localeCompare(b.path));
  const maxMethodLen = Math.max(...sortedRoutes.map((r) => r.method.length));
  const routeLines = sortedRoutes
    .map((r) => ` ${r.method.padEnd(maxMethodLen)} ${r.path}`)
    .join("\n");
  serverLogger.info(`\u2713 Routes registered (${routes.length}):\n${routeLines}`);
}
|
|
537
|
-
function
|
|
538
|
-
|
|
1036
|
+
async function executeAfterRoutesHook(app, config) {
|
|
1037
|
+
if (config?.lifecycle?.afterRoutes) {
|
|
1038
|
+
await config.lifecycle.afterRoutes(app);
|
|
1039
|
+
}
|
|
1040
|
+
}
|
|
1041
|
+
function registerSSEEndpoint(app, config) {
|
|
1042
|
+
if (!config?.events) {
|
|
1043
|
+
return;
|
|
1044
|
+
}
|
|
1045
|
+
const eventsConfig = config.eventsConfig ?? {};
|
|
1046
|
+
const streamPath = eventsConfig.path ?? "/events/stream";
|
|
1047
|
+
const authConfig = eventsConfig.auth;
|
|
1048
|
+
const debug = isDebugMode(config);
|
|
1049
|
+
let tokenManager;
|
|
1050
|
+
if (authConfig?.enabled) {
|
|
1051
|
+
tokenManager = new SSETokenManager({
|
|
1052
|
+
ttl: authConfig.tokenTtl,
|
|
1053
|
+
store: authConfig.store
|
|
1054
|
+
});
|
|
1055
|
+
const tokenPath = streamPath.replace(/\/[^/]+$/, "/token");
|
|
1056
|
+
const mwHandlers = (config.middlewares ?? []).map((mw) => mw.handler);
|
|
1057
|
+
const getSubject = authConfig.getSubject ?? ((c) => c.get("auth")?.userId ?? null);
|
|
1058
|
+
app.post(tokenPath, ...mwHandlers, async (c) => {
|
|
1059
|
+
const subject = getSubject(c);
|
|
1060
|
+
if (!subject) {
|
|
1061
|
+
return c.json({ error: "Unable to identify subject" }, 401);
|
|
1062
|
+
}
|
|
1063
|
+
const token = await tokenManager.issue(subject);
|
|
1064
|
+
return c.json({ token });
|
|
1065
|
+
});
|
|
1066
|
+
if (debug) {
|
|
1067
|
+
serverLogger.info(`\u2713 SSE token endpoint registered at POST ${tokenPath}`);
|
|
1068
|
+
}
|
|
1069
|
+
}
|
|
1070
|
+
app.get(streamPath, createSSEHandler(config.events, eventsConfig, tokenManager));
|
|
1071
|
+
if (debug) {
|
|
1072
|
+
const eventNames = config.events.eventNames;
|
|
1073
|
+
serverLogger.info(`\u2713 SSE endpoint registered at ${streamPath}`, {
|
|
1074
|
+
events: eventNames,
|
|
1075
|
+
auth: !!authConfig?.enabled
|
|
1076
|
+
});
|
|
1077
|
+
}
|
|
1078
|
+
}
|
|
1079
|
+
function isDebugMode(config) {
|
|
1080
|
+
return config?.debug ?? process.env.NODE_ENV === "development";
|
|
539
1081
|
}
|
|
540
1082
|
var jobLogger = logger.child("@spfn/core:job");
|
|
541
1083
|
var bossInstance = null;
|
|
542
1084
|
var bossConfig = null;
|
|
543
|
-
async function initBoss(
|
|
1085
|
+
async function initBoss(options) {
|
|
544
1086
|
if (bossInstance) {
|
|
545
1087
|
jobLogger.warn("pg-boss already initialized, returning existing instance");
|
|
546
1088
|
return bossInstance;
|
|
547
1089
|
}
|
|
548
1090
|
jobLogger.info("Initializing pg-boss...");
|
|
549
|
-
bossConfig =
|
|
550
|
-
|
|
551
|
-
connectionString:
|
|
552
|
-
schema:
|
|
553
|
-
maintenanceIntervalSeconds:
|
|
554
|
-
|
|
555
|
-
|
|
1091
|
+
bossConfig = options;
|
|
1092
|
+
const pgBossOptions = {
|
|
1093
|
+
connectionString: options.connectionString,
|
|
1094
|
+
schema: options.schema ?? "spfn_queue",
|
|
1095
|
+
maintenanceIntervalSeconds: options.maintenanceIntervalSeconds ?? 120
|
|
1096
|
+
};
|
|
1097
|
+
if (options.monitorIntervalSeconds !== void 0 && options.monitorIntervalSeconds >= 1) {
|
|
1098
|
+
pgBossOptions.monitorIntervalSeconds = options.monitorIntervalSeconds;
|
|
1099
|
+
}
|
|
1100
|
+
bossInstance = new PgBoss(pgBossOptions);
|
|
556
1101
|
bossInstance.on("error", (error) => {
|
|
557
1102
|
jobLogger.error("pg-boss error:", error);
|
|
558
1103
|
});
|
|
@@ -639,7 +1184,11 @@ async function registerJobs(router) {
|
|
|
639
1184
|
}
|
|
640
1185
|
jobLogger2.info("All jobs registered successfully");
|
|
641
1186
|
}
|
|
1187
|
+
async function ensureQueue(boss, queueName) {
|
|
1188
|
+
await boss.createQueue(queueName);
|
|
1189
|
+
}
|
|
642
1190
|
async function registerWorker(boss, job2, queueName) {
|
|
1191
|
+
await ensureQueue(boss, queueName);
|
|
643
1192
|
await boss.work(
|
|
644
1193
|
queueName,
|
|
645
1194
|
{ batchSize: 1 },
|
|
@@ -686,6 +1235,7 @@ async function registerCronSchedule(boss, job2) {
|
|
|
686
1235
|
return;
|
|
687
1236
|
}
|
|
688
1237
|
jobLogger2.debug(`[Job:${job2.name}] Scheduling cron: ${job2.cronExpression}`);
|
|
1238
|
+
await ensureQueue(boss, job2.name);
|
|
689
1239
|
await boss.schedule(
|
|
690
1240
|
job2.name,
|
|
691
1241
|
job2.cronExpression,
|
|
@@ -699,6 +1249,7 @@ async function queueRunOnceJob(boss, job2) {
|
|
|
699
1249
|
return;
|
|
700
1250
|
}
|
|
701
1251
|
jobLogger2.debug(`[Job:${job2.name}] Queuing runOnce job`);
|
|
1252
|
+
await ensureQueue(boss, job2.name);
|
|
702
1253
|
await boss.send(
|
|
703
1254
|
job2.name,
|
|
704
1255
|
{},
|
|
@@ -762,16 +1313,16 @@ function printBanner(options) {
|
|
|
762
1313
|
}
|
|
763
1314
|
|
|
764
1315
|
// src/server/validation.ts
|
|
765
|
-
function validateServerConfig(
|
|
766
|
-
if (
|
|
767
|
-
if (!Number.isInteger(
|
|
1316
|
+
function validateServerConfig(config) {
|
|
1317
|
+
if (config.port !== void 0) {
|
|
1318
|
+
if (!Number.isInteger(config.port) || config.port < 0 || config.port > 65535) {
|
|
768
1319
|
throw new Error(
|
|
769
|
-
`Invalid port: ${
|
|
1320
|
+
`Invalid port: ${config.port}. Port must be an integer between 0 and 65535.`
|
|
770
1321
|
);
|
|
771
1322
|
}
|
|
772
1323
|
}
|
|
773
|
-
if (
|
|
774
|
-
const { request, keepAlive, headers } =
|
|
1324
|
+
if (config.timeout) {
|
|
1325
|
+
const { request, keepAlive, headers } = config.timeout;
|
|
775
1326
|
if (request !== void 0 && (request < 0 || !Number.isFinite(request))) {
|
|
776
1327
|
throw new Error(`Invalid timeout.request: ${request}. Must be a positive number.`);
|
|
777
1328
|
}
|
|
@@ -787,16 +1338,16 @@ function validateServerConfig(config2) {
|
|
|
787
1338
|
);
|
|
788
1339
|
}
|
|
789
1340
|
}
|
|
790
|
-
if (
|
|
791
|
-
const timeout =
|
|
1341
|
+
if (config.shutdown?.timeout !== void 0) {
|
|
1342
|
+
const timeout = config.shutdown.timeout;
|
|
792
1343
|
if (timeout < 0 || !Number.isFinite(timeout)) {
|
|
793
1344
|
throw new Error(`Invalid shutdown.timeout: ${timeout}. Must be a positive number.`);
|
|
794
1345
|
}
|
|
795
1346
|
}
|
|
796
|
-
if (
|
|
797
|
-
if (!
|
|
1347
|
+
if (config.healthCheck?.path) {
|
|
1348
|
+
if (!config.healthCheck.path.startsWith("/")) {
|
|
798
1349
|
throw new Error(
|
|
799
|
-
`Invalid healthCheck.path: "${
|
|
1350
|
+
`Invalid healthCheck.path: "${config.healthCheck.path}". Must start with "/".`
|
|
800
1351
|
);
|
|
801
1352
|
}
|
|
802
1353
|
}
|
|
@@ -805,9 +1356,7 @@ var DEFAULT_MAX_LISTENERS = 15;
|
|
|
805
1356
|
var TIMEOUTS = {
|
|
806
1357
|
SERVER_CLOSE: 5e3,
|
|
807
1358
|
DATABASE_CLOSE: 5e3,
|
|
808
|
-
REDIS_CLOSE: 5e3
|
|
809
|
-
PRODUCTION_ERROR_SHUTDOWN: 1e4
|
|
810
|
-
};
|
|
1359
|
+
REDIS_CLOSE: 5e3};
|
|
811
1360
|
var CONFIG_FILE_PATHS = [
|
|
812
1361
|
".spfn/server/server.config.mjs",
|
|
813
1362
|
".spfn/server/server.config",
|
|
@@ -815,9 +1364,9 @@ var CONFIG_FILE_PATHS = [
|
|
|
815
1364
|
"src/server/server.config.ts"
|
|
816
1365
|
];
|
|
817
1366
|
var processHandlersRegistered = false;
|
|
818
|
-
async function startServer(
|
|
819
|
-
|
|
820
|
-
const finalConfig = await loadAndMergeConfig(
|
|
1367
|
+
async function startServer(config) {
|
|
1368
|
+
loadEnv();
|
|
1369
|
+
const finalConfig = await loadAndMergeConfig(config);
|
|
821
1370
|
const { host, port, debug } = finalConfig;
|
|
822
1371
|
validateServerConfig(finalConfig);
|
|
823
1372
|
if (!host || !port) {
|
|
@@ -835,6 +1384,8 @@ async function startServer(config2) {
|
|
|
835
1384
|
const server = startHttpServer(app, host, port);
|
|
836
1385
|
const timeouts = getTimeoutConfig(finalConfig.timeout);
|
|
837
1386
|
applyServerTimeouts(server, timeouts);
|
|
1387
|
+
const fetchTimeouts = getFetchTimeoutConfig(finalConfig.fetchTimeout);
|
|
1388
|
+
applyGlobalFetchTimeouts(fetchTimeouts);
|
|
838
1389
|
logServerTimeouts(timeouts);
|
|
839
1390
|
printBanner({
|
|
840
1391
|
mode: debug ? "Development" : "Production",
|
|
@@ -851,11 +1402,6 @@ async function startServer(config2) {
|
|
|
851
1402
|
config: finalConfig,
|
|
852
1403
|
close: async () => {
|
|
853
1404
|
serverLogger.info("Manual server shutdown requested");
|
|
854
|
-
if (shutdownState.isShuttingDown) {
|
|
855
|
-
serverLogger.warn("Shutdown already in progress, ignoring manual close request");
|
|
856
|
-
return;
|
|
857
|
-
}
|
|
858
|
-
shutdownState.isShuttingDown = true;
|
|
859
1405
|
await shutdownServer();
|
|
860
1406
|
}
|
|
861
1407
|
};
|
|
@@ -875,7 +1421,7 @@ async function startServer(config2) {
|
|
|
875
1421
|
throw error;
|
|
876
1422
|
}
|
|
877
1423
|
}
|
|
878
|
-
async function loadAndMergeConfig(
|
|
1424
|
+
async function loadAndMergeConfig(config) {
|
|
879
1425
|
const cwd = process.cwd();
|
|
880
1426
|
let fileConfig = {};
|
|
881
1427
|
let loadedConfigPath = null;
|
|
@@ -899,26 +1445,26 @@ async function loadAndMergeConfig(config2) {
|
|
|
899
1445
|
}
|
|
900
1446
|
return {
|
|
901
1447
|
...fileConfig,
|
|
902
|
-
...
|
|
903
|
-
port:
|
|
904
|
-
host:
|
|
1448
|
+
...config,
|
|
1449
|
+
port: config?.port ?? fileConfig?.port ?? env.PORT,
|
|
1450
|
+
host: config?.host ?? fileConfig?.host ?? env.HOST
|
|
905
1451
|
};
|
|
906
1452
|
}
|
|
907
|
-
function getInfrastructureConfig(
|
|
1453
|
+
function getInfrastructureConfig(config) {
|
|
908
1454
|
return {
|
|
909
|
-
database:
|
|
910
|
-
redis:
|
|
1455
|
+
database: config.infrastructure?.database !== false,
|
|
1456
|
+
redis: config.infrastructure?.redis !== false
|
|
911
1457
|
};
|
|
912
1458
|
}
|
|
913
|
-
async function initializeInfrastructure(
|
|
914
|
-
if (
|
|
1459
|
+
async function initializeInfrastructure(config) {
|
|
1460
|
+
if (config.lifecycle?.beforeInfrastructure) {
|
|
915
1461
|
serverLogger.debug("Executing beforeInfrastructure hook...");
|
|
916
|
-
await
|
|
1462
|
+
await config.lifecycle.beforeInfrastructure(config);
|
|
917
1463
|
}
|
|
918
|
-
const infraConfig = getInfrastructureConfig(
|
|
1464
|
+
const infraConfig = getInfrastructureConfig(config);
|
|
919
1465
|
if (infraConfig.database) {
|
|
920
1466
|
serverLogger.debug("Initializing database...");
|
|
921
|
-
await initDatabase(
|
|
1467
|
+
await initDatabase(config.database);
|
|
922
1468
|
} else {
|
|
923
1469
|
serverLogger.debug("Database initialization disabled");
|
|
924
1470
|
}
|
|
@@ -928,11 +1474,11 @@ async function initializeInfrastructure(config2) {
|
|
|
928
1474
|
} else {
|
|
929
1475
|
serverLogger.debug("Redis initialization disabled");
|
|
930
1476
|
}
|
|
931
|
-
if (
|
|
1477
|
+
if (config.lifecycle?.afterInfrastructure) {
|
|
932
1478
|
serverLogger.debug("Executing afterInfrastructure hook...");
|
|
933
|
-
await
|
|
1479
|
+
await config.lifecycle.afterInfrastructure();
|
|
934
1480
|
}
|
|
935
|
-
if (
|
|
1481
|
+
if (config.jobs) {
|
|
936
1482
|
const dbUrl = env.DATABASE_URL;
|
|
937
1483
|
if (!dbUrl) {
|
|
938
1484
|
throw new Error(
|
|
@@ -942,10 +1488,24 @@ async function initializeInfrastructure(config2) {
|
|
|
942
1488
|
serverLogger.debug("Initializing pg-boss...");
|
|
943
1489
|
await initBoss({
|
|
944
1490
|
connectionString: dbUrl,
|
|
945
|
-
...
|
|
1491
|
+
...config.jobsConfig
|
|
946
1492
|
});
|
|
947
1493
|
serverLogger.debug("Registering jobs...");
|
|
948
|
-
await registerJobs(
|
|
1494
|
+
await registerJobs(config.jobs);
|
|
1495
|
+
}
|
|
1496
|
+
if (config.workflows) {
|
|
1497
|
+
const infraConfig2 = getInfrastructureConfig(config);
|
|
1498
|
+
if (!infraConfig2.database) {
|
|
1499
|
+
throw new Error(
|
|
1500
|
+
"Workflows require database connection. Ensure database is enabled in infrastructure config."
|
|
1501
|
+
);
|
|
1502
|
+
}
|
|
1503
|
+
serverLogger.debug("Initializing workflow engine...");
|
|
1504
|
+
config.workflows._init(
|
|
1505
|
+
getDatabase(),
|
|
1506
|
+
config.workflowsConfig
|
|
1507
|
+
);
|
|
1508
|
+
serverLogger.info("Workflow engine initialized");
|
|
949
1509
|
}
|
|
950
1510
|
}
|
|
951
1511
|
function startHttpServer(app, host, port) {
|
|
@@ -956,8 +1516,8 @@ function startHttpServer(app, host, port) {
|
|
|
956
1516
|
hostname: host
|
|
957
1517
|
});
|
|
958
1518
|
}
|
|
959
|
-
function logMiddlewareOrder(
|
|
960
|
-
const middlewareOrder = buildMiddlewareOrder(
|
|
1519
|
+
function logMiddlewareOrder(config) {
|
|
1520
|
+
const middlewareOrder = buildMiddlewareOrder(config);
|
|
961
1521
|
serverLogger.debug("Middleware execution order", {
|
|
962
1522
|
order: middlewareOrder
|
|
963
1523
|
});
|
|
@@ -969,8 +1529,8 @@ function logServerTimeouts(timeouts) {
|
|
|
969
1529
|
headers: `${timeouts.headers}ms`
|
|
970
1530
|
});
|
|
971
1531
|
}
|
|
972
|
-
function logServerStarted(debug, host, port,
|
|
973
|
-
const startupConfig = buildStartupConfig(
|
|
1532
|
+
function logServerStarted(debug, host, port, config, timeouts) {
|
|
1533
|
+
const startupConfig = buildStartupConfig(config, timeouts);
|
|
974
1534
|
serverLogger.info("Server started successfully", {
|
|
975
1535
|
mode: debug ? "development" : "production",
|
|
976
1536
|
host,
|
|
@@ -978,65 +1538,74 @@ function logServerStarted(debug, host, port, config2, timeouts) {
|
|
|
978
1538
|
config: startupConfig
|
|
979
1539
|
});
|
|
980
1540
|
}
|
|
981
|
-
function createShutdownHandler(server,
|
|
1541
|
+
function createShutdownHandler(server, config, shutdownState) {
|
|
982
1542
|
return async () => {
|
|
983
1543
|
if (shutdownState.isShuttingDown) {
|
|
984
1544
|
serverLogger.debug("Shutdown already in progress for this instance, skipping");
|
|
985
1545
|
return;
|
|
986
1546
|
}
|
|
987
1547
|
shutdownState.isShuttingDown = true;
|
|
988
|
-
|
|
989
|
-
|
|
990
|
-
|
|
991
|
-
|
|
992
|
-
|
|
993
|
-
|
|
994
|
-
|
|
995
|
-
serverLogger.error("HTTP server close error", err);
|
|
996
|
-
reject(err);
|
|
997
|
-
} else {
|
|
998
|
-
serverLogger.info("HTTP server closed");
|
|
999
|
-
resolve2();
|
|
1000
|
-
}
|
|
1001
|
-
});
|
|
1002
|
-
}),
|
|
1003
|
-
new Promise((_, reject) => {
|
|
1004
|
-
timeoutId = setTimeout(() => {
|
|
1005
|
-
reject(new Error(`HTTP server close timeout after ${TIMEOUTS.SERVER_CLOSE}ms`));
|
|
1006
|
-
}, TIMEOUTS.SERVER_CLOSE);
|
|
1007
|
-
})
|
|
1008
|
-
]).catch((error) => {
|
|
1009
|
-
if (timeoutId) clearTimeout(timeoutId);
|
|
1010
|
-
serverLogger.warn("HTTP server close timeout, forcing shutdown", error);
|
|
1011
|
-
});
|
|
1012
|
-
if (config2.jobs) {
|
|
1013
|
-
serverLogger.debug("Stopping pg-boss...");
|
|
1548
|
+
const shutdownTimeout = getShutdownTimeout(config.shutdown);
|
|
1549
|
+
const shutdownManager = getShutdownManager();
|
|
1550
|
+
shutdownManager.beginShutdown();
|
|
1551
|
+
serverLogger.info("Phase 1: Closing HTTP server (stop accepting new connections)...");
|
|
1552
|
+
await closeHttpServer(server);
|
|
1553
|
+
if (config.jobs) {
|
|
1554
|
+
serverLogger.info("Phase 2: Stopping pg-boss...");
|
|
1014
1555
|
try {
|
|
1015
1556
|
await stopBoss();
|
|
1557
|
+
serverLogger.info("pg-boss stopped");
|
|
1016
1558
|
} catch (error) {
|
|
1017
1559
|
serverLogger.error("pg-boss stop failed", error);
|
|
1018
1560
|
}
|
|
1019
1561
|
}
|
|
1020
|
-
|
|
1021
|
-
|
|
1562
|
+
const drainTimeout = Math.floor(shutdownTimeout * 0.8);
|
|
1563
|
+
serverLogger.info(`Phase 3: Draining tracked operations (timeout: ${drainTimeout}ms)...`);
|
|
1564
|
+
await shutdownManager.execute(drainTimeout);
|
|
1565
|
+
if (config.lifecycle?.beforeShutdown) {
|
|
1566
|
+
serverLogger.info("Phase 4: Executing beforeShutdown lifecycle hook...");
|
|
1022
1567
|
try {
|
|
1023
|
-
await
|
|
1568
|
+
await config.lifecycle.beforeShutdown();
|
|
1024
1569
|
} catch (error) {
|
|
1025
|
-
serverLogger.error("beforeShutdown hook failed", error);
|
|
1570
|
+
serverLogger.error("beforeShutdown lifecycle hook failed", error);
|
|
1026
1571
|
}
|
|
1027
1572
|
}
|
|
1028
|
-
|
|
1573
|
+
serverLogger.info("Phase 5: Closing infrastructure...");
|
|
1574
|
+
const infraConfig = getInfrastructureConfig(config);
|
|
1029
1575
|
if (infraConfig.database) {
|
|
1030
|
-
serverLogger.debug("Closing database connections...");
|
|
1031
1576
|
await closeInfrastructure(closeDatabase, "Database", TIMEOUTS.DATABASE_CLOSE);
|
|
1032
1577
|
}
|
|
1033
1578
|
if (infraConfig.redis) {
|
|
1034
|
-
serverLogger.debug("Closing Redis connections...");
|
|
1035
1579
|
await closeInfrastructure(closeCache, "Redis", TIMEOUTS.REDIS_CLOSE);
|
|
1036
1580
|
}
|
|
1037
1581
|
serverLogger.info("Server shutdown completed");
|
|
1038
1582
|
};
|
|
1039
1583
|
}
|
|
1584
|
+
async function closeHttpServer(server) {
|
|
1585
|
+
let timeoutId;
|
|
1586
|
+
await Promise.race([
|
|
1587
|
+
new Promise((resolve2, reject) => {
|
|
1588
|
+
server.close((err) => {
|
|
1589
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
1590
|
+
if (err) {
|
|
1591
|
+
serverLogger.error("HTTP server close error", err);
|
|
1592
|
+
reject(err);
|
|
1593
|
+
} else {
|
|
1594
|
+
serverLogger.info("HTTP server closed");
|
|
1595
|
+
resolve2();
|
|
1596
|
+
}
|
|
1597
|
+
});
|
|
1598
|
+
}),
|
|
1599
|
+
new Promise((_, reject) => {
|
|
1600
|
+
timeoutId = setTimeout(() => {
|
|
1601
|
+
reject(new Error(`HTTP server close timeout after ${TIMEOUTS.SERVER_CLOSE}ms`));
|
|
1602
|
+
}, TIMEOUTS.SERVER_CLOSE);
|
|
1603
|
+
})
|
|
1604
|
+
]).catch((error) => {
|
|
1605
|
+
if (timeoutId) clearTimeout(timeoutId);
|
|
1606
|
+
serverLogger.warn("HTTP server close timeout, forcing shutdown", error);
|
|
1607
|
+
});
|
|
1608
|
+
}
|
|
1040
1609
|
async function closeInfrastructure(closeFn, name, timeout) {
|
|
1041
1610
|
let timeoutId;
|
|
1042
1611
|
try {
|
|
@@ -1056,14 +1625,14 @@ async function closeInfrastructure(closeFn, name, timeout) {
|
|
|
1056
1625
|
serverLogger.error(`${name} close failed or timed out`, error);
|
|
1057
1626
|
}
|
|
1058
1627
|
}
|
|
1059
|
-
function createGracefulShutdown(shutdownServer,
|
|
1628
|
+
function createGracefulShutdown(shutdownServer, config, shutdownState) {
|
|
1060
1629
|
return async (signal) => {
|
|
1061
1630
|
if (shutdownState.isShuttingDown) {
|
|
1062
1631
|
serverLogger.warn(`${signal} received but shutdown already in progress, ignoring`);
|
|
1063
1632
|
return;
|
|
1064
1633
|
}
|
|
1065
1634
|
serverLogger.info(`${signal} received, starting graceful shutdown...`);
|
|
1066
|
-
const shutdownTimeout = getShutdownTimeout(
|
|
1635
|
+
const shutdownTimeout = getShutdownTimeout(config.shutdown);
|
|
1067
1636
|
let timeoutId;
|
|
1068
1637
|
try {
|
|
1069
1638
|
await Promise.race([
|
|
@@ -1091,31 +1660,8 @@ function createGracefulShutdown(shutdownServer, config2, shutdownState) {
|
|
|
1091
1660
|
}
|
|
1092
1661
|
};
|
|
1093
1662
|
}
|
|
1094
|
-
function handleProcessError(errorType
|
|
1095
|
-
|
|
1096
|
-
const isDevelopment = env.NODE_ENV === "development";
|
|
1097
|
-
if (isDevelopment || process.env.WATCH_MODE === "true") {
|
|
1098
|
-
serverLogger.info("Exiting immediately for clean restart");
|
|
1099
|
-
process.exit(1);
|
|
1100
|
-
} else if (isProduction) {
|
|
1101
|
-
serverLogger.info(`Attempting graceful shutdown after ${errorType}`);
|
|
1102
|
-
const forceExitTimer = setTimeout(() => {
|
|
1103
|
-
serverLogger.error(`Forced exit after ${TIMEOUTS.PRODUCTION_ERROR_SHUTDOWN}ms - graceful shutdown did not complete`);
|
|
1104
|
-
process.exit(1);
|
|
1105
|
-
}, TIMEOUTS.PRODUCTION_ERROR_SHUTDOWN);
|
|
1106
|
-
shutdown(errorType).then(() => {
|
|
1107
|
-
clearTimeout(forceExitTimer);
|
|
1108
|
-
serverLogger.info("Graceful shutdown completed, exiting");
|
|
1109
|
-
process.exit(0);
|
|
1110
|
-
}).catch((shutdownError) => {
|
|
1111
|
-
clearTimeout(forceExitTimer);
|
|
1112
|
-
serverLogger.error("Graceful shutdown failed", shutdownError);
|
|
1113
|
-
process.exit(1);
|
|
1114
|
-
});
|
|
1115
|
-
} else {
|
|
1116
|
-
serverLogger.info("Exiting immediately");
|
|
1117
|
-
process.exit(1);
|
|
1118
|
-
}
|
|
1663
|
+
function handleProcessError(errorType) {
|
|
1664
|
+
serverLogger.warn(`${errorType} occurred - server continues running. Check logs above for details.`);
|
|
1119
1665
|
}
|
|
1120
1666
|
function registerProcessHandlers(shutdown) {
|
|
1121
1667
|
if (processHandlersRegistered) {
|
|
@@ -1150,7 +1696,7 @@ function registerProcessHandlers(shutdown) {
|
|
|
1150
1696
|
} else {
|
|
1151
1697
|
serverLogger.error("Uncaught exception", error);
|
|
1152
1698
|
}
|
|
1153
|
-
handleProcessError("UNCAUGHT_EXCEPTION"
|
|
1699
|
+
handleProcessError("UNCAUGHT_EXCEPTION");
|
|
1154
1700
|
});
|
|
1155
1701
|
process.on("unhandledRejection", (reason, promise) => {
|
|
1156
1702
|
if (reason instanceof Error) {
|
|
@@ -1168,20 +1714,21 @@ function registerProcessHandlers(shutdown) {
|
|
|
1168
1714
|
promise
|
|
1169
1715
|
});
|
|
1170
1716
|
}
|
|
1171
|
-
handleProcessError("UNHANDLED_REJECTION"
|
|
1717
|
+
handleProcessError("UNHANDLED_REJECTION");
|
|
1172
1718
|
});
|
|
1173
1719
|
serverLogger.debug("Process-level shutdown handlers registered successfully");
|
|
1174
1720
|
}
|
|
1175
|
-
async function cleanupOnFailure(
|
|
1721
|
+
async function cleanupOnFailure(config) {
|
|
1176
1722
|
try {
|
|
1177
1723
|
serverLogger.debug("Cleaning up after initialization failure...");
|
|
1178
|
-
const infraConfig = getInfrastructureConfig(
|
|
1724
|
+
const infraConfig = getInfrastructureConfig(config);
|
|
1179
1725
|
if (infraConfig.database) {
|
|
1180
1726
|
await closeInfrastructure(closeDatabase, "Database", TIMEOUTS.DATABASE_CLOSE);
|
|
1181
1727
|
}
|
|
1182
1728
|
if (infraConfig.redis) {
|
|
1183
1729
|
await closeInfrastructure(closeCache, "Redis", TIMEOUTS.REDIS_CLOSE);
|
|
1184
1730
|
}
|
|
1731
|
+
resetShutdownManager();
|
|
1185
1732
|
serverLogger.debug("Cleanup completed");
|
|
1186
1733
|
} catch (cleanupError) {
|
|
1187
1734
|
serverLogger.error("Cleanup failed", cleanupError);
|
|
@@ -1307,10 +1854,45 @@ var ServerConfigBuilder = class {
|
|
|
1307
1854
|
* .build();
|
|
1308
1855
|
* ```
|
|
1309
1856
|
*/
|
|
1310
|
-
jobs(router,
|
|
1857
|
+
jobs(router, config) {
|
|
1311
1858
|
this.config.jobs = router;
|
|
1312
|
-
if (
|
|
1313
|
-
this.config.jobsConfig =
|
|
1859
|
+
if (config) {
|
|
1860
|
+
this.config.jobsConfig = config;
|
|
1861
|
+
}
|
|
1862
|
+
return this;
|
|
1863
|
+
}
|
|
1864
|
+
/**
|
|
1865
|
+
* Register event router for SSE (Server-Sent Events)
|
|
1866
|
+
*
|
|
1867
|
+
* Enables real-time event streaming to frontend clients.
|
|
1868
|
+
* Events defined with defineEvent() can be subscribed by:
|
|
1869
|
+
* - Backend: .subscribe() for internal handlers
|
|
1870
|
+
* - Jobs: .on(event) for background processing
|
|
1871
|
+
* - Frontend: SSE stream for real-time updates
|
|
1872
|
+
*
|
|
1873
|
+
* @example
|
|
1874
|
+
* ```typescript
|
|
1875
|
+
* import { defineEvent, defineEventRouter } from '@spfn/core/event';
|
|
1876
|
+
*
|
|
1877
|
+
* const userCreated = defineEvent('user.created', Type.Object({
|
|
1878
|
+
* userId: Type.String(),
|
|
1879
|
+
* }));
|
|
1880
|
+
*
|
|
1881
|
+
* const eventRouter = defineEventRouter({ userCreated });
|
|
1882
|
+
*
|
|
1883
|
+
* export default defineServerConfig()
|
|
1884
|
+
* .routes(appRouter)
|
|
1885
|
+
* .events(eventRouter) // → GET /events/stream
|
|
1886
|
+
* .build();
|
|
1887
|
+
*
|
|
1888
|
+
* // Custom path
|
|
1889
|
+
* .events(eventRouter, { path: '/sse' })
|
|
1890
|
+
* ```
|
|
1891
|
+
*/
|
|
1892
|
+
events(router, config) {
|
|
1893
|
+
this.config.events = router;
|
|
1894
|
+
if (config) {
|
|
1895
|
+
this.config.eventsConfig = config;
|
|
1314
1896
|
}
|
|
1315
1897
|
return this;
|
|
1316
1898
|
}
|
|
@@ -1356,6 +1938,33 @@ var ServerConfigBuilder = class {
|
|
|
1356
1938
|
this.config.infrastructure = infrastructure;
|
|
1357
1939
|
return this;
|
|
1358
1940
|
}
|
|
1941
|
+
/**
|
|
1942
|
+
* Register workflow router for workflow orchestration
|
|
1943
|
+
*
|
|
1944
|
+
* Automatically initializes the workflow engine after database is ready.
|
|
1945
|
+
*
|
|
1946
|
+
* @example
|
|
1947
|
+
* ```typescript
|
|
1948
|
+
* import { defineWorkflowRouter } from '@spfn/workflow';
|
|
1949
|
+
*
|
|
1950
|
+
* const workflowRouter = defineWorkflowRouter([
|
|
1951
|
+
* provisionTenant,
|
|
1952
|
+
* deprovisionTenant,
|
|
1953
|
+
* ]);
|
|
1954
|
+
*
|
|
1955
|
+
* export default defineServerConfig()
|
|
1956
|
+
* .routes(appRouter)
|
|
1957
|
+
* .workflows(workflowRouter)
|
|
1958
|
+
* .build();
|
|
1959
|
+
* ```
|
|
1960
|
+
*/
|
|
1961
|
+
workflows(router, config) {
|
|
1962
|
+
this.config.workflows = router;
|
|
1963
|
+
if (config) {
|
|
1964
|
+
this.config.workflowsConfig = config;
|
|
1965
|
+
}
|
|
1966
|
+
return this;
|
|
1967
|
+
}
|
|
1359
1968
|
/**
|
|
1360
1969
|
* Configure lifecycle hooks
|
|
1361
1970
|
* Can be called multiple times - hooks will be executed in registration order
|
|
@@ -1403,6 +2012,6 @@ function defineServerConfig() {
|
|
|
1403
2012
|
return new ServerConfigBuilder();
|
|
1404
2013
|
}
|
|
1405
2014
|
|
|
1406
|
-
export { createServer, defineServerConfig, loadEnvFiles, startServer };
|
|
2015
|
+
export { createServer, defineServerConfig, getShutdownManager, loadEnv, loadEnvFiles, startServer };
|
|
1407
2016
|
//# sourceMappingURL=index.js.map
|
|
1408
2017
|
//# sourceMappingURL=index.js.map
|