@noego/app 0.0.16 → 0.0.18
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/cjs/client.cjs +35 -0
- package/cjs/config.cjs +24 -0
- package/cjs/index.cjs +54 -0
- package/package.json +5 -2
- package/src/commands/dev.js +275 -17
- package/src/runtime/runtime.js +49 -2
package/cjs/client.cjs
ADDED
@@ -0,0 +1,35 @@
+'use strict';
+
+// CJS wrapper for ESM client module
+
+let modulePromise = null;
+
+function getModule() {
+  if (!modulePromise) {
+    modulePromise = import('../src/client.js');
+  }
+  return modulePromise;
+}
+
+// Export functions that return promises
+module.exports = {
+  setContext: async (...args) => {
+    const mod = await getModule();
+    return mod.setContext(...args);
+  },
+  boot: async (...args) => {
+    const mod = await getModule();
+    return mod.boot(...args);
+  },
+  clientBoot: async (...args) => {
+    const mod = await getModule();
+    return mod.clientBoot(...args);
+  },
+  clientInit: async (...args) => {
+    const mod = await getModule();
+    return mod.clientInit(...args);
+  },
+  get client() {
+    return getModule().then(mod => mod.client);
+  }
+};

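Note (not part of the published diff): the wrapper above defers the ESM import, so everything a CommonJS consumer gets back is asynchronous. A minimal usage sketch, assuming the package is installed; it only uses names shown in the diff and omits argument shapes, which the diff does not reveal:

    // consumer.cjs - hypothetical CommonJS consumer of the new wrapper
    const client = require('@noego/app/client');

    async function main() {
      // Every wrapped export awaits the lazy import('../src/client.js') first,
      // so even simple calls return promises.
      await client.clientInit(); // arguments omitted; their shape is not shown in the diff
      const instance = await client.client; // the `client` getter resolves to the ESM export
      console.log('client ready:', instance !== undefined);
    }

    main().catch((err) => {
      console.error(err);
      process.exit(1);
    });

The same lazy dynamic-import-and-cache pattern is used by cjs/config.cjs and cjs/index.cjs below.
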
package/cjs/config.cjs
ADDED
@@ -0,0 +1,24 @@
+'use strict';
+
+// CJS wrapper for ESM config module
+
+let modulePromise = null;
+
+function getModule() {
+  if (!modulePromise) {
+    modulePromise = import('../src/runtime/config.js');
+  }
+  return modulePromise;
+}
+
+// Export functions that return promises
+module.exports = {
+  buildConfig: async (...args) => {
+    const mod = await getModule();
+    return mod.buildConfig(...args);
+  },
+  getConfig: async (...args) => {
+    const mod = await getModule();
+    return mod.getConfig(...args);
+  }
+};

package/cjs/index.cjs
ADDED
@@ -0,0 +1,54 @@
+'use strict';
+
+// CJS wrapper for ESM module
+// Uses dynamic import to load ESM and caches the result
+
+let modulePromise = null;
+
+function getModule() {
+  if (!modulePromise) {
+    modulePromise = import('../src/index.js');
+  }
+  return modulePromise;
+}
+
+module.exports = new Proxy({}, {
+  get(_, prop) {
+    if (prop === 'then' || prop === 'catch' || prop === 'finally') {
+      return undefined; // Prevent treating as thenable
+    }
+    return async (...args) => {
+      const mod = await getModule();
+      const fn = mod[prop];
+      if (typeof fn === 'function') {
+        return fn(...args);
+      }
+      return fn;
+    };
+  }
+});
+
+// Also export named exports as promises for destructuring
+Object.defineProperties(module.exports, {
+  runCombinedServices: {
+    get() {
+      return async (...args) => {
+        const mod = await getModule();
+        return mod.runCombinedServices(...args);
+      };
+    }
+  },
+  boot: {
+    get() {
+      return async (...args) => {
+        const mod = await getModule();
+        return mod.boot(...args);
+      };
+    }
+  },
+  client: {
+    get() {
+      return getModule().then(mod => mod.client);
+    }
+  }
+});

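Note (not part of the published diff): unlike the client and config wrappers, the main entry is a Proxy, so any property access hands back an async forwarder that loads ../src/index.js on first call; runCombinedServices, boot, and client are additionally pinned via Object.defineProperties. A small illustrative sketch of what a require() consumer observes, assuming the package is installed:

    // main-consumer.cjs - hypothetical require() of the package root
    const app = require('@noego/app');

    // Named exports can be destructured; each comes back as a function
    // (an async forwarder into ../src/index.js).
    const { runCombinedServices, boot } = app;
    console.log(typeof runCombinedServices, typeof boot); // expect: function function

    // Arbitrary property access also yields a forwarder; calling it loads the
    // ESM module and returns the export (or its value, if it is not a function).
    console.log(typeof app.anyOtherExport); // expect: function
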
package/package.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@noego/app",
-  "version": "0.0.16",
+  "version": "0.0.18",
   "description": "Production build tool for Dinner/Forge apps.",
   "type": "module",
   "bin": {
@@ -9,14 +9,17 @@
   },
   "exports": {
     ".": {
-      "import": "./src/index.js"
+      "import": "./src/index.js",
+      "require": "./cjs/index.cjs"
     },
     "./client": {
       "import": "./src/client.js",
+      "require": "./cjs/client.cjs",
       "types": "./types/client.d.ts"
     },
     "./config": {
       "import": "./src/runtime/config.js",
+      "require": "./cjs/config.cjs",
       "types": "./types/config.d.ts"
     }
   },

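Note (not part of the published diff): with the added require conditions, the same specifiers now resolve for both module systems. A small resolution check, assuming the package is installed; the expected paths in the comments simply restate the exports map above:

    // check-exports.mjs - hypothetical resolution check
    import { createRequire } from 'node:module';
    const require = createRequire(import.meta.url);

    // The "require" condition should hand back the CJS wrapper...
    console.log(require.resolve('@noego/app/client')); // expect .../cjs/client.cjs

    // ...while a dynamic import goes through the "import" condition to the ESM source.
    const esm = await import('@noego/app/client');
    console.log(typeof esm.boot); // expect: function (./src/client.js exports boot)
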
package/src/commands/dev.js
CHANGED
@@ -1,6 +1,6 @@
 import path from 'node:path';
 import { fileURLToPath } from 'node:url';
-import { spawn } from 'node:child_process';
+import { spawn, execSync } from 'node:child_process';
 import { createBuildContext } from '../build/context.js';
 import { findConfigFile } from '../runtime/index.js';
 import { loadConfig } from '../runtime/config-loader.js';
@@ -28,6 +28,90 @@ function killProcessTree(pid, signal = 'SIGKILL') {
   }
 }
 
+/**
+ * Recovery utility for EADDRINUSE errors
+ * Uses lsof to detect what process is using a port, kills it, and waits for the port to be free
+ *
+ * @param {number} port - The port to recover
+ * @param {object} logger - Logger instance for output
+ * @returns {boolean} - True if recovery was successful, false otherwise
+ */
+function recoverFromPortInUse(port, logger) {
+  try {
+    logger.warn(`[PORT RECOVERY] Attempting to recover port ${port}...`);
+
+    // Run lsof to find what's using the port
+    let lsofOutput;
+    try {
+      lsofOutput = execSync(`lsof -i :${port}`, { encoding: 'utf-8' });
+    } catch (err) {
+      // lsof returns exit code 1 if no processes found
+      if (err.status === 1) {
+        logger.info(`[PORT RECOVERY] Port ${port} appears to be free (no process found)`);
+        return true;
+      }
+      throw err;
+    }
+
+    // Parse lsof output to extract PIDs
+    // Output format: COMMAND PID USER FD TYPE DEVICE SIZE/OFF NODE NAME
+    // Skip header line, extract second column (PID)
+    const lines = lsofOutput.trim().split('\n');
+    const pidsToKill = new Set();
+
+    for (let i = 1; i < lines.length; i++) { // Skip header
+      const columns = lines[i].split(/\s+/);
+      if (columns.length >= 2) {
+        const pid = parseInt(columns[1], 10);
+        if (!isNaN(pid) && pid !== process.pid) { // Don't kill ourselves
+          pidsToKill.add(pid);
+        }
+      }
+    }
+
+    if (pidsToKill.size === 0) {
+      logger.info(`[PORT RECOVERY] No killable processes found on port ${port}`);
+      return true;
+    }
+
+    // Kill each process holding the port
+    for (const pid of pidsToKill) {
+      try {
+        logger.warn(`[PORT RECOVERY] Killing stale process PID ${pid} holding port ${port}`);
+        execSync(`kill -9 ${pid}`, { encoding: 'utf-8' });
+      } catch (killErr) {
+        // Process might already be dead
+        logger.debug(`[PORT RECOVERY] Could not kill PID ${pid}: ${killErr.message}`);
+      }
+    }
+
+    // Brief wait for OS to release the port
+    // execSync is synchronous so we use a simple busy-wait
+    const waitStart = Date.now();
+    const maxWait = 2000; // 2 seconds max
+    while (Date.now() - waitStart < maxWait) {
+      try {
+        // Check if port is now free by running lsof again
+        execSync(`lsof -i :${port}`, { encoding: 'utf-8' });
+        // If lsof succeeds, port is still in use - keep waiting
+        // Small synchronous delay
+        const spinStart = Date.now();
+        while (Date.now() - spinStart < 100) { /* busy wait 100ms */ }
+      } catch {
+        // lsof failed = port is free
+        logger.info(`[PORT RECOVERY] Port ${port} is now free`);
+        return true;
+      }
+    }
+
+    logger.warn(`[PORT RECOVERY] Port ${port} may still be in use after recovery attempt`);
+    return false;
+  } catch (error) {
+    logger.error(`[PORT RECOVERY] Failed to recover port ${port}: ${error.message}`);
+    return false;
+  }
+}
+
 export async function runDev(config) {
   const context = createBuildContext(config);
   const { logger } = context;
@@ -441,6 +525,7 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
 
   // Crash restart tracking
   const MAX_CRASH_RESTARTS = 3;
+  const PORT_RECOVERY_TIMEOUT = 30000; // 30 seconds to wait for port to become free
   const CRASH_RESTART_DELAY = 2000; // 2 seconds between crash restarts
   const STABILITY_THRESHOLD = 30000; // Reset crash counter if running 30+ seconds
   let backendCrashRestarts = 0;
@@ -448,7 +533,27 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
   let backendStartTime = 0;
   let frontendStartTime = 0;
 
-  const startBackend = () => {
+  // Port recovery retry tracking
+  const MAX_PORT_RECOVERY_RETRIES = 3;
+  let backendPortRetries = 0;
+  let frontendPortRetries = 0;
+
+  const startBackend = (retryAfterRecovery = false) => {
+    if (retryAfterRecovery) {
+      backendPortRetries++;
+      if (backendPortRetries > MAX_PORT_RECOVERY_RETRIES) {
+        logger.error(`[BACKEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}).`);
+        logger.info(`[BACKEND] Service is down. Waiting for file change to retry...`);
+        backendProc = null;
+        // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
+        return;
+      }
+      logger.warn(`[BACKEND] Port recovery retry ${backendPortRetries}/${MAX_PORT_RECOVERY_RETRIES}...`);
+    } else {
+      // Reset retry counter on normal start
+      backendPortRetries = 0;
+    }
+
     backendRestartCount++;
     backendStartTime = Date.now();
     logger.info(`🚀 [RESTART #${backendRestartCount}] Starting backend on port ${backendPort}...`);
@@ -458,13 +563,23 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
       NOEGO_PORT: String(backendPort)
     };
 
+    // Capture stderr to differentiate between port errors and boot errors
+    let stderrBuffer = '';
+
     backendProc = spawn(tsxExecutable, tsxArgs, {
       cwd: context.config.rootDir,
       env: backendEnv,
-      stdio: 'inherit',
+      stdio: ['inherit', 'inherit', 'pipe'], // Pipe stderr to capture errors
      detached: false
     });
 
+    // Capture stderr while also logging it to console
+    backendProc.stderr.on('data', (data) => {
+      const text = data.toString();
+      stderrBuffer += text;
+      process.stderr.write(text); // Still show errors to user
+    });
+
     backendProc.on('exit', (code, signal) => {
       if (isShuttingDown) return;
 
@@ -472,19 +587,40 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
       const runDuration = Date.now() - backendStartTime;
       if (runDuration > STABILITY_THRESHOLD) {
         backendCrashRestarts = 0;
+        backendPortRetries = 0; // Also reset port retries on stability
       }
 
+      // Quick exit (< 5 seconds) might be port conflict OR boot error
+      const isQuickExit = runDuration < 5000;
+
+      // Check stderr for EADDRINUSE to determine error type
+      const isPortError = stderrBuffer.includes('EADDRINUSE') ||
+                          stderrBuffer.includes('address already in use') ||
+                          stderrBuffer.includes('listen EADDRINUSE');
+
       if (code !== null && code !== 0) {
-        logger.error(`[BACKEND] Exited with code ${code}, signal ${signal}`);
+        logger.error(`[BACKEND] Exited with code ${code}, signal ${signal}, ran for ${runDuration}ms`);
 
-        //
+        // Differentiate between port errors and other boot errors
+        if (isQuickExit && !isPortError && stderrBuffer.length > 0) {
+          // Boot error (syntax error, missing module, etc.) - don't retry
+          logger.error(`[BACKEND] Boot error detected (not a port conflict). Not retrying.`);
+          logger.error(`[BACKEND] Fix the error and save a file to trigger a new restart attempt.`);
+          backendProc = null;
+          // Reset crash counter so next file change gets a fresh attempt
+          backendCrashRestarts = 0;
+          backendPortRetries = 0;
+          return;
+        }
+
+        // Auto-restart on crash if under limit (for port errors or runtime crashes)
         if (backendCrashRestarts < MAX_CRASH_RESTARTS) {
           backendCrashRestarts++;
           logger.warn(`[BACKEND] Crash detected. Auto-restart ${backendCrashRestarts}/${MAX_CRASH_RESTARTS}...`);
 
           // Wait for port to be free before restarting (fixes race condition)
           backendProc = null;
-          waitForPortFree(backendPort,
+          waitForPortFree(backendPort, PORT_RECOVERY_TIMEOUT, 100).subscribe({
             next: () => {
               if (!isShuttingDown) {
                 logger.info(`[BACKEND] Port ${backendPort} is free, restarting...`);
@@ -492,25 +628,72 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
               }
             },
             error: (err) => {
-              logger.warn(`[BACKEND] Port wait
+              logger.warn(`[BACKEND] Port wait timed out: ${err.message}`);
+
+              // Quick exit + port not free + port error = likely EADDRINUSE from stale process
+              if (isQuickExit && isPortError) {
+                logger.warn(`[BACKEND] Port conflict detected - attempting port recovery...`);
+                const recovered = recoverFromPortInUse(backendPort, logger);
+                if (recovered && !isShuttingDown) {
+                  // Wait for OS to fully release the port before retrying
+                  setTimeout(() => {
+                    if (!isShuttingDown) {
+                      startBackend(true); // Mark as recovery retry
+                    }
+                  }, CRASH_RESTART_DELAY);
+                  return;
+                }
+              }
+
+              // Fallback: try restart anyway
               if (!isShuttingDown) {
                 startBackend();
               }
             }
           });
         } else {
-          logger.error(`[BACKEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS})
-
+          logger.error(`[BACKEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}).`);
+          logger.info(`[BACKEND] Service is down. Waiting for file change to retry...`);
+          backendProc = null;
+          // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
         }
       }
     });
 
     backendProc.on('error', (err) => {
       logger.error(`[BACKEND] Spawn error: ${err.message}`);
+
+      // Check if this is an EADDRINUSE-like error at spawn level
+      if (err.code === 'EADDRINUSE' || err.message.includes('EADDRINUSE')) {
+        logger.warn(`[BACKEND] EADDRINUSE detected at spawn - attempting port recovery...`);
+        const recovered = recoverFromPortInUse(backendPort, logger);
+        if (recovered && backendPortRetries < MAX_PORT_RECOVERY_RETRIES && !isShuttingDown) {
+          setTimeout(() => {
+            if (!isShuttingDown) {
+              startBackend(true);
+            }
+          }, CRASH_RESTART_DELAY);
+        }
+      }
     });
   };
 
-  const startFrontend = () => {
+  const startFrontend = (retryAfterRecovery = false) => {
+    if (retryAfterRecovery) {
+      frontendPortRetries++;
+      if (frontendPortRetries > MAX_PORT_RECOVERY_RETRIES) {
+        logger.error(`[FRONTEND] Exceeded max port recovery retries (${MAX_PORT_RECOVERY_RETRIES}).`);
+        logger.info(`[FRONTEND] Service is down. Waiting for file change to retry...`);
+        frontendProc = null;
+        // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
+        return;
+      }
+      logger.warn(`[FRONTEND] Port recovery retry ${frontendPortRetries}/${MAX_PORT_RECOVERY_RETRIES}...`);
+    } else {
+      // Reset retry counter on normal start
+      frontendPortRetries = 0;
+    }
+
     frontendRestartCount++;
     frontendStartTime = Date.now();
     logger.info(`🚀 [RESTART #${frontendRestartCount}] Starting frontend on port ${frontendPort}...`);
@@ -520,13 +703,23 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
       NOEGO_PORT: String(frontendPort)
     };
 
+    // Capture stderr to differentiate between port errors and boot errors
+    let stderrBuffer = '';
+
     frontendProc = spawn(tsxExecutable, tsxArgs, {
       cwd: context.config.rootDir,
       env: frontendEnv,
-      stdio: 'inherit',
+      stdio: ['inherit', 'inherit', 'pipe'], // Pipe stderr to capture errors
       detached: false
     });
 
+    // Capture stderr while also logging it to console
+    frontendProc.stderr.on('data', (data) => {
+      const text = data.toString();
+      stderrBuffer += text;
+      process.stderr.write(text); // Still show errors to user
+    });
+
     frontendProc.on('exit', (code, signal) => {
       if (isShuttingDown) return;
 
@@ -534,19 +727,40 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
       const runDuration = Date.now() - frontendStartTime;
       if (runDuration > STABILITY_THRESHOLD) {
         frontendCrashRestarts = 0;
+        frontendPortRetries = 0; // Also reset port retries on stability
       }
 
+      // Quick exit (< 5 seconds) might be port conflict OR boot error
+      const isQuickExit = runDuration < 5000;
+
+      // Check stderr for EADDRINUSE to determine error type
+      const isPortError = stderrBuffer.includes('EADDRINUSE') ||
+                          stderrBuffer.includes('address already in use') ||
+                          stderrBuffer.includes('listen EADDRINUSE');
+
       if (code !== null && code !== 0) {
-        logger.error(`[FRONTEND] Exited with code ${code}, signal ${signal}`);
+        logger.error(`[FRONTEND] Exited with code ${code}, signal ${signal}, ran for ${runDuration}ms`);
+
+        // Differentiate between port errors and other boot errors
+        if (isQuickExit && !isPortError && stderrBuffer.length > 0) {
+          // Boot error (syntax error, missing module, etc.) - don't retry
+          logger.error(`[FRONTEND] Boot error detected (not a port conflict). Not retrying.`);
+          logger.error(`[FRONTEND] Fix the error and save a file to trigger a new restart attempt.`);
+          frontendProc = null;
+          // Reset crash counter so next file change gets a fresh attempt
+          frontendCrashRestarts = 0;
+          frontendPortRetries = 0;
+          return;
+        }
 
-        // Auto-restart on crash if under limit
+        // Auto-restart on crash if under limit (for port errors or runtime crashes)
         if (frontendCrashRestarts < MAX_CRASH_RESTARTS) {
           frontendCrashRestarts++;
           logger.warn(`[FRONTEND] Crash detected. Auto-restart ${frontendCrashRestarts}/${MAX_CRASH_RESTARTS}...`);
 
           // Wait for port to be free before restarting (fixes race condition)
           frontendProc = null;
-          waitForPortFree(frontendPort,
+          waitForPortFree(frontendPort, PORT_RECOVERY_TIMEOUT, 100).subscribe({
             next: () => {
               if (!isShuttingDown) {
                 logger.info(`[FRONTEND] Port ${frontendPort} is free, restarting...`);
@@ -554,21 +768,53 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
               }
             },
             error: (err) => {
-              logger.warn(`[FRONTEND] Port wait
+              logger.warn(`[FRONTEND] Port wait timed out: ${err.message}`);
+
+              // Quick exit + port not free + port error = likely EADDRINUSE from stale process
+              if (isQuickExit && isPortError) {
+                logger.warn(`[FRONTEND] Port conflict detected - attempting port recovery...`);
+                const recovered = recoverFromPortInUse(frontendPort, logger);
+                if (recovered && !isShuttingDown) {
+                  // Wait for OS to fully release the port before retrying
+                  setTimeout(() => {
+                    if (!isShuttingDown) {
+                      startFrontend(true); // Mark as recovery retry
+                    }
+                  }, CRASH_RESTART_DELAY);
+                  return;
+                }
+              }
+
+              // Fallback: try restart anyway
               if (!isShuttingDown) {
                 startFrontend();
               }
             }
           });
         } else {
-          logger.error(`[FRONTEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS})
-
+          logger.error(`[FRONTEND] Exceeded max crash restarts (${MAX_CRASH_RESTARTS}).`);
+          logger.info(`[FRONTEND] Service is down. Waiting for file change to retry...`);
+          frontendProc = null;
+          // Don't shutdown - keep file watcher running so a file change can trigger a fresh restart
        }
      }
     });
 
     frontendProc.on('error', (err) => {
       logger.error(`[FRONTEND] Spawn error: ${err.message}`);
+
+      // Check if this is an EADDRINUSE-like error at spawn level
+      if (err.code === 'EADDRINUSE' || err.message.includes('EADDRINUSE')) {
+        logger.warn(`[FRONTEND] EADDRINUSE detected at spawn - attempting port recovery...`);
+        const recovered = recoverFromPortInUse(frontendPort, logger);
+        if (recovered && frontendPortRetries < MAX_PORT_RECOVERY_RETRIES && !isShuttingDown) {
+          setTimeout(() => {
+            if (!isShuttingDown) {
+              startFrontend(true);
+            }
+          }, CRASH_RESTART_DELAY);
+        }
+      }
     });
   };
 
@@ -782,6 +1028,9 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
       // Then start the new backend
       of(undefined).pipe(
         tap(() => {
+          // Reset crash counters - file change gives a fresh start
+          backendCrashRestarts = 0;
+          backendPortRetries = 0;
           startBackend();
           logger.info(`✅ Backend restart complete\n`);
         })
@@ -818,6 +1067,9 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
      // Then start the new frontend
       of(undefined).pipe(
         tap(() => {
+          // Reset crash counters - file change gives a fresh start
+          frontendCrashRestarts = 0;
+          frontendPortRetries = 0;
           startFrontend();
           logger.info(`✅ Frontend restart complete\n`);
         })
@@ -893,6 +1145,12 @@ async function runSplitServeWithWatch(context, tsxExecutable, tsxArgs, baseEnv,
     })
     .on('error', handleWatcherError);
 
+  // Proactive port recovery before initial startup
+  // This handles the case where stale processes from a previous dev session are holding the ports
+  logger.info(`[STARTUP] Checking ports ${backendPort} and ${frontendPort} for stale processes...`);
+  recoverFromPortInUse(backendPort, logger);
+  recoverFromPortInUse(frontendPort, logger);
+
   // Start initial processes
   startBackend();
   startFrontend();

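Note (not part of the published diff): recoverFromPortInUse is Unix-specific; it shells out to lsof -i :<port> and kill -9 via execSync and then busy-waits up to two seconds for the port to clear. As a point of comparison only (not the package's code), the same "is this port actually free?" question can be answered without parsing lsof output by briefly binding the port with Node's net module:

    // port-probe.mjs - illustrative only; @noego/app itself uses lsof/kill
    import net from 'node:net';

    function isPortFree(port) {
      return new Promise((resolve) => {
        const probe = net.createServer()
          .once('error', () => resolve(false)) // EADDRINUSE, EACCES, ... -> treat as not free
          .once('listening', () => probe.close(() => resolve(true))) // bound successfully -> free
          .listen(port, '0.0.0.0');
      });
    }

    console.log('port 3000 free?', await isPortFree(3000));
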
package/src/runtime/runtime.js
CHANGED
@@ -390,10 +390,23 @@ async function runBackendService(config) {
   console.log('[backend] config.dev.backendPort:', config.dev.backendPort);
   console.log('[backend] Using port:', backendPort);
 
-
+  const httpServer = http.createServer(backendApp);
+
+  httpServer.on('error', (err) => {
+    if (err.code === 'EADDRINUSE') {
+      console.error(`[ERROR] Port ${backendPort} is already in use (EADDRINUSE)`);
+      console.error(`[ERROR] This likely means a stale process is still running.`);
+      console.error(`[ERROR] The dev server will attempt to recover...`);
+    } else {
+      console.error(`[ERROR] Server error on port ${backendPort}:`, err.message);
+    }
+    process.exit(1);
+  });
+
+  httpServer.listen(backendPort, '0.0.0.0', () => {
     console.log(`Backend server running on http://localhost:${backendPort}`);
   });
-
+
   return backendApp;
 }
 
@@ -441,6 +454,17 @@ async function runFrontendService(config) {
   console.log('[frontend] config.dev.port:', config.dev.port);
   console.log('[frontend] Using port:', frontendPort);
 
+  httpServer.on('error', (err) => {
+    if (err.code === 'EADDRINUSE') {
+      console.error(`[ERROR] Port ${frontendPort} is already in use (EADDRINUSE)`);
+      console.error(`[ERROR] This likely means a stale process is still running.`);
+      console.error(`[ERROR] The dev server will attempt to recover...`);
+    } else {
+      console.error(`[ERROR] Server error on port ${frontendPort}:`, err.message);
+    }
+    process.exit(1);
+  });
+
   httpServer.listen(frontendPort, '0.0.0.0', () => {
     console.log(`Frontend server running on http://localhost:${frontendPort}`);
   });
@@ -669,6 +693,17 @@ async function runRouterService(config) {
     try { socket?.destroy?.(); } catch {}
   });
 
+  httpServer.on('error', (err) => {
+    if (err.code === 'EADDRINUSE') {
+      console.error(`[ERROR] Port ${routerPort} is already in use (EADDRINUSE)`);
+      console.error(`[ERROR] This likely means a stale process is still running.`);
+      console.error(`[ERROR] The dev server will attempt to recover...`);
+    } else {
+      console.error(`[ERROR] Server error on port ${routerPort}:`, err.message);
+    }
+    process.exit(1);
+  });
+
   httpServer.listen(routerPort, '0.0.0.0', () => {
     console.log(`Router server running on http://localhost:${routerPort}`);
     console.log(`  Proxying to frontend on port ${frontendPort}`);
@@ -724,6 +759,18 @@ export async function runCombinedServices(config, options = {}) {
 
   // Allow PORT env var to override config
   const port = process.env.PORT ? parseInt(process.env.PORT, 10) : (config.dev?.port || 3000);
+
+  httpServer.on('error', (err) => {
+    if (err.code === 'EADDRINUSE') {
+      console.error(`[ERROR] Port ${port} is already in use (EADDRINUSE)`);
+      console.error(`[ERROR] This likely means a stale process is still running.`);
+      console.error(`[ERROR] The dev server will attempt to recover...`);
+    } else {
+      console.error(`[ERROR] Server error on port ${port}:`, err.message);
+    }
+    process.exit(1);
+  });
+
   httpServer.listen(port, '0.0.0.0', () => {
     const services = [];
     if (hasBackend && config.server?.main_abs) services.push('Backend');
