@hienlh/ppm 0.8.65 → 0.8.66
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/package.json +1 -1
- package/src/services/supervisor.ts +12 -2
package/CHANGELOG.md
CHANGED
|
@@ -1,5 +1,12 @@
|
|
|
1
1
|
# Changelog
|
|
2
2
|
|
|
3
|
+
## [0.8.66] - 2026-03-29
|
|
4
|
+
|
|
5
|
+
### Fixed
|
|
6
|
+
- **Auto-upgrade port conflict**: Supervisor self-replace now sets `shuttingDown` flag before killing server, preventing crash-restart loop from respawning on the same port as the new supervisor
|
|
7
|
+
- **Account cooldown re-enable**: Handle expired accounts that cannot be re-enabled during cooldown clear — disable them instead of crashing
|
|
8
|
+
- **Hot-reload log duplication**: Guard `setupLogFile()` against re-wrapping console on `bun --hot` module re-execution
|
|
9
|
+
|
|
3
10
|
## [0.8.65] - 2026-03-29
|
|
4
11
|
|
|
5
12
|
### Added
|
package/src/services/supervisor.ts
CHANGED
|
@@ -328,6 +328,9 @@ async function selfReplace(): Promise<{ success: boolean; error?: string }> {
|
|
|
328
328
|
const currentSupervisorPid = process.pid;
|
|
329
329
|
|
|
330
330
|
try {
|
|
331
|
+
// Prevent spawnServer crash-restart loop from respawning killed children
|
|
332
|
+
shuttingDown = true;
|
|
333
|
+
|
|
331
334
|
// Kill server + tunnel children FIRST to free the port for the new supervisor
|
|
332
335
|
log("INFO", "Stopping server and tunnel before spawning new supervisor");
|
|
333
336
|
if (serverChild) { try { serverChild.kill(); } catch {} serverChild = null; }
|
|
@@ -365,12 +368,14 @@ async function selfReplace(): Promise<{ success: boolean; error?: string }> {
|
|
|
365
368
|
} catch {}
|
|
366
369
|
}
|
|
367
370
|
|
|
368
|
-
// Timeout — new supervisor didn't start
|
|
371
|
+
// Timeout — new supervisor didn't start, restore old supervisor
|
|
369
372
|
log("ERROR", "Self-replace timeout: new supervisor did not start");
|
|
370
373
|
try { child.kill(); } catch {}
|
|
374
|
+
shuttingDown = false;
|
|
371
375
|
return { success: false, error: "New supervisor failed to start within 30s" };
|
|
372
376
|
} catch (e) {
|
|
373
377
|
log("ERROR", `Self-replace error: ${e}`);
|
|
378
|
+
shuttingDown = false;
|
|
374
379
|
return { success: false, error: (e as Error).message };
|
|
375
380
|
}
|
|
376
381
|
}
|
|
@@ -436,7 +441,12 @@ export async function runSupervisor(opts: {
|
|
|
436
441
|
process.on("SIGUSR1", async () => {
|
|
437
442
|
log("INFO", "SIGUSR1 received, starting self-replace for upgrade");
|
|
438
443
|
const result = await selfReplace();
|
|
439
|
-
if (!result.success)
|
|
444
|
+
if (!result.success) {
|
|
445
|
+
log("ERROR", `Self-replace failed: ${result.error}, restarting children`);
|
|
446
|
+
// Respawn server (and tunnel if configured) since selfReplace killed them
|
|
447
|
+
spawnServer(serverArgs, logFd);
|
|
448
|
+
if (opts.share) spawnTunnel(opts.port);
|
|
449
|
+
}
|
|
440
450
|
});
|
|
441
451
|
|
|
442
452
|
// Start health checks
|