@portel/photon 1.20.1 → 1.22.0
- package/README.md +5 -5
- package/dist/ag-ui/adapter.d.ts +4 -1
- package/dist/ag-ui/adapter.d.ts.map +1 -1
- package/dist/ag-ui/adapter.js +58 -3
- package/dist/ag-ui/adapter.js.map +1 -1
- package/dist/ag-ui/types.d.ts +12 -0
- package/dist/ag-ui/types.d.ts.map +1 -1
- package/dist/auto-ui/beam/routes/api-browse.d.ts.map +1 -1
- package/dist/auto-ui/beam/routes/api-browse.js +8 -49
- package/dist/auto-ui/beam/routes/api-browse.js.map +1 -1
- package/dist/auto-ui/beam/routes/api-config.d.ts +1 -1
- package/dist/auto-ui/beam/routes/api-config.d.ts.map +1 -1
- package/dist/auto-ui/beam/routes/api-config.js +79 -1
- package/dist/auto-ui/beam/routes/api-config.js.map +1 -1
- package/dist/auto-ui/beam.d.ts.map +1 -1
- package/dist/auto-ui/beam.js +23 -31
- package/dist/auto-ui/beam.js.map +1 -1
- package/dist/auto-ui/bridge/index.d.ts.map +1 -1
- package/dist/auto-ui/bridge/index.js +107 -11
- package/dist/auto-ui/bridge/index.js.map +1 -1
- package/dist/auto-ui/bridge/renderers.d.ts +14 -0
- package/dist/auto-ui/bridge/renderers.d.ts.map +1 -1
- package/dist/auto-ui/bridge/renderers.js +680 -57
- package/dist/auto-ui/bridge/renderers.js.map +1 -1
- package/dist/auto-ui/frontend/index.html +3 -3
- package/dist/auto-ui/frontend/pure-view.html +19 -19
- package/dist/auto-ui/streamable-http-transport.d.ts.map +1 -1
- package/dist/auto-ui/streamable-http-transport.js +53 -2
- package/dist/auto-ui/streamable-http-transport.js.map +1 -1
- package/dist/auto-ui/ui-resolver.d.ts +25 -0
- package/dist/auto-ui/ui-resolver.d.ts.map +1 -0
- package/dist/auto-ui/ui-resolver.js +95 -0
- package/dist/auto-ui/ui-resolver.js.map +1 -0
- package/dist/beam-form.bundle.js +7 -7
- package/dist/beam-form.bundle.js.map +1 -1
- package/dist/beam.bundle.js +905 -185
- package/dist/beam.bundle.js.map +4 -4
- package/dist/cli/commands/build.d.ts.map +1 -1
- package/dist/cli/commands/build.js +9 -5
- package/dist/cli/commands/build.js.map +1 -1
- package/dist/cli/commands/init.d.ts.map +1 -1
- package/dist/cli/commands/init.js +93 -53
- package/dist/cli/commands/init.js.map +1 -1
- package/dist/cli/commands/publish.d.ts +14 -0
- package/dist/cli/commands/publish.d.ts.map +1 -0
- package/dist/cli/commands/publish.js +126 -0
- package/dist/cli/commands/publish.js.map +1 -0
- package/dist/cli/commands/run.d.ts.map +1 -1
- package/dist/cli/commands/run.js +2 -0
- package/dist/cli/commands/run.js.map +1 -1
- package/dist/cli/index.d.ts.map +1 -1
- package/dist/cli/index.js +3 -0
- package/dist/cli/index.js.map +1 -1
- package/dist/cli.d.ts +4 -0
- package/dist/cli.d.ts.map +1 -1
- package/dist/cli.js +11 -1
- package/dist/cli.js.map +1 -1
- package/dist/context.d.ts +6 -0
- package/dist/context.d.ts.map +1 -1
- package/dist/context.js +17 -5
- package/dist/context.js.map +1 -1
- package/dist/daemon/client.d.ts +9 -1
- package/dist/daemon/client.d.ts.map +1 -1
- package/dist/daemon/client.js +54 -1
- package/dist/daemon/client.js.map +1 -1
- package/dist/daemon/manager.d.ts +3 -0
- package/dist/daemon/manager.d.ts.map +1 -1
- package/dist/daemon/manager.js +88 -38
- package/dist/daemon/manager.js.map +1 -1
- package/dist/daemon/ownership.d.ts +12 -0
- package/dist/daemon/ownership.d.ts.map +1 -0
- package/dist/daemon/ownership.js +55 -0
- package/dist/daemon/ownership.js.map +1 -0
- package/dist/daemon/protocol.d.ts +4 -2
- package/dist/daemon/protocol.d.ts.map +1 -1
- package/dist/daemon/protocol.js +15 -2
- package/dist/daemon/protocol.js.map +1 -1
- package/dist/daemon/server.js +557 -83
- package/dist/daemon/server.js.map +1 -1
- package/dist/daemon/session-manager.d.ts +9 -1
- package/dist/daemon/session-manager.d.ts.map +1 -1
- package/dist/daemon/session-manager.js +54 -1
- package/dist/daemon/session-manager.js.map +1 -1
- package/dist/daemon/worker-manager.d.ts +12 -0
- package/dist/daemon/worker-manager.d.ts.map +1 -1
- package/dist/daemon/worker-manager.js +89 -6
- package/dist/daemon/worker-manager.js.map +1 -1
- package/dist/loader.d.ts +17 -9
- package/dist/loader.d.ts.map +1 -1
- package/dist/loader.js +415 -141
- package/dist/loader.js.map +1 -1
- package/dist/photon-cli-runner.d.ts.map +1 -1
- package/dist/photon-cli-runner.js +26 -2
- package/dist/photon-cli-runner.js.map +1 -1
- package/dist/photons/canvas/ui/canvas.photon.html +1493 -0
- package/dist/photons/canvas.photon.d.ts +400 -0
- package/dist/photons/canvas.photon.d.ts.map +1 -0
- package/dist/photons/canvas.photon.js +662 -0
- package/dist/photons/canvas.photon.js.map +1 -0
- package/dist/photons/canvas.photon.ts +814 -0
- package/dist/photons/publish.photon.d.ts +97 -0
- package/dist/photons/publish.photon.d.ts.map +1 -0
- package/dist/photons/publish.photon.js +569 -0
- package/dist/photons/publish.photon.js.map +1 -0
- package/dist/photons/publish.photon.ts +683 -0
- package/dist/photons/ui/canvas.photon.html +624 -0
- package/dist/resource-server.d.ts.map +1 -1
- package/dist/resource-server.js +7 -1
- package/dist/resource-server.js.map +1 -1
- package/dist/server.d.ts +7 -0
- package/dist/server.d.ts.map +1 -1
- package/dist/server.js +67 -37
- package/dist/server.js.map +1 -1
- package/dist/shared/error-handler.d.ts +1 -0
- package/dist/shared/error-handler.d.ts.map +1 -1
- package/dist/shared/error-handler.js +68 -10
- package/dist/shared/error-handler.js.map +1 -1
- package/dist/shared/logger.d.ts.map +1 -1
- package/dist/shared/logger.js +34 -0
- package/dist/shared/logger.js.map +1 -1
- package/dist/shared-utils.d.ts.map +1 -1
- package/dist/shared-utils.js +2 -2
- package/dist/shared-utils.js.map +1 -1
- package/dist/telemetry/context.d.ts +24 -0
- package/dist/telemetry/context.d.ts.map +1 -0
- package/dist/telemetry/context.js +17 -0
- package/dist/telemetry/context.js.map +1 -0
- package/dist/telemetry/logs.d.ts +38 -0
- package/dist/telemetry/logs.d.ts.map +1 -0
- package/dist/telemetry/logs.js +108 -0
- package/dist/telemetry/logs.js.map +1 -0
- package/dist/telemetry/metrics.d.ts +71 -0
- package/dist/telemetry/metrics.d.ts.map +1 -0
- package/dist/telemetry/metrics.js +184 -0
- package/dist/telemetry/metrics.js.map +1 -0
- package/dist/telemetry/otel.d.ts +20 -1
- package/dist/telemetry/otel.d.ts.map +1 -1
- package/dist/telemetry/otel.js +79 -2
- package/dist/telemetry/otel.js.map +1 -1
- package/dist/telemetry/sdk.d.ts +49 -0
- package/dist/telemetry/sdk.d.ts.map +1 -0
- package/dist/telemetry/sdk.js +110 -0
- package/dist/telemetry/sdk.js.map +1 -0
- package/dist/tsx-compiler.d.ts +23 -0
- package/dist/tsx-compiler.d.ts.map +1 -0
- package/dist/tsx-compiler.js +221 -0
- package/dist/tsx-compiler.js.map +1 -0
- package/package.json +7 -7
package/dist/daemon/server.js
CHANGED
@@ -24,6 +24,7 @@ import { timingSafeEqual, readBody, SimpleRateLimiter } from '../shared/security
 import { audit } from '../shared/audit.js';
 import { WorkerManager } from './worker-manager.js';
 import fastJsonPatch from 'fast-json-patch';
+import { getOwnerFilePath, isPidAlive, readOwnerRecord, removeOwnerRecord, waitForPidExit, writeOwnerRecord, } from './ownership.js';
 // eslint-disable-next-line @typescript-eslint/unbound-method
 const jsonPatchCompare = fastJsonPatch.compare;
 // Command line args: socketPath (global daemon only needs socket path)
@@ -37,6 +38,29 @@ if (!socketPath) {
     logger.error('Missing required argument: socketPath');
     process.exit(1);
 }
+const pidFile = path.join(path.dirname(socketPath), 'daemon.pid');
+const ownerFile = getOwnerFilePath(socketPath);
+let daemonOwnershipConfirmed = false;
+async function isSocketResponsive(target) {
+    if (process.platform === 'win32' || !fs.existsSync(target))
+        return false;
+    return new Promise((resolve) => {
+        const client = net.createConnection(target);
+        const timer = setTimeout(() => {
+            client.destroy();
+            resolve(false);
+        }, 1000);
+        client.on('connect', () => {
+            clearTimeout(timer);
+            client.destroy();
+            resolve(true);
+        });
+        client.on('error', () => {
+            clearTimeout(timer);
+            resolve(false);
+        });
+    });
+}
 // ════════════════════════════════════════════════════════════════════════════════
 // IN-PROCESS BROKER
 // All photons run inside the daemon process, so pub/sub is just in-memory dispatch.
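
The ownership helpers imported in the first hunk come from the new dist/daemon/ownership.js (listed above at +55 lines), whose body is not included in this diff. A minimal sketch of what those helpers plausibly look like, inferred only from how server.js calls them; the owner-file name, the record shape, and the polling interval are assumptions:

// ownership.js (hypothetical sketch, not the published implementation)
import fs from 'node:fs';
import path from 'node:path';

export function getOwnerFilePath(socketPath) {
    // Assumed naming convention for the owner record next to the socket
    return path.join(path.dirname(socketPath), 'daemon.owner.json');
}
export function readOwnerRecord(ownerFile) {
    try {
        return JSON.parse(fs.readFileSync(ownerFile, 'utf-8')); // { pid, socketPath, claimedAt }
    }
    catch {
        return null; // missing or corrupt record: treat as unowned
    }
}
export function writeOwnerRecord(ownerFile, record) {
    fs.writeFileSync(ownerFile, JSON.stringify(record, null, 2));
}
export function removeOwnerRecord(ownerFile, expectedPid) {
    // When expectedPid is given, only delete a record this process still owns
    const owner = readOwnerRecord(ownerFile);
    if (expectedPid !== undefined && owner?.pid !== expectedPid)
        return;
    try {
        fs.unlinkSync(ownerFile);
    }
    catch {
        // already gone
    }
}
export function isPidAlive(pid) {
    try {
        process.kill(pid, 0); // signal 0 probes for existence without killing
        return true;
    }
    catch {
        return false;
    }
}
export async function waitForPidExit(pid, timeoutMs) {
    const deadline = Date.now() + timeoutMs;
    while (Date.now() < deadline) {
        if (!isPidAlive(pid))
            return true;
        await new Promise((resolve) => setTimeout(resolve, 100));
    }
    return !isPidAlive(pid);
}
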
@@ -104,6 +128,38 @@ workerManager.onPublish = (channel, message) => {
     // Also forward to other workers
     workerManager.dispatchToWorkers(channel, message);
 };
+// Track connected sockets for graceful shutdown broadcast
+const connectedSockets = new Set();
+/** Reference to the daemon server for closing the listener during shutdown */
+let daemonServer = null;
+/** Whether the daemon is shutting down (reject new commands) */
+let isShuttingDown = false;
+/** Tracks active executeTool calls per composite key */
+const activeExecutions = new Map();
+/** Per-key mutex to prevent concurrent reloads (format-on-save race) */
+const reloadMutex = new Map();
+function trackExecution(key) {
+    const tracker = activeExecutions.get(key);
+    if (tracker) {
+        tracker.count++;
+    }
+    else {
+        activeExecutions.set(key, { count: 1 });
+    }
+}
+function untrackExecution(key) {
+    const tracker = activeExecutions.get(key);
+    if (!tracker)
+        return;
+    tracker.count--;
+    if (tracker.count <= 0 && tracker.drainResolve) {
+        tracker.drainResolve();
+        tracker.drainResolve = undefined;
+    }
+    if (tracker.count <= 0) {
+        activeExecutions.delete(key);
+    }
+}
 // Map of compositeKey -> SessionManager (lazy initialized)
 const sessionManagers = new Map();
 const photonPaths = new Map(); // compositeKey -> photonPath
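
Every execution path touched by the hunks below (scheduled jobs, webhook calls, @photon dependency calls, and both command paths) repeats the same track/try/finally shape around executeTool so the hot-reload drain can count in-flight work. A sketch of that invariant, using a hypothetical withTracking helper that the diff itself does not add:

async function withTracking(key, fn) {
    trackExecution(key); // increment the in-flight counter for this photon key
    try {
        return await fn();
    }
    finally {
        // decrement; when the count reaches zero during a reload drain,
        // untrackExecution resolves tracker.drainResolve and unblocks the swap
        untrackExecution(key);
    }
}
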
@@ -484,6 +540,7 @@ async function runJob(jobId) {
         return;
     }
     logger.info('Running scheduled job', { jobId, method: job.method, photon: job.photonName });
+    trackExecution(key);
     try {
         const session = await sessionManager.getOrCreateSession('scheduler', 'scheduler');
         await sessionManager.loader.executeTool(session.instance, job.method, job.args || {});
@@ -511,6 +568,9 @@ async function runJob(jobId) {
             error: getErrorMessage(error),
         });
     }
+    finally {
+        untrackExecution(key);
+    }
     scheduleJob(job);
 }
 function unscheduleJob(jobId) {
@@ -525,13 +585,14 @@ function unscheduleJob(jobId) {
     }
     return existed;
 }
-/** Update persisted schedule file
+/** Update persisted schedule file after job execution */
 function updatePersistedSchedule(jobId, photonName, updates) {
-    //
-    const
-
+    // Handle both ScheduleProvider jobs (photonName:sched:uuid) and IPC jobs (photonName:*:ipc:uuid)
+    const schedMatch = jobId.match(/^[^:]+:sched:(.+)$/);
+    const ipcMatch = jobId.match(/^[^:]+(?::[^:]+)?:ipc:(.+)$/);
+    if (!schedMatch && !ipcMatch)
         return;
-    const taskId =
+    const taskId = schedMatch ? schedMatch[1] : ipcMatch[1];
     const schedulesDir = path.join(process.env.PHOTON_SCHEDULES_DIR || path.join(os.homedir(), '.photon', 'schedules'), photonName.replace(/[^a-zA-Z0-9_-]/g, '_'));
     const filePath = path.join(schedulesDir, `${taskId}.json`);
     try {
@@ -547,6 +608,152 @@ function updatePersistedSchedule(jobId, photonName, updates) {
         // File may have been removed — ignore
     }
 }
+/** Persist an IPC-created schedule job to disk for daemon restart recovery */
+function persistIpcSchedule(job) {
+    const schedulesDir = path.join(process.env.PHOTON_SCHEDULES_DIR || path.join(os.homedir(), '.photon', 'schedules'), job.photonName.replace(/[^a-zA-Z0-9_-]/g, '_'));
+    try {
+        fs.mkdirSync(schedulesDir, { recursive: true });
+    }
+    catch {
+        // Directory may already exist
+    }
+    // Extract taskId from job ID (format: photonName:dirHash:ipc:taskId or photonName:ipc:taskId)
+    const match = job.id.match(/:ipc:(.+)$/);
+    const taskId = match ? match[1] : job.id;
+    const filePath = path.join(schedulesDir, `${taskId}.json`);
+    const persisted = {
+        id: job.id,
+        method: job.method,
+        args: job.args || {},
+        cron: job.cron,
+        photonName: job.photonName,
+        workingDir: job.workingDir,
+        source: 'ipc',
+        status: 'active',
+        createdAt: new Date(job.createdAt).toISOString(),
+        createdBy: job.createdBy,
+        executionCount: job.runCount,
+        lastExecutionAt: job.lastRun ? new Date(job.lastRun).toISOString() : null,
+    };
+    try {
+        fs.writeFileSync(filePath, JSON.stringify(persisted, null, 2));
+        logger.debug('Persisted IPC schedule', { jobId: job.id, path: filePath });
+    }
+    catch (err) {
+        logger.warn('Failed to persist IPC schedule', {
+            jobId: job.id,
+            error: getErrorMessage(err),
+        });
+    }
+}
+/** Delete a persisted IPC schedule file */
+function deletePersistedIpcSchedule(jobId, photonName) {
+    const match = jobId.match(/:ipc:(.+)$/);
+    if (!match)
+        return;
+    const taskId = match[1];
+    const schedulesDir = path.join(process.env.PHOTON_SCHEDULES_DIR || path.join(os.homedir(), '.photon', 'schedules'), photonName.replace(/[^a-zA-Z0-9_-]/g, '_'));
+    const filePath = path.join(schedulesDir, `${taskId}.json`);
+    try {
+        if (fs.existsSync(filePath)) {
+            fs.unlinkSync(filePath);
+            logger.debug('Deleted persisted IPC schedule', { jobId, path: filePath });
+        }
+    }
+    catch {
+        // Ignore — file may already be gone
+    }
+}
+/** Load all persisted schedules from disk on daemon startup */
+function loadAllPersistedSchedules() {
+    const baseDir = process.env.PHOTON_SCHEDULES_DIR || path.join(os.homedir(), '.photon', 'schedules');
+    if (!fs.existsSync(baseDir))
+        return;
+    let loadedCount = 0;
+    let skippedCount = 0;
+    const TTL_DAYS = 30;
+    const ttlMs = TTL_DAYS * 24 * 60 * 60 * 1000;
+    try {
+        const photonDirs = fs.readdirSync(baseDir, { withFileTypes: true });
+        for (const dir of photonDirs) {
+            if (!dir.isDirectory())
+                continue;
+            const schedulesPath = path.join(baseDir, dir.name);
+            const files = fs.readdirSync(schedulesPath).filter((f) => f.endsWith('.json'));
+            for (const file of files) {
+                const filePath = path.join(schedulesPath, file);
+                try {
+                    const content = fs.readFileSync(filePath, 'utf-8');
+                    const task = JSON.parse(content);
+                    // Skip non-IPC jobs (ScheduleProvider handles its own)
+                    if (task.source !== 'ipc')
+                        continue;
+                    // Validate required fields
+                    if (!task.id || !task.method || !task.cron || !task.photonName) {
+                        logger.warn('Skipping invalid persisted schedule', { file: filePath });
+                        skippedCount++;
+                        continue;
+                    }
+                    // TTL check: skip jobs not executed in 30+ days
+                    const lastExec = task.lastExecutionAt ? new Date(task.lastExecutionAt).getTime() : 0;
+                    const created = task.createdAt ? new Date(task.createdAt).getTime() : 0;
+                    const lastActivity = Math.max(lastExec, created);
+                    if (lastActivity > 0 && Date.now() - lastActivity > ttlMs) {
+                        logger.info('Removing expired schedule (TTL)', {
+                            jobId: task.id,
+                            lastActivity: new Date(lastActivity).toISOString(),
+                        });
+                        try {
+                            fs.unlinkSync(filePath);
+                        }
+                        catch {
+                            /* ignore */
+                        }
+                        skippedCount++;
+                        continue;
+                    }
+                    // Skip if already registered (ScheduleProvider may have loaded it)
+                    if (scheduledJobs.has(task.id))
+                        continue;
+                    const job = {
+                        id: task.id,
+                        method: task.method,
+                        args: task.args || {},
+                        cron: task.cron,
+                        runCount: task.executionCount || 0,
+                        createdAt: created || Date.now(),
+                        createdBy: task.createdBy,
+                        photonName: task.photonName,
+                        workingDir: task.workingDir,
+                    };
+                    if (scheduleJob(job)) {
+                        loadedCount++;
+                    }
+                    else {
+                        logger.warn('Failed to schedule persisted job (invalid cron?)', { jobId: task.id });
+                        skippedCount++;
+                    }
+                }
+                catch (err) {
+                    logger.warn('Failed to load persisted schedule file', {
+                        file: filePath,
+                        error: getErrorMessage(err),
+                    });
+                    skippedCount++;
+                }
+            }
+        }
+    }
+    catch (err) {
+        logger.warn('Failed to scan schedules directory', {
+            dir: baseDir,
+            error: getErrorMessage(err),
+        });
+    }
+    if (loadedCount > 0 || skippedCount > 0) {
+        logger.info('Loaded persisted schedules', { loaded: loadedCount, skipped: skippedCount });
+    }
+}
 // ════════════════════════════════════════════════════════════════════════════════
 // WEBHOOK HTTP SERVER
 // ════════════════════════════════════════════════════════════════════════════════
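
For reference, persistIpcSchedule writes one JSON file per task at $PHOTON_SCHEDULES_DIR/<photonName>/<taskId>.json (default base: ~/.photon/schedules). The shape follows directly from the persisted object above; the concrete values here are made up for illustration:

{
  "id": "my-photon:3f2b9c01:ipc:123e4567-e89b-12d3-a456-426614174000",
  "method": "sync",
  "args": {},
  "cron": "*/5 * * * *",
  "photonName": "my-photon",
  "workingDir": "/home/alice/project-a",
  "source": "ipc",
  "status": "active",
  "createdAt": "2025-01-01T00:00:00.000Z",
  "createdBy": "cli",
  "executionCount": 0,
  "lastExecutionAt": null
}

On the next daemon start, loadAllPersistedSchedules re-registers any file with source 'ipc' that is still within the 30-day TTL and not already registered by a ScheduleProvider.
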
@@ -644,6 +851,8 @@ function startWebhookServer(port) {
             }
             resolvedMethod = mapped;
         }
+        const webhookKey = compositeKey(photonName);
+        trackExecution(webhookKey);
         try {
             const session = await sessionManager.getOrCreateSession('webhook', 'webhook');
             const result = await sessionManager.loader.executeTool(session.instance, resolvedMethod, args);
@@ -665,6 +874,9 @@ function startWebhookServer(port) {
             res.writeHead(500, { 'Content-Type': 'application/json' });
             res.end(JSON.stringify({ error: getErrorMessage(error) }));
         }
+        finally {
+            untrackExecution(webhookKey);
+        }
     })();
 });
 webhookServer.listen(port, () => {
@@ -821,8 +1033,14 @@ async function getOrCreateSessionManager(photonName, photonPath, workingDir) {
         const depManager = sessionManagers.get(depKey);
         if (!depManager)
             throw new Error(`Dependency ${depName} not loaded`);
-
-
+        trackExecution(depKey);
+        try {
+            const loaded = await depManager.getOrLoadInstance('');
+            return await depManager.loader.executeTool(loaded, method, args);
+        }
+        finally {
+            untrackExecution(depKey);
+        }
     };
 }
 logger.info('Spawning worker thread for @worker photon', { photonName, key });
@@ -852,6 +1070,7 @@ async function getOrCreateSessionManager(photonName, photonPath, workingDir) {
         key,
         photonPath: pathToUse,
         workingDir,
+        ownerPid: daemonOwnershipConfirmed ? process.pid : null,
     });
     manager = new SessionManager(pathToUse, photonName, idleTimeout, logger.child({ scope: photonName }), workingDir);
     // Wire @photon dependency resolver: when this photon's loader encounters
@@ -895,8 +1114,15 @@ async function getOrCreateSessionManager(photonName, photonPath, workingDir) {
                 prop !== 'on' &&
                 prop !== 'off') {
                 return async (params) => {
-                    const
-
+                    const depExecKey = compositeKey(depName, workingDir);
+                    trackExecution(depExecKey);
+                    try {
+                        const latest = depManager.getCurrentInstance(depInstanceKey) ?? loaded;
+                        return await depManager.loader.executeTool(latest, prop, params || {});
+                    }
+                    finally {
+                        untrackExecution(depExecKey);
+                    }
                 };
             }
             // Bind methods to current instance so `this` resolves correctly
@@ -1107,6 +1333,15 @@ async function handleRequest(request, socket) {
     if (request.type === 'ping') {
         return { type: 'pong', id: request.id };
     }
+    // Reject new commands during shutdown (allow ping for health checks)
+    if (isShuttingDown && request.type !== 'shutdown') {
+        return {
+            type: 'error',
+            id: request.id,
+            error: 'Daemon is shutting down',
+            suggestion: 'Retry after the daemon restarts',
+        };
+    }
     if (request.type === 'status') {
         let totalSessions = 0;
         for (const sm of sessionManagers.values()) {
@@ -1403,9 +1638,16 @@ async function handleRequest(request, socket) {
                 suggestion: 'Include photonName in the request payload',
             };
         }
-
+        // Generate IPC job ID with workingDir hash to prevent cross-project collisions
+        const dirHash = request.workingDir
+            ? crypto.createHash('sha256').update(request.workingDir).digest('hex').slice(0, 8)
+            : '';
+        const ipcJobId = dirHash
+            ? `${photonName}:${dirHash}:ipc:${request.jobId}`
+            : `${photonName}:ipc:${request.jobId}`;
+        const existing = scheduledJobs.get(ipcJobId);
         const job = {
-            id:
+            id: ipcJobId,
             method: request.method,
             args: request.args,
             cron: request.cron,
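
Concretely, the new ID scheme prefixes an 8-character slice of the SHA-256 hash of the working directory, so the same photon scheduled from two projects no longer collides. All values below are illustrative:

import crypto from 'node:crypto';

const workingDir = '/home/alice/project-a';
const dirHash = crypto.createHash('sha256').update(workingDir).digest('hex').slice(0, 8);
// e.g. dirHash === '3f2b9c01'; a second project yields a different prefix,
// so 'my-photon:3f2b9c01:ipc:<uuid>' and 'my-photon:9d04e7aa:ipc:<uuid>' coexist
console.log(`my-photon:${dirHash}:ipc:123e4567-e89b-12d3-a456-426614174000`);
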
@@ -1416,6 +1658,9 @@ async function handleRequest(request, socket) {
             workingDir: request.workingDir,
         };
         const scheduled = scheduleJob(job);
+        if (scheduled) {
+            persistIpcSchedule(job);
+        }
         return {
             type: 'result',
             id: request.id,
@@ -1428,14 +1673,42 @@ async function handleRequest(request, socket) {
     // Handle job unscheduling
     if (request.type === 'unschedule') {
         const jobId = request.jobId;
-
-
+        // Try exact match first, then look for IPC-prefixed version
+        let actualJobId = jobId;
+        if (!scheduledJobs.has(jobId)) {
+            // Search for IPC-prefixed job
+            for (const key of scheduledJobs.keys()) {
+                if (key.endsWith(`:ipc:${jobId}`)) {
+                    actualJobId = key;
+                    break;
+                }
+            }
+        }
+        const job = scheduledJobs.get(actualJobId);
+        const unscheduled = unscheduleJob(actualJobId);
+        if (unscheduled && job) {
+            deletePersistedIpcSchedule(actualJobId, job.photonName);
+        }
+        return {
+            type: 'result',
+            id: request.id,
+            success: true,
+            data: { unscheduled, jobId: actualJobId },
+        };
     }
     // Handle list jobs
     if (request.type === 'list_jobs') {
         const jobs = Array.from(scheduledJobs.values());
         return { type: 'result', id: request.id, success: true, data: { jobs } };
     }
+    if (request.type === 'get_circuit_health') {
+        // Aggregate circuit breaker states from all loaded photon loaders
+        const circuits = {};
+        for (const manager of sessionManagers.values()) {
+            Object.assign(circuits, manager.loader.getCircuitHealth());
+        }
+        return { type: 'result', id: request.id, success: true, data: { circuits } };
+    }
     // Handle command execution
     if (request.type === 'command') {
         if (!request.method) {
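
The new get_circuit_health request appears to ride the daemon's newline-delimited JSON socket protocol (the shutdown broadcast later in this diff uses JSON.stringify(...) + '\n' framing). dist/daemon/client.js gained its own wrapper in this release (+54 lines, not shown here); a hedged sketch of querying the daemon over the raw socket instead:

import net from 'node:net';

// Hypothetical raw-socket query; the published client wraps this differently.
function getCircuitHealth(socketPath) {
    return new Promise((resolve, reject) => {
        const client = net.createConnection(socketPath);
        let buffer = '';
        client.on('connect', () => {
            client.write(JSON.stringify({ type: 'get_circuit_health', id: 'probe-1' }) + '\n');
        });
        client.on('data', (chunk) => {
            buffer += chunk.toString();
            const nl = buffer.indexOf('\n');
            if (nl === -1)
                return; // wait for a complete line
            const response = JSON.parse(buffer.slice(0, nl));
            client.end();
            if (response.success)
                resolve(response.data.circuits);
            else
                reject(new Error(response.error));
        });
        client.on('error', reject);
    });
}
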
@@ -1705,7 +1978,14 @@ async function handleRequest(request, socket) {
         // Snapshot state before execution for JSON Patch diffing
         const preSnapshot = await snapshotState(targetInst, photonName);
         const startTime = Date.now();
-
+        trackExecution(cmdKey);
+        let result;
+        try {
+            result = await sessionManager.loader.executeTool(targetInst, request.method, request.args || {}, { outputHandler });
+        }
+        finally {
+            untrackExecution(cmdKey);
+        }
         const durationMs = Date.now() - startTime;
         setPromptHandler(null);
         logger.info('Request completed', {
@@ -1791,7 +2071,14 @@ async function handleRequest(request, socket) {
         // Snapshot state before execution for JSON Patch diffing
         const preSnapshot = await snapshotState(session.instance, photonName);
         const startTime = Date.now();
-
+        trackExecution(cmdKey);
+        let result;
+        try {
+            result = await sessionManager.loader.executeTool(session.instance, request.method, request.args || {}, { outputHandler });
+        }
+        finally {
+            untrackExecution(cmdKey);
+        }
         const durationMs = Date.now() - startTime;
         setPromptHandler(null);
         logger.info('Request completed', {
@@ -2583,8 +2870,29 @@ function watchStateDir(workingDir) {
 // HOT RELOAD
 // ════════════════════════════════════════════════════════════════════════════════
 async function reloadPhoton(photonName, newPhotonPath, workingDir) {
+    const key = compositeKey(photonName, workingDir);
+    // Reload mutex: prevent concurrent reloads for the same photon
+    // (format-on-save can trigger two rapid file change events)
+    const existing = reloadMutex.get(key);
+    if (existing) {
+        logger.debug('Reload already in progress, waiting...', { photonName, key });
+        await existing;
+    }
+    let mutexResolve;
+    const mutexPromise = new Promise((resolve) => {
+        mutexResolve = resolve;
+    });
+    reloadMutex.set(key, mutexPromise);
+    try {
+        return await doReloadPhoton(photonName, newPhotonPath, workingDir, key);
+    }
+    finally {
+        reloadMutex.delete(key);
+        mutexResolve();
+    }
+}
+async function doReloadPhoton(photonName, newPhotonPath, workingDir, key) {
     try {
-        const key = compositeKey(photonName, workingDir);
         logger.info('Hot-reloading photon', { photonName, key, path: newPhotonPath });
         // If running in a worker, delegate reload to the worker
         if (workerManager.has(key)) {
@@ -2596,6 +2904,7 @@ async function reloadPhoton(photonName, newPhotonPath, workingDir) {
                 worker: true,
             });
             logger.info('Worker photon reloaded', { photonName });
+            workerManager.resetCrashHistory(key);
         }
         else {
             publishToChannel(`system:${photonName}`, {
@@ -2638,6 +2947,30 @@ async function reloadPhoton(photonName, newPhotonPath, workingDir) {
             });
             return { success: false, error: errorMessage };
         }
+        // Drain: wait for in-flight executions to complete before swapping instances
+        const DRAIN_TIMEOUT_MS = 2000;
+        const tracker = activeExecutions.get(key);
+        if (tracker && tracker.count > 0) {
+            logger.info('Draining in-flight executions before reload', {
+                photonName,
+                activeCount: tracker.count,
+            });
+            await Promise.race([
+                new Promise((resolve) => {
+                    tracker.drainResolve = resolve;
+                }),
+                new Promise((resolve) => setTimeout(() => {
+                    logger.warn('Drain timeout, proceeding with reload', {
+                        photonName,
+                        activeCount: tracker.count,
+                        timeoutMs: DRAIN_TIMEOUT_MS,
+                    });
+                    resolve();
+                }, DRAIN_TIMEOUT_MS)),
+            ]);
+            // Clean up drain resolve if it was set but not called
+            tracker.drainResolve = undefined;
+        }
         const sessions = sessionManager.getSessions();
         let updatedCount = 0;
         for (const session of sessions) {
@@ -2891,7 +3224,11 @@ function startupWatchPhotons() {
                 const manager = await getOrCreateSessionManager(p.name, p.path);
                 if (manager) {
                     await manager.getOrLoadInstance('');
-                    logger.info('Eager-loaded lifecycle photon', {
+                    logger.info('Eager-loaded lifecycle photon', {
+                        name: p.name,
+                        photonPath: p.path,
+                        ownerPid: process.pid,
+                    });
                 }
             }
             catch (err) {
@@ -2903,6 +3240,13 @@ function startupWatchPhotons() {
         }
     };
     setTimeout(() => {
+        if (!daemonOwnershipConfirmed) {
+            logger.warn('Skipping eager lifecycle load before exclusive ownership confirmation', {
+                socketPath,
+                currentPid: process.pid,
+            });
+            return;
+        }
         eagerLoad().catch(() => { });
     }, 1000);
 }
@@ -2944,6 +3288,7 @@ function startupWatchPhotons() {
 function startServer() {
     const server = net.createServer((socket) => {
         logger.info('Client connected');
+        connectedSockets.add(socket);
         let buffer = '';
         socket.on('data', (chunk) => {
             void (async () => {
@@ -2975,18 +3320,26 @@ function startServer() {
         });
         socket.on('end', () => {
             logger.info('Client disconnected');
+            connectedSockets.delete(socket);
             cleanupSocketSubscriptions(socket);
         });
         socket.on('error', (error) => {
             logger.warn('Socket error', { error: getErrorMessage(error) });
+            connectedSockets.delete(socket);
            cleanupSocketSubscriptions(socket);
         });
         socket.on('close', () => {
+            connectedSockets.delete(socket);
             cleanupSocketSubscriptions(socket);
         });
     });
+    daemonServer = server;
     server.listen(socketPath, () => {
-        logger.info('Global Photon daemon listening', {
+        logger.info('Global Photon daemon listening', {
+            socketPath,
+            pid: process.pid,
+            ownerPid: process.pid,
+        });
     });
     server.on('error', (error) => {
         logger.error('Server error', { error: getErrorMessage(error) });
@@ -3008,88 +3361,209 @@ function startServer() {
         });
     });
 }
-function
-
-if (
-
+async function claimExclusiveOwnership() {
+    const owner = readOwnerRecord(ownerFile);
+    if (owner && owner.socketPath === socketPath && owner.pid !== process.pid) {
+        if (isPidAlive(owner.pid)) {
+            logger.warn('Sibling daemon detected for socket', {
+                socketPath,
+                currentPid: process.pid,
+                ownerPid: owner.pid,
+                action: 'terminate-stale-owner',
+            });
+            try {
+                process.kill(owner.pid, 'SIGTERM');
+            }
+            catch {
+                // Ignore races with process exit
+            }
+            const exited = await waitForPidExit(owner.pid, 5000);
+            if (!exited) {
+                logger.error('Failed to gain exclusive daemon ownership', {
+                    socketPath,
+                    currentPid: process.pid,
+                    ownerPid: owner.pid,
+                    action: 'startup-rejected',
+                });
+                throw new Error(`Could not terminate sibling daemon ${owner.pid}`);
+            }
+        }
+        else {
+            logger.warn('Removing stale daemon owner record', {
+                socketPath,
+                currentPid: process.pid,
+                ownerPid: owner.pid,
+            });
+        }
+        removeOwnerRecord(ownerFile);
     }
-
-
-
-
+    if (process.platform !== 'win32' && fs.existsSync(socketPath)) {
+        const responsive = await isSocketResponsive(socketPath);
+        if (!responsive) {
+            logger.warn('Removing stale daemon socket before listen', {
+                socketPath,
+                currentPid: process.pid,
+            });
+            try {
+                fs.unlinkSync(socketPath);
+            }
+            catch {
+                // Ignore races with other cleanup
+            }
+        }
     }
-
-
-
-
-
-
-
-
-
-
-
-
-
-    for (const photonPath of fileWatchers.keys()) {
-        unwatchPhotonFile(photonPath);
-    }
-    // Clean up poll-based watchers (bun fallback)
-    for (const timer of pollTimers) {
-        clearInterval(timer);
-    }
-    pollTimers.clear();
-    // Terminate all worker threads
-    void workerManager.terminateAll().catch((err) => {
-        logger.warn('Error terminating workers during shutdown', { error: getErrorMessage(err) });
+    writeOwnerRecord(ownerFile, {
+        pid: process.pid,
+        socketPath,
+        claimedAt: Date.now(),
+    });
+    fs.writeFileSync(pidFile, process.pid.toString());
+    daemonOwnershipConfirmed = true;
+    logger.info('Daemon ownership claimed', {
+        socketPath,
+        currentPid: process.pid,
+        ownerPid: process.pid,
+        pidFile,
+        ownerFile,
     });
-
-
-
-
-
-
-
-    //
-
-
-
+}
+function shutdown() {
+    // Guard against multiple shutdown calls (e.g. SIGTERM + SIGINT in quick succession)
+    if (isShuttingDown)
+        return;
+    isShuttingDown = true;
+    logger.info('Shutting down global daemon');
+    // Step 1: Close the listener — stop accepting new connections
+    if (daemonServer) {
+        daemonServer.close();
+    }
+    // Step 2: Broadcast shutdown signal to all connected sockets
+    const shutdownMessage = JSON.stringify({
+        type: 'shutdown',
+        id: 'daemon-shutdown',
+        reason: 'daemon-shutting-down',
+    }) + '\n';
+    for (const socket of connectedSockets) {
         try {
-
-        const pidContent = fs.readFileSync(pidFile, 'utf-8').trim();
-        const filePid = parseInt(pidContent, 10);
-        if (!isNaN(filePid) && filePid !== process.pid) {
-            // PID file points to a different process — new daemon already started
-            weOwnSocket = false;
-            logger.info('Socket belongs to new daemon, skipping cleanup', {
-                ourPid: process.pid,
-                newPid: filePid,
-            });
-        }
+            socket.write(shutdownMessage);
         }
         catch {
-            //
-
-
+            // Socket may already be closed
+        }
+    }
+    // Step 3: Async cleanup with grace period for shutdown message flush
+    void (async () => {
+        // Give sockets 500ms to receive the shutdown message
+        await new Promise((resolve) => setTimeout(resolve, 500));
+        if (idleTimer) {
+            clearTimeout(idleTimer);
+        }
+        clearInterval(lockCleanupInterval);
+        clearInterval(staleMapCleanupInterval);
+        for (const timer of jobTimers.values()) {
+            clearTimeout(timer);
+        }
+        jobTimers.clear();
+        scheduledJobs.clear();
+        activeLocks.clear();
+        channelEventBuffers.clear();
+        eventLogSeq.clear();
+        stateKeysCache.clear();
+        // Resolve any pending prompts so promises don't hang
+        for (const [_id, pending] of pendingPrompts.entries()) {
+            pending.resolve(null);
+        }
+        pendingPrompts.clear();
+        socketPromptIds.clear();
+        // Close file watchers and debounce timers
+        for (const photonPath of fileWatchers.keys()) {
+            unwatchPhotonFile(photonPath);
+        }
+        // Clean up poll-based watchers (bun fallback)
+        for (const timer of pollTimers) {
+            clearInterval(timer);
         }
-
+        pollTimers.clear();
+        // Terminate all worker threads FIRST (before session destroy,
+        // so @photon deps in workers still respond during onShutdown)
+        try {
+            await workerManager.terminateAll();
+        }
+        catch (err) {
+            logger.warn('Error terminating workers during shutdown', { error: getErrorMessage(err) });
+        }
+        // Gracefully destroy all session managers (calls onShutdown on instances)
+        await Promise.allSettled(Array.from(sessionManagers.values()).map((m) => m.destroyGraceful()));
+        sessionManagers.clear();
+        if (webhookServer) {
+            webhookServer.close();
+        }
+        if (daemonOwnershipConfirmed) {
+            const owner = readOwnerRecord(ownerFile);
+            if (owner?.pid === process.pid && owner.socketPath === socketPath) {
+                removeOwnerRecord(ownerFile, process.pid);
+            }
             try {
-                fs.
+                const pidContent = fs.readFileSync(pidFile, 'utf-8').trim();
+                if (parseInt(pidContent, 10) === process.pid) {
+                    fs.unlinkSync(pidFile);
+                }
             }
             catch {
-                // Ignore
+                // Ignore missing pid file
             }
         }
-
-
+        // Only delete the socket if we still own it.
+        // After `daemon stop` + `daemon start`, a new daemon may have already created
+        // a new socket at this path. Deleting it would orphan the new daemon's listener.
+        if (fs.existsSync(socketPath) && process.platform !== 'win32') {
+            let weOwnSocket = true;
+            try {
+                const pidContent = fs.readFileSync(pidFile, 'utf-8').trim();
+                const filePid = parseInt(pidContent, 10);
+                if (!isNaN(filePid) && filePid !== process.pid) {
+                    // PID file points to a different process — new daemon already started
+                    weOwnSocket = false;
+                    logger.info('Socket belongs to new daemon, skipping cleanup', {
+                        ourPid: process.pid,
+                        newPid: filePid,
+                    });
+                }
+            }
+            catch {
+                // PID file missing (deleted by stop) — another process may own the socket now.
+                // If the socket still exists, it likely belongs to a new daemon. Don't delete.
+                weOwnSocket = false;
+            }
+            if (weOwnSocket) {
+                try {
+                    fs.unlinkSync(socketPath);
+                }
+                catch {
+                    // Ignore cleanup errors
+                }
+            }
+        }
+        process.exit(0);
+    })();
 }
 // Main execution
-(() => {
+void (async () => {
+    await claimExclusiveOwnership();
     startupWatchPhotons();
     startServer();
+    loadAllPersistedSchedules();
     startWebhookServer(WEBHOOK_PORT);
     startIdleTimer();
     startHealthMonitor();
-
+    // Notify photons that any locks from a prior daemon session are gone
+    publishToChannel('system:*', {
+        event: 'locks-reset',
+        reason: 'daemon-startup',
+        timestamp: Date.now(),
+    });
+})().catch((err) => {
+    logger.error('Daemon startup failed', { error: getErrorMessage(err) });
+    process.exit(1);
+});
 //# sourceMappingURL=server.js.map
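
With the shutdown path above, clients now get an explicit notice instead of a dropped connection: the daemon writes a { type: 'shutdown' } line to every connected socket, waits 500ms, then cleans up. The matching client-side handling lives in dist/daemon/client.js (+54 lines, not shown in this excerpt); a hedged sketch of what a consumer of that broadcast might do, where rejectAllPending and scheduleReconnect are placeholder callbacks:

import net from 'node:net';

function connectToDaemon(socketPath, { rejectAllPending, scheduleReconnect }) {
    const socket = net.createConnection(socketPath);
    let buffer = '';
    socket.on('data', (chunk) => {
        buffer += chunk.toString();
        let nl;
        while ((nl = buffer.indexOf('\n')) !== -1) {
            const message = JSON.parse(buffer.slice(0, nl));
            buffer = buffer.slice(nl + 1);
            if (message.type === 'shutdown') {
                // The daemon announced it is going away: fail pending requests
                // fast and reconnect once the replacement daemon is listening.
                rejectAllPending(new Error(`Daemon shutting down: ${message.reason}`));
                scheduleReconnect();
            }
        }
    });
    return socket;
}
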