@portel/photon 1.26.1 → 1.28.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. package/README.md +33 -0
  2. package/dist/auto-ui/beam/routes/api-daemon.d.ts +1 -0
  3. package/dist/auto-ui/beam/routes/api-daemon.d.ts.map +1 -1
  4. package/dist/auto-ui/beam/routes/api-daemon.js +35 -1
  5. package/dist/auto-ui/beam/routes/api-daemon.js.map +1 -1
  6. package/dist/beam-form.bundle.js +41 -1
  7. package/dist/beam-form.bundle.js.map +2 -2
  8. package/dist/beam.bundle.js +1661 -252
  9. package/dist/beam.bundle.js.map +4 -4
  10. package/dist/cli/commands/daemon.d.ts.map +1 -1
  11. package/dist/cli/commands/daemon.js +157 -0
  12. package/dist/cli/commands/daemon.js.map +1 -1
  13. package/dist/cli/commands/update.d.ts.map +1 -1
  14. package/dist/cli/commands/update.js +7 -8
  15. package/dist/cli/commands/update.js.map +1 -1
  16. package/dist/daemon/client.d.ts +1 -0
  17. package/dist/daemon/client.d.ts.map +1 -1
  18. package/dist/daemon/client.js +110 -23
  19. package/dist/daemon/client.js.map +1 -1
  20. package/dist/daemon/in-process-bridge.d.ts +29 -0
  21. package/dist/daemon/in-process-bridge.d.ts.map +1 -0
  22. package/dist/daemon/in-process-bridge.js +26 -0
  23. package/dist/daemon/in-process-bridge.js.map +1 -0
  24. package/dist/daemon/manager.d.ts +103 -1
  25. package/dist/daemon/manager.d.ts.map +1 -1
  26. package/dist/daemon/manager.js +313 -92
  27. package/dist/daemon/manager.js.map +1 -1
  28. package/dist/daemon/protocol.d.ts +1 -1
  29. package/dist/daemon/protocol.d.ts.map +1 -1
  30. package/dist/daemon/protocol.js +1 -0
  31. package/dist/daemon/protocol.js.map +1 -1
  32. package/dist/daemon/server.js +859 -38
  33. package/dist/daemon/server.js.map +1 -1
  34. package/dist/loader.d.ts.map +1 -1
  35. package/dist/loader.js +39 -13
  36. package/dist/loader.js.map +1 -1
  37. package/dist/sample-augmenter.d.ts +84 -0
  38. package/dist/sample-augmenter.d.ts.map +1 -0
  39. package/dist/sample-augmenter.js +164 -0
  40. package/dist/sample-augmenter.js.map +1 -0
  41. package/dist/shared/npm-registry.d.ts +30 -0
  42. package/dist/shared/npm-registry.d.ts.map +1 -0
  43. package/dist/shared/npm-registry.js +97 -0
  44. package/dist/shared/npm-registry.js.map +1 -0
  45. package/dist/version-notify.d.ts +5 -0
  46. package/dist/version-notify.d.ts.map +1 -1
  47. package/dist/version-notify.js +46 -23
  48. package/dist/version-notify.js.map +1 -1
  49. package/package.json +1 -1
  50. package/templates/cloudflare/worker.ts.template +94 -22
@@ -14,6 +14,7 @@ import * as fs from 'fs';
14
14
  import * as path from 'path';
15
15
  import * as os from 'os';
16
16
  import * as crypto from 'crypto';
17
+ import { spawnSync } from 'child_process';
17
18
  import { SessionManager } from './session-manager.js';
18
19
  import { transferHotReloadState } from './hot-reload-state.js';
19
20
  import { resolveWithGlobalFallback } from './session-resolver.js';
@@ -47,10 +48,24 @@ const pidFile = path.join(path.dirname(socketPath), 'daemon.pid');
47
48
  const ownerFile = getOwnerFilePath(socketPath);
48
49
  let daemonOwnershipConfirmed = false;
49
50
  async function isSocketResponsive(target) {
50
- if (process.platform === 'win32' || !fs.existsSync(target))
51
+ // Windows named pipes have no filesystem entry; skip the FS gate on
52
+ // win32 and let net.createConnection probe the pipe directly. The
53
+ // 'error' handler below resolves false on failure; the try/catch
54
+ // wrapper guards against sync throws.
55
+ const isPipe = process.platform === 'win32' && target.startsWith('\\\\.\\pipe\\');
56
+ if (!isPipe && !fs.existsSync(target))
51
57
  return false;
52
58
  return new Promise((resolve) => {
53
- const client = net.createConnection(target);
59
+ let client;
60
+ try {
61
+ // Bun can throw synchronously on a missing/unreachable unix socket
62
+ // before the 'error' listener attaches — TOCTOU vs. existsSync above.
63
+ client = net.createConnection(target);
64
+ }
65
+ catch {
66
+ resolve(false);
67
+ return;
68
+ }
54
69
  const timer = setTimeout(() => {
55
70
  client.destroy();
56
71
  resolve(false);
@@ -437,6 +452,19 @@ staleMapCleanupInterval.unref();
437
452
  */
438
453
  const scheduledJobs = new Map();
439
454
  const jobTimers = new Map();
455
+ /**
456
+ * Per-job count of consecutive failures whose error message points at a
457
+ * missing photon source file (ENOENT against a *.photon.ts path). When
458
+ * the count exceeds AUTO_SUPPRESS_THRESHOLD, the schedule is suppressed
459
+ * and unscheduled — see runJob's catch block. Reset on any successful
460
+ * run or non-ENOENT failure.
461
+ *
462
+ * Without this, a deleted photon file leaves its scheduled methods
463
+ * firing forever (every minute / hour / etc.) — each run failing with
464
+ * ENOENT, log spam, no recovery, no operator signal beyond the noise.
465
+ */
466
+ const photonFileMissingFailures = new Map();
467
+ const AUTO_SUPPRESS_THRESHOLD = 3;
440
468
  // parseCron moved to ./cron.ts so the boot loader and tests can reuse it.
441
469
  function scheduleJob(job) {
442
470
  const { isValid, nextRun } = parseCron(job.cron);
@@ -471,7 +499,13 @@ async function runJob(jobId) {
471
499
  const job = scheduledJobs.get(jobId);
472
500
  if (!job)
473
501
  return;
474
- const key = compositeKey(job.photonName, job.workingDir);
502
+ // `key` may be recomputed after the legacy-base self-heal below — the
503
+ // initial value reflects the schedule's stored workingDir (often undefined
504
+ // for legacy ScheduleProvider files), and the post-pin value reflects the
505
+ // resolved owning base. trackExecution/untrackExecution must run against
506
+ // the post-pin key so hot-reload drain sees the in-flight execution under
507
+ // the same key that other code paths use for the photon. Codex P3.
508
+ let key = compositeKey(job.photonName, job.workingDir);
475
509
  // Phantom prune: when a ScheduleProvider-sourced job's backing file
476
510
  // has been deleted (e.g. `this.schedule.cancel()` ran the unlink
477
511
  // but the in-memory registration survived daemon restart), stop
@@ -515,9 +549,64 @@ async function runJob(jobId) {
515
549
  }
516
550
  }
517
551
  if (!sessionManager) {
518
- logger.warn('Cannot run job - photon not initialized', { jobId, photon: job.photonName });
519
- scheduleJob(job); // Reschedule anyway
520
- return;
552
+ // Ghost-schedule guard: when the photon source is gone from every known
553
+ // base, the lazy-load can never succeed. Rescheduling here would loop
554
+ // every interval forever. A reinstall in flight may briefly fail this
555
+ // probe — the lost tick is acceptable because photons rebuild their
556
+ // schedules on first invocation.
557
+ const probe = probePhotonSource(job.photonName, job.workingDir);
558
+ if (!probe.resolved) {
559
+ logger.warn('Dropping orphan scheduled job — photon source missing', {
560
+ jobId,
561
+ photon: job.photonName,
562
+ sourceFile: job.sourceFile,
563
+ });
564
+ if (job.sourceFile) {
565
+ try {
566
+ fs.unlinkSync(job.sourceFile);
567
+ }
568
+ catch {
569
+ // File may have been removed by a concurrent cleanup
570
+ }
571
+ }
572
+ unscheduleJob(jobId);
573
+ return;
574
+ }
575
+ // Legacy schedule with no workingDir but the photon lives in a
576
+ // registered non-default base: pin workingDir and retry the lazy-load
577
+ // once. Without this self-heal, the probe says OK but the next
578
+ // `getOrCreateSessionManager` (which only checks workingDir +
579
+ // defaultBase) returns null again and the schedule reschedules forever.
580
+ if (!job.workingDir && probe.ownerBase) {
581
+ job.workingDir = probe.ownerBase;
582
+ // Recompute the execution key against the resolved base so
583
+ // trackExecution/untrackExecution and any consumer that looks up the
584
+ // base-scoped key (notably hot-reload drain) see this run under the
585
+ // same identity as the rest of the photon's lifecycle.
586
+ key = compositeKey(job.photonName, job.workingDir);
587
+ logger.info('Pinned scheduled job to resolved base — retrying lazy-load', {
588
+ jobId,
589
+ photon: job.photonName,
590
+ resolvedBase: probe.ownerBase,
591
+ });
592
+ try {
593
+ sessionManager =
594
+ (await getOrCreateSessionManager(job.photonName, job.photonPath, job.workingDir)) ??
595
+ undefined;
596
+ }
597
+ catch (err) {
598
+ logger.warn('Lazy-load retry after base pin failed', {
599
+ jobId,
600
+ photon: job.photonName,
601
+ error: getErrorMessage(err),
602
+ });
603
+ }
604
+ }
605
+ if (!sessionManager) {
606
+ logger.warn('Cannot run job - photon not initialized', { jobId, photon: job.photonName });
607
+ scheduleJob(job); // Reschedule anyway
608
+ return;
609
+ }
521
610
  }
522
611
  logger.info('Running scheduled job', { jobId, method: job.method, photon: job.photonName });
523
612
  trackExecution(key);
@@ -525,6 +614,7 @@ async function runJob(jobId) {
525
614
  let status = 'success';
526
615
  let errorMessage;
527
616
  let result;
617
+ let suppressedThisRun = false;
528
618
  try {
529
619
  const session = await sessionManager.getOrCreateSession('scheduler', 'scheduler');
530
620
  result = await sessionManager.loader.executeTool(session.instance, job.method, job.args || {});
@@ -542,6 +632,7 @@ async function runJob(jobId) {
542
632
  runCount: job.runCount,
543
633
  });
544
634
  logger.info('Job completed', { jobId, method: job.method, runCount: job.runCount });
635
+ photonFileMissingFailures.delete(jobId);
545
636
  }
546
637
  catch (error) {
547
638
  status = 'error';
@@ -553,6 +644,53 @@ async function runJob(jobId) {
553
644
  method: job.method,
554
645
  error: errorMessage,
555
646
  });
647
+ // Auto-suppress when the photon source file has clearly vanished.
648
+ // Without this, a deleted .photon.ts keeps its scheduled methods
649
+ // firing forever — every fire failing with ENOENT, log spam, no
650
+ // recovery, no operator signal beyond the noise.
651
+ const isPhotonFileMissing = errorMessage.includes('ENOENT') &&
652
+ (errorMessage.includes('.photon.ts') ||
653
+ (job.photonPath != null && errorMessage.includes(job.photonPath)) ||
654
+ (job.sourceFile != null && errorMessage.includes(job.sourceFile)));
655
+ if (isPhotonFileMissing) {
656
+ const count = (photonFileMissingFailures.get(jobId) ?? 0) + 1;
657
+ photonFileMissingFailures.set(jobId, count);
658
+ if (count >= AUTO_SUPPRESS_THRESHOLD) {
659
+ // Cross-check with the authoritative source probe before suppressing —
660
+ // a transient ENOENT during reinstall shouldn't kill the schedule.
661
+ const probe = probePhotonSource(job.photonName, job.workingDir);
662
+ if (!probe.resolved) {
663
+ const suppressBase = path.resolve(job.workingDir ?? probe.ownerBase ?? getDefaultContext().baseDir);
664
+ logger.error('Auto-suppressing schedule: photon source missing for ' +
665
+ `${count} consecutive runs. Re-enable with \`photon ps enable ${job.photonName}:${job.method}\` ` +
666
+ 'after the source file is restored.', {
667
+ jobId,
668
+ photon: job.photonName,
669
+ method: job.method,
670
+ suppressBase,
671
+ });
672
+ try {
673
+ writeSuppressedEntry(suppressBase, job.photonName, job.method);
674
+ }
675
+ catch (writeErr) {
676
+ logger.warn('Failed to persist suppression entry', {
677
+ error: getErrorMessage(writeErr),
678
+ });
679
+ }
680
+ unscheduleJob(jobId);
681
+ suppressedThisRun = true;
682
+ }
683
+ else {
684
+ // Source reappeared between failures — reset counter.
685
+ photonFileMissingFailures.set(jobId, 0);
686
+ }
687
+ }
688
+ }
689
+ else {
690
+ // Different failure class — reset the photon-missing counter so
691
+ // unrelated noise doesn't accumulate toward suppression.
692
+ photonFileMissingFailures.delete(jobId);
693
+ }
556
694
  }
557
695
  finally {
558
696
  untrackExecution(key);
@@ -566,6 +704,8 @@ async function runJob(jobId) {
566
704
  outputPreview: status === 'success' ? previewResult(result) : undefined,
567
705
  }, job.workingDir);
568
706
  }
707
+ if (suppressedThisRun)
708
+ return;
569
709
  scheduleJob(job);
570
710
  }
571
711
  function unscheduleJob(jobId) {
@@ -578,8 +718,52 @@ function unscheduleJob(jobId) {
578
718
  if (existed) {
579
719
  logger.info('Job unscheduled', { jobId });
580
720
  }
721
+ // Forget any auto-suppress counter for this slot — if the same key
722
+ // is later re-scheduled, the count starts fresh.
723
+ photonFileMissingFailures.delete(jobId);
581
724
  return existed;
582
725
  }
726
+ /**
727
+ * Append a suppressed entry to a base's active-schedules file so the
728
+ * daemon won't re-register the schedule at next boot. Idempotent on the
729
+ * (photon, method) pair.
730
+ */
731
+ function writeSuppressedEntry(baseDir, photon, method) {
732
+ const file = readActiveSchedulesFile(baseDir);
733
+ const suppressed = file.suppressed ?? [];
734
+ if (!suppressed.some((s) => s.photon === photon && s.method === method)) {
735
+ suppressed.push({ photon, method, suppressedAt: new Date().toISOString() });
736
+ file.suppressed = suppressed;
737
+ }
738
+ // Drop any active-row that matches — no point keeping it around.
739
+ file.active = file.active.filter((e) => !(e.photon === photon && e.method === method));
740
+ writeActiveSchedulesFile(baseDir, file);
741
+ }
742
+ /**
743
+ * Evict a scheduled job by its raw ID. Used by both the IPC `unschedule`
744
+ * handler and the in-process bridge that the loader uses when it's
745
+ * running inside the daemon. Returns true iff a job was actually removed.
746
+ */
747
+ function evictScheduledJobByRawId(rawJobId) {
748
+ const jobId = asScheduleKey(rawJobId);
749
+ let actualJobId = jobId;
750
+ if (!scheduledJobs.has(jobId)) {
751
+ // The loader hands us a bare UUID-shaped ID; the registry stores
752
+ // it under a `<base>::<photon>:ipc:<uuid>` key, so probe both.
753
+ for (const key of scheduledJobs.keys()) {
754
+ if (key.endsWith(`:ipc:${jobId}`)) {
755
+ actualJobId = key;
756
+ break;
757
+ }
758
+ }
759
+ }
760
+ const job = scheduledJobs.get(actualJobId);
761
+ const removed = unscheduleJob(actualJobId);
762
+ if (removed && job) {
763
+ deletePersistedIpcSchedule(actualJobId, job.photonName);
764
+ }
765
+ return removed;
766
+ }
583
767
  /**
584
768
  * Resolve the canonical schedules dir for a photon under the Option B
585
769
  * contract: {workingDir || default baseDir}/.data/{photonName}/schedules/.
@@ -598,6 +782,131 @@ function unscheduleJob(jobId) {
598
782
  function resolveScheduleDir(photonName, workingDir) {
599
783
  return getPhotonSchedulesDir('', photonName, workingDir || getDefaultContext().baseDir);
600
784
  }
785
+ const PHOTON_SOURCE_EXTENSIONS = ['.photon.ts', '.photon.tsx', '.photon.js'];
786
+ const HOST_DISABLE_MARKER = '.photon-no-host';
787
+ /**
788
+ * A base is "host-disabled" when a `.photon-no-host` marker file exists at
789
+ * its root. The daemon will not load ScheduleProvider files, auto-register
790
+ * `@scheduled` annotations, or wire `@webhook` routes for that base.
791
+ *
792
+ * Manual `photon run` on a photon under that base still works — host mode
793
+ * only suppresses background activation. Used to keep one machine the sole
794
+ * scheduler/runner across a multi-host setup that shares a `~/Projects`
795
+ * tree (e.g. via Syncthing): place the marker at the base root on every
796
+ * host that should be quiet.
797
+ */
798
+ function isHostDisabledBase(basePath) {
799
+ try {
800
+ return fs.existsSync(path.join(basePath, HOST_DISABLE_MARKER));
801
+ }
802
+ catch {
803
+ return false;
804
+ }
805
+ }
806
+ /**
807
+ * Synchronous probe for whether a photon's source file is resolvable.
808
+ * Mirrors the resolution semantics used by `runJob` and the photon-core
809
+ * async `resolvePhotonPath`: namespace-qualified names (`team:foo`) only
810
+ * search `<base>/team/foo.photon.{ts,tsx,js}`; unqualified names search
811
+ * flat files first, then one-level namespace subdirs.
812
+ *
813
+ * Scoping rules:
814
+ * - When `baseHint` is supplied (per-base layout: schedule has a
815
+ * `workingDir`), probe ONLY that base + the default base. This is the
816
+ * resolver runJob actually uses, so a ghost detected here is exactly a
817
+ * schedule whose lazy-load would fail at fire time. Do NOT walk every
818
+ * registered base — a legitimate copy of `claw` at `~/Projects/claw/`
819
+ * must not mask stale claw schedule files left in
820
+ * `~/Projects/kith/.data/claw/schedules/`.
821
+ * - When `baseHint` is undefined (legacy `~/.photon/schedules/<photon>/`
822
+ * layout — no `workingDir` recorded on the task), the schedule could
823
+ * belong to a photon in any registered base. Walk every base in the
824
+ * active registry. This avoids deleting valid legacy ScheduleProvider
825
+ * files for photons that live in a non-default PHOTON_DIR (Codex P2
826
+ * finding).
827
+ *
828
+ * Sync so cron tick handlers and the boot schedule loader can call it
829
+ * without an await.
830
+ */
831
+ function probePhotonSource(photonName, baseHint) {
832
+ // Parse `namespace:name` format the same way photon-core's resolvePath does.
833
+ // Without this, a namespaced photon like `team:foo` would be probed as the
834
+ // literal string `team:foo.photon.ts` (which never exists) and its valid
835
+ // schedule file under `<base>/team/foo.photon.ts` would be unlinked at
836
+ // boot. Codex P2 finding.
837
+ const colonIdx = photonName.indexOf(':');
838
+ let namespace;
839
+ let bareName = photonName;
840
+ if (colonIdx !== -1) {
841
+ namespace = photonName.slice(0, colonIdx);
842
+ bareName = photonName.slice(colonIdx + 1);
843
+ }
844
+ const candidates = [];
845
+ let defaultBase;
846
+ try {
847
+ defaultBase = path.resolve(getDefaultContext().baseDir);
848
+ }
849
+ catch {
850
+ /* default context may be missing in early boot */
851
+ }
852
+ if (baseHint) {
853
+ candidates.push(path.resolve(baseHint));
854
+ if (defaultBase && !candidates.includes(defaultBase))
855
+ candidates.push(defaultBase);
856
+ }
857
+ else {
858
+ // Legacy layout: no owning base recorded. The photon could live in
859
+ // any registered base — walk all of them before declaring missing.
860
+ if (defaultBase)
861
+ candidates.push(defaultBase);
862
+ try {
863
+ for (const b of listActiveBases()) {
864
+ const resolved = path.resolve(b.path);
865
+ if (!candidates.includes(resolved))
866
+ candidates.push(resolved);
867
+ }
868
+ }
869
+ catch {
870
+ /* registry may not be initialized */
871
+ }
872
+ }
873
+ for (const base of candidates) {
874
+ if (namespace) {
875
+ // Namespace-qualified: only `<base>/<namespace>/<name>.photon.{ts,tsx,js}`.
876
+ for (const ext of PHOTON_SOURCE_EXTENSIONS) {
877
+ if (fs.existsSync(path.join(base, namespace, `${bareName}${ext}`))) {
878
+ return { resolved: true, ownerBase: base };
879
+ }
880
+ }
881
+ continue;
882
+ }
883
+ // Unqualified: flat first, then one-level namespace subdirs.
884
+ for (const ext of PHOTON_SOURCE_EXTENSIONS) {
885
+ if (fs.existsSync(path.join(base, `${bareName}${ext}`))) {
886
+ return { resolved: true, ownerBase: base };
887
+ }
888
+ }
889
+ let entries;
890
+ try {
891
+ entries = fs.readdirSync(base, { withFileTypes: true });
892
+ }
893
+ catch {
894
+ continue;
895
+ }
896
+ for (const entry of entries) {
897
+ if (!entry.isDirectory())
898
+ continue;
899
+ if (entry.name.startsWith('.') || entry.name.startsWith('_'))
900
+ continue;
901
+ for (const ext of PHOTON_SOURCE_EXTENSIONS) {
902
+ if (fs.existsSync(path.join(base, entry.name, `${bareName}${ext}`))) {
903
+ return { resolved: true, ownerBase: base };
904
+ }
905
+ }
906
+ }
907
+ }
908
+ return { resolved: false };
909
+ }
601
910
  /**
602
911
  * Root of the legacy schedules tree (pre-Option-B layout).
603
912
  * Honors `PHOTON_SCHEDULES_DIR` only for the one-release deprecation
@@ -729,6 +1038,56 @@ function loadDaemonSchedulesFromDir(schedulesPath, ttlMs, photonNameHint, workin
729
1038
  return loadPersistedSchedulesFromDir(schedulesPath, ttlMs, photonNameHint, workingDirHint, {
730
1039
  alreadyRegistered: (id) => scheduledJobs.has(asScheduleKey(id)),
731
1040
  register: (job) => {
1041
+ // Drop schedules whose photon source is gone before they ever enter the
1042
+ // cron map. Without this, every daemon boot reloads ghost schedules
1043
+ // from `<base>/.data/<photon>/schedules/*.json`; long-cron ghosts then
1044
+ // sit dormant in memory and resurrect on the next restart even after
1045
+ // the runJob orphan probe would have caught them.
1046
+ const probe = probePhotonSource(job.photonName, job.workingDir);
1047
+ if (!probe.resolved) {
1048
+ logger.warn('Dropping persisted schedule — photon source missing', {
1049
+ jobId: job.id,
1050
+ photon: job.photonName,
1051
+ workingDir: job.workingDir,
1052
+ sourceFile: job.sourceFile,
1053
+ });
1054
+ if (job.sourceFile) {
1055
+ try {
1056
+ fs.unlinkSync(job.sourceFile);
1057
+ }
1058
+ catch {
1059
+ // File may have been removed concurrently
1060
+ }
1061
+ }
1062
+ return false;
1063
+ }
1064
+ // Legacy schedules carry no `workingDir`. Pin the resolved base so
1065
+ // runJob's lazy-load (which only checks workingDir + defaultBase, not
1066
+ // every registered base) can find the photon at fire time. Without
1067
+ // this, the probe says "OK" but `getOrCreateSessionManager` returns
1068
+ // null for photons living in non-default bases — back to the
1069
+ // reschedule loop. Codex P2 finding.
1070
+ if (!job.workingDir && probe.ownerBase) {
1071
+ job.workingDir = probe.ownerBase;
1072
+ logger.info('Pinned legacy schedule to resolved base', {
1073
+ jobId: job.id,
1074
+ photon: job.photonName,
1075
+ resolvedBase: probe.ownerBase,
1076
+ });
1077
+ }
1078
+ // Host mode: the per-base loader already gates by isHostDisabledBase
1079
+ // before calling probePhotonSource, but the legacy flat-root path
1080
+ // (~/.photon/schedules/<photon>/*.json) walks ALL bases via probe and
1081
+ // could resolve to a host-disabled base. Drop here so legacy files
1082
+ // can't reanimate a quiet machine.
1083
+ if (job.workingDir && isHostDisabledBase(job.workingDir)) {
1084
+ logger.info('Skipping persisted schedule — owning base is host-disabled', {
1085
+ jobId: job.id,
1086
+ photon: job.photonName,
1087
+ base: job.workingDir,
1088
+ });
1089
+ return false;
1090
+ }
732
1091
  // Seed in-memory lastRun from persisted lastExecutionAt so the post-run
733
1092
  // reschedule path keeps updating the same field instead of starting at 0.
734
1093
  const scheduledJob = job;
@@ -927,9 +1286,18 @@ function loadAllPersistedSchedules() {
927
1286
  // Always include the default base even if the registry hasn't recorded it yet.
928
1287
  const defaultBase = getDefaultContext().baseDir;
929
1288
  if (!bases.some((b) => b.path === path.resolve(defaultBase))) {
930
- scanBaseDataRoot(defaultBase);
1289
+ if (isHostDisabledBase(defaultBase)) {
1290
+ logger.info('Skipping schedule load — host-disabled base', { base: defaultBase });
1291
+ }
1292
+ else {
1293
+ scanBaseDataRoot(defaultBase);
1294
+ }
931
1295
  }
932
1296
  for (const base of bases) {
1297
+ if (isHostDisabledBase(base.path)) {
1298
+ logger.info('Skipping schedule load — host-disabled base', { base: base.path });
1299
+ continue;
1300
+ }
933
1301
  scanBaseDataRoot(base.path);
934
1302
  }
935
1303
  // Legacy location for schedules that predate the per-base layout.
@@ -1123,6 +1491,12 @@ async function discoverProactiveMetadataAtBoot() {
1123
1491
  // Lazy import to keep daemon startup cheap for users with no bases.
1124
1492
  const core = await import('@portel/photon-core');
1125
1493
  for (const basePath of baseCandidates) {
1494
+ if (isHostDisabledBase(basePath)) {
1495
+ logger.info('Skipping proactive metadata discovery — host-disabled base', {
1496
+ base: basePath,
1497
+ });
1498
+ continue;
1499
+ }
1126
1500
  let photons;
1127
1501
  try {
1128
1502
  photons = await core.listPhotonFilesWithNamespace(basePath);
@@ -1189,6 +1563,10 @@ async function discoverProactiveMetadataAtBoot() {
1189
1563
  function watchBaseForProactiveMetadata(basePath, _isDefaultBase) {
1190
1564
  if (baseDirWatchers.has(basePath))
1191
1565
  return;
1566
+ if (isHostDisabledBase(basePath)) {
1567
+ logger.info('Skipping proactive metadata watcher — host-disabled base', { base: basePath });
1568
+ return;
1569
+ }
1192
1570
  try {
1193
1571
  if (!fs.existsSync(basePath))
1194
1572
  return;
@@ -1292,6 +1670,10 @@ function syncActiveSchedulesAtBoot() {
1292
1670
  let registered = 0;
1293
1671
  let missingRefs = 0;
1294
1672
  for (const basePath of bases) {
1673
+ if (isHostDisabledBase(basePath)) {
1674
+ logger.info('Skipping active-schedules sync — host-disabled base', { base: basePath });
1675
+ continue;
1676
+ }
1295
1677
  const file = readActiveSchedulesFile(basePath);
1296
1678
  let dirty = false;
1297
1679
  // One-time migration: seed the active list from the current
@@ -1340,6 +1722,36 @@ function syncActiveSchedulesAtBoot() {
1340
1722
  if (suppressedSet.has(`${entry.photon}:${entry.method}`))
1341
1723
  continue;
1342
1724
  const key = declaredKey(entry.photon, entry.method, basePath);
1725
+ // Manual entry: cron is embedded directly, no @scheduled declaration
1726
+ // required. Re-validate the cron each boot since the file is hand-editable.
1727
+ if (entry.cron) {
1728
+ const { isValid } = parseCron(entry.cron);
1729
+ if (!isValid) {
1730
+ logger.warn('Manual schedule has invalid cron — skipping', {
1731
+ base: basePath,
1732
+ photon: entry.photon,
1733
+ method: entry.method,
1734
+ cron: entry.cron,
1735
+ });
1736
+ continue;
1737
+ }
1738
+ if (scheduledJobs.has(key))
1739
+ continue;
1740
+ const ok = scheduleJob({
1741
+ id: key,
1742
+ method: entry.method,
1743
+ args: {},
1744
+ cron: entry.cron,
1745
+ runCount: 0,
1746
+ createdAt: Date.now(),
1747
+ createdBy: 'manual',
1748
+ photonName: entry.photon,
1749
+ workingDir: basePath,
1750
+ });
1751
+ if (ok)
1752
+ registered++;
1753
+ continue;
1754
+ }
1343
1755
  const decl = declaredSchedules.get(key);
1344
1756
  if (!decl) {
1345
1757
  missingRefs++;
@@ -1353,6 +1765,44 @@ function syncActiveSchedulesAtBoot() {
1353
1765
  }
1354
1766
  if (scheduledJobs.has(key))
1355
1767
  continue;
1768
+ // Boot-time annotation-vs-provider dedup: a photon that uses both
1769
+ // `@scheduled` AND `this.schedule.create()` for the same method ends
1770
+ // up with both a persisted ScheduleProvider file and an annotation
1771
+ // job. The runtime dedup in autoRegisterFromMetadata only catches
1772
+ // this AFTER the photon is loaded into a session — at boot, both
1773
+ // already landed in scheduledJobs from loadAllPersistedSchedules.
1774
+ // The annotation is the source of truth (codebase intent), so drop
1775
+ // the provider sibling and unlink its persisted file. Without this,
1776
+ // a kith-sync method like scheduled_freshness_scan fires every 15
1777
+ // minutes from BOTH timers; field reports show duplicate browser
1778
+ // navigation and double API calls.
1779
+ for (const [staleKey, job] of Array.from(scheduledJobs.entries())) {
1780
+ if (job.photonName !== entry.photon || job.method !== entry.method)
1781
+ continue;
1782
+ if (staleKey === key)
1783
+ continue;
1784
+ if (!staleKey.includes(':sched:'))
1785
+ continue; // only ScheduleProvider keys
1786
+ const jobBase = job.workingDir ? path.resolve(job.workingDir) : undefined;
1787
+ if (jobBase && jobBase !== basePath)
1788
+ continue; // scope to this base
1789
+ logger.info('Dropping ScheduleProvider duplicate of @scheduled method', {
1790
+ photon: entry.photon,
1791
+ method: entry.method,
1792
+ providerJobId: staleKey,
1793
+ annotationJobId: key,
1794
+ sourceFile: job.sourceFile,
1795
+ });
1796
+ if (job.sourceFile) {
1797
+ try {
1798
+ fs.unlinkSync(job.sourceFile);
1799
+ }
1800
+ catch {
1801
+ // File may have been removed concurrently
1802
+ }
1803
+ }
1804
+ unscheduleJob(staleKey);
1805
+ }
1356
1806
  const ok = scheduleJob({
1357
1807
  id: key,
1358
1808
  method: decl.method,
@@ -2000,10 +2450,19 @@ function readActiveSchedulesFile(baseDir) {
2000
2450
  if (parsed && typeof parsed === 'object' && Array.isArray(parsed.active)) {
2001
2451
  return {
2002
2452
  version: 1,
2003
- active: parsed.active.filter((e) => e &&
2453
+ active: parsed.active
2454
+ .filter((e) => e &&
2004
2455
  typeof e.photon === 'string' &&
2005
2456
  typeof e.method === 'string' &&
2006
- typeof e.enabledAt === 'string'),
2457
+ typeof e.enabledAt === 'string')
2458
+ .map((e) => ({
2459
+ photon: e.photon,
2460
+ method: e.method,
2461
+ enabledAt: e.enabledAt,
2462
+ enabledBy: typeof e.enabledBy === 'string' ? e.enabledBy : 'unknown',
2463
+ paused: !!e.paused,
2464
+ cron: typeof e.cron === 'string' ? e.cron : undefined,
2465
+ })),
2007
2466
  suppressed: Array.isArray(parsed.suppressed)
2008
2467
  ? parsed.suppressed.filter((s) => s && typeof s.photon === 'string' && typeof s.method === 'string')
2009
2468
  : undefined,
@@ -2036,6 +2495,17 @@ async function autoRegisterFromMetadata(photonName, manager) {
2036
2495
  if (autoRegistered.has(autoKey))
2037
2496
  return;
2038
2497
  autoRegistered.add(autoKey);
2498
+ // Host mode: skip @scheduled / @webhook auto-registration when this base
2499
+ // has the .photon-no-host marker. Manual `photon run` still works because
2500
+ // it goes through the command handler, not this auto-register path.
2501
+ const baseForHostCheck = manager.loader?.baseDir ?? getDefaultContext().baseDir;
2502
+ if (isHostDisabledBase(baseForHostCheck)) {
2503
+ logger.debug('Skipping auto-register — host-disabled base', {
2504
+ photon: photonName,
2505
+ base: baseForHostCheck,
2506
+ });
2507
+ return;
2508
+ }
2039
2509
  try {
2040
2510
  // Get a session to access the loaded photon's tools
2041
2511
  const session = await manager.getOrCreateSession('__autoregister', 'system');
@@ -2522,6 +2992,31 @@ async function handleRequest(request, socket) {
2522
2992
  suggestion: 'Include photonName in the request payload',
2523
2993
  };
2524
2994
  }
2995
+ // Host mode: refuse to arm a cron when the owning base is host-disabled.
2996
+ // Without this, a manual `photon run` on a host-disabled machine could
2997
+ // call this.schedule.create() and immediately install a timer — silently
2998
+ // defeating the marker. The check uses the request's workingDir (or the
2999
+ // default base when unset), matching the resolution that scheduleJob
3000
+ // would otherwise apply.
3001
+ const scheduleBase = request.workingDir
3002
+ ? path.resolve(request.workingDir)
3003
+ : path.resolve(getDefaultContext().baseDir);
3004
+ if (isHostDisabledBase(scheduleBase)) {
3005
+ logger.info('Refusing schedule create — host-disabled base', {
3006
+ base: scheduleBase,
3007
+ photon: photonName,
3008
+ method: request.method,
3009
+ });
3010
+ return {
3011
+ type: 'result',
3012
+ id: request.id,
3013
+ success: false,
3014
+ data: {
3015
+ scheduled: false,
3016
+ reason: `host-disabled: ${scheduleBase} has a .photon-no-host marker`,
3017
+ },
3018
+ };
3019
+ }
2525
3020
  // Generate IPC job ID with workingDir hash to prevent cross-project collisions
2526
3021
  const dirHash = request.workingDir
2527
3022
  ? crypto.createHash('sha256').update(request.workingDir).digest('hex').slice(0, 8)
@@ -2558,28 +3053,13 @@ async function handleRequest(request, socket) {
2558
3053
  if (request.type === 'unschedule') {
2559
3054
  // IPC input: coerce at the boundary. Job IDs shipped over the protocol
2560
3055
  // are already ScheduleKey-shaped (<base>::<photon>:<method>).
2561
- const jobId = asScheduleKey(request.jobId);
2562
- // Try exact match first, then look for IPC-prefixed version
2563
- let actualJobId = jobId;
2564
- if (!scheduledJobs.has(jobId)) {
2565
- // Search for IPC-prefixed job
2566
- for (const key of scheduledJobs.keys()) {
2567
- if (key.endsWith(`:ipc:${jobId}`)) {
2568
- actualJobId = key;
2569
- break;
2570
- }
2571
- }
2572
- }
2573
- const job = scheduledJobs.get(actualJobId);
2574
- const unscheduled = unscheduleJob(actualJobId);
2575
- if (unscheduled && job) {
2576
- deletePersistedIpcSchedule(actualJobId, job.photonName);
2577
- }
3056
+ const rawJobId = request.jobId;
3057
+ const unscheduled = evictScheduledJobByRawId(rawJobId);
2578
3058
  return {
2579
3059
  type: 'result',
2580
3060
  id: request.id,
2581
3061
  success: true,
2582
- data: { unscheduled, jobId: actualJobId },
3062
+ data: { unscheduled, jobId: asScheduleKey(rawJobId) },
2583
3063
  };
2584
3064
  }
2585
3065
  // Handle list jobs (legacy shape — just the active cron jobs)
@@ -2603,8 +3083,34 @@ async function handleRequest(request, socket) {
2603
3083
  workingDir: j.workingDir ?? defaultBase,
2604
3084
  createdBy: j.createdBy,
2605
3085
  }));
3086
+ // Resolve declared->active equivalence by (photon, method, cron, base).
3087
+ // The exact-key lookup `scheduledJobs.has(declaredKey(...))` only catches
3088
+ // a declaration that was enrolled via `photon ps enable` — it misses
3089
+ // the case where a photon ships BOTH `@scheduled` AND a legacy
3090
+ // `enable_schedule` that calls `this.schedule.create()`. The legacy
3091
+ // path keys jobs as `<photon>:sched:<uuid>`, so the declaration shows
3092
+ // up as DECLARED-not-enrolled even though an equivalent timer is
3093
+ // actively firing (Bug 5 in v1.27.0). Compare on the underlying
3094
+ // identity tuple so the CLI can suppress the cosmetic duplicate.
2606
3095
  const declared = Array.from(declaredSchedules.values()).map((d) => {
2607
3096
  const k = declaredKey(d.photon, d.method, d.workingDir);
3097
+ const declBase = d.workingDir ? path.resolve(d.workingDir) : defaultBase;
3098
+ let isActive = scheduledJobs.has(k);
3099
+ if (!isActive) {
3100
+ for (const job of scheduledJobs.values()) {
3101
+ if (job.photonName !== d.photon)
3102
+ continue;
3103
+ if (job.method !== d.method)
3104
+ continue;
3105
+ if (job.cron !== d.cron)
3106
+ continue;
3107
+ const jobBase = job.workingDir ? path.resolve(job.workingDir) : defaultBase;
3108
+ if (jobBase !== declBase)
3109
+ continue;
3110
+ isActive = true;
3111
+ break;
3112
+ }
3113
+ }
2608
3114
  return {
2609
3115
  key: k,
2610
3116
  photon: d.photon,
@@ -2612,7 +3118,7 @@ async function handleRequest(request, socket) {
2612
3118
  cron: d.cron,
2613
3119
  photonPath: d.photonPath,
2614
3120
  workingDir: d.workingDir ?? defaultBase,
2615
- active: scheduledJobs.has(k),
3121
+ active: isActive,
2616
3122
  };
2617
3123
  });
2618
3124
  const webhooks = [];
@@ -2664,6 +3170,112 @@ async function handleRequest(request, socket) {
2664
3170
  data: { active, declared, webhooks, sessions, suppressed },
2665
3171
  };
2666
3172
  }
3173
+ // Add an ad-hoc cron schedule for any public method on a photon. Unlike
3174
+ // `enable_schedule`, this does not require an `@scheduled` JSDoc tag in
3175
+ // source — the cron expression is supplied by the caller and persisted
3176
+ // directly in the active-schedules file. Used by the Pulse UI's
3177
+ // "Add schedule" form.
3178
+ if (request.type === 'add_manual_schedule') {
3179
+ const photon = request.photonName;
3180
+ const method = request.method;
3181
+ const cron = request.cron;
3182
+ if (!photon || !method || !cron) {
3183
+ return {
3184
+ type: 'error',
3185
+ id: request.id,
3186
+ error: '`add_manual_schedule` requires photonName, method, and cron',
3187
+ };
3188
+ }
3189
+ const cronCheck = parseCron(cron);
3190
+ if (!cronCheck.isValid) {
3191
+ return {
3192
+ type: 'error',
3193
+ id: request.id,
3194
+ error: `Invalid cron expression: "${cron}"`,
3195
+ };
3196
+ }
3197
+ const preferredBase = request.workingDir;
3198
+ // Refuse to shadow a `@scheduled` declaration — direct the user to
3199
+ // `enable_schedule` so the source-of-truth stays the JSDoc tag.
3200
+ const declMatches = findDeclarationsFor(photon, method, preferredBase);
3201
+ if (declMatches.length > 0) {
3202
+ return {
3203
+ type: 'error',
3204
+ id: request.id,
3205
+ error: `${photon}:${method} already has a @scheduled declaration. ` +
3206
+ `Use enable/disable to manage it, or remove the @scheduled tag first.`,
3207
+ };
3208
+ }
3209
+ const base = path.resolve(preferredBase ?? probePhotonSource(photon).ownerBase ?? getDefaultContext().baseDir);
3210
+ if (isHostDisabledBase(base)) {
3211
+ return {
3212
+ type: 'error',
3213
+ id: request.id,
3214
+ error: `Cannot add manual schedule for ${photon}:${method} — base ${base} is host-disabled ` +
3215
+ `(remove ${base}/.photon-no-host to allow scheduling).`,
3216
+ };
3217
+ }
3218
+ const file = readActiveSchedulesFile(base);
3219
+ const existing = file.active.find((e) => e.photon === photon && e.method === method);
3220
+ const nowIso = new Date().toISOString();
3221
+ if (existing) {
3222
+ // Update the cron in place — same semantics as resave.
3223
+ existing.cron = cron;
3224
+ existing.paused = false;
3225
+ existing.enabledAt = nowIso;
3226
+ existing.enabledBy = request.source || 'manual';
3227
+ }
3228
+ else {
3229
+ file.active.push({
3230
+ photon,
3231
+ method,
3232
+ cron,
3233
+ enabledAt: nowIso,
3234
+ enabledBy: request.source || 'manual',
3235
+ });
3236
+ }
3237
+ // Clear any matching suppression — manual add is an explicit re-enable.
3238
+ if (file.suppressed) {
3239
+ file.suppressed = file.suppressed.filter((s) => !(s.photon === photon && s.method === method));
3240
+ }
3241
+ writeActiveSchedulesFile(base, file);
3242
+ try {
3243
+ touchBase(base);
3244
+ }
3245
+ catch {
3246
+ /* non-fatal */
3247
+ }
3248
+ const key = declaredKey(photon, method, base);
3249
+ // If a timer for this key was already running (e.g. previous manual entry
3250
+ // with a different cron), unscheduleJob first so scheduleJob arms the new cron.
3251
+ if (scheduledJobs.has(key)) {
3252
+ unscheduleJob(key);
3253
+ }
3254
+ const ok = scheduleJob({
3255
+ id: key,
3256
+ method,
3257
+ args: {},
3258
+ cron,
3259
+ runCount: 0,
3260
+ createdAt: Date.now(),
3261
+ createdBy: 'manual',
3262
+ photonName: photon,
3263
+ workingDir: base,
3264
+ });
3265
+ if (!ok) {
3266
+ return {
3267
+ type: 'error',
3268
+ id: request.id,
3269
+ error: `Failed to schedule ${photon}:${method} — see daemon logs.`,
3270
+ };
3271
+ }
3272
+ return {
3273
+ type: 'result',
3274
+ id: request.id,
3275
+ success: true,
3276
+ data: { photon, method, cron, base, status: 'active' },
3277
+ };
3278
+ }
2667
3279
  // Enroll a declared @scheduled method into the active list.
2668
3280
  if (request.type === 'enable_schedule') {
2669
3281
  const photon = request.photonName;
@@ -2696,6 +3308,17 @@ async function handleRequest(request, socket) {
2696
3308
  }
2697
3309
  const { key, decl } = matches[0];
2698
3310
  const base = path.resolve(decl.workingDir || getDefaultContext().baseDir);
3311
+ // Host mode: refuse to arm a timer when the owning base is host-disabled.
3312
+ // Disable still works (broad sweep) so users can clear stale state from
3313
+ // any machine; only enabling background work is blocked.
3314
+ if (isHostDisabledBase(base)) {
3315
+ return {
3316
+ type: 'error',
3317
+ id: request.id,
3318
+ error: `Cannot enable ${photon}:${method} — base ${base} is host-disabled ` +
3319
+ `(remove ${base}/.photon-no-host to allow scheduling).`,
3320
+ };
3321
+ }
2699
3322
  const file = readActiveSchedulesFile(base);
2700
3323
  const existing = file.active.find((e) => e.photon === photon && e.method === method);
2701
3324
  if (existing) {
@@ -2767,7 +3390,13 @@ async function handleRequest(request, socket) {
2767
3390
  }
2768
3391
  const match = matches[0];
2769
3392
  // Disable tolerates a missing declaration so the caller can clean up
2770
- // orphan active-schedule rows after the source file is deleted.
3393
+ // orphan active-schedule rows after the source file is deleted. When
3394
+ // there's no declaration AND no caller-pinned base, treat the disable
3395
+ // as a hard guarantee and broaden the in-memory sweep + persisted-file
3396
+ // walk across every known base — the user can't be expected to know
3397
+ // which PHOTON_DIR seeded a ghost schedule.
3398
+ const orphan = !match;
3399
+ const broaden = orphan && !preferredBase;
2771
3400
  const base = path.resolve(match?.decl.workingDir ?? preferredBase ?? getDefaultContext().baseDir);
2772
3401
  const file = readActiveSchedulesFile(base);
2773
3402
  const before = file.active.length;
@@ -2792,13 +3421,15 @@ async function handleRequest(request, socket) {
2792
3421
  // flagged.
2793
3422
  const key = match?.key ?? declaredKey(photon, method, base);
2794
3423
  unscheduleJob(key);
3424
+ let filesRemoved = 0;
2795
3425
  // Defensive sweep: during upgrade transitions a daemon may still hold
2796
3426
  // timers registered under the pre-base-scoping `${photon}:${method}` key
2797
3427
  // format. Report-success-but-keep-firing is worse than a noisy sweep, so
2798
- // also drop any job matching this request but only when the job is
2799
- // scoped to THIS base (or the legacy "no base" form). Without this check,
2800
- // disabling foo:bar under base X would also stop foo:bar under base Y,
2801
- // silently breaking the other PHOTON_DIR's timer.
3428
+ // also drop any job matching this request. Restrict to `base` unless this
3429
+ // is an orphan disable without a preferred base — see the comment above.
3430
+ // Persisted ScheduleProvider files are unlinked here so they don't
3431
+ // resurrect on the next daemon restart; without this, disable was a
3432
+ // memory-only operation and the boot loader would replay the ghost.
2802
3433
  for (const staleKey of Array.from(scheduledJobs.keys())) {
2803
3434
  const job = scheduledJobs.get(staleKey);
2804
3435
  if (!job)
@@ -2807,17 +3438,78 @@ async function handleRequest(request, socket) {
2807
3438
  continue;
2808
3439
  if (staleKey === key)
2809
3440
  continue; // already handled above
2810
- // Only sweep legacy keys (no base prefix) and keys scoped to `base`.
2811
3441
  const jobBase = job.workingDir ? path.resolve(job.workingDir) : undefined;
2812
- if (jobBase && jobBase !== base)
3442
+ if (!broaden && jobBase && jobBase !== base)
2813
3443
  continue;
3444
+ if (job.sourceFile) {
3445
+ try {
3446
+ fs.unlinkSync(job.sourceFile);
3447
+ filesRemoved++;
3448
+ }
3449
+ catch {
3450
+ // File may have been removed concurrently
3451
+ }
3452
+ }
2814
3453
  unscheduleJob(staleKey);
2815
3454
  }
3455
+ // Even when no in-memory job matched, scan persisted ScheduleProvider
3456
+ // files. Useful for ghost schedules whose timer was already dropped
3457
+ // (e.g. by the runJob orphan check) but whose JSON file would
3458
+ // otherwise revive the ghost on the next daemon restart.
3459
+ if (orphan) {
3460
+ const basesToScan = new Set();
3461
+ basesToScan.add(base);
3462
+ if (broaden) {
3463
+ try {
3464
+ basesToScan.add(path.resolve(getDefaultContext().baseDir));
3465
+ }
3466
+ catch {
3467
+ /* ignore */
3468
+ }
3469
+ try {
3470
+ for (const b of listActiveBases())
3471
+ basesToScan.add(path.resolve(b.path));
3472
+ }
3473
+ catch {
3474
+ /* ignore */
3475
+ }
3476
+ }
3477
+ for (const baseDir of basesToScan) {
3478
+ const dir = resolveScheduleDir(photon, baseDir);
3479
+ let entries;
3480
+ try {
3481
+ entries = fs.readdirSync(dir).filter((f) => f.endsWith('.json'));
3482
+ }
3483
+ catch {
3484
+ continue;
3485
+ }
3486
+ for (const f of entries) {
3487
+ const fp = path.join(dir, f);
3488
+ try {
3489
+ const task = JSON.parse(fs.readFileSync(fp, 'utf-8'));
3490
+ if (task.method === method) {
3491
+ fs.unlinkSync(fp);
3492
+ filesRemoved++;
3493
+ }
3494
+ }
3495
+ catch {
3496
+ // Ignore malformed file or unlink race
3497
+ }
3498
+ }
3499
+ }
3500
+ }
2816
3501
  return {
2817
3502
  type: 'result',
2818
3503
  id: request.id,
2819
3504
  success: true,
2820
- data: { photon, method, base, removed: removed > 0, status: 'disabled' },
3505
+ data: {
3506
+ photon,
3507
+ method,
3508
+ base,
3509
+ removed: removed > 0 || filesRemoved > 0,
3510
+ filesRemoved,
3511
+ status: 'disabled',
3512
+ },
2821
3513
  };
2822
3514
  }
2823
3515
  // Pause: keep the enrollment record but cancel the timer.
@@ -2847,6 +3539,17 @@ async function handleRequest(request, socket) {
2847
3539
  const decl = match?.decl;
2848
3540
  const base = path.resolve(decl?.workingDir ?? preferredBase ?? getDefaultContext().baseDir);
2849
3541
  const key = match?.key ?? declaredKey(photon, method, base);
3542
+ // Host mode: pause is allowed (it stops background work, consistent with
3543
+ // the marker's intent). Resume is rejected because it would arm a timer
3544
+ // on a quiet machine.
3545
+ if (!pause && isHostDisabledBase(base)) {
3546
+ return {
3547
+ type: 'error',
3548
+ id: request.id,
3549
+ error: `Cannot resume ${photon}:${method} — base ${base} is host-disabled ` +
3550
+ `(remove ${base}/.photon-no-host to allow scheduling).`,
3551
+ };
3552
+ }
2850
3553
  const file = readActiveSchedulesFile(base);
2851
3554
  const entry = file.active.find((e) => e.photon === photon && e.method === method);
2852
3555
  if (!entry) {
@@ -4398,6 +5101,17 @@ function startupWatchPhotons() {
4398
5101
  const photonDir = getDefaultContext().baseDir;
4399
5102
  if (!fs.existsSync(photonDir))
4400
5103
  return;
5104
+ // Host mode: when the default base is host-disabled, skip the file
5105
+ // watcher, the eager onInitialize loader, and the directory watcher
5106
+ // entirely. They all activate background work that the marker is
5107
+ // supposed to suppress. The photon-paths map stays empty for this
5108
+ // base; manual `photon run` populates entries on demand.
5109
+ if (isHostDisabledBase(photonDir)) {
5110
+ logger.info('Skipping startupWatchPhotons — host-disabled default base', {
5111
+ base: photonDir,
5112
+ });
5113
+ return;
5114
+ }
4401
5115
  let entries;
4402
5116
  try {
4403
5117
  entries = fs.readdirSync(photonDir, { withFileTypes: true });
@@ -4516,7 +5230,10 @@ function startupWatchPhotons() {
4516
5230
  }
4517
5231
  function startServer() {
4518
5232
  const server = net.createServer((socket) => {
4519
- logger.info('Client connected');
5233
+ // Demoted from info to debug: scheduler healthchecks open a fresh
5234
+ // connection every minute. At info level these two lines pair up to
5235
+ // dominate the daemon log (5.6M lines / 558 MB seen in the wild).
5236
+ logger.debug('Client connected');
4520
5237
  connectedSockets.add(socket);
4521
5238
  let buffer = '';
4522
5239
  socket.on('data', (chunk) => {
@@ -4548,7 +5265,7 @@ function startServer() {
4548
5265
  })();
4549
5266
  });
4550
5267
  socket.on('end', () => {
4551
- logger.info('Client disconnected');
5268
+ logger.debug('Client disconnected');
4552
5269
  connectedSockets.delete(socket);
4553
5270
  cleanupSocketSubscriptions(socket);
4554
5271
  });
@@ -4590,6 +5307,60 @@ function startServer() {
4590
5307
  });
4591
5308
  });
4592
5309
  }
5310
/**
 * Scan the OS process table for any other process whose argv looks like
 * a Photon daemon for THIS exact socket path. Returns their PIDs. Used to
 * defend against the multi-daemon-on-one-socket failure mode where a
 * previous daemon survived a stop/start race (or was launched bypassing
 * the DaemonManager entirely, e.g. directly via `node server.js` from a
 * test or worktree). The owner-record check only spots siblings the
 * previous daemon successfully recorded; an argv scan finds the rest
 * regardless of which runtime (node/bun/deno) launched them.
 *
 * Match shape: argv contains `daemon/server.js` AND the literal socket
 * path. Self is filtered. Windows is skipped (named-pipe IPC has its own
 * exclusivity guarantees and `ps` isn't available).
 *
 * @returns {number[]} PIDs of other daemons bound to `socketPath`
 *   (empty on Windows, on `ps` failure, or when none are found).
 */
function findImposterDaemonPids() {
    if (process.platform === 'win32')
        return [];
    const pids = [];
    try {
        const result = spawnSync('ps', ['-ax', '-o', 'pid=,args='], {
            encoding: 'utf-8',
            timeout: 3000,
        });
        // On timeout/spawn failure `status` is null, which also bails here.
        if (result.status !== 0)
            return [];
        // Hoisted out of the loop: the socket-path matcher is invariant
        // across all process-table rows (the original rebuilt it per row).
        // Whitespace-bounded token match so a daemon for `/tmp/foo.sock`
        // doesn't accidentally match `/tmp/foo.sock.bak`.
        const socketToken = new RegExp(`(^|\\s)${escapeRegExp(socketPath)}(\\s|$)`);
        const myPid = process.pid;
        for (const rawLine of (result.stdout || '').split('\n')) {
            const line = rawLine.trim();
            if (!line)
                continue;
            // `pid=,args=` output: "<pid> <full command line>".
            const m = line.match(/^(\d+)\s+(.*)$/);
            if (!m)
                continue;
            const pid = Number.parseInt(m[1], 10);
            const cmd = m[2];
            if (!Number.isFinite(pid) || pid === myPid)
                continue;
            if (!cmd.includes('daemon/server.js'))
                continue;
            if (!socketToken.test(cmd))
                continue;
            pids.push(pid);
        }
    }
    catch (err) {
        logger.warn('Failed to scan for imposter daemons', { error: getErrorMessage(err) });
    }
    return pids;
}
+ function escapeRegExp(s) {
5362
+ return s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
5363
+ }
4593
5364
  async function claimExclusiveOwnership() {
4594
5365
  const owner = readOwnerRecord(ownerFile);
4595
5366
  if (owner && owner.socketPath === socketPath && owner.pid !== process.pid) {
@@ -4626,6 +5397,46 @@ async function claimExclusiveOwnership() {
4626
5397
  }
4627
5398
  removeOwnerRecord(ownerFile);
4628
5399
  }
5400
+ // Belt-and-suspenders: even after the owner-record path, scan the OS
5401
+ // process table for any other process running `daemon/server.js` against
5402
+ // this same socket. Catches imposters that bypassed DaemonManager (direct
5403
+ // `node server.js` invocations from tests/worktrees, leftover daemons whose
5404
+ // owner record was wiped, daemons started by a different runtime than the
5405
+ // one currently bound).
5406
+ const imposters = findImposterDaemonPids();
5407
+ if (imposters.length > 0) {
5408
+ logger.warn('Imposter daemon(s) detected via argv scan', {
5409
+ socketPath,
5410
+ currentPid: process.pid,
5411
+ imposterPids: imposters,
5412
+ });
5413
+ for (const pid of imposters) {
5414
+ try {
5415
+ process.kill(pid, 'SIGTERM');
5416
+ }
5417
+ catch {
5418
+ // Already gone
5419
+ }
5420
+ }
5421
+ // Wait for graceful exit, then escalate to SIGKILL on holdouts.
5422
+ const deadline = Date.now() + 5000;
5423
+ while (Date.now() < deadline) {
5424
+ if (imposters.every((pid) => !isPidAlive(pid)))
5425
+ break;
5426
+ await new Promise((r) => setTimeout(r, 100));
5427
+ }
5428
+ for (const pid of imposters) {
5429
+ if (!isPidAlive(pid))
5430
+ continue;
5431
+ logger.warn('Imposter daemon ignored SIGTERM, escalating to SIGKILL', { pid });
5432
+ try {
5433
+ process.kill(pid, 'SIGKILL');
5434
+ }
5435
+ catch {
5436
+ /* ignore */
5437
+ }
5438
+ }
5439
+ }
4629
5440
  if (process.platform !== 'win32' && fs.existsSync(socketPath)) {
4630
5441
  const responsive = await isSocketResponsive(socketPath);
4631
5442
  if (!responsive) {
@@ -4779,6 +5590,16 @@ function shutdown() {
4779
5590
  // Main execution
4780
5591
  void (async () => {
4781
5592
  await claimExclusiveOwnership();
5593
+ // Register in-process adapters BEFORE the loader can be invoked, so
5594
+ // any photon code path that goes through schedule.cancel() while
5595
+ // running inside the daemon evicts directly instead of round-tripping
5596
+ // through our own Unix socket (which fails during recovery windows).
5597
+ const { registerInProcessAdapters } = await import('./in-process-bridge.js');
5598
+ registerInProcessAdapters({
5599
+ unscheduleJob: async (_photonName, jobId) => {
5600
+ return evictScheduledJobByRawId(jobId);
5601
+ },
5602
+ });
4782
5603
  startupWatchPhotons();
4783
5604
  startServer();
4784
5605
  migrateLegacyIpcSchedules();