quadwork 1.3.0 → 1.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +189 -82
- package/out/404.html +1 -1
- package/out/__next.__PAGE__.txt +3 -3
- package/out/__next._full.txt +12 -12
- package/out/__next._head.txt +4 -4
- package/out/__next._index.txt +6 -6
- package/out/__next._tree.txt +2 -2
- package/out/_next/static/chunks/006g3lco-9xqf.js +1 -0
- package/out/_next/static/chunks/035rt-n0oid7d.js +1 -0
- package/out/_next/static/chunks/{0e~ue9ca5zrep.js → 05ok82hwk0x-c.js} +1 -1
- package/out/_next/static/chunks/0u~7e4fgf-u06.css +2 -0
- package/out/_next/static/chunks/{0swlbn4q4u71z.js → 0zqyw6q.jp~1i.js} +14 -14
- package/out/_next/static/chunks/17y2walb2um9w.js +1 -0
- package/out/_next/static/chunks/{134b1p_egmf1c.js → 18cmux34jwe.p.js} +1 -1
- package/out/_not-found/__next._full.txt +11 -11
- package/out/_not-found/__next._head.txt +4 -4
- package/out/_not-found/__next._index.txt +6 -6
- package/out/_not-found/__next._not-found.__PAGE__.txt +2 -2
- package/out/_not-found/__next._not-found.txt +3 -3
- package/out/_not-found/__next._tree.txt +2 -2
- package/out/_not-found.html +1 -1
- package/out/_not-found.txt +11 -11
- package/out/app-shell/__next._full.txt +11 -11
- package/out/app-shell/__next._head.txt +4 -4
- package/out/app-shell/__next._index.txt +6 -6
- package/out/app-shell/__next._tree.txt +2 -2
- package/out/app-shell/__next.app-shell.__PAGE__.txt +2 -2
- package/out/app-shell/__next.app-shell.txt +3 -3
- package/out/app-shell.html +1 -1
- package/out/app-shell.txt +11 -11
- package/out/index.html +1 -1
- package/out/index.txt +12 -12
- package/out/project/_/__next._full.txt +12 -12
- package/out/project/_/__next._head.txt +4 -4
- package/out/project/_/__next._index.txt +6 -6
- package/out/project/_/__next._tree.txt +2 -2
- package/out/project/_/__next.project.$d$id.__PAGE__.txt +3 -3
- package/out/project/_/__next.project.$d$id.txt +3 -3
- package/out/project/_/__next.project.txt +3 -3
- package/out/project/_/memory/__next._full.txt +12 -12
- package/out/project/_/memory/__next._head.txt +4 -4
- package/out/project/_/memory/__next._index.txt +6 -6
- package/out/project/_/memory/__next._tree.txt +2 -2
- package/out/project/_/memory/__next.project.$d$id.memory.__PAGE__.txt +3 -3
- package/out/project/_/memory/__next.project.$d$id.memory.txt +3 -3
- package/out/project/_/memory/__next.project.$d$id.txt +3 -3
- package/out/project/_/memory/__next.project.txt +3 -3
- package/out/project/_/memory.html +1 -1
- package/out/project/_/memory.txt +12 -12
- package/out/project/_/queue/__next._full.txt +12 -12
- package/out/project/_/queue/__next._head.txt +4 -4
- package/out/project/_/queue/__next._index.txt +6 -6
- package/out/project/_/queue/__next._tree.txt +2 -2
- package/out/project/_/queue/__next.project.$d$id.queue.__PAGE__.txt +3 -3
- package/out/project/_/queue/__next.project.$d$id.queue.txt +3 -3
- package/out/project/_/queue/__next.project.$d$id.txt +3 -3
- package/out/project/_/queue/__next.project.txt +3 -3
- package/out/project/_/queue.html +1 -1
- package/out/project/_/queue.txt +12 -12
- package/out/project/_.html +1 -1
- package/out/project/_.txt +12 -12
- package/out/settings/__next._full.txt +12 -12
- package/out/settings/__next._head.txt +4 -4
- package/out/settings/__next._index.txt +6 -6
- package/out/settings/__next._tree.txt +2 -2
- package/out/settings/__next.settings.__PAGE__.txt +3 -3
- package/out/settings/__next.settings.txt +3 -3
- package/out/settings.html +1 -1
- package/out/settings.txt +12 -12
- package/out/setup/__next._full.txt +12 -12
- package/out/setup/__next._head.txt +4 -4
- package/out/setup/__next._index.txt +6 -6
- package/out/setup/__next._tree.txt +2 -2
- package/out/setup/__next.setup.__PAGE__.txt +3 -3
- package/out/setup/__next.setup.txt +3 -3
- package/out/setup.html +1 -1
- package/out/setup.txt +12 -12
- package/package.json +5 -2
- package/server/index.js +248 -12
- package/server/routes.js +364 -10
- package/out/_next/static/chunks/06mbme.sc_26-.css +0 -2
- package/out/_next/static/chunks/0caq73v0knw_w.js +0 -1
- package/out/_next/static/chunks/0md7hgvwnovzq.js +0 -1
- package/out/_next/static/chunks/0omuxbg.tg-il.js +0 -1
- /package/out/_next/static/{na3L7KeOGKGsbamYVibRj → 6uvV3nUfwr_t_JKrZJSP8}/_buildManifest.js +0 -0
- /package/out/_next/static/{na3L7KeOGKGsbamYVibRj → 6uvV3nUfwr_t_JKrZJSP8}/_clientMiddlewareManifest.js +0 -0
- /package/out/_next/static/{na3L7KeOGKGsbamYVibRj → 6uvV3nUfwr_t_JKrZJSP8}/_ssgManifest.js +0 -0
package/server/index.js
CHANGED

@@ -664,6 +664,55 @@ app.get("/api/agents", (_req, res) => {
   res.json(agents);
 });
 
+// #424 / quadwork#304: best-effort auto-snapshot of chat history
+// before any AgentChattr restart. Defense-in-depth against
+// destructive ops like /clear that rewrite AC's JSONL log in place
+// — per #303 the log itself IS persistent across normal restarts,
+// so the snapshot's job is to give the operator a point-in-time
+// rollback if the log gets clobbered, not to prevent history loss
+// on ordinary lifecycle events.
+//
+// Snapshot contents = the same envelope GET /api/project-history
+// returns, so an operator (or a future "restore" button) can feed
+// the file straight into POST /api/project-history for replay.
+const HISTORY_SNAPSHOT_LIMIT = 5;
+
+async function snapshotProjectHistory(projectId) {
+  try {
+    const snapDir = path.join(require("os").homedir(), ".quadwork", projectId, "history-snapshots");
+    if (!fs.existsSync(snapDir)) fs.mkdirSync(snapDir, { recursive: true });
+    const res = await fetch(`http://127.0.0.1:${PORT}/api/project-history?project=${encodeURIComponent(projectId)}`, {
+      signal: AbortSignal.timeout(30000),
+    });
+    if (!res.ok) {
+      console.warn(`[snapshot] ${projectId} history fetch returned ${res.status}; skipping snapshot`);
+      return false;
+    }
+    const text = await res.text();
+    const stamp = new Date().toISOString().replace(/[:.]/g, "-");
+    const outPath = path.join(snapDir, `${stamp}.json`);
+    fs.writeFileSync(outPath, text);
+    console.log(`[snapshot] ${projectId} → ${outPath}`);
+    // Prune to the newest HISTORY_SNAPSHOT_LIMIT files so the
+    // directory can't grow unbounded across weeks of restarts.
+    try {
+      const entries = fs.readdirSync(snapDir)
+        .filter((f) => f.endsWith(".json"))
+        .map((f) => ({ f, t: fs.statSync(path.join(snapDir, f)).mtimeMs }))
+        .sort((a, b) => b.t - a.t);
+      for (const old of entries.slice(HISTORY_SNAPSHOT_LIMIT)) {
+        try { fs.unlinkSync(path.join(snapDir, old.f)); } catch {}
+      }
+    } catch {
+      // non-fatal — stale snapshots just linger
+    }
+    return true;
+  } catch (err) {
+    console.warn(`[snapshot] ${projectId} snapshot failed: ${err.message || err}`);
+    return false;
+  }
+}
+
 // Per-project AgentChattr lifecycle: /api/agentchattr/:project/:action
 // Backward compat: /api/agentchattr/:action uses first project
 async function handleAgentChattr(req, res) {
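For context, the snapshot files this hunk writes are meant to round-trip through the history API by hand as well. Below is a minimal sketch of a manual restore, assuming a dashboard on port 3000 and a project id of `myproj` (both hypothetical values); the request body is whatever envelope GET /api/project-history returned at snapshot time, and the Content-Type header is an assumption:

```js
// Hypothetical manual restore: feed the newest snapshot from
// ~/.quadwork/<projectId>/history-snapshots back into
// POST /api/project-history, mirroring the replay path the
// hunk's comments describe.
const fs = require("fs");
const os = require("os");
const path = require("path");

async function manualRestore(projectId, port = 3000) {
  const snapDir = path.join(os.homedir(), ".quadwork", projectId, "history-snapshots");
  const files = fs.readdirSync(snapDir).filter((f) => f.endsWith(".json")).sort();
  if (!files.length) throw new Error("no snapshots on disk");
  const newest = files[files.length - 1]; // ISO-stamped names sort chronologically
  const body = fs.readFileSync(path.join(snapDir, newest), "utf8");
  const res = await fetch(`http://127.0.0.1:${port}/api/project-history?project=${encodeURIComponent(projectId)}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body,
  });
  console.log(`${newest} -> HTTP ${res.status}`);
}

manualRestore("myproj").catch(console.error);
```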
@@ -785,6 +834,18 @@ async function handleAgentChattr(req, res) {
     setProc({ process: null, state: "stopped", error: null });
     res.json({ ok: true, state: "stopped" });
   } else if (action === "restart") {
+    // #424 / quadwork#304: snapshot history before killing the
+    // process. Best-effort and non-blocking-on-failure so a flaky
+    // snapshot doesn't leave the operator unable to restart AC.
+    await snapshotProjectHistory(projectId).catch(() => {});
+    // #424 / quadwork#304 Phase 3: latch the opt-in BEFORE the
+    // spawn so a restart that itself clears the flag can't starve
+    // the auto-restore. We latch the project's
+    // auto_restore_after_restart flag and replay the newest
+    // snapshot in the post-spawn tick below if it is set.
+    const preRestartCfg = readConfig();
+    const preRestartProject = preRestartCfg.projects?.find((p) => p.id === projectId);
+    const shouldAutoRestore = !!(preRestartProject && preRestartProject.auto_restore_after_restart);
     const proc = getProc();
     if (proc.process) {
       try { proc.process.kill("SIGTERM"); } catch {}
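The `auto_restore_after_restart` flag latched here is a per-project config field. The lookups in this hunk only attest `projects[].id` and the flag itself, so the sketch below is a minimal assumed shape of an opted-in entry, not the full config format:

```json
{
  "projects": [
    { "id": "myproj", "auto_restore_after_restart": true }
  ]
}
```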
@@ -799,6 +860,30 @@ async function handleAgentChattr(req, res) {
       }
       // Sync token after AgentChattr restarts
       setTimeout(() => syncChattrToken(projectId), 2000);
+      // #424 / quadwork#304 Phase 3: optional auto-restore.
+      // Fire the restore 3s after spawn so AC's ws is ready.
+      // Best-effort: never blocks the restart response or
+      // rolls back on error.
+      if (shouldAutoRestore) {
+        setTimeout(async () => {
+          try {
+            const snapDir = path.join(require("os").homedir(), ".quadwork", projectId, "history-snapshots");
+            if (!fs.existsSync(snapDir)) return;
+            const newest = fs.readdirSync(snapDir)
+              .filter((f) => f.endsWith(".json"))
+              .map((f) => ({ f, t: fs.statSync(path.join(snapDir, f)).mtimeMs }))
+              .sort((a, b) => b.t - a.t)[0];
+            if (!newest) return;
+            const r = await fetch(`http://127.0.0.1:${PORT}/api/project-history/restore?project=${encodeURIComponent(projectId)}&name=${encodeURIComponent(newest.f)}`, {
+              method: "POST",
+            });
+            if (r.ok) console.log(`[snapshot] ${projectId} auto-restored ${newest.f}`);
+            else console.warn(`[snapshot] ${projectId} auto-restore returned ${r.status}`);
+          } catch (err) {
+            console.warn(`[snapshot] ${projectId} auto-restore failed: ${err.message || err}`);
+          }
+        }, 3000);
+      }
       res.json({ ok: true, state: "running", pid: child.pid });
     } catch (err) {
       setProc({ process: null, state: "error", error: err.message });
@@ -814,7 +899,16 @@ async function handleAgentChattr(req, res) {
     try {
       const { execSync } = require("child_process");
 
-      // Stop running process before pulling
+      // Stop running process before pulling. Snapshot first so a
+      // botched git pull can still be rolled back from disk.
+      // #424 / quadwork#304: best-effort.
+      await snapshotProjectHistory(projectId).catch(() => {});
+      // Latch the auto-restore opt-in BEFORE stop, same as the
+      // explicit restart branch above — a config mutation during
+      // the git pull shouldn't starve the replay.
+      const updateCfgPre = readConfig();
+      const updateProjectPre = updateCfgPre.projects?.find((p) => p.id === projectId);
+      const updateShouldAutoRestore = !!(updateProjectPre && updateProjectPre.auto_restore_after_restart);
       const proc = getProc();
       const wasRunning = proc.process && proc.state === "running";
       if (wasRunning) {
@@ -839,6 +933,30 @@ async function handleAgentChattr(req, res) {
       restarted = !!child;
       if (child) {
         setTimeout(() => syncChattrToken(projectId).catch(() => {}), 2000);
+        // #424 / quadwork#304 Phase 3: auto-restore after an
+        // update-triggered restart too (t2a re-review). Same
+        // 3s wait + newest-snapshot-by-mtime path as the explicit
+        // restart branch, using the pre-stop latched opt-in.
+        if (updateShouldAutoRestore) {
+          setTimeout(async () => {
+            try {
+              const snapDir = path.join(require("os").homedir(), ".quadwork", projectId, "history-snapshots");
+              if (!fs.existsSync(snapDir)) return;
+              const newest = fs.readdirSync(snapDir)
+                .filter((f) => f.endsWith(".json"))
+                .map((f) => ({ f, t: fs.statSync(path.join(snapDir, f)).mtimeMs }))
+                .sort((a, b) => b.t - a.t)[0];
+              if (!newest) return;
+              const r = await fetch(`http://127.0.0.1:${PORT}/api/project-history/restore?project=${encodeURIComponent(projectId)}&name=${encodeURIComponent(newest.f)}`, {
+                method: "POST",
+              });
+              if (r.ok) console.log(`[snapshot] ${projectId} auto-restored ${newest.f} after update`);
+              else console.warn(`[snapshot] ${projectId} post-update auto-restore returned ${r.status}`);
+            } catch (err) {
+              console.warn(`[snapshot] ${projectId} post-update auto-restore failed: ${err.message || err}`);
+            }
+          }, 3000);
+        }
       }
     }
 
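At this point the newest-snapshot-by-mtime scan appears verbatim in both restore branches, and differs from the prune inside snapshotProjectHistory only in taking the first element. A hypothetical shared helper, shown here only to make the selection logic explicit (not part of this release; `fs` and `path` are the module-level requires of server/index.js):

```js
// Hypothetical extraction: newest "*.json" file in snapDir by mtime,
// or null when the directory is missing or empty. Both auto-restore
// branches above inline exactly this scan.
function newestSnapshot(snapDir) {
  if (!fs.existsSync(snapDir)) return null;
  const newest = fs.readdirSync(snapDir)
    .filter((f) => f.endsWith(".json"))
    .map((f) => ({ f, t: fs.statSync(path.join(snapDir, f)).mtimeMs }))
    .sort((a, b) => b.t - a.t)[0];
  return newest ? newest.f : null;
}
```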
@@ -1086,7 +1204,12 @@ function stopTrigger(project) {
 
 app.post("/api/triggers/:project/start", (req, res) => {
   const { project } = req.params;
-
+  // #418 / quadwork#306: sendImmediately was an always-true
+  // "Send Message and Start Trigger" flag from #210; operators
+  // asked for a pure scheduler ("Start Trigger" — wait for the
+  // first interval). The field is ignored here; the send-now
+  // endpoint below still exists for the explicit one-shot path.
+  const { interval, duration, message } = req.body || {};
   const ms = (interval || 30) * 60 * 1000;
   const durationMs = duration ? duration * 60 * 1000 : 0; // duration in minutes, 0 = indefinite
 
@@ -1113,16 +1236,12 @@ app.post("/api/triggers/:project/start", (req, res) => {
     if (existing.durationTimer) clearTimeout(existing.durationTimer);
   }
 
-  // #
-  //
-  //
-  //
-
-
-    // its own errors and updates lastError on the trigger info.
-    sendTriggerMessage(project).catch(() => {});
-  }
-
+  // #418 / quadwork#306: no immediate fire — the first send happens
+  // at T + interval via the setInterval below. Operators set the
+  // trigger up in advance of going afk and don't want it interrupting
+  // whatever agents are currently mid-task. The explicit "send now"
+  // path still lives at /api/triggers/:project/send-now for the
+  // rare case an operator actually wants to kick things off.
   const timer = setInterval(() => sendTriggerMessage(project), ms);
   const expiresAt = durationMs > 0 ? Date.now() + durationMs : null;
 
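Under the new semantics a start call only schedules, so an operator who wants an immediate first message pairs it with the one-shot endpoint named in the comment. A sketch of both calls, assuming a dashboard on port 3000 and a project id of `myproj` (interval and duration are in minutes per the handler above; the empty send-now body is an assumption):

```js
// Start a pure scheduler: the first send fires at T + 30 minutes,
// and the trigger expires after 120 minutes (0/absent = indefinite).
await fetch("http://127.0.0.1:3000/api/triggers/myproj/start", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ interval: 30, duration: 120, message: "status check" }),
});

// Explicit one-shot for the rare kick-things-off case; the path comes
// from the comment above, the empty body is assumed.
await fetch("http://127.0.0.1:3000/api/triggers/myproj/send-now", { method: "POST" });
```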
@@ -1390,6 +1509,123 @@ function syncTriggersFromConfig() {
   }
 }
 
+// #422 / quadwork#310: auto-continue after loop guard.
+//
+// Per opted-in project, poll AC's /api/status every 10s. When we see
+// a false → true transition on `paused`, wait the configured delay
+// (default 30s) and POST /continue to /api/chat — same path the
+// operator would use manually. The delay gives a human a chance to
+// intervene on an actually-runaway loop, and acts as a soft rate
+// limit against pathological loops that would otherwise just loop
+// forever under an auto-continue.
+//
+// Detection is deliberately polling rather than a long-lived ws:
+// a ws subscription per project would complicate lifecycle and
+// reconnection, and 10s polling latency is acceptable when the
+// delay is tens of seconds. Skipping projects without the opt-in
+// keeps the poller cheap for single-project setups.
+
+const _loopGuardPausedState = new Map(); // projectId -> { paused: bool, scheduled: Timeout? }
+const LOOP_GUARD_POLL_INTERVAL_MS = 10000;
+
+async function checkLoopGuardPause(project) {
+  if (!project || !project.auto_continue_loop_guard) return;
+  const { url: base, token: sessionToken } = resolveProjectChattr(project.id);
+  if (!base) return;
+  let paused = false;
+  try {
+    const r = await fetch(`${base}/api/status`, {
+      headers: sessionToken ? { "x-session-token": sessionToken } : {},
+      signal: AbortSignal.timeout(5000),
+    });
+    if (!r.ok) return;
+    const data = await r.json();
+    paused = !!(data && data.paused);
+  } catch {
+    return;
+  }
+  const state = _loopGuardPausedState.get(project.id) || { paused: false, scheduled: null };
+  // Transition false → true: schedule an auto-continue after the delay.
+  if (paused && !state.paused && !state.scheduled) {
+    const delaySec = Number.isFinite(project.auto_continue_delay_sec) && project.auto_continue_delay_sec >= 5
+      ? project.auto_continue_delay_sec
+      : 30;
+    console.log(`[loop-guard] ${project.id} paused — auto-continue in ${delaySec}s`);
+    state.scheduled = setTimeout(async () => {
+      try {
+        // Re-check the opt-in at fire time so a checkbox disable
+        // mid-wait actually stops the auto-continue.
+        const freshCfg = readConfig();
+        const fresh = freshCfg.projects?.find((p) => p.id === project.id);
+        if (!fresh || !fresh.auto_continue_loop_guard) {
+          console.log(`[loop-guard] ${project.id} auto-continue cancelled (opt-in disabled during wait)`);
+        } else {
+          // Re-check the router's pause state at fire time too. The
+          // 10s status poller may not have seen a manual operator
+          // /continue yet when the delay window (5–9s) is shorter
+          // than the poll interval — without this, a manual resume
+          // inside a 5s wait would be followed by a stale auto
+          // /continue that clobbers hop_count on an already-running
+          // chain (router.continue_routing resets the counter
+          // unconditionally). The re-check closes the race.
+          let stillPaused = false;
+          try {
+            const { url: freshBase, token: freshToken } = resolveProjectChattr(project.id);
+            if (freshBase) {
+              const sr = await fetch(`${freshBase}/api/status`, {
+                headers: freshToken ? { "x-session-token": freshToken } : {},
+                signal: AbortSignal.timeout(5000),
+              });
+              if (sr.ok) {
+                const sd = await sr.json();
+                stillPaused = !!(sd && sd.paused);
+              }
+            }
+          } catch {
+            // Status re-check failed — fall back to "don't fire".
+            // Stuck pause will still be caught on the next 10s tick.
+          }
+          if (!stillPaused) {
+            console.log(`[loop-guard] ${project.id} auto-continue cancelled (router already resumed)`);
+          } else {
+            const res = await fetch(`http://127.0.0.1:${PORT}/api/chat?project=${encodeURIComponent(project.id)}`, {
+              method: "POST",
+              headers: { "Content-Type": "application/json" },
+              body: JSON.stringify({ text: "/continue", channel: "general" }),
+            });
+            if (res.ok) console.log(`[loop-guard] ${project.id} auto-continued`);
+            else console.warn(`[loop-guard] ${project.id} auto-continue POST returned ${res.status}`);
+          }
+        }
+      } catch (err) {
+        console.warn(`[loop-guard] ${project.id} auto-continue failed: ${err.message || err}`);
+      }
+      const s2 = _loopGuardPausedState.get(project.id);
+      if (s2) s2.scheduled = null;
+    }, delaySec * 1000);
+  }
+  // Transition true → false: clear any pending timer.
+  if (!paused && state.paused && state.scheduled) {
+    clearTimeout(state.scheduled);
+    state.scheduled = null;
+  }
+  state.paused = paused;
+  _loopGuardPausedState.set(project.id, state);
+}
+
+function runLoopGuardPollingTick() {
+  try {
+    const cfg = readConfig();
+    for (const p of (cfg.projects || [])) {
+      if (p && p.auto_continue_loop_guard) checkLoopGuardPause(p);
+    }
+  } catch {
+    // config unreadable — next tick will retry
+  }
+}
+
+setInterval(runLoopGuardPollingTick, LOOP_GUARD_POLL_INTERVAL_MS);
+
 // --- Start ---
 
 server.listen(PORT, "127.0.0.1", () => {
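Both loop-guard knobs in this hunk are per-project config fields: `auto_continue_loop_guard` gates the poller and `auto_continue_delay_sec` sets the wait, with non-numeric values or anything under 5 falling back to the 30s default. A minimal assumed entry for an opted-in project (only these two field names plus `id` are attested by the diff; the rest of the entry is elided):

```json
{
  "projects": [
    {
      "id": "myproj",
      "auto_continue_loop_guard": true,
      "auto_continue_delay_sec": 45
    }
  ]
}
```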