quadwork 1.2.5 → 1.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (98) hide show
  1. package/README.md +189 -82
  2. package/bin/quadwork.js +102 -0
  3. package/out/404.html +1 -1
  4. package/out/__next.__PAGE__.txt +3 -3
  5. package/out/__next._full.txt +12 -12
  6. package/out/__next._head.txt +4 -4
  7. package/out/__next._index.txt +6 -6
  8. package/out/__next._tree.txt +2 -2
  9. package/out/_next/static/chunks/006g3lco-9xqf.js +1 -0
  10. package/out/_next/static/chunks/035rt-n0oid7d.js +1 -0
  11. package/out/_next/static/chunks/{0e~ue9ca5zrep.js → 05ok82hwk0x-c.js} +1 -1
  12. package/out/_next/static/chunks/0u~7e4fgf-u06.css +2 -0
  13. package/out/_next/static/chunks/0zqyw6q.jp~1i.js +26 -0
  14. package/out/_next/static/chunks/17y2walb2um9w.js +1 -0
  15. package/out/_next/static/chunks/{16ell.n1p8o7d.js → 18cmux34jwe.p.js} +1 -1
  16. package/out/_not-found/__next._full.txt +11 -11
  17. package/out/_not-found/__next._head.txt +4 -4
  18. package/out/_not-found/__next._index.txt +6 -6
  19. package/out/_not-found/__next._not-found.__PAGE__.txt +2 -2
  20. package/out/_not-found/__next._not-found.txt +3 -3
  21. package/out/_not-found/__next._tree.txt +2 -2
  22. package/out/_not-found.html +1 -1
  23. package/out/_not-found.txt +11 -11
  24. package/out/app-shell/__next._full.txt +11 -11
  25. package/out/app-shell/__next._head.txt +4 -4
  26. package/out/app-shell/__next._index.txt +6 -6
  27. package/out/app-shell/__next._tree.txt +2 -2
  28. package/out/app-shell/__next.app-shell.__PAGE__.txt +2 -2
  29. package/out/app-shell/__next.app-shell.txt +3 -3
  30. package/out/app-shell.html +1 -1
  31. package/out/app-shell.txt +11 -11
  32. package/out/index.html +1 -1
  33. package/out/index.txt +12 -12
  34. package/out/project/_/__next._full.txt +12 -12
  35. package/out/project/_/__next._head.txt +4 -4
  36. package/out/project/_/__next._index.txt +6 -6
  37. package/out/project/_/__next._tree.txt +2 -2
  38. package/out/project/_/__next.project.$d$id.__PAGE__.txt +3 -3
  39. package/out/project/_/__next.project.$d$id.txt +3 -3
  40. package/out/project/_/__next.project.txt +3 -3
  41. package/out/project/_/memory/__next._full.txt +12 -12
  42. package/out/project/_/memory/__next._head.txt +4 -4
  43. package/out/project/_/memory/__next._index.txt +6 -6
  44. package/out/project/_/memory/__next._tree.txt +2 -2
  45. package/out/project/_/memory/__next.project.$d$id.memory.__PAGE__.txt +3 -3
  46. package/out/project/_/memory/__next.project.$d$id.memory.txt +3 -3
  47. package/out/project/_/memory/__next.project.$d$id.txt +3 -3
  48. package/out/project/_/memory/__next.project.txt +3 -3
  49. package/out/project/_/memory.html +1 -1
  50. package/out/project/_/memory.txt +12 -12
  51. package/out/project/_/queue/__next._full.txt +12 -12
  52. package/out/project/_/queue/__next._head.txt +4 -4
  53. package/out/project/_/queue/__next._index.txt +6 -6
  54. package/out/project/_/queue/__next._tree.txt +2 -2
  55. package/out/project/_/queue/__next.project.$d$id.queue.__PAGE__.txt +3 -3
  56. package/out/project/_/queue/__next.project.$d$id.queue.txt +3 -3
  57. package/out/project/_/queue/__next.project.$d$id.txt +3 -3
  58. package/out/project/_/queue/__next.project.txt +3 -3
  59. package/out/project/_/queue.html +1 -1
  60. package/out/project/_/queue.txt +12 -12
  61. package/out/project/_.html +1 -1
  62. package/out/project/_.txt +12 -12
  63. package/out/settings/__next._full.txt +12 -12
  64. package/out/settings/__next._head.txt +4 -4
  65. package/out/settings/__next._index.txt +6 -6
  66. package/out/settings/__next._tree.txt +2 -2
  67. package/out/settings/__next.settings.__PAGE__.txt +3 -3
  68. package/out/settings/__next.settings.txt +3 -3
  69. package/out/settings.html +1 -1
  70. package/out/settings.txt +12 -12
  71. package/out/setup/__next._full.txt +12 -12
  72. package/out/setup/__next._head.txt +4 -4
  73. package/out/setup/__next._index.txt +6 -6
  74. package/out/setup/__next._tree.txt +2 -2
  75. package/out/setup/__next.setup.__PAGE__.txt +3 -3
  76. package/out/setup/__next.setup.txt +3 -3
  77. package/out/setup.html +1 -1
  78. package/out/setup.txt +12 -12
  79. package/out/sounds/alert-tone.mp3 +0 -0
  80. package/out/sounds/click.mp3 +0 -0
  81. package/out/sounds/pluck.mp3 +0 -0
  82. package/out/sounds/soft-chime.mp3 +0 -0
  83. package/out/sounds/warm-bell.mp3 +0 -0
  84. package/package.json +5 -2
  85. package/server/config.js +43 -1
  86. package/server/index.js +268 -21
  87. package/server/routes.js +1103 -7
  88. package/templates/CLAUDE.md +1 -0
  89. package/templates/OVERNIGHT-QUEUE.md +1 -1
  90. package/templates/seeds/head.AGENTS.md +21 -2
  91. package/out/_next/static/chunks/0-yus965h3bk_.js +0 -24
  92. package/out/_next/static/chunks/0caq73v0knw_w.js +0 -1
  93. package/out/_next/static/chunks/0d.f~y5jeh785.css +0 -2
  94. package/out/_next/static/chunks/0md7hgvwnovzq.js +0 -1
  95. package/out/_next/static/chunks/0spbjcw4anq15.js +0 -1
  96. /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → 6uvV3nUfwr_t_JKrZJSP8}/_buildManifest.js +0 -0
  97. /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → 6uvV3nUfwr_t_JKrZJSP8}/_clientMiddlewareManifest.js +0 -0
  98. /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → 6uvV3nUfwr_t_JKrZJSP8}/_ssgManifest.js +0 -0
package/server/routes.js CHANGED
@@ -43,7 +43,17 @@ function writeConfigFile(cfg) {
43
43
  router.get("/api/config", (_req, res) => {
44
44
  try {
45
45
  const raw = fs.readFileSync(CONFIG_PATH, "utf-8");
46
- res.json(JSON.parse(raw));
46
+ const parsed = JSON.parse(raw);
47
+ // #409 / quadwork#273: overlay the sanitized operator_name so
48
+ // the chat panel's self-message filter compares against the same
49
+ // sender /api/chat actually stamps. The on-disk file keeps the
50
+ // raw value the operator typed (so a future feature can show
51
+ // both raw + effective), but every reader sees the effective
52
+ // value here — including SettingsPage, which now reflects what
53
+ // chat actually sends. This also makes a hand-edited file with
54
+ // garbage characters self-correct visibly on next reload.
55
+ parsed.operator_name = sanitizeOperatorName(parsed.operator_name);
56
+ res.json(parsed);
47
57
  } catch (err) {
48
58
  if (err.code === "ENOENT") return res.json(DEFAULT_CONFIG);
49
59
  res.status(500).json({ error: "Failed to read config", detail: err.message });
@@ -68,7 +78,7 @@ router.put("/api/config", (req, res) => {
68
78
 
69
79
  // ─── Chat (AgentChattr proxy) ──────────────────────────────────────────────
70
80
 
71
- const { resolveProjectChattr } = require("./config");
81
+ const { resolveProjectChattr, sanitizeOperatorName } = require("./config");
72
82
  const { installAgentChattr, findAgentChattr } = require("./install-agentchattr");
73
83
 
74
84
  /**
@@ -174,6 +184,636 @@ function sendViaWebSocket(baseUrl, sessionToken, message) {
174
184
  });
175
185
  }
176
186
 
187
+ /**
188
+ * #403 / quadwork#274: send an arbitrary AC ws event (not a chat
189
+ * message). Used for `update_settings` so the loop guard widget can
190
+ * push the new max_agent_hops to the running AgentChattr without a
191
+ * full restart. Mirrors sendViaWebSocket but lets the caller pick
192
+ * the event type.
193
+ */
194
+ function sendWsEvent(baseUrl, sessionToken, event) {
195
+ return new Promise((resolve, reject) => {
196
+ const wsUrl = `${baseUrl.replace(/^http/, "ws")}/ws?token=${encodeURIComponent(sessionToken || "")}`;
197
+ const ws = new NodeWebSocket(wsUrl);
198
+ let settled = false;
199
+ const finish = (err, value) => {
200
+ if (settled) return;
201
+ settled = true;
202
+ try { ws.close(); } catch {}
203
+ if (err) reject(err); else resolve(value);
204
+ };
205
+ const giveUp = setTimeout(() => finish(new Error("websocket send timeout")), 4000);
206
+ ws.on("open", () => {
207
+ try {
208
+ ws.send(JSON.stringify(event));
209
+ setTimeout(() => { clearTimeout(giveUp); finish(null, { ok: true }); }, 250);
210
+ } catch (err) { clearTimeout(giveUp); finish(err); }
211
+ });
212
+ ws.on("error", (err) => { clearTimeout(giveUp); finish(err); });
213
+ ws.on("close", (code, reason) => {
214
+ if (!settled && code === 4003) {
215
+ clearTimeout(giveUp);
216
+ const msg = (reason && reason.toString()) || "forbidden: invalid session token";
217
+ const e = new Error(msg);
218
+ e.code = "EAGENTCHATTR_401";
219
+ finish(e);
220
+ }
221
+ });
222
+ });
223
+ }
224
+
225
+ // #403 / quadwork#274: read/write the loop guard for a given project.
226
+ // Source of truth at rest is the project's config.toml [routing]
227
+ // max_agent_hops. The PUT also pushes the value to the running AC via
228
+ // `update_settings` so the change is live without a daemon restart.
229
+ // Resolve the per-project config.toml path through resolveProjectChattr
230
+ // so we honor `project.agentchattr_dir` (web wizard sets this; legacy
231
+ // imports can have arbitrary paths) and don't drift from the rest of
232
+ // the codebase that already goes through that helper.
233
+ function resolveProjectConfigToml(projectId) {
234
+ const resolved = resolveProjectChattr(projectId);
235
+ if (!resolved || !resolved.dir) return null;
236
+ return path.join(resolved.dir, "config.toml");
237
+ }
238
+
239
+ router.get("/api/loop-guard", (req, res) => {
240
+ const projectId = req.query.project;
241
+ if (!projectId) return res.status(400).json({ error: "Missing project" });
242
+ const tomlPath = resolveProjectConfigToml(projectId);
243
+ if (!tomlPath || !fs.existsSync(tomlPath)) return res.json({ value: 30, source: "default" });
244
+ try {
245
+ const content = fs.readFileSync(tomlPath, "utf-8");
246
+ const m = content.match(/^\s*max_agent_hops\s*=\s*(\d+)/m);
247
+ const value = m ? parseInt(m[1], 10) : 30;
248
+ res.json({ value, source: m ? "toml" : "default" });
249
+ } catch (err) {
250
+ res.status(500).json({ error: "Failed to read config.toml", detail: err.message });
251
+ }
252
+ });
253
+
254
+ router.put("/api/loop-guard", async (req, res) => {
255
+ const projectId = req.query.project || req.body?.project;
256
+ if (!projectId) return res.status(400).json({ error: "Missing project" });
257
+ const raw = req.body?.value;
258
+ const value = typeof raw === "number" ? raw : parseInt(raw, 10);
259
+ // AC's update_settings handler clamps to [1, 50]; mirror that
260
+ // here so we don't write a value AC will silently rewrite.
261
+ if (!Number.isInteger(value) || value < 4 || value > 50) {
262
+ return res.status(400).json({ error: "value must be an integer between 4 and 50" });
263
+ }
264
+
265
+ // 1. Persist to config.toml so the next restart picks it up.
266
+ const tomlPath = resolveProjectConfigToml(projectId);
267
+ if (!tomlPath || !fs.existsSync(tomlPath)) {
268
+ return res.status(404).json({ error: "config.toml not found for project" });
269
+ }
270
+ // Capture the previous value before rewriting so we can decide
271
+ // whether the /continue auto-resume should fire (only when the
272
+ // operator is RAISING the limit — lowering it means they want
273
+ // the runaway loop to stay paused).
274
+ let previousValue = null;
275
+ try {
276
+ const previousContent = fs.readFileSync(tomlPath, "utf-8");
277
+ const prevMatch = previousContent.match(/^\s*max_agent_hops\s*=\s*(\d+)/m);
278
+ if (prevMatch) previousValue = parseInt(prevMatch[1], 10);
279
+ } catch {
280
+ // fall through — previousValue stays null, auto-resume will skip
281
+ }
282
+ try {
283
+ let content = fs.readFileSync(tomlPath, "utf-8");
284
+ if (/^\s*max_agent_hops\s*=/m.test(content)) {
285
+ content = content.replace(/^\s*max_agent_hops\s*=.*$/m, `max_agent_hops = ${value}`);
286
+ } else if (/^\s*\[routing\]/m.test(content)) {
287
+ // Section exists but the key doesn't — append the key on the
288
+ // line right after the [routing] header to keep it scoped.
289
+ content = content.replace(/^(\s*\[routing\]\s*\n)/m, `$1max_agent_hops = ${value}\n`);
290
+ } else {
291
+ const trailing = content.endsWith("\n") ? "" : "\n";
292
+ content += `${trailing}\n[routing]\ndefault = "none"\nmax_agent_hops = ${value}\n`;
293
+ }
294
+ fs.writeFileSync(tomlPath, content);
295
+ } catch (err) {
296
+ return res.status(500).json({ error: "Failed to write config.toml", detail: err.message });
297
+ }
298
+
299
+ // 2. Best-effort push to the running AC so the change is live.
300
+ // On stale-token (4003 → EAGENTCHATTR_401) recover the same way
301
+ // /api/chat does (#230): re-sync the session token from AC and
302
+ // retry once. Other failures stay non-fatal — the persisted value
303
+ // still takes effect on next AC restart.
304
+ //
305
+ // #417 / quadwork#309: the update_settings ws event correctly
306
+ // updates router.max_hops in the running AC (verified in AC's
307
+ // app.py:1249), AND writes settings.json via _save_settings. But
308
+ // AC's router stays paused once it has tripped the guard — raising
309
+ // max_hops at runtime does NOT resurrect an already-paused channel
310
+ // (router.py:76-77 → `paused = True`). The operator typically
311
+ // raises the limit precisely BECAUSE the channel is stuck paused,
312
+ // so we immediately follow the update_settings event with a
313
+ // `/continue` chat message (the same path AC's own slash command
314
+ // handler uses at app.py:1106-1110) to resume routing. This is the
315
+ // whole fix: the previous version updated max_hops live but left
316
+ // the channel frozen, which made the widget look like a no-op.
317
+ let live = false;
318
+ let autoResumed = false;
319
+ // Only auto-resume when ALL of:
320
+ // (a) operator is RAISING the limit (lowering = "make it
321
+ // stricter", must leave a paused runaway alone)
322
+ // (b) the router is currently paused (AC's continue_routing
323
+ // resets hop_count + paused + guard_emitted unconditionally,
324
+ // so firing it on an actively-running chain would silently
325
+ // extend the chain beyond the new limit — t2a finding)
326
+ // (c) previousValue is known (null means we can't prove it's a
327
+ // raise, so err on the side of not touching router state)
328
+ const isRaising = previousValue !== null && value > previousValue;
329
+ const ensureLive = async (sessionToken) => {
330
+ await sendWsEvent(base, sessionToken, { type: "update_settings", data: { max_agent_hops: value } });
331
+ if (isRaising) {
332
+ // Check AC's /api/status before firing /continue so we don't
333
+ // reset hop_count on a running (unpaused) chain. The endpoint
334
+ // exposes `paused: true` iff ANY channel currently paused.
335
+ let isPaused = false;
336
+ try {
337
+ // AC's security middleware (app.py:212-224) only accepts
338
+ // bearer auth for /api/messages, /api/send, and /api/rules/*.
339
+ // /api/status requires x-session-token header (or ?token=),
340
+ // so pass that instead — a bearer header silently 403s and
341
+ // leaves isPaused stuck at false, defeating the gate.
342
+ const statusUrl = `${base}/api/status`;
343
+ const statusRes = await fetch(statusUrl, {
344
+ headers: sessionToken ? { "x-session-token": sessionToken } : {},
345
+ signal: AbortSignal.timeout(5000),
346
+ });
347
+ if (statusRes.ok) {
348
+ const statusJson = await statusRes.json();
349
+ isPaused = !!(statusJson && statusJson.paused);
350
+ }
351
+ } catch {
352
+ // Status fetch failed — err toward "don't auto-resume". The
353
+ // operator can always type /continue manually.
354
+ }
355
+ if (isPaused) {
356
+ // Resume paused channels. /continue is routed by AC's ws
357
+ // message handler when the buffer starts with /continue;
358
+ // the handler calls router.continue_routing() which
359
+ // unpauses AND resets hop_count — which is why we gate on
360
+ // isPaused to avoid wiping the counter on a live chain.
361
+ await sendWsEvent(base, sessionToken, { type: "message", text: "/continue", channel: "general", sender: "user" });
362
+ autoResumed = true;
363
+ }
364
+ }
365
+ live = true;
366
+ };
367
+ let base = null;
368
+ try {
369
+ const chattr = getChattrConfig(projectId);
370
+ base = chattr.url;
371
+ const sessionToken = chattr.token;
372
+ if (base) {
373
+ try {
374
+ await ensureLive(sessionToken);
375
+ } catch (err) {
376
+ if (err && err.code === "EAGENTCHATTR_401") {
377
+ console.warn(`[loop-guard] ws auth failed for ${projectId}, re-syncing session token and retrying...`);
378
+ try { await syncChattrToken(projectId); }
379
+ catch (syncErr) { console.warn(`[loop-guard] syncChattrToken failed: ${syncErr.message}`); }
380
+ const { token: refreshed } = getChattrConfig(projectId);
381
+ if (refreshed && refreshed !== sessionToken) {
382
+ try {
383
+ await ensureLive(refreshed);
384
+ } catch (retryErr) {
385
+ console.warn(`[loop-guard] retry after token resync failed: ${retryErr.message || retryErr}`);
386
+ }
387
+ }
388
+ } else {
389
+ throw err;
390
+ }
391
+ }
392
+ }
393
+ } catch (err) {
394
+ console.warn(`[loop-guard] live update failed for ${projectId}: ${err.message || err}`);
395
+ }
396
+
397
+ res.json({ ok: true, value, live, previousValue, resumed: autoResumed });
398
+ });
399
+
400
+ // #412 / quadwork#279: project history export + import.
401
+ //
402
+ // Export proxies AC's /api/messages for the project channel and
403
+ // wraps the array in a small metadata envelope so future imports
404
+ // can warn on project-id mismatch and so a future schema bump can
405
+ // be detected client-side.
406
+ //
407
+ // Import accepts the same envelope, validates the shape + size,
408
+ // and replays each message back into the project's AgentChattr
409
+ // instance via sendViaWebSocket — preserving the original sender
410
+ // field for cross-tool consistency. Originals' message IDs are NOT
411
+ // preserved (AC re-assigns on insert), which is a known v1 limit
412
+ // and matches the issue's "AgentChattr will tell us" note.
413
+
414
+ const PROJECT_HISTORY_VERSION = 1;
415
+ const PROJECT_HISTORY_MAX_BYTES = 10 * 1024 * 1024; // 10 MB cap per issue
416
+ const PROJECT_HISTORY_REPLAY_DELAY_MS = 25; // pace AC ws inserts
417
+
418
+ // #414 / quadwork#297: reject imports whose messages claim a
419
+ // reserved agent / system sender by default. This closes the only
420
+ // path in QuadWork that lets a client-supplied sender reach AC
421
+ // (every other route hardcodes / sanitizes to operator). Mirrors
422
+ // the RESERVED_OPERATOR_NAMES denylist from sanitizeOperatorName so
423
+ // the same identities are blocked across the codebase.
424
+ const RESERVED_HISTORY_SENDERS = new Set([
425
+ "head",
426
+ "dev",
427
+ "reviewer1",
428
+ "reviewer2",
429
+ "t1",
430
+ "t2a",
431
+ "t2b",
432
+ "t3",
433
+ "system",
434
+ ]);
435
+
436
+ router.get("/api/project-history", async (req, res) => {
437
+ const projectId = req.query.project;
438
+ if (!projectId) return res.status(400).json({ error: "Missing project" });
439
+ const { url: base, token: sessionToken } = getChattrConfig(projectId);
440
+ if (!base) return res.status(400).json({ error: "No AgentChattr configured for project" });
441
+ try {
442
+ // AC's /api/messages accepts a bearer token in the Authorization
443
+ // header; the session token is what the chat panel already uses.
444
+ const target = `${base}/api/messages?channel=general&limit=100000`;
445
+ const r = await fetch(target, {
446
+ headers: sessionToken ? { Authorization: `Bearer ${sessionToken}` } : {},
447
+ // Cap the AC fetch at 30s so a hung daemon doesn't park the
448
+ // export request indefinitely.
449
+ signal: AbortSignal.timeout(30000),
450
+ });
451
+ if (!r.ok) {
452
+ const detail = await r.text().catch(() => "");
453
+ return res.status(502).json({ error: `AgentChattr /api/messages returned ${r.status}`, detail: detail.slice(0, 200) });
454
+ }
455
+ const raw = await r.json();
456
+ // AC returns either a bare array or { messages: [...] } depending
457
+ // on version — handle both.
458
+ const messages = Array.isArray(raw) ? raw : Array.isArray(raw && raw.messages) ? raw.messages : [];
459
+ res.json({
460
+ version: PROJECT_HISTORY_VERSION,
461
+ project_id: projectId,
462
+ exported_at: new Date().toISOString(),
463
+ message_count: messages.length,
464
+ messages,
465
+ });
466
+ } catch (err) {
467
+ res.status(502).json({ error: "Project history export failed", detail: err.message || String(err) });
468
+ }
469
+ });
470
+
471
+ // Global express.json() in server/index.js is bumped to 10mb to
472
+ // cover this route — see the comment there. The route handler still
473
+ // double-checks the byte size of the parsed body below as a defense
474
+ // in depth (e.g. if a future change scopes the global parser back
475
+ // down without updating this comment).
476
+ router.post("/api/project-history", async (req, res) => {
477
+ const projectId = req.query.project || req.body?.project_id;
478
+ if (!projectId) return res.status(400).json({ error: "Missing project" });
479
+
480
+ const body = req.body;
481
+ if (!body || typeof body !== "object") {
482
+ return res.status(400).json({ error: "Invalid JSON body" });
483
+ }
484
+ // Body size guard — express.json() respects its own limit too,
485
+ // but stamp the explicit cap from the issue here so the error
486
+ // message is operator-readable.
487
+ try {
488
+ const approxBytes = Buffer.byteLength(JSON.stringify(body));
489
+ if (approxBytes > PROJECT_HISTORY_MAX_BYTES) {
490
+ return res.status(413).json({ error: `History file too large (${approxBytes} bytes; limit ${PROJECT_HISTORY_MAX_BYTES})` });
491
+ }
492
+ } catch {
493
+ // JSON.stringify circular — already invalid, fall through
494
+ }
495
+
496
+ if (!Array.isArray(body.messages)) {
497
+ return res.status(400).json({ error: "Missing or invalid 'messages' array" });
498
+ }
499
+ if (body.version && body.version !== PROJECT_HISTORY_VERSION) {
500
+ return res.status(400).json({ error: `Unsupported export version ${body.version} (expected ${PROJECT_HISTORY_VERSION})` });
501
+ }
502
+ // Soft project-id mismatch warning. The client UI should confirm
503
+ // before POSTing when the IDs differ; if it didn't (e.g. curl),
504
+ // require an explicit override flag so we can't silently merge
505
+ // foreign chat into the wrong project.
506
+ if (body.project_id && body.project_id !== projectId && body.allow_project_mismatch !== true) {
507
+ return res.status(409).json({
508
+ error: `Project mismatch: file is from '${body.project_id}', target is '${projectId}'. Resend with allow_project_mismatch=true to override.`,
509
+ });
510
+ }
511
+
512
+ // #414 / quadwork#297 — Issue 1: agent/system sender denylist.
513
+ // Pre-scan the messages array; if any line claims a reserved
514
+ // identity, reject the entire import unless the operator opted
515
+ // in via allow_agent_senders=true. Default-safe so a leaked or
516
+ // crafted export file can't post as Head from the dashboard.
517
+ if (body.allow_agent_senders !== true) {
518
+ const offenders = new Set();
519
+ for (const m of body.messages) {
520
+ if (m && typeof m === "object" && typeof m.sender === "string") {
521
+ if (RESERVED_HISTORY_SENDERS.has(m.sender.toLowerCase())) {
522
+ offenders.add(m.sender);
523
+ if (offenders.size >= 5) break;
524
+ }
525
+ }
526
+ }
527
+ if (offenders.size > 0) {
528
+ return res.status(400).json({
529
+ error: `Import contains messages attributed to reserved agent/system identities: ${[...offenders].join(", ")}. Resend with allow_agent_senders=true to override (e.g. legitimate disaster-recovery restore).`,
530
+ });
531
+ }
532
+ }
533
+
534
+ // #414 / quadwork#297 — Issue 2: duplicate import detection.
535
+ // Persist the most recent imported `exported_at` on the project
536
+ // entry in config.json. If the file's marker matches, refuse the
537
+ // import unless allow_duplicate=true. Re-importing the same file
538
+ // would otherwise replay every message a second time and double
539
+ // the chat history.
540
+ const cfg = readConfigFile();
541
+ const project = cfg.projects?.find((p) => p.id === projectId);
542
+ const incomingExportedAt = typeof body.exported_at === "string" ? body.exported_at : null;
543
+ if (body.allow_duplicate !== true && project && incomingExportedAt) {
544
+ if (project.history_last_imported_at === incomingExportedAt) {
545
+ return res.status(409).json({
546
+ error: `This export was already imported (exported_at=${incomingExportedAt}). Resend with allow_duplicate=true to import again.`,
547
+ });
548
+ }
549
+ }
550
+
551
+ const { url: base, token: sessionToken } = getChattrConfig(projectId);
552
+ if (!base) return res.status(400).json({ error: "No AgentChattr configured for project" });
553
+
554
+ // Replay each message via the existing ws send helper. Preserve
555
+ // the original sender so the imported transcript still attributes
556
+ // each line correctly. Pace the writes so AC's ws handler isn't
557
+ // overloaded on a multi-thousand-message import.
558
+ //
559
+ // SECURITY NOTE: This deliberately bypasses /api/chat's #230/#288
560
+ // sanitize-as-user lockdown — the imported sender field is sent
561
+ // straight to AC's ws, so a crafted import file CAN post as
562
+ // `head` / `dev` / etc. That's intentional: imports must round-
563
+ // trip the original attribution to be useful (otherwise every
564
+ // restored message would say `user` and the transcript would be
565
+ // worthless). The trade-off is acceptable because the only entry
566
+ // point is an authenticated dashboard operator picking a file by
567
+ // hand and clicking through the project-mismatch confirm. Don't
568
+ // expose this route from a less-trusted surface without revisiting.
569
+ let imported = 0;
570
+ let skipped = 0;
571
+ const errors = [];
572
+ for (const m of body.messages) {
573
+ if (!m || typeof m !== "object" || typeof m.text !== "string" || !m.text) {
574
+ skipped++;
575
+ continue;
576
+ }
577
+ const msg = {
578
+ text: m.text,
579
+ channel: typeof m.channel === "string" && m.channel ? m.channel : "general",
580
+ sender: typeof m.sender === "string" && m.sender ? m.sender : "user",
581
+ };
582
+ try {
583
+ await sendViaWebSocket(base, sessionToken, msg);
584
+ imported++;
585
+ } catch (err) {
586
+ errors.push(`#${m.id ?? "?"}: ${err.message || String(err)}`);
587
+ // Stop on the first error to avoid spamming AC if its ws is down.
588
+ if (errors.length > 5) break;
589
+ }
590
+ // Tiny delay between sends — AC's ws handler can keep up but
591
+ // 10k messages back-to-back hit the recv buffer hard.
592
+ if (PROJECT_HISTORY_REPLAY_DELAY_MS > 0) {
593
+ await new Promise((r) => setTimeout(r, PROJECT_HISTORY_REPLAY_DELAY_MS));
594
+ }
595
+ }
596
+ // #414 / quadwork#297 — Issue 2: stamp the import marker on the
597
+ // project so a re-import of the same file is caught next time.
598
+ // Only update on a successful (no errors) replay so a half-broken
599
+ // import can be retried without the duplicate guard tripping.
600
+ if (incomingExportedAt && errors.length === 0 && project) {
601
+ project.history_last_imported_at = incomingExportedAt;
602
+ try { writeConfigFile(cfg); }
603
+ catch (err) { console.warn(`[history] failed to persist history_last_imported_at: ${err.message || err}`); }
604
+ }
605
+
606
+ res.json({ ok: errors.length === 0, imported, skipped, total: body.messages.length, errors });
607
+ });
608
+
609
+ // #424 / quadwork#304 Phase 4: list + restore auto-snapshots.
610
+ // snapshotProjectHistory() in server/index.js writes envelope
611
+ // files to ~/.quadwork/{id}/history-snapshots/{ISO}.json before
612
+ // destructive restart/update operations. These endpoints let the
613
+ // Project History widget surface them with a restore button so
614
+ // the operator can roll back a bad /clear or botched update.
615
+ router.get("/api/project-history/snapshots", (req, res) => {
616
+ const projectId = req.query.project;
617
+ if (!projectId) return res.status(400).json({ error: "Missing project" });
618
+ const snapDir = path.join(CONFIG_DIR, projectId, "history-snapshots");
619
+ if (!fs.existsSync(snapDir)) return res.json({ snapshots: [] });
620
+ try {
621
+ const entries = fs.readdirSync(snapDir)
622
+ .filter((f) => f.endsWith(".json"))
623
+ .map((f) => {
624
+ const st = fs.statSync(path.join(snapDir, f));
625
+ return { name: f, size: st.size, mtime: st.mtimeMs };
626
+ })
627
+ .sort((a, b) => b.mtime - a.mtime);
628
+ res.json({ snapshots: entries });
629
+ } catch (err) {
630
+ res.status(500).json({ error: "Failed to list snapshots", detail: err.message });
631
+ }
632
+ });
633
+
634
+ router.post("/api/project-history/restore", async (req, res) => {
635
+ const projectId = req.query.project;
636
+ const name = req.query.name || req.body?.name;
637
+ if (!projectId || !name) return res.status(400).json({ error: "Missing project or name" });
638
+ // Prevent path traversal — only allow basenames from the snapshot
639
+ // directory; reject anything with a separator or ".." segment.
640
+ if (name !== path.basename(name) || name.includes("..") || !name.endsWith(".json")) {
641
+ return res.status(400).json({ error: "Invalid snapshot name" });
642
+ }
643
+ const snapPath = path.join(CONFIG_DIR, projectId, "history-snapshots", name);
644
+ if (!fs.existsSync(snapPath)) {
645
+ return res.status(404).json({ error: "Snapshot not found" });
646
+ }
647
+ let body;
648
+ try {
649
+ const text = fs.readFileSync(snapPath, "utf-8");
650
+ body = JSON.parse(text);
651
+ } catch (err) {
652
+ return res.status(500).json({ error: "Failed to read snapshot", detail: err.message });
653
+ }
654
+ // Post the snapshot back through the existing import endpoint
655
+ // with both bypass flags — the snapshot contains real agent
656
+ // senders (so allow_agent_senders) and may match a previous
657
+ // restore's exported_at (so allow_duplicate). This is the
658
+ // legitimate disaster-recovery case the #297 denylist expected.
659
+ try {
660
+ const cfg = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf-8"));
661
+ const qwPort = cfg.port || 8400;
662
+ const r = await fetch(`http://127.0.0.1:${qwPort}/api/project-history?project=${encodeURIComponent(projectId)}`, {
663
+ method: "POST",
664
+ headers: { "Content-Type": "application/json" },
665
+ body: JSON.stringify({ ...body, allow_agent_senders: true, allow_duplicate: true }),
666
+ });
667
+ const data = await r.json().catch(() => null);
668
+ if (!r.ok) {
669
+ return res.status(r.status).json(data || { error: `import returned ${r.status}` });
670
+ }
671
+ res.json({ ok: true, ...(data || {}) });
672
+ } catch (err) {
673
+ res.status(502).json({ error: "Restore failed", detail: err.message });
674
+ }
675
+ });
676
+
677
+ // #430 / quadwork#312: AI team work-hours tracking.
678
+ //
679
+ // The frontend's TerminalGrid detects per-agent activity transitions
680
+ // (idle → active, active → idle) via the existing activity ref and
681
+ // POSTs them to /api/activity/log. We buffer `start` events in
682
+ // memory keyed by `${project}/${agent}`; an `end` event looks up the
683
+ // matching buffered start, computes the duration, and appends a
684
+ // complete session row to ~/.quadwork/{project}/activity.jsonl.
685
+ //
686
+ // /api/activity/stats aggregates across all projects with a 30s
687
+ // cache so the dashboard can poll it every minute without thrashing
688
+ // the filesystem.
689
+
690
+ const _activityStarts = new Map(); // `${project}/${agent}` → startTimestamp
691
+ const _activityStatsCache = { ts: 0, data: null };
692
+ const ACTIVITY_STATS_TTL_MS = 30000;
693
+
694
+ function activityLogPath(projectId) {
695
+ return path.join(CONFIG_DIR, projectId, "activity.jsonl");
696
+ }
697
+
698
// Record one activity transition for a project/agent pair.
// `start` buffers a timestamp in memory; `end` pairs with the
// buffered start, writes a completed session row to the project's
// activity.jsonl, and invalidates the stats cache. An `end` with no
// buffered start (e.g. after a server restart) is dropped silently.
router.post("/api/activity/log", (req, res) => {
  const body = req.body || {};
  const { project, agent, type, timestamp } = body;
  if (typeof project !== "string" || !project) {
    return res.status(400).json({ error: "Missing project" });
  }
  if (typeof agent !== "string" || !agent) {
    return res.status(400).json({ error: "Missing agent" });
  }
  if (type !== "start" && type !== "end") {
    return res.status(400).json({ error: "type must be start|end" });
  }
  // Trust the client timestamp only when it's a finite number.
  const eventTs =
    typeof timestamp === "number" && Number.isFinite(timestamp) ? timestamp : Date.now();
  const sessionKey = `${project}/${agent}`;

  if (type === "start") {
    // Keep only the first start per session — duplicate starts can
    // arrive if the frontend re-mounts mid-stream; the duration
    // should reflect the original onset.
    if (!_activityStarts.has(sessionKey)) {
      _activityStarts.set(sessionKey, eventTs);
    }
    return res.json({ ok: true });
  }

  // type === "end": pair with the buffered start.
  const startedAt = _activityStarts.get(sessionKey);
  if (startedAt === undefined) {
    // Orphan end (missed start, likely a server restart mid-session).
    // Drop it rather than fabricate a start timestamp.
    return res.json({ ok: true, dropped: "orphan" });
  }
  _activityStarts.delete(sessionKey);
  const row = {
    agent,
    start: startedAt,
    end: eventTs,
    duration_ms: Math.max(0, eventTs - startedAt),
  };
  try {
    const logFile = activityLogPath(project);
    fs.mkdirSync(path.dirname(logFile), { recursive: true });
    fs.appendFileSync(logFile, JSON.stringify(row) + "\n");
    // Invalidate the stats cache so the next read sees the new row.
    _activityStatsCache.ts = 0;
  } catch (err) {
    console.warn(`[activity] failed to append ${project}/${agent}: ${err.message || err}`);
  }
  res.json({ ok: true, duration_ms: row.duration_ms });
});
735
+
736
+ // Aggregate all activity.jsonl files under ~/.quadwork/*/activity.jsonl.
737
+ // `today`, `week`, `month` boundaries use the operator's local
738
+ // timezone rather than UTC — "this week" should mean the week the
739
+ // operator is living in, not a UTC-offset week that starts at
740
+ // 16:00 local time.
741
// Aggregate every registered project's activity.jsonl into
// today / week / month / total hour figures. Day/week/month
// boundaries use the operator's local timezone (see comment block
// above). Results are memoized in _activityStatsCache for
// ACTIVITY_STATS_TTL_MS.
//
// Returns { today, week, month, total, by_project } where all
// numeric values are hours rounded to 3 decimal places.
function computeActivityStats() {
  // Serve the cached payload while it's still fresh.
  if (Date.now() - _activityStatsCache.ts < ACTIVITY_STATS_TTL_MS && _activityStatsCache.data) {
    return _activityStatsCache.data;
  }
  // Shared rounding: milliseconds → hours with 3 decimals
  // (ms / 3600 = thousandths of an hour). This expression was
  // previously duplicated 8× across the per-project and grand totals.
  const msToHours = (ms) => Math.round(ms / 3600) / 1000;

  const now = new Date();
  const startOfToday = new Date(now.getFullYear(), now.getMonth(), now.getDate()).getTime();
  // Start of this week = local Monday 00:00. JS: getDay() → 0-Sun..6-Sat.
  const day = now.getDay();
  const mondayOffset = day === 0 ? -6 : 1 - day; // Sun → -6, Mon → 0, Tue → -1, …
  const startOfWeek = new Date(now.getFullYear(), now.getMonth(), now.getDate() + mondayOffset).getTime();
  const startOfMonth = new Date(now.getFullYear(), now.getMonth(), 1).getTime();

  const totals = { today_ms: 0, week_ms: 0, month_ms: 0, total_ms: 0 };
  const byProject = {};
  // #430 / quadwork#312: only count projects registered in
  // config.json, not every directory under ~/.quadwork/. Stray
  // folders from deleted / unconfigured projects must not inflate
  // the stats — that's explicit in #312's acceptance.
  let projectIds = [];
  try {
    const cfg = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf-8"));
    if (Array.isArray(cfg.projects)) {
      projectIds = cfg.projects.map((p) => p && p.id).filter((id) => typeof id === "string" && id);
    }
  } catch {
    // config unreadable → no projects → empty stats (safe fallback)
  }
  for (const projectId of projectIds) {
    const p = activityLogPath(projectId);
    if (!fs.existsSync(p)) continue;
    const projectTotals = { today_ms: 0, week_ms: 0, month_ms: 0, total_ms: 0 };
    let text;
    try { text = fs.readFileSync(p, "utf-8"); } catch { continue; }
    for (const line of text.split("\n")) {
      if (!line.trim()) continue;
      let row;
      try { row = JSON.parse(line); } catch { continue; }
      const d = row && typeof row.duration_ms === "number" ? row.duration_ms : 0;
      const start = row && typeof row.start === "number" ? row.start : 0;
      // Skip malformed rows (missing duration or start timestamp).
      if (d <= 0 || !start) continue;
      projectTotals.total_ms += d;
      if (start >= startOfToday) projectTotals.today_ms += d;
      if (start >= startOfWeek) projectTotals.week_ms += d;
      if (start >= startOfMonth) projectTotals.month_ms += d;
    }
    byProject[projectId] = {
      today: msToHours(projectTotals.today_ms),
      week: msToHours(projectTotals.week_ms),
      month: msToHours(projectTotals.month_ms),
      total: msToHours(projectTotals.total_ms),
    };
    totals.today_ms += projectTotals.today_ms;
    totals.week_ms += projectTotals.week_ms;
    totals.month_ms += projectTotals.month_ms;
    totals.total_ms += projectTotals.total_ms;
  }
  const data = {
    today: msToHours(totals.today_ms),
    week: msToHours(totals.week_ms),
    month: msToHours(totals.month_ms),
    total: msToHours(totals.total_ms),
    by_project: byProject,
  };
  _activityStatsCache.ts = Date.now();
  _activityStatsCache.data = data;
  return data;
}
808
+
809
// Dashboard poll endpoint: returns the (cached) aggregate stats.
router.get("/api/activity/stats", (_req, res) => {
  try {
    const stats = computeActivityStats();
    res.json(stats);
  } catch (err) {
    res.status(500).json({ error: "Failed to compute activity stats", detail: err.message });
  }
});
816
+
177
817
  router.post("/api/chat", async (req, res) => {
178
818
  const projectId = req.query.project || req.body.project;
179
819
  const { url: base, token: sessionToken } = getChattrConfig(projectId);
@@ -181,15 +821,38 @@ router.post("/api/chat", async (req, res) => {
181
821
 
182
822
  // #230: ignore any client-supplied sender. /api/chat is the
183
823
  // dashboard's send path, so the message must always be attributed
184
- // to "user". Forwarding `req.body.sender` would let any caller
185
- // hitting QuadWork's /api/chat impersonate an agent identity (t1,
186
- // t3, …) over the AgentChattr ws path, which the old /api/send
187
- // flow could not do.
824
+ // to a server-controlled value. Forwarding `req.body.sender` would
825
+ // let any caller hitting QuadWork's /api/chat impersonate an agent
826
+ // identity (t1, t3, …) over the AgentChattr ws path, which the
827
+ // old /api/send flow could not do.
828
+ //
829
+ // #405 / quadwork#278: read the operator's display name from the
830
+ // server-side config file rather than hardcoding "user". The
831
+ // sanitizer matches AC's registry name validator (1–32 alnum +
832
+ // dash + underscore) so even a hand-edited config can't post a
833
+ // value AC will reject (or impersonate an agent), and an empty /
834
+ // missing value falls back to "user".
835
+ let operatorSender = "user";
836
+ try {
837
+ const cfg = readConfigFile();
838
+ operatorSender = sanitizeOperatorName(cfg.operator_name);
839
+ } catch {
840
+ // non-fatal — fall through to "user"
841
+ }
842
+ // #397 / quadwork#262: pass reply_to through to AgentChattr so the
843
+ // dashboard's reply button mirrors AC's native threaded-reply
844
+ // behavior. Only forward when it's a real positive integer — guards
845
+ // against arbitrary client payloads.
846
+ const replyToRaw = req.body?.reply_to;
847
+ const replyTo = (typeof replyToRaw === "number" && Number.isInteger(replyToRaw) && replyToRaw > 0)
848
+ ? replyToRaw
849
+ : null;
188
850
  const message = {
189
851
  text: typeof req.body?.text === "string" ? req.body.text : "",
190
852
  channel: req.body?.channel || "general",
191
- sender: "user",
853
+ sender: operatorSender,
192
854
  attachments: Array.isArray(req.body?.attachments) ? req.body.attachments : [],
855
+ ...(replyTo !== null ? { reply_to: replyTo } : {}),
193
856
  };
194
857
  if (!message.text && message.attachments.length === 0) {
195
858
  return res.status(400).json({ error: "text or attachments required" });
@@ -378,6 +1041,434 @@ router.get("/api/github/prs", (req, res) => {
378
1041
  }
379
1042
  });
380
1043
 
1044
+ // #411 / quadwork#281: recently closed issues + merged PRs for the
1045
+ // "Recently closed" / "Recently merged" sub-sections under each
1046
+ // list in GitHubPanel. Limit 5 items each, ordered by closedAt
1047
+ // descending so the freshest activity sits at the top.
1048
+ // gh CLI's default ordering for `issue list --state closed` and
1049
+ // `pr list --state merged` is createdAt-desc, not closedAt/mergedAt-desc,
1050
+ // so a stale-but-recently-closed item can sit below a fresh-but-
1051
+ // older one. We pull a wider window and re-sort by close/merge time
1052
+ // before truncating to 5 to honor #281's "newest first" requirement.
1053
// Pull a wider window from gh (which orders by createdAt), then
// re-sort locally by closedAt/mergedAt before truncating…
const RECENT_FETCH_LIMIT = 20;
// …to the number of rows the GitHubPanel sub-sections actually show.
const RECENT_DISPLAY_LIMIT = 5;
1055
+
1056
// Recently closed issues for the "Recently closed" sub-section:
// fetch a wide window via gh, re-sort by closedAt descending, and
// return the top RECENT_DISPLAY_LIMIT rows.
router.get("/api/github/closed-issues", (req, res) => {
  const repo = getRepo(req.query.project || "");
  if (!repo) return res.status(400).json({ error: "No repo configured for project" });
  try {
    const raw = execFileSync(
      "gh",
      ["issue", "list", "-R", repo, "--state", "closed", "--json", "number,title,state,url,closedAt", "--limit", String(RECENT_FETCH_LIMIT)],
      { encoding: "utf-8", timeout: 15000 },
    );
    const parsed = JSON.parse(raw);
    let payload = parsed;
    if (Array.isArray(parsed)) {
      // Newest close first; items without a closedAt sink to the bottom.
      const closedTime = (it) => (it && it.closedAt ? Date.parse(it.closedAt) : 0);
      payload = parsed
        .slice()
        .sort((a, b) => closedTime(b) - closedTime(a))
        .slice(0, RECENT_DISPLAY_LIMIT);
    }
    res.json(payload);
  } catch (err) {
    res.status(502).json({ error: "gh issue list (closed) failed", detail: err.message });
  }
});
1081
+
1082
// Recently merged PRs for the "Recently merged" sub-section.
// `--state merged` filters server-side so closed-without-merge PRs
// never reach us; same fetch-wider-then-sort strategy as
// /api/github/closed-issues so the newest merge always wins.
router.get("/api/github/merged-prs", (req, res) => {
  const repo = getRepo(req.query.project || "");
  if (!repo) return res.status(400).json({ error: "No repo configured for project" });
  try {
    const raw = execFileSync(
      "gh",
      ["pr", "list", "-R", repo, "--state", "merged", "--json", "number,title,state,url,mergedAt,author", "--limit", String(RECENT_FETCH_LIMIT)],
      { encoding: "utf-8", timeout: 15000 },
    );
    const parsed = JSON.parse(raw);
    let payload = parsed;
    if (Array.isArray(parsed)) {
      // Newest merge first; items without a mergedAt sink to the bottom.
      const mergedTime = (it) => (it && it.mergedAt ? Date.parse(it.mergedAt) : 0);
      payload = parsed
        .slice()
        .sort((a, b) => mergedTime(b) - mergedTime(a))
        .slice(0, RECENT_DISPLAY_LIMIT);
    }
    res.json(payload);
  } catch (err) {
    res.status(502).json({ error: "gh pr list (merged) failed", detail: err.message });
  }
});
1111
+
1112
+ // #413 / quadwork#282: Current Batch Progress panel.
1113
+ //
1114
+ // Reads ~/.quadwork/{project}/OVERNIGHT-QUEUE.md, parses the
1115
+ // `## Active Batch` section for `Batch: N` + issue numbers, and
1116
+ // resolves each issue against GitHub (state + linked PR + review
1117
+ // counts) to compute a progress state. The 5 progress buckets are
1118
+ // deterministic from issue/PR state — no agent inference.
1119
+ //
1120
+ // Progress mapping (from upstream issue):
1121
+ // queued 0% issue exists, no linked PR
1122
+ // in_review 20% PR open, 0 approvals
1123
+ // approved1 50% PR open, 1 approval
1124
+ // ready 80% PR open, 2+ approvals
1125
+ // merged 100% PR merged AND issue closed
1126
+ //
1127
+ // Cached for 10s per project to avoid hammering gh on every poll.
1128
+
1129
// 10s render cache for /api/batch-progress (TTL: BATCH_PROGRESS_TTL_MS),
// holding the fully-resolved JSON payload per project.
const _batchProgressCache = new Map(); // projectId -> { ts, data }
1130
+
1131
+ // #429 / quadwork#316: persistent batch snapshot on disk so the
1132
+ // Batch Progress panel keeps showing merged items after Head moves
1133
+ // them from Active Batch to Done. The in-memory `_batchProgressCache`
1134
+ // above is a 10s TTL cache of the rendered rows; this new cache is
1135
+ // the *set of issue numbers* we currently consider "the active
1136
+ // batch", and it survives restarts + lives across polls.
1137
// Where the persistent "displayed batch" snapshot lives for a project.
function batchSnapshotPath(projectId) {
  const projectDir = path.join(CONFIG_DIR, projectId);
  return path.join(projectDir, "batch-progress-cache.json");
}
1140
// Load the persisted batch snapshot; null when missing or unparsable.
function readBatchSnapshot(projectId) {
  let raw;
  try {
    raw = fs.readFileSync(batchSnapshotPath(projectId), "utf-8");
  } catch {
    return null;
  }
  try {
    return JSON.parse(raw);
  } catch {
    return null;
  }
}
1147
// Persist the displayed-batch snapshot, creating the project dir if
// needed. Best-effort: write failures are swallowed because the
// panel can still render from the live queue parse.
function writeBatchSnapshot(projectId, snapshot) {
  try {
    const target = batchSnapshotPath(projectId);
    fs.mkdirSync(path.dirname(target), { recursive: true });
    fs.writeFileSync(target, JSON.stringify(snapshot));
  } catch {
    // Non-fatal — panel still works from the live parse.
  }
}
1156
+
1157
+ // Decide which batch to render, combining the live parse of
1158
+ // OVERNIGHT-QUEUE.md with the persistent snapshot. The snapshot is
1159
+ // replaced whenever a new batch starts (explicit Batch: N bump OR
1160
+ // the live Active Batch contains items the snapshot doesn't); in
1161
+ // all other cases the snapshot wins, so items Head moved to Done
1162
+ // stay visible until the operator starts the next batch.
1163
// Decide which batch to render, combining the live parse of
// OVERNIGHT-QUEUE.md with the persistent snapshot. The snapshot is
// replaced whenever a new batch starts (explicit Batch: N bump OR
// the live Active Batch contains items the snapshot doesn't); in
// all other cases the snapshot wins, so items Head moved to Done
// stay visible until the operator starts the next batch.
//
// Returns { batchNumber: number|null, issueNumbers: number[] }.
function resolveDisplayedBatch(queueText, projectId, { queueReadOk = true } = {}) {
  // Queue file deleted / unreadable → fall back to empty state per
  // #316's edge case. Returning the snapshot here would "heal" a
  // genuinely missing file into stale data the operator can't
  // reconcile without nuking ~/.quadwork/{id}/batch-progress-cache.json
  // manually.
  if (!queueReadOk) return { batchNumber: null, issueNumbers: [] };
  const current = parseActiveBatch(queueText);
  let snapshot = readBatchSnapshot(projectId);
  // Robustness fix: readBatchSnapshot returns whatever JSON the file
  // held, so a hand-edited / corrupted snapshot may lack a real
  // `issueNumbers` array — the previous code then threw on
  // `snapshot.issueNumbers.includes(...)`. Normalize malformed
  // snapshots up front instead of trusting their shape.
  if (snapshot) {
    if (!Array.isArray(snapshot.issueNumbers)) {
      snapshot = null;
    } else if (typeof snapshot.batchNumber !== "number") {
      // Treat a missing / non-numeric batch number as "unknown".
      snapshot = { ...snapshot, batchNumber: null };
    }
  }
  const hasExplicitBump =
    current.batchNumber !== null &&
    (!snapshot || snapshot.batchNumber === null || current.batchNumber > snapshot.batchNumber);
  const hasNewItems =
    current.issueNumbers.length > 0 &&
    (!snapshot || current.issueNumbers.some((n) => !snapshot.issueNumbers.includes(n)));
  let next;
  if (hasExplicitBump || hasNewItems) {
    // New batch started → live parse replaces the snapshot.
    next = { batchNumber: current.batchNumber, issueNumbers: current.issueNumbers.slice() };
  } else if (snapshot && snapshot.issueNumbers.length > 0) {
    // Same batch → snapshot wins so Done-moved items stay visible.
    next = {
      batchNumber: snapshot.batchNumber ?? null,
      issueNumbers: snapshot.issueNumbers.slice(),
    };
  } else {
    next = { batchNumber: current.batchNumber, issueNumbers: current.issueNumbers.slice() };
  }
  if (next.issueNumbers.length > 0) writeBatchSnapshot(projectId, next);
  return next;
}
1192
// Serve cached /api/batch-progress payloads for up to 10s per project
// so dashboard polling doesn't hammer gh.
const BATCH_PROGRESS_TTL_MS = 10000;
1193
+
1194
// Parse the `## Active Batch` section of OVERNIGHT-QUEUE.md into
// { batchNumber, issueNumbers }. Only that section is scanned, so a
// stray `#123` in Backlog or Done never leaks into the active list.
//
// Issue numbers are collected only from list-item-shaped lines —
// lines whose first content token (after an optional `-`/`*`/`1.`
// marker) is `#N` or `[#N]`. Prose references like
// "Tracking umbrella: #293" or "next after #294 merged" are rejected.
function parseActiveBatch(queueText) {
  if (typeof queueText !== "string" || !queueText) {
    return { batchNumber: null, issueNumbers: [] };
  }
  const sectionMatch = queueText.match(/##\s+Active Batch[\s\S]*?(?=\n##\s|$)/i);
  if (!sectionMatch) {
    return { batchNumber: null, issueNumbers: [] };
  }
  const section = sectionMatch[0];
  // `**Batch:** N` (bold markdown) takes precedence over bare `Batch: N`.
  const batchMatch =
    section.match(/\*\*Batch:\*\*\s*(\d+)/i) || section.match(/Batch:\s*(\d+)/i);
  // Accepted shapes: "- #295 …", "* #295 …", "1. #295 …", "#295 …",
  // "- [#295] …", "[#295] …". Anything else on the line is ignored.
  const ITEM_LINE_RE = /^\s*(?:[-*]\s+|\d+\.\s+)?\[?#(\d{1,6})\]?\b/;
  const issueNumbers = [];
  const seen = new Set();
  for (const line of section.split("\n")) {
    const hit = ITEM_LINE_RE.exec(line);
    if (!hit) continue;
    const num = Number.parseInt(hit[1], 10);
    if (seen.has(num)) continue; // de-dupe, preserving first-seen order
    seen.add(num);
    issueNumbers.push(num);
  }
  return {
    batchNumber: batchMatch ? Number.parseInt(batchMatch[1], 10) : null,
    issueNumbers,
  };
}
1238
+
1239
+ // #416 / quadwork#299: async variant used by the parallelized batch
1240
+ // progress fetcher. Wraps node's execFile in a promise.
1241
+ //
1242
+ // THROWS on subprocess failure (non-zero exit, timeout, JSON parse,
1243
+ // network) so progressForItemAsync can decide which subset of
1244
+ // failures should bubble up to the Promise.allSettled "fetch failed"
1245
+ // row vs. which should fall through to a softer state. The previous
1246
+ // catch-all-and-return-null contract collapsed real subprocess
1247
+ // errors into the "not found" branch, making the new failure-row
1248
+ // fallback unreachable for genuine command failures (t2a review).
1249
// Promisified execFile used by the async gh wrapper below.
const { execFile: _execFile } = require("child_process");
const _execFileAsync = require("util").promisify(_execFile);
// Run `gh <args>`, parse stdout as JSON, and return the value.
// 10s timeout per call. THROWS on any failure — non-zero exit,
// timeout, bad JSON — so the caller decides which failures bubble up
// to the Promise.allSettled "fetch failed" row (see comment block
// above for why the old return-null contract was dropped).
async function ghJsonExecAsync(args) {
  const { stdout } = await _execFileAsync("gh", args, { encoding: "utf-8", timeout: 10000 });
  return JSON.parse(stdout);
}
1255
+
1256
// Resolve one batch issue into a progress row using the #413/#282
// deterministic mapping:
//   queued 0% → in_review 20% → approved1 50% → ready 80% → merged 100%.
//
// Throws when the *issue* fetch fails (404 / network / auth /
// timeout) so the route's Promise.allSettled renders a "fetch
// failed" row; the follow-up PR fetch is soft and degrades to a
// partial "in_review" row instead.
async function progressForItemAsync(repo, issueNumber) {
  // Pull issue state + linked PRs in one call. closedByPullRequestsReferences
  // is gh's serializer for the GraphQL edge of the same name — only
  // present when a PR with `Fixes #N` / `Closes #N` (or the link UI)
  // targets the issue. This fetch is load-bearing: if gh can't read
  // the issue at all we can't compute a meaningful row, so let the
  // rejection propagate.
  const issue = await ghJsonExecAsync([
    "issue",
    "view",
    String(issueNumber),
    "-R",
    repo,
    "--json",
    "number,title,state,url,closedByPullRequestsReferences",
  ]);
  const linked = Array.isArray(issue.closedByPullRequestsReferences)
    ? issue.closedByPullRequestsReferences
    : [];
  // Pick the freshest linked PR (highest number) if there are multiple.
  const pr = linked.length > 0
    ? linked.slice().sort((a, b) => (b.number || 0) - (a.number || 0))[0]
    : null;
  // No linked PR yet — queued.
  if (!pr) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: issue.url,
      status: "queued",
      progress: 0,
      label: "Issue · queued",
    };
  }
  // Re-fetch the PR to get reviewDecision + reviews + state, since
  // the issue's linked-PR edge only carries number/state/url. This
  // fetch is intentionally soft: if gh glitches we still know the PR
  // exists and can render a partial "in_review" row, which beats
  // dropping the whole item to "fetch failed".
  let prData = null;
  try {
    prData = await ghJsonExecAsync([
      "pr",
      "view",
      String(pr.number),
      "-R",
      repo,
      "--json",
      "number,state,url,reviewDecision,reviews",
    ]);
  } catch {
    // soft fall-through to the in_review row below
  }
  if (!prData) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: pr.url || issue.url,
      pr_number: pr.number,
      status: "in_review",
      progress: 20,
      label: `PR #${pr.number} · waiting on review`,
    };
  }
  const merged = prData.state === "MERGED" && issue.state === "CLOSED";
  if (merged) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: prData.url || issue.url,
      pr_number: prData.number,
      status: "merged",
      progress: 100,
      label: "Merged ✓",
    };
  }
  // Count approvals as "latest *decisive* state per author". Sort by
  // submittedAt ascending first so the Map's last-write-wins lands
  // on the freshest review per author — gh's ordering is
  // chronological in practice but undocumented.
  const reviews = Array.isArray(prData.reviews) ? prData.reviews.slice() : [];
  reviews.sort((a, b) => {
    const ta = (a && a.submittedAt) ? Date.parse(a.submittedAt) : 0;
    const tb = (b && b.submittedAt) ? Date.parse(b.submittedAt) : 0;
    return ta - tb;
  });
  const latestByAuthor = new Map();
  for (const r of reviews) {
    const author = (r && r.author && r.author.login) || "";
    if (!author) continue;
    // Bug fix: only APPROVED / CHANGES_REQUESTED / DISMISSED change
    // an author's standing. On GitHub a later COMMENTED (or PENDING)
    // review does NOT revoke an approval, so it must not clobber the
    // author's decisive state here — the previous unconditional
    // `set` silently dropped real approvals.
    if (r.state === "APPROVED" || r.state === "CHANGES_REQUESTED" || r.state === "DISMISSED") {
      latestByAuthor.set(author, r.state);
    }
  }
  let approvalCount = 0;
  for (const state of latestByAuthor.values()) {
    if (state === "APPROVED") approvalCount++;
  }
  if (approvalCount >= 2) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: prData.url || issue.url,
      pr_number: prData.number,
      status: "ready",
      progress: 80,
      label: `PR #${prData.number} · 2 approvals · ready`,
    };
  }
  if (approvalCount === 1) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: prData.url || issue.url,
      pr_number: prData.number,
      status: "approved1",
      progress: 50,
      label: `PR #${prData.number} · 1 approval`,
    };
  }
  return {
    issue_number: issue.number,
    title: issue.title,
    url: prData.url || issue.url,
    pr_number: prData.number,
    status: "in_review",
    progress: 20,
    label: `PR #${prData.number} · waiting on review`,
  };
}
1393
+
1394
// One-line summary for the batch panel header, e.g.
// "2/5 merged · 1 ready to merge · 1 in review · 1 queued".
// Zero-count buckets (other than merged) are omitted; unknown
// statuses are not counted at all.
function summarizeItems(items) {
  const counts = { merged: 0, ready: 0, approved1: 0, in_review: 0, queued: 0 };
  for (const it of items) {
    if (Object.hasOwn(counts, it.status)) counts[it.status] += 1;
  }
  const parts = [`${counts.merged}/${items.length} merged`];
  if (counts.ready > 0) parts.push(`${counts.ready} ready to merge`);
  if (counts.approved1 > 0) parts.push(`${counts.approved1} needs 2nd approval`);
  if (counts.in_review > 0) parts.push(`${counts.in_review} in review`);
  if (counts.queued > 0) parts.push(`${counts.queued} queued`);
  return parts.join(" · ");
}
1410
+
1411
// Current Batch Progress endpoint (#413 / quadwork#282): resolve the
// displayed batch (snapshot-aware per #429/#316), fetch each item's
// GitHub state in parallel (#416/#299), and return the rendered rows
// plus a summary. Payload is cached 10s per project.
router.get("/api/batch-progress", async (req, res) => {
  const projectId = req.query.project;
  if (!projectId) return res.status(400).json({ error: "Missing project" });

  // Serve the render cache while fresh.
  const hit = _batchProgressCache.get(projectId);
  if (hit && Date.now() - hit.ts < BATCH_PROGRESS_TTL_MS) {
    return res.json(hit.data);
  }

  const repo = getRepo(projectId);
  if (!repo) return res.status(400).json({ error: "No repo configured for project" });

  const queuePath = path.join(CONFIG_DIR, projectId, "OVERNIGHT-QUEUE.md");
  let queueText = "";
  let queueReadOk = false;
  try {
    queueText = fs.readFileSync(queuePath, "utf-8");
    queueReadOk = true;
  } catch {
    // Missing / unreadable file — queueReadOk stays false so the
    // resolver bypasses the snapshot and returns the empty state
    // per #316's edge case.
  }

  // Snapshot-aware resolution keeps merged items visible after Head
  // moves them from Active Batch to Done, until a new batch starts.
  const { batchNumber, issueNumbers } = resolveDisplayedBatch(queueText, projectId, { queueReadOk });
  if (issueNumbers.length === 0) {
    const emptyData = { batch_number: batchNumber, items: [], summary: "", complete: false };
    _batchProgressCache.set(projectId, { ts: Date.now(), data: emptyData });
    return res.json(emptyData);
  }

  // Parallel per-item gh fetches (#416/#299): allSettled means one
  // failed item becomes a synthetic "fetch failed" row instead of
  // failing the whole response.
  const settled = await Promise.allSettled(
    issueNumbers.map((n) => progressForItemAsync(repo, n)),
  );
  const items = settled.map((r, i) =>
    r.status === "fulfilled"
      ? r.value
      : {
          issue_number: issueNumbers[i],
          title: `#${issueNumbers[i]} (fetch failed)`,
          url: null,
          status: "unknown",
          progress: 0,
          label: "fetch failed",
        },
  );
  const data = {
    batch_number: batchNumber,
    items,
    summary: summarizeItems(items),
    complete: items.length > 0 && items.every((it) => it.status === "merged"),
  };
  _batchProgressCache.set(projectId, { ts: Date.now(), data });
  res.json(data);
});
1471
+
381
1472
  // ─── Memory ────────────────────────────────────────────────────────────────
382
1473
 
383
1474
  function getProject(projectId) {
@@ -830,6 +1921,11 @@ router.post("/api/setup", (req, res) => {
830
1921
  const wtDir = path.join(parentDir, `${dirName}-${agent}`);
831
1922
  content += `[agents.${agent}]\ncommand = "${(backends && backends[agent]) || "claude"}"\ncwd = "${wtDir}"\ncolor = "${colors[i]}"\nlabel = "${agent.charAt(0).toUpperCase() + agent.slice(1)} ${labels[i]}"\nmcp_inject = "flag"\n\n`;
832
1923
  });
1924
+ // #403 / quadwork#274: raise the loop guard from AC's default
1925
+ // of 4 to 30 so autonomous PR review cycles (head→dev→re1+re2→
1926
+ // dev→head, ~5 hops) don't fire mid-batch and force the
1927
+ // operator to type /continue. AC clamps to [1, 50] internally.
1928
+ content += `[routing]\ndefault = "none"\nmax_agent_hops = 30\n\n`;
833
1929
  content += `[mcp]\nhttp_port = ${mcp_http}\nsse_port = ${mcp_sse}\n`;
834
1930
  fs.writeFileSync(tomlPath, content);
835
1931