quadwork 1.2.5 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/quadwork.js +102 -0
- package/out/404.html +1 -1
- package/out/__next.__PAGE__.txt +1 -1
- package/out/__next._full.txt +2 -2
- package/out/__next._head.txt +1 -1
- package/out/__next._index.txt +2 -2
- package/out/__next._tree.txt +2 -2
- package/out/_next/static/chunks/06mbme.sc_26-.css +2 -0
- package/out/_next/static/chunks/0omuxbg.tg-il.js +1 -0
- package/out/_next/static/chunks/{0-yus965h3bk_.js → 0swlbn4q4u71z.js} +15 -13
- package/out/_next/static/chunks/{16ell.n1p8o7d.js → 134b1p_egmf1c.js} +1 -1
- package/out/_not-found/__next._full.txt +2 -2
- package/out/_not-found/__next._head.txt +1 -1
- package/out/_not-found/__next._index.txt +2 -2
- package/out/_not-found/__next._not-found.__PAGE__.txt +1 -1
- package/out/_not-found/__next._not-found.txt +1 -1
- package/out/_not-found/__next._tree.txt +2 -2
- package/out/_not-found.html +1 -1
- package/out/_not-found.txt +2 -2
- package/out/app-shell/__next._full.txt +2 -2
- package/out/app-shell/__next._head.txt +1 -1
- package/out/app-shell/__next._index.txt +2 -2
- package/out/app-shell/__next._tree.txt +2 -2
- package/out/app-shell/__next.app-shell.__PAGE__.txt +1 -1
- package/out/app-shell/__next.app-shell.txt +1 -1
- package/out/app-shell.html +1 -1
- package/out/app-shell.txt +2 -2
- package/out/index.html +1 -1
- package/out/index.txt +2 -2
- package/out/project/_/__next._full.txt +3 -3
- package/out/project/_/__next._head.txt +1 -1
- package/out/project/_/__next._index.txt +2 -2
- package/out/project/_/__next._tree.txt +2 -2
- package/out/project/_/__next.project.$d$id.__PAGE__.txt +2 -2
- package/out/project/_/__next.project.$d$id.txt +1 -1
- package/out/project/_/__next.project.txt +1 -1
- package/out/project/_/memory/__next._full.txt +2 -2
- package/out/project/_/memory/__next._head.txt +1 -1
- package/out/project/_/memory/__next._index.txt +2 -2
- package/out/project/_/memory/__next._tree.txt +2 -2
- package/out/project/_/memory/__next.project.$d$id.memory.__PAGE__.txt +1 -1
- package/out/project/_/memory/__next.project.$d$id.memory.txt +1 -1
- package/out/project/_/memory/__next.project.$d$id.txt +1 -1
- package/out/project/_/memory/__next.project.txt +1 -1
- package/out/project/_/memory.html +1 -1
- package/out/project/_/memory.txt +2 -2
- package/out/project/_/queue/__next._full.txt +2 -2
- package/out/project/_/queue/__next._head.txt +1 -1
- package/out/project/_/queue/__next._index.txt +2 -2
- package/out/project/_/queue/__next._tree.txt +2 -2
- package/out/project/_/queue/__next.project.$d$id.queue.__PAGE__.txt +1 -1
- package/out/project/_/queue/__next.project.$d$id.queue.txt +1 -1
- package/out/project/_/queue/__next.project.$d$id.txt +1 -1
- package/out/project/_/queue/__next.project.txt +1 -1
- package/out/project/_/queue.html +1 -1
- package/out/project/_/queue.txt +2 -2
- package/out/project/_.html +1 -1
- package/out/project/_.txt +3 -3
- package/out/settings/__next._full.txt +3 -3
- package/out/settings/__next._head.txt +1 -1
- package/out/settings/__next._index.txt +2 -2
- package/out/settings/__next._tree.txt +2 -2
- package/out/settings/__next.settings.__PAGE__.txt +2 -2
- package/out/settings/__next.settings.txt +1 -1
- package/out/settings.html +1 -1
- package/out/settings.txt +3 -3
- package/out/setup/__next._full.txt +2 -2
- package/out/setup/__next._head.txt +1 -1
- package/out/setup/__next._index.txt +2 -2
- package/out/setup/__next._tree.txt +2 -2
- package/out/setup/__next.setup.__PAGE__.txt +1 -1
- package/out/setup/__next.setup.txt +1 -1
- package/out/setup.html +1 -1
- package/out/setup.txt +2 -2
- package/out/sounds/alert-tone.mp3 +0 -0
- package/out/sounds/click.mp3 +0 -0
- package/out/sounds/pluck.mp3 +0 -0
- package/out/sounds/soft-chime.mp3 +0 -0
- package/out/sounds/warm-bell.mp3 +0 -0
- package/package.json +1 -1
- package/server/config.js +43 -1
- package/server/index.js +20 -9
- package/server/routes.js +749 -7
- package/templates/CLAUDE.md +1 -0
- package/templates/OVERNIGHT-QUEUE.md +1 -1
- package/templates/seeds/head.AGENTS.md +21 -2
- package/out/_next/static/chunks/0d.f~y5jeh785.css +0 -2
- package/out/_next/static/chunks/0spbjcw4anq15.js +0 -1
- /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → na3L7KeOGKGsbamYVibRj}/_buildManifest.js +0 -0
- /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → na3L7KeOGKGsbamYVibRj}/_clientMiddlewareManifest.js +0 -0
- /package/out/_next/static/{Cpy01wZHv0vXd_j_HlrSf → na3L7KeOGKGsbamYVibRj}/_ssgManifest.js +0 -0
package/server/routes.js
CHANGED
|
@@ -43,7 +43,17 @@ function writeConfigFile(cfg) {
|
|
|
43
43
|
router.get("/api/config", (_req, res) => {
|
|
44
44
|
try {
|
|
45
45
|
const raw = fs.readFileSync(CONFIG_PATH, "utf-8");
|
|
46
|
-
|
|
46
|
+
const parsed = JSON.parse(raw);
|
|
47
|
+
// #409 / quadwork#273: overlay the sanitized operator_name so
|
|
48
|
+
// the chat panel's self-message filter compares against the same
|
|
49
|
+
// sender /api/chat actually stamps. The on-disk file keeps the
|
|
50
|
+
// raw value the operator typed (so a future feature can show
|
|
51
|
+
// both raw + effective), but every reader sees the effective
|
|
52
|
+
// value here — including SettingsPage, which now reflects what
|
|
53
|
+
// chat actually sends. This also makes a hand-edited file with
|
|
54
|
+
// garbage characters self-correct visibly on next reload.
|
|
55
|
+
parsed.operator_name = sanitizeOperatorName(parsed.operator_name);
|
|
56
|
+
res.json(parsed);
|
|
47
57
|
} catch (err) {
|
|
48
58
|
if (err.code === "ENOENT") return res.json(DEFAULT_CONFIG);
|
|
49
59
|
res.status(500).json({ error: "Failed to read config", detail: err.message });
|
|
@@ -68,7 +78,7 @@ router.put("/api/config", (req, res) => {
|
|
|
68
78
|
|
|
69
79
|
// ─── Chat (AgentChattr proxy) ──────────────────────────────────────────────
|
|
70
80
|
|
|
71
|
-
const { resolveProjectChattr } = require("./config");
|
|
81
|
+
const { resolveProjectChattr, sanitizeOperatorName } = require("./config");
|
|
72
82
|
const { installAgentChattr, findAgentChattr } = require("./install-agentchattr");
|
|
73
83
|
|
|
74
84
|
/**
|
|
@@ -174,6 +184,354 @@ function sendViaWebSocket(baseUrl, sessionToken, message) {
|
|
|
174
184
|
});
|
|
175
185
|
}
|
|
176
186
|
|
|
187
|
+
/**
|
|
188
|
+
* #403 / quadwork#274: send an arbitrary AC ws event (not a chat
|
|
189
|
+
* message). Used for `update_settings` so the loop guard widget can
|
|
190
|
+
* push the new max_agent_hops to the running AgentChattr without a
|
|
191
|
+
* full restart. Mirrors sendViaWebSocket but lets the caller pick
|
|
192
|
+
* the event type.
|
|
193
|
+
*/
|
|
194
|
+
function sendWsEvent(baseUrl, sessionToken, event) {
|
|
195
|
+
return new Promise((resolve, reject) => {
|
|
196
|
+
const wsUrl = `${baseUrl.replace(/^http/, "ws")}/ws?token=${encodeURIComponent(sessionToken || "")}`;
|
|
197
|
+
const ws = new NodeWebSocket(wsUrl);
|
|
198
|
+
let settled = false;
|
|
199
|
+
const finish = (err, value) => {
|
|
200
|
+
if (settled) return;
|
|
201
|
+
settled = true;
|
|
202
|
+
try { ws.close(); } catch {}
|
|
203
|
+
if (err) reject(err); else resolve(value);
|
|
204
|
+
};
|
|
205
|
+
const giveUp = setTimeout(() => finish(new Error("websocket send timeout")), 4000);
|
|
206
|
+
ws.on("open", () => {
|
|
207
|
+
try {
|
|
208
|
+
ws.send(JSON.stringify(event));
|
|
209
|
+
setTimeout(() => { clearTimeout(giveUp); finish(null, { ok: true }); }, 250);
|
|
210
|
+
} catch (err) { clearTimeout(giveUp); finish(err); }
|
|
211
|
+
});
|
|
212
|
+
ws.on("error", (err) => { clearTimeout(giveUp); finish(err); });
|
|
213
|
+
ws.on("close", (code, reason) => {
|
|
214
|
+
if (!settled && code === 4003) {
|
|
215
|
+
clearTimeout(giveUp);
|
|
216
|
+
const msg = (reason && reason.toString()) || "forbidden: invalid session token";
|
|
217
|
+
const e = new Error(msg);
|
|
218
|
+
e.code = "EAGENTCHATTR_401";
|
|
219
|
+
finish(e);
|
|
220
|
+
}
|
|
221
|
+
});
|
|
222
|
+
});
|
|
223
|
+
}
|
|
224
|
+
|
|
225
|
+
// #403 / quadwork#274: read/write the loop guard for a given project.
|
|
226
|
+
// Source of truth at rest is the project's config.toml [routing]
|
|
227
|
+
// max_agent_hops. The PUT also pushes the value to the running AC via
|
|
228
|
+
// `update_settings` so the change is live without a daemon restart.
|
|
229
|
+
// Resolve the per-project config.toml path through resolveProjectChattr
|
|
230
|
+
// so we honor `project.agentchattr_dir` (web wizard sets this; legacy
|
|
231
|
+
// imports can have arbitrary paths) and don't drift from the rest of
|
|
232
|
+
// the codebase that already goes through that helper.
|
|
233
|
+
function resolveProjectConfigToml(projectId) {
|
|
234
|
+
const resolved = resolveProjectChattr(projectId);
|
|
235
|
+
if (!resolved || !resolved.dir) return null;
|
|
236
|
+
return path.join(resolved.dir, "config.toml");
|
|
237
|
+
}
|
|
238
|
+
|
|
239
|
+
router.get("/api/loop-guard", (req, res) => {
|
|
240
|
+
const projectId = req.query.project;
|
|
241
|
+
if (!projectId) return res.status(400).json({ error: "Missing project" });
|
|
242
|
+
const tomlPath = resolveProjectConfigToml(projectId);
|
|
243
|
+
if (!tomlPath || !fs.existsSync(tomlPath)) return res.json({ value: 30, source: "default" });
|
|
244
|
+
try {
|
|
245
|
+
const content = fs.readFileSync(tomlPath, "utf-8");
|
|
246
|
+
const m = content.match(/^\s*max_agent_hops\s*=\s*(\d+)/m);
|
|
247
|
+
const value = m ? parseInt(m[1], 10) : 30;
|
|
248
|
+
res.json({ value, source: m ? "toml" : "default" });
|
|
249
|
+
} catch (err) {
|
|
250
|
+
res.status(500).json({ error: "Failed to read config.toml", detail: err.message });
|
|
251
|
+
}
|
|
252
|
+
});
|
|
253
|
+
|
|
254
|
+
router.put("/api/loop-guard", async (req, res) => {
|
|
255
|
+
const projectId = req.query.project || req.body?.project;
|
|
256
|
+
if (!projectId) return res.status(400).json({ error: "Missing project" });
|
|
257
|
+
const raw = req.body?.value;
|
|
258
|
+
const value = typeof raw === "number" ? raw : parseInt(raw, 10);
|
|
259
|
+
// AC's update_settings handler clamps to [1, 50]; mirror that
|
|
260
|
+
// here so we don't write a value AC will silently rewrite.
|
|
261
|
+
if (!Number.isInteger(value) || value < 4 || value > 50) {
|
|
262
|
+
return res.status(400).json({ error: "value must be an integer between 4 and 50" });
|
|
263
|
+
}
|
|
264
|
+
|
|
265
|
+
// 1. Persist to config.toml so the next restart picks it up.
|
|
266
|
+
const tomlPath = resolveProjectConfigToml(projectId);
|
|
267
|
+
if (!tomlPath || !fs.existsSync(tomlPath)) {
|
|
268
|
+
return res.status(404).json({ error: "config.toml not found for project" });
|
|
269
|
+
}
|
|
270
|
+
try {
|
|
271
|
+
let content = fs.readFileSync(tomlPath, "utf-8");
|
|
272
|
+
if (/^\s*max_agent_hops\s*=/m.test(content)) {
|
|
273
|
+
content = content.replace(/^\s*max_agent_hops\s*=.*$/m, `max_agent_hops = ${value}`);
|
|
274
|
+
} else if (/^\s*\[routing\]/m.test(content)) {
|
|
275
|
+
// Section exists but the key doesn't — append the key on the
|
|
276
|
+
// line right after the [routing] header to keep it scoped.
|
|
277
|
+
content = content.replace(/^(\s*\[routing\]\s*\n)/m, `$1max_agent_hops = ${value}\n`);
|
|
278
|
+
} else {
|
|
279
|
+
const trailing = content.endsWith("\n") ? "" : "\n";
|
|
280
|
+
content += `${trailing}\n[routing]\ndefault = "none"\nmax_agent_hops = ${value}\n`;
|
|
281
|
+
}
|
|
282
|
+
fs.writeFileSync(tomlPath, content);
|
|
283
|
+
} catch (err) {
|
|
284
|
+
return res.status(500).json({ error: "Failed to write config.toml", detail: err.message });
|
|
285
|
+
}
|
|
286
|
+
|
|
287
|
+
// 2. Best-effort push to the running AC so the change is live.
|
|
288
|
+
// On stale-token (4003 → EAGENTCHATTR_401) recover the same way
|
|
289
|
+
// /api/chat does (#230): re-sync the session token from AC and
|
|
290
|
+
// retry once. Other failures stay non-fatal — the persisted value
|
|
291
|
+
// still takes effect on next AC restart.
|
|
292
|
+
let live = false;
|
|
293
|
+
try {
|
|
294
|
+
const { url: base, token: sessionToken } = getChattrConfig(projectId);
|
|
295
|
+
if (base) {
|
|
296
|
+
const event = { type: "update_settings", data: { max_agent_hops: value } };
|
|
297
|
+
try {
|
|
298
|
+
await sendWsEvent(base, sessionToken, event);
|
|
299
|
+
live = true;
|
|
300
|
+
} catch (err) {
|
|
301
|
+
if (err && err.code === "EAGENTCHATTR_401") {
|
|
302
|
+
console.warn(`[loop-guard] ws auth failed for ${projectId}, re-syncing session token and retrying...`);
|
|
303
|
+
try { await syncChattrToken(projectId); }
|
|
304
|
+
catch (syncErr) { console.warn(`[loop-guard] syncChattrToken failed: ${syncErr.message}`); }
|
|
305
|
+
const { token: refreshed } = getChattrConfig(projectId);
|
|
306
|
+
if (refreshed && refreshed !== sessionToken) {
|
|
307
|
+
try {
|
|
308
|
+
await sendWsEvent(base, refreshed, event);
|
|
309
|
+
live = true;
|
|
310
|
+
} catch (retryErr) {
|
|
311
|
+
console.warn(`[loop-guard] retry after token resync failed: ${retryErr.message || retryErr}`);
|
|
312
|
+
}
|
|
313
|
+
}
|
|
314
|
+
} else {
|
|
315
|
+
throw err;
|
|
316
|
+
}
|
|
317
|
+
}
|
|
318
|
+
}
|
|
319
|
+
} catch (err) {
|
|
320
|
+
console.warn(`[loop-guard] live update failed for ${projectId}: ${err.message || err}`);
|
|
321
|
+
}
|
|
322
|
+
|
|
323
|
+
res.json({ ok: true, value, live });
|
|
324
|
+
});
|
|
325
|
+
|
|
326
|
+
// #412 / quadwork#279: project history export + import.
|
|
327
|
+
//
|
|
328
|
+
// Export proxies AC's /api/messages for the project channel and
|
|
329
|
+
// wraps the array in a small metadata envelope so future imports
|
|
330
|
+
// can warn on project-id mismatch and so a future schema bump can
|
|
331
|
+
// be detected client-side.
|
|
332
|
+
//
|
|
333
|
+
// Import accepts the same envelope, validates the shape + size,
|
|
334
|
+
// and replays each message back into the project's AgentChattr
|
|
335
|
+
// instance via sendViaWebSocket — preserving the original sender
|
|
336
|
+
// field for cross-tool consistency. Originals' message IDs are NOT
|
|
337
|
+
// preserved (AC re-assigns on insert), which is a known v1 limit
|
|
338
|
+
// and matches the issue's "AgentChattr will tell us" note.
|
|
339
|
+
|
|
340
|
+
const PROJECT_HISTORY_VERSION = 1;
|
|
341
|
+
const PROJECT_HISTORY_MAX_BYTES = 10 * 1024 * 1024; // 10 MB cap per issue
|
|
342
|
+
const PROJECT_HISTORY_REPLAY_DELAY_MS = 25; // pace AC ws inserts
|
|
343
|
+
|
|
344
|
+
// #414 / quadwork#297: reject imports whose messages claim a
|
|
345
|
+
// reserved agent / system sender by default. This closes the only
|
|
346
|
+
// path in QuadWork that lets a client-supplied sender reach AC
|
|
347
|
+
// (every other route hardcodes / sanitizes to operator). Mirrors
|
|
348
|
+
// the RESERVED_OPERATOR_NAMES denylist from sanitizeOperatorName so
|
|
349
|
+
// the same identities are blocked across the codebase.
|
|
350
|
+
const RESERVED_HISTORY_SENDERS = new Set([
|
|
351
|
+
"head",
|
|
352
|
+
"dev",
|
|
353
|
+
"reviewer1",
|
|
354
|
+
"reviewer2",
|
|
355
|
+
"t1",
|
|
356
|
+
"t2a",
|
|
357
|
+
"t2b",
|
|
358
|
+
"t3",
|
|
359
|
+
"system",
|
|
360
|
+
]);
|
|
361
|
+
|
|
362
|
+
router.get("/api/project-history", async (req, res) => {
|
|
363
|
+
const projectId = req.query.project;
|
|
364
|
+
if (!projectId) return res.status(400).json({ error: "Missing project" });
|
|
365
|
+
const { url: base, token: sessionToken } = getChattrConfig(projectId);
|
|
366
|
+
if (!base) return res.status(400).json({ error: "No AgentChattr configured for project" });
|
|
367
|
+
try {
|
|
368
|
+
// AC's /api/messages accepts a bearer token in the Authorization
|
|
369
|
+
// header; the session token is what the chat panel already uses.
|
|
370
|
+
const target = `${base}/api/messages?channel=general&limit=100000`;
|
|
371
|
+
const r = await fetch(target, {
|
|
372
|
+
headers: sessionToken ? { Authorization: `Bearer ${sessionToken}` } : {},
|
|
373
|
+
// Cap the AC fetch at 30s so a hung daemon doesn't park the
|
|
374
|
+
// export request indefinitely.
|
|
375
|
+
signal: AbortSignal.timeout(30000),
|
|
376
|
+
});
|
|
377
|
+
if (!r.ok) {
|
|
378
|
+
const detail = await r.text().catch(() => "");
|
|
379
|
+
return res.status(502).json({ error: `AgentChattr /api/messages returned ${r.status}`, detail: detail.slice(0, 200) });
|
|
380
|
+
}
|
|
381
|
+
const raw = await r.json();
|
|
382
|
+
// AC returns either a bare array or { messages: [...] } depending
|
|
383
|
+
// on version — handle both.
|
|
384
|
+
const messages = Array.isArray(raw) ? raw : Array.isArray(raw && raw.messages) ? raw.messages : [];
|
|
385
|
+
res.json({
|
|
386
|
+
version: PROJECT_HISTORY_VERSION,
|
|
387
|
+
project_id: projectId,
|
|
388
|
+
exported_at: new Date().toISOString(),
|
|
389
|
+
message_count: messages.length,
|
|
390
|
+
messages,
|
|
391
|
+
});
|
|
392
|
+
} catch (err) {
|
|
393
|
+
res.status(502).json({ error: "Project history export failed", detail: err.message || String(err) });
|
|
394
|
+
}
|
|
395
|
+
});
|
|
396
|
+
|
|
397
|
+
// Global express.json() in server/index.js is bumped to 10mb to
|
|
398
|
+
// cover this route — see the comment there. The route handler still
|
|
399
|
+
// double-checks the byte size of the parsed body below as a defense
|
|
400
|
+
// in depth (e.g. if a future change scopes the global parser back
|
|
401
|
+
// down without updating this comment).
|
|
402
|
+
router.post("/api/project-history", async (req, res) => {
|
|
403
|
+
const projectId = req.query.project || req.body?.project_id;
|
|
404
|
+
if (!projectId) return res.status(400).json({ error: "Missing project" });
|
|
405
|
+
|
|
406
|
+
const body = req.body;
|
|
407
|
+
if (!body || typeof body !== "object") {
|
|
408
|
+
return res.status(400).json({ error: "Invalid JSON body" });
|
|
409
|
+
}
|
|
410
|
+
// Body size guard — express.json() respects its own limit too,
|
|
411
|
+
// but stamp the explicit cap from the issue here so the error
|
|
412
|
+
// message is operator-readable.
|
|
413
|
+
try {
|
|
414
|
+
const approxBytes = Buffer.byteLength(JSON.stringify(body));
|
|
415
|
+
if (approxBytes > PROJECT_HISTORY_MAX_BYTES) {
|
|
416
|
+
return res.status(413).json({ error: `History file too large (${approxBytes} bytes; limit ${PROJECT_HISTORY_MAX_BYTES})` });
|
|
417
|
+
}
|
|
418
|
+
} catch {
|
|
419
|
+
// JSON.stringify circular — already invalid, fall through
|
|
420
|
+
}
|
|
421
|
+
|
|
422
|
+
if (!Array.isArray(body.messages)) {
|
|
423
|
+
return res.status(400).json({ error: "Missing or invalid 'messages' array" });
|
|
424
|
+
}
|
|
425
|
+
if (body.version && body.version !== PROJECT_HISTORY_VERSION) {
|
|
426
|
+
return res.status(400).json({ error: `Unsupported export version ${body.version} (expected ${PROJECT_HISTORY_VERSION})` });
|
|
427
|
+
}
|
|
428
|
+
// Soft project-id mismatch warning. The client UI should confirm
|
|
429
|
+
// before POSTing when the IDs differ; if it didn't (e.g. curl),
|
|
430
|
+
// require an explicit override flag so we can't silently merge
|
|
431
|
+
// foreign chat into the wrong project.
|
|
432
|
+
if (body.project_id && body.project_id !== projectId && body.allow_project_mismatch !== true) {
|
|
433
|
+
return res.status(409).json({
|
|
434
|
+
error: `Project mismatch: file is from '${body.project_id}', target is '${projectId}'. Resend with allow_project_mismatch=true to override.`,
|
|
435
|
+
});
|
|
436
|
+
}
|
|
437
|
+
|
|
438
|
+
// #414 / quadwork#297 — Issue 1: agent/system sender denylist.
|
|
439
|
+
// Pre-scan the messages array; if any line claims a reserved
|
|
440
|
+
// identity, reject the entire import unless the operator opted
|
|
441
|
+
// in via allow_agent_senders=true. Default-safe so a leaked or
|
|
442
|
+
// crafted export file can't post as Head from the dashboard.
|
|
443
|
+
if (body.allow_agent_senders !== true) {
|
|
444
|
+
const offenders = new Set();
|
|
445
|
+
for (const m of body.messages) {
|
|
446
|
+
if (m && typeof m === "object" && typeof m.sender === "string") {
|
|
447
|
+
if (RESERVED_HISTORY_SENDERS.has(m.sender.toLowerCase())) {
|
|
448
|
+
offenders.add(m.sender);
|
|
449
|
+
if (offenders.size >= 5) break;
|
|
450
|
+
}
|
|
451
|
+
}
|
|
452
|
+
}
|
|
453
|
+
if (offenders.size > 0) {
|
|
454
|
+
return res.status(400).json({
|
|
455
|
+
error: `Import contains messages attributed to reserved agent/system identities: ${[...offenders].join(", ")}. Resend with allow_agent_senders=true to override (e.g. legitimate disaster-recovery restore).`,
|
|
456
|
+
});
|
|
457
|
+
}
|
|
458
|
+
}
|
|
459
|
+
|
|
460
|
+
// #414 / quadwork#297 — Issue 2: duplicate import detection.
|
|
461
|
+
// Persist the most recent imported `exported_at` on the project
|
|
462
|
+
// entry in config.json. If the file's marker matches, refuse the
|
|
463
|
+
// import unless allow_duplicate=true. Re-importing the same file
|
|
464
|
+
// would otherwise replay every message a second time and double
|
|
465
|
+
// the chat history.
|
|
466
|
+
const cfg = readConfigFile();
|
|
467
|
+
const project = cfg.projects?.find((p) => p.id === projectId);
|
|
468
|
+
const incomingExportedAt = typeof body.exported_at === "string" ? body.exported_at : null;
|
|
469
|
+
if (body.allow_duplicate !== true && project && incomingExportedAt) {
|
|
470
|
+
if (project.history_last_imported_at === incomingExportedAt) {
|
|
471
|
+
return res.status(409).json({
|
|
472
|
+
error: `This export was already imported (exported_at=${incomingExportedAt}). Resend with allow_duplicate=true to import again.`,
|
|
473
|
+
});
|
|
474
|
+
}
|
|
475
|
+
}
|
|
476
|
+
|
|
477
|
+
const { url: base, token: sessionToken } = getChattrConfig(projectId);
|
|
478
|
+
if (!base) return res.status(400).json({ error: "No AgentChattr configured for project" });
|
|
479
|
+
|
|
480
|
+
// Replay each message via the existing ws send helper. Preserve
|
|
481
|
+
// the original sender so the imported transcript still attributes
|
|
482
|
+
// each line correctly. Pace the writes so AC's ws handler isn't
|
|
483
|
+
// overloaded on a multi-thousand-message import.
|
|
484
|
+
//
|
|
485
|
+
// SECURITY NOTE: This deliberately bypasses /api/chat's #230/#288
|
|
486
|
+
// sanitize-as-user lockdown — the imported sender field is sent
|
|
487
|
+
// straight to AC's ws, so a crafted import file CAN post as
|
|
488
|
+
// `head` / `dev` / etc. That's intentional: imports must round-
|
|
489
|
+
// trip the original attribution to be useful (otherwise every
|
|
490
|
+
// restored message would say `user` and the transcript would be
|
|
491
|
+
// worthless). The trade-off is acceptable because the only entry
|
|
492
|
+
// point is an authenticated dashboard operator picking a file by
|
|
493
|
+
// hand and clicking through the project-mismatch confirm. Don't
|
|
494
|
+
// expose this route from a less-trusted surface without revisiting.
|
|
495
|
+
let imported = 0;
|
|
496
|
+
let skipped = 0;
|
|
497
|
+
const errors = [];
|
|
498
|
+
for (const m of body.messages) {
|
|
499
|
+
if (!m || typeof m !== "object" || typeof m.text !== "string" || !m.text) {
|
|
500
|
+
skipped++;
|
|
501
|
+
continue;
|
|
502
|
+
}
|
|
503
|
+
const msg = {
|
|
504
|
+
text: m.text,
|
|
505
|
+
channel: typeof m.channel === "string" && m.channel ? m.channel : "general",
|
|
506
|
+
sender: typeof m.sender === "string" && m.sender ? m.sender : "user",
|
|
507
|
+
};
|
|
508
|
+
try {
|
|
509
|
+
await sendViaWebSocket(base, sessionToken, msg);
|
|
510
|
+
imported++;
|
|
511
|
+
} catch (err) {
|
|
512
|
+
errors.push(`#${m.id ?? "?"}: ${err.message || String(err)}`);
|
|
513
|
+
// Stop on the first error to avoid spamming AC if its ws is down.
|
|
514
|
+
if (errors.length > 5) break;
|
|
515
|
+
}
|
|
516
|
+
// Tiny delay between sends — AC's ws handler can keep up but
|
|
517
|
+
// 10k messages back-to-back hit the recv buffer hard.
|
|
518
|
+
if (PROJECT_HISTORY_REPLAY_DELAY_MS > 0) {
|
|
519
|
+
await new Promise((r) => setTimeout(r, PROJECT_HISTORY_REPLAY_DELAY_MS));
|
|
520
|
+
}
|
|
521
|
+
}
|
|
522
|
+
// #414 / quadwork#297 — Issue 2: stamp the import marker on the
|
|
523
|
+
// project so a re-import of the same file is caught next time.
|
|
524
|
+
// Only update on a successful (no errors) replay so a half-broken
|
|
525
|
+
// import can be retried without the duplicate guard tripping.
|
|
526
|
+
if (incomingExportedAt && errors.length === 0 && project) {
|
|
527
|
+
project.history_last_imported_at = incomingExportedAt;
|
|
528
|
+
try { writeConfigFile(cfg); }
|
|
529
|
+
catch (err) { console.warn(`[history] failed to persist history_last_imported_at: ${err.message || err}`); }
|
|
530
|
+
}
|
|
531
|
+
|
|
532
|
+
res.json({ ok: errors.length === 0, imported, skipped, total: body.messages.length, errors });
|
|
533
|
+
});
|
|
534
|
+
|
|
177
535
|
router.post("/api/chat", async (req, res) => {
|
|
178
536
|
const projectId = req.query.project || req.body.project;
|
|
179
537
|
const { url: base, token: sessionToken } = getChattrConfig(projectId);
|
|
@@ -181,15 +539,38 @@ router.post("/api/chat", async (req, res) => {
|
|
|
181
539
|
|
|
182
540
|
// #230: ignore any client-supplied sender. /api/chat is the
|
|
183
541
|
// dashboard's send path, so the message must always be attributed
|
|
184
|
-
// to
|
|
185
|
-
// hitting QuadWork's /api/chat impersonate an agent
|
|
186
|
-
// t3, …) over the AgentChattr ws path, which the
|
|
187
|
-
// flow could not do.
|
|
542
|
+
// to a server-controlled value. Forwarding `req.body.sender` would
|
|
543
|
+
// let any caller hitting QuadWork's /api/chat impersonate an agent
|
|
544
|
+
// identity (t1, t3, …) over the AgentChattr ws path, which the
|
|
545
|
+
// old /api/send flow could not do.
|
|
546
|
+
//
|
|
547
|
+
// #405 / quadwork#278: read the operator's display name from the
|
|
548
|
+
// server-side config file rather than hardcoding "user". The
|
|
549
|
+
// sanitizer matches AC's registry name validator (1–32 alnum +
|
|
550
|
+
// dash + underscore) so even a hand-edited config can't post a
|
|
551
|
+
// value AC will reject (or impersonate an agent), and an empty /
|
|
552
|
+
// missing value falls back to "user".
|
|
553
|
+
let operatorSender = "user";
|
|
554
|
+
try {
|
|
555
|
+
const cfg = readConfigFile();
|
|
556
|
+
operatorSender = sanitizeOperatorName(cfg.operator_name);
|
|
557
|
+
} catch {
|
|
558
|
+
// non-fatal — fall through to "user"
|
|
559
|
+
}
|
|
560
|
+
// #397 / quadwork#262: pass reply_to through to AgentChattr so the
|
|
561
|
+
// dashboard's reply button mirrors AC's native threaded-reply
|
|
562
|
+
// behavior. Only forward when it's a real positive integer — guards
|
|
563
|
+
// against arbitrary client payloads.
|
|
564
|
+
const replyToRaw = req.body?.reply_to;
|
|
565
|
+
const replyTo = (typeof replyToRaw === "number" && Number.isInteger(replyToRaw) && replyToRaw > 0)
|
|
566
|
+
? replyToRaw
|
|
567
|
+
: null;
|
|
188
568
|
const message = {
|
|
189
569
|
text: typeof req.body?.text === "string" ? req.body.text : "",
|
|
190
570
|
channel: req.body?.channel || "general",
|
|
191
|
-
sender:
|
|
571
|
+
sender: operatorSender,
|
|
192
572
|
attachments: Array.isArray(req.body?.attachments) ? req.body.attachments : [],
|
|
573
|
+
...(replyTo !== null ? { reply_to: replyTo } : {}),
|
|
193
574
|
};
|
|
194
575
|
if (!message.text && message.attachments.length === 0) {
|
|
195
576
|
return res.status(400).json({ error: "text or attachments required" });
|
|
@@ -378,6 +759,362 @@ router.get("/api/github/prs", (req, res) => {
|
|
|
378
759
|
}
|
|
379
760
|
});
|
|
380
761
|
|
|
762
|
+
// #411 / quadwork#281: recently closed issues + merged PRs for the
|
|
763
|
+
// "Recently closed" / "Recently merged" sub-sections under each
|
|
764
|
+
// list in GitHubPanel. Limit 5 items each, ordered by closedAt
|
|
765
|
+
// descending so the freshest activity sits at the top.
|
|
766
|
+
// gh CLI's default ordering for `issue list --state closed` and
|
|
767
|
+
// `pr list --state merged` is createdAt-desc, not closedAt/mergedAt-desc,
|
|
768
|
+
// so a stale-but-recently-closed item can sit below a fresh-but-
|
|
769
|
+
// older one. We pull a wider window and re-sort by close/merge time
|
|
770
|
+
// before truncating to 5 to honor #281's "newest first" requirement.
|
|
771
|
+
const RECENT_FETCH_LIMIT = 20;
|
|
772
|
+
const RECENT_DISPLAY_LIMIT = 5;
|
|
773
|
+
|
|
774
|
+
router.get("/api/github/closed-issues", (req, res) => {
|
|
775
|
+
const repo = getRepo(req.query.project || "");
|
|
776
|
+
if (!repo) return res.status(400).json({ error: "No repo configured for project" });
|
|
777
|
+
try {
|
|
778
|
+
const out = execFileSync(
|
|
779
|
+
"gh",
|
|
780
|
+
["issue", "list", "-R", repo, "--state", "closed", "--json", "number,title,state,url,closedAt", "--limit", String(RECENT_FETCH_LIMIT)],
|
|
781
|
+
{ encoding: "utf-8", timeout: 15000 },
|
|
782
|
+
);
|
|
783
|
+
const items = JSON.parse(out);
|
|
784
|
+
const sorted = Array.isArray(items)
|
|
785
|
+
? items
|
|
786
|
+
.slice()
|
|
787
|
+
.sort((a, b) => {
|
|
788
|
+
const ta = a && a.closedAt ? Date.parse(a.closedAt) : 0;
|
|
789
|
+
const tb = b && b.closedAt ? Date.parse(b.closedAt) : 0;
|
|
790
|
+
return tb - ta;
|
|
791
|
+
})
|
|
792
|
+
.slice(0, RECENT_DISPLAY_LIMIT)
|
|
793
|
+
: items;
|
|
794
|
+
res.json(sorted);
|
|
795
|
+
} catch (err) {
|
|
796
|
+
res.status(502).json({ error: "gh issue list (closed) failed", detail: err.message });
|
|
797
|
+
}
|
|
798
|
+
});
|
|
799
|
+
|
|
800
|
+
router.get("/api/github/merged-prs", (req, res) => {
|
|
801
|
+
const repo = getRepo(req.query.project || "");
|
|
802
|
+
if (!repo) return res.status(400).json({ error: "No repo configured for project" });
|
|
803
|
+
try {
|
|
804
|
+
// gh pr list with `--state merged` filters server-side so we
|
|
805
|
+
// don't have to pull every closed PR and discard the un-merged
|
|
806
|
+
// ones (closed-without-merge). Same fetch-wider-then-sort
|
|
807
|
+
// strategy as closed-issues so the newest merge always wins.
|
|
808
|
+
const out = execFileSync(
|
|
809
|
+
"gh",
|
|
810
|
+
["pr", "list", "-R", repo, "--state", "merged", "--json", "number,title,state,url,mergedAt,author", "--limit", String(RECENT_FETCH_LIMIT)],
|
|
811
|
+
{ encoding: "utf-8", timeout: 15000 },
|
|
812
|
+
);
|
|
813
|
+
const items = JSON.parse(out);
|
|
814
|
+
const sorted = Array.isArray(items)
|
|
815
|
+
? items
|
|
816
|
+
.slice()
|
|
817
|
+
.sort((a, b) => {
|
|
818
|
+
const ta = a && a.mergedAt ? Date.parse(a.mergedAt) : 0;
|
|
819
|
+
const tb = b && b.mergedAt ? Date.parse(b.mergedAt) : 0;
|
|
820
|
+
return tb - ta;
|
|
821
|
+
})
|
|
822
|
+
.slice(0, RECENT_DISPLAY_LIMIT)
|
|
823
|
+
: items;
|
|
824
|
+
res.json(sorted);
|
|
825
|
+
} catch (err) {
|
|
826
|
+
res.status(502).json({ error: "gh pr list (merged) failed", detail: err.message });
|
|
827
|
+
}
|
|
828
|
+
});
|
|
829
|
+
|
|
830
|
+
// #413 / quadwork#282: Current Batch Progress panel.
//
// Reads ~/.quadwork/{project}/OVERNIGHT-QUEUE.md, pulls `Batch: N` and
// the issue numbers out of the `## Active Batch` section, then resolves
// each issue against GitHub (state + linked PR + review counts) to
// compute a progress state. The five buckets are deterministic from
// issue/PR state — no agent inference:
//
//   queued      0%  issue exists, no linked PR
//   in_review  20%  PR open, 0 approvals
//   approved1  50%  PR open, 1 approval
//   ready      80%  PR open, 2+ approvals
//   merged    100%  PR merged AND issue closed
//
// Results are cached per project for BATCH_PROGRESS_TTL_MS so UI polls
// don't hammer gh.

// projectId -> { ts, data }
const _batchProgressCache = new Map();
const BATCH_PROGRESS_TTL_MS = 10000;
|
|
849
|
+
|
|
850
|
+
function parseActiveBatch(queueText) {
  // Parse OVERNIGHT-QUEUE.md text into the active batch number and the
  // ordered, de-duplicated list of issue numbers it contains.
  // Returns { batchNumber: number|null, issueNumbers: number[] }.
  if (typeof queueText !== "string" || !queueText) {
    return { batchNumber: null, issueNumbers: [] };
  }

  // Isolate the `## Active Batch` section first, so a stray `#123` in
  // Backlog or Done can never leak into the active list.
  const sectionMatch = queueText.match(/##\s+Active Batch[\s\S]*?(?=\n##\s|$)/i);
  if (!sectionMatch) return { batchNumber: null, issueNumbers: [] };
  const section = sectionMatch[0];

  // Bold form (`**Batch:** N`) is preferred; plain `Batch: N` is the
  // fallback for hand-edited queues.
  const numberMatch =
    section.match(/\*\*Batch:\*\*\s*(\d+)/i) || section.match(/Batch:\s*(\d+)/i);
  const batchNumber = numberMatch ? parseInt(numberMatch[1], 10) : null;

  // A line counts as a batch item only when its first content token is
  // `#N` or `[#N]`, after an optional `-` / `*` / `1.` list marker.
  // Accepted:  "- #295 x", "* #295 x", "1. #295 x", "#295 x", "[#295] x"
  // Rejected:  "Tracking umbrella: #293", "next after #294 merged",
  //            "See #295 for context." — prose/dependency references
  //            flagged by t2a on realproject7/dropcast's queue.
  const ITEM_LINE_RE = /^\s*(?:[-*]\s+|\d+\.\s+)?\[?#(\d{1,6})\]?\b/;
  const found = [];
  for (const line of section.split("\n")) {
    const itemMatch = ITEM_LINE_RE.exec(line);
    if (itemMatch) found.push(parseInt(itemMatch[1], 10));
  }
  // Set keeps first-seen order, so duplicates collapse without reordering.
  return { batchNumber, issueNumbers: [...new Set(found)] };
}
|
|
894
|
+
|
|
895
|
+
// #416 / quadwork#299: promise-wrapped execFile for the parallelized
// batch-progress fetcher.
//
// Deliberately THROWS on any subprocess failure (non-zero exit,
// timeout, bad JSON, network) so progressForItemAsync can decide which
// failures bubble up to the route's Promise.allSettled "fetch failed"
// row and which degrade to a softer state. An earlier
// catch-all-and-return-null contract collapsed real subprocess errors
// into the "not found" branch, making the failure-row fallback
// unreachable for genuine command failures (t2a review).
const { execFile: _execFile } = require("child_process");
const _execFileAsync = require("util").promisify(_execFile);

// Run `gh <args>` and parse its stdout as JSON. Throws on subprocess
// failure or malformed JSON — see the contract note above.
async function ghJsonExecAsync(args) {
  const result = await _execFileAsync("gh", args, { encoding: "utf-8", timeout: 10000 });
  return JSON.parse(result.stdout);
}
|
|
911
|
+
|
|
912
|
+
async function progressForItemAsync(repo, issueNumber) {
  // Resolve a single active-batch issue to one progress row:
  // { issue_number, title, url, [pr_number], status, progress, label }.
  //
  // Issue state + linked PRs come from one gh call.
  // `closedByPullRequestsReferences` is gh's serializer for the GraphQL
  // edge of the same name — present only when a PR with `Fixes #N` /
  // `Closes #N` (or the link UI) targets the issue.
  //
  // The issue fetch is load-bearing: without it no meaningful row can be
  // computed, so failures (404, network, auth, timeout) are allowed to
  // propagate to the route's Promise.allSettled, which renders a single
  // "fetch failed" row instead of a misleading "queued" one.
  const issue = await ghJsonExecAsync([
    "issue", "view", String(issueNumber), "-R", repo,
    "--json", "number,title,state,url,closedByPullRequestsReferences",
  ]);

  const refs = Array.isArray(issue.closedByPullRequestsReferences)
    ? issue.closedByPullRequestsReferences
    : [];
  // Freshest linked PR = highest PR number, when several are linked.
  let pr = null;
  for (const candidate of refs) {
    if (!pr || (candidate.number || 0) > (pr.number || 0)) pr = candidate;
  }

  // No linked PR yet — queued.
  if (!pr) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: issue.url,
      status: "queued",
      progress: 0,
      label: "Issue · queued",
    };
  }

  // Re-fetch the PR for reviewDecision + reviews + state; the issue's
  // closedByPullRequestsReferences edge only carries number/state/url.
  // This fetch is intentionally soft: if gh glitches here we still know
  // the PR exists (the issue told us), so a partial "in_review" row is
  // more useful than dropping the item to "fetch failed". A persistent
  // failure still surfaces on the next cache miss via the load-bearing
  // issue fetch above.
  let prData = null;
  try {
    prData = await ghJsonExecAsync([
      "pr", "view", String(pr.number), "-R", repo,
      "--json", "number,state,url,reviewDecision,reviews",
    ]);
  } catch {
    // soft fall-through to the partial in_review row below
  }
  if (!prData) {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: pr.url || issue.url,
      pr_number: pr.number,
      status: "in_review",
      progress: 20,
      label: `PR #${pr.number} · waiting on review`,
    };
  }

  // 100% requires BOTH: PR merged and issue closed.
  if (prData.state === "MERGED" && issue.state === "CLOSED") {
    return {
      issue_number: issue.number,
      title: issue.title,
      url: prData.url || issue.url,
      pr_number: prData.number,
      status: "merged",
      progress: 100,
      label: "Merged ✓",
    };
  }

  // Count approvals as "latest review state per author" so a stale
  // APPROVED followed by REQUEST_CHANGES doesn't double-count. Sort by
  // submittedAt ascending first so the Map's last-write-wins genuinely
  // lands on each author's freshest review — gh's ordering is
  // chronological in practice but undocumented, so the explicit sort
  // keeps this safe if that ever changes.
  const chronological = Array.isArray(prData.reviews) ? prData.reviews.slice() : [];
  chronological.sort((x, y) => {
    const tx = (x && x.submittedAt) ? Date.parse(x.submittedAt) : 0;
    const ty = (y && y.submittedAt) ? Date.parse(y.submittedAt) : 0;
    return tx - ty;
  });
  const finalStateByAuthor = new Map();
  for (const review of chronological) {
    const login = (review && review.author && review.author.login) || "";
    if (login) finalStateByAuthor.set(login, review.state);
  }
  let approvals = 0;
  for (const state of finalStateByAuthor.values()) {
    if (state === "APPROVED") approvals += 1;
  }

  // Shared fields for all open-PR rows (key order matters for JSON output).
  const common = {
    issue_number: issue.number,
    title: issue.title,
    url: prData.url || issue.url,
    pr_number: prData.number,
  };
  if (approvals >= 2) {
    return {
      ...common,
      status: "ready",
      progress: 80,
      label: `PR #${prData.number} · 2 approvals · ready`,
    };
  }
  if (approvals === 1) {
    return {
      ...common,
      status: "approved1",
      progress: 50,
      label: `PR #${prData.number} · 1 approval`,
    };
  }
  return {
    ...common,
    status: "in_review",
    progress: 20,
    label: `PR #${prData.number} · waiting on review`,
  };
}
|
|
1049
|
+
|
|
1050
|
+
function summarizeItems(items) {
  // Build the one-line human summary for a batch, e.g.
  // "2/5 merged · 1 ready to merge · 2 in review". Buckets with a zero
  // count are omitted (except the always-present merged fraction);
  // statuses outside the five known buckets (e.g. "unknown") are not
  // counted but still contribute to the denominator.
  const counts = new Map([
    ["merged", 0],
    ["ready", 0],
    ["approved1", 0],
    ["in_review", 0],
    ["queued", 0],
  ]);
  for (const it of items) {
    if (counts.has(it.status)) counts.set(it.status, counts.get(it.status) + 1);
  }
  const parts = [`${counts.get("merged")}/${items.length} merged`];
  if (counts.get("ready") > 0) parts.push(`${counts.get("ready")} ready to merge`);
  if (counts.get("approved1") > 0) parts.push(`${counts.get("approved1")} needs 2nd approval`);
  if (counts.get("in_review") > 0) parts.push(`${counts.get("in_review")} in review`);
  if (counts.get("queued") > 0) parts.push(`${counts.get("queued")} queued`);
  return parts.join(" · ");
}
|
|
1066
|
+
|
|
1067
|
+
router.get("/api/batch-progress", async (req, res) => {
  // Current Batch Progress: parse the project's OVERNIGHT-QUEUE.md and
  // resolve each active-batch issue against GitHub in parallel.
  const projectId = req.query.project;
  if (!projectId) return res.status(400).json({ error: "Missing project" });

  // Serve from the short per-project cache when still fresh.
  const hit = _batchProgressCache.get(projectId);
  if (hit && Date.now() - hit.ts < BATCH_PROGRESS_TTL_MS) {
    return res.json(hit.data);
  }

  const repo = getRepo(projectId);
  if (!repo) return res.status(400).json({ error: "No repo configured for project" });

  // A missing queue file just means there is no active batch.
  let queueText = "";
  try {
    queueText = fs.readFileSync(path.join(CONFIG_DIR, projectId, "OVERNIGHT-QUEUE.md"), "utf-8");
  } catch {
    // missing file → empty active batch
  }

  const { batchNumber, issueNumbers } = parseActiveBatch(queueText);

  // Cache-and-send helper shared by both exits.
  const respond = (data) => {
    _batchProgressCache.set(projectId, { ts: Date.now(), data });
    res.json(data);
  };

  if (issueNumbers.length === 0) {
    return respond({ batch_number: batchNumber, items: [], summary: "", complete: false });
  }

  // #416 / quadwork#299: parallelize the per-item gh fetches.
  // Sequential execFileSync cost ~10s on a cold cache for a 5-item
  // batch (2 gh calls per item, ~1s each); Promise.allSettled over
  // progressForItemAsync drops that to roughly the slowest single
  // item-pair (~2s). A failed item resolves to a synthetic "unknown"
  // row instead of failing the whole response.
  const settled = await Promise.allSettled(
    issueNumbers.map((n) => progressForItemAsync(repo, n)),
  );
  const items = settled.map((outcome, i) =>
    outcome.status === "fulfilled"
      ? outcome.value
      : {
          issue_number: issueNumbers[i],
          title: `#${issueNumbers[i]} (fetch failed)`,
          url: null,
          status: "unknown",
          progress: 0,
          label: "fetch failed",
        },
  );
  respond({
    batch_number: batchNumber,
    items,
    summary: summarizeItems(items),
    complete: items.length > 0 && items.every((it) => it.status === "merged"),
  });
});
|
|
1117
|
+
|
|
381
1118
|
// ─── Memory ────────────────────────────────────────────────────────────────
|
|
382
1119
|
|
|
383
1120
|
function getProject(projectId) {
|
|
@@ -830,6 +1567,11 @@ router.post("/api/setup", (req, res) => {
|
|
|
830
1567
|
const wtDir = path.join(parentDir, `${dirName}-${agent}`);
|
|
831
1568
|
content += `[agents.${agent}]\ncommand = "${(backends && backends[agent]) || "claude"}"\ncwd = "${wtDir}"\ncolor = "${colors[i]}"\nlabel = "${agent.charAt(0).toUpperCase() + agent.slice(1)} ${labels[i]}"\nmcp_inject = "flag"\n\n`;
|
|
832
1569
|
});
|
|
1570
|
+
// #403 / quadwork#274: raise the loop guard from AC's default
|
|
1571
|
+
// of 4 to 30 so autonomous PR review cycles (head→dev→re1+re2→
|
|
1572
|
+
// dev→head, ~5 hops) don't fire mid-batch and force the
|
|
1573
|
+
// operator to type /continue. AC clamps to [1, 50] internally.
|
|
1574
|
+
content += `[routing]\ndefault = "none"\nmax_agent_hops = 30\n\n`;
|
|
833
1575
|
content += `[mcp]\nhttp_port = ${mcp_http}\nsse_port = ${mcp_sse}\n`;
|
|
834
1576
|
fs.writeFileSync(tomlPath, content);
|
|
835
1577
|
|