quadwork 1.4.0 → 1.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/bin/quadwork.js +82 -0
- package/out/404.html +1 -1
- package/out/__next.__PAGE__.txt +3 -3
- package/out/__next._full.txt +12 -12
- package/out/__next._head.txt +4 -4
- package/out/__next._index.txt +6 -6
- package/out/__next._tree.txt +2 -2
- package/out/_next/static/chunks/{18cmux34jwe.p.js → 0-y13tz~pmpno.js} +1 -1
- package/out/_next/static/chunks/{0zqyw6q.jp~1i.js → 0.9m84as-sc_r.js} +13 -13
- package/out/_next/static/chunks/05.po0c1knrbu.css +2 -0
- package/out/_next/static/chunks/084lff9v4p_vh.js +1 -0
- package/out/_next/static/chunks/0e.ktwt1nyj...js +1 -0
- package/out/_next/static/chunks/{05ok82hwk0x-c.js → 0za4cvk8.n0-y.js} +1 -1
- package/out/_not-found/__next._full.txt +11 -11
- package/out/_not-found/__next._head.txt +4 -4
- package/out/_not-found/__next._index.txt +6 -6
- package/out/_not-found/__next._not-found.__PAGE__.txt +2 -2
- package/out/_not-found/__next._not-found.txt +3 -3
- package/out/_not-found/__next._tree.txt +2 -2
- package/out/_not-found.html +1 -1
- package/out/_not-found.txt +11 -11
- package/out/app-shell/__next._full.txt +11 -11
- package/out/app-shell/__next._head.txt +4 -4
- package/out/app-shell/__next._index.txt +6 -6
- package/out/app-shell/__next._tree.txt +2 -2
- package/out/app-shell/__next.app-shell.__PAGE__.txt +2 -2
- package/out/app-shell/__next.app-shell.txt +3 -3
- package/out/app-shell.html +1 -1
- package/out/app-shell.txt +11 -11
- package/out/index.html +1 -1
- package/out/index.txt +12 -12
- package/out/project/_/__next._full.txt +12 -12
- package/out/project/_/__next._head.txt +4 -4
- package/out/project/_/__next._index.txt +6 -6
- package/out/project/_/__next._tree.txt +2 -2
- package/out/project/_/__next.project.$d$id.__PAGE__.txt +3 -3
- package/out/project/_/__next.project.$d$id.txt +3 -3
- package/out/project/_/__next.project.txt +3 -3
- package/out/project/_/memory/__next._full.txt +12 -12
- package/out/project/_/memory/__next._head.txt +4 -4
- package/out/project/_/memory/__next._index.txt +6 -6
- package/out/project/_/memory/__next._tree.txt +2 -2
- package/out/project/_/memory/__next.project.$d$id.memory.__PAGE__.txt +3 -3
- package/out/project/_/memory/__next.project.$d$id.memory.txt +3 -3
- package/out/project/_/memory/__next.project.$d$id.txt +3 -3
- package/out/project/_/memory/__next.project.txt +3 -3
- package/out/project/_/memory.html +1 -1
- package/out/project/_/memory.txt +12 -12
- package/out/project/_/queue/__next._full.txt +12 -12
- package/out/project/_/queue/__next._head.txt +4 -4
- package/out/project/_/queue/__next._index.txt +6 -6
- package/out/project/_/queue/__next._tree.txt +2 -2
- package/out/project/_/queue/__next.project.$d$id.queue.__PAGE__.txt +3 -3
- package/out/project/_/queue/__next.project.$d$id.queue.txt +3 -3
- package/out/project/_/queue/__next.project.$d$id.txt +3 -3
- package/out/project/_/queue/__next.project.txt +3 -3
- package/out/project/_/queue.html +1 -1
- package/out/project/_/queue.txt +12 -12
- package/out/project/_.html +1 -1
- package/out/project/_.txt +12 -12
- package/out/settings/__next._full.txt +12 -12
- package/out/settings/__next._head.txt +4 -4
- package/out/settings/__next._index.txt +6 -6
- package/out/settings/__next._tree.txt +2 -2
- package/out/settings/__next.settings.__PAGE__.txt +3 -3
- package/out/settings/__next.settings.txt +3 -3
- package/out/settings.html +1 -1
- package/out/settings.txt +12 -12
- package/out/setup/__next._full.txt +12 -12
- package/out/setup/__next._head.txt +4 -4
- package/out/setup/__next._index.txt +6 -6
- package/out/setup/__next._tree.txt +2 -2
- package/out/setup/__next.setup.__PAGE__.txt +3 -3
- package/out/setup/__next.setup.txt +3 -3
- package/out/setup.html +1 -1
- package/out/setup.txt +12 -12
- package/package.json +1 -1
- package/server/index.js +26 -0
- package/server/queue-watcher.js +47 -10
- package/server/queue-watcher.test.js +64 -0
- package/server/routes.batchProgress.test.js +94 -0
- package/server/routes.js +388 -23
- package/server/routes.parseActiveBatch.test.js +88 -0
- package/server/routes.telegramBridge.test.js +70 -0
- package/templates/CLAUDE.md +0 -1
- package/out/_next/static/chunks/006g3lco-9xqf.js +0 -1
- package/out/_next/static/chunks/035rt-n0oid7d.js +0 -1
- package/out/_next/static/chunks/0u~7e4fgf-u06.css +0 -2
- /package/out/_next/static/{6uvV3nUfwr_t_JKrZJSP8 → OzDK1Fplm2eUu23bzILlU}/_buildManifest.js +0 -0
- /package/out/_next/static/{6uvV3nUfwr_t_JKrZJSP8 → OzDK1Fplm2eUu23bzILlU}/_clientMiddlewareManifest.js +0 -0
- /package/out/_next/static/{6uvV3nUfwr_t_JKrZJSP8 → OzDK1Fplm2eUu23bzILlU}/_ssgManifest.js +0 -0
package/server/routes.js
CHANGED
|
@@ -1118,11 +1118,13 @@ router.get("/api/github/merged-prs", (req, res) => {
|
|
|
1118
1118
|
// deterministic from issue/PR state — no agent inference.
|
|
1119
1119
|
//
|
|
1120
1120
|
// Progress mapping (from upstream issue):
|
|
1121
|
-
// queued 0% issue
|
|
1121
|
+
// queued 0% issue OPEN, no linked PR
|
|
1122
1122
|
// in_review 20% PR open, 0 approvals
|
|
1123
1123
|
// approved1 50% PR open, 1 approval
|
|
1124
1124
|
// ready 80% PR open, 2+ approvals
|
|
1125
1125
|
// merged 100% PR merged AND issue closed
|
|
1126
|
+
// closed 100% issue CLOSED with no linked PR (superseded,
|
|
1127
|
+
// not planned, or runbook-only tasks) — #350
|
|
1126
1128
|
//
|
|
1127
1129
|
// Cached for 10s per project to avoid hammering gh on every poll.
|
|
1128
1130
|
|
|
@@ -1153,6 +1155,55 @@ function writeBatchSnapshot(projectId, snapshot) {
|
|
|
1153
1155
|
// Non-fatal — panel still works from the live parse.
|
|
1154
1156
|
}
|
|
1155
1157
|
}
|
|
1158
|
+
function deleteBatchSnapshot(projectId) {
|
|
1159
|
+
try {
|
|
1160
|
+
fs.unlinkSync(batchSnapshotPath(projectId));
|
|
1161
|
+
} catch {
|
|
1162
|
+
// Non-fatal — file may already be gone.
|
|
1163
|
+
}
|
|
1164
|
+
}
|
|
1165
|
+
|
|
1166
|
+
// #334: verify the snapshot's first issue number still exists on
|
|
1167
|
+
// GitHub before trusting the snapshot. A soft existence check is
|
|
1168
|
+
// enough — if the first issue genuinely 404s, treat the whole
|
|
1169
|
+
// snapshot as stale (most likely a leftover from a prior
|
|
1170
|
+
// project/repo that was purged) and let the caller drop it. One
|
|
1171
|
+
// gh call per cache miss, wrapped in the existing
|
|
1172
|
+
// BATCH_PROGRESS_TTL_MS cache upstream.
|
|
1173
|
+
//
|
|
1174
|
+
// Returns one of:
|
|
1175
|
+
// "fresh" — first issue resolved, snapshot is trustworthy
|
|
1176
|
+
// "gone" — first issue confirmed 404; snapshot should be dropped
|
|
1177
|
+
// "unknown" — transient error (auth/network/timeout); leave
|
|
1178
|
+
// snapshot alone and let the next cache miss retry
|
|
1179
|
+
async function checkBatchSnapshotFreshness(repo, snapshot) {
|
|
1180
|
+
if (!snapshot || !Array.isArray(snapshot.issueNumbers) || snapshot.issueNumbers.length === 0) {
|
|
1181
|
+
return "gone";
|
|
1182
|
+
}
|
|
1183
|
+
const first = snapshot.issueNumbers[0];
|
|
1184
|
+
try {
|
|
1185
|
+
await ghJsonExecAsync([
|
|
1186
|
+
"issue",
|
|
1187
|
+
"view",
|
|
1188
|
+
String(first),
|
|
1189
|
+
"-R",
|
|
1190
|
+
repo,
|
|
1191
|
+
"--json",
|
|
1192
|
+
"number",
|
|
1193
|
+
]);
|
|
1194
|
+
return "fresh";
|
|
1195
|
+
} catch (err) {
|
|
1196
|
+
// gh surfaces a 404 via stderr text on a non-zero exit. Only
|
|
1197
|
+
// the unambiguous "not found" / "could not resolve" shapes
|
|
1198
|
+
// count as genuinely gone; anything else (network, auth,
|
|
1199
|
+
// timeout) is transient and must NOT delete the snapshot.
|
|
1200
|
+
const msg = String((err && (err.stderr || err.message)) || "").toLowerCase();
|
|
1201
|
+
if (msg.includes("could not resolve") || msg.includes("not found") || msg.includes("no issue")) {
|
|
1202
|
+
return "gone";
|
|
1203
|
+
}
|
|
1204
|
+
return "unknown";
|
|
1205
|
+
}
|
|
1206
|
+
}
|
|
1156
1207
|
|
|
1157
1208
|
// Decide which batch to render, combining the live parse of
|
|
1158
1209
|
// OVERNIGHT-QUEUE.md with the persistent snapshot. The snapshot is
|
|
@@ -1204,10 +1255,11 @@ function parseActiveBatch(queueText) {
|
|
|
1204
1255
|
const batchNumber = batchMatch ? parseInt(batchMatch[1], 10) : null;
|
|
1205
1256
|
// Only collect issue numbers from lines that look like list-item
|
|
1206
1257
|
// entries — i.e. lines whose first content token is either `#N`
|
|
1207
|
-
// or `[#N]` after an optional list marker
|
|
1208
|
-
//
|
|
1209
|
-
//
|
|
1210
|
-
//
|
|
1258
|
+
// or `[#N]` after an optional list marker, and optionally after
|
|
1259
|
+
// a GitHub-flavored markdown checkbox token `[ ]` / `[x]` / `[X]`.
|
|
1260
|
+
// This rejects prose like "Tracking umbrella: #293", "next after
|
|
1261
|
+
// #294 merged", and similar dependency / commentary references
|
|
1262
|
+
// that t2a flagged on realproject7/dropcast's queue.
|
|
1211
1263
|
//
|
|
1212
1264
|
// Accepted line shapes:
|
|
1213
1265
|
// - #295 sub-A heartbeat
|
|
@@ -1216,12 +1268,22 @@ function parseActiveBatch(queueText) {
|
|
|
1216
1268
|
// #295 sub-A heartbeat
|
|
1217
1269
|
// - [#295] sub-A heartbeat
|
|
1218
1270
|
// [#295] sub-A heartbeat
|
|
1271
|
+
// - [ ] #295 sub-A heartbeat (#342/quadwork#341: GFM checkbox)
|
|
1272
|
+
// - [x] #295 sub-A heartbeat (checked)
|
|
1273
|
+
// - [X] #295 sub-A heartbeat (checked, uppercase)
|
|
1219
1274
|
//
|
|
1220
1275
|
// Rejected:
|
|
1221
1276
|
// Tracking umbrella: #293
|
|
1222
1277
|
// Assigned next after #294 merged.
|
|
1223
1278
|
// See #295 for context.
|
|
1224
|
-
|
|
1279
|
+
//
|
|
1280
|
+
// The previous regex permitted an optional `[` *immediately*
|
|
1281
|
+
// before `#`, which happened to match `[#295]` but not `[ ] #295`
|
|
1282
|
+
// (a space between `[` and `#`), so Head-generated queues that
|
|
1283
|
+
// used GFM checkbox syntax produced zero issue numbers and the
|
|
1284
|
+
// Current Batch panel showed empty. #341 adds an explicit optional
|
|
1285
|
+
// checkbox token after the list marker.
|
|
1286
|
+
const ITEM_LINE_RE = /^\s*(?:[-*]\s+|\d+\.\s+)?(?:\[[ xX]\]\s+)?\[?#(\d{1,6})\]?\b/;
|
|
1225
1287
|
const seen = new Set();
|
|
1226
1288
|
const issueNumbers = [];
|
|
1227
1289
|
for (const line of section.split("\n")) {
|
|
@@ -1253,6 +1315,32 @@ async function ghJsonExecAsync(args) {
|
|
|
1253
1315
|
return JSON.parse(stdout);
|
|
1254
1316
|
}
|
|
1255
1317
|
|
|
1318
|
+
// #350: pure helper for the "no linked PR" branch of
|
|
1319
|
+
// progressForItemAsync. Takes the issue JSON (shape: { number,
|
|
1320
|
+
// title, state, url, ... }) and returns the batch-progress row
|
|
1321
|
+
// for an item that has no closedByPullRequestsReferences. Exported
|
|
1322
|
+
// from module.exports below for unit tests — no other callers.
|
|
1323
|
+
function buildNoPrRow(issue) {
|
|
1324
|
+
if (issue && issue.state === "CLOSED") {
|
|
1325
|
+
return {
|
|
1326
|
+
issue_number: issue.number,
|
|
1327
|
+
title: issue.title,
|
|
1328
|
+
url: issue.url,
|
|
1329
|
+
status: "closed",
|
|
1330
|
+
progress: 100,
|
|
1331
|
+
label: "Closed (no PR) ✓",
|
|
1332
|
+
};
|
|
1333
|
+
}
|
|
1334
|
+
return {
|
|
1335
|
+
issue_number: issue.number,
|
|
1336
|
+
title: issue.title,
|
|
1337
|
+
url: issue.url,
|
|
1338
|
+
status: "queued",
|
|
1339
|
+
progress: 0,
|
|
1340
|
+
label: "Issue · queued",
|
|
1341
|
+
};
|
|
1342
|
+
}
|
|
1343
|
+
|
|
1256
1344
|
async function progressForItemAsync(repo, issueNumber) {
|
|
1257
1345
|
// Pull issue state + linked PRs in one call. closedByPullRequestsReferences
|
|
1258
1346
|
// is gh's serializer for the GraphQL `closedByPullRequestsReferences`
|
|
@@ -1279,16 +1367,14 @@ async function progressForItemAsync(repo, issueNumber) {
|
|
|
1279
1367
|
const pr = linked.length > 0
|
|
1280
1368
|
? linked.slice().sort((a, b) => (b.number || 0) - (a.number || 0))[0]
|
|
1281
1369
|
: null;
|
|
1282
|
-
// No linked PR
|
|
1370
|
+
// No linked PR. #350: before falling into the "queued" bucket,
|
|
1371
|
+
// honor the issue's own state — a CLOSED issue with no linked
|
|
1372
|
+
// PR is fully done (superseded, not planned, runbook-only, etc.)
|
|
1373
|
+
// and should render at 100% with a ✓ label instead of a
|
|
1374
|
+
// misleading "0% · queued" row. Only truly OPEN issues with no
|
|
1375
|
+
// linked PR are still queued.
|
|
1283
1376
|
if (!pr) {
|
|
1284
|
-
return
|
|
1285
|
-
issue_number: issue.number,
|
|
1286
|
-
title: issue.title,
|
|
1287
|
-
url: issue.url,
|
|
1288
|
-
status: "queued",
|
|
1289
|
-
progress: 0,
|
|
1290
|
-
label: "Issue · queued",
|
|
1291
|
-
};
|
|
1377
|
+
return buildNoPrRow(issue);
|
|
1292
1378
|
}
|
|
1293
1379
|
// Re-fetch the PR to get reviewDecision + reviews + state, since
|
|
1294
1380
|
// the issue's closedByPullRequestsReferences edge only carries
|
|
@@ -1392,15 +1478,23 @@ async function progressForItemAsync(repo, issueNumber) {
|
|
|
1392
1478
|
}
|
|
1393
1479
|
|
|
1394
1480
|
function summarizeItems(items) {
|
|
1395
|
-
|
|
1481
|
+
// #350: "closed" (CLOSED issue with no linked PR — superseded,
|
|
1482
|
+
// not planned, runbook-only) counts toward the complete tally
|
|
1483
|
+
// alongside "merged". The panel tally now reads "X/N complete"
|
|
1484
|
+
// when the batch mixes both kinds of completion, otherwise
|
|
1485
|
+
// "X/N merged" for the classic all-via-PR case.
|
|
1486
|
+
let merged = 0, closed = 0, ready = 0, approved1 = 0, inReview = 0, queued = 0;
|
|
1396
1487
|
for (const it of items) {
|
|
1397
1488
|
if (it.status === "merged") merged++;
|
|
1489
|
+
else if (it.status === "closed") closed++;
|
|
1398
1490
|
else if (it.status === "ready") ready++;
|
|
1399
1491
|
else if (it.status === "approved1") approved1++;
|
|
1400
1492
|
else if (it.status === "in_review") inReview++;
|
|
1401
1493
|
else if (it.status === "queued") queued++;
|
|
1402
1494
|
}
|
|
1403
|
-
const
|
|
1495
|
+
const done = merged + closed;
|
|
1496
|
+
const doneLabel = closed > 0 ? "complete" : "merged";
|
|
1497
|
+
const parts = [`${done}/${items.length} ${doneLabel}`];
|
|
1404
1498
|
if (ready > 0) parts.push(`${ready} ready to merge`);
|
|
1405
1499
|
if (approved1 > 0) parts.push(`${approved1} needs 2nd approval`);
|
|
1406
1500
|
if (inReview > 0) parts.push(`${inReview} in review`);
|
|
@@ -1432,6 +1526,27 @@ router.get("/api/batch-progress", async (req, res) => {
|
|
|
1432
1526
|
// per #316's edge case.
|
|
1433
1527
|
}
|
|
1434
1528
|
|
|
1529
|
+
// #334 / quadwork#334: validate the on-disk snapshot against
|
|
1530
|
+
// GitHub before resolveDisplayedBatch can serve it. A snapshot
|
|
1531
|
+
// whose first issue 404s is almost certainly a leftover from a
|
|
1532
|
+
// prior project/repo that was purged; drop the file so the
|
|
1533
|
+
// resolver falls through to the live queue parse (which will
|
|
1534
|
+
// typically also be empty) instead of serving stale data
|
|
1535
|
+
// indefinitely. We only run the check on cache-miss paths (this
|
|
1536
|
+
// route already sits behind BATCH_PROGRESS_TTL_MS) and only
|
|
1537
|
+
// when we'd actually rely on the snapshot — i.e. the live queue
|
|
1538
|
+
// read succeeded, so the existing #316 bypass for unreadable
|
|
1539
|
+
// queue files keeps precedence.
|
|
1540
|
+
if (queueReadOk) {
|
|
1541
|
+
const existing = readBatchSnapshot(projectId);
|
|
1542
|
+
if (existing && Array.isArray(existing.issueNumbers) && existing.issueNumbers.length > 0) {
|
|
1543
|
+
const freshness = await checkBatchSnapshotFreshness(repo, existing);
|
|
1544
|
+
if (freshness === "gone") deleteBatchSnapshot(projectId);
|
|
1545
|
+
// "unknown" → leave the file alone; transient failure will
|
|
1546
|
+
// retry on the next cache miss.
|
|
1547
|
+
}
|
|
1548
|
+
}
|
|
1549
|
+
|
|
1435
1550
|
// #429 / quadwork#316: resolve the displayed batch through the
|
|
1436
1551
|
// snapshot-aware helper so merged items stay visible after Head
|
|
1437
1552
|
// moves them from Active Batch to Done, until a new batch starts.
|
|
@@ -1463,7 +1578,10 @@ router.get("/api/batch-progress", async (req, res) => {
|
|
|
1463
1578
|
};
|
|
1464
1579
|
});
|
|
1465
1580
|
const summary = summarizeItems(items);
|
|
1466
|
-
|
|
1581
|
+
// #350: treat CLOSED-without-PR items as complete alongside merged
|
|
1582
|
+
// so batches that mix runbook/superseded closes with real PRs
|
|
1583
|
+
// still flip to the COMPLETE state once everything is done.
|
|
1584
|
+
const complete = items.length > 0 && items.every((it) => it.status === "merged" || it.status === "closed");
|
|
1467
1585
|
const data = { batch_number: batchNumber, items, summary, complete };
|
|
1468
1586
|
_batchProgressCache.set(projectId, { ts: Date.now(), data });
|
|
1469
1587
|
res.json(data);
|
|
@@ -1956,6 +2074,12 @@ router.post("/api/setup", (req, res) => {
|
|
|
1956
2074
|
return res.json({ ok: true, message: "Project already in config" });
|
|
1957
2075
|
}
|
|
1958
2076
|
// Match CLI wizard agent structure: { cwd, command, auto_approve, mcp_inject }
|
|
2077
|
+
// #343: default Codex-backed agents to reasoning_effort="medium"
|
|
2078
|
+
// instead of the upstream xhigh/high default. high/xhigh is the
|
|
2079
|
+
// provider-side capacity-failure hot spot; medium is the
|
|
2080
|
+
// safe-default for fresh installs so new projects don't hit
|
|
2081
|
+
// "Selected model is at capacity" out of the box. Operators can
|
|
2082
|
+
// bump individual agents back up via the Agent Models widget.
|
|
1959
2083
|
const agents = {};
|
|
1960
2084
|
for (const agentId of ["head", "reviewer1", "reviewer2", "dev"]) {
|
|
1961
2085
|
const cmd = (backends && backends[agentId]) || "claude";
|
|
@@ -1966,6 +2090,7 @@ router.post("/api/setup", (req, res) => {
|
|
|
1966
2090
|
command: cmd,
|
|
1967
2091
|
auto_approve: autoApprove,
|
|
1968
2092
|
mcp_inject: injectMode,
|
|
2093
|
+
...(cliBase === "codex" ? { reasoning_effort: "medium" } : {}),
|
|
1969
2094
|
};
|
|
1970
2095
|
}
|
|
1971
2096
|
// Use pre-assigned ports/token from agentchattr-config step if provided,
|
|
@@ -2175,6 +2300,66 @@ function telegramConfigToml(projectId) {
|
|
|
2175
2300
|
return path.join(CONFIG_DIR, `telegram-${projectId}.toml`);
|
|
2176
2301
|
}
|
|
2177
2302
|
|
|
2303
|
+
// #353: per-project log file for the bridge subprocess. The start
|
|
2304
|
+
// handler redirects stdout + stderr here so crashes (ImportError,
|
|
2305
|
+
// config parse, auth failure) are recoverable instead of
|
|
2306
|
+
// /dev/null'd by `stdio: "ignore"`.
|
|
2307
|
+
function telegramBridgeLog(projectId) {
|
|
2308
|
+
return path.join(CONFIG_DIR, `telegram-bridge-${projectId}.log`);
|
|
2309
|
+
}
|
|
2310
|
+
|
|
2311
|
+
// Tail the last N lines of a file without reading the whole thing
|
|
2312
|
+
// into memory if it is huge. For the bridge log we care about the
|
|
2313
|
+
// final crash frame, not historical output.
|
|
2314
|
+
function readLastLines(filePath, n) {
|
|
2315
|
+
try {
|
|
2316
|
+
if (!fs.existsSync(filePath)) return "";
|
|
2317
|
+
const stat = fs.statSync(filePath);
|
|
2318
|
+
const readBytes = Math.min(stat.size, 64 * 1024);
|
|
2319
|
+
if (readBytes === 0) return "";
|
|
2320
|
+
const buf = Buffer.alloc(readBytes);
|
|
2321
|
+
const fd = fs.openSync(filePath, "r");
|
|
2322
|
+
try {
|
|
2323
|
+
fs.readSync(fd, buf, 0, readBytes, Math.max(0, stat.size - readBytes));
|
|
2324
|
+
} finally {
|
|
2325
|
+
fs.closeSync(fd);
|
|
2326
|
+
}
|
|
2327
|
+
const text = buf.toString("utf-8");
|
|
2328
|
+
const lines = text.split(/\r?\n/).filter((l) => l.length > 0);
|
|
2329
|
+
return lines.slice(-n).join("\n");
|
|
2330
|
+
} catch {
|
|
2331
|
+
return "";
|
|
2332
|
+
}
|
|
2333
|
+
}
|
|
2334
|
+
|
|
2335
|
+
// Verify that the bridge's Python runtime has its required modules
|
|
2336
|
+
// available. Cheap pre-flight so a missing `requests` install
|
|
2337
|
+
// produces a readable error instead of a silent Start → Stopped
|
|
2338
|
+
// flicker. Returns { ok: true } on success, { ok: false, error }
|
|
2339
|
+
// otherwise. Keep the import list small and close to what the
|
|
2340
|
+
// bridge actually needs; add modules here if the bridge gains new
|
|
2341
|
+
// hard deps.
|
|
2342
|
+
function checkTelegramBridgePythonDeps() {
|
|
2343
|
+
try {
|
|
2344
|
+
// Only check the third-party module the bridge actually needs
|
|
2345
|
+
// at import time — `requests`. Toml parsing differs between
|
|
2346
|
+
// Python versions (tomllib on 3.11+, tomli on 3.10-), and any
|
|
2347
|
+
// genuine toml import failure will now be captured in the
|
|
2348
|
+
// bridge log file on spawn, so this pre-flight stays narrow
|
|
2349
|
+
// and avoids false negatives on older Python installs.
|
|
2350
|
+
execFileSync("python3", ["-c", "import requests"], {
|
|
2351
|
+
encoding: "utf-8",
|
|
2352
|
+
timeout: 10000,
|
|
2353
|
+
stdio: ["ignore", "pipe", "pipe"],
|
|
2354
|
+
});
|
|
2355
|
+
return { ok: true };
|
|
2356
|
+
} catch (err) {
|
|
2357
|
+
const stderr = (err && err.stderr && err.stderr.toString && err.stderr.toString()) || "";
|
|
2358
|
+
const msg = stderr.trim() || (err && err.message) || "python3 import check failed";
|
|
2359
|
+
return { ok: false, error: msg };
|
|
2360
|
+
}
|
|
2361
|
+
}
|
|
2362
|
+
|
|
2178
2363
|
function isTelegramRunning(projectId) {
|
|
2179
2364
|
const pf = telegramPidFile(projectId);
|
|
2180
2365
|
if (!fs.existsSync(pf)) return false;
|
|
@@ -2273,12 +2458,29 @@ router.get("/api/telegram", async (req, res) => {
|
|
|
2273
2458
|
}
|
|
2274
2459
|
} catch { /* non-fatal — widget will just show no username */ }
|
|
2275
2460
|
}
|
|
2461
|
+
// #353: if the bridge is not running but a log file exists with
|
|
2462
|
+
// content, tail it and expose it as `last_error` so the widget
|
|
2463
|
+
// can surface runtime crashes (bad token mid-session, network
|
|
2464
|
+
// failure, config parse error) that happen after the initial
|
|
2465
|
+
// 500 ms post-spawn liveness check and would otherwise just
|
|
2466
|
+
// revert the pill to Stopped with no explanation.
|
|
2467
|
+
const running = isTelegramRunning(projectId);
|
|
2468
|
+
let lastError = "";
|
|
2469
|
+
if (!running) {
|
|
2470
|
+
const logPath = telegramBridgeLog(projectId);
|
|
2471
|
+
try {
|
|
2472
|
+
if (fs.existsSync(logPath) && fs.statSync(logPath).size > 0) {
|
|
2473
|
+
lastError = readLastLines(logPath, 20);
|
|
2474
|
+
}
|
|
2475
|
+
} catch {}
|
|
2476
|
+
}
|
|
2276
2477
|
res.json({
|
|
2277
|
-
running
|
|
2478
|
+
running,
|
|
2278
2479
|
configured,
|
|
2279
2480
|
chat_id: chatId,
|
|
2280
2481
|
bot_username: botUsername,
|
|
2281
2482
|
bridge_installed: bridgeInstalled,
|
|
2483
|
+
last_error: lastError,
|
|
2282
2484
|
});
|
|
2283
2485
|
});
|
|
2284
2486
|
|
|
@@ -2301,15 +2503,39 @@ router.post("/api/telegram", async (req, res) => {
|
|
|
2301
2503
|
}
|
|
2302
2504
|
}
|
|
2303
2505
|
case "install": {
|
|
2506
|
+
// #353: pip3 can exit 0 on some systems (PEP 668 externally-
|
|
2507
|
+
// managed environments, non-writable site-packages) even when
|
|
2508
|
+
// the subsequent import still fails. After the pip step, run
|
|
2509
|
+
// a post-install import check and surface both the pip output
|
|
2510
|
+
// and the import error together if the check fails — that's
|
|
2511
|
+
// the signal the operator needs to know whether to pick a
|
|
2512
|
+
// virtualenv, use --user, or --break-system-packages.
|
|
2513
|
+
let pipOutput = "";
|
|
2304
2514
|
try {
|
|
2305
2515
|
if (!fs.existsSync(BRIDGE_DIR)) {
|
|
2306
2516
|
execFileSync("gh", ["repo", "clone", "realproject7/agentchattr-telegram", BRIDGE_DIR], { encoding: "utf-8", timeout: 30000 });
|
|
2307
2517
|
}
|
|
2308
|
-
|
|
2309
|
-
|
|
2518
|
+
pipOutput = execFileSync(
|
|
2519
|
+
"pip3",
|
|
2520
|
+
["install", "-r", path.join(BRIDGE_DIR, "requirements.txt")],
|
|
2521
|
+
{ encoding: "utf-8", timeout: 60000 },
|
|
2522
|
+
);
|
|
2310
2523
|
} catch (err) {
|
|
2311
2524
|
return res.json({ ok: false, error: err.message || "Install failed" });
|
|
2312
2525
|
}
|
|
2526
|
+
const depCheck = checkTelegramBridgePythonDeps();
|
|
2527
|
+
if (!depCheck.ok) {
|
|
2528
|
+
return res.json({
|
|
2529
|
+
ok: false,
|
|
2530
|
+
error:
|
|
2531
|
+
"pip3 reported success but the bridge's Python deps still fail to import. " +
|
|
2532
|
+
"This usually means pip installed into a location python3 cannot see " +
|
|
2533
|
+
"(externally-managed environment / PEP 668 / mismatched interpreter).\n\n" +
|
|
2534
|
+
`Import error: ${depCheck.error}\n\n` +
|
|
2535
|
+
`pip output tail:\n${pipOutput.split("\n").slice(-10).join("\n")}`,
|
|
2536
|
+
});
|
|
2537
|
+
}
|
|
2538
|
+
return res.json({ ok: true });
|
|
2313
2539
|
}
|
|
2314
2540
|
case "start": {
|
|
2315
2541
|
const projectId = body.project_id;
|
|
@@ -2323,14 +2549,75 @@ router.post("/api/telegram", async (req, res) => {
|
|
|
2323
2549
|
const tomlContent = `[telegram]\nbot_token = "${tg.bot_token}"\nchat_id = "${tg.chat_id}"\n\n[agentchattr]\nurl = "${tg.agentchattr_url}"\n`;
|
|
2324
2550
|
fs.writeFileSync(tomlPath, tomlContent, { mode: 0o600 });
|
|
2325
2551
|
fs.chmodSync(tomlPath, 0o600);
|
|
2552
|
+
// #353: pre-flight import check so a fresh install with no
|
|
2553
|
+
// `requests` module produces a readable error instead of the
|
|
2554
|
+
// Start → Running → Stopped flicker that the v1 code path
|
|
2555
|
+
// produced with `stdio: "ignore"`.
|
|
2556
|
+
const depCheck = checkTelegramBridgePythonDeps();
|
|
2557
|
+
if (!depCheck.ok) {
|
|
2558
|
+
return res.json({
|
|
2559
|
+
ok: false,
|
|
2560
|
+
error:
|
|
2561
|
+
"Bridge Python dependencies not installed. Click \"Install Bridge\" to install them, " +
|
|
2562
|
+
"or run: pip3 install -r " + path.join(BRIDGE_DIR, "requirements.txt") + "\n\n" +
|
|
2563
|
+
`Import error: ${depCheck.error}`,
|
|
2564
|
+
});
|
|
2565
|
+
}
|
|
2566
|
+
// #353: capture stdout + stderr to a per-project log file so
|
|
2567
|
+
// bridge crashes (bad token, network failure, config parse
|
|
2568
|
+
// error, etc.) are recoverable. The handle must be opened
|
|
2569
|
+
// BEFORE spawn and passed through stdio so the detached
|
|
2570
|
+
// child keeps writing after the parent unrefs it.
|
|
2571
|
+
const logPath = telegramBridgeLog(projectId);
|
|
2572
|
+
// #353 follow-up: truncate the log at the start of every
|
|
2573
|
+
// spawn so the status endpoint's last_error tail only ever
|
|
2574
|
+
// reflects the *current* session. Otherwise a previous
|
|
2575
|
+
// crash's trace would linger forever and the widget would
|
|
2576
|
+
// keep surfacing a stale error even after the operator
|
|
2577
|
+
// fixed the underlying problem and restarted cleanly.
|
|
2578
|
+
try { fs.writeFileSync(logPath, ""); } catch {}
|
|
2579
|
+
let outFd, errFd;
|
|
2580
|
+
try {
|
|
2581
|
+
outFd = fs.openSync(logPath, "a");
|
|
2582
|
+
errFd = fs.openSync(logPath, "a");
|
|
2583
|
+
} catch (err) {
|
|
2584
|
+
return res.json({ ok: false, error: `Could not open bridge log file: ${err.message}` });
|
|
2585
|
+
}
|
|
2586
|
+
let child;
|
|
2326
2587
|
try {
|
|
2327
|
-
|
|
2588
|
+
child = spawn("python3", [bridgeScript, "--config", tomlPath], {
|
|
2589
|
+
detached: true,
|
|
2590
|
+
stdio: ["ignore", outFd, errFd],
|
|
2591
|
+
});
|
|
2328
2592
|
child.unref();
|
|
2329
2593
|
if (child.pid) fs.writeFileSync(telegramPidFile(projectId), String(child.pid));
|
|
2330
|
-
return res.json({ ok: true, running: true, pid: child.pid });
|
|
2331
2594
|
} catch (err) {
|
|
2595
|
+
try { fs.closeSync(outFd); } catch {}
|
|
2596
|
+
try { fs.closeSync(errFd); } catch {}
|
|
2332
2597
|
return res.json({ ok: false, error: err.message || "Start failed" });
|
|
2333
2598
|
}
|
|
2599
|
+
// Close our copies of the fds in the parent now that the
|
|
2600
|
+
// child has inherited them — otherwise the parent holds the
|
|
2601
|
+
// log file open forever.
|
|
2602
|
+
try { fs.closeSync(outFd); } catch {}
|
|
2603
|
+
try { fs.closeSync(errFd); } catch {}
|
|
2604
|
+
// #353: liveness check — wait 500ms, then verify the child
|
|
2605
|
+
// is still running. If it already died, tail the log file
|
|
2606
|
+
// and return those lines as the error.
|
|
2607
|
+
await new Promise((r) => setTimeout(r, 500));
|
|
2608
|
+
let alive = true;
|
|
2609
|
+
try { process.kill(child.pid, 0); } catch { alive = false; }
|
|
2610
|
+
if (!alive) {
|
|
2611
|
+
const tail = readLastLines(logPath, 20);
|
|
2612
|
+
try { fs.unlinkSync(telegramPidFile(projectId)); } catch {}
|
|
2613
|
+
return res.json({
|
|
2614
|
+
ok: false,
|
|
2615
|
+
error:
|
|
2616
|
+
"Bridge crashed on start (exited within 500ms).\n\n" +
|
|
2617
|
+
`Last log lines (${logPath}):\n${tail || "(log empty)"}`,
|
|
2618
|
+
});
|
|
2619
|
+
}
|
|
2620
|
+
return res.json({ ok: true, running: true, pid: child.pid });
|
|
2334
2621
|
}
|
|
2335
2622
|
case "stop": {
|
|
2336
2623
|
const projectId = body.project_id;
|
|
@@ -2404,4 +2691,82 @@ router.post("/api/telegram", async (req, res) => {
|
|
|
2404
2691
|
}
|
|
2405
2692
|
});
|
|
2406
2693
|
|
|
2694
|
+
// #343: per-agent model + reasoning-effort settings endpoint.
|
|
2695
|
+
// GET returns the rows the dashboard Agent Models widget needs;
|
|
2696
|
+
// PUT persists a single row back to config.json. Kept narrow on
|
|
2697
|
+
// purpose — only `model` and `reasoning_effort` are writable
|
|
2698
|
+
// here, and codex is the only backend that accepts
|
|
2699
|
+
// reasoning_effort today. The launch-time wiring lives in
|
|
2700
|
+
// server/index.js buildAgentArgs; this endpoint is purely
|
|
2701
|
+
// config storage.
|
|
2702
|
+
const ALLOWED_REASONING_EFFORTS = new Set(["minimal", "low", "medium", "high"]);
|
|
2703
|
+
|
|
2704
|
+
router.get("/api/project/:projectId/agent-models", (req, res) => {
|
|
2705
|
+
try {
|
|
2706
|
+
const cfg = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf-8"));
|
|
2707
|
+
const project = cfg.projects?.find((p) => p.id === req.params.projectId);
|
|
2708
|
+
if (!project) return res.status(404).json({ error: "Unknown project" });
|
|
2709
|
+
const rows = ["head", "reviewer1", "reviewer2", "dev"].map((agentId) => {
|
|
2710
|
+
const a = project.agents?.[agentId] || {};
|
|
2711
|
+
const command = a.command || "claude";
|
|
2712
|
+
const cliBase = command.split("/").pop().split(" ")[0];
|
|
2713
|
+
return {
|
|
2714
|
+
agent_id: agentId,
|
|
2715
|
+
backend: cliBase,
|
|
2716
|
+
model: a.model || "",
|
|
2717
|
+
reasoning_effort: a.reasoning_effort || "",
|
|
2718
|
+
reasoning_supported: cliBase === "codex",
|
|
2719
|
+
};
|
|
2720
|
+
});
|
|
2721
|
+
return res.json({ agents: rows });
|
|
2722
|
+
} catch (err) {
|
|
2723
|
+
return res.status(500).json({ error: err.message || "read failed" });
|
|
2724
|
+
}
|
|
2725
|
+
});
|
|
2726
|
+
|
|
2727
|
+
// PUT: set or clear a single agent's model / reasoning-effort override
// and persist the change to the config file. An empty string clears the
// override so the CLI default applies again.
router.put("/api/project/:projectId/agent-models/:agentId", (req, res) => {
  const { projectId, agentId } = req.params;
  if (!["head", "reviewer1", "reviewer2", "dev"].includes(agentId)) {
    return res.json({ ok: false, error: "Unknown agent" });
  }

  const body = req.body || {};
  // Accept empty string as "clear override → fall back to CLI default".
  const model = typeof body.model === "string" ? body.model.trim() : undefined;
  const reasoning =
    typeof body.reasoning_effort === "string" ? body.reasoning_effort.trim() : undefined;
  // Reject only non-empty values outside the allowed set; "" means "clear".
  if (reasoning !== undefined && reasoning !== "" && !ALLOWED_REASONING_EFFORTS.has(reasoning)) {
    return res.json({ ok: false, error: `Invalid reasoning_effort: ${reasoning}` });
  }

  try {
    const cfg = JSON.parse(fs.readFileSync(CONFIG_PATH, "utf-8"));
    const project = cfg.projects?.find((p) => p.id === projectId);
    if (!project) return res.status(404).json({ ok: false, error: "Unknown project" });

    if (!project.agents) project.agents = {};
    const agentCfg = project.agents[agentId] || {};
    if (model !== undefined) {
      if (model === "") delete agentCfg.model;
      else agentCfg.model = model;
    }
    if (reasoning !== undefined) {
      if (reasoning === "") delete agentCfg.reasoning_effort;
      else agentCfg.reasoning_effort = reasoning;
    }
    project.agents[agentId] = agentCfg;
    fs.writeFileSync(CONFIG_PATH, JSON.stringify(cfg, null, 2));
    return res.json({
      ok: true,
      agent: {
        agent_id: agentId,
        model: agentCfg.model || "",
        reasoning_effort: agentCfg.reasoning_effort || "",
      },
    });
  } catch (err) {
    return res.json({ ok: false, error: err.message || "write failed" });
  }
});
module.exports = router;
// Test-only re-exports — none of these have production callers outside
// this file:
//   #341: parseActiveBatch, for the node:assert script at
//         server/routes.parseActiveBatch.test.js.
//   #350: buildNoPrRow and summarizeItems, for the batch-progress
//         fixture test.
//   #353: readLastLines, for the telegram-bridge test.
Object.assign(module.exports, {
  parseActiveBatch,
  buildNoPrRow,
  summarizeItems,
  readLastLines,
});
@@ -0,0 +1,88 @@
|
|
|
1
|
+
// #341 / quadwork#341: parseActiveBatch regex tests. Plain
// node:assert script — no test runner is wired up. Run with
// `node server/routes.parseActiveBatch.test.js`.
//
// parseActiveBatch is re-exported from server/routes.js for this
// test only; it has no production callers outside routes.js.

const assert = require("node:assert/strict");
const { parseActiveBatch } = require("./routes");

// Embed `body` in a minimal queue document: an Active Batch section under
// test, followed by a Backlog section that must never be scanned.
function makeQueueDoc(body, batchLine = "**Batch:** 33") {
  return `# Overnight Queue\n\n## Active Batch\n\n${batchLine}\n\n${body}\n\n## Backlog\n\n- #999 something else\n`;
}

// 1) #341 regression: GFM checkbox items (space between `[` and `#`)
// must populate the list.
{
  const lines = [
    "- [ ] #338 — Remove home hero",
    "- [ ] #337 — Stack SERVER",
    "- [x] #332 — Commit port drafts",
    "- [X] #334 — Snapshot stale check",
  ];
  const { batchNumber, issueNumbers } = parseActiveBatch(makeQueueDoc(lines.join("\n")));
  assert.equal(batchNumber, 33, "batch number parsed");
  assert.deepEqual(issueNumbers, [338, 337, 332, 334], "checkbox items parsed in order");
}

// 2) Existing shapes keep working.
{
  const lines = [
    "- #295 sub-A heartbeat",
    "* #296 sub-B",
    "1. #297 sub-C",
    "#298 sub-D",
    "- [#299] sub-E",
    "[#300] sub-F",
  ];
  const { issueNumbers } = parseActiveBatch(makeQueueDoc(lines.join("\n")));
  assert.deepEqual(issueNumbers, [295, 296, 297, 298, 299, 300], "legacy shapes still parsed");
}

// 3) Prose references still rejected.
{
  const lines = [
    "- [ ] #400 real item",
    "Tracking umbrella: #293",
    "Assigned next after #294 merged.",
    "See #295 for context.",
  ];
  const { issueNumbers } = parseActiveBatch(makeQueueDoc(lines.join("\n")));
  assert.deepEqual(issueNumbers, [400], "prose references rejected, only real item kept");
}

// 4) De-dup: same issue number on multiple lines collapses.
{
  const lines = [
    "- [ ] #100 first mention",
    "- [x] #100 second mention",
    "- [ ] #101 another",
  ];
  const { issueNumbers } = parseActiveBatch(makeQueueDoc(lines.join("\n")));
  assert.deepEqual(issueNumbers, [100, 101], "de-dup keeps first occurrence");
}

// 5) Items in Backlog section are NOT picked up.
{
  const { issueNumbers } = parseActiveBatch(makeQueueDoc("- [ ] #500 active item"));
  assert.deepEqual(issueNumbers, [500], "Backlog section not scanned");
}

// 6) Empty / missing Active Batch returns empty.
{
  const { batchNumber, issueNumbers } = parseActiveBatch("# no active batch here\n");
  assert.equal(batchNumber, null);
  assert.deepEqual(issueNumbers, []);
}

console.log("routes.parseActiveBatch.test.js: all assertions passed (6 cases)");
|