kc-beta 0.7.5 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/README.md +47 -0
  2. package/package.json +3 -2
  3. package/src/agent/engine.js +390 -100
  4. package/src/agent/pipelines/_advance-hints.js +92 -0
  5. package/src/agent/pipelines/_milestone-derive.js +247 -13
  6. package/src/agent/pipelines/skill-authoring.js +30 -1
  7. package/src/agent/tools/agent-tool.js +2 -2
  8. package/src/agent/tools/consult-skill.js +15 -0
  9. package/src/agent/tools/dashboard-render.js +48 -1
  10. package/src/agent/tools/document-parse.js +31 -2
  11. package/src/agent/tools/phase-advance.js +17 -13
  12. package/src/agent/tools/release.js +250 -7
  13. package/src/agent/tools/sandbox-exec.js +65 -8
  14. package/src/agent/tools/worker-llm-call.js +95 -15
  15. package/src/agent/workspace.js +25 -4
  16. package/src/cli/components.js +4 -1
  17. package/src/cli/index.js +97 -1
  18. package/src/config.js +19 -2
  19. package/src/marathon/driver.js +217 -0
  20. package/src/marathon/prompts.js +93 -0
  21. package/template/.env.template +16 -0
  22. package/template/skills/en/bootstrap-workspace/SKILL.md +14 -0
  23. package/template/skills/en/quality-control/SKILL.md +9 -0
  24. package/template/skills/en/skill-authoring/SKILL.md +39 -0
  25. package/template/skills/en/skill-to-workflow/SKILL.md +53 -0
  26. package/template/skills/en/work-decomposition/SKILL.md +34 -0
  27. package/template/skills/phase_skills.yaml +5 -0
  28. package/template/skills/zh/bootstrap-workspace/SKILL.md +14 -0
  29. package/template/skills/zh/compliance-judgment/SKILL.md +37 -37
  30. package/template/skills/zh/document-chunking/SKILL.md +21 -14
  31. package/template/skills/zh/document-parsing/SKILL.md +65 -65
  32. package/template/skills/zh/entity-extraction/SKILL.md +68 -68
  33. package/template/skills/zh/quality-control/SKILL.md +9 -0
  34. package/template/skills/zh/skill-authoring/SKILL.md +39 -0
  35. package/template/skills/zh/skill-creator/SKILL.md +204 -200
  36. package/template/skills/zh/skill-to-workflow/SKILL.md +53 -0
  37. package/template/skills/zh/tree-processing/SKILL.md +67 -63
  38. package/template/skills/zh/work-decomposition/SKILL.md +34 -0
  39. package/template/workflows/common/llm_client.py +168 -0
  40. package/template/workflows/common/utils.py +132 -0
@@ -85,13 +85,19 @@ export class ReleaseTool extends BaseTool {
85
85
  return new ToolResult(`release template missing at ${TEMPLATE_DIR}`, true);
86
86
  }
87
87
 
88
- // 1. Snapshot first locks in commit + tag, regardless of whether bundle build succeeds
89
- const snapResult = await this._snapshot.execute({
90
- label: `release-${slug}`,
91
- notes: `Release ${label} bundle source`,
92
- });
93
- if (snapResult.isError) return new ToolResult(`snapshot failed: ${snapResult.content}`, true);
94
- const { tag: snapshotTag, commit: snapshotCommit } = this._readSnapshotMeta(`release-${slug}`);
88
+ // v0.8.1 P9-C: defer the snapshot (git tag) until AFTER the bundle
89
+ // is written + verified. v0.8.0 ordered snapshot-first to "lock in
90
+ // commit + tag regardless of bundle outcome," but E2E #11 资管 v0.8
91
+ // audit found `release-v1` tags with no corresponding bundle dir —
92
+ // tag without bundle confuses downstream consumers. New order:
93
+ // 1. Build bundle (catalog read, copy template, write fixtures, manifest, README)
94
+ // 2. Verify bundle (manifest.json + README.md exist + non-empty)
95
+ // 3. ONLY THEN snapshot (creates the git tag) + back-fill manifest
96
+ // with snapshot tag/commit
97
+ // If verification fails, a `.failed_release` marker is written into
98
+ // the bundle dir and NO tag is created.
99
+ let snapshotTag = null;
100
+ let snapshotCommit = null;
95
101
 
96
102
  // 2. Read catalog and filter
97
103
  const catalogPath = path.join(this._workspace.cwd, "rules", "catalog.json");
@@ -294,6 +300,77 @@ export class ReleaseTool extends BaseTool {
294
300
  }
295
301
  }
296
302
 
303
+ // v0.8.1 P9-C: bundle verification + transactional snapshot.
304
+ // The manifest + README were written above. Verify they exist with
305
+ // substance (≥200 bytes README, valid JSON manifest with `slug` field).
306
+ // If verification fails, write `.failed_release` marker and skip
307
+ // the git-tag step — no tag-without-bundle.
308
+ const manifestPath = path.join(bundleAbs, "manifest.json");
309
+ const readmePath = path.join(bundleAbs, "README.md");
310
+ let verifyError = null;
311
+ try {
312
+ const mStat = fs.statSync(manifestPath);
313
+ const rStat = fs.statSync(readmePath);
314
+ if (!mStat.isFile() || mStat.size < 50) verifyError = "manifest.json missing or too small";
315
+ else if (!rStat.isFile() || rStat.size < 200) verifyError = "README.md missing or too small";
316
+ else {
317
+ const m = JSON.parse(fs.readFileSync(manifestPath, "utf-8"));
318
+ if (m.slug !== slug) verifyError = `manifest.slug=${m.slug} doesn't match expected ${slug}`;
319
+ }
320
+ } catch (e) {
321
+ verifyError = `bundle verification threw: ${e.message}`;
322
+ }
323
+
324
+ if (verifyError) {
325
+ try {
326
+ fs.writeFileSync(
327
+ path.join(bundleAbs, ".failed_release"),
328
+ JSON.stringify({
329
+ failed_at: new Date().toISOString(),
330
+ reason: verifyError,
331
+ label,
332
+ slug,
333
+ }, null, 2),
334
+ );
335
+ } catch { /* best-effort */ }
336
+ return new ToolResult(
337
+ `Release bundle verification failed (${verifyError}). NO git tag created. ` +
338
+ `See .failed_release marker in ${bundleRel}/ for details. Fix the bundle issue and re-run.`,
339
+ true,
340
+ );
341
+ }
342
+
343
+ // Bundle verified. NOW snapshot — creates the durable git tag.
344
+ const snapResult = await this._snapshot.execute({
345
+ label: `release-${slug}`,
346
+ notes: `Release ${label} bundle source`,
347
+ });
348
+ if (snapResult.isError) {
349
+ // Bundle exists but tagging failed. Surface but don't roll back —
350
+ // the bundle is still usable; the user can manually tag later.
351
+ return new ToolResult(
352
+ `Release '${label}' bundled at ${bundleRel} but snapshot tag FAILED: ${snapResult.content}. ` +
353
+ `Bundle is valid; create the snapshot tag manually if needed.`,
354
+ );
355
+ }
356
+ const meta = this._readSnapshotMeta(`release-${slug}`);
357
+ snapshotTag = meta.tag;
358
+ snapshotCommit = meta.commit;
359
+
360
+ // Back-fill the manifest with the now-known snapshot tag/commit.
361
+ try {
362
+ const m = JSON.parse(fs.readFileSync(manifestPath, "utf-8"));
363
+ m.snapshot_tag = snapshotTag;
364
+ m.snapshot_commit = snapshotCommit;
365
+ fs.writeFileSync(manifestPath, JSON.stringify(m, null, 2) + "\n");
366
+ // Also back-fill the README's snapshot placeholders if still placeholder.
367
+ const readme = fs.readFileSync(readmePath, "utf-8");
368
+ const updated = readme
369
+ .replace(/\(no tag — git unavailable\)/g, snapshotTag || "")
370
+ .replace(/\(unknown\)/g, snapshotCommit || "(unknown)");
371
+ if (updated !== readme) fs.writeFileSync(readmePath, updated);
372
+ } catch { /* best-effort back-fill */ }
373
+
297
374
  // Bundle dir is in output/ (gitignored). Snapshot manifest in snapshots/ IS tracked.
298
375
  const lines = [
299
376
  `Release '${label}' bundled at ${bundleRel}`,
@@ -576,10 +653,175 @@ export class ReleaseTool extends BaseTool {
576
653
  }
577
654
  }
578
655
 
656
+ // 3) v0.8 P0-C: production_qc_results.json + qc_results_v*.json shapes
657
+ // (资管 + 贷款 v0.7.5 audits both shipped empty historical_accuracy
658
+ // because the v0.7.2 aggregator only recognized rule_stats / full_test_results).
659
+ if (tally.size === 0) {
660
+ const qcFiles = files
661
+ .filter((f) =>
662
+ /^production_qc(?:_results)?(?:_v\d+)?\.json$/i.test(f.name) ||
663
+ /^qc_results(?:_v\d+)?\.json$/i.test(f.name)
664
+ )
665
+ .sort((a, b) => a.name.localeCompare(b.name));
666
+ for (const f of qcFiles.slice(0, 5)) {
667
+ try {
668
+ const d = JSON.parse(fs.readFileSync(f.path, "utf-8"));
669
+ const results = d.results;
670
+ if (!results) continue;
671
+
672
+ // Shape 3a (资管): nested rule-keyed map
673
+ // {results: {<rid>: {<doc_id>: {verdict, ...}}}}
674
+ if (typeof results === "object" && !Array.isArray(results)) {
675
+ for (const [rid, docs] of Object.entries(results)) {
676
+ if (!isRuleId(rid) || !docs || typeof docs !== "object") continue;
677
+ for (const r of Object.values(docs)) {
678
+ if (!r || typeof r !== "object") continue;
679
+ const verdict = (r.verdict || "").toString().toUpperCase();
680
+ if (verdict === "PASS") bump(rid, "pass");
681
+ else if (verdict === "FAIL") bump(rid, "fail");
682
+ else if (verdict === "NOT_APPLICABLE" || verdict === "NA" || verdict === "WARNING") bump(rid, "na");
683
+ }
684
+ }
685
+ if (tally.size > 0) sourceFiles.push(path.relative(this._workspace.cwd, f.path));
686
+ }
687
+ // Shape 3b (贷款): per-doc rollup list with failed_rules
688
+ // {results: [{filename, actual, correct, failed_rules: [...]}], total_tested: N}
689
+ // For each rule: failures counted from failed_rules union; passes
690
+ // inferred as (total_tested - failures) for rules that appear in the catalog.
691
+ else if (Array.isArray(results)) {
692
+ const catalogPath = path.join(this._workspace.cwd, "rules", "catalog.json");
693
+ let catalogRules = [];
694
+ try {
695
+ const cat = JSON.parse(fs.readFileSync(catalogPath, "utf-8"));
696
+ const list = Array.isArray(cat) ? cat : Array.isArray(cat?.rules) ? cat.rules : [];
697
+ catalogRules = list.map((r) => r?.id || r?.rule_id).filter((x) => isRuleId(x));
698
+ } catch { /* catalog optional */ }
699
+
700
+ const failCountByRule = new Map();
701
+ let docCount = 0;
702
+ for (const row of results) {
703
+ if (!row || typeof row !== "object") continue;
704
+ docCount += 1;
705
+ const failed = Array.isArray(row.failed_rules) ? row.failed_rules : [];
706
+ for (const rid of failed) {
707
+ if (!isRuleId(rid)) continue;
708
+ failCountByRule.set(rid, (failCountByRule.get(rid) || 0) + 1);
709
+ }
710
+ }
711
+ if (docCount > 0) {
712
+ const ruleSet = new Set([...catalogRules, ...failCountByRule.keys()]);
713
+ for (const rid of ruleSet) {
714
+ const fails = failCountByRule.get(rid) || 0;
715
+ const passes = Math.max(0, docCount - fails);
716
+ const t = tally.get(rid) || { pass: 0, fail: 0, na: 0, n: 0 };
717
+ t.pass += passes; t.fail += fails; t.n += docCount;
718
+ tally.set(rid, t);
719
+ }
720
+ if (tally.size > 0) sourceFiles.push(path.relative(this._workspace.cwd, f.path));
721
+ }
722
+ }
723
+ } catch { /* try next file */ }
724
+ if (tally.size > 0) break;
725
+ }
726
+ }
727
+
728
+ // 4) v0.8.1 P9-A: top-level fail_by_rule + pass_by_rule maps (贷款
729
+ // v0.8 production_qc_report.json shape). Direct per-rule counts —
730
+ // no per-doc rollup, no verdict literals to scan.
731
+ // {accuracy, total_checks, fail_by_rule: {<rid>: N}, pass_by_rule: {<rid>: N}}
732
+ if (tally.size === 0) {
733
+ for (const f of files) {
734
+ if (!/qc|prod|report|result/i.test(f.name)) continue;
735
+ try {
736
+ const d = JSON.parse(fs.readFileSync(f.path, "utf-8"));
737
+ const failMap = d?.fail_by_rule;
738
+ const passMap = d?.pass_by_rule;
739
+ if (
740
+ failMap && typeof failMap === "object" && !Array.isArray(failMap) &&
741
+ passMap && typeof passMap === "object" && !Array.isArray(passMap)
742
+ ) {
743
+ const allRules = new Set([...Object.keys(failMap), ...Object.keys(passMap)]);
744
+ let matched = false;
745
+ for (const rid of allRules) {
746
+ if (!isRuleId(rid)) continue;
747
+ const fails = Number(failMap[rid]) || 0;
748
+ const passes = Number(passMap[rid]) || 0;
749
+ if (fails + passes === 0) continue;
750
+ const t = tally.get(rid) || { pass: 0, fail: 0, na: 0, n: 0 };
751
+ t.pass += passes;
752
+ t.fail += fails;
753
+ t.n += passes + fails;
754
+ tally.set(rid, t);
755
+ matched = true;
756
+ }
757
+ if (matched) {
758
+ sourceFiles.push(path.relative(this._workspace.cwd, f.path));
759
+ break;
760
+ }
761
+ }
762
+ } catch { /* skip non-JSON */ }
763
+ }
764
+ }
765
+
766
+ // 5) Fallback (belt-and-suspenders per v0.8 plan Risk #7):
767
+ // walk any output/*.json with a top-level rule_id-keyed shape that has
768
+ // verdict-like leaf objects. Catches future schema drift before the
769
+ // next audit cycle.
770
+ if (tally.size === 0) {
771
+ for (const f of files) {
772
+ if (!/qc|verdict|result/i.test(f.name)) continue;
773
+ try {
774
+ const d = JSON.parse(fs.readFileSync(f.path, "utf-8"));
775
+ const root = d?.results || d;
776
+ if (!root || typeof root !== "object" || Array.isArray(root)) continue;
777
+ let matched = false;
778
+ for (const [rid, val] of Object.entries(root)) {
779
+ if (!isRuleId(rid) || !val || typeof val !== "object") continue;
780
+ // val might be {verdict, ...} OR {<doc>: {verdict, ...}}
781
+ const probe = val.verdict ? [val] : Object.values(val);
782
+ for (const r of probe) {
783
+ if (!r || typeof r !== "object") continue;
784
+ const verdict = (r.verdict || "").toString().toUpperCase();
785
+ if (verdict === "PASS") { bump(rid, "pass"); matched = true; }
786
+ else if (verdict === "FAIL") { bump(rid, "fail"); matched = true; }
787
+ else if (verdict === "NOT_APPLICABLE" || verdict === "NA") { bump(rid, "na"); matched = true; }
788
+ }
789
+ }
790
+ if (matched) {
791
+ sourceFiles.push(path.relative(this._workspace.cwd, f.path) + " (fallback shape)");
792
+ break;
793
+ }
794
+ } catch { /* skip non-JSON */ }
795
+ }
796
+ }
797
+
579
798
  if (tally.size === 0) return null;
580
799
 
800
+ // v0.8.1 P9-D: filter tally to rule_ids in the current catalog.
801
+ // E2E #11 资管 v0.8 audit: confidence_calibration aggregated from
802
+ // an abandoned 39-rule pipeline included only 2 of 4 final samples.
803
+ // Filtering to catalog.json keeps the calibration scoped to the
804
+ // rules that actually ship in the release.
805
+ let catalogRuleIds = null;
806
+ try {
807
+ const catalogPath = path.join(this._workspace.cwd, "rules", "catalog.json");
808
+ if (fs.existsSync(catalogPath)) {
809
+ const cat = JSON.parse(fs.readFileSync(catalogPath, "utf-8"));
810
+ const list = Array.isArray(cat) ? cat : Array.isArray(cat?.rules) ? cat.rules : [];
811
+ catalogRuleIds = new Set(
812
+ list.map((r) => r?.id || r?.rule_id).filter((x) => isRuleId(x))
813
+ );
814
+ if (catalogRuleIds.size === 0) catalogRuleIds = null;
815
+ }
816
+ } catch { /* skip filter if catalog missing/malformed */ }
817
+
581
818
  const historical_accuracy = {};
819
+ const droppedRules = [];
582
820
  for (const [rid, t] of tally.entries()) {
821
+ if (catalogRuleIds && !catalogRuleIds.has(rid)) {
822
+ droppedRules.push(rid);
823
+ continue;
824
+ }
583
825
  const fired = t.pass + t.fail;
584
826
  historical_accuracy[rid] = {
585
827
  pass_rate: fired > 0 ? +(t.pass / fired).toFixed(4) : null,
@@ -593,6 +835,7 @@ export class ReleaseTool extends BaseTool {
593
835
  historical_accuracy,
594
836
  computed_at: new Date().toISOString(),
595
837
  source_files: sourceFiles,
838
+ ...(droppedRules.length > 0 ? { dropped_off_catalog: droppedRules } : {}),
596
839
  };
597
840
  }
598
841
 
@@ -25,16 +25,38 @@ function detectSharedFileWrites(command) {
25
25
  * Execute shell commands in the workspace directory.
26
26
  * Uses child_process.spawn so pipes, redirects, && all work.
27
27
  * Output (stdout + stderr combined) is capped at 10K chars.
28
+ *
29
+ * v0.8 P1-F timeout model:
30
+ * - Default: KC_EXEC_DEFAULT_TIMEOUT_MS (env) or 120000ms (2 min)
31
+ * - Hard cap: KC_EXEC_MAX_TIMEOUT_MS (env) or 600000ms (10 min)
32
+ * - Per-call `timeout_ms` overrides default, clamped to [1000, max]
33
+ * - Legacy `KC_EXEC_TIMEOUT` (seconds) still accepted as a deprecation
34
+ * alias for the default; emits a warning to stderr on first read.
28
35
  */
29
36
  export class SandboxExecTool extends BaseTool {
30
37
  /**
31
38
  * @param {import('../workspace.js').Workspace} workspace
32
- * @param {number} [timeout=30]
39
+ * @param {object|number} [opts] — either a config object (new) OR
40
+ * a number meaning the legacy timeout-in-seconds (old). The number
41
+ * form is preserved for callers that haven't been updated yet.
42
+ * @param {number} [opts.defaultTimeoutMs] — default 120000
43
+ * @param {number} [opts.maxTimeoutMs] — default 600000
33
44
  */
34
- constructor(workspace, timeout = 30) {
45
+ constructor(workspace, opts = {}) {
35
46
  super();
36
47
  this._workspace = workspace;
37
- this._timeout = timeout;
48
+
49
+ // Legacy: opts is a bare number = seconds. Convert to ms.
50
+ if (typeof opts === "number") {
51
+ this._defaultTimeoutMs = opts * 1000;
52
+ this._maxTimeoutMs = Math.max(this._defaultTimeoutMs, 600_000);
53
+ } else {
54
+ this._defaultTimeoutMs = opts.defaultTimeoutMs ?? 120_000;
55
+ this._maxTimeoutMs = opts.maxTimeoutMs ?? 600_000;
56
+ }
57
+ // Floor: keep at least 1s. Cap: max can't be below default.
58
+ this._defaultTimeoutMs = Math.max(1000, this._defaultTimeoutMs);
59
+ this._maxTimeoutMs = Math.max(this._defaultTimeoutMs, this._maxTimeoutMs);
38
60
  }
39
61
 
40
62
  get name() { return "sandbox_exec"; }
@@ -47,7 +69,10 @@ export class SandboxExecTool extends BaseTool {
47
69
  "Pipes, redirects, and chained commands (&&) are supported. " +
48
70
  "stdout + stderr combined are capped at 10,000 chars; longer output is truncated. " +
49
71
  "For reading individual files larger than ~10 KB (e.g. regulation documents), " +
50
- "prefer workspace_file (operation=read) which has a larger 50 KB cap."
72
+ "prefer workspace_file (operation=read) which has a larger 50 KB cap. " +
73
+ `Default timeout ${Math.round(this._defaultTimeoutMs / 1000)}s; pass timeout_ms ` +
74
+ `to extend up to ${Math.round(this._maxTimeoutMs / 1000)}s for known-slow commands ` +
75
+ `(LLM batch processing, document parsing, large regression runs).`
51
76
  );
52
77
  }
53
78
 
@@ -64,6 +89,10 @@ export class SandboxExecTool extends BaseTool {
64
89
  enum: ["workspace", "project"],
65
90
  description: "Working directory. 'workspace' (default) = KC's workspace. 'project' = user's project directory.",
66
91
  },
92
+ timeout_ms: {
93
+ type: "integer",
94
+ description: `Optional per-call timeout in milliseconds. Default ${this._defaultTimeoutMs}ms; clamped to [1000, ${this._maxTimeoutMs}]. Pass for commands you expect to take longer than the default (LLM batches, parsing, regressions).`,
95
+ },
67
96
  },
68
97
  required: ["command"],
69
98
  };
@@ -76,6 +105,22 @@ export class SandboxExecTool extends BaseTool {
76
105
  return new ToolResult("No command provided", true);
77
106
  }
78
107
 
108
+ // v0.8 P1-F: per-call timeout clamping
109
+ let effectiveTimeoutMs = this._defaultTimeoutMs;
110
+ let clampedMessage = null;
111
+ if (Number.isFinite(input.timeout_ms) && input.timeout_ms > 0) {
112
+ const requested = Math.floor(input.timeout_ms);
113
+ if (requested < 1000) {
114
+ effectiveTimeoutMs = 1000;
115
+ clampedMessage = `timeout_ms=${requested} below 1000ms floor; using 1000ms.`;
116
+ } else if (requested > this._maxTimeoutMs) {
117
+ effectiveTimeoutMs = this._maxTimeoutMs;
118
+ clampedMessage = `timeout_ms=${requested} above ${this._maxTimeoutMs}ms ceiling; clamped to ${this._maxTimeoutMs}ms.`;
119
+ } else {
120
+ effectiveTimeoutMs = requested;
121
+ }
122
+ }
123
+
79
124
  const effectiveCwd = (cwdScope === "project" && this._workspace.projectDir)
80
125
  ? this._workspace.projectDir
81
126
  : this._workspace.cwd;
@@ -86,7 +131,7 @@ export class SandboxExecTool extends BaseTool {
86
131
  const sharedHits = detectSharedFileWrites(command);
87
132
 
88
133
  try {
89
- const { output, code } = await this._run(command, effectiveCwd);
134
+ const { output, code } = await this._run(command, effectiveCwd, effectiveTimeoutMs);
90
135
  let result = output;
91
136
  if (result.length > MAX_OUTPUT) {
92
137
  result = result.slice(0, MAX_OUTPUT) + "\n[truncated]";
@@ -101,10 +146,20 @@ export class SandboxExecTool extends BaseTool {
101
146
  ` Under concurrent subagents this races — use workspace_file or rule_catalog instead.\n\n`;
102
147
  result = prefix + result;
103
148
  }
149
+ if (clampedMessage) {
150
+ result = `[note] ${clampedMessage}\n\n` + result;
151
+ }
104
152
  return new ToolResult(result, code !== 0);
105
153
  } catch (err) {
106
154
  if (err.message === "timeout") {
107
- return new ToolResult(`Command timed out after ${this._timeout}s`, true);
155
+ const seconds = Math.round(effectiveTimeoutMs / 1000);
156
+ const hint = effectiveTimeoutMs < this._maxTimeoutMs
157
+ ? ` Pass timeout_ms (up to ${this._maxTimeoutMs}) for known-slow commands.`
158
+ : ` Already at max timeout (${this._maxTimeoutMs}ms); consider splitting the command into smaller batches or running it via a subagent.`;
159
+ return new ToolResult(
160
+ `Command timed out after ${seconds}s (${effectiveTimeoutMs}ms).${hint}`,
161
+ true,
162
+ );
108
163
  }
109
164
  return new ToolResult(`Execution error: ${err.message}`, true);
110
165
  }
@@ -112,9 +167,11 @@ export class SandboxExecTool extends BaseTool {
112
167
 
113
168
  /**
114
169
  * @param {string} command
170
+ * @param {string} cwd
171
+ * @param {number} timeoutMs
115
172
  * @returns {Promise<{output: string, code: number}>}
116
173
  */
117
- _run(command, cwd) {
174
+ _run(command, cwd, timeoutMs) {
118
175
  return new Promise((resolve, reject) => {
119
176
  const controller = new AbortController();
120
177
  const proc = spawn("sh", ["-c", command], {
@@ -130,7 +187,7 @@ export class SandboxExecTool extends BaseTool {
130
187
  const timer = setTimeout(() => {
131
188
  controller.abort();
132
189
  reject(new Error("timeout"));
133
- }, this._timeout * 1000);
190
+ }, timeoutMs);
134
191
 
135
192
  proc.on("close", (code) => {
136
193
  clearTimeout(timer);
@@ -49,7 +49,10 @@ export class WorkerLLMCallTool extends BaseTool {
49
49
  return (
50
50
  "Call a worker LLM at a specified tier (tier1-tier4) for extraction, " +
51
51
  "judgment, or other verification tasks. Tier1 is most capable/expensive, " +
52
- "tier4 is cheapest. Returns response with model used and token counts."
52
+ "tier4 is cheapest. Pass `prompt` for a single call OR `prompts: [...]` " +
53
+ "for batch (parallel up to concurrency=5). Returns response(s) with " +
54
+ "model used and token counts. v0.8 P2-B: batch mode keeps the engine " +
55
+ "visible to LLM usage instead of agents bypassing via direct HTTP."
53
56
  );
54
57
  }
55
58
 
@@ -58,29 +61,105 @@ export class WorkerLLMCallTool extends BaseTool {
58
61
  type: "object",
59
62
  properties: {
60
63
  tier: { type: "string", enum: ["tier1", "tier2", "tier3", "tier4"], description: "Worker LLM tier to use" },
61
- prompt: { type: "string", description: "The user/task prompt to send" },
62
- system_prompt: { type: "string", description: "Optional system prompt for context" },
63
- max_tokens: { type: "integer", description: "Maximum tokens in response (default 4096)" },
64
+ prompt: { type: "string", description: "The user/task prompt to send (single-call mode)" },
65
+ prompts: {
66
+ type: "array",
67
+ items: { type: "string" },
68
+ description: "Batch mode: array of prompts processed in parallel (up to concurrency=5). All share the same tier + system_prompt. Mutually exclusive with `prompt`.",
69
+ },
70
+ system_prompt: { type: "string", description: "Optional system prompt for context (shared across all prompts in batch mode)" },
71
+ max_tokens: { type: "integer", description: "Maximum tokens per response (default 4096)" },
72
+ concurrency: { type: "integer", description: "Batch mode only: max parallel requests (default 5, max 10)" },
64
73
  },
65
- required: ["tier", "prompt"],
74
+ required: ["tier"],
66
75
  };
67
76
  }
68
77
 
69
78
  async execute(input) {
70
79
  const tier = input.tier || "tier2";
71
- const prompt = input.prompt || "";
72
80
  const systemPrompt = input.system_prompt;
73
81
  const maxTokens = input.max_tokens || 4096;
74
82
 
75
- if (!prompt) return new ToolResult("No prompt provided", true);
76
83
  if (!this._apiKey) return new ToolResult("Worker LLM API key not configured", true);
77
84
 
85
+ // v0.8 P2-B: batch mode dispatch
86
+ if (Array.isArray(input.prompts)) {
87
+ return this._executeBatch(input.prompts, { tier, systemPrompt, maxTokens, concurrency: input.concurrency });
88
+ }
89
+
90
+ const prompt = input.prompt || "";
91
+ if (!prompt) return new ToolResult("No prompt provided (pass `prompt` for single-call or `prompts: [...]` for batch)", true);
92
+
93
+ const result = await this._executeOne({ prompt, tier, systemPrompt, maxTokens });
94
+ if (result.error) return new ToolResult(result.error, true);
95
+ return new ToolResult(JSON.stringify(result.payload, null, 2));
96
+ }
97
+
98
+ /**
99
+ * v0.8 P2-B: process N prompts in parallel with concurrency control.
100
+ * Returns aggregated results as a JSON array under "results" with
101
+ * summary stats (total_in, total_out, n_failed). Partial failures don't
102
+ * fail the whole call — individual results carry their own error flag.
103
+ */
104
+ async _executeBatch(prompts, { tier, systemPrompt, maxTokens, concurrency }) {
105
+ if (prompts.length === 0) return new ToolResult("Empty prompts array", true);
78
106
  this._loadTiers();
79
107
  const models = this._tierModels[tier] || [];
80
108
  if (models.length === 0) {
81
109
  return new ToolResult(`No models configured for ${tier}. Check .env TIER1-TIER4 settings.`, true);
82
110
  }
83
111
 
112
+ const limit = Math.max(1, Math.min(10, Number.isFinite(concurrency) ? concurrency : 5));
113
+ const results = new Array(prompts.length);
114
+ let cursor = 0;
115
+ let tokensIn = 0;
116
+ let tokensOut = 0;
117
+ let nFailed = 0;
118
+
119
+ const worker = async () => {
120
+ while (true) {
121
+ const idx = cursor++;
122
+ if (idx >= prompts.length) break;
123
+ const r = await this._executeOne({ prompt: prompts[idx], tier, systemPrompt, maxTokens });
124
+ if (r.error) {
125
+ results[idx] = { index: idx, error: r.error };
126
+ nFailed++;
127
+ } else {
128
+ results[idx] = { index: idx, ...r.payload };
129
+ tokensIn += r.payload.tokens_in || 0;
130
+ tokensOut += r.payload.tokens_out || 0;
131
+ }
132
+ }
133
+ };
134
+
135
+ await Promise.all(Array.from({ length: limit }, () => worker()));
136
+
137
+ const summary = {
138
+ n_total: prompts.length,
139
+ n_succeeded: prompts.length - nFailed,
140
+ n_failed: nFailed,
141
+ total_tokens_in: tokensIn,
142
+ total_tokens_out: tokensOut,
143
+ tier,
144
+ concurrency: limit,
145
+ results,
146
+ };
147
+ return new ToolResult(JSON.stringify(summary, null, 2), nFailed > 0 && nFailed === prompts.length);
148
+ }
149
+
150
+ /**
151
+ * Single-prompt path. Returns {error?: string, payload?: {...}}.
152
+ * Used by both single-call and batch modes; batch dedups the tier
153
+ * lookup and shares concurrency with multiple in-flight invocations.
154
+ */
155
+ async _executeOne({ prompt, tier, systemPrompt, maxTokens }) {
156
+ if (!prompt) return { error: "Empty prompt" };
157
+ this._loadTiers();
158
+ const models = this._tierModels[tier] || [];
159
+ if (models.length === 0) {
160
+ return { error: `No models configured for ${tier}. Check .env TIER1-TIER4 settings.` };
161
+ }
162
+
84
163
  const messages = [];
85
164
  if (systemPrompt) messages.push({ role: "system", content: systemPrompt });
86
165
  messages.push({ role: "user", content: prompt });
@@ -98,14 +177,15 @@ export class WorkerLLMCallTool extends BaseTool {
98
177
  if (resp.ok) {
99
178
  const data = await resp.json();
100
179
  const usage = data.usage || {};
101
- const result = {
102
- response: data.choices[0].message.content,
103
- model_used: model,
104
- tier,
105
- tokens_in: usage.prompt_tokens || 0,
106
- tokens_out: usage.completion_tokens || 0,
180
+ return {
181
+ payload: {
182
+ response: data.choices[0].message.content,
183
+ model_used: model,
184
+ tier,
185
+ tokens_in: usage.prompt_tokens || 0,
186
+ tokens_out: usage.completion_tokens || 0,
187
+ },
107
188
  };
108
- return new ToolResult(JSON.stringify(result, null, 2));
109
189
  }
110
190
  lastError = `${model}: HTTP ${resp.status}`;
111
191
  } catch (e) {
@@ -113,6 +193,6 @@ export class WorkerLLMCallTool extends BaseTool {
113
193
  }
114
194
  }
115
195
 
116
- return new ToolResult(`All models for ${tier} failed. Last error: ${lastError}`, true);
196
+ return { error: `All models for ${tier} failed. Last error: ${lastError}` };
117
197
  }
118
198
  }
@@ -170,11 +170,12 @@ export class Workspace {
170
170
  * @param {{timeoutMs?: number, retryMs?: number, staleMs?: number}} [opts]
171
171
  * @returns {Promise<T>}
172
172
  */
173
- async withFileLock(relPath, fn, { timeoutMs = 10_000, retryMs = 50, staleMs = 60_000 } = {}) {
173
+ async withFileLock(relPath, fn, { timeoutMs = 10_000, retryMs = 50, staleMs = 60_000, eventLog = null, blockedWarnMs = 5_000 } = {}) {
174
174
  const target = this.resolvePath(relPath);
175
175
  fs.mkdirSync(path.dirname(target), { recursive: true });
176
176
  const lockPath = target + ".lock";
177
177
  const start = Date.now();
178
+ let blockedWarned = false;
178
179
 
179
180
  while (true) {
180
181
  let fd;
@@ -193,7 +194,24 @@ export class Workspace {
193
194
  // Lockfile vanished between EEXIST and stat — retry to acquire.
194
195
  continue;
195
196
  }
196
- if (Date.now() - start > timeoutMs) {
197
+ // v0.8 P4-C: emit lock_blocked event once when wait crosses
198
+ // blockedWarnMs (default 5s). Lets parent see subagent contention
199
+ // before the call fails. 贷款 v0.7.5 audit: subagent burned 5 min
200
+ // on silent lock contention; parent only saw it as a long-running
201
+ // subagent. Now there's a visible signal.
202
+ const waited = Date.now() - start;
203
+ if (!blockedWarned && waited > blockedWarnMs && eventLog?.append) {
204
+ try {
205
+ eventLog.append("lock_blocked", {
206
+ path: relPath,
207
+ waited_ms: waited,
208
+ session_id: this.sessionId,
209
+ pid: process.pid,
210
+ });
211
+ } catch { /* best-effort */ }
212
+ blockedWarned = true;
213
+ }
214
+ if (waited > timeoutMs) {
197
215
  throw new Error(`Timeout acquiring lock on ${relPath} after ${timeoutMs}ms (held by another engine)`);
198
216
  }
199
217
  await new Promise((r) => setTimeout(r, retryMs));
@@ -221,8 +239,11 @@ export class Workspace {
221
239
  * Lets callsites uniformly wrap their writes without knowing which
222
240
  * paths are shared.
223
241
  */
224
- async withSharedLockIfApplicable(relPath, fn) {
225
- if (isSharedCoordinationPath(relPath)) return this.withFileLock(relPath, fn);
242
+ async withSharedLockIfApplicable(relPath, fn, opts = {}) {
243
+ // v0.8 P4-C: forward optional {eventLog, ...} through to withFileLock
244
+ // so lock_blocked events can fire from any call site (workspace_file,
245
+ // rule_catalog, etc.) once they pass their engine's eventLog.
246
+ if (isSharedCoordinationPath(relPath)) return this.withFileLock(relPath, fn, opts);
226
247
  return fn();
227
248
  }
228
249
 
@@ -89,7 +89,7 @@ function truncateVisual(s, maxCells) {
89
89
  return head + "…" + tail;
90
90
  }
91
91
 
92
- export function StatusBar({ sessionId, phase, contextTokens, contextLimit }) {
92
+ export function StatusBar({ sessionId, phase, contextTokens, contextLimit, marathonActive }) {
93
93
  const samplesRef = useRef([]);
94
94
  const peakRef = useRef(0);
95
95
 
@@ -136,6 +136,9 @@ export function StatusBar({ sessionId, phase, contextTokens, contextLimit }) {
136
136
  h(Text, { dimColor: true, wrap: "truncate-end" }, " ⏵⏵ KC "),
137
137
  h(Text, { dimColor: true, wrap: "truncate-end" }, displaySessionId ? `[${displaySessionId}]` : ""),
138
138
  phase ? h(Text, { color: "cyan", wrap: "truncate-end" }, ` ${phase.toUpperCase()}`) : null,
139
+ // v0.8.1 P8-A: marathon-mode indicator. Only renders when active —
140
+ // normal interactive mode shows no indicator (avoid clutter).
141
+ marathonActive ? h(Text, { color: "magenta", bold: true, wrap: "truncate-end" }, " 🏃 MARATHON") : null,
139
142
  h(Text, { color: "green", wrap: "truncate-end" }, " ● "),
140
143
  h(Text, { color: ctxColor, wrap: "truncate-end" }, `CTX: ${ctxLabel}/${limitLabel} (${pct}%)`),
141
144
  showPeak ? h(Text, { dimColor: true, wrap: "truncate-end" }, ` · peak ${fmt(peak)}`) : null,