cclaw-cli 0.27.0 → 0.28.0

This diff shows the contents of publicly available package versions as they were released to a supported registry. It is provided for informational purposes only.
package/dist/cli.d.ts CHANGED
@@ -1,6 +1,6 @@
  #!/usr/bin/env node
  import type { FlowTrack, HarnessId, InitProfile } from "./types.js";
- import type { EvalTier } from "./eval/types.js";
+ import type { EvalMode } from "./eval/types.js";
  type CommandName = "init" | "sync" | "doctor" | "upgrade" | "uninstall" | "archive" | "eval";
  interface ParsedArgs {
  command?: CommandName;
@@ -18,7 +18,7 @@ interface ParsedArgs {
  archiveSkipRetro?: boolean;
  archiveSkipRetroReason?: string;
  evalStage?: string;
- evalTier?: EvalTier;
+ evalMode?: EvalMode;
  evalSchemaOnly?: boolean;
  evalRules?: boolean;
  evalJudge?: boolean;
@@ -26,10 +26,14 @@ interface ParsedArgs {
  evalNoWrite?: boolean;
  evalUpdateBaseline?: boolean;
  evalConfirm?: boolean;
- /** Optional subcommand after `eval`. Currently only `diff` is supported. */
- evalSubcommand?: "diff";
+ evalQuiet?: boolean;
+ evalMaxCostUsd?: number;
+ /** Optional subcommand after `eval`. */
+ evalSubcommand?: "diff" | "runs";
  /** Positional arguments for eval subcommands (e.g. `diff <old> <new>`). */
  evalArgs?: string[];
+ evalBackground?: boolean;
+ evalCompareModel?: string;
  showHelp?: boolean;
  showVersion?: boolean;
  }
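
The new `EvalMode` import replaces `EvalTier`, but `./eval/types.js` itself is not included in this diff. From the `--mode=<fixture|agent|workflow>` help text and the `EVAL_MODES` import in `cli.js` below, a minimal sketch of the shape it would need (an assumption, not the package's actual source):

```ts
// Hypothetical sketch of ./eval/types.js, inferred from the help text and
// the EVAL_MODES import in cli.js; not the package's actual source.
export const EVAL_MODES = ["fixture", "agent", "workflow"] as const;
export type EvalMode = (typeof EVAL_MODES)[number];
```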
package/dist/cli.js CHANGED
@@ -1,5 +1,7 @@
  #!/usr/bin/env node
- import { readFileSync, realpathSync } from "node:fs";
+ import { createReadStream, readFileSync, realpathSync } from "node:fs";
+ import { spawn } from "node:child_process";
+ import fs from "node:fs/promises";
  import process from "node:process";
  import path from "node:path";
  import { createInterface } from "node:readline/promises";
@@ -14,10 +16,13 @@ import { createDefaultConfig, createProfileConfig } from "./config.js";
  import { detectHarnesses } from "./init-detect.js";
  import { HARNESS_ADAPTERS } from "./harness-adapters.js";
  import { runEval } from "./eval/runner.js";
+ import { createStderrProgressLogger } from "./eval/progress.js";
  import { writeBaselinesFromReport } from "./eval/baseline.js";
  import { writeJsonReport, writeMarkdownReport } from "./eval/report.js";
  import { formatDiffMarkdown, runEvalDiff } from "./eval/diff.js";
- import { EVAL_TIERS } from "./eval/types.js";
+ import { ensureRunDir, generateRunId, isRunAlive, listRuns, readRunStatus, resolveRunId, runLogPath, writeRunStatus } from "./eval/runs.js";
+ import { EVAL_MODES } from "./eval/types.js";
+ import { parseModeInput } from "./eval/mode.js";
  import { FLOW_STAGES } from "./types.js";
  const INSTALLER_COMMANDS = [
  "init",
@@ -56,22 +61,41 @@ Commands:
  --skip-retro Bypass mandatory retro gate (requires --retro-reason).
  --retro-reason=<t> Reason for bypassing retro gate.
  eval Run cclaw evals against .cclaw/evals/corpus (Phase 7: structural verifier + baselines).
- Flags: --stage=<id> Limit to one flow stage (${FLOW_STAGES.join("|")}) for Tier A/B.
- --tier=<A|B|C> Fidelity tier (A=single-shot, B=tools, C=multi-stage workflow).
+ Flags: --stage=<id> Limit to one flow stage (${FLOW_STAGES.join("|")}) for fixture/agent modes.
+ --mode=<${EVAL_MODES.join("|")}>
+ Evaluation mode:
+ fixture = verify existing artifacts with structural/rule/judge verifiers.
+ agent = LLM drafts one stage's artifact in a sandbox with tools.
+ workflow = LLM runs the full multi-stage flow (brainstorm→plan).
+ Legacy --tier=A|B|C still works (deprecated).
  --schema-only Run only structural verifiers (default).
  --rules Also run rule-based verifiers (keywords, regex, counts, uniqueness, traceability).
- --judge Run the LLM judge (median-of-N) against each case's rubric. Requires CCLAW_EVAL_API_KEY; Tier A runs the single-shot agent, Tier B/C the sandbox tool-using agent (read_file/write_file/glob/grep).
+ --judge Run the LLM judge (median-of-N) against each case's rubric. Requires CCLAW_EVAL_API_KEY; fixture mode judges an existing artifact, agent/workflow modes draft first and then judge.
  --dry-run Validate config + corpus, print summary, do not execute.
  --json Emit machine-readable JSON on stdout.
  --no-write Skip writing the report to .cclaw/evals/reports/.
  --update-baseline Overwrite baselines from the current run (requires --confirm).
  --confirm Acknowledge --update-baseline (prevents accidental resets).
+ --quiet Silence the stderr progress logger (default: emit one
+ line per case / stage to stderr so long runs are visible).
+ --max-cost-usd=<n> Abort the run if committed USD spend crosses <n>
+ (independent from the daily cap). Also readable from
+ CCLAW_EVAL_MAX_COST_USD.
+ --compare-model=<id> Run the same corpus twice — once with the configured model
+ and once with <id> — then diff the summaries. Exit code 1
+ when the override model regressed.
+ --background Spawn the run as a detached child process, write the
+ combined output to .cclaw/evals/runs/<id>/run.log, and
+ return immediately. Attach later with
+ \`cclaw eval runs tail <id|latest>\`.

  Subcommands:
  diff <old> <new> Compare two reports under .cclaw/evals/reports/.
  Each argument is a cclawVersion (e.g. 0.26.0), a filename,
  or the literal "latest". Exit code 1 when the diff shows a
  regression. Accepts --json to emit machine-readable output.
+ runs [action] [id] Inspect background runs under .cclaw/evals/runs/.
+ Actions: list (default) | status <id|latest> | tail <id|latest>.
  upgrade Refresh generated files in .cclaw without modifying user artifacts.
  uninstall Remove .cclaw runtime and the generated harness shim files.

@@ -85,9 +109,9 @@ Examples:
  cclaw archive --name=payments-revamp
  cclaw eval --dry-run
  cclaw eval --stage=brainstorm --schema-only
- cclaw eval --judge --tier=A --stage=brainstorm
- cclaw eval --judge --tier=B --stage=spec
- cclaw eval --tier=C --judge
+ cclaw eval --judge --mode=fixture --stage=brainstorm
+ cclaw eval --judge --mode=agent --stage=spec
+ cclaw eval --mode=workflow --judge
  cclaw eval diff 0.26.0 latest

  Docs: https://github.com/zuevrs/cclaw
@@ -144,12 +168,17 @@ function parseProfile(raw) {
  }
  return trimmed;
  }
- function parseEvalTier(raw) {
- const trimmed = raw.trim().toUpperCase();
- if (!EVAL_TIERS.includes(trimmed)) {
- throw new Error(`Unknown eval tier: ${raw}. Supported: ${EVAL_TIERS.join(", ")}`);
- }
- return trimmed;
+ function parseLegacyTier(raw) {
+ return parseModeInput(raw.toUpperCase(), {
+ source: "cli",
+ raw: `--tier=${raw}`
+ });
+ }
+ function parseEvalMode(raw) {
+ return parseModeInput(raw, {
+ source: "cli",
+ raw: `--mode=${raw}`
+ });
  }
  function parseEvalStage(raw) {
  const trimmed = raw.trim();
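
`parseModeInput` (from `./eval/mode.js`) is not shown in this diff. Based on the deprecation note in the help text and the scaffold comment ("A→fixture, B→agent, C→workflow"), a hedged sketch of the alias handling it would need; the error message here is illustrative:

```ts
// Hypothetical sketch of eval/mode.js; the real module is not part of this
// diff. The alias table follows the scaffold comment:
// "Legacy alias --tier=A|B|C still works; A→fixture, B→agent, C→workflow."
type EvalMode = "fixture" | "agent" | "workflow";
const EVAL_MODES: readonly EvalMode[] = ["fixture", "agent", "workflow"];
const LEGACY_TIER_TO_MODE: Record<string, EvalMode> = { A: "fixture", B: "agent", C: "workflow" };

export function parseModeInput(raw: string, ctx: { source: string; raw: string }): EvalMode {
  const legacy = LEGACY_TIER_TO_MODE[raw]; // parseLegacyTier uppercases before calling
  if (legacy !== undefined) return legacy;
  if ((EVAL_MODES as readonly string[]).includes(raw)) return raw as EvalMode;
  throw new Error(`Unknown eval mode: ${ctx.raw}. Supported: ${EVAL_MODES.join(", ")}`);
}
```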
@@ -372,6 +401,18 @@ function printDoctorText(ctx, checks, options) {
  ctx.stdout.write("Doctor status: HEALTHY (no failing error checks)\n");
  }
  }
+ function resolveMaxCostOption(fromCli, env) {
+ if (fromCli !== undefined)
+ return { maxCostUsd: fromCli };
+ const raw = env.CCLAW_EVAL_MAX_COST_USD;
+ if (raw === undefined || raw.trim() === "")
+ return {};
+ const value = Number(raw);
+ if (!Number.isFinite(value) || value <= 0) {
+ throw new Error(`CCLAW_EVAL_MAX_COST_USD must be a positive number, got: ${raw}`);
+ }
+ return { maxCostUsd: value };
+ }
  function parseArgs(argv) {
  const parsed = {};
  const helpFlag = argv.find((arg) => arg === "--help" || arg === "-h");
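
`resolveMaxCostOption` gives the CLI flag precedence over the environment variable and returns a spread-friendly object, so an unset budget adds no key at all. A quick usage illustration (the `declare` only retypes the function shown above so the example stands alone):

```ts
// Usage illustration for resolveMaxCostOption as defined in the hunk above.
declare function resolveMaxCostOption(
  fromCli: number | undefined,
  env: Record<string, string | undefined>
): { maxCostUsd?: number };

resolveMaxCostOption(2.5, { CCLAW_EVAL_MAX_COST_USD: "9" }); // { maxCostUsd: 2.5 } (CLI flag wins)
resolveMaxCostOption(undefined, { CCLAW_EVAL_MAX_COST_USD: "9" }); // { maxCostUsd: 9 } (env fallback)
resolveMaxCostOption(undefined, {}); // {} (spreading into runEval options adds nothing)
resolveMaxCostOption(undefined, { CCLAW_EVAL_MAX_COST_USD: "-1" }); // throws
```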
@@ -406,8 +447,11 @@ function parseArgs(argv) {
  parsed.evalSubcommand = "diff";
  sawSubcommand = true;
  }
+ else if (token === "runs") {
+ parsed.evalSubcommand = "runs";
+ sawSubcommand = true;
+ }
  else {
- // Treat unknown positional as an eval arg for forward compat.
  evalArgs.push(token);
  }
  continue;
@@ -479,8 +523,12 @@ function parseArgs(argv) {
  parsed.evalStage = parseEvalStage(flag.replace("--stage=", ""));
  continue;
  }
+ if (flag.startsWith("--mode=")) {
+ parsed.evalMode = parseEvalMode(flag.replace("--mode=", ""));
+ continue;
+ }
  if (flag.startsWith("--tier=")) {
- parsed.evalTier = parseEvalTier(flag.replace("--tier=", ""));
+ parsed.evalMode = parseLegacyTier(flag.replace("--tier=", ""));
  continue;
  }
  if (flag === "--schema-only") {
@@ -507,14 +555,245 @@ function parseArgs(argv) {
  parsed.evalConfirm = true;
  continue;
  }
+ if (flag === "--background") {
+ parsed.evalBackground = true;
+ continue;
+ }
+ if (flag.startsWith("--compare-model=")) {
+ const value = flag.replace("--compare-model=", "").trim();
+ if (value.length === 0) {
+ throw new Error(`--compare-model requires a non-empty model id (e.g. --compare-model=gpt-4o-mini).`);
+ }
+ parsed.evalCompareModel = value;
+ continue;
+ }
+ if (flag.startsWith("--max-cost-usd=")) {
+ const raw = flag.replace("--max-cost-usd=", "").trim();
+ const value = Number(raw);
+ if (!Number.isFinite(value) || value <= 0) {
+ throw new Error(`--max-cost-usd requires a positive number, got: ${raw}`);
+ }
+ parsed.evalMaxCostUsd = value;
+ continue;
+ }
  }
  // `--json` is shared between doctor and eval. Disambiguate by command.
  if (parsed.command === "eval" && parsed.doctorJson === true) {
  parsed.evalJson = true;
  parsed.doctorJson = undefined;
  }
+ // `--quiet` on `eval` silences the stderr progress logger. On doctor it
+ // continues to mean "print only failing checks" — the flag slot is the
+ // same, the semantics depend on which command owns the invocation.
+ if (parsed.command === "eval" && parsed.doctorQuiet === true) {
+ parsed.evalQuiet = true;
+ parsed.doctorQuiet = undefined;
+ }
  return parsed;
  }
+ /**
+ * Spawn `cclaw eval` (without `--background`) in a detached child process
+ * and return immediately. The child's stdout+stderr are piped to
+ * `.cclaw/evals/runs/<id>/run.log` so the user can attach later with
+ * `cclaw eval runs tail`. We do NOT wait for the child — the whole point
+ * is to free the terminal while a multi-minute workflow-mode run
+ * proceeds in the background.
+ */
+ async function spawnBackgroundEval(parsed, ctx) {
+ const id = generateRunId();
+ await ensureRunDir(ctx.cwd, id);
+ const logPath = runLogPath(ctx.cwd, id);
+ const childArgv = process.argv.slice(2).filter((a) => a !== "--background");
+ const cliEntry = process.argv[1];
+ if (!cliEntry) {
+ error(ctx, "Could not resolve cclaw entrypoint for --background.");
+ return 1;
+ }
+ const logHandle = await fs.open(logPath, "a");
+ try {
+ const child = spawn(process.execPath, [cliEntry, ...childArgv], {
+ cwd: ctx.cwd,
+ detached: true,
+ stdio: ["ignore", logHandle.fd, logHandle.fd],
+ env: process.env
+ });
+ const pid = child.pid ?? -1;
+ await writeRunStatus(ctx.cwd, {
+ id,
+ startedAt: new Date().toISOString(),
+ pid,
+ argv: childArgv,
+ cwd: ctx.cwd,
+ state: "running"
+ });
+ child.unref();
+ const finalize = async (code) => {
+ const current = await readRunStatus(ctx.cwd, id);
+ if (!current)
+ return;
+ const exitCode = typeof code === "number" ? code : -1;
+ await writeRunStatus(ctx.cwd, {
+ ...current,
+ endedAt: new Date().toISOString(),
+ exitCode,
+ state: exitCode === 0 ? "succeeded" : "failed"
+ });
+ };
+ child.on("exit", (code) => {
+ void finalize(code);
+ });
+ child.on("error", (err) => {
+ void writeRunStatus(ctx.cwd, {
+ id,
+ startedAt: new Date().toISOString(),
+ pid,
+ argv: childArgv,
+ cwd: ctx.cwd,
+ endedAt: new Date().toISOString(),
+ exitCode: -1,
+ state: "failed"
+ });
+ error(ctx, `Background eval failed to start: ${err.message}`);
+ });
+ ctx.stdout.write(`cclaw eval: background run id=${id} pid=${pid}\n` +
+ ` log: ${logPath}\n` +
+ ` tail: cclaw eval runs tail ${id}\n` +
+ ` status: cclaw eval runs status ${id}\n`);
+ return 0;
+ }
+ finally {
+ await logHandle.close();
+ }
+ }
+ function formatRunRow(status) {
+ const ended = status.endedAt ? ` ended=${status.endedAt}` : "";
+ const exitCode = status.exitCode !== undefined ? ` exit=${status.exitCode}` : "";
+ const alive = status.state === "running" ? (isRunAlive(status) ? "" : " (stale)") : "";
+ return `${status.id} state=${status.state}${alive} pid=${status.pid} started=${status.startedAt}${ended}${exitCode}`;
+ }
+ async function runEvalRunsSubcommand(parsed, ctx) {
+ const args = parsed.evalArgs ?? [];
+ const action = args[0] ?? "list";
+ if (action === "list") {
+ const runs = await listRuns(ctx.cwd);
+ if (runs.length === 0) {
+ ctx.stdout.write("No eval runs recorded under .cclaw/evals/runs/.\n");
+ return 0;
+ }
+ if (parsed.evalJson === true) {
+ ctx.stdout.write(`${JSON.stringify(runs, null, 2)}\n`);
+ return 0;
+ }
+ for (const run of runs)
+ ctx.stdout.write(`${formatRunRow(run)}\n`);
+ return 0;
+ }
+ if (action === "status") {
+ const id = await resolveRunId(ctx.cwd, args[1]);
+ if (!id) {
+ error(ctx, `No such run: ${args[1] ?? "(none recorded)"}`);
+ return 1;
+ }
+ const status = await readRunStatus(ctx.cwd, id);
+ if (!status) {
+ error(ctx, `Run ${id} has no status file.`);
+ return 1;
+ }
+ if (parsed.evalJson === true) {
+ ctx.stdout.write(`${JSON.stringify(status, null, 2)}\n`);
+ }
+ else {
+ ctx.stdout.write(`${formatRunRow(status)}\n`);
+ ctx.stdout.write(`log: ${runLogPath(ctx.cwd, id)}\n`);
+ }
+ return status.state === "failed" ? 1 : 0;
+ }
+ if (action === "tail") {
+ const id = await resolveRunId(ctx.cwd, args[1]);
+ if (!id) {
+ error(ctx, `No such run: ${args[1] ?? "(none recorded)"}`);
+ return 1;
+ }
+ const logFile = runLogPath(ctx.cwd, id);
+ const stream = createReadStream(logFile, { encoding: "utf8" });
+ await new Promise((resolve, reject) => {
+ stream.on("data", (chunk) => ctx.stdout.write(chunk));
+ stream.on("end", () => resolve());
+ stream.on("error", reject);
+ });
+ return 0;
+ }
+ error(ctx, `Unknown \`cclaw eval runs\` action: ${action}. Use list | status | tail.`);
+ return 1;
+ }
+ /**
+ * Run the same corpus twice — once against the configured model, once
+ * against `--compare-model=<id>` — and print a summary comparing the
+ * two. Both reports are written to `.cclaw/evals/reports/` (unless
+ * `--no-write` is set) and a unified diff is emitted to stdout. Exit
+ * code is 1 when the override model regressed against the baseline
+ * model, 0 otherwise.
+ */
+ async function runCompareModel(parsed, ctx, progress) {
+ const baselineOpts = {
+ projectRoot: ctx.cwd,
+ stage: parsed.evalStage,
+ mode: parsed.evalMode,
+ schemaOnly: parsed.evalSchemaOnly === true,
+ rules: parsed.evalRules === true,
+ judge: parsed.evalJudge === true,
+ ...(progress ? { progress } : {}),
+ ...resolveMaxCostOption(parsed.evalMaxCostUsd, process.env)
+ };
+ ctx.stderr.write(`[cclaw eval] compare: running baseline model...\n`);
+ const baseline = await runEval(baselineOpts);
+ if ("kind" in baseline) {
+ error(ctx, "--compare-model is incompatible with --dry-run.");
+ return 1;
+ }
+ ctx.stderr.write(`[cclaw eval] compare: running ${parsed.evalCompareModel} ...\n`);
+ const candidate = await runEval({
+ ...baselineOpts,
+ modelOverride: parsed.evalCompareModel
+ });
+ if ("kind" in candidate) {
+ error(ctx, "--compare-model received an unexpected dry-run response.");
+ return 1;
+ }
+ if (parsed.evalNoWrite !== true) {
+ await writeJsonReport(ctx.cwd, baseline);
+ await writeMarkdownReport(ctx.cwd, baseline);
+ await writeJsonReport(ctx.cwd, candidate);
+ await writeMarkdownReport(ctx.cwd, candidate);
+ }
+ const passDelta = candidate.summary.passed - baseline.summary.passed;
+ const failDelta = candidate.summary.failed - baseline.summary.failed;
+ const costDelta = candidate.summary.totalCostUsd - baseline.summary.totalCostUsd;
+ if (parsed.evalJson === true) {
+ ctx.stdout.write(`${JSON.stringify({
+ baseline: {
+ model: baseline.model,
+ summary: baseline.summary
+ },
+ candidate: {
+ model: candidate.model,
+ summary: candidate.summary
+ },
+ delta: { passed: passDelta, failed: failDelta, costUsd: costDelta }
+ }, null, 2)}\n`);
+ }
+ else {
+ ctx.stdout.write(`cclaw eval compare-model:\n` +
+ ` baseline ${baseline.model}: pass=${baseline.summary.passed}/${baseline.summary.totalCases} ` +
+ `fail=${baseline.summary.failed} cost=$${baseline.summary.totalCostUsd.toFixed(4)}\n` +
+ ` candidate ${candidate.model}: pass=${candidate.summary.passed}/${candidate.summary.totalCases} ` +
+ `fail=${candidate.summary.failed} cost=$${candidate.summary.totalCostUsd.toFixed(4)}\n` +
+ ` delta: passed=${passDelta >= 0 ? "+" : ""}${passDelta} ` +
+ `failed=${failDelta >= 0 ? "+" : ""}${failDelta} ` +
+ `cost=${costDelta >= 0 ? "+" : ""}$${costDelta.toFixed(4)}\n`);
+ }
+ return failDelta > 0 ? 1 : 0;
+ }
  async function runCommand(parsed, ctx) {
  if (parsed.showHelp) {
  ctx.stdout.write(usage());
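
`spawnBackgroundEval` above combines three standard Node.js mechanisms: `detached: true` gives the child its own process group so it survives the parent, passing the log file descriptor in `stdio` routes output with no pipe for the parent to drain, and `unref()` lets the parent exit immediately. A stripped-down sketch of just that pattern (names and paths here are placeholders, not the package's values):

```ts
// Minimal sketch of the detach-and-log pattern used by spawnBackgroundEval;
// detachToLog and its arguments are placeholders for illustration.
import { spawn } from "node:child_process";
import fs from "node:fs/promises";

async function detachToLog(entry: string, argv: string[], logPath: string): Promise<number> {
  const log = await fs.open(logPath, "a");
  try {
    const child = spawn(process.execPath, [entry, ...argv], {
      detached: true, // own process group: keeps running after the parent exits
      stdio: ["ignore", log.fd, log.fd] // child writes straight to the log fd
    });
    child.unref(); // drop the child from the parent's event-loop ref count
    return child.pid ?? -1;
  } finally {
    await log.close(); // safe: spawn duplicated the descriptor into the child
  }
}
```

Note that the `exit` listener in the real function only fires if the parent process is still alive when the child finishes; since the CLI normally exits right away, the status file can stay at `state=running`, which is exactly what the `(stale)` marker in `formatRunRow` accounts for.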
@@ -608,6 +887,12 @@ async function runCommand(parsed, ctx) {
  info(ctx, "Upgraded .cclaw runtime and regenerated generated files");
  return 0;
  }
+ if (command === "eval" && parsed.evalSubcommand === "runs") {
+ return runEvalRunsSubcommand(parsed, ctx);
+ }
+ if (command === "eval" && parsed.evalBackground === true) {
+ return spawnBackgroundEval(parsed, ctx);
+ }
  if (command === "eval" && parsed.evalSubcommand === "diff") {
  const args = parsed.evalArgs ?? [];
  if (args.length !== 2) {
@@ -636,14 +921,25 @@ async function runCommand(parsed, ctx) {
  }
  }
  if (command === "eval") {
+ const wantProgress = parsed.evalQuiet !== true &&
+ parsed.dryRun !== true &&
+ parsed.evalJson !== true;
+ const progress = wantProgress
+ ? createStderrProgressLogger({ writer: (s) => ctx.stderr.write(s) })
+ : undefined;
+ if (parsed.evalCompareModel !== undefined) {
+ return runCompareModel(parsed, ctx, progress);
+ }
  const result = await runEval({
  projectRoot: ctx.cwd,
  stage: parsed.evalStage,
- tier: parsed.evalTier,
+ mode: parsed.evalMode,
  schemaOnly: parsed.evalSchemaOnly === true,
  rules: parsed.evalRules === true,
  judge: parsed.evalJudge === true,
- dryRun: parsed.dryRun === true
+ dryRun: parsed.dryRun === true,
+ ...(progress ? { progress } : {}),
+ ...resolveMaxCostOption(parsed.evalMaxCostUsd, process.env)
  });
  if ("kind" in result) {
  if (parsed.evalJson === true) {
@@ -656,12 +952,12 @@ async function runCommand(parsed, ctx) {
  ctx.stdout.write(` model: ${result.config.model}\n`);
  ctx.stdout.write(` source: ${result.config.source}\n`);
  ctx.stdout.write(` apiKey: ${result.config.apiKey ? "set" : "unset"}\n`);
- ctx.stdout.write(` tier: ${result.plannedTier}\n`);
+ ctx.stdout.write(` mode: ${result.plannedMode}\n`);
  ctx.stdout.write(` corpus: ${result.corpus.total} case(s)\n`);
  for (const [stage, count] of Object.entries(result.corpus.byStage)) {
  ctx.stdout.write(` - ${stage}: ${count}\n`);
  }
- if (result.workflowCorpus.total > 0 || result.plannedTier === "C") {
+ if (result.workflowCorpus.total > 0 || result.plannedMode === "workflow") {
  ctx.stdout.write(` workflow corpus: ${result.workflowCorpus.total} case(s)\n`);
  for (const wf of result.workflowCorpus.cases) {
  ctx.stdout.write(` - ${wf.id}: ${wf.stages.join(" → ")}\n`);
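
`createStderrProgressLogger` is visible here only through its call site, which passes a `{ writer }` option and is skipped for `--quiet`, `--dry-run`, and `--json` runs. A hedged sketch of a shape consistent with that call site and the one-line-per-case help text (`eval/progress.js` is not in this diff, and the event names are assumptions):

```ts
// Hypothetical sketch of eval/progress.js; only the { writer } option is
// confirmed by this diff, the event names are assumed.
interface ProgressLoggerOptions {
  writer: (line: string) => void;
}

export function createStderrProgressLogger(opts: ProgressLoggerOptions) {
  return {
    caseStarted: (id: string) => opts.writer(`[cclaw eval] case ${id} ...\n`),
    caseFinished: (id: string, ok: boolean) =>
      opts.writer(`[cclaw eval] case ${id}: ${ok ? "pass" : "fail"}\n`)
  };
}
```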
@@ -4,8 +4,8 @@
  * scaffold is intentionally minimal: a usable default config plus short
  * READMEs that point at `docs/evals.md` for authoring guidance.
  */
- export declare const EVAL_CONFIG_YAML = "# cclaw eval config\n# See docs/evals.md for the full schema and rollout plan.\n#\n# All values can be overridden at runtime with CCLAW_EVAL_* environment\n# variables (env wins). Secrets like CCLAW_EVAL_API_KEY never live here.\nprovider: zai\nbaseUrl: https://api.z.ai/api/coding/paas/v4\nmodel: glm-5.1\n\n# Default fidelity tier when --tier is not supplied.\n# A = single-shot API call (cheap)\n# B = SDK with tool use (realistic)\n# C = multi-stage workflow (end-to-end)\ndefaultTier: A\n\n# Per-call timeout and retry budget.\ntimeoutMs: 120000\nmaxRetries: 2\n\n# Optional hard-stop on estimated USD spend per day. Leave unset for no cap.\n# dailyUsdCap: 5\n\n# Regression thresholds used by CI.\nregression:\n # Fail when overall score drops by more than this fraction (e.g. -0.15 = 15%).\n failIfDeltaBelow: -0.15\n # Fail when any single critical rubric drops below this absolute score.\n failIfCriticalBelow: 3.0\n";
- export declare const EVAL_CORPUS_README = "# Eval Corpus\n\nSeed cases live in `./<stage>/<id>.yaml`, one file per case.\nSee `docs/evals.md` for the schema.\n\nMinimal shape:\n\n```yaml\nid: brainstorm-01\nstage: brainstorm\ninput_prompt: |\n One short paragraph describing the user's task.\ncontext_files: []\nexpected:\n # verifier-specific hints; optional\n```\n\nStart with 3 structural cases per stage (24 total), then expand to 5 per\nstage (40 total) once rule verifiers land. Tier B/C runs may add\n`context_files` pulled from real projects to exercise the sandbox.\n";
+ export declare const EVAL_CONFIG_YAML = "# cclaw eval config\n# See docs/evals.md for the full schema and rollout plan.\n#\n# All values can be overridden at runtime with CCLAW_EVAL_* environment\n# variables (env wins). Secrets like CCLAW_EVAL_API_KEY never live here.\nprovider: zai\nbaseUrl: https://api.z.ai/api/coding/paas/v4\nmodel: glm-5.1\n\n# Default evaluation mode when --mode is not supplied.\n# fixture = verify existing artifacts (cheap, LLM-free unless --judge is set)\n# agent = LLM drafts one stage's artifact in a sandbox with tools\n# workflow = LLM runs the full multi-stage flow (brainstorm \u2192 plan)\n# (Legacy alias --tier=A|B|C still works; A\u2192fixture, B\u2192agent, C\u2192workflow.)\ndefaultMode: fixture\n\n# Per-call timeout and retry budget.\ntimeoutMs: 120000\nmaxRetries: 2\n\n# Optional hard-stop on estimated USD spend per day. Leave unset for no cap.\n# dailyUsdCap: 5\n\n# Regression thresholds used by CI.\nregression:\n # Fail when overall score drops by more than this fraction (e.g. -0.15 = 15%).\n failIfDeltaBelow: -0.15\n # Fail when any single critical rubric drops below this absolute score.\n failIfCriticalBelow: 3.0\n";
+ export declare const EVAL_CORPUS_README = "# Eval Corpus\n\nSeed cases live in `./<stage>/<id>.yaml`, one file per case.\nSee `docs/evals.md` for the schema.\n\nMinimal shape:\n\n```yaml\nid: brainstorm-01\nstage: brainstorm\ninput_prompt: |\n One short paragraph describing the user's task.\ncontext_files: []\nexpected:\n # verifier-specific hints; optional\n```\n\nStart with 3 structural cases per stage (24 total), then expand to 5 per\nstage (40 total) once rule verifiers land. Agent/workflow runs may add\n`context_files` pulled from real projects to exercise the sandbox.\n";
  export declare const EVAL_RUBRICS_README = "# Eval Rubrics\n\nLLM-judge rubrics. Each rubric is a short list of checks scored on a\n`1\u20135` scale with a rationale. The runner picks `<stage>.yaml` when\n`cclaw eval --judge` is invoked; every stage ships a starter rubric\nbelow \u2014 edit the checks to match what your team cares about, and add\n`critical: true` to the checks that should hard-fail nightly CI on\nregression.\n\n```yaml\nstage: brainstorm\nchecks:\n - id: distinctness\n prompt: \"Are the proposed directions genuinely distinct (not rephrasings)?\"\n scale: \"1-5 where 5=fully distinct approaches\"\n weight: 1.0\n critical: false\n```\n\nSee `docs/evals.md` for the full schema.\n";
  export declare const EVAL_RUBRIC_FILES: ReadonlyArray<{
  stage: string;
@@ -13,11 +13,12 @@ provider: zai
  baseUrl: https://api.z.ai/api/coding/paas/v4
  model: glm-5.1

- # Default fidelity tier when --tier is not supplied.
- # A = single-shot API call (cheap)
- # B = SDK with tool use (realistic)
- # C = multi-stage workflow (end-to-end)
- defaultTier: A
+ # Default evaluation mode when --mode is not supplied.
+ # fixture = verify existing artifacts (cheap, LLM-free unless --judge is set)
+ # agent = LLM drafts one stage's artifact in a sandbox with tools
+ # workflow = LLM runs the full multi-stage flow (brainstorm → plan)
+ # (Legacy alias --tier=A|B|C still works; A→fixture, B→agent, C→workflow.)
+ defaultMode: fixture

  # Per-call timeout and retry budget.
  timeoutMs: 120000
@@ -51,7 +52,7 @@ expected:
  \`\`\`

  Start with 3 structural cases per stage (24 total), then expand to 5 per
- stage (40 total) once rule verifiers land. Tier B/C runs may add
+ stage (40 total) once rule verifiers land. Agent/workflow runs may add
  \`context_files\` pulled from real projects to exercise the sandbox.
  `;
  export const EVAL_RUBRICS_README = `# Eval Rubrics
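
The scaffold comments promise that every value can be overridden with a `CCLAW_EVAL_*` environment variable, env winning over YAML. A one-line illustration of that precedence (`CCLAW_EVAL_MODEL` is an assumed variable name following the stated convention):

```ts
// Illustrative only: the documented "env wins" rule applied to the model.
// CCLAW_EVAL_MODEL is an assumed name following the CCLAW_EVAL_* convention.
function resolveModel(config: { model: string }, env: NodeJS.ProcessEnv): string {
  return env.CCLAW_EVAL_MODEL ?? config.model;
}
```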
@@ -23,5 +23,5 @@ export interface SingleShotOutput {
  userPrompt: string;
  }
  export declare function loadStageSkill(projectRoot: string, stage: FlowStage): Promise<string>;
- /** Run the Tier A single-shot AUT and return the produced artifact. */
+ /** Run the single-shot AUT (fixture mode + --judge) and return the produced artifact. */
  export declare function runSingleShot(input: SingleShotInput): Promise<SingleShotOutput>;
@@ -1,5 +1,5 @@
  /**
- * Tier A single-shot agent.
+ * Single-shot agent used by fixture mode when `--judge` is set.
  *
  * Simplest realistic AUT: one LLM call with the stage's SKILL.md as the
  * system prompt and the case's `inputPrompt` as the user message. Output
@@ -9,7 +9,7 @@
  * Design notes:
  *
  * - No tools. No multi-turn. No reads of the project beyond the one
- * SKILL.md. Tier B/C layer complexity on top in later steps.
+ * SKILL.md. agent/workflow modes layer complexity on top.
  * - Errors are propagated as-is (`EvalLlmError` subclasses) so the
  * runner can surface them as verifier failures without swallowing the
  * cause.
@@ -27,7 +27,7 @@ export async function loadStageSkill(projectRoot, stage) {
  const file = path.join(projectRoot, RUNTIME_ROOT, "skills", folder, "SKILL.md");
  if (!(await exists(file))) {
  throw new Error(`Stage skill not found: ${path.relative(projectRoot, file)}. ` +
- `Run \`cclaw init\` (or \`cclaw sync\`) before \`cclaw eval --tier=A --judge\`.`);
+ `Run \`cclaw init\` (or \`cclaw sync\`) before \`cclaw eval --mode=fixture --judge\`.`);
  }
  return fs.readFile(file, "utf8");
  }
@@ -50,7 +50,7 @@ function buildUserPrompt(caseEntry) {
  `Do not wrap in code fences, do not add commentary before or after.`);
  return lines.join("\n");
  }
- /** Run the Tier A single-shot AUT and return the produced artifact. */
+ /** Run the single-shot AUT (fixture mode + --judge) and return the produced artifact. */
  export async function runSingleShot(input) {
  const { caseEntry, config, projectRoot, client } = input;
  const started = Date.now();
@@ -18,15 +18,15 @@ export interface WithToolsInput {
  createSandboxFn?: typeof createSandbox;
  /**
  * Reuse an externally-managed sandbox instead of creating + disposing a
- * per-call one. Tier C workflow orchestration uses this so every stage
- * shares the same sandbox and earlier artifacts remain visible. When
- * set, the caller is responsible for `dispose()`.
+ * per-call one. Workflow mode uses this so every stage shares the same
+ * sandbox and earlier artifacts remain visible. When set, the caller is
+ * responsible for `dispose()`.
  */
  externalSandbox?: Sandbox;
  /**
- * Optional override of the default user prompt prefix. Tier C uses this
- * to tell the model which stage it is on and where the prior artifacts
- * are located.
+ * Optional override of the default user prompt prefix. Workflow mode uses
+ * this to tell the model which stage it is on and where the prior
+ * artifacts are located.
  */
  promptPreamble?: string;
  }
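
Together, `externalSandbox` and `promptPreamble` describe how workflow mode chains stages: one sandbox is created up front, every stage call reuses it so earlier artifacts stay visible, and the preamble tells the model where those artifacts are. A hedged orchestration sketch (only the two fields documented above come from this diff; the rest of the shape is assumed):

```ts
// Hypothetical workflow orchestration; runWithTools' full signature is not
// shown in this diff, so it is declared here with only the documented fields.
interface Sandbox { dispose(): Promise<void>; }
declare function runWithTools(input: {
  stage: string;
  externalSandbox: Sandbox;
  promptPreamble: string;
}): Promise<unknown>;

async function runWorkflowStages(stages: string[], createSandboxFn: () => Promise<Sandbox>) {
  const sandbox = await createSandboxFn(); // shared across all stages
  try {
    for (const stage of stages) {
      await runWithTools({
        stage,
        externalSandbox: sandbox, // reused, so prior writes remain visible
        promptPreamble: `You are on stage "${stage}". Prior artifacts live in the sandbox root.`
      });
    }
  } finally {
    await sandbox.dispose(); // caller owns disposal, per the doc comment above
  }
}
```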
@@ -1,11 +1,11 @@
  /**
- * Tier B with-tools agent.
+ * Multi-turn with-tools agent (agent mode, reused by workflow mode).
  *
  * Multi-turn loop with OpenAI-style function-calling over a set of
  * sandbox-confined tools. The AUT is given:
  *
- * - System prompt = stage SKILL.md (same contract as Tier A so the
- * single-shot baseline is comparable).
+ * - System prompt = stage SKILL.md (same contract as the single-shot path
+ * so the baseline is comparable).
  * - User prompt = task description + a short "tools available" hint
  * that names the sandbox root and the four built-in tools.
  * - Tools = `read_file`, `write_file`, `glob`, `grep` (see
@@ -29,7 +29,7 @@
  * Artifact resolution: the final assistant content is the artifact. If
  * the model used `write_file` to stage the artifact at
  * `artifact.md` (or `artifact/<stage>.md`), we prefer that file — it
- * mirrors the Tier C workflow where writes are the deliverable. The
+ * mirrors workflow mode where writes are the deliverable. The
  * fallback is the terminal assistant message so prompts that don't
  * call write_file still produce something judgable.
  */
@@ -42,7 +42,7 @@ import { loadStageSkill } from "./single-shot.js";
  export class MaxTurnsExceededError extends Error {
  turns;
  constructor(turns) {
- super(`Tier B agent exceeded the ${turns}-turn budget without a terminal stop.`);
+ super(`Agent loop exceeded the ${turns}-turn budget without a terminal stop.`);
  this.name = "MaxTurnsExceededError";
  this.turns = turns;
  }
@@ -12,6 +12,13 @@ export interface WorkflowInput {
  loadSkill?: (stage: WorkflowStageName) => Promise<string>;
  /** Override for the sandbox factory (test hook). */
  createSandboxFn?: typeof createSandbox;
+ /**
+ * Optional per-stage lifecycle hooks. The runner uses these to emit
+ * progress events to stderr so workflow-mode runs surface real-time
+ * status rather than going silent for minutes.
+ */
+ onStageStart?: (stage: WorkflowStageName) => void;
+ onStageEnd?: (stage: WorkflowStageName, result: WorkflowStageResult) => void;
  }
  export interface WorkflowOutput {
  caseId: string;
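
The two hooks give the runner per-stage visibility without coupling the workflow module to a logger. A minimal wiring sketch (the stderr line format is illustrative; `WorkflowStageName` and `WorkflowStageResult` stand in for the types referenced above):

```ts
// Illustrative wiring of the new lifecycle hooks to stderr; the message
// format is assumed, only the hook signatures come from this diff.
type WorkflowStageName = string; // stand-in for the package's real type
interface WorkflowStageResult {} // stand-in; fields are not shown in this diff

const hooks = {
  onStageStart: (stage: WorkflowStageName): void => {
    process.stderr.write(`[cclaw eval] stage ${stage}: started\n`);
  },
  onStageEnd: (stage: WorkflowStageName, _result: WorkflowStageResult): void => {
    process.stderr.write(`[cclaw eval] stage ${stage}: done\n`);
  }
};
```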