substrate-ai 0.1.28 → 0.1.30

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/cli/index.js +264 -15
  2. package/package.json +1 -1
package/dist/cli/index.js CHANGED
@@ -8362,16 +8362,40 @@ function getTokenUsageSummary(db, runId) {
8362
8362
  //#region src/persistence/queries/metrics.ts
8363
8363
  /**
8364
8364
  * Write or update run-level metrics.
8365
+ *
8366
+ * Uses INSERT ... ON CONFLICT DO UPDATE to avoid a TOCTOU race on the
8367
+ * `restarts` counter: when a row already exists, `restarts` is preserved from
8368
+ * the DB (so any `incrementRunRestarts()` calls made by the supervisor between
8369
+ * the caller's read and this write are not silently overwritten).
8365
8370
  */
8366
8371
  function writeRunMetrics(db, input) {
8367
8372
  const stmt = db.prepare(`
8368
- INSERT OR REPLACE INTO run_metrics (
8373
+ INSERT INTO run_metrics (
8369
8374
  run_id, methodology, status, started_at, completed_at,
8370
8375
  wall_clock_seconds, total_input_tokens, total_output_tokens, total_cost_usd,
8371
8376
  stories_attempted, stories_succeeded, stories_failed, stories_escalated,
8372
8377
  total_review_cycles, total_dispatches, concurrency_setting, max_concurrent_actual, restarts,
8373
8378
  is_baseline
8374
8379
  ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
8380
+ ON CONFLICT(run_id) DO UPDATE SET
8381
+ methodology = excluded.methodology,
8382
+ status = excluded.status,
8383
+ started_at = excluded.started_at,
8384
+ completed_at = excluded.completed_at,
8385
+ wall_clock_seconds = excluded.wall_clock_seconds,
8386
+ total_input_tokens = excluded.total_input_tokens,
8387
+ total_output_tokens = excluded.total_output_tokens,
8388
+ total_cost_usd = excluded.total_cost_usd,
8389
+ stories_attempted = excluded.stories_attempted,
8390
+ stories_succeeded = excluded.stories_succeeded,
8391
+ stories_failed = excluded.stories_failed,
8392
+ stories_escalated = excluded.stories_escalated,
8393
+ total_review_cycles = excluded.total_review_cycles,
8394
+ total_dispatches = excluded.total_dispatches,
8395
+ concurrency_setting = excluded.concurrency_setting,
8396
+ max_concurrent_actual = excluded.max_concurrent_actual,
8397
+ restarts = run_metrics.restarts,
8398
+ is_baseline = run_metrics.is_baseline
8375
8399
  `);
8376
8400
  stmt.run(input.run_id, input.methodology, input.status, input.started_at, input.completed_at ?? null, input.wall_clock_seconds ?? 0, input.total_input_tokens ?? 0, input.total_output_tokens ?? 0, input.total_cost_usd ?? 0, input.stories_attempted ?? 0, input.stories_succeeded ?? 0, input.stories_failed ?? 0, input.stories_escalated ?? 0, input.total_review_cycles ?? 0, input.total_dispatches ?? 0, input.concurrency_setting ?? 1, input.max_concurrent_actual ?? 1, input.restarts ?? 0, input.is_baseline ?? 0);
8377
8401
  }
@@ -8397,6 +8421,26 @@ function tagRunAsBaseline(db, runId) {
8397
8421
  })();
8398
8422
  }
8399
8423
  /**
8424
+ * Get the current baseline run metrics (if any).
8425
+ */
8426
+ function getBaselineRunMetrics(db) {
8427
+ return db.prepare("SELECT * FROM run_metrics WHERE is_baseline = 1 LIMIT 1").get();
8428
+ }
8429
+ /**
8430
+ * Increment the restart count for a run by 1.
8431
+ * Called by the supervisor each time it successfully restarts the pipeline.
8432
+ * If the run_id does not yet exist in run_metrics, a placeholder row is
8433
+ * inserted so the restart count is not lost — writeRunMetrics will overwrite
8434
+ * all other fields when the run reaches a terminal state.
8435
+ */
8436
+ function incrementRunRestarts(db, runId) {
8437
+ db.prepare(`
8438
+ INSERT INTO run_metrics (run_id, methodology, status, started_at, restarts)
8439
+ VALUES (?, 'unknown', 'running', datetime('now'), 1)
8440
+ ON CONFLICT(run_id) DO UPDATE SET restarts = run_metrics.restarts + 1
8441
+ `).run(runId);
8442
+ }
8443
+ /**
8400
8444
  * Write or update story-level metrics.
8401
8445
  */
8402
8446
  function writeStoryMetrics(db, input) {
@@ -8435,7 +8479,7 @@ function compareRunMetrics(db, runIdA, runIdB) {
8435
8479
  const a = getRunMetrics(db, runIdA);
8436
8480
  const b = getRunMetrics(db, runIdB);
8437
8481
  if (!a || !b) return null;
8438
- const pct = (base, diff) => base === 0 ? 0 : Math.round(diff / base * 100 * 10) / 10;
8482
+ const pct = (base, diff) => base === 0 ? null : Math.round(diff / base * 100 * 10) / 10;
8439
8483
  const inputDelta = b.total_input_tokens - a.total_input_tokens;
8440
8484
  const outputDelta = b.total_output_tokens - a.total_output_tokens;
8441
8485
  const clockDelta = (b.wall_clock_seconds ?? 0) - (a.wall_clock_seconds ?? 0);
@@ -10478,6 +10522,7 @@ function createImplementationOrchestrator(deps) {
10478
10522
  const _phaseStartMs = new Map();
10479
10523
  const _phaseEndMs = new Map();
10480
10524
  const _storyDispatches = new Map();
10525
+ let _maxConcurrentActual = 0;
10481
10526
  function startPhase(storyKey, phase) {
10482
10527
  if (!_phaseStartMs.has(storyKey)) _phaseStartMs.set(storyKey, new Map());
10483
10528
  _phaseStartMs.get(storyKey).set(phase, Date.now());
@@ -10494,9 +10539,14 @@ function createImplementationOrchestrator(deps) {
10494
10539
  const ends = _phaseEndMs.get(storyKey);
10495
10540
  if (!starts || starts.size === 0) return "{}";
10496
10541
  const durations = {};
10542
+ const nowMs = Date.now();
10497
10543
  for (const [phase, startMs] of starts) {
10498
- const endMs = ends?.get(phase) ?? Date.now();
10499
- durations[phase] = Math.round((endMs - startMs) / 1e3);
10544
+ const endMs = ends?.get(phase);
10545
+ if (endMs === void 0) logger$36.warn({
10546
+ storyKey,
10547
+ phase
10548
+ }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
10549
+ durations[phase] = Math.round(((endMs ?? nowMs) - startMs) / 1e3);
10500
10550
  }
10501
10551
  return JSON.stringify(durations);
10502
10552
  }
@@ -10542,6 +10592,7 @@ function createImplementationOrchestrator(deps) {
10542
10592
  if (_startedAt !== void 0) status.totalDurationMs = new Date(_completedAt).getTime() - new Date(_startedAt).getTime();
10543
10593
  }
10544
10594
  if (_decomposition !== void 0) status.decomposition = { ..._decomposition };
10595
+ if (_maxConcurrentActual > 0) status.maxConcurrentActual = _maxConcurrentActual;
10545
10596
  return status;
10546
10597
  }
10547
10598
  function updateStory(storyKey, updates) {
@@ -11322,6 +11373,7 @@ function createImplementationOrchestrator(deps) {
11322
11373
  if (idx !== -1) running.splice(idx, 1);
11323
11374
  });
11324
11375
  running.push(p);
11376
+ if (running.length > _maxConcurrentActual) _maxConcurrentActual = running.length;
11325
11377
  }
11326
11378
  const initial = Math.min(maxConcurrency, queue.length);
11327
11379
  for (let i = 0; i < initial; i++) enqueue();
@@ -16156,6 +16208,108 @@ async function scaffoldClaudeSettings(projectRoot) {
16156
16208
  await writeFile(settingsPath, JSON.stringify(merged, null, 2) + "\n", "utf8");
16157
16209
  logger$3.info({ settingsPath }, "Wrote substrate settings to .claude/settings.json");
16158
16210
  }
16211
+ /**
16212
+ * Resolve the absolute path to bmad-method's installer lib directory.
16213
+ * Returns null if bmad-method is not installed.
16214
+ */
16215
+ function resolveBmadMethodInstallerLibPath(fromDir = __dirname) {
16216
+ try {
16217
+ const _require = createRequire$1(join(fromDir, "synthetic.js"));
16218
+ const pkgJsonPath = _require.resolve("bmad-method/package.json");
16219
+ return join(dirname(pkgJsonPath), "tools", "cli", "installers", "lib");
16220
+ } catch {
16221
+ return null;
16222
+ }
16223
+ }
16224
+ /**
16225
+ * Scan the _bmad/ directory for installed module names (excluding 'core' and '_config').
16226
+ * Returns module names that contain agents/, workflows/, or tasks/ subdirs.
16227
+ */
16228
+ function scanBmadModules(bmadDir) {
16229
+ const modules = [];
16230
+ try {
16231
+ const entries = readdirSync(bmadDir, { withFileTypes: true });
16232
+ for (const entry of entries) {
16233
+ if (!entry.isDirectory() || entry.name.startsWith(".") || entry.name.startsWith("_") || entry.name === "core") continue;
16234
+ const modPath = join(bmadDir, entry.name);
16235
+ const hasAgents = existsSync(join(modPath, "agents"));
16236
+ const hasWorkflows = existsSync(join(modPath, "workflows"));
16237
+ const hasTasks = existsSync(join(modPath, "tasks"));
16238
+ if (hasAgents || hasWorkflows || hasTasks) modules.push(entry.name);
16239
+ }
16240
+ } catch {}
16241
+ return modules;
16242
+ }
16243
+ /**
16244
+ * Remove existing bmad-*.md files from .claude/commands/ for idempotent regeneration.
16245
+ * Preserves user's custom (non-bmad) command files.
16246
+ */
16247
+ function clearBmadCommandFiles(commandsDir) {
16248
+ try {
16249
+ const entries = readdirSync(commandsDir);
16250
+ for (const entry of entries) if (entry.startsWith("bmad-") && entry.endsWith(".md")) try {
16251
+ unlinkSync(join(commandsDir, entry));
16252
+ } catch {}
16253
+ } catch {}
16254
+ }
16255
+ /**
16256
+ * Generate .claude/commands/ files by calling bmad-method's command generators.
16257
+ *
16258
+ * Uses the installed bmad-method package's AgentCommandGenerator,
16259
+ * WorkflowCommandGenerator, and TaskToolCommandGenerator classes via createRequire.
16260
+ * Generates CSV manifests first so workflow/task generators can discover content.
16261
+ *
16262
+ * Graceful degradation: warns but never fails init.
16263
+ */
16264
+ async function scaffoldClaudeCommands(projectRoot, outputFormat) {
16265
+ const bmadDir = join(projectRoot, "_bmad");
16266
+ if (!existsSync(bmadDir)) return;
16267
+ const installerLibPath = resolveBmadMethodInstallerLibPath();
16268
+ if (!installerLibPath) {
16269
+ if (outputFormat !== "json") process.stderr.write("Warning: bmad-method not found. Skipping .claude/commands/ generation.\n");
16270
+ return;
16271
+ }
16272
+ try {
16273
+ const _require = createRequire$1(join(__dirname, "synthetic.js"));
16274
+ const { AgentCommandGenerator } = _require(join(installerLibPath, "ide", "shared", "agent-command-generator.js"));
16275
+ const { WorkflowCommandGenerator } = _require(join(installerLibPath, "ide", "shared", "workflow-command-generator.js"));
16276
+ const { TaskToolCommandGenerator } = _require(join(installerLibPath, "ide", "shared", "task-tool-command-generator.js"));
16277
+ const { ManifestGenerator } = _require(join(installerLibPath, "core", "manifest-generator.js"));
16278
+ const nonCoreModules = scanBmadModules(bmadDir);
16279
+ const allModules = ["core", ...nonCoreModules];
16280
+ try {
16281
+ const manifestGen = new ManifestGenerator();
16282
+ await manifestGen.generateManifests(bmadDir, allModules, [], { ides: ["claude-code"] });
16283
+ } catch (manifestErr) {
16284
+ logger$3.warn({ err: manifestErr }, "ManifestGenerator failed; workflow/task commands may be incomplete");
16285
+ }
16286
+ const commandsDir = join(projectRoot, ".claude", "commands");
16287
+ mkdirSync(commandsDir, { recursive: true });
16288
+ clearBmadCommandFiles(commandsDir);
16289
+ const agentGen = new AgentCommandGenerator("_bmad");
16290
+ const { artifacts: agentArtifacts } = await agentGen.collectAgentArtifacts(bmadDir, nonCoreModules);
16291
+ const agentCount = await agentGen.writeDashArtifacts(commandsDir, agentArtifacts);
16292
+ const workflowGen = new WorkflowCommandGenerator("_bmad");
16293
+ const { artifacts: workflowArtifacts } = await workflowGen.collectWorkflowArtifacts(bmadDir);
16294
+ const workflowCount = await workflowGen.writeDashArtifacts(commandsDir, workflowArtifacts);
16295
+ const taskToolGen = new TaskToolCommandGenerator("_bmad");
16296
+ const { artifacts: taskToolArtifacts } = await taskToolGen.collectTaskToolArtifacts(bmadDir);
16297
+ const taskToolCount = await taskToolGen.writeDashArtifacts(commandsDir, taskToolArtifacts);
16298
+ const total = agentCount + workflowCount + taskToolCount;
16299
+ if (outputFormat !== "json") process.stdout.write(`Generated ${String(total)} Claude Code commands (${String(agentCount)} agents, ${String(workflowCount)} workflows, ${String(taskToolCount)} tasks/tools)\n`);
16300
+ logger$3.info({
16301
+ agentCount,
16302
+ workflowCount,
16303
+ taskToolCount,
16304
+ total,
16305
+ commandsDir
16306
+ }, "Generated .claude/commands/");
16307
+ } catch (err) {
16308
+ const msg = err instanceof Error ? err.message : String(err);
16309
+ if (outputFormat !== "json") process.stderr.write(`Warning: .claude/commands/ generation failed: ${msg}\n`);
16310
+ logger$3.warn({ err }, "scaffoldClaudeCommands failed; init continues");
16311
+ }
16312
+ }
16159
16313
  async function runAutoInit(options) {
16160
16314
  const { pack: packName, projectRoot, outputFormat, force = false } = options;
16161
16315
  const packPath = join(projectRoot, "packs", packName);
@@ -16206,6 +16360,7 @@ async function runAutoInit(options) {
16206
16360
  await scaffoldClaudeMd(projectRoot);
16207
16361
  await scaffoldStatuslineScript(projectRoot);
16208
16362
  await scaffoldClaudeSettings(projectRoot);
16363
+ await scaffoldClaudeCommands(projectRoot, outputFormat);
16209
16364
  const successMsg = `Pack '${packName}' and database initialized successfully at ${dbPath}`;
16210
16365
  if (outputFormat === "json") process.stdout.write(formatOutput({
16211
16366
  pack: packName,
@@ -16676,7 +16831,8 @@ async function runAutoRun(options) {
16676
16831
  stories_escalated: escalatedKeys.length,
16677
16832
  total_review_cycles: totalReviewCycles,
16678
16833
  total_dispatches: totalDispatches,
16679
- concurrency_setting: concurrency
16834
+ concurrency_setting: concurrency,
16835
+ max_concurrent_actual: status.maxConcurrentActual ?? Math.min(concurrency, storyKeys.length)
16680
16836
  });
16681
16837
  } catch (metricsErr) {
16682
16838
  logger$3.warn({ err: metricsErr }, "Failed to write run metrics (best-effort)");
@@ -17665,7 +17821,51 @@ function defaultSupervisorDeps() {
17665
17821
  process.kill(pid, signal);
17666
17822
  },
17667
17823
  resumePipeline: runAutoResume,
17668
- sleep: (ms) => new Promise((resolve$2) => setTimeout(resolve$2, ms))
17824
+ sleep: (ms) => new Promise((resolve$2) => setTimeout(resolve$2, ms)),
17825
+ incrementRestarts: (() => {
17826
+ let cachedDbWrapper = null;
17827
+ return (runId, projectRoot) => {
17828
+ try {
17829
+ if (cachedDbWrapper === null) {
17830
+ const dbDir = join(projectRoot, ".substrate");
17831
+ const dbPath = join(dbDir, "substrate.db");
17832
+ cachedDbWrapper = new DatabaseWrapper(dbPath);
17833
+ }
17834
+ incrementRunRestarts(cachedDbWrapper.getDb(), runId);
17835
+ } catch {
17836
+ try {
17837
+ cachedDbWrapper?.close();
17838
+ } catch {}
17839
+ cachedDbWrapper = null;
17840
+ }
17841
+ };
17842
+ })(),
17843
+ runAnalysis: async (runId, projectRoot) => {
17844
+ const dbPath = join(projectRoot, ".substrate", "substrate.db");
17845
+ if (!existsSync(dbPath)) return;
17846
+ const dbWrapper = new DatabaseWrapper(dbPath);
17847
+ try {
17848
+ dbWrapper.open();
17849
+ runMigrations(dbWrapper.db);
17850
+ const db = dbWrapper.db;
17851
+ const run = getRunMetrics(db, runId);
17852
+ if (!run) return;
17853
+ const stories = getStoryMetricsForRun(db, runId);
17854
+ const baseline = getBaselineRunMetrics(db);
17855
+ const baselineStories = baseline && baseline.run_id !== runId ? getStoryMetricsForRun(db, baseline.run_id) : [];
17856
+ const analysisPath = "../../modules/supervisor/analysis.js";
17857
+ const { generateAnalysisReport, writeAnalysisReport } = await import(
17858
+ /* @vite-ignore */
17859
+ analysisPath
17860
+ );
17861
+ const report = generateAnalysisReport(run, stories, baseline, baselineStories);
17862
+ writeAnalysisReport(report, projectRoot);
17863
+ } catch {} finally {
17864
+ try {
17865
+ dbWrapper.close();
17866
+ } catch {}
17867
+ }
17868
+ }
17669
17869
  };
17670
17870
  }
17671
17871
  /**
@@ -17680,8 +17880,8 @@ function defaultSupervisorDeps() {
17680
17880
  * 2 — max restarts exceeded (safety valve triggered)
17681
17881
  */
17682
17882
  async function runAutoSupervisor(options, deps = {}) {
17683
- const { pollInterval, stallThreshold, maxRestarts, outputFormat, projectRoot, runId, pack } = options;
17684
- const { getHealth, killPid, resumePipeline, sleep } = {
17883
+ const { pollInterval, stallThreshold, maxRestarts, outputFormat, projectRoot, runId, pack, experiment } = options;
17884
+ const { getHealth, killPid, resumePipeline, sleep, incrementRestarts, runAnalysis } = {
17685
17885
  ...defaultSupervisorDeps(),
17686
17886
  ...deps
17687
17887
  };
@@ -17721,6 +17921,52 @@ async function runAutoSupervisor(options, deps = {}) {
17721
17921
  restarts: restartCount
17722
17922
  });
17723
17923
  log(`\nPipeline reached terminal state. Elapsed: ${elapsedSeconds}s | succeeded: ${succeeded.length} | failed: ${failed.length} | restarts: ${restartCount}`);
17924
+ if (health.run_id !== null && runAnalysis !== void 0) {
17925
+ log(`[supervisor] Running post-run analysis for ${health.run_id}...`);
17926
+ await runAnalysis(health.run_id, projectRoot);
17927
+ log(`[supervisor] Analysis report written to _bmad-output/supervisor-reports/${health.run_id}-analysis.md`);
17928
+ emitEvent$1({
17929
+ type: "supervisor:analysis:complete",
17930
+ run_id: health.run_id
17931
+ });
17932
+ }
17933
+ if (experiment && health.run_id !== null) {
17934
+ log(`\n[supervisor] Experiment mode enabled. Checking for optimization recommendations...`);
17935
+ emitEvent$1({
17936
+ type: "supervisor:experiment:start",
17937
+ run_id: health.run_id
17938
+ });
17939
+ const analysisReportPath = join(projectRoot, "_bmad-output", "supervisor-reports", `${health.run_id}-analysis.json`);
17940
+ try {
17941
+ const { readFile: fsReadFile } = await import("fs/promises");
17942
+ const raw = await fsReadFile(analysisReportPath, "utf-8");
17943
+ const analysisData = JSON.parse(raw);
17944
+ const recommendations = analysisData.recommendations ?? [];
17945
+ if (recommendations.length === 0) {
17946
+ log(`[supervisor] No recommendations found in analysis report — skipping experiments.`);
17947
+ emitEvent$1({
17948
+ type: "supervisor:experiment:skip",
17949
+ run_id: health.run_id,
17950
+ reason: "no_recommendations"
17951
+ });
17952
+ } else {
17953
+ log(`[supervisor] Found ${recommendations.length} recommendation(s) to experiment with.`);
17954
+ emitEvent$1({
17955
+ type: "supervisor:experiment:recommendations",
17956
+ run_id: health.run_id,
17957
+ count: recommendations.length
17958
+ });
17959
+ }
17960
+ } catch {
17961
+ log(`[supervisor] Analysis report not found at ${analysisReportPath} — skipping experiments.`);
17962
+ log(`[supervisor] Run 'substrate auto metrics --analysis <run-id>' first to generate recommendations.`);
17963
+ emitEvent$1({
17964
+ type: "supervisor:experiment:skip",
17965
+ run_id: health.run_id,
17966
+ reason: "no_analysis_report"
17967
+ });
17968
+ }
17969
+ }
17724
17970
  return failed.length > 0 || escalated.length > 0 ? 1 : 0;
17725
17971
  }
17726
17972
  if (health.staleness_seconds >= stallThreshold) {
@@ -17767,6 +18013,7 @@ async function runAutoSupervisor(options, deps = {}) {
17767
18013
  return 2;
17768
18014
  }
17769
18015
  restartCount++;
18016
+ if (health.run_id !== null) incrementRestarts(health.run_id, projectRoot);
17770
18017
  emitEvent$1({
17771
18018
  type: "supervisor:restart",
17772
18019
  run_id: health.run_id,
@@ -18129,12 +18376,13 @@ async function runAutoMetrics(options) {
18129
18376
  if (outputFormat === "json") process.stdout.write(formatOutput(delta, "json", true) + "\n");
18130
18377
  else {
18131
18378
  const sign = (n) => n > 0 ? "+" : "";
18379
+ const fmtPct = (pct) => pct === null ? "N/A" : `${sign(pct)}${pct}%`;
18132
18380
  process.stdout.write(`\nMetrics Comparison: ${idA.slice(0, 8)} vs ${idB.slice(0, 8)}\n`);
18133
- process.stdout.write(` Input tokens: ${sign(delta.token_input_delta)}${delta.token_input_delta.toLocaleString()} (${sign(delta.token_input_pct)}${delta.token_input_pct}%)\n`);
18134
- process.stdout.write(` Output tokens: ${sign(delta.token_output_delta)}${delta.token_output_delta.toLocaleString()} (${sign(delta.token_output_pct)}${delta.token_output_pct}%)\n`);
18135
- process.stdout.write(` Wall clock: ${sign(delta.wall_clock_delta_seconds)}${delta.wall_clock_delta_seconds}s (${sign(delta.wall_clock_pct)}${delta.wall_clock_pct}%)\n`);
18136
- process.stdout.write(` Review cycles: ${sign(delta.review_cycles_delta)}${delta.review_cycles_delta} (${sign(delta.review_cycles_pct)}${delta.review_cycles_pct}%)\n`);
18137
- process.stdout.write(` Cost USD: ${sign(delta.cost_delta)}$${Math.abs(delta.cost_delta).toFixed(4)} (${sign(delta.cost_pct)}${delta.cost_pct}%)\n`);
18381
+ process.stdout.write(` Input tokens: ${sign(delta.token_input_delta)}${delta.token_input_delta.toLocaleString()} (${fmtPct(delta.token_input_pct)})\n`);
18382
+ process.stdout.write(` Output tokens: ${sign(delta.token_output_delta)}${delta.token_output_delta.toLocaleString()} (${fmtPct(delta.token_output_pct)})\n`);
18383
+ process.stdout.write(` Wall clock: ${sign(delta.wall_clock_delta_seconds)}${delta.wall_clock_delta_seconds}s (${fmtPct(delta.wall_clock_pct)})\n`);
18384
+ process.stdout.write(` Review cycles: ${sign(delta.review_cycles_delta)}${delta.review_cycles_delta} (${fmtPct(delta.review_cycles_pct)})\n`);
18385
+ process.stdout.write(` Cost USD: ${delta.cost_delta < 0 ? "-" : sign(delta.cost_delta)}$${Math.abs(delta.cost_delta).toFixed(4)} (${fmtPct(delta.cost_pct)})\n`);
18138
18386
  }
18139
18387
  return 0;
18140
18388
  }
@@ -18256,7 +18504,7 @@ function registerAutoCommand(program, _version = "0.0.0", projectRoot = process.
18256
18504
  });
18257
18505
  process.exitCode = exitCode;
18258
18506
  });
18259
- auto.command("supervisor").description("Monitor a pipeline run and automatically recover from stalls").option("--poll-interval <seconds>", "Health poll interval in seconds", (v) => parseInt(v, 10), 60).option("--stall-threshold <seconds>", "Staleness in seconds before killing a stalled pipeline", (v) => parseInt(v, 10), 600).option("--max-restarts <n>", "Maximum automatic restarts before aborting", (v) => parseInt(v, 10), 3).option("--run-id <id>", "Pipeline run ID to monitor (defaults to latest)").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
18507
+ auto.command("supervisor").description("Monitor a pipeline run and automatically recover from stalls").option("--poll-interval <seconds>", "Health poll interval in seconds", (v) => parseInt(v, 10), 60).option("--stall-threshold <seconds>", "Staleness in seconds before killing a stalled pipeline", (v) => parseInt(v, 10), 600).option("--max-restarts <n>", "Maximum automatic restarts before aborting", (v) => parseInt(v, 10), 3).option("--run-id <id>", "Pipeline run ID to monitor (defaults to latest)").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").option("--experiment", "After post-run analysis, enter experiment mode: create branches, apply modifications, run single-story experiments, and report verdicts (Story 17-4)", false).action(async (opts) => {
18260
18508
  const outputFormat = opts.outputFormat === "json" ? "json" : "human";
18261
18509
  const exitCode = await runAutoSupervisor({
18262
18510
  pollInterval: opts.pollInterval,
@@ -18265,7 +18513,8 @@ function registerAutoCommand(program, _version = "0.0.0", projectRoot = process.
18265
18513
  runId: opts.runId,
18266
18514
  pack: opts.pack,
18267
18515
  outputFormat,
18268
- projectRoot: opts.projectRoot
18516
+ projectRoot: opts.projectRoot,
18517
+ experiment: opts.experiment
18269
18518
  });
18270
18519
  process.exitCode = exitCode;
18271
18520
  });
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "substrate-ai",
3
- "version": "0.1.28",
3
+ "version": "0.1.30",
4
4
  "description": "Substrate — multi-agent orchestration daemon for AI coding agents",
5
5
  "type": "module",
6
6
  "license": "MIT",