substrate-ai 0.1.20 → 0.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -3,6 +3,7 @@ import { AdapterRegistry, ConfigError, ConfigIncompatibleFormatError, DatabaseWr
3
3
  import { CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema } from "../config-schema-C9tTMcm1.js";
4
4
  import { defaultConfigMigrator } from "../version-manager-impl-O25ieEjS.js";
5
5
  import { registerUpgradeCommand } from "../upgrade-CHhsJc_q.js";
6
+ import { createRequire } from "module";
6
7
  import { Command } from "commander";
7
8
  import { fileURLToPath } from "url";
8
9
  import { dirname, extname, isAbsolute, join, relative, resolve } from "path";
@@ -23,8 +24,12 @@ import * as readline$1 from "readline";
23
24
  import * as readline from "readline";
24
25
  import { createInterface as createInterface$1 } from "readline";
25
26
  import { randomUUID as randomUUID$1 } from "crypto";
26
- import { createRequire } from "node:module";
27
+ import { createRequire as createRequire$1 } from "node:module";
27
28
 
29
+ //#region rolldown:runtime
30
+ var __require = /* @__PURE__ */ createRequire(import.meta.url);
31
+
32
+ //#endregion
28
33
  //#region src/cli/utils/formatting.ts
29
34
  /**
30
35
  * Build adapter list rows from discovery results.
@@ -384,7 +389,7 @@ function listTemplates() {
384
389
 
385
390
  //#endregion
386
391
  //#region src/cli/commands/init.ts
387
- const logger$30 = createLogger("init");
392
+ const logger$32 = createLogger("init");
388
393
  /**
389
394
  * Detect whether the CLI was invoked via `npx substrate`.
390
395
  * When true, prefix suggested commands with `npx `.
@@ -568,7 +573,7 @@ async function runInit(options = {}) {
568
573
  discoveryReport = await registry.discoverAndRegister();
569
574
  } catch (err) {
570
575
  const message = err instanceof Error ? err.message : String(err);
571
- logger$30.error({ err }, "Adapter discovery failed");
576
+ logger$32.error({ err }, "Adapter discovery failed");
572
577
  process.stderr.write(` Error: adapter discovery failed — ${message}\n`);
573
578
  return INIT_EXIT_ERROR;
574
579
  }
@@ -606,7 +611,7 @@ async function runInit(options = {}) {
606
611
  await writeFile(routingPolicyPath, routingHeader + yaml.dump(routingPolicy), "utf-8");
607
612
  } catch (err) {
608
613
  const message = err instanceof Error ? err.message : String(err);
609
- logger$30.error({ err }, "Failed to write config files");
614
+ logger$32.error({ err }, "Failed to write config files");
610
615
  process.stderr.write(` Error: failed to write configuration — ${message}\n`);
611
616
  return INIT_EXIT_ERROR;
612
617
  }
@@ -681,7 +686,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
681
686
 
682
687
  //#endregion
683
688
  //#region src/modules/config/config-system-impl.ts
684
- const logger$29 = createLogger("config");
689
+ const logger$31 = createLogger("config");
685
690
  function deepMerge(base, override) {
686
691
  const result = { ...base };
687
692
  for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
@@ -726,7 +731,7 @@ function readEnvOverrides() {
726
731
  }
727
732
  const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
728
733
  if (!parsed.success) {
729
- logger$29.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
734
+ logger$31.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
730
735
  return {};
731
736
  }
732
737
  return parsed.data;
@@ -790,7 +795,7 @@ var ConfigSystemImpl = class {
790
795
  throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
791
796
  }
792
797
  this._config = result.data;
793
- logger$29.debug("Configuration loaded successfully");
798
+ logger$31.debug("Configuration loaded successfully");
794
799
  }
795
800
  getConfig() {
796
801
  if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
@@ -853,7 +858,7 @@ var ConfigSystemImpl = class {
853
858
  if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
854
859
  const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
855
860
  if (migrationOutput.result.success) {
856
- logger$29.info({
861
+ logger$31.info({
857
862
  from: version,
858
863
  to: CURRENT_CONFIG_FORMAT_VERSION,
859
864
  backup: migrationOutput.result.backupPath
@@ -896,7 +901,7 @@ function createConfigSystem(options = {}) {
896
901
 
897
902
  //#endregion
898
903
  //#region src/cli/commands/config.ts
899
- const logger$28 = createLogger("config-cmd");
904
+ const logger$30 = createLogger("config-cmd");
900
905
  const CONFIG_EXIT_SUCCESS = 0;
901
906
  const CONFIG_EXIT_ERROR = 1;
902
907
  const CONFIG_EXIT_INVALID = 2;
@@ -922,7 +927,7 @@ async function runConfigShow(opts = {}) {
922
927
  return CONFIG_EXIT_INVALID;
923
928
  }
924
929
  const message = err instanceof Error ? err.message : String(err);
925
- logger$28.error({ err }, "Failed to load configuration");
930
+ logger$30.error({ err }, "Failed to load configuration");
926
931
  process.stderr.write(` Error loading configuration: ${message}\n`);
927
932
  return CONFIG_EXIT_ERROR;
928
933
  }
@@ -996,7 +1001,7 @@ async function runConfigExport(opts = {}) {
996
1001
  return CONFIG_EXIT_INVALID;
997
1002
  }
998
1003
  const message = err instanceof Error ? err.message : String(err);
999
- logger$28.error({ err }, "Failed to load configuration");
1004
+ logger$30.error({ err }, "Failed to load configuration");
1000
1005
  process.stderr.write(`Error loading configuration: ${message}\n`);
1001
1006
  return CONFIG_EXIT_ERROR;
1002
1007
  }
@@ -1150,7 +1155,7 @@ function registerConfigCommand(program, _version) {
1150
1155
 
1151
1156
  //#endregion
1152
1157
  //#region src/cli/commands/merge.ts
1153
- const logger$27 = createLogger("merge-cmd");
1158
+ const logger$29 = createLogger("merge-cmd");
1154
1159
  const MERGE_EXIT_SUCCESS = 0;
1155
1160
  const MERGE_EXIT_CONFLICT = 1;
1156
1161
  const MERGE_EXIT_ERROR = 2;
@@ -1188,7 +1193,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
1188
1193
  projectRoot
1189
1194
  });
1190
1195
  try {
1191
- logger$27.info({
1196
+ logger$29.info({
1192
1197
  taskId,
1193
1198
  targetBranch
1194
1199
  }, "Running conflict detection...");
@@ -1210,7 +1215,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
1210
1215
  } catch (err) {
1211
1216
  const message = err instanceof Error ? err.message : String(err);
1212
1217
  console.error(`Error merging task "${taskId}": ${message}`);
1213
- logger$27.error({
1218
+ logger$29.error({
1214
1219
  taskId,
1215
1220
  err
1216
1221
  }, "merge --task failed");
@@ -1264,7 +1269,7 @@ async function mergeAll(targetBranch, projectRoot, taskIds) {
1264
1269
  error: message
1265
1270
  });
1266
1271
  console.log(` Error for task "${taskId}": ${message}`);
1267
- logger$27.error({
1272
+ logger$29.error({
1268
1273
  taskId,
1269
1274
  err
1270
1275
  }, "merge --all: task failed");
@@ -1317,7 +1322,7 @@ function registerMergeCommand(program, projectRoot = process.cwd()) {
1317
1322
 
1318
1323
  //#endregion
1319
1324
  //#region src/cli/commands/worktrees.ts
1320
- const logger$26 = createLogger("worktrees-cmd");
1325
+ const logger$28 = createLogger("worktrees-cmd");
1321
1326
  const WORKTREES_EXIT_SUCCESS = 0;
1322
1327
  const WORKTREES_EXIT_ERROR = 1;
1323
1328
  /** Valid task statuses for filtering */
@@ -1444,7 +1449,7 @@ async function listWorktreesAction(options) {
1444
1449
  try {
1445
1450
  worktreeInfos = await manager.listWorktrees();
1446
1451
  } catch (err) {
1447
- logger$26.error({ err }, "Failed to list worktrees");
1452
+ logger$28.error({ err }, "Failed to list worktrees");
1448
1453
  const message = err instanceof Error ? err.message : String(err);
1449
1454
  process.stderr.write(`Error listing worktrees: ${message}\n`);
1450
1455
  return WORKTREES_EXIT_ERROR;
@@ -1471,7 +1476,7 @@ async function listWorktreesAction(options) {
1471
1476
  } catch (err) {
1472
1477
  const message = err instanceof Error ? err.message : String(err);
1473
1478
  process.stderr.write(`Error: ${message}\n`);
1474
- logger$26.error({ err }, "listWorktreesAction failed");
1479
+ logger$28.error({ err }, "listWorktreesAction failed");
1475
1480
  return WORKTREES_EXIT_ERROR;
1476
1481
  }
1477
1482
  }
@@ -1733,7 +1738,7 @@ function getPlanningCostTotal(db, sessionId) {
1733
1738
 
1734
1739
  //#endregion
1735
1740
  //#region src/cli/commands/cost.ts
1736
- const logger$25 = createLogger("cost-cmd");
1741
+ const logger$27 = createLogger("cost-cmd");
1737
1742
  const COST_EXIT_SUCCESS = 0;
1738
1743
  const COST_EXIT_ERROR = 1;
1739
1744
  /**
@@ -1979,7 +1984,7 @@ async function runCostAction(options) {
1979
1984
  } catch (err) {
1980
1985
  const message = err instanceof Error ? err.message : String(err);
1981
1986
  process.stderr.write(`Error: ${message}\n`);
1982
- logger$25.error({ err }, "runCostAction failed");
1987
+ logger$27.error({ err }, "runCostAction failed");
1983
1988
  return COST_EXIT_ERROR;
1984
1989
  } finally {
1985
1990
  if (wrapper !== null) try {
@@ -2041,7 +2046,7 @@ function emitStatusSnapshot(snapshot) {
2041
2046
 
2042
2047
  //#endregion
2043
2048
  //#region src/recovery/crash-recovery.ts
2044
- const logger$24 = createLogger("crash-recovery");
2049
+ const logger$26 = createLogger("crash-recovery");
2045
2050
  var CrashRecoveryManager = class {
2046
2051
  db;
2047
2052
  gitWorktreeManager;
@@ -2094,7 +2099,7 @@ var CrashRecoveryManager = class {
2094
2099
  });
2095
2100
  }
2096
2101
  if (this.gitWorktreeManager !== void 0) this.cleanupOrphanedWorktrees().catch((err) => {
2097
- logger$24.warn({ err }, "Worktree cleanup failed during recovery (non-fatal)");
2102
+ logger$26.warn({ err }, "Worktree cleanup failed during recovery (non-fatal)");
2098
2103
  });
2099
2104
  let newlyReady = 0;
2100
2105
  if (sessionId !== void 0) {
@@ -2104,7 +2109,7 @@ var CrashRecoveryManager = class {
2104
2109
  const row = db.prepare("SELECT COUNT(*) as count FROM ready_tasks").get();
2105
2110
  newlyReady = row.count;
2106
2111
  }
2107
- logger$24.info({
2112
+ logger$26.info({
2108
2113
  event: "recovery:complete",
2109
2114
  recovered,
2110
2115
  failed,
@@ -2126,10 +2131,10 @@ var CrashRecoveryManager = class {
2126
2131
  if (this.gitWorktreeManager === void 0) return 0;
2127
2132
  try {
2128
2133
  const count = await this.gitWorktreeManager.cleanupAllWorktrees();
2129
- logger$24.info({ count }, "Cleaned up orphaned worktrees");
2134
+ logger$26.info({ count }, "Cleaned up orphaned worktrees");
2130
2135
  return count;
2131
2136
  } catch (err) {
2132
- logger$24.warn({ err }, "Failed to clean up orphaned worktrees — continuing");
2137
+ logger$26.warn({ err }, "Failed to clean up orphaned worktrees — continuing");
2133
2138
  return 0;
2134
2139
  }
2135
2140
  }
@@ -2212,7 +2217,7 @@ function setupGracefulShutdown(options) {
2212
2217
 
2213
2218
  //#endregion
2214
2219
  //#region src/cli/commands/start.ts
2215
- const logger$23 = createLogger("start-cmd");
2220
+ const logger$25 = createLogger("start-cmd");
2216
2221
  const START_EXIT_SUCCESS = 0;
2217
2222
  const START_EXIT_ERROR = 1;
2218
2223
  const START_EXIT_USAGE_ERROR = 2;
@@ -2321,7 +2326,7 @@ async function runStartAction(options) {
2321
2326
  let configWatcher$1 = null;
2322
2327
  const configFilePath = join(projectRoot, "substrate.config.yaml");
2323
2328
  if (noWatchConfig) {
2324
- logger$23.info("Config hot-reload disabled (--no-watch-config).");
2329
+ logger$25.info("Config hot-reload disabled (--no-watch-config).");
2325
2330
  process.stdout.write("Config hot-reload disabled (--no-watch-config).\n");
2326
2331
  } else {
2327
2332
  let currentHotConfig = config;
@@ -2336,7 +2341,7 @@ async function runStartAction(options) {
2336
2341
  const changedKeys = computeChangedKeys(previousConfig, newConfig);
2337
2342
  currentHotConfig = newConfig;
2338
2343
  const n = changedKeys.length;
2339
- logger$23.info({
2344
+ logger$25.info({
2340
2345
  changedKeys,
2341
2346
  configPath: configFilePath
2342
2347
  }, `Config reloaded: ${n} setting(s) changed`);
@@ -2348,7 +2353,7 @@ async function runStartAction(options) {
2348
2353
  });
2349
2354
  },
2350
2355
  onError: (err) => {
2351
- logger$23.error({
2356
+ logger$25.error({
2352
2357
  err,
2353
2358
  configPath: configFilePath
2354
2359
  }, `Config reload failed: ${err.message}. Continuing with previous config.`);
@@ -2361,7 +2366,7 @@ async function runStartAction(options) {
2361
2366
  let cleanupShutdown = null;
2362
2367
  if (resolvedGraphFile === null) if (interruptedSession !== void 0) {
2363
2368
  process.stdout.write(`Resuming interrupted session ${interruptedSession.id}\n`);
2364
- logger$23.info({ sessionId: interruptedSession.id }, "session:resumed");
2369
+ logger$25.info({ sessionId: interruptedSession.id }, "session:resumed");
2365
2370
  const recovery = new CrashRecoveryManager({
2366
2371
  db: databaseService.db,
2367
2372
  gitWorktreeManager
@@ -2485,7 +2490,7 @@ async function runStartAction(options) {
2485
2490
  } catch (err) {
2486
2491
  const message = err instanceof Error ? err.message : String(err);
2487
2492
  process.stderr.write(`Error: ${message}\n`);
2488
- logger$23.error({ err }, "runStartAction failed");
2493
+ logger$25.error({ err }, "runStartAction failed");
2489
2494
  return START_EXIT_ERROR;
2490
2495
  } finally {
2491
2496
  try {
@@ -2643,7 +2648,7 @@ function renderTaskGraph(snapshot, tasks) {
2643
2648
 
2644
2649
  //#endregion
2645
2650
  //#region src/cli/commands/status.ts
2646
- const logger$22 = createLogger("status-cmd");
2651
+ const logger$24 = createLogger("status-cmd");
2647
2652
  const STATUS_EXIT_SUCCESS = 0;
2648
2653
  const STATUS_EXIT_ERROR = 1;
2649
2654
  const STATUS_EXIT_NOT_FOUND = 2;
@@ -2796,7 +2801,7 @@ async function runStatusAction(options) {
2796
2801
  } catch (err) {
2797
2802
  const message = err instanceof Error ? err.message : String(err);
2798
2803
  process.stderr.write(`Error: ${message}\n`);
2799
- logger$22.error({ err }, "runStatusAction failed");
2804
+ logger$24.error({ err }, "runStatusAction failed");
2800
2805
  return STATUS_EXIT_ERROR;
2801
2806
  } finally {
2802
2807
  if (wrapper !== null) try {
@@ -2829,7 +2834,7 @@ function registerStatusCommand(program, _version = "0.0.0", projectRoot = proces
2829
2834
 
2830
2835
  //#endregion
2831
2836
  //#region src/cli/commands/pause.ts
2832
- const logger$21 = createLogger("pause-cmd");
2837
+ const logger$23 = createLogger("pause-cmd");
2833
2838
  const PAUSE_EXIT_SUCCESS = 0;
2834
2839
  const PAUSE_EXIT_ERROR = 1;
2835
2840
  const PAUSE_EXIT_USAGE_ERROR = 2;
@@ -2898,7 +2903,7 @@ async function runPauseAction(options) {
2898
2903
  } catch (err) {
2899
2904
  const message = err instanceof Error ? err.message : String(err);
2900
2905
  process.stderr.write(`Error: ${message}\n`);
2901
- logger$21.error({ err }, "runPauseAction failed");
2906
+ logger$23.error({ err }, "runPauseAction failed");
2902
2907
  return PAUSE_EXIT_ERROR;
2903
2908
  } finally {
2904
2909
  if (wrapper !== null) try {
@@ -2928,7 +2933,7 @@ function registerPauseCommand(program, version = "0.0.0", projectRoot = process.
2928
2933
 
2929
2934
  //#endregion
2930
2935
  //#region src/cli/commands/resume.ts
2931
- const logger$20 = createLogger("resume-cmd");
2936
+ const logger$22 = createLogger("resume-cmd");
2932
2937
  const RESUME_EXIT_SUCCESS = 0;
2933
2938
  const RESUME_EXIT_ERROR = 1;
2934
2939
  const RESUME_EXIT_USAGE_ERROR = 2;
@@ -3013,7 +3018,7 @@ async function runResumeAction(options) {
3013
3018
  } catch (err) {
3014
3019
  const message = err instanceof Error ? err.message : String(err);
3015
3020
  process.stderr.write(`Error: ${message}\n`);
3016
- logger$20.error({ err }, "runResumeAction failed");
3021
+ logger$22.error({ err }, "runResumeAction failed");
3017
3022
  return RESUME_EXIT_ERROR;
3018
3023
  } finally {
3019
3024
  if (wrapper !== null) try {
@@ -3046,7 +3051,7 @@ function registerResumeCommand(program, version = "0.0.0", projectRoot = process
3046
3051
 
3047
3052
  //#endregion
3048
3053
  //#region src/cli/commands/cancel.ts
3049
- const logger$19 = createLogger("cancel-cmd");
3054
+ const logger$21 = createLogger("cancel-cmd");
3050
3055
  const CANCEL_EXIT_SUCCESS = 0;
3051
3056
  const CANCEL_EXIT_ERROR = 1;
3052
3057
  const CANCEL_EXIT_USAGE_ERROR = 2;
@@ -3143,7 +3148,7 @@ async function runCancelAction(options) {
3143
3148
  } catch (err) {
3144
3149
  const message = err instanceof Error ? err.message : String(err);
3145
3150
  process.stderr.write(`Error: ${message}\n`);
3146
- logger$19.error({ err }, "runCancelAction failed");
3151
+ logger$21.error({ err }, "runCancelAction failed");
3147
3152
  return CANCEL_EXIT_ERROR;
3148
3153
  } finally {
3149
3154
  if (wrapper !== null) try {
@@ -3258,7 +3263,7 @@ function renderFailedTasksJson(tasks) {
3258
3263
 
3259
3264
  //#endregion
3260
3265
  //#region src/cli/commands/retry.ts
3261
- const logger$18 = createLogger("retry-cmd");
3266
+ const logger$20 = createLogger("retry-cmd");
3262
3267
  const RETRY_EXIT_SUCCESS = 0;
3263
3268
  const RETRY_EXIT_PARTIAL_FAILURE = 1;
3264
3269
  const RETRY_EXIT_USAGE_ERROR = 2;
@@ -3363,7 +3368,7 @@ async function runRetryAction(options) {
3363
3368
  } catch (err) {
3364
3369
  const message = err instanceof Error ? err.message : String(err);
3365
3370
  process.stderr.write(`Error: ${message}\n`);
3366
- logger$18.error({ err }, "runRetryAction failed");
3371
+ logger$20.error({ err }, "runRetryAction failed");
3367
3372
  return RETRY_EXIT_USAGE_ERROR;
3368
3373
  } finally {
3369
3374
  if (wrapper !== null) try {
@@ -3492,11 +3497,11 @@ async function runFollowMode(opts) {
3492
3497
  });
3493
3498
  });
3494
3499
  const sigintHandler = () => {
3495
- logger$18.info("SIGINT received — initiating graceful shutdown");
3500
+ logger$20.info("SIGINT received — initiating graceful shutdown");
3496
3501
  taskGraphEngine.cancelAll();
3497
3502
  };
3498
3503
  const sigtermHandler = () => {
3499
- logger$18.info("SIGTERM received — initiating graceful shutdown");
3504
+ logger$20.info("SIGTERM received — initiating graceful shutdown");
3500
3505
  taskGraphEngine.cancelAll();
3501
3506
  };
3502
3507
  process.once("SIGINT", sigintHandler);
@@ -3509,7 +3514,7 @@ async function runFollowMode(opts) {
3509
3514
  } catch (err) {
3510
3515
  const message = err instanceof Error ? err.message : String(err);
3511
3516
  process.stderr.write(`Error: ${message}\n`);
3512
- logger$18.error({ err }, "runFollowMode failed");
3517
+ logger$20.error({ err }, "runFollowMode failed");
3513
3518
  return RETRY_EXIT_USAGE_ERROR;
3514
3519
  } finally {
3515
3520
  try {
@@ -3969,7 +3974,7 @@ function buildMultiAgentInstructionsSection(agentCount) {
3969
3974
 
3970
3975
  //#endregion
3971
3976
  //#region src/modules/plan-generator/plan-generator.ts
3972
- const logger$17 = createLogger("plan-generator");
3977
+ const logger$19 = createLogger("plan-generator");
3973
3978
  /**
3974
3979
  * Wrapper around execFile that immediately closes stdin on the child process.
3975
3980
  * Some CLI tools (e.g. Claude Code) wait for stdin to close before processing
@@ -4146,7 +4151,7 @@ var PlanGenerator = class {
4146
4151
  else {
4147
4152
  const slugified = dep.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "").slice(0, 64);
4148
4153
  if (taskKeys.has(slugified)) resolvedDeps.push(slugified);
4149
- else logger$17.warn({
4154
+ else logger$19.warn({
4150
4155
  taskKey,
4151
4156
  dep
4152
4157
  }, `depends_on reference '${dep}' not found in task keys; removing`);
@@ -4893,7 +4898,7 @@ function getLatestPlanVersion(db, planId) {
4893
4898
 
4894
4899
  //#endregion
4895
4900
  //#region src/modules/plan-generator/plan-refiner.ts
4896
- const logger$16 = createLogger("plan-refiner");
4901
+ const logger$18 = createLogger("plan-refiner");
4897
4902
  var PlanRefiner = class {
4898
4903
  db;
4899
4904
  planGenerator;
@@ -4936,7 +4941,7 @@ var PlanRefiner = class {
4936
4941
  newFeedback: feedback,
4937
4942
  availableAgents: this.availableAgents
4938
4943
  });
4939
- logger$16.info({
4944
+ logger$18.info({
4940
4945
  planId,
4941
4946
  currentVersion,
4942
4947
  feedbackRounds: feedbackHistory.length
@@ -4983,7 +4988,7 @@ var PlanRefiner = class {
4983
4988
  newVersion,
4984
4989
  taskCount
4985
4990
  });
4986
- logger$16.info({
4991
+ logger$18.info({
4987
4992
  planId,
4988
4993
  newVersion,
4989
4994
  taskCount
@@ -5065,7 +5070,7 @@ function normalizeForDiff(value) {
5065
5070
 
5066
5071
  //#endregion
5067
5072
  //#region src/cli/commands/plan-refine.ts
5068
- const logger$15 = createLogger("plan-refine-cmd");
5073
+ const logger$17 = createLogger("plan-refine-cmd");
5069
5074
  const REFINE_EXIT_SUCCESS = 0;
5070
5075
  const REFINE_EXIT_ERROR = 1;
5071
5076
  const REFINE_EXIT_USAGE_ERROR = 2;
@@ -5107,7 +5112,7 @@ async function runPlanRefineAction(options) {
5107
5112
  let result;
5108
5113
  try {
5109
5114
  result = await refiner.refine(planId, feedback, (event, payload) => {
5110
- logger$15.info({
5115
+ logger$17.info({
5111
5116
  event,
5112
5117
  payload
5113
5118
  }, "Plan refinement event");
@@ -5150,7 +5155,7 @@ async function runPlanRefineAction(options) {
5150
5155
  } catch (err) {
5151
5156
  const message = err instanceof Error ? err.message : String(err);
5152
5157
  process.stderr.write(`Error: ${message}\n`);
5153
- logger$15.error({ err }, "runPlanRefineAction failed");
5158
+ logger$17.error({ err }, "runPlanRefineAction failed");
5154
5159
  return REFINE_EXIT_ERROR;
5155
5160
  } finally {
5156
5161
  dbWrapper.close();
@@ -5175,7 +5180,7 @@ function registerPlanRefineCommand(planCmd, _version = "0.0.0", projectRoot = pr
5175
5180
 
5176
5181
  //#endregion
5177
5182
  //#region src/cli/commands/plan-diff.ts
5178
- const logger$14 = createLogger("plan-diff-cmd");
5183
+ const logger$16 = createLogger("plan-diff-cmd");
5179
5184
  const DIFF_EXIT_SUCCESS = 0;
5180
5185
  const DIFF_EXIT_ERROR = 1;
5181
5186
  const DIFF_EXIT_NOT_FOUND = 2;
@@ -5218,7 +5223,7 @@ async function runPlanDiffAction(options) {
5218
5223
  } catch (err) {
5219
5224
  const message = err instanceof Error ? err.message : String(err);
5220
5225
  process.stderr.write(`Error: ${message}\n`);
5221
- logger$14.error({ err }, "runPlanDiffAction failed");
5226
+ logger$16.error({ err }, "runPlanDiffAction failed");
5222
5227
  return DIFF_EXIT_ERROR;
5223
5228
  } finally {
5224
5229
  dbWrapper.close();
@@ -5266,7 +5271,7 @@ function registerPlanDiffCommand(planCmd, _version = "0.0.0", projectRoot = proc
5266
5271
 
5267
5272
  //#endregion
5268
5273
  //#region src/cli/commands/plan-rollback.ts
5269
- const logger$13 = createLogger("plan-rollback-cmd");
5274
+ const logger$15 = createLogger("plan-rollback-cmd");
5270
5275
  const ROLLBACK_EXIT_SUCCESS = 0;
5271
5276
  const ROLLBACK_EXIT_ERROR = 1;
5272
5277
  const ROLLBACK_EXIT_USAGE_ERROR = 2;
@@ -5314,7 +5319,7 @@ async function runPlanRollbackAction(options, onEvent) {
5314
5319
  toVersion,
5315
5320
  newVersion
5316
5321
  });
5317
- logger$13.info({
5322
+ logger$15.info({
5318
5323
  planId,
5319
5324
  fromVersion,
5320
5325
  toVersion,
@@ -5355,7 +5360,7 @@ async function runPlanRollbackAction(options, onEvent) {
5355
5360
  } catch (err) {
5356
5361
  const message = err instanceof Error ? err.message : String(err);
5357
5362
  process.stderr.write(`Error: ${message}\n`);
5358
- logger$13.error({ err }, "runPlanRollbackAction failed");
5363
+ logger$15.error({ err }, "runPlanRollbackAction failed");
5359
5364
  return ROLLBACK_EXIT_ERROR;
5360
5365
  } finally {
5361
5366
  dbWrapper.close();
@@ -5549,7 +5554,7 @@ function validatePlan(raw, adapterRegistry, options) {
5549
5554
 
5550
5555
  //#endregion
5551
5556
  //#region src/cli/commands/plan.ts
5552
- const logger$12 = createLogger("plan-cmd");
5557
+ const logger$14 = createLogger("plan-cmd");
5553
5558
  const PLAN_EXIT_SUCCESS = 0;
5554
5559
  const PLAN_EXIT_ERROR = 1;
5555
5560
  const PLAN_EXIT_USAGE_ERROR = 2;
@@ -5693,7 +5698,7 @@ async function runPlanReviewAction(options) {
5693
5698
  }
5694
5699
  const message = err instanceof Error ? err.message : String(err);
5695
5700
  process.stderr.write(`Error: ${message}\n`);
5696
- logger$12.error({ err }, "runPlanReviewAction failed");
5701
+ logger$14.error({ err }, "runPlanReviewAction failed");
5697
5702
  return PLAN_EXIT_ERROR;
5698
5703
  }
5699
5704
  if (dryRun) {
@@ -5719,7 +5724,7 @@ async function runPlanReviewAction(options) {
5719
5724
  if (ext.endsWith(".yaml") || ext.endsWith(".yml")) taskGraph = load(planYaml);
5720
5725
  else taskGraph = JSON.parse(planYaml);
5721
5726
  } catch {
5722
- logger$12.warn("Could not read generated plan file for DB storage");
5727
+ logger$14.warn("Could not read generated plan file for DB storage");
5723
5728
  }
5724
5729
  if (outputFormat === "json") {
5725
5730
  const envelope = {
@@ -6492,6 +6497,193 @@ const PIPELINE_EVENT_METADATA = [
6492
6497
  description: "Log message."
6493
6498
  }
6494
6499
  ]
6500
+ },
6501
+ {
6502
+ type: "pipeline:heartbeat",
6503
+ description: "Periodic heartbeat emitted every 30s when no other progress events have fired.",
6504
+ when: "Every 30 seconds during pipeline execution. Allows detection of stalled pipelines.",
6505
+ fields: [
6506
+ {
6507
+ name: "ts",
6508
+ type: "string",
6509
+ description: "ISO-8601 timestamp generated at emit time."
6510
+ },
6511
+ {
6512
+ name: "run_id",
6513
+ type: "string",
6514
+ description: "Pipeline run ID."
6515
+ },
6516
+ {
6517
+ name: "active_dispatches",
6518
+ type: "number",
6519
+ description: "Number of sub-agents currently running."
6520
+ },
6521
+ {
6522
+ name: "completed_dispatches",
6523
+ type: "number",
6524
+ description: "Number of dispatches completed."
6525
+ },
6526
+ {
6527
+ name: "queued_dispatches",
6528
+ type: "number",
6529
+ description: "Number of dispatches waiting to start."
6530
+ }
6531
+ ]
6532
+ },
6533
+ {
6534
+ type: "story:stall",
6535
+ description: "Emitted when the watchdog detects no progress for an extended period (default: 10 minutes).",
6536
+ when: "When a story has shown no progress for longer than the watchdog timeout. Indicates likely stall.",
6537
+ fields: [
6538
+ {
6539
+ name: "ts",
6540
+ type: "string",
6541
+ description: "ISO-8601 timestamp generated at emit time."
6542
+ },
6543
+ {
6544
+ name: "run_id",
6545
+ type: "string",
6546
+ description: "Pipeline run ID."
6547
+ },
6548
+ {
6549
+ name: "story_key",
6550
+ type: "string",
6551
+ description: "Story key that appears stalled."
6552
+ },
6553
+ {
6554
+ name: "phase",
6555
+ type: "string",
6556
+ description: "Phase the story was in when stall was detected."
6557
+ },
6558
+ {
6559
+ name: "elapsed_ms",
6560
+ type: "number",
6561
+ description: "Milliseconds since last progress event."
6562
+ }
6563
+ ]
6564
+ },
6565
+ {
6566
+ type: "supervisor:kill",
6567
+ description: "Emitted by the supervisor when it kills a stalled pipeline process tree.",
6568
+ when: "When the supervisor detects a STALLED verdict and staleness exceeds the stall threshold.",
6569
+ fields: [
6570
+ {
6571
+ name: "ts",
6572
+ type: "string",
6573
+ description: "ISO-8601 timestamp generated at emit time."
6574
+ },
6575
+ {
6576
+ name: "run_id",
6577
+ type: "string|null",
6578
+ description: "Pipeline run ID that was killed."
6579
+ },
6580
+ {
6581
+ name: "reason",
6582
+ type: "stall",
6583
+ description: "Reason for the kill — always \"stall\" for threshold-triggered kills."
6584
+ },
6585
+ {
6586
+ name: "staleness_seconds",
6587
+ type: "number",
6588
+ description: "Seconds the pipeline had been stalled."
6589
+ },
6590
+ {
6591
+ name: "pids",
6592
+ type: "number[]",
6593
+ description: "PIDs that were killed (orchestrator + child processes)."
6594
+ }
6595
+ ]
6596
+ },
6597
+ {
6598
+ type: "supervisor:restart",
6599
+ description: "Emitted by the supervisor when it restarts a killed pipeline via auto resume.",
6600
+ when: "Immediately after killing a stalled pipeline, when the restart count is within the max limit.",
6601
+ fields: [
6602
+ {
6603
+ name: "ts",
6604
+ type: "string",
6605
+ description: "ISO-8601 timestamp generated at emit time."
6606
+ },
6607
+ {
6608
+ name: "run_id",
6609
+ type: "string|null",
6610
+ description: "Pipeline run ID being resumed."
6611
+ },
6612
+ {
6613
+ name: "attempt",
6614
+ type: "number",
6615
+ description: "Restart attempt number (1-based)."
6616
+ }
6617
+ ]
6618
+ },
6619
+ {
6620
+ type: "supervisor:abort",
6621
+ description: "Emitted by the supervisor when it exceeds the maximum restart limit and gives up.",
6622
+ when: "When the restart count reaches or exceeds --max-restarts and another stall is detected.",
6623
+ fields: [
6624
+ {
6625
+ name: "ts",
6626
+ type: "string",
6627
+ description: "ISO-8601 timestamp generated at emit time."
6628
+ },
6629
+ {
6630
+ name: "run_id",
6631
+ type: "string|null",
6632
+ description: "Pipeline run ID that was abandoned."
6633
+ },
6634
+ {
6635
+ name: "reason",
6636
+ type: "max_restarts_exceeded",
6637
+ description: "Always \"max_restarts_exceeded\"."
6638
+ },
6639
+ {
6640
+ name: "attempts",
6641
+ type: "number",
6642
+ description: "Number of restart attempts that were made."
6643
+ }
6644
+ ]
6645
+ },
6646
+ {
6647
+ type: "supervisor:summary",
6648
+ description: "Emitted by the supervisor when the pipeline reaches a terminal state.",
6649
+ when: "When the supervisor detects a NO_PIPELINE_RUNNING verdict (completed, failed, or stopped).",
6650
+ fields: [
6651
+ {
6652
+ name: "ts",
6653
+ type: "string",
6654
+ description: "ISO-8601 timestamp generated at emit time."
6655
+ },
6656
+ {
6657
+ name: "run_id",
6658
+ type: "string|null",
6659
+ description: "Pipeline run ID."
6660
+ },
6661
+ {
6662
+ name: "elapsed_seconds",
6663
+ type: "number",
6664
+ description: "Total elapsed seconds from supervisor start to terminal state."
6665
+ },
6666
+ {
6667
+ name: "succeeded",
6668
+ type: "string[]",
6669
+ description: "Story keys that completed successfully."
6670
+ },
6671
+ {
6672
+ name: "failed",
6673
+ type: "string[]",
6674
+ description: "Story keys that failed (non-COMPLETE, non-PENDING phases)."
6675
+ },
6676
+ {
6677
+ name: "escalated",
6678
+ type: "string[]",
6679
+ description: "Story keys that were escalated."
6680
+ },
6681
+ {
6682
+ name: "restarts",
6683
+ type: "number",
6684
+ description: "Number of restart cycles performed by the supervisor."
6685
+ }
6686
+ ]
6495
6687
  }
6496
6688
  ];
6497
6689
  /**
@@ -6709,12 +6901,29 @@ async function resolveMainRepoRoot(cwd = process.cwd()) {
6709
6901
 
6710
6902
  //#endregion
6711
6903
  //#region src/modules/methodology-pack/schemas.ts
6904
+ /**
6905
+ * A reference to a context value to inject into a step prompt.
6906
+ * Sources can be params (runtime parameters) or decisions (from the decision store).
6907
+ */
6908
+ const ContextRefSchema = z.object({
6909
+ placeholder: z.string().min(1),
6910
+ source: z.string().min(1)
6911
+ });
6912
+ /**
6913
+ * A single step within a multi-step phase decomposition.
6914
+ */
6915
+ const StepDefinitionSchema = z.object({
6916
+ name: z.string().min(1),
6917
+ template: z.string().min(1),
6918
+ context: z.array(ContextRefSchema).default([])
6919
+ });
6712
6920
  const PhaseDefinitionSchema = z.object({
6713
6921
  name: z.string().min(1),
6714
6922
  description: z.string().min(1),
6715
6923
  entryGates: z.array(z.string()),
6716
6924
  exitGates: z.array(z.string()),
6717
- artifacts: z.array(z.string())
6925
+ artifacts: z.array(z.string()),
6926
+ steps: z.array(StepDefinitionSchema).optional()
6718
6927
  });
6719
6928
  const PackManifestSchema = z.object({
6720
6929
  name: z.string().min(1),
@@ -6972,7 +7181,7 @@ function truncateToTokens(text, maxTokens) {
6972
7181
 
6973
7182
  //#endregion
6974
7183
  //#region src/modules/context-compiler/context-compiler-impl.ts
6975
- const logger$11 = createLogger("context-compiler");
7184
+ const logger$13 = createLogger("context-compiler");
6976
7185
  /**
6977
7186
  * Fraction of the original token budget that must remain (after required +
6978
7187
  * important sections) before an optional section is included.
@@ -7064,7 +7273,7 @@ var ContextCompilerImpl = class {
7064
7273
  includedParts.push(truncated);
7065
7274
  remainingBudget -= truncatedTokens;
7066
7275
  anyTruncated = true;
7067
- logger$11.warn({
7276
+ logger$13.warn({
7068
7277
  section: section.name,
7069
7278
  originalTokens: tokens,
7070
7279
  budgetTokens: truncatedTokens
@@ -7078,7 +7287,7 @@ var ContextCompilerImpl = class {
7078
7287
  });
7079
7288
  } else {
7080
7289
  anyTruncated = true;
7081
- logger$11.warn({
7290
+ logger$13.warn({
7082
7291
  section: section.name,
7083
7292
  tokens
7084
7293
  }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -7105,7 +7314,7 @@ var ContextCompilerImpl = class {
7105
7314
  } else {
7106
7315
  if (tokens > 0) {
7107
7316
  anyTruncated = true;
7108
- logger$11.warn({
7317
+ logger$13.warn({
7109
7318
  section: section.name,
7110
7319
  tokens,
7111
7320
  budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -7206,7 +7415,17 @@ const DEFAULT_TIMEOUTS = {
7206
7415
  "dev-story": 18e5,
7207
7416
  "code-review": 9e5,
7208
7417
  "minor-fixes": 6e5,
7209
- "major-rework": 9e5
7418
+ "major-rework": 9e5,
7419
+ "analysis-vision": 18e4,
7420
+ "analysis-scope": 18e4,
7421
+ "planning-classification": 18e4,
7422
+ "planning-frs": 24e4,
7423
+ "planning-nfrs": 24e4,
7424
+ "arch-context": 18e4,
7425
+ "arch-decisions": 24e4,
7426
+ "arch-patterns": 24e4,
7427
+ "story-epics": 24e4,
7428
+ "story-stories": 3e5
7210
7429
  };
7211
7430
  /**
7212
7431
  * Default max agentic turns per task type.
@@ -7225,7 +7444,17 @@ const DEFAULT_MAX_TURNS = {
7225
7444
  "major-rework": 50,
7226
7445
  "code-review": 25,
7227
7446
  "create-story": 20,
7228
- "minor-fixes": 25
7447
+ "minor-fixes": 25,
7448
+ "analysis-vision": 8,
7449
+ "analysis-scope": 10,
7450
+ "planning-classification": 8,
7451
+ "planning-frs": 12,
7452
+ "planning-nfrs": 12,
7453
+ "arch-context": 10,
7454
+ "arch-decisions": 15,
7455
+ "arch-patterns": 12,
7456
+ "story-epics": 15,
7457
+ "story-stories": 20
7229
7458
  };
7230
7459
  /**
7231
7460
  * Error thrown when dispatch is attempted on a shutting-down dispatcher.
@@ -7351,7 +7580,7 @@ function parseYamlResult(yamlText, schema) {
7351
7580
 
7352
7581
  //#endregion
7353
7582
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
7354
- const logger$10 = createLogger("agent-dispatch");
7583
+ const logger$12 = createLogger("agent-dispatch");
7355
7584
  const SHUTDOWN_GRACE_MS = 1e4;
7356
7585
  const SHUTDOWN_MAX_WAIT_MS = 3e4;
7357
7586
  const CHARS_PER_TOKEN = 4;
@@ -7420,7 +7649,7 @@ var DispatcherImpl = class {
7420
7649
  resolve: typedResolve,
7421
7650
  reject
7422
7651
  });
7423
- logger$10.debug({
7652
+ logger$12.debug({
7424
7653
  id,
7425
7654
  queueLength: this._queue.length
7426
7655
  }, "Dispatch queued");
@@ -7450,7 +7679,7 @@ var DispatcherImpl = class {
7450
7679
  }
7451
7680
  async shutdown() {
7452
7681
  this._shuttingDown = true;
7453
- logger$10.info({
7682
+ logger$12.info({
7454
7683
  running: this._running.size,
7455
7684
  queued: this._queue.length
7456
7685
  }, "Dispatcher shutting down");
@@ -7483,13 +7712,13 @@ var DispatcherImpl = class {
7483
7712
  }
7484
7713
  }, 50);
7485
7714
  });
7486
- logger$10.info("Dispatcher shutdown complete");
7715
+ logger$12.info("Dispatcher shutdown complete");
7487
7716
  }
7488
7717
  async _startDispatch(id, request, resolve$2) {
7489
7718
  const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
7490
7719
  const adapter = this._adapterRegistry.get(agent);
7491
7720
  if (adapter === void 0) {
7492
- logger$10.warn({
7721
+ logger$12.warn({
7493
7722
  id,
7494
7723
  agent
7495
7724
  }, "No adapter found for agent");
@@ -7533,7 +7762,7 @@ var DispatcherImpl = class {
7533
7762
  });
7534
7763
  const startedAt = Date.now();
7535
7764
  proc.on("error", (err) => {
7536
- logger$10.error({
7765
+ logger$12.error({
7537
7766
  id,
7538
7767
  binary: cmd.binary,
7539
7768
  error: err.message
@@ -7541,7 +7770,7 @@ var DispatcherImpl = class {
7541
7770
  });
7542
7771
  if (proc.stdin !== null) {
7543
7772
  proc.stdin.on("error", (err) => {
7544
- if (err.code !== "EPIPE") logger$10.warn({
7773
+ if (err.code !== "EPIPE") logger$12.warn({
7545
7774
  id,
7546
7775
  error: err.message
7547
7776
  }, "stdin write error");
@@ -7583,7 +7812,7 @@ var DispatcherImpl = class {
7583
7812
  agent,
7584
7813
  taskType
7585
7814
  });
7586
- logger$10.debug({
7815
+ logger$12.debug({
7587
7816
  id,
7588
7817
  agent,
7589
7818
  taskType,
@@ -7600,7 +7829,7 @@ var DispatcherImpl = class {
7600
7829
  dispatchId: id,
7601
7830
  timeoutMs
7602
7831
  });
7603
- logger$10.warn({
7832
+ logger$12.warn({
7604
7833
  id,
7605
7834
  agent,
7606
7835
  taskType,
@@ -7654,7 +7883,7 @@ var DispatcherImpl = class {
7654
7883
  exitCode: code,
7655
7884
  output: stdout
7656
7885
  });
7657
- logger$10.debug({
7886
+ logger$12.debug({
7658
7887
  id,
7659
7888
  agent,
7660
7889
  taskType,
@@ -7680,7 +7909,7 @@ var DispatcherImpl = class {
7680
7909
  error: stderr || `Process exited with code ${String(code)}`,
7681
7910
  exitCode: code
7682
7911
  });
7683
- logger$10.debug({
7912
+ logger$12.debug({
7684
7913
  id,
7685
7914
  agent,
7686
7915
  taskType,
@@ -7732,7 +7961,7 @@ var DispatcherImpl = class {
7732
7961
  const next = this._queue.shift();
7733
7962
  if (next === void 0) return;
7734
7963
  next.handle.status = "running";
7735
- logger$10.debug({
7964
+ logger$12.debug({
7736
7965
  id: next.id,
7737
7966
  queueLength: this._queue.length
7738
7967
  }, "Dequeued dispatch");
@@ -8093,7 +8322,7 @@ function getTokenUsageSummary(db, runId) {
8093
8322
 
8094
8323
  //#endregion
8095
8324
  //#region src/modules/compiled-workflows/prompt-assembler.ts
8096
- const logger$9 = createLogger("compiled-workflows:prompt-assembler");
8325
+ const logger$11 = createLogger("compiled-workflows:prompt-assembler");
8097
8326
  /**
8098
8327
  * Assemble a final prompt from a template and sections map.
8099
8328
  *
@@ -8118,7 +8347,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8118
8347
  tokenCount,
8119
8348
  truncated: false
8120
8349
  };
8121
- logger$9.warn({
8350
+ logger$11.warn({
8122
8351
  tokenCount,
8123
8352
  ceiling: tokenCeiling
8124
8353
  }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -8134,10 +8363,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8134
8363
  const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
8135
8364
  if (targetSectionTokens === 0) {
8136
8365
  contentMap[section.name] = "";
8137
- logger$9.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
8366
+ logger$11.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
8138
8367
  } else {
8139
8368
  contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
8140
- logger$9.warn({
8369
+ logger$11.warn({
8141
8370
  sectionName: section.name,
8142
8371
  targetSectionTokens
8143
8372
  }, "Section truncated to fit token budget");
@@ -8148,7 +8377,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8148
8377
  }
8149
8378
  if (tokenCount <= tokenCeiling) break;
8150
8379
  }
8151
- if (tokenCount > tokenCeiling) logger$9.warn({
8380
+ if (tokenCount > tokenCeiling) logger$11.warn({
8152
8381
  tokenCount,
8153
8382
  ceiling: tokenCeiling
8154
8383
  }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -8289,7 +8518,7 @@ const CodeReviewResultSchema = z.object({
8289
8518
 
8290
8519
  //#endregion
8291
8520
  //#region src/modules/compiled-workflows/create-story.ts
8292
- const logger$8 = createLogger("compiled-workflows:create-story");
8521
+ const logger$10 = createLogger("compiled-workflows:create-story");
8293
8522
  /**
8294
8523
  * Hard ceiling for the assembled create-story prompt.
8295
8524
  */
@@ -8313,7 +8542,7 @@ const TOKEN_CEILING$2 = 3e3;
8313
8542
  */
8314
8543
  async function runCreateStory(deps, params) {
8315
8544
  const { epicId, storyKey, pipelineRunId } = params;
8316
- logger$8.debug({
8545
+ logger$10.debug({
8317
8546
  epicId,
8318
8547
  storyKey,
8319
8548
  pipelineRunId
@@ -8323,7 +8552,7 @@ async function runCreateStory(deps, params) {
8323
8552
  template = await deps.pack.getPrompt("create-story");
8324
8553
  } catch (err) {
8325
8554
  const error = err instanceof Error ? err.message : String(err);
8326
- logger$8.error({ error }, "Failed to retrieve create-story prompt template");
8555
+ logger$10.error({ error }, "Failed to retrieve create-story prompt template");
8327
8556
  return {
8328
8557
  result: "failed",
8329
8558
  error: `Failed to retrieve prompt template: ${error}`,
@@ -8365,7 +8594,7 @@ async function runCreateStory(deps, params) {
8365
8594
  priority: "important"
8366
8595
  }
8367
8596
  ], TOKEN_CEILING$2);
8368
- logger$8.debug({
8597
+ logger$10.debug({
8369
8598
  tokenCount,
8370
8599
  truncated,
8371
8600
  tokenCeiling: TOKEN_CEILING$2
@@ -8382,7 +8611,7 @@ async function runCreateStory(deps, params) {
8382
8611
  dispatchResult = await handle.result;
8383
8612
  } catch (err) {
8384
8613
  const error = err instanceof Error ? err.message : String(err);
8385
- logger$8.error({
8614
+ logger$10.error({
8386
8615
  epicId,
8387
8616
  storyKey,
8388
8617
  error
@@ -8403,7 +8632,7 @@ async function runCreateStory(deps, params) {
8403
8632
  if (dispatchResult.status === "failed") {
8404
8633
  const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
8405
8634
  const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
8406
- logger$8.warn({
8635
+ logger$10.warn({
8407
8636
  epicId,
8408
8637
  storyKey,
8409
8638
  exitCode: dispatchResult.exitCode
@@ -8415,7 +8644,7 @@ async function runCreateStory(deps, params) {
8415
8644
  };
8416
8645
  }
8417
8646
  if (dispatchResult.status === "timeout") {
8418
- logger$8.warn({
8647
+ logger$10.warn({
8419
8648
  epicId,
8420
8649
  storyKey
8421
8650
  }, "Create-story dispatch timed out");
@@ -8428,7 +8657,7 @@ async function runCreateStory(deps, params) {
8428
8657
  if (dispatchResult.parsed === null) {
8429
8658
  const details = dispatchResult.parseError ?? "No YAML block found in output";
8430
8659
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
8431
- logger$8.warn({
8660
+ logger$10.warn({
8432
8661
  epicId,
8433
8662
  storyKey,
8434
8663
  details,
@@ -8444,7 +8673,7 @@ async function runCreateStory(deps, params) {
8444
8673
  const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
8445
8674
  if (!parseResult.success) {
8446
8675
  const details = parseResult.error.message;
8447
- logger$8.warn({
8676
+ logger$10.warn({
8448
8677
  epicId,
8449
8678
  storyKey,
8450
8679
  details
@@ -8457,7 +8686,7 @@ async function runCreateStory(deps, params) {
8457
8686
  };
8458
8687
  }
8459
8688
  const parsed = parseResult.data;
8460
- logger$8.info({
8689
+ logger$10.info({
8461
8690
  epicId,
8462
8691
  storyKey,
8463
8692
  storyFile: parsed.story_file,
@@ -8479,7 +8708,7 @@ function getImplementationDecisions(deps) {
8479
8708
  try {
8480
8709
  return getDecisionsByPhase(deps.db, "implementation");
8481
8710
  } catch (err) {
8482
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
8711
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
8483
8712
  return [];
8484
8713
  }
8485
8714
  }
@@ -8495,13 +8724,13 @@ function getEpicShard(decisions, epicId, projectRoot) {
8495
8724
  if (projectRoot) {
8496
8725
  const fallback = readEpicShardFromFile(projectRoot, epicId);
8497
8726
  if (fallback) {
8498
- logger$8.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
8727
+ logger$10.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
8499
8728
  return fallback;
8500
8729
  }
8501
8730
  }
8502
8731
  return "";
8503
8732
  } catch (err) {
8504
- logger$8.warn({
8733
+ logger$10.warn({
8505
8734
  epicId,
8506
8735
  error: err instanceof Error ? err.message : String(err)
8507
8736
  }, "Failed to retrieve epic shard");
@@ -8518,7 +8747,7 @@ function getPrevDevNotes(decisions, epicId) {
8518
8747
  if (devNotes.length === 0) return "";
8519
8748
  return devNotes[devNotes.length - 1].value;
8520
8749
  } catch (err) {
8521
- logger$8.warn({
8750
+ logger$10.warn({
8522
8751
  epicId,
8523
8752
  error: err instanceof Error ? err.message : String(err)
8524
8753
  }, "Failed to retrieve prev dev notes");
@@ -8538,13 +8767,13 @@ function getArchConstraints$1(deps) {
8538
8767
  if (deps.projectRoot) {
8539
8768
  const fallback = readArchConstraintsFromFile(deps.projectRoot);
8540
8769
  if (fallback) {
8541
- logger$8.info("Using file-based fallback for architecture constraints (decisions table empty)");
8770
+ logger$10.info("Using file-based fallback for architecture constraints (decisions table empty)");
8542
8771
  return fallback;
8543
8772
  }
8544
8773
  }
8545
8774
  return "";
8546
8775
  } catch (err) {
8547
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
8776
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
8548
8777
  return "";
8549
8778
  }
8550
8779
  }
@@ -8564,7 +8793,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
8564
8793
  const match = pattern.exec(content);
8565
8794
  return match ? match[0].trim() : "";
8566
8795
  } catch (err) {
8567
- logger$8.warn({
8796
+ logger$10.warn({
8568
8797
  epicId,
8569
8798
  error: err instanceof Error ? err.message : String(err)
8570
8799
  }, "File-based epic shard fallback failed");
@@ -8587,7 +8816,7 @@ function readArchConstraintsFromFile(projectRoot) {
8587
8816
  const content = readFileSync$1(archPath, "utf-8");
8588
8817
  return content.slice(0, 1500);
8589
8818
  } catch (err) {
8590
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
8819
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
8591
8820
  return "";
8592
8821
  }
8593
8822
  }
@@ -8600,14 +8829,14 @@ async function getStoryTemplate(deps) {
8600
8829
  try {
8601
8830
  return await deps.pack.getTemplate("story");
8602
8831
  } catch (err) {
8603
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
8832
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
8604
8833
  return "";
8605
8834
  }
8606
8835
  }
8607
8836
 
8608
8837
  //#endregion
8609
8838
  //#region src/modules/compiled-workflows/git-helpers.ts
8610
- const logger$7 = createLogger("compiled-workflows:git-helpers");
8839
+ const logger$9 = createLogger("compiled-workflows:git-helpers");
8611
8840
  /**
8612
8841
  * Capture the full git diff for HEAD (working tree vs current commit).
8613
8842
  *
@@ -8731,7 +8960,7 @@ async function runGitCommand(args, cwd, logLabel) {
8731
8960
  stderr += chunk.toString("utf-8");
8732
8961
  });
8733
8962
  proc.on("error", (err) => {
8734
- logger$7.warn({
8963
+ logger$9.warn({
8735
8964
  label: logLabel,
8736
8965
  cwd,
8737
8966
  error: err.message
@@ -8740,7 +8969,7 @@ async function runGitCommand(args, cwd, logLabel) {
8740
8969
  });
8741
8970
  proc.on("close", (code) => {
8742
8971
  if (code !== 0) {
8743
- logger$7.warn({
8972
+ logger$9.warn({
8744
8973
  label: logLabel,
8745
8974
  cwd,
8746
8975
  code,
@@ -8756,7 +8985,7 @@ async function runGitCommand(args, cwd, logLabel) {
8756
8985
 
8757
8986
  //#endregion
8758
8987
  //#region src/modules/compiled-workflows/dev-story.ts
8759
- const logger$6 = createLogger("compiled-workflows:dev-story");
8988
+ const logger$8 = createLogger("compiled-workflows:dev-story");
8760
8989
  /** Hard token ceiling for the assembled dev-story prompt */
8761
8990
  const TOKEN_CEILING$1 = 24e3;
8762
8991
  /** Default timeout for dev-story dispatches in milliseconds (30 min) */
@@ -8778,7 +9007,7 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
8778
9007
  */
8779
9008
  async function runDevStory(deps, params) {
8780
9009
  const { storyKey, storyFilePath, taskScope, priorFiles } = params;
8781
- logger$6.info({
9010
+ logger$8.info({
8782
9011
  storyKey,
8783
9012
  storyFilePath
8784
9013
  }, "Starting compiled dev-story workflow");
@@ -8820,10 +9049,10 @@ async function runDevStory(deps, params) {
8820
9049
  let template;
8821
9050
  try {
8822
9051
  template = await deps.pack.getPrompt("dev-story");
8823
- logger$6.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
9052
+ logger$8.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
8824
9053
  } catch (err) {
8825
9054
  const error = err instanceof Error ? err.message : String(err);
8826
- logger$6.error({
9055
+ logger$8.error({
8827
9056
  storyKey,
8828
9057
  error
8829
9058
  }, "Failed to retrieve dev-story prompt template");
@@ -8834,14 +9063,14 @@ async function runDevStory(deps, params) {
8834
9063
  storyContent = await readFile$2(storyFilePath, "utf-8");
8835
9064
  } catch (err) {
8836
9065
  if (err.code === "ENOENT") {
8837
- logger$6.error({
9066
+ logger$8.error({
8838
9067
  storyKey,
8839
9068
  storyFilePath
8840
9069
  }, "Story file not found");
8841
9070
  return makeFailureResult("story_file_not_found");
8842
9071
  }
8843
9072
  const error = err instanceof Error ? err.message : String(err);
8844
- logger$6.error({
9073
+ logger$8.error({
8845
9074
  storyKey,
8846
9075
  storyFilePath,
8847
9076
  error
@@ -8849,7 +9078,7 @@ async function runDevStory(deps, params) {
8849
9078
  return makeFailureResult(`story_file_read_error: ${error}`);
8850
9079
  }
8851
9080
  if (storyContent.trim().length === 0) {
8852
- logger$6.error({
9081
+ logger$8.error({
8853
9082
  storyKey,
8854
9083
  storyFilePath
8855
9084
  }, "Story file is empty");
@@ -8861,17 +9090,17 @@ async function runDevStory(deps, params) {
8861
9090
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
8862
9091
  if (testPatternDecisions.length > 0) {
8863
9092
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
8864
- logger$6.debug({
9093
+ logger$8.debug({
8865
9094
  storyKey,
8866
9095
  count: testPatternDecisions.length
8867
9096
  }, "Loaded test patterns from decision store");
8868
9097
  } else {
8869
9098
  testPatternsContent = DEFAULT_VITEST_PATTERNS;
8870
- logger$6.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
9099
+ logger$8.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
8871
9100
  }
8872
9101
  } catch (err) {
8873
9102
  const error = err instanceof Error ? err.message : String(err);
8874
- logger$6.warn({
9103
+ logger$8.warn({
8875
9104
  storyKey,
8876
9105
  error
8877
9106
  }, "Failed to load test patterns — using defaults");
@@ -8914,7 +9143,7 @@ async function runDevStory(deps, params) {
8914
9143
  }
8915
9144
  ];
8916
9145
  const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$1);
8917
- logger$6.info({
9146
+ logger$8.info({
8918
9147
  storyKey,
8919
9148
  tokenCount,
8920
9149
  ceiling: TOKEN_CEILING$1,
@@ -8933,7 +9162,7 @@ async function runDevStory(deps, params) {
8933
9162
  dispatchResult = await handle.result;
8934
9163
  } catch (err) {
8935
9164
  const error = err instanceof Error ? err.message : String(err);
8936
- logger$6.error({
9165
+ logger$8.error({
8937
9166
  storyKey,
8938
9167
  error
8939
9168
  }, "Dispatch threw an unexpected error");
@@ -8944,11 +9173,11 @@ async function runDevStory(deps, params) {
8944
9173
  output: dispatchResult.tokenEstimate.output
8945
9174
  };
8946
9175
  if (dispatchResult.status === "timeout") {
8947
- logger$6.error({
9176
+ logger$8.error({
8948
9177
  storyKey,
8949
9178
  durationMs: dispatchResult.durationMs
8950
9179
  }, "Dev-story dispatch timed out");
8951
- if (dispatchResult.output.length > 0) logger$6.info({
9180
+ if (dispatchResult.output.length > 0) logger$8.info({
8952
9181
  storyKey,
8953
9182
  partialOutput: dispatchResult.output.slice(0, 500)
8954
9183
  }, "Partial output before timeout");
@@ -8958,12 +9187,12 @@ async function runDevStory(deps, params) {
8958
9187
  };
8959
9188
  }
8960
9189
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
8961
- logger$6.error({
9190
+ logger$8.error({
8962
9191
  storyKey,
8963
9192
  exitCode: dispatchResult.exitCode,
8964
9193
  status: dispatchResult.status
8965
9194
  }, "Dev-story dispatch failed");
8966
- if (dispatchResult.output.length > 0) logger$6.info({
9195
+ if (dispatchResult.output.length > 0) logger$8.info({
8967
9196
  storyKey,
8968
9197
  partialOutput: dispatchResult.output.slice(0, 500)
8969
9198
  }, "Partial output from failed dispatch");
@@ -8975,7 +9204,7 @@ async function runDevStory(deps, params) {
8975
9204
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
8976
9205
  const details = dispatchResult.parseError ?? "parsed result was null";
8977
9206
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
8978
- logger$6.error({
9207
+ logger$8.error({
8979
9208
  storyKey,
8980
9209
  parseError: details,
8981
9210
  rawOutputSnippet: rawSnippet
@@ -8983,12 +9212,12 @@ async function runDevStory(deps, params) {
8983
9212
  let filesModified = [];
8984
9213
  try {
8985
9214
  filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
8986
- if (filesModified.length > 0) logger$6.info({
9215
+ if (filesModified.length > 0) logger$8.info({
8987
9216
  storyKey,
8988
9217
  fileCount: filesModified.length
8989
9218
  }, "Recovered files_modified from git status (YAML fallback)");
8990
9219
  } catch (err) {
8991
- logger$6.warn({
9220
+ logger$8.warn({
8992
9221
  storyKey,
8993
9222
  error: err instanceof Error ? err.message : String(err)
8994
9223
  }, "Failed to recover files_modified from git");
@@ -9005,7 +9234,7 @@ async function runDevStory(deps, params) {
9005
9234
  };
9006
9235
  }
9007
9236
  const parsed = dispatchResult.parsed;
9008
- logger$6.info({
9237
+ logger$8.info({
9009
9238
  storyKey,
9010
9239
  result: parsed.result,
9011
9240
  acMet: parsed.ac_met.length
@@ -9144,7 +9373,7 @@ function extractFilesInScope(storyContent) {
9144
9373
 
9145
9374
  //#endregion
9146
9375
  //#region src/modules/compiled-workflows/code-review.ts
9147
- const logger$5 = createLogger("compiled-workflows:code-review");
9376
+ const logger$7 = createLogger("compiled-workflows:code-review");
9148
9377
  /**
9149
9378
  * Hard token ceiling for the assembled code-review prompt (50,000 tokens).
9150
9379
  * Quality reviews require seeing actual code diffs, not just file names.
@@ -9184,7 +9413,7 @@ function defaultFailResult(error, tokenUsage) {
9184
9413
  async function runCodeReview(deps, params) {
9185
9414
  const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
9186
9415
  const cwd = workingDirectory ?? process.cwd();
9187
- logger$5.debug({
9416
+ logger$7.debug({
9188
9417
  storyKey,
9189
9418
  storyFilePath,
9190
9419
  cwd,
@@ -9195,7 +9424,7 @@ async function runCodeReview(deps, params) {
9195
9424
  template = await deps.pack.getPrompt("code-review");
9196
9425
  } catch (err) {
9197
9426
  const error = err instanceof Error ? err.message : String(err);
9198
- logger$5.error({ error }, "Failed to retrieve code-review prompt template");
9427
+ logger$7.error({ error }, "Failed to retrieve code-review prompt template");
9199
9428
  return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
9200
9429
  input: 0,
9201
9430
  output: 0
@@ -9206,7 +9435,7 @@ async function runCodeReview(deps, params) {
9206
9435
  storyContent = await readFile$2(storyFilePath, "utf-8");
9207
9436
  } catch (err) {
9208
9437
  const error = err instanceof Error ? err.message : String(err);
9209
- logger$5.error({
9438
+ logger$7.error({
9210
9439
  storyFilePath,
9211
9440
  error
9212
9441
  }, "Failed to read story file");
@@ -9226,12 +9455,12 @@ async function runCodeReview(deps, params) {
9226
9455
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
9227
9456
  if (scopedTotal <= TOKEN_CEILING) {
9228
9457
  gitDiffContent = scopedDiff;
9229
- logger$5.debug({
9458
+ logger$7.debug({
9230
9459
  fileCount: filesModified.length,
9231
9460
  tokenCount: scopedTotal
9232
9461
  }, "Using scoped file diff");
9233
9462
  } else {
9234
- logger$5.warn({
9463
+ logger$7.warn({
9235
9464
  estimatedTotal: scopedTotal,
9236
9465
  ceiling: TOKEN_CEILING,
9237
9466
  fileCount: filesModified.length
@@ -9245,7 +9474,7 @@ async function runCodeReview(deps, params) {
9245
9474
  const fullTotal = nonDiffTokens + countTokens(fullDiff);
9246
9475
  if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
9247
9476
  else {
9248
- logger$5.warn({
9477
+ logger$7.warn({
9249
9478
  estimatedTotal: fullTotal,
9250
9479
  ceiling: TOKEN_CEILING
9251
9480
  }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -9283,11 +9512,11 @@ async function runCodeReview(deps, params) {
9283
9512
  }
9284
9513
  ];
9285
9514
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
9286
- if (assembleResult.truncated) logger$5.warn({
9515
+ if (assembleResult.truncated) logger$7.warn({
9287
9516
  storyKey,
9288
9517
  tokenCount: assembleResult.tokenCount
9289
9518
  }, "Code-review prompt truncated to fit token ceiling");
9290
- logger$5.debug({
9519
+ logger$7.debug({
9291
9520
  storyKey,
9292
9521
  tokenCount: assembleResult.tokenCount,
9293
9522
  truncated: assembleResult.truncated
@@ -9305,7 +9534,7 @@ async function runCodeReview(deps, params) {
9305
9534
  dispatchResult = await handle.result;
9306
9535
  } catch (err) {
9307
9536
  const error = err instanceof Error ? err.message : String(err);
9308
- logger$5.error({
9537
+ logger$7.error({
9309
9538
  storyKey,
9310
9539
  error
9311
9540
  }, "Code-review dispatch threw unexpected error");
@@ -9321,7 +9550,7 @@ async function runCodeReview(deps, params) {
9321
9550
  const rawOutput = dispatchResult.output ?? void 0;
9322
9551
  if (dispatchResult.status === "failed") {
9323
9552
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
9324
- logger$5.warn({
9553
+ logger$7.warn({
9325
9554
  storyKey,
9326
9555
  exitCode: dispatchResult.exitCode
9327
9556
  }, "Code-review dispatch failed");
@@ -9331,7 +9560,7 @@ async function runCodeReview(deps, params) {
9331
9560
  };
9332
9561
  }
9333
9562
  if (dispatchResult.status === "timeout") {
9334
- logger$5.warn({ storyKey }, "Code-review dispatch timed out");
9563
+ logger$7.warn({ storyKey }, "Code-review dispatch timed out");
9335
9564
  return {
9336
9565
  ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
9337
9566
  rawOutput
@@ -9339,7 +9568,7 @@ async function runCodeReview(deps, params) {
9339
9568
  }
9340
9569
  if (dispatchResult.parsed === null) {
9341
9570
  const details = dispatchResult.parseError ?? "No YAML block found in output";
9342
- logger$5.warn({
9571
+ logger$7.warn({
9343
9572
  storyKey,
9344
9573
  details
9345
9574
  }, "Code-review output schema validation failed");
@@ -9356,7 +9585,7 @@ async function runCodeReview(deps, params) {
9356
9585
  const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
9357
9586
  if (!parseResult.success) {
9358
9587
  const details = parseResult.error.message;
9359
- logger$5.warn({
9588
+ logger$7.warn({
9360
9589
  storyKey,
9361
9590
  details
9362
9591
  }, "Code-review output failed schema validation");
@@ -9371,13 +9600,13 @@ async function runCodeReview(deps, params) {
9371
9600
  };
9372
9601
  }
9373
9602
  const parsed = parseResult.data;
9374
- if (parsed.agentVerdict !== parsed.verdict) logger$5.info({
9603
+ if (parsed.agentVerdict !== parsed.verdict) logger$7.info({
9375
9604
  storyKey,
9376
9605
  agentVerdict: parsed.agentVerdict,
9377
9606
  pipelineVerdict: parsed.verdict,
9378
9607
  issues: parsed.issues
9379
9608
  }, "Pipeline overrode agent verdict based on issue severities");
9380
- logger$5.info({
9609
+ logger$7.info({
9381
9610
  storyKey,
9382
9611
  verdict: parsed.verdict,
9383
9612
  issues: parsed.issues
@@ -9402,7 +9631,7 @@ function getArchConstraints(deps) {
9402
9631
  if (constraints.length === 0) return "";
9403
9632
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
9404
9633
  } catch (err) {
9405
- logger$5.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
9634
+ logger$7.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
9406
9635
  return "";
9407
9636
  }
9408
9637
  }
@@ -9734,7 +9963,7 @@ function detectConflictGroups(storyKeys, config) {
9734
9963
 
9735
9964
  //#endregion
9736
9965
  //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
9737
- const logger$4 = createLogger("implementation-orchestrator:seed");
9966
+ const logger$6 = createLogger("implementation-orchestrator:seed");
9738
9967
  /** Max chars for the architecture summary seeded into decisions */
9739
9968
  const MAX_ARCH_CHARS = 6e3;
9740
9969
  /** Max chars per epic shard */
@@ -9768,12 +9997,12 @@ function seedMethodologyContext(db, projectRoot) {
9768
9997
  const testCount = seedTestPatterns(db, projectRoot);
9769
9998
  if (testCount === -1) result.skippedCategories.push("test-patterns");
9770
9999
  else result.decisionsCreated += testCount;
9771
- logger$4.info({
10000
+ logger$6.info({
9772
10001
  decisionsCreated: result.decisionsCreated,
9773
10002
  skippedCategories: result.skippedCategories
9774
10003
  }, "Methodology context seeding complete");
9775
10004
  } catch (err) {
9776
- logger$4.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
10005
+ logger$6.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
9777
10006
  }
9778
10007
  return result;
9779
10008
  }
@@ -9817,7 +10046,7 @@ function seedArchitecture(db, projectRoot) {
9817
10046
  });
9818
10047
  count = 1;
9819
10048
  }
9820
- logger$4.debug({ count }, "Seeded architecture decisions");
10049
+ logger$6.debug({ count }, "Seeded architecture decisions");
9821
10050
  return count;
9822
10051
  }
9823
10052
  /**
@@ -9845,7 +10074,7 @@ function seedEpicShards(db, projectRoot) {
9845
10074
  });
9846
10075
  count++;
9847
10076
  }
9848
- logger$4.debug({ count }, "Seeded epic shard decisions");
10077
+ logger$6.debug({ count }, "Seeded epic shard decisions");
9849
10078
  return count;
9850
10079
  }
9851
10080
  /**
@@ -9866,7 +10095,7 @@ function seedTestPatterns(db, projectRoot) {
9866
10095
  value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
9867
10096
  rationale: "Detected from project configuration at orchestrator startup"
9868
10097
  });
9869
- logger$4.debug("Seeded test patterns decision");
10098
+ logger$6.debug("Seeded test patterns decision");
9870
10099
  return 1;
9871
10100
  }
9872
10101
  /**
@@ -10058,7 +10287,7 @@ function createPauseGate() {
10058
10287
  */
10059
10288
  function createImplementationOrchestrator(deps) {
10060
10289
  const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot } = deps;
10061
- const logger$31 = createLogger("implementation-orchestrator");
10290
+ const logger$33 = createLogger("implementation-orchestrator");
10062
10291
  let _state = "IDLE";
10063
10292
  let _startedAt;
10064
10293
  let _completedAt;
@@ -10066,6 +10295,10 @@ function createImplementationOrchestrator(deps) {
10066
10295
  const _stories = new Map();
10067
10296
  let _paused = false;
10068
10297
  let _pauseGate = null;
10298
+ let _lastProgressTs = Date.now();
10299
+ let _heartbeatTimer = null;
10300
+ const HEARTBEAT_INTERVAL_MS = 3e4;
10301
+ const WATCHDOG_TIMEOUT_MS = 6e5;
10069
10302
  function getStatus() {
10070
10303
  const stories = {};
10071
10304
  for (const [key, s] of _stories) stories[key] = { ...s };
@@ -10087,6 +10320,7 @@ function createImplementationOrchestrator(deps) {
10087
10320
  }
10088
10321
  function persistState() {
10089
10322
  if (config.pipelineRunId === void 0) return;
10323
+ recordProgress();
10090
10324
  try {
10091
10325
  const serialized = JSON.stringify(getStatus());
10092
10326
  updatePipelineRun(db, config.pipelineRunId, {
@@ -10094,7 +10328,51 @@ function createImplementationOrchestrator(deps) {
10094
10328
  token_usage_json: serialized
10095
10329
  });
10096
10330
  } catch (err) {
10097
- logger$31.warn("Failed to persist orchestrator state", { err });
10331
+ logger$33.warn("Failed to persist orchestrator state", { err });
10332
+ }
10333
+ }
10334
+ function recordProgress() {
10335
+ _lastProgressTs = Date.now();
10336
+ }
10337
+ function startHeartbeat() {
10338
+ if (_heartbeatTimer !== null) return;
10339
+ _heartbeatTimer = setInterval(() => {
10340
+ if (_state !== "RUNNING") return;
10341
+ let active = 0;
10342
+ let completed = 0;
10343
+ let queued = 0;
10344
+ for (const s of _stories.values()) if (s.phase === "COMPLETE" || s.phase === "ESCALATED") completed++;
10345
+ else if (s.phase === "PENDING") queued++;
10346
+ else active++;
10347
+ eventBus.emit("orchestrator:heartbeat", {
10348
+ runId: config.pipelineRunId ?? "",
10349
+ activeDispatches: active,
10350
+ completedDispatches: completed,
10351
+ queuedDispatches: queued
10352
+ });
10353
+ const elapsed = Date.now() - _lastProgressTs;
10354
+ if (elapsed >= WATCHDOG_TIMEOUT_MS) {
10355
+ for (const [key, s] of _stories) if (s.phase !== "PENDING" && s.phase !== "COMPLETE" && s.phase !== "ESCALATED") {
10356
+ logger$33.warn({
10357
+ storyKey: key,
10358
+ phase: s.phase,
10359
+ elapsedMs: elapsed
10360
+ }, "Watchdog: possible stall detected");
10361
+ eventBus.emit("orchestrator:stall", {
10362
+ runId: config.pipelineRunId ?? "",
10363
+ storyKey: key,
10364
+ phase: s.phase,
10365
+ elapsedMs: elapsed
10366
+ });
10367
+ }
10368
+ }
10369
+ }, HEARTBEAT_INTERVAL_MS);
10370
+ if (_heartbeatTimer && typeof _heartbeatTimer === "object" && "unref" in _heartbeatTimer) _heartbeatTimer.unref();
10371
+ }
10372
+ function stopHeartbeat() {
10373
+ if (_heartbeatTimer !== null) {
10374
+ clearInterval(_heartbeatTimer);
10375
+ _heartbeatTimer = null;
10098
10376
  }
10099
10377
  }
10100
10378
  /**
@@ -10111,7 +10389,7 @@ function createImplementationOrchestrator(deps) {
10111
10389
  * exhausted retries the story is ESCALATED.
10112
10390
  */
10113
10391
  async function processStory(storyKey) {
10114
- logger$31.info("Processing story", { storyKey });
10392
+ logger$33.info("Processing story", { storyKey });
10115
10393
  await waitIfPaused();
10116
10394
  if (_state !== "RUNNING") return;
10117
10395
  updateStory(storyKey, {
@@ -10125,7 +10403,7 @@ function createImplementationOrchestrator(deps) {
10125
10403
  const match = files.find((f) => f.startsWith(`${storyKey}-`) && f.endsWith(".md"));
10126
10404
  if (match) {
10127
10405
  storyFilePath = join$1(artifactsDir, match);
10128
- logger$31.info({
10406
+ logger$33.info({
10129
10407
  storyKey,
10130
10408
  storyFilePath
10131
10409
  }, "Found existing story file — skipping create-story");
@@ -10219,7 +10497,7 @@ function createImplementationOrchestrator(deps) {
10219
10497
  try {
10220
10498
  storyContentForAnalysis = await readFile$2(storyFilePath ?? "", "utf-8");
10221
10499
  } catch (err) {
10222
- logger$31.error({
10500
+ logger$33.error({
10223
10501
  storyKey,
10224
10502
  storyFilePath,
10225
10503
  error: err instanceof Error ? err.message : String(err)
@@ -10227,7 +10505,7 @@ function createImplementationOrchestrator(deps) {
10227
10505
  }
10228
10506
  const analysis = analyzeStoryComplexity(storyContentForAnalysis);
10229
10507
  const batches = planTaskBatches(analysis);
10230
- logger$31.info({
10508
+ logger$33.info({
10231
10509
  storyKey,
10232
10510
  estimatedScope: analysis.estimatedScope,
10233
10511
  batchCount: batches.length,
@@ -10245,7 +10523,7 @@ function createImplementationOrchestrator(deps) {
10245
10523
  if (_state !== "RUNNING") break;
10246
10524
  const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
10247
10525
  const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
10248
- logger$31.info({
10526
+ logger$33.info({
10249
10527
  storyKey,
10250
10528
  batchIndex: batch.batchIndex,
10251
10529
  taskCount: batch.taskIds.length
@@ -10268,7 +10546,7 @@ function createImplementationOrchestrator(deps) {
10268
10546
  });
10269
10547
  } catch (batchErr) {
10270
10548
  const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
10271
- logger$31.warn({
10549
+ logger$33.warn({
10272
10550
  storyKey,
10273
10551
  batchIndex: batch.batchIndex,
10274
10552
  error: errMsg
@@ -10288,7 +10566,7 @@ function createImplementationOrchestrator(deps) {
10288
10566
  filesModified: batchFilesModified,
10289
10567
  result: batchResult.result === "success" ? "success" : "failed"
10290
10568
  };
10291
- logger$31.info(batchMetrics, "Batch dev-story metrics");
10569
+ logger$33.info(batchMetrics, "Batch dev-story metrics");
10292
10570
  for (const f of batchFilesModified) allFilesModified.add(f);
10293
10571
  if (batchFilesModified.length > 0) batchFileGroups.push({
10294
10572
  batchIndex: batch.batchIndex,
@@ -10310,13 +10588,13 @@ function createImplementationOrchestrator(deps) {
10310
10588
  })
10311
10589
  });
10312
10590
  } catch (tokenErr) {
10313
- logger$31.warn({
10591
+ logger$33.warn({
10314
10592
  storyKey,
10315
10593
  batchIndex: batch.batchIndex,
10316
10594
  err: tokenErr
10317
10595
  }, "Failed to record batch token usage");
10318
10596
  }
10319
- if (batchResult.result === "failed") logger$31.warn({
10597
+ if (batchResult.result === "failed") logger$33.warn({
10320
10598
  storyKey,
10321
10599
  batchIndex: batch.batchIndex,
10322
10600
  error: batchResult.error
@@ -10348,7 +10626,7 @@ function createImplementationOrchestrator(deps) {
10348
10626
  result: devResult
10349
10627
  });
10350
10628
  persistState();
10351
- if (devResult.result === "failed") logger$31.warn("Dev-story reported failure, proceeding to code review", {
10629
+ if (devResult.result === "failed") logger$33.warn("Dev-story reported failure, proceeding to code review", {
10352
10630
  storyKey,
10353
10631
  error: devResult.error,
10354
10632
  filesModified: devFilesModified.length
@@ -10401,7 +10679,7 @@ function createImplementationOrchestrator(deps) {
10401
10679
  "NEEDS_MAJOR_REWORK": 2
10402
10680
  };
10403
10681
  for (const group of batchFileGroups) {
10404
- logger$31.info({
10682
+ logger$33.info({
10405
10683
  storyKey,
10406
10684
  batchIndex: group.batchIndex,
10407
10685
  fileCount: group.files.length
@@ -10437,7 +10715,7 @@ function createImplementationOrchestrator(deps) {
10437
10715
  rawOutput: lastRawOutput,
10438
10716
  tokenUsage: aggregateTokens
10439
10717
  };
10440
- logger$31.info({
10718
+ logger$33.info({
10441
10719
  storyKey,
10442
10720
  batchCount: batchFileGroups.length,
10443
10721
  verdict: worstVerdict,
@@ -10460,7 +10738,7 @@ function createImplementationOrchestrator(deps) {
10460
10738
  const isPhantomReview = reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
10461
10739
  if (isPhantomReview && !timeoutRetried) {
10462
10740
  timeoutRetried = true;
10463
- logger$31.warn({
10741
+ logger$33.warn({
10464
10742
  storyKey,
10465
10743
  reviewCycles,
10466
10744
  error: reviewResult.error
@@ -10470,7 +10748,7 @@ function createImplementationOrchestrator(deps) {
10470
10748
  verdict = reviewResult.verdict;
10471
10749
  issueList = reviewResult.issue_list ?? [];
10472
10750
  if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
10473
- logger$31.info({
10751
+ logger$33.info({
10474
10752
  storyKey,
10475
10753
  originalVerdict: verdict,
10476
10754
  issuesBefore: previousIssueList.length,
@@ -10506,7 +10784,7 @@ function createImplementationOrchestrator(deps) {
10506
10784
  if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
10507
10785
  parts.push(`${fileCount} files`);
10508
10786
  parts.push(`${totalTokensK} tokens`);
10509
- logger$31.info({
10787
+ logger$33.info({
10510
10788
  storyKey,
10511
10789
  verdict,
10512
10790
  agentVerdict: reviewResult.agentVerdict
@@ -10558,7 +10836,7 @@ function createImplementationOrchestrator(deps) {
10558
10836
  persistState();
10559
10837
  return;
10560
10838
  }
10561
- logger$31.info({
10839
+ logger$33.info({
10562
10840
  storyKey,
10563
10841
  reviewCycles: finalReviewCycles,
10564
10842
  issueCount: issueList.length
@@ -10608,7 +10886,7 @@ function createImplementationOrchestrator(deps) {
10608
10886
  fixPrompt = assembled.prompt;
10609
10887
  } catch {
10610
10888
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
10611
- logger$31.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
10889
+ logger$33.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
10612
10890
  }
10613
10891
  const handle = dispatcher.dispatch({
10614
10892
  prompt: fixPrompt,
@@ -10625,9 +10903,9 @@ function createImplementationOrchestrator(deps) {
10625
10903
  output: fixResult.tokenEstimate.output
10626
10904
  } : void 0 }
10627
10905
  });
10628
- if (fixResult.status === "timeout") logger$31.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
10906
+ if (fixResult.status === "timeout") logger$33.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
10629
10907
  } catch (err) {
10630
- logger$31.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
10908
+ logger$33.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
10631
10909
  storyKey,
10632
10910
  err
10633
10911
  });
@@ -10697,7 +10975,7 @@ function createImplementationOrchestrator(deps) {
10697
10975
  fixPrompt = assembled.prompt;
10698
10976
  } catch {
10699
10977
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
10700
- logger$31.warn("Failed to assemble fix prompt, using fallback", {
10978
+ logger$33.warn("Failed to assemble fix prompt, using fallback", {
10701
10979
  storyKey,
10702
10980
  taskType
10703
10981
  });
@@ -10719,7 +10997,7 @@ function createImplementationOrchestrator(deps) {
10719
10997
  } : void 0 }
10720
10998
  });
10721
10999
  if (fixResult.status === "timeout") {
10722
- logger$31.warn("Fix dispatch timed out — escalating story", {
11000
+ logger$33.warn("Fix dispatch timed out — escalating story", {
10723
11001
  storyKey,
10724
11002
  taskType
10725
11003
  });
@@ -10737,13 +11015,13 @@ function createImplementationOrchestrator(deps) {
10737
11015
  persistState();
10738
11016
  return;
10739
11017
  }
10740
- if (fixResult.status === "failed") logger$31.warn("Fix dispatch failed", {
11018
+ if (fixResult.status === "failed") logger$33.warn("Fix dispatch failed", {
10741
11019
  storyKey,
10742
11020
  taskType,
10743
11021
  exitCode: fixResult.exitCode
10744
11022
  });
10745
11023
  } catch (err) {
10746
- logger$31.warn("Fix dispatch failed, continuing to next review", {
11024
+ logger$33.warn("Fix dispatch failed, continuing to next review", {
10747
11025
  storyKey,
10748
11026
  taskType,
10749
11027
  err
@@ -10796,11 +11074,11 @@ function createImplementationOrchestrator(deps) {
10796
11074
  }
10797
11075
  async function run(storyKeys) {
10798
11076
  if (_state === "RUNNING" || _state === "PAUSED") {
10799
- logger$31.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
11077
+ logger$33.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
10800
11078
  return getStatus();
10801
11079
  }
10802
11080
  if (_state === "COMPLETE") {
10803
- logger$31.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
11081
+ logger$33.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
10804
11082
  return getStatus();
10805
11083
  }
10806
11084
  _state = "RUNNING";
@@ -10814,15 +11092,17 @@ function createImplementationOrchestrator(deps) {
10814
11092
  pipelineRunId: config.pipelineRunId
10815
11093
  });
10816
11094
  persistState();
11095
+ recordProgress();
11096
+ startHeartbeat();
10817
11097
  if (projectRoot !== void 0) {
10818
11098
  const seedResult = seedMethodologyContext(db, projectRoot);
10819
- if (seedResult.decisionsCreated > 0) logger$31.info({
11099
+ if (seedResult.decisionsCreated > 0) logger$33.info({
10820
11100
  decisionsCreated: seedResult.decisionsCreated,
10821
11101
  skippedCategories: seedResult.skippedCategories
10822
11102
  }, "Methodology context seeded from planning artifacts");
10823
11103
  }
10824
11104
  const groups = detectConflictGroups(storyKeys);
10825
- logger$31.info("Orchestrator starting", {
11105
+ logger$33.info("Orchestrator starting", {
10826
11106
  storyCount: storyKeys.length,
10827
11107
  groupCount: groups.length,
10828
11108
  maxConcurrency: config.maxConcurrency
@@ -10830,12 +11110,14 @@ function createImplementationOrchestrator(deps) {
10830
11110
  try {
10831
11111
  await runWithConcurrency(groups, config.maxConcurrency);
10832
11112
  } catch (err) {
11113
+ stopHeartbeat();
10833
11114
  _state = "FAILED";
10834
11115
  _completedAt = new Date().toISOString();
10835
11116
  persistState();
10836
- logger$31.error("Orchestrator failed with unhandled error", { err });
11117
+ logger$33.error("Orchestrator failed with unhandled error", { err });
10837
11118
  return getStatus();
10838
11119
  }
11120
+ stopHeartbeat();
10839
11121
  _state = "COMPLETE";
10840
11122
  _completedAt = new Date().toISOString();
10841
11123
  let completed = 0;
@@ -10859,7 +11141,7 @@ function createImplementationOrchestrator(deps) {
10859
11141
  _pauseGate = createPauseGate();
10860
11142
  _state = "PAUSED";
10861
11143
  eventBus.emit("orchestrator:paused", {});
10862
- logger$31.info("Orchestrator paused");
11144
+ logger$33.info("Orchestrator paused");
10863
11145
  }
10864
11146
  function resume() {
10865
11147
  if (_state !== "PAUSED") return;
@@ -10870,7 +11152,7 @@ function createImplementationOrchestrator(deps) {
10870
11152
  }
10871
11153
  _state = "RUNNING";
10872
11154
  eventBus.emit("orchestrator:resumed", {});
10873
- logger$31.info("Orchestrator resumed");
11155
+ logger$33.info("Orchestrator resumed");
10874
11156
  }
10875
11157
  return {
10876
11158
  run,
@@ -11340,53 +11622,479 @@ function createPhaseOrchestrator(deps) {
11340
11622
  }
11341
11623
 
11342
11624
  //#endregion
11343
- //#region src/modules/phase-orchestrator/phases/schemas.ts
11344
- /**
11345
- * Zod schema for the ProductBrief structure emitted by the analysis agent.
11346
- * Validates that all required fields are present and non-empty.
11347
- */
11348
- const ProductBriefSchema = z.object({
11349
- problem_statement: z.string().min(10),
11350
- target_users: z.array(z.string().min(1)).min(1),
11351
- core_features: z.array(z.string().min(1)).min(1),
11352
- success_metrics: z.array(z.string().min(1)).min(1),
11353
- constraints: z.array(z.string()).default([])
11354
- });
11625
+ //#region src/modules/phase-orchestrator/budget-utils.ts
11355
11626
  /**
11356
- * Zod schema for the full YAML output emitted by the analysis agent.
11357
- * The agent must emit a YAML block with `result` and `product_brief` fields.
11358
- */
11359
- const AnalysisOutputSchema = z.object({
11360
- result: z.enum(["success", "failed"]),
11361
- product_brief: ProductBriefSchema
11362
- });
11363
- /**
11364
- * Zod schema for a single functional requirement.
11627
+ * Shared utilities for dynamic prompt token budget calculation
11628
+ * and decision summarization.
11629
+ *
11630
+ * Extracted from phases/solutioning.ts to avoid inappropriate dependency
11631
+ * direction (step-runner.ts importing from a phase-specific module).
11365
11632
  */
11366
- const FunctionalRequirementSchema = z.object({
11367
- description: z.string().min(5),
11368
- priority: z.enum([
11369
- "must",
11370
- "should",
11371
- "could"
11372
- ]).default("must")
11373
- });
11633
+ /** Absolute maximum prompt tokens (model context safety margin) */
11634
+ const ABSOLUTE_MAX_PROMPT_TOKENS = 12e3;
11635
+ /** Additional tokens per architecture decision injected into story generation prompt */
11636
+ const TOKENS_PER_DECISION = 100;
11637
+ /** Priority order for decision categories when summarizing (higher priority kept first) */
11638
+ const DECISION_CATEGORY_PRIORITY = [
11639
+ "data",
11640
+ "auth",
11641
+ "api",
11642
+ "frontend",
11643
+ "infra",
11644
+ "observability",
11645
+ "ci"
11646
+ ];
11374
11647
  /**
11375
- * Zod schema for a single non-functional requirement.
11648
+ * Calculate the dynamic prompt token budget based on the number of decisions
11649
+ * that will be injected into the prompt.
11650
+ *
11651
+ * Formula: base_budget + (decision_count * tokens_per_decision)
11652
+ * Capped at ABSOLUTE_MAX_PROMPT_TOKENS.
11653
+ *
11654
+ * @param baseBudget - Base token budget for the phase
11655
+ * @param decisionCount - Number of decisions to inject
11656
+ * @returns Calculated token budget, capped at ABSOLUTE_MAX_PROMPT_TOKENS
11376
11657
  */
11377
- const NonFunctionalRequirementSchema = z.object({
11378
- description: z.string().min(5),
11379
- category: z.string().min(1)
11380
- });
11658
+ function calculateDynamicBudget(baseBudget, decisionCount) {
11659
+ const budget = baseBudget + decisionCount * TOKENS_PER_DECISION;
11660
+ return Math.min(budget, ABSOLUTE_MAX_PROMPT_TOKENS);
11661
+ }
11381
11662
  /**
11382
- * Zod schema for a single user story.
11663
+ * Summarize architecture decisions into compact key:value one-liners,
11664
+ * dropping rationale and optionally dropping lower-priority categories
11665
+ * to fit within a character budget.
11666
+ *
11667
+ * Strategy:
11668
+ * 1. Sort decisions by priority (known categories first, then alphabetical)
11669
+ * 2. For each decision, produce a compact `key: value` one-liner (drop rationale)
11670
+ * 3. If still over budget, drop lower-priority categories
11671
+ * 4. Return the compact summary string
11672
+ *
11673
+ * @param decisions - Full architecture decisions from the decision store
11674
+ * @param maxChars - Maximum character budget for the summarized output
11675
+ * @returns Compact summary string
11383
11676
  */
11384
- const UserStorySchema = z.object({
11385
- title: z.string().min(3),
11386
- description: z.string().min(5)
11387
- });
11388
- /**
11389
- * Zod schema for the full YAML output emitted by the planning agent.
11677
+ function summarizeDecisions(decisions, maxChars) {
11678
+ const sorted = [...decisions].sort((a, b) => {
11679
+ const aCat = (a.category ?? "").toLowerCase();
11680
+ const bCat = (b.category ?? "").toLowerCase();
11681
+ const aIdx = DECISION_CATEGORY_PRIORITY.indexOf(aCat);
11682
+ const bIdx = DECISION_CATEGORY_PRIORITY.indexOf(bCat);
11683
+ const aPri = aIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : aIdx;
11684
+ const bPri = bIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : bIdx;
11685
+ return aPri - bPri;
11686
+ });
11687
+ const header = "## Architecture Decisions (Summarized)";
11688
+ const lines = [header];
11689
+ let currentLength = header.length;
11690
+ for (const d of sorted) {
11691
+ const truncatedValue = d.value.length > 120 ? d.value.slice(0, 117) + "..." : d.value;
11692
+ const line = `- ${d.key}: ${truncatedValue}`;
11693
+ if (currentLength + line.length + 1 > maxChars) break;
11694
+ lines.push(line);
11695
+ currentLength += line.length + 1;
11696
+ }
11697
+ return lines.join("\n");
11698
+ }
11699
+
11700
+ //#endregion
11701
+ //#region src/modules/phase-orchestrator/step-runner.ts
11702
+ const logger$5 = createLogger("step-runner");
11703
+ /**
11704
+ * Format an array of decision records into a markdown section for injection.
11705
+ *
11706
+ * @param decisions - Decision records from the store
11707
+ * @param sectionTitle - Title for the markdown section
11708
+ * @returns Formatted markdown string
11709
+ */
11710
+ function formatDecisionsForInjection(decisions, sectionTitle) {
11711
+ if (decisions.length === 0) return "";
11712
+ const parts = [];
11713
+ if (sectionTitle) parts.push(`## ${sectionTitle}`);
11714
+ for (const d of decisions) {
11715
+ const rationale = d.rationale ? ` (${d.rationale})` : "";
11716
+ try {
11717
+ const parsed = JSON.parse(d.value);
11718
+ if (Array.isArray(parsed)) {
11719
+ parts.push(`### ${d.key.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase())}`);
11720
+ for (const item of parsed) parts.push(`- ${String(item)}`);
11721
+ } else if (typeof parsed === "object" && parsed !== null) parts.push(`- **${d.key}**: ${JSON.stringify(parsed)}${rationale}`);
11722
+ else parts.push(`- **${d.key}**: ${String(parsed)}${rationale}`);
11723
+ } catch {
11724
+ parts.push(`- **${d.key}**: ${d.value}${rationale}`);
11725
+ }
11726
+ }
11727
+ return parts.join("\n");
11728
+ }
11729
+ /**
11730
+ * Resolve a single context reference to a string value.
11731
+ *
11732
+ * @param ref - The context reference to resolve
11733
+ * @param deps - Phase dependencies (for DB access)
11734
+ * @param runId - Pipeline run ID
11735
+ * @param params - Runtime parameters map
11736
+ * @param stepOutputs - Map of step name → raw parsed output from prior steps
11737
+ * @returns Resolved string value
11738
+ */
11739
+ function resolveContext(ref, deps, runId, params, stepOutputs) {
11740
+ const { source } = ref;
11741
+ if (source.startsWith("param:")) {
11742
+ const key = source.slice(6);
11743
+ return params[key] ?? "";
11744
+ }
11745
+ if (source.startsWith("decision:")) {
11746
+ const path$1 = source.slice(9);
11747
+ const [phase, category] = path$1.split(".");
11748
+ if (!phase || !category) return "";
11749
+ const decisions = getDecisionsByPhaseForRun(deps.db, runId, phase);
11750
+ const filtered = decisions.filter((d) => d.category === category);
11751
+ return formatDecisionsForInjection(filtered.map((d) => ({
11752
+ key: d.key,
11753
+ value: d.value,
11754
+ rationale: d.rationale ?? null
11755
+ })), category.replace(/-/g, " ").replace(/\b\w/g, (c) => c.toUpperCase()));
11756
+ }
11757
+ if (source.startsWith("step:")) {
11758
+ const stepName = source.slice(5);
11759
+ const output = stepOutputs.get(stepName);
11760
+ if (!output) return "";
11761
+ const parts = [];
11762
+ for (const [key, value] of Object.entries(output)) {
11763
+ if (key === "result") continue;
11764
+ if (Array.isArray(value)) {
11765
+ parts.push(`### ${key.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase())}`);
11766
+ for (const item of value) if (typeof item === "object" && item !== null) parts.push(`- ${JSON.stringify(item)}`);
11767
+ else parts.push(`- ${String(item)}`);
11768
+ } else if (typeof value === "object" && value !== null) parts.push(`- **${key}**: ${JSON.stringify(value)}`);
11769
+ else parts.push(`- **${key}**: ${String(value)}`);
11770
+ }
11771
+ return parts.join("\n");
11772
+ }
11773
+ return "";
11774
+ }
11775
+ /**
11776
+ * Execute a sequence of steps, accumulating context and persisting results.
11777
+ *
11778
+ * Halts on the first step that fails. Each step's output is available to
11779
+ * subsequent steps via the stepOutputs map and decision store.
11780
+ *
11781
+ * @param steps - Ordered list of step definitions to execute
11782
+ * @param deps - Shared phase dependencies
11783
+ * @param runId - Pipeline run ID
11784
+ * @param phase - Phase name (for decision store persistence)
11785
+ * @param params - Runtime parameters map (concept, product_brief, etc.)
11786
+ * @returns Aggregated multi-step result
11787
+ */
11788
+ async function runSteps(steps, deps, runId, phase, params) {
11789
+ const stepResults = [];
11790
+ const stepOutputs = new Map();
11791
+ let totalInput = 0;
11792
+ let totalOutput = 0;
11793
+ for (const step of steps) try {
11794
+ const template = await deps.pack.getPrompt(step.name);
11795
+ let prompt = template;
11796
+ for (const ref of step.context) {
11797
+ const value = resolveContext(ref, deps, runId, params, stepOutputs);
11798
+ prompt = prompt.replace(`{{${ref.placeholder}}}`, value);
11799
+ }
11800
+ const allDecisions = getDecisionsByPhaseForRun(deps.db, runId, phase);
11801
+ const budgetTokens = calculateDynamicBudget(4e3, allDecisions.length);
11802
+ let estimatedTokens = Math.ceil(prompt.length / 4);
11803
+ if (estimatedTokens > budgetTokens) {
11804
+ const decisionRefs = step.context.filter((ref) => ref.source.startsWith("decision:"));
11805
+ if (decisionRefs.length > 0) {
11806
+ logger$5.warn({
11807
+ step: step.name,
11808
+ estimatedTokens,
11809
+ budgetTokens
11810
+ }, "Prompt exceeds budget — attempting decision summarization");
11811
+ let summarizedPrompt = template;
11812
+ for (const ref of step.context) {
11813
+ let value;
11814
+ if (ref.source.startsWith("decision:")) {
11815
+ const path$1 = ref.source.slice(9);
11816
+ const [decPhase, decCategory] = path$1.split(".");
11817
+ if (decPhase && decCategory) {
11818
+ const decisions = getDecisionsByPhaseForRun(deps.db, runId, decPhase);
11819
+ const filtered = decisions.filter((d) => d.category === decCategory);
11820
+ const budgetChars = budgetTokens * 4;
11821
+ const availableChars = Math.max(200, Math.floor(budgetChars / decisionRefs.length));
11822
+ value = summarizeDecisions(filtered.map((d) => ({
11823
+ key: d.key,
11824
+ value: d.value,
11825
+ category: d.category
11826
+ })), availableChars);
11827
+ } else value = resolveContext(ref, deps, runId, params, stepOutputs);
11828
+ } else value = resolveContext(ref, deps, runId, params, stepOutputs);
11829
+ summarizedPrompt = summarizedPrompt.replace(`{{${ref.placeholder}}}`, value);
11830
+ }
11831
+ prompt = summarizedPrompt;
11832
+ estimatedTokens = Math.ceil(prompt.length / 4);
11833
+ if (estimatedTokens <= budgetTokens) logger$5.info({
11834
+ step: step.name,
11835
+ estimatedTokens,
11836
+ budgetTokens
11837
+ }, "Decision summarization brought prompt within budget");
11838
+ }
11839
+ if (estimatedTokens > budgetTokens) {
11840
+ const errorMsg = `Step '${step.name}' prompt exceeds token budget after summarization: ${estimatedTokens} tokens (max ${budgetTokens})`;
11841
+ stepResults.push({
11842
+ name: step.name,
11843
+ success: false,
11844
+ parsed: null,
11845
+ error: errorMsg,
11846
+ tokenUsage: {
11847
+ input: 0,
11848
+ output: 0
11849
+ }
11850
+ });
11851
+ return {
11852
+ success: false,
11853
+ steps: stepResults,
11854
+ tokenUsage: {
11855
+ input: totalInput,
11856
+ output: totalOutput
11857
+ },
11858
+ error: errorMsg
11859
+ };
11860
+ }
11861
+ }
11862
+ const handle = deps.dispatcher.dispatch({
11863
+ prompt,
11864
+ agent: "claude-code",
11865
+ taskType: step.taskType,
11866
+ outputSchema: step.outputSchema
11867
+ });
11868
+ const dispatchResult = await handle.result;
11869
+ const tokenUsage = {
11870
+ input: dispatchResult.tokenEstimate.input,
11871
+ output: dispatchResult.tokenEstimate.output
11872
+ };
11873
+ totalInput += tokenUsage.input;
11874
+ totalOutput += tokenUsage.output;
11875
+ if (dispatchResult.status === "timeout") {
11876
+ const errorMsg = `Step '${step.name}' timed out after ${dispatchResult.durationMs}ms`;
11877
+ stepResults.push({
11878
+ name: step.name,
11879
+ success: false,
11880
+ parsed: null,
11881
+ error: errorMsg,
11882
+ tokenUsage
11883
+ });
11884
+ return {
11885
+ success: false,
11886
+ steps: stepResults,
11887
+ tokenUsage: {
11888
+ input: totalInput,
11889
+ output: totalOutput
11890
+ },
11891
+ error: errorMsg
11892
+ };
11893
+ }
11894
+ if (dispatchResult.status === "failed") {
11895
+ const errorMsg = `Step '${step.name}' dispatch failed: ${dispatchResult.parseError ?? dispatchResult.output}`;
11896
+ stepResults.push({
11897
+ name: step.name,
11898
+ success: false,
11899
+ parsed: null,
11900
+ error: errorMsg,
11901
+ tokenUsage
11902
+ });
11903
+ return {
11904
+ success: false,
11905
+ steps: stepResults,
11906
+ tokenUsage: {
11907
+ input: totalInput,
11908
+ output: totalOutput
11909
+ },
11910
+ error: errorMsg
11911
+ };
11912
+ }
11913
+ if (dispatchResult.parsed === null || dispatchResult.parseError !== null) {
11914
+ const errorMsg = `Step '${step.name}' schema validation failed: ${dispatchResult.parseError ?? "No parsed output"}`;
11915
+ stepResults.push({
11916
+ name: step.name,
11917
+ success: false,
11918
+ parsed: null,
11919
+ error: errorMsg,
11920
+ tokenUsage
11921
+ });
11922
+ return {
11923
+ success: false,
11924
+ steps: stepResults,
11925
+ tokenUsage: {
11926
+ input: totalInput,
11927
+ output: totalOutput
11928
+ },
11929
+ error: errorMsg
11930
+ };
11931
+ }
11932
+ const parsed = dispatchResult.parsed;
11933
+ if (parsed.result === "failed") {
11934
+ const errorMsg = `Step '${step.name}' agent reported failure`;
11935
+ stepResults.push({
11936
+ name: step.name,
11937
+ success: false,
11938
+ parsed: null,
11939
+ error: errorMsg,
11940
+ tokenUsage
11941
+ });
11942
+ return {
11943
+ success: false,
11944
+ steps: stepResults,
11945
+ tokenUsage: {
11946
+ input: totalInput,
11947
+ output: totalOutput
11948
+ },
11949
+ error: errorMsg
11950
+ };
11951
+ }
11952
+ stepOutputs.set(step.name, parsed);
11953
+ for (const mapping of step.persist) {
11954
+ const fieldValue = parsed[mapping.field];
11955
+ if (fieldValue === void 0) continue;
11956
+ if (mapping.key === "array" && Array.isArray(fieldValue)) for (const [index, item] of fieldValue.entries()) upsertDecision(deps.db, {
11957
+ pipeline_run_id: runId,
11958
+ phase,
11959
+ category: mapping.category,
11960
+ key: `${step.name}-${index}`,
11961
+ value: typeof item === "object" ? JSON.stringify(item) : String(item)
11962
+ });
11963
+ else if (typeof fieldValue === "object" && fieldValue !== null) upsertDecision(deps.db, {
11964
+ pipeline_run_id: runId,
11965
+ phase,
11966
+ category: mapping.category,
11967
+ key: mapping.key,
11968
+ value: JSON.stringify(fieldValue)
11969
+ });
11970
+ else upsertDecision(deps.db, {
11971
+ pipeline_run_id: runId,
11972
+ phase,
11973
+ category: mapping.category,
11974
+ key: mapping.key,
11975
+ value: String(fieldValue)
11976
+ });
11977
+ }
11978
+ let artifactId;
11979
+ if (step.registerArtifact) {
11980
+ const artifact = registerArtifact(deps.db, {
11981
+ pipeline_run_id: runId,
11982
+ phase,
11983
+ type: step.registerArtifact.type,
11984
+ path: step.registerArtifact.path,
11985
+ summary: step.registerArtifact.summarize(parsed)
11986
+ });
11987
+ artifactId = artifact.id;
11988
+ }
11989
+ const stepResult = {
11990
+ name: step.name,
11991
+ success: true,
11992
+ parsed,
11993
+ error: null,
11994
+ tokenUsage
11995
+ };
11996
+ if (artifactId !== void 0) stepResult.artifactId = artifactId;
11997
+ stepResults.push(stepResult);
11998
+ } catch (err) {
11999
+ const message = err instanceof Error ? err.message : String(err);
12000
+ const errorMsg = `Step '${step.name}' unexpected error: ${message}`;
12001
+ stepResults.push({
12002
+ name: step.name,
12003
+ success: false,
12004
+ parsed: null,
12005
+ error: errorMsg,
12006
+ tokenUsage: {
12007
+ input: 0,
12008
+ output: 0
12009
+ }
12010
+ });
12011
+ return {
12012
+ success: false,
12013
+ steps: stepResults,
12014
+ tokenUsage: {
12015
+ input: totalInput,
12016
+ output: totalOutput
12017
+ },
12018
+ error: errorMsg
12019
+ };
12020
+ }
12021
+ return {
12022
+ success: true,
12023
+ steps: stepResults,
12024
+ tokenUsage: {
12025
+ input: totalInput,
12026
+ output: totalOutput
12027
+ }
12028
+ };
12029
+ }
12030
+
12031
+ //#endregion
12032
+ //#region src/modules/phase-orchestrator/phases/schemas.ts
12033
+ /**
12034
+ * Zod schema for the ProductBrief structure emitted by the analysis agent.
12035
+ * Validates that all required fields are present and non-empty.
12036
+ */
12037
+ const ProductBriefSchema = z.object({
12038
+ problem_statement: z.string().min(10),
12039
+ target_users: z.array(z.string().min(1)).min(1),
12040
+ core_features: z.array(z.string().min(1)).min(1),
12041
+ success_metrics: z.array(z.string().min(1)).min(1),
12042
+ constraints: z.array(z.string()).default([])
12043
+ });
12044
+ /**
12045
+ * Zod schema for the full YAML output emitted by the analysis agent.
12046
+ * The agent must emit a YAML block with `result` and `product_brief` fields.
12047
+ */
12048
+ const AnalysisOutputSchema = z.object({
12049
+ result: z.enum(["success", "failed"]),
12050
+ product_brief: ProductBriefSchema
12051
+ });
12052
+ /**
12053
+ * Step 1 output: Vision & problem analysis.
12054
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12055
+ */
12056
+ const AnalysisVisionOutputSchema = z.object({
12057
+ result: z.enum(["success", "failed"]),
12058
+ problem_statement: z.string().min(10).optional(),
12059
+ target_users: z.array(z.string().min(1)).min(1).optional()
12060
+ });
12061
+ /**
12062
+ * Step 2 output: Scope & features (builds on vision output).
12063
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12064
+ */
12065
+ const AnalysisScopeOutputSchema = z.object({
12066
+ result: z.enum(["success", "failed"]),
12067
+ core_features: z.array(z.string().min(1)).min(1).optional(),
12068
+ success_metrics: z.array(z.string().min(1)).min(1).optional(),
12069
+ constraints: z.array(z.string()).default([])
12070
+ });
12071
+ /**
12072
+ * Zod schema for a single functional requirement.
12073
+ */
12074
+ const FunctionalRequirementSchema = z.object({
12075
+ description: z.string().min(5),
12076
+ priority: z.enum([
12077
+ "must",
12078
+ "should",
12079
+ "could"
12080
+ ]).default("must")
12081
+ });
12082
+ /**
12083
+ * Zod schema for a single non-functional requirement.
12084
+ */
12085
+ const NonFunctionalRequirementSchema = z.object({
12086
+ description: z.string().min(5),
12087
+ category: z.string().min(1)
12088
+ });
12089
+ /**
12090
+ * Zod schema for a single user story.
12091
+ */
12092
+ const UserStorySchema = z.object({
12093
+ title: z.string().min(3),
12094
+ description: z.string().min(5)
12095
+ });
12096
+ /**
12097
+ * Zod schema for the full YAML output emitted by the planning agent.
11390
12098
  * The agent must emit a YAML block with all PRD fields.
11391
12099
  */
11392
12100
  const PlanningOutputSchema = z.object({
@@ -11399,6 +12107,36 @@ const PlanningOutputSchema = z.object({
11399
12107
  out_of_scope: z.array(z.string()).default([])
11400
12108
  });
11401
12109
  /**
12110
+ * Step 1 output: Project classification & vision.
12111
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12112
+ */
12113
+ const PlanningClassificationOutputSchema = z.object({
12114
+ result: z.enum(["success", "failed"]),
12115
+ project_type: z.string().min(1).optional(),
12116
+ vision: z.string().min(10).optional(),
12117
+ key_goals: z.array(z.string().min(1)).min(1).optional()
12118
+ });
12119
+ /**
12120
+ * Step 2 output: Functional requirements & user stories.
12121
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12122
+ */
12123
+ const PlanningFRsOutputSchema = z.object({
12124
+ result: z.enum(["success", "failed"]),
12125
+ functional_requirements: z.array(FunctionalRequirementSchema).min(3).optional(),
12126
+ user_stories: z.array(UserStorySchema).min(1).optional()
12127
+ });
12128
+ /**
12129
+ * Step 3 output: NFRs, tech stack, domain model, out-of-scope.
12130
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12131
+ */
12132
+ const PlanningNFRsOutputSchema = z.object({
12133
+ result: z.enum(["success", "failed"]),
12134
+ non_functional_requirements: z.array(NonFunctionalRequirementSchema).min(2).optional(),
12135
+ tech_stack: z.record(z.string(), z.string()).optional(),
12136
+ domain_model: z.record(z.string(), z.unknown()).optional(),
12137
+ out_of_scope: z.array(z.string()).default([])
12138
+ });
12139
+ /**
11402
12140
  * Zod schema for a single architecture decision emitted by the architecture agent.
11403
12141
  */
11404
12142
  const ArchitectureDecisionSchema = z.object({
@@ -11443,6 +12181,26 @@ const StoryGenerationOutputSchema = z.object({
11443
12181
  result: z.enum(["success", "failed"]),
11444
12182
  epics: z.array(EpicDefinitionSchema).min(1)
11445
12183
  });
12184
+ /**
12185
+ * Architecture Step 1 output: Context analysis — initial architecture decisions.
12186
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12187
+ */
12188
+ const ArchContextOutputSchema = z.object({
12189
+ result: z.enum(["success", "failed"]),
12190
+ architecture_decisions: z.array(ArchitectureDecisionSchema).min(1).optional()
12191
+ });
12192
+ /**
12193
+ * Epic Design Step output: Epic structure with FR coverage mapping.
12194
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12195
+ */
12196
+ const EpicDesignOutputSchema = z.object({
12197
+ result: z.enum(["success", "failed"]),
12198
+ epics: z.array(z.object({
12199
+ title: z.string().min(3),
12200
+ description: z.string().min(5),
12201
+ fr_coverage: z.array(z.string()).default([])
12202
+ })).min(1).optional()
12203
+ });
11446
12204
 
11447
12205
  //#endregion
11448
12206
  //#region src/modules/phase-orchestrator/phases/analysis.ts
@@ -11468,11 +12226,120 @@ const BRIEF_FIELDS$1 = [
11468
12226
  "constraints"
11469
12227
  ];
11470
12228
  /**
12229
+ * Build step definitions for 2-step analysis decomposition.
12230
+ */
12231
+ function buildAnalysisSteps() {
12232
+ return [{
12233
+ name: "analysis-step-1-vision",
12234
+ taskType: "analysis-vision",
12235
+ outputSchema: AnalysisVisionOutputSchema,
12236
+ context: [{
12237
+ placeholder: "concept",
12238
+ source: "param:concept"
12239
+ }],
12240
+ persist: [{
12241
+ field: "problem_statement",
12242
+ category: "product-brief",
12243
+ key: "problem_statement"
12244
+ }, {
12245
+ field: "target_users",
12246
+ category: "product-brief",
12247
+ key: "target_users"
12248
+ }]
12249
+ }, {
12250
+ name: "analysis-step-2-scope",
12251
+ taskType: "analysis-scope",
12252
+ outputSchema: AnalysisScopeOutputSchema,
12253
+ context: [{
12254
+ placeholder: "concept",
12255
+ source: "param:concept"
12256
+ }, {
12257
+ placeholder: "vision_output",
12258
+ source: "step:analysis-step-1-vision"
12259
+ }],
12260
+ persist: [
12261
+ {
12262
+ field: "core_features",
12263
+ category: "product-brief",
12264
+ key: "core_features"
12265
+ },
12266
+ {
12267
+ field: "success_metrics",
12268
+ category: "product-brief",
12269
+ key: "success_metrics"
12270
+ },
12271
+ {
12272
+ field: "constraints",
12273
+ category: "product-brief",
12274
+ key: "constraints"
12275
+ }
12276
+ ],
12277
+ registerArtifact: {
12278
+ type: "product-brief",
12279
+ path: "decision-store://analysis/product-brief",
12280
+ summarize: (parsed) => {
12281
+ const features = parsed.core_features;
12282
+ return features ? `${features.length} core features defined` : "Product brief complete";
12283
+ }
12284
+ }
12285
+ }];
12286
+ }
12287
+ /**
12288
+ * Run analysis phase using multi-step decomposition (2 steps).
12289
+ */
12290
+ async function runAnalysisMultiStep(deps, params) {
12291
+ const zeroTokenUsage = {
12292
+ input: 0,
12293
+ output: 0
12294
+ };
12295
+ try {
12296
+ let effectiveConcept = params.concept;
12297
+ if (params.concept.length > MAX_CONCEPT_CHARS) effectiveConcept = params.concept.slice(0, MAX_CONCEPT_CHARS) + "...";
12298
+ const steps = buildAnalysisSteps();
12299
+ const result = await runSteps(steps, deps, params.runId, "analysis", { concept: effectiveConcept });
12300
+ if (!result.success) return {
12301
+ result: "failed",
12302
+ error: result.error ?? "multi_step_failed",
12303
+ details: result.error ?? "Multi-step analysis failed",
12304
+ tokenUsage: result.tokenUsage
12305
+ };
12306
+ const visionOutput = result.steps[0]?.parsed;
12307
+ const scopeOutput = result.steps[1]?.parsed;
12308
+ if (!visionOutput || !scopeOutput) return {
12309
+ result: "failed",
12310
+ error: "incomplete_steps",
12311
+ details: "Not all analysis steps produced output",
12312
+ tokenUsage: result.tokenUsage
12313
+ };
12314
+ const brief = {
12315
+ problem_statement: visionOutput.problem_statement,
12316
+ target_users: visionOutput.target_users,
12317
+ core_features: scopeOutput.core_features,
12318
+ success_metrics: scopeOutput.success_metrics,
12319
+ constraints: scopeOutput.constraints ?? []
12320
+ };
12321
+ const analysisResult = {
12322
+ result: "success",
12323
+ product_brief: brief,
12324
+ tokenUsage: result.tokenUsage
12325
+ };
12326
+ const artifactId = result.steps[1]?.artifactId;
12327
+ if (artifactId !== void 0) analysisResult.artifact_id = artifactId;
12328
+ return analysisResult;
12329
+ } catch (err) {
12330
+ const message = err instanceof Error ? err.message : String(err);
12331
+ return {
12332
+ result: "failed",
12333
+ error: message,
12334
+ tokenUsage: zeroTokenUsage
12335
+ };
12336
+ }
12337
+ }
12338
+ /**
11471
12339
  * Execute the analysis phase of the BMAD pipeline.
11472
12340
  *
11473
- * Retrieves the compiled analysis prompt, injects the user concept,
11474
- * dispatches to a claude-code agent, validates the output, and persists
11475
- * the product brief to the decision store.
12341
+ * If the manifest defines steps for the analysis phase, uses multi-step
12342
+ * decomposition. Otherwise falls back to the single-dispatch code path.
11476
12343
  *
11477
12344
  * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
11478
12345
  * @param params - Phase parameters (runId, concept)
@@ -11481,6 +12348,8 @@ const BRIEF_FIELDS$1 = [
11481
12348
  async function runAnalysisPhase(deps, params) {
11482
12349
  const { db, pack, dispatcher } = deps;
11483
12350
  const { runId, concept, amendmentContext } = params;
12351
+ const analysisPhase = pack.manifest.phases?.find((p) => p.name === "analysis");
12352
+ if (analysisPhase?.steps && analysisPhase.steps.length > 0 && !amendmentContext) return runAnalysisMultiStep(deps, params);
11484
12353
  const zeroTokenUsage = {
11485
12354
  input: 0,
11486
12355
  output: 0
@@ -11625,11 +12494,199 @@ function formatProductBriefFromDecisions(decisions) {
11625
12494
  return parts.join("\n\n");
11626
12495
  }
11627
12496
  /**
12497
+ * Build step definitions for 3-step planning decomposition.
12498
+ */
12499
+ function buildPlanningSteps() {
12500
+ return [
12501
+ {
12502
+ name: "planning-step-1-classification",
12503
+ taskType: "planning-classification",
12504
+ outputSchema: PlanningClassificationOutputSchema,
12505
+ context: [{
12506
+ placeholder: "product_brief",
12507
+ source: "decision:analysis.product-brief"
12508
+ }],
12509
+ persist: [
12510
+ {
12511
+ field: "project_type",
12512
+ category: "classification",
12513
+ key: "project_type"
12514
+ },
12515
+ {
12516
+ field: "vision",
12517
+ category: "classification",
12518
+ key: "vision"
12519
+ },
12520
+ {
12521
+ field: "key_goals",
12522
+ category: "classification",
12523
+ key: "key_goals"
12524
+ }
12525
+ ]
12526
+ },
12527
+ {
12528
+ name: "planning-step-2-frs",
12529
+ taskType: "planning-frs",
12530
+ outputSchema: PlanningFRsOutputSchema,
12531
+ context: [{
12532
+ placeholder: "product_brief",
12533
+ source: "decision:analysis.product-brief"
12534
+ }, {
12535
+ placeholder: "classification",
12536
+ source: "step:planning-step-1-classification"
12537
+ }],
12538
+ persist: [{
12539
+ field: "functional_requirements",
12540
+ category: "functional-requirements",
12541
+ key: "array"
12542
+ }, {
12543
+ field: "user_stories",
12544
+ category: "user-stories",
12545
+ key: "array"
12546
+ }]
12547
+ },
12548
+ {
12549
+ name: "planning-step-3-nfrs",
12550
+ taskType: "planning-nfrs",
12551
+ outputSchema: PlanningNFRsOutputSchema,
12552
+ context: [
12553
+ {
12554
+ placeholder: "product_brief",
12555
+ source: "decision:analysis.product-brief"
12556
+ },
12557
+ {
12558
+ placeholder: "classification",
12559
+ source: "step:planning-step-1-classification"
12560
+ },
12561
+ {
12562
+ placeholder: "functional_requirements",
12563
+ source: "step:planning-step-2-frs"
12564
+ }
12565
+ ],
12566
+ persist: [
12567
+ {
12568
+ field: "non_functional_requirements",
12569
+ category: "non-functional-requirements",
12570
+ key: "array"
12571
+ },
12572
+ {
12573
+ field: "tech_stack",
12574
+ category: "tech-stack",
12575
+ key: "tech_stack"
12576
+ },
12577
+ {
12578
+ field: "domain_model",
12579
+ category: "domain-model",
12580
+ key: "entities"
12581
+ },
12582
+ {
12583
+ field: "out_of_scope",
12584
+ category: "out-of-scope",
12585
+ key: "items"
12586
+ }
12587
+ ],
12588
+ registerArtifact: {
12589
+ type: "prd",
12590
+ path: "decision-store://planning/prd",
12591
+ summarize: (parsed) => {
12592
+ const nfrs = parsed.non_functional_requirements;
12593
+ return `Planning complete: ${nfrs?.length ?? 0} NFRs, tech stack defined`;
12594
+ }
12595
+ }
12596
+ }
12597
+ ];
12598
+ }
12599
+ /**
12600
+ * Run planning phase using multi-step decomposition (3 steps).
12601
+ */
12602
+ async function runPlanningMultiStep(deps, params) {
12603
+ const { db, runId } = {
12604
+ db: deps.db,
12605
+ runId: params.runId
12606
+ };
12607
+ const zeroTokenUsage = {
12608
+ input: 0,
12609
+ output: 0
12610
+ };
12611
+ try {
12612
+ const allAnalysisDecisions = getDecisionsByPhaseForRun(db, runId, "analysis");
12613
+ const productBriefDecisions = allAnalysisDecisions.filter((d) => d.category === "product-brief");
12614
+ if (productBriefDecisions.length === 0) return {
12615
+ result: "failed",
12616
+ error: "missing_product_brief",
12617
+ details: "No product brief decisions found in the analysis phase.",
12618
+ tokenUsage: zeroTokenUsage
12619
+ };
12620
+ const steps = buildPlanningSteps();
12621
+ const result = await runSteps(steps, deps, params.runId, "planning", {});
12622
+ if (!result.success) return {
12623
+ result: "failed",
12624
+ error: result.error ?? "multi_step_failed",
12625
+ details: result.error ?? "Multi-step planning failed",
12626
+ tokenUsage: result.tokenUsage
12627
+ };
12628
+ const frsOutput = result.steps[1]?.parsed;
12629
+ const nfrsOutput = result.steps[2]?.parsed;
12630
+ if (!frsOutput || !nfrsOutput) return {
12631
+ result: "failed",
12632
+ error: "incomplete_steps",
12633
+ details: "Not all planning steps produced output",
12634
+ tokenUsage: result.tokenUsage
12635
+ };
12636
+ const frs = frsOutput.functional_requirements;
12637
+ const nfrs = nfrsOutput.non_functional_requirements;
12638
+ const userStories = frsOutput.user_stories;
12639
+ if (!frs?.length) return {
12640
+ result: "failed",
12641
+ error: "missing_functional_requirements",
12642
+ details: "FRs step did not return functional_requirements",
12643
+ tokenUsage: result.tokenUsage
12644
+ };
12645
+ if (!nfrs?.length) return {
12646
+ result: "failed",
12647
+ error: "missing_non_functional_requirements",
12648
+ details: "NFRs step did not return non_functional_requirements",
12649
+ tokenUsage: result.tokenUsage
12650
+ };
12651
+ for (const fr of frs) createRequirement(db, {
12652
+ pipeline_run_id: params.runId,
12653
+ source: "planning-phase",
12654
+ type: "functional",
12655
+ description: fr.description,
12656
+ priority: fr.priority
12657
+ });
12658
+ for (const nfr of nfrs) createRequirement(db, {
12659
+ pipeline_run_id: params.runId,
12660
+ source: "planning-phase",
12661
+ type: "non_functional",
12662
+ description: nfr.description,
12663
+ priority: "should"
12664
+ });
12665
+ const requirementsCount = frs.length + nfrs.length;
12666
+ const userStoriesCount = userStories?.length ?? 0;
12667
+ const planningResult = {
12668
+ result: "success",
12669
+ requirements_count: requirementsCount,
12670
+ user_stories_count: userStoriesCount,
12671
+ tokenUsage: result.tokenUsage
12672
+ };
12673
+ const artifactId = result.steps[2]?.artifactId;
12674
+ if (artifactId !== void 0) planningResult.artifact_id = artifactId;
12675
+ return planningResult;
12676
+ } catch (err) {
12677
+ const message = err instanceof Error ? err.message : String(err);
12678
+ return {
12679
+ result: "failed",
12680
+ error: message,
12681
+ tokenUsage: zeroTokenUsage
12682
+ };
12683
+ }
12684
+ }
12685
+ /**
11628
12686
  * Execute the planning phase of the BMAD pipeline.
11629
12687
  *
11630
- * Retrieves the compiled planning prompt, injects the product brief from the
11631
- * analysis phase decision store, dispatches to a claude-code agent, validates
11632
- * the output, creates requirement records, and persists planning decisions.
12688
+ * If the manifest defines steps for the planning phase, uses multi-step
12689
+ * decomposition. Otherwise falls back to the single-dispatch code path.
11633
12690
  *
11634
12691
  * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
11635
12692
  * @param params - Phase parameters (runId)
@@ -11638,6 +12695,8 @@ function formatProductBriefFromDecisions(decisions) {
11638
12695
  async function runPlanningPhase(deps, params) {
11639
12696
  const { db, pack, dispatcher } = deps;
11640
12697
  const { runId, amendmentContext } = params;
12698
+ const planningPhase = pack.manifest.phases?.find((p) => p.name === "planning");
12699
+ if (planningPhase?.steps && planningPhase.steps.length > 0 && !amendmentContext) return runPlanningMultiStep(deps, params);
11641
12700
  const zeroTokenUsage = {
11642
12701
  input: 0,
11643
12702
  output: 0
@@ -11876,82 +12935,27 @@ var QualityGateImpl = class {
11876
12935
  function createQualityGate(config) {
11877
12936
  return new QualityGateImpl(config);
11878
12937
  }
11879
-
11880
- //#endregion
11881
- //#region src/modules/phase-orchestrator/phases/solutioning.ts
11882
- /** Base token budget for architecture generation (covers template + requirements) */
11883
- const BASE_ARCH_PROMPT_TOKENS = 3e3;
11884
- /** Base token budget for story generation (covers template + requirements + architecture) */
11885
- const BASE_STORY_PROMPT_TOKENS = 4e3;
11886
- /** Additional tokens per architecture decision injected into story generation prompt */
11887
- const TOKENS_PER_DECISION = 100;
11888
- /** Absolute maximum prompt tokens (model context safety margin) */
11889
- const ABSOLUTE_MAX_PROMPT_TOKENS = 12e3;
11890
- /** Placeholder in architecture prompt template */
11891
- const REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
11892
- /** Amendment context framing block prefix */
11893
- const AMENDMENT_CONTEXT_HEADER = "\n\n--- AMENDMENT CONTEXT (Parent Run Decisions) ---\n";
11894
- /** Amendment context framing block suffix */
11895
- const AMENDMENT_CONTEXT_FOOTER = "\n--- END AMENDMENT CONTEXT ---\n";
11896
- /** Marker appended when amendment context is truncated to fit token budget */
11897
- const TRUNCATED_MARKER = "\n[TRUNCATED]";
11898
- /** Placeholders in story generation prompt template */
11899
- const STORY_REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
11900
- const STORY_ARCHITECTURE_PLACEHOLDER = "{{architecture_decisions}}";
11901
- /** Gap analysis placeholder used in retry prompt */
11902
- const GAP_ANALYSIS_PLACEHOLDER = "{{gap_analysis}}";
11903
- /** Priority order for decision categories when summarizing (higher priority kept first) */
11904
- const DECISION_CATEGORY_PRIORITY = [
11905
- "data",
11906
- "auth",
11907
- "api",
11908
- "frontend",
11909
- "infra",
11910
- "observability",
11911
- "ci"
11912
- ];
11913
- /**
11914
- * Calculate the dynamic prompt token budget based on the number of decisions
11915
- * that will be injected into the prompt.
11916
- *
11917
- * @param baseBudget - Base token budget for the phase
11918
- * @param decisionCount - Number of decisions to inject
11919
- * @returns Calculated token budget, capped at ABSOLUTE_MAX_PROMPT_TOKENS
11920
- */
11921
- function calculateDynamicBudget(baseBudget, decisionCount) {
11922
- const budget = baseBudget + decisionCount * TOKENS_PER_DECISION;
11923
- return Math.min(budget, ABSOLUTE_MAX_PROMPT_TOKENS);
11924
- }
11925
- /**
11926
- * Summarize architecture decisions into compact key:value one-liners,
11927
- * dropping rationale and optionally dropping lower-priority categories
11928
- * to fit within a character budget.
11929
- *
11930
- * @param decisions - Full architecture decisions from the decision store
11931
- * @param maxChars - Maximum character budget for the summarized output
11932
- * @returns Compact summary string
11933
- */
11934
- function summarizeDecisions(decisions, maxChars) {
11935
- const sorted = [...decisions].sort((a, b) => {
11936
- const aCat = (a.category ?? "").toLowerCase();
11937
- const bCat = (b.category ?? "").toLowerCase();
11938
- const aIdx = DECISION_CATEGORY_PRIORITY.indexOf(aCat);
11939
- const bIdx = DECISION_CATEGORY_PRIORITY.indexOf(bCat);
11940
- const aPri = aIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : aIdx;
11941
- const bPri = bIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : bIdx;
11942
- return aPri - bPri;
11943
- });
11944
- const lines = ["## Architecture Decisions (Summarized)"];
11945
- let currentLength = lines[0].length;
11946
- for (const d of sorted) {
11947
- const truncatedValue = d.value.length > 120 ? d.value.slice(0, 117) + "..." : d.value;
11948
- const line = `- ${d.key}: ${truncatedValue}`;
11949
- if (currentLength + line.length + 1 > maxChars) break;
11950
- lines.push(line);
11951
- currentLength += line.length + 1;
11952
- }
11953
- return lines.join("\n");
11954
- }
12938
+
12939
+ //#endregion
12940
+ //#region src/modules/phase-orchestrator/phases/solutioning.ts
12941
+ const logger$4 = createLogger("solutioning");
12942
+ /** Base token budget for architecture generation (covers template + requirements) */
12943
+ const BASE_ARCH_PROMPT_TOKENS = 3e3;
12944
+ /** Base token budget for story generation (covers template + requirements + architecture) */
12945
+ const BASE_STORY_PROMPT_TOKENS = 4e3;
12946
+ /** Placeholder in architecture prompt template */
12947
+ const REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
12948
+ /** Amendment context framing block prefix */
12949
+ const AMENDMENT_CONTEXT_HEADER = "\n\n--- AMENDMENT CONTEXT (Parent Run Decisions) ---\n";
12950
+ /** Amendment context framing block suffix */
12951
+ const AMENDMENT_CONTEXT_FOOTER = "\n--- END AMENDMENT CONTEXT ---\n";
12952
+ /** Marker appended when amendment context is truncated to fit token budget */
12953
+ const TRUNCATED_MARKER = "\n[TRUNCATED]";
12954
+ /** Placeholders in story generation prompt template */
12955
+ const STORY_REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
12956
+ const STORY_ARCHITECTURE_PLACEHOLDER = "{{architecture_decisions}}";
12957
+ /** Gap analysis placeholder used in retry prompt */
12958
+ const GAP_ANALYSIS_PLACEHOLDER = "{{gap_analysis}}";
11955
12959
  /**
11956
12960
  * Format functional and non-functional requirements from the planning phase
11957
12961
  * into a compact text block suitable for prompt injection.
@@ -12293,6 +13297,211 @@ async function runReadinessCheck(deps, runId) {
12293
13297
  };
12294
13298
  }
12295
13299
  /**
13300
+ * Build step definitions for 3-step architecture decomposition.
13301
+ */
13302
+ function buildArchitectureSteps() {
13303
+ return [
13304
+ {
13305
+ name: "architecture-step-1-context",
13306
+ taskType: "arch-context",
13307
+ outputSchema: ArchContextOutputSchema,
13308
+ context: [{
13309
+ placeholder: "requirements",
13310
+ source: "decision:planning.functional-requirements"
13311
+ }, {
13312
+ placeholder: "nfr",
13313
+ source: "decision:planning.non-functional-requirements"
13314
+ }],
13315
+ persist: [{
13316
+ field: "architecture_decisions",
13317
+ category: "architecture",
13318
+ key: "array"
13319
+ }]
13320
+ },
13321
+ {
13322
+ name: "architecture-step-2-decisions",
13323
+ taskType: "arch-decisions",
13324
+ outputSchema: ArchContextOutputSchema,
13325
+ context: [{
13326
+ placeholder: "requirements",
13327
+ source: "decision:planning.functional-requirements"
13328
+ }, {
13329
+ placeholder: "starter_decisions",
13330
+ source: "step:architecture-step-1-context"
13331
+ }],
13332
+ persist: [{
13333
+ field: "architecture_decisions",
13334
+ category: "architecture",
13335
+ key: "array"
13336
+ }]
13337
+ },
13338
+ {
13339
+ name: "architecture-step-3-patterns",
13340
+ taskType: "arch-patterns",
13341
+ outputSchema: ArchContextOutputSchema,
13342
+ context: [{
13343
+ placeholder: "architecture_decisions",
13344
+ source: "decision:solutioning.architecture"
13345
+ }],
13346
+ persist: [{
13347
+ field: "architecture_decisions",
13348
+ category: "architecture",
13349
+ key: "array"
13350
+ }],
13351
+ registerArtifact: {
13352
+ type: "architecture",
13353
+ path: "decision-store://solutioning/architecture",
13354
+ summarize: (parsed) => {
13355
+ const decisions = parsed.architecture_decisions;
13356
+ return `${decisions?.length ?? 0} pattern decisions (multi-step)`;
13357
+ }
13358
+ }
13359
+ }
13360
+ ];
13361
+ }
13362
+ /**
13363
+ * Run architecture generation using multi-step decomposition (3 steps).
13364
+ */
13365
+ async function runArchitectureGenerationMultiStep(deps, params) {
13366
+ const steps = buildArchitectureSteps();
13367
+ const result = await runSteps(steps, deps, params.runId, "solutioning", {});
13368
+ if (!result.success) return {
13369
+ error: result.error ?? "multi_step_arch_failed",
13370
+ tokenUsage: result.tokenUsage
13371
+ };
13372
+ const allDecisions = getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning").filter((d) => d.category === "architecture");
13373
+ const decisions = allDecisions.map((d) => {
13374
+ try {
13375
+ const parsed = JSON.parse(d.value);
13376
+ return {
13377
+ category: parsed.category ?? d.category,
13378
+ key: parsed.key ?? d.key,
13379
+ value: parsed.value ?? d.value,
13380
+ rationale: parsed.rationale ?? d.rationale ?? ""
13381
+ };
13382
+ } catch {
13383
+ return {
13384
+ category: d.category,
13385
+ key: d.key,
13386
+ value: d.value,
13387
+ rationale: d.rationale ?? ""
13388
+ };
13389
+ }
13390
+ });
13391
+ const artifactId = result.steps[result.steps.length - 1]?.artifactId ?? "";
13392
+ return {
13393
+ decisions,
13394
+ artifactId,
13395
+ tokenUsage: result.tokenUsage
13396
+ };
13397
+ }
13398
+ /**
13399
+ * Build step definitions for 2-step story decomposition.
13400
+ */
13401
+ function buildStorySteps() {
13402
+ return [{
13403
+ name: "stories-step-1-epics",
13404
+ taskType: "story-epics",
13405
+ outputSchema: EpicDesignOutputSchema,
13406
+ context: [{
13407
+ placeholder: "requirements",
13408
+ source: "decision:planning.functional-requirements"
13409
+ }, {
13410
+ placeholder: "architecture_decisions",
13411
+ source: "decision:solutioning.architecture"
13412
+ }],
13413
+ persist: [{
13414
+ field: "epics",
13415
+ category: "epic-design",
13416
+ key: "array"
13417
+ }]
13418
+ }, {
13419
+ name: "stories-step-2-stories",
13420
+ taskType: "story-stories",
13421
+ outputSchema: StoryGenerationOutputSchema,
13422
+ context: [
13423
+ {
13424
+ placeholder: "epic_structure",
13425
+ source: "step:stories-step-1-epics"
13426
+ },
13427
+ {
13428
+ placeholder: "requirements",
13429
+ source: "decision:planning.functional-requirements"
13430
+ },
13431
+ {
13432
+ placeholder: "architecture_decisions",
13433
+ source: "decision:solutioning.architecture"
13434
+ }
13435
+ ],
13436
+ persist: [],
13437
+ registerArtifact: {
13438
+ type: "stories",
13439
+ path: "decision-store://solutioning/stories",
13440
+ summarize: (parsed) => {
13441
+ const epics = parsed.epics;
13442
+ const totalStories = epics?.reduce((sum, e) => sum + (e.stories?.length ?? 0), 0) ?? 0;
13443
+ return `${epics?.length ?? 0} epics, ${totalStories} stories (multi-step)`;
13444
+ }
13445
+ }
13446
+ }];
13447
+ }
13448
+ /**
13449
+ * Run story generation using multi-step decomposition (2 steps).
13450
+ */
13451
+ async function runStoryGenerationMultiStep(deps, params) {
13452
+ const steps = buildStorySteps();
13453
+ const result = await runSteps(steps, deps, params.runId, "solutioning", {});
13454
+ if (!result.success) return {
13455
+ error: result.error ?? "multi_step_story_failed",
13456
+ tokenUsage: result.tokenUsage
13457
+ };
13458
+ const storyStep = result.steps.find((s) => s.name === "stories-step-2-stories");
13459
+ const storyOutput = storyStep?.parsed;
13460
+ if (!storyOutput || !storyOutput.epics) return {
13461
+ error: "Story generation step produced no epics",
13462
+ tokenUsage: result.tokenUsage
13463
+ };
13464
+ const epics = storyOutput.epics;
13465
+ for (const [epicIndex, epic] of epics.entries()) {
13466
+ upsertDecision(deps.db, {
13467
+ pipeline_run_id: params.runId,
13468
+ phase: "solutioning",
13469
+ category: "epics",
13470
+ key: `epic-${epicIndex + 1}`,
13471
+ value: JSON.stringify({
13472
+ title: epic.title,
13473
+ description: epic.description
13474
+ })
13475
+ });
13476
+ for (const story of epic.stories) upsertDecision(deps.db, {
13477
+ pipeline_run_id: params.runId,
13478
+ phase: "solutioning",
13479
+ category: "stories",
13480
+ key: story.key,
13481
+ value: JSON.stringify({
13482
+ key: story.key,
13483
+ title: story.title,
13484
+ description: story.description,
13485
+ ac: story.acceptance_criteria,
13486
+ priority: story.priority
13487
+ })
13488
+ });
13489
+ }
13490
+ for (const epic of epics) for (const story of epic.stories) createRequirement(deps.db, {
13491
+ pipeline_run_id: params.runId,
13492
+ source: "solutioning-phase",
13493
+ type: "functional",
13494
+ description: `${story.title}: ${story.description}`,
13495
+ priority: story.priority
13496
+ });
13497
+ const artifactId = storyStep?.artifactId ?? "";
13498
+ return {
13499
+ epics,
13500
+ artifactId,
13501
+ tokenUsage: result.tokenUsage
13502
+ };
13503
+ }
13504
+ /**
12296
13505
  * Execute the solutioning phase of the BMAD pipeline.
12297
13506
  *
12298
13507
  * Orchestrates the two-phase dispatch strategy:
@@ -12314,12 +13523,20 @@ async function runSolutioningPhase(deps, params) {
12314
13523
  let totalInput = 0;
12315
13524
  let totalOutput = 0;
12316
13525
  try {
13526
+ const solutioningPhase = deps.pack.manifest.phases?.find((p) => p.name === "solutioning");
13527
+ const hasSteps = solutioningPhase?.steps && solutioningPhase.steps.length > 0 && !params.amendmentContext;
12317
13528
  const existingArchArtifact = getArtifactByTypeForRun(deps.db, params.runId, "solutioning", "architecture");
12318
13529
  let archResult;
12319
13530
  if (existingArchArtifact) {
12320
13531
  const existingDecisions = getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning").filter((d) => d.category === "architecture");
13532
+ logger$4.info({
13533
+ runId: params.runId,
13534
+ artifactId: existingArchArtifact.id,
13535
+ decisionCount: existingDecisions.length
13536
+ }, "Architecture artifact already exists — skipping architecture sub-phase, transitioning to story generation");
12321
13537
  archResult = {
12322
13538
  decisions: existingDecisions.map((d) => ({
13539
+ category: d.category,
12323
13540
  key: d.key,
12324
13541
  value: d.value,
12325
13542
  rationale: d.rationale ?? ""
@@ -12330,7 +13547,8 @@ async function runSolutioningPhase(deps, params) {
12330
13547
  output: 0
12331
13548
  }
12332
13549
  };
12333
- } else archResult = await runArchitectureGeneration(deps, params);
13550
+ } else if (hasSteps) archResult = await runArchitectureGenerationMultiStep(deps, params);
13551
+ else archResult = await runArchitectureGeneration(deps, params);
12334
13552
  totalInput += archResult.tokenUsage.input;
12335
13553
  totalOutput += archResult.tokenUsage.output;
12336
13554
  if ("error" in archResult) return {
@@ -12342,7 +13560,12 @@ async function runSolutioningPhase(deps, params) {
12342
13560
  output: totalOutput
12343
13561
  }
12344
13562
  };
12345
- const storyResult = await runStoryGeneration(deps, params);
13563
+ logger$4.info({
13564
+ runId: params.runId,
13565
+ decisionCount: archResult.decisions.length,
13566
+ mode: hasSteps ? "multi-step" : "single-dispatch"
13567
+ }, "Architecture sub-phase complete — transitioning to story generation");
13568
+ const storyResult = hasSteps ? await runStoryGenerationMultiStep(deps, params) : await runStoryGeneration(deps, params);
12346
13569
  totalInput += storyResult.tokenUsage.input;
12347
13570
  totalOutput += storyResult.tokenUsage.output;
12348
13571
  if ("error" in storyResult) return {
@@ -13027,8 +14250,8 @@ const PACKAGE_ROOT = join(__dirname, "..", "..", "..");
13027
14250
  */
13028
14251
  function resolveBmadMethodSrcPath(fromDir = __dirname) {
13029
14252
  try {
13030
- const require = createRequire(join(fromDir, "synthetic.js"));
13031
- const pkgJsonPath = require.resolve("bmad-method/package.json");
14253
+ const require$1 = createRequire$1(join(fromDir, "synthetic.js"));
14254
+ const pkgJsonPath = require$1.resolve("bmad-method/package.json");
13032
14255
  return join(dirname(pkgJsonPath), "src");
13033
14256
  } catch {
13034
14257
  return null;
@@ -13040,9 +14263,9 @@ function resolveBmadMethodSrcPath(fromDir = __dirname) {
13040
14263
  */
13041
14264
  function resolveBmadMethodVersion(fromDir = __dirname) {
13042
14265
  try {
13043
- const require = createRequire(join(fromDir, "synthetic.js"));
13044
- const pkgJsonPath = require.resolve("bmad-method/package.json");
13045
- const pkg = require(pkgJsonPath);
14266
+ const require$1 = createRequire$1(join(fromDir, "synthetic.js"));
14267
+ const pkgJsonPath = require$1.resolve("bmad-method/package.json");
14268
+ const pkg = require$1(pkgJsonPath);
13046
14269
  return pkg.version ?? "unknown";
13047
14270
  } catch {
13048
14271
  return "unknown";
@@ -13163,7 +14386,9 @@ function buildPipelineStatusOutput(run, tokenSummary, decisionsCount, storiesCou
13163
14386
  cost_usd: totalCost
13164
14387
  },
13165
14388
  decisions_count: decisionsCount,
13166
- stories_count: storiesCount
14389
+ stories_count: storiesCount,
14390
+ last_activity: run.updated_at,
14391
+ staleness_seconds: Math.round((Date.now() - new Date(run.updated_at).getTime()) / 1e3)
13167
14392
  };
13168
14393
  }
13169
14394
  /**
@@ -13799,6 +15024,26 @@ async function runAutoRun(options) {
13799
15024
  msg: payload.msg
13800
15025
  });
13801
15026
  });
15027
+ eventBus.on("orchestrator:heartbeat", (payload) => {
15028
+ ndjsonEmitter.emit({
15029
+ type: "pipeline:heartbeat",
15030
+ ts: new Date().toISOString(),
15031
+ run_id: payload.runId,
15032
+ active_dispatches: payload.activeDispatches,
15033
+ completed_dispatches: payload.completedDispatches,
15034
+ queued_dispatches: payload.queuedDispatches
15035
+ });
15036
+ });
15037
+ eventBus.on("orchestrator:stall", (payload) => {
15038
+ ndjsonEmitter.emit({
15039
+ type: "story:stall",
15040
+ ts: new Date().toISOString(),
15041
+ run_id: payload.runId,
15042
+ story_key: payload.storyKey,
15043
+ phase: payload.phase,
15044
+ elapsed_ms: payload.elapsedMs
15045
+ });
15046
+ });
13802
15047
  }
13803
15048
  const orchestrator = createImplementationOrchestrator({
13804
15049
  db,
@@ -14501,6 +15746,397 @@ async function runAutoStatus(options) {
14501
15746
  } catch {}
14502
15747
  }
14503
15748
  }
15749
function inspectProcessTree() {
	// Best-effort snapshot of the supervisor's process tree via `ps`.
	// Any failure (missing `ps`, missing child_process, timeout) degrades
	// gracefully to the empty result — callers treat that as "not running".
	const tree = {
		orchestrator_pid: null,
		child_pids: [],
		zombies: []
	};
	try {
		const { execFileSync } = __require("node:child_process");
		const rows = execFileSync("ps", ["-eo", "pid,ppid,stat,command"], {
			encoding: "utf-8",
			timeout: 5e3
		}).split("\n");
		// Locate the orchestrator: first `substrate auto run` process whose
		// line parses to a leading PID (ignore any grep over the same string).
		for (const row of rows) {
			if (!row.includes("substrate auto run") || row.includes("grep")) continue;
			const pidMatch = row.trim().match(/^(\d+)/);
			if (pidMatch) {
				tree.orchestrator_pid = parseInt(pidMatch[1], 10);
				break;
			}
		}
		if (tree.orchestrator_pid === null) return tree;
		// Collect direct children of the orchestrator; a STAT column containing
		// "Z" marks a zombie (exited but unreaped) process.
		for (const row of rows) {
			const cols = row.trim().split(/\s+/);
			if (cols.length < 3) continue;
			const pid = parseInt(cols[0], 10);
			const ppid = parseInt(cols[1], 10);
			if (ppid !== tree.orchestrator_pid || pid === tree.orchestrator_pid) continue;
			tree.child_pids.push(pid);
			if (cols[2].includes("Z")) tree.zombies.push(pid);
		}
	} catch {}
	return tree;
}
15784
/**
 * `substrate auto health` — inspect the latest (or a specific) pipeline run
 * and print a health verdict: HEALTHY, STALLED, or NO_PIPELINE_RUNNING.
 *
 * The verdict combines three signals:
 *  - run staleness (seconds since the run's `updated_at`),
 *  - the live process tree (orchestrator PID, children, zombies),
 *  - per-story phase state parsed from the run's `token_usage_json` blob.
 *
 * @param {{ outputFormat: "human"|"json", runId?: number|string, projectRoot: string }} options
 * @returns {Promise<number>} process exit code: 0 on success (including "no
 *   pipeline running"), 1 on unexpected error.
 */
async function runAutoHealth(options) {
	const { outputFormat, runId, projectRoot } = options;
	// Shared "nothing to report" payload — previously this literal was
	// duplicated verbatim for the missing-DB and missing-run cases.
	const emptyHealth = () => ({
		verdict: "NO_PIPELINE_RUNNING",
		run_id: null,
		status: null,
		current_phase: null,
		staleness_seconds: 0,
		last_activity: "",
		process: {
			orchestrator_pid: null,
			child_pids: [],
			zombies: []
		},
		stories: {
			active: 0,
			completed: 0,
			escalated: 0,
			details: {}
		}
	});
	// Emit the empty verdict (with a case-specific human reason) and succeed.
	const reportNoPipeline = (reason) => {
		if (outputFormat === "json") process.stdout.write(formatOutput(emptyHealth(), "json", true) + "\n");
		else process.stdout.write(`NO_PIPELINE_RUNNING — ${reason}\n`);
		return 0;
	};
	const dbRoot = await resolveMainRepoRoot(projectRoot);
	const dbPath = join(dbRoot, ".substrate", "substrate.db");
	if (!existsSync(dbPath)) return reportNoPipeline("no substrate database found");
	const dbWrapper = new DatabaseWrapper(dbPath);
	try {
		dbWrapper.open();
		const db = dbWrapper.db;
		const run = runId !== void 0 ? getPipelineRunById(db, runId) : getLatestRun(db);
		if (run === void 0) return reportNoPipeline("no pipeline runs found");
		const stalenessSeconds = Math.round((Date.now() - new Date(run.updated_at).getTime()) / 1e3);
		// Tally per-story phases from the persisted orchestrator state.
		// NOTE(review): token_usage_json appears to double as the story-state
		// blob; corrupt or missing JSON degrades to zero counts by design.
		const storyDetails = {};
		let active = 0;
		let completed = 0;
		let escalated = 0;
		try {
			if (run.token_usage_json) {
				const state = JSON.parse(run.token_usage_json);
				if (state.stories) {
					for (const [key, s] of Object.entries(state.stories)) {
						storyDetails[key] = {
							phase: s.phase,
							review_cycles: s.reviewCycles
						};
						if (s.phase === "COMPLETE") completed++;
						else if (s.phase === "ESCALATED") escalated++;
						else if (s.phase !== "PENDING") active++;
					}
				}
			}
		} catch {}
		const processInfo = inspectProcessTree();
		// Verdict for a "running" run: STALLED when zombies exist, when nothing
		// has been written for >600s, or when the orchestrator is alive with
		// active stories but no child workers; otherwise HEALTHY. Any other
		// status (terminal or unknown) keeps the NO_PIPELINE_RUNNING default.
		let verdict = "NO_PIPELINE_RUNNING";
		if (run.status === "running") {
			if (processInfo.zombies.length > 0) verdict = "STALLED";
			else if (stalenessSeconds > 600) verdict = "STALLED";
			else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length === 0 && active > 0) verdict = "STALLED";
			else verdict = "HEALTHY";
		}
		const output = {
			verdict,
			run_id: run.id,
			status: run.status,
			current_phase: run.current_phase,
			staleness_seconds: stalenessSeconds,
			last_activity: run.updated_at,
			process: processInfo,
			stories: {
				active,
				completed,
				escalated,
				details: storyDetails
			}
		};
		if (outputFormat === "json") process.stdout.write(formatOutput(output, "json", true) + "\n");
		else {
			const verdictLabel = verdict === "HEALTHY" ? "HEALTHY" : verdict === "STALLED" ? "STALLED" : "NO PIPELINE RUNNING";
			process.stdout.write(`\nPipeline Health: ${verdictLabel}\n`);
			process.stdout.write(` Run: ${run.id}\n`);
			process.stdout.write(` Status: ${run.status}\n`);
			process.stdout.write(` Phase: ${run.current_phase ?? "N/A"}\n`);
			process.stdout.write(` Last Active: ${run.updated_at} (${stalenessSeconds}s ago)\n`);
			if (processInfo.orchestrator_pid !== null) {
				process.stdout.write(` Orchestrator: PID ${processInfo.orchestrator_pid}\n`);
				process.stdout.write(` Children: ${processInfo.child_pids.length} active`);
				if (processInfo.zombies.length > 0) process.stdout.write(` (${processInfo.zombies.length} ZOMBIE)`);
				process.stdout.write("\n");
			} else process.stdout.write(" Orchestrator: not running\n");
			if (Object.keys(storyDetails).length > 0) {
				process.stdout.write("\n Stories:\n");
				for (const [key, s] of Object.entries(storyDetails)) process.stdout.write(` ${key}: ${s.phase} (${s.review_cycles} review cycles)\n`);
				process.stdout.write(`\n Summary: ${active} active, ${completed} completed, ${escalated} escalated\n`);
			}
		}
		return 0;
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
		else process.stderr.write(`Error: ${msg}\n`);
		logger$3.error({ err }, "auto health failed");
		return 1;
	} finally {
		try {
			dbWrapper.close();
		} catch {}
	}
}
15918
/**
 * Fetch pipeline health data as a structured object without any stdout side-effects.
 * Used by runAutoSupervisor to poll health without formatting overhead.
 *
 * Returns a NO_PIPELINE_RUNNING health object for all graceful "no data" cases
 * (missing DB, missing run, terminal run status). Throws only on unexpected errors.
 */
async function getAutoHealthData(options) {
	const { runId, projectRoot } = options;
	const repoRoot = await resolveMainRepoRoot(projectRoot);
	const dbPath = join(repoRoot, ".substrate", "substrate.db");
	const NO_PIPELINE = {
		verdict: "NO_PIPELINE_RUNNING",
		run_id: null,
		status: null,
		current_phase: null,
		staleness_seconds: 0,
		last_activity: "",
		process: {
			orchestrator_pid: null,
			child_pids: [],
			zombies: []
		},
		stories: {
			active: 0,
			completed: 0,
			escalated: 0,
			details: {}
		}
	};
	if (!existsSync(dbPath)) return NO_PIPELINE;
	const wrapper = new DatabaseWrapper(dbPath);
	try {
		wrapper.open();
		const db = wrapper.db;
		const run = runId !== void 0 ? getPipelineRunById(db, runId) : getLatestRun(db);
		if (run === void 0) return NO_PIPELINE;
		const stalenessSeconds = Math.round((Date.now() - new Date(run.updated_at).getTime()) / 1e3);
		// Tally per-story phases from the persisted orchestrator state blob;
		// missing or corrupt JSON degrades to zero counts.
		const details = {};
		let active = 0;
		let completed = 0;
		let escalated = 0;
		try {
			const state = run.token_usage_json ? JSON.parse(run.token_usage_json) : void 0;
			for (const [key, s] of Object.entries(state?.stories ?? {})) {
				details[key] = {
					phase: s.phase,
					review_cycles: s.reviewCycles
				};
				if (s.phase === "COMPLETE") completed += 1;
				else if (s.phase === "ESCALATED") escalated += 1;
				else if (s.phase !== "PENDING") active += 1;
			}
		} catch {}
		const procTree = inspectProcessTree();
		// A "running" run is STALLED on zombies, >600s of silence, or an
		// orchestrator with active stories but no child workers; otherwise
		// HEALTHY. Any other status keeps the NO_PIPELINE_RUNNING default.
		let verdict = "NO_PIPELINE_RUNNING";
		if (run.status === "running") {
			const noWorkers = procTree.orchestrator_pid !== null && procTree.child_pids.length === 0 && active > 0;
			const stalled = procTree.zombies.length > 0 || stalenessSeconds > 600 || noWorkers;
			verdict = stalled ? "STALLED" : "HEALTHY";
		}
		return {
			verdict,
			run_id: run.id,
			status: run.status,
			current_phase: run.current_phase,
			staleness_seconds: stalenessSeconds,
			last_activity: run.updated_at,
			process: procTree,
			stories: {
				active,
				completed,
				escalated,
				details
			}
		};
	} finally {
		try {
			wrapper.close();
		} catch {}
	}
}
16005
/**
 * Build the production dependency set for runAutoSupervisor.
 * Tests may inject replacements for any subset of these hooks via the
 * supervisor's `deps` argument.
 */
function defaultSupervisorDeps() {
	const wait = (ms) => new Promise((done) => setTimeout(done, ms));
	return {
		getHealth: getAutoHealthData,
		killPid: (pid, signal) => {
			process.kill(pid, signal);
		},
		resumePipeline: runAutoResume,
		sleep: wait
	};
}
16015
/**
 * Run the pipeline supervisor — a long-running watchdog that polls pipeline health
 * and automatically kills and restarts stalled pipelines.
 *
 * State machine: POLLING → (stall detected) → KILLING → RESTARTING → POLLING
 *
 * Exit codes:
 * 0 — pipeline reached terminal state with no failures
 * 1 — pipeline completed with failures or escalations
 * 2 — max restarts exceeded (safety valve triggered)
 */
async function runAutoSupervisor(options, deps = {}) {
	const { pollInterval, stallThreshold, maxRestarts, outputFormat, projectRoot, runId, pack } = options;
	// Injectable collaborators; production wiring comes from defaultSupervisorDeps().
	const wired = {
		...defaultSupervisorDeps(),
		...deps
	};
	const { getHealth, killPid, resumePipeline, sleep } = wired;
	const startedAt = Date.now();
	let restarts = 0;
	// NDJSON event line for machine consumers (json mode only).
	const emitJson = (event) => {
		if (outputFormat !== "json") return;
		process.stdout.write(JSON.stringify({
			...event,
			ts: new Date().toISOString()
		}) + "\n");
	};
	// Plain progress line for human consumers (human mode only).
	const logHuman = (message) => {
		if (outputFormat === "human") process.stdout.write(message + "\n");
	};
	for (;;) {
		const health = await getHealth({
			runId,
			projectRoot
		});
		logHuman(`[${new Date().toISOString()}] Health: ${health.verdict} | staleness=${health.staleness_seconds}s | stories: active=${health.stories.active} completed=${health.stories.completed} escalated=${health.stories.escalated}`);
		if (health.verdict === "NO_PIPELINE_RUNNING") {
			// Terminal state: summarize story outcomes and exit.
			const elapsedSeconds = Math.round((Date.now() - startedAt) / 1e3);
			const entries = Object.entries(health.stories.details);
			const succeeded = entries.filter(([, s]) => s.phase === "COMPLETE").map(([k]) => k);
			const failed = entries.filter(([, s]) => s.phase !== "COMPLETE" && s.phase !== "PENDING").map(([k]) => k);
			const escalated = entries.filter(([, s]) => s.phase === "ESCALATED").map(([k]) => k);
			emitJson({
				type: "supervisor:summary",
				run_id: health.run_id,
				elapsed_seconds: elapsedSeconds,
				succeeded,
				failed,
				escalated,
				restarts
			});
			logHuman(`\nPipeline reached terminal state. Elapsed: ${elapsedSeconds}s | succeeded: ${succeeded.length} | failed: ${failed.length} | restarts: ${restarts}`);
			return failed.length > 0 ? 1 : 0;
		}
		if (health.staleness_seconds >= stallThreshold) {
			const pids = health.process.orchestrator_pid !== null ? [health.process.orchestrator_pid, ...health.process.child_pids] : [...health.process.child_pids];
			emitJson({
				type: "supervisor:kill",
				run_id: health.run_id,
				reason: "stall",
				staleness_seconds: health.staleness_seconds,
				pids
			});
			logHuman(`Supervisor: Stall confirmed (${health.staleness_seconds}s ≥ ${stallThreshold}s threshold). Killing PIDs: ${pids.join(", ") || "none"}`);
			// Two-phase kill: polite SIGTERM, 5s grace, then SIGKILL.
			for (const pid of pids) {
				try {
					killPid(pid, "SIGTERM");
				} catch {}
			}
			await sleep(5e3);
			for (const pid of pids) {
				try {
					killPid(pid, "SIGKILL");
				} catch {}
			}
			if (pids.length > 0) {
				// Poll up to 5× at 1s intervals for the PIDs to vanish
				// (signal 0 probes liveness without delivering a signal).
				let allDead = false;
				for (let attempt = 0; attempt < 5 && !allDead; attempt++) {
					await sleep(1e3);
					allDead = pids.every((pid) => {
						try {
							process.kill(pid, 0);
							return false;
						} catch {
							return true;
						}
					});
				}
				if (!allDead) logHuman(`Supervisor: Warning: Some PIDs may still be alive after SIGKILL`);
			}
			if (restarts >= maxRestarts) {
				emitJson({
					type: "supervisor:abort",
					run_id: health.run_id,
					reason: "max_restarts_exceeded",
					attempts: restarts
				});
				logHuman(`Supervisor: Max restarts (${maxRestarts}) exceeded. Aborting.`);
				return 2;
			}
			restarts++;
			emitJson({
				type: "supervisor:restart",
				run_id: health.run_id,
				attempt: restarts
			});
			logHuman(`Supervisor: Restarting pipeline (attempt ${restarts}/${maxRestarts})`);
			// Fire-and-forget: the resumed pipeline runs in the background
			// while the supervisor keeps polling; failures are reported only.
			resumePipeline({
				runId: health.run_id ?? void 0,
				outputFormat,
				projectRoot,
				concurrency: 3,
				pack
			}).catch((err) => {
				const message = err instanceof Error ? err.message : String(err);
				logHuman(`Supervisor: Resume error: ${message}`);
				if (outputFormat === "json") process.stderr.write(JSON.stringify({
					type: "supervisor:error",
					reason: "resume_failed",
					message,
					ts: new Date().toISOString()
				}) + "\n");
			});
		}
		await sleep(pollInterval * 1e3);
	}
}
14504
16140
  /**
14505
16141
  * Detect and apply supersessions after a phase completes in an amendment run.
14506
16142
  *
@@ -14869,6 +16505,28 @@ function registerAutoCommand(program, _version = "0.0.0", projectRoot = process.
14869
16505
  });
14870
16506
  process.exitCode = exitCode;
14871
16507
  });
16508
+ auto.command("health").description("Check pipeline health: process status, stall detection, and verdict").option("--run-id <id>", "Pipeline run ID to query (defaults to latest)").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
16509
+ const outputFormat = opts.outputFormat === "json" ? "json" : "human";
16510
+ const exitCode = await runAutoHealth({
16511
+ outputFormat,
16512
+ runId: opts.runId,
16513
+ projectRoot: opts.projectRoot
16514
+ });
16515
+ process.exitCode = exitCode;
16516
+ });
16517
+ auto.command("supervisor").description("Monitor a pipeline run and automatically recover from stalls").option("--poll-interval <seconds>", "Health poll interval in seconds", (v) => parseInt(v, 10), 60).option("--stall-threshold <seconds>", "Staleness in seconds before killing a stalled pipeline", (v) => parseInt(v, 10), 600).option("--max-restarts <n>", "Maximum automatic restarts before aborting", (v) => parseInt(v, 10), 3).option("--run-id <id>", "Pipeline run ID to monitor (defaults to latest)").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
16518
+ const outputFormat = opts.outputFormat === "json" ? "json" : "human";
16519
+ const exitCode = await runAutoSupervisor({
16520
+ pollInterval: opts.pollInterval,
16521
+ stallThreshold: opts.stallThreshold,
16522
+ maxRestarts: opts.maxRestarts,
16523
+ runId: opts.runId,
16524
+ pack: opts.pack,
16525
+ outputFormat,
16526
+ projectRoot: opts.projectRoot
16527
+ });
16528
+ process.exitCode = exitCode;
16529
+ });
14872
16530
  auto.command("amend").description("Run an amendment pipeline against a completed run and an existing run").option("--concept <text>", "Amendment concept description (inline)").option("--concept-file <path>", "Path to concept file").option("--run-id <id>", "Parent run ID (defaults to latest completed run)").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--from <phase>", "Start pipeline from this phase").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
14873
16531
  const exitCode = await runAmendCommand({
14874
16532
  concept: opts.concept,