substrate-ai 0.1.21 → 0.1.22

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -389,7 +389,7 @@ function listTemplates() {
389
389
 
390
390
  //#endregion
391
391
  //#region src/cli/commands/init.ts
392
- const logger$30 = createLogger("init");
392
+ const logger$32 = createLogger("init");
393
393
  /**
394
394
  * Detect whether the CLI was invoked via `npx substrate`.
395
395
  * When true, prefix suggested commands with `npx `.
@@ -573,7 +573,7 @@ async function runInit(options = {}) {
573
573
  discoveryReport = await registry.discoverAndRegister();
574
574
  } catch (err) {
575
575
  const message = err instanceof Error ? err.message : String(err);
576
- logger$30.error({ err }, "Adapter discovery failed");
576
+ logger$32.error({ err }, "Adapter discovery failed");
577
577
  process.stderr.write(` Error: adapter discovery failed — ${message}\n`);
578
578
  return INIT_EXIT_ERROR;
579
579
  }
@@ -611,7 +611,7 @@ async function runInit(options = {}) {
611
611
  await writeFile(routingPolicyPath, routingHeader + yaml.dump(routingPolicy), "utf-8");
612
612
  } catch (err) {
613
613
  const message = err instanceof Error ? err.message : String(err);
614
- logger$30.error({ err }, "Failed to write config files");
614
+ logger$32.error({ err }, "Failed to write config files");
615
615
  process.stderr.write(` Error: failed to write configuration — ${message}\n`);
616
616
  return INIT_EXIT_ERROR;
617
617
  }
@@ -686,7 +686,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
686
686
 
687
687
  //#endregion
688
688
  //#region src/modules/config/config-system-impl.ts
689
- const logger$29 = createLogger("config");
689
+ const logger$31 = createLogger("config");
690
690
  function deepMerge(base, override) {
691
691
  const result = { ...base };
692
692
  for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
@@ -731,7 +731,7 @@ function readEnvOverrides() {
731
731
  }
732
732
  const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
733
733
  if (!parsed.success) {
734
- logger$29.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
734
+ logger$31.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
735
735
  return {};
736
736
  }
737
737
  return parsed.data;
@@ -795,7 +795,7 @@ var ConfigSystemImpl = class {
795
795
  throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
796
796
  }
797
797
  this._config = result.data;
798
- logger$29.debug("Configuration loaded successfully");
798
+ logger$31.debug("Configuration loaded successfully");
799
799
  }
800
800
  getConfig() {
801
801
  if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
@@ -858,7 +858,7 @@ var ConfigSystemImpl = class {
858
858
  if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
859
859
  const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
860
860
  if (migrationOutput.result.success) {
861
- logger$29.info({
861
+ logger$31.info({
862
862
  from: version,
863
863
  to: CURRENT_CONFIG_FORMAT_VERSION,
864
864
  backup: migrationOutput.result.backupPath
@@ -901,7 +901,7 @@ function createConfigSystem(options = {}) {
901
901
 
902
902
  //#endregion
903
903
  //#region src/cli/commands/config.ts
904
- const logger$28 = createLogger("config-cmd");
904
+ const logger$30 = createLogger("config-cmd");
905
905
  const CONFIG_EXIT_SUCCESS = 0;
906
906
  const CONFIG_EXIT_ERROR = 1;
907
907
  const CONFIG_EXIT_INVALID = 2;
@@ -927,7 +927,7 @@ async function runConfigShow(opts = {}) {
927
927
  return CONFIG_EXIT_INVALID;
928
928
  }
929
929
  const message = err instanceof Error ? err.message : String(err);
930
- logger$28.error({ err }, "Failed to load configuration");
930
+ logger$30.error({ err }, "Failed to load configuration");
931
931
  process.stderr.write(` Error loading configuration: ${message}\n`);
932
932
  return CONFIG_EXIT_ERROR;
933
933
  }
@@ -1001,7 +1001,7 @@ async function runConfigExport(opts = {}) {
1001
1001
  return CONFIG_EXIT_INVALID;
1002
1002
  }
1003
1003
  const message = err instanceof Error ? err.message : String(err);
1004
- logger$28.error({ err }, "Failed to load configuration");
1004
+ logger$30.error({ err }, "Failed to load configuration");
1005
1005
  process.stderr.write(`Error loading configuration: ${message}\n`);
1006
1006
  return CONFIG_EXIT_ERROR;
1007
1007
  }
@@ -1155,7 +1155,7 @@ function registerConfigCommand(program, _version) {
1155
1155
 
1156
1156
  //#endregion
1157
1157
  //#region src/cli/commands/merge.ts
1158
- const logger$27 = createLogger("merge-cmd");
1158
+ const logger$29 = createLogger("merge-cmd");
1159
1159
  const MERGE_EXIT_SUCCESS = 0;
1160
1160
  const MERGE_EXIT_CONFLICT = 1;
1161
1161
  const MERGE_EXIT_ERROR = 2;
@@ -1193,7 +1193,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
1193
1193
  projectRoot
1194
1194
  });
1195
1195
  try {
1196
- logger$27.info({
1196
+ logger$29.info({
1197
1197
  taskId,
1198
1198
  targetBranch
1199
1199
  }, "Running conflict detection...");
@@ -1215,7 +1215,7 @@ async function mergeTask(taskId, targetBranch, projectRoot) {
1215
1215
  } catch (err) {
1216
1216
  const message = err instanceof Error ? err.message : String(err);
1217
1217
  console.error(`Error merging task "${taskId}": ${message}`);
1218
- logger$27.error({
1218
+ logger$29.error({
1219
1219
  taskId,
1220
1220
  err
1221
1221
  }, "merge --task failed");
@@ -1269,7 +1269,7 @@ async function mergeAll(targetBranch, projectRoot, taskIds) {
1269
1269
  error: message
1270
1270
  });
1271
1271
  console.log(` Error for task "${taskId}": ${message}`);
1272
- logger$27.error({
1272
+ logger$29.error({
1273
1273
  taskId,
1274
1274
  err
1275
1275
  }, "merge --all: task failed");
@@ -1322,7 +1322,7 @@ function registerMergeCommand(program, projectRoot = process.cwd()) {
1322
1322
 
1323
1323
  //#endregion
1324
1324
  //#region src/cli/commands/worktrees.ts
1325
- const logger$26 = createLogger("worktrees-cmd");
1325
+ const logger$28 = createLogger("worktrees-cmd");
1326
1326
  const WORKTREES_EXIT_SUCCESS = 0;
1327
1327
  const WORKTREES_EXIT_ERROR = 1;
1328
1328
  /** Valid task statuses for filtering */
@@ -1449,7 +1449,7 @@ async function listWorktreesAction(options) {
1449
1449
  try {
1450
1450
  worktreeInfos = await manager.listWorktrees();
1451
1451
  } catch (err) {
1452
- logger$26.error({ err }, "Failed to list worktrees");
1452
+ logger$28.error({ err }, "Failed to list worktrees");
1453
1453
  const message = err instanceof Error ? err.message : String(err);
1454
1454
  process.stderr.write(`Error listing worktrees: ${message}\n`);
1455
1455
  return WORKTREES_EXIT_ERROR;
@@ -1476,7 +1476,7 @@ async function listWorktreesAction(options) {
1476
1476
  } catch (err) {
1477
1477
  const message = err instanceof Error ? err.message : String(err);
1478
1478
  process.stderr.write(`Error: ${message}\n`);
1479
- logger$26.error({ err }, "listWorktreesAction failed");
1479
+ logger$28.error({ err }, "listWorktreesAction failed");
1480
1480
  return WORKTREES_EXIT_ERROR;
1481
1481
  }
1482
1482
  }
@@ -1738,7 +1738,7 @@ function getPlanningCostTotal(db, sessionId) {
1738
1738
 
1739
1739
  //#endregion
1740
1740
  //#region src/cli/commands/cost.ts
1741
- const logger$25 = createLogger("cost-cmd");
1741
+ const logger$27 = createLogger("cost-cmd");
1742
1742
  const COST_EXIT_SUCCESS = 0;
1743
1743
  const COST_EXIT_ERROR = 1;
1744
1744
  /**
@@ -1984,7 +1984,7 @@ async function runCostAction(options) {
1984
1984
  } catch (err) {
1985
1985
  const message = err instanceof Error ? err.message : String(err);
1986
1986
  process.stderr.write(`Error: ${message}\n`);
1987
- logger$25.error({ err }, "runCostAction failed");
1987
+ logger$27.error({ err }, "runCostAction failed");
1988
1988
  return COST_EXIT_ERROR;
1989
1989
  } finally {
1990
1990
  if (wrapper !== null) try {
@@ -2046,7 +2046,7 @@ function emitStatusSnapshot(snapshot) {
2046
2046
 
2047
2047
  //#endregion
2048
2048
  //#region src/recovery/crash-recovery.ts
2049
- const logger$24 = createLogger("crash-recovery");
2049
+ const logger$26 = createLogger("crash-recovery");
2050
2050
  var CrashRecoveryManager = class {
2051
2051
  db;
2052
2052
  gitWorktreeManager;
@@ -2099,7 +2099,7 @@ var CrashRecoveryManager = class {
2099
2099
  });
2100
2100
  }
2101
2101
  if (this.gitWorktreeManager !== void 0) this.cleanupOrphanedWorktrees().catch((err) => {
2102
- logger$24.warn({ err }, "Worktree cleanup failed during recovery (non-fatal)");
2102
+ logger$26.warn({ err }, "Worktree cleanup failed during recovery (non-fatal)");
2103
2103
  });
2104
2104
  let newlyReady = 0;
2105
2105
  if (sessionId !== void 0) {
@@ -2109,7 +2109,7 @@ var CrashRecoveryManager = class {
2109
2109
  const row = db.prepare("SELECT COUNT(*) as count FROM ready_tasks").get();
2110
2110
  newlyReady = row.count;
2111
2111
  }
2112
- logger$24.info({
2112
+ logger$26.info({
2113
2113
  event: "recovery:complete",
2114
2114
  recovered,
2115
2115
  failed,
@@ -2131,10 +2131,10 @@ var CrashRecoveryManager = class {
2131
2131
  if (this.gitWorktreeManager === void 0) return 0;
2132
2132
  try {
2133
2133
  const count = await this.gitWorktreeManager.cleanupAllWorktrees();
2134
- logger$24.info({ count }, "Cleaned up orphaned worktrees");
2134
+ logger$26.info({ count }, "Cleaned up orphaned worktrees");
2135
2135
  return count;
2136
2136
  } catch (err) {
2137
- logger$24.warn({ err }, "Failed to clean up orphaned worktrees — continuing");
2137
+ logger$26.warn({ err }, "Failed to clean up orphaned worktrees — continuing");
2138
2138
  return 0;
2139
2139
  }
2140
2140
  }
@@ -2217,7 +2217,7 @@ function setupGracefulShutdown(options) {
2217
2217
 
2218
2218
  //#endregion
2219
2219
  //#region src/cli/commands/start.ts
2220
- const logger$23 = createLogger("start-cmd");
2220
+ const logger$25 = createLogger("start-cmd");
2221
2221
  const START_EXIT_SUCCESS = 0;
2222
2222
  const START_EXIT_ERROR = 1;
2223
2223
  const START_EXIT_USAGE_ERROR = 2;
@@ -2326,7 +2326,7 @@ async function runStartAction(options) {
2326
2326
  let configWatcher$1 = null;
2327
2327
  const configFilePath = join(projectRoot, "substrate.config.yaml");
2328
2328
  if (noWatchConfig) {
2329
- logger$23.info("Config hot-reload disabled (--no-watch-config).");
2329
+ logger$25.info("Config hot-reload disabled (--no-watch-config).");
2330
2330
  process.stdout.write("Config hot-reload disabled (--no-watch-config).\n");
2331
2331
  } else {
2332
2332
  let currentHotConfig = config;
@@ -2341,7 +2341,7 @@ async function runStartAction(options) {
2341
2341
  const changedKeys = computeChangedKeys(previousConfig, newConfig);
2342
2342
  currentHotConfig = newConfig;
2343
2343
  const n = changedKeys.length;
2344
- logger$23.info({
2344
+ logger$25.info({
2345
2345
  changedKeys,
2346
2346
  configPath: configFilePath
2347
2347
  }, `Config reloaded: ${n} setting(s) changed`);
@@ -2353,7 +2353,7 @@ async function runStartAction(options) {
2353
2353
  });
2354
2354
  },
2355
2355
  onError: (err) => {
2356
- logger$23.error({
2356
+ logger$25.error({
2357
2357
  err,
2358
2358
  configPath: configFilePath
2359
2359
  }, `Config reload failed: ${err.message}. Continuing with previous config.`);
@@ -2366,7 +2366,7 @@ async function runStartAction(options) {
2366
2366
  let cleanupShutdown = null;
2367
2367
  if (resolvedGraphFile === null) if (interruptedSession !== void 0) {
2368
2368
  process.stdout.write(`Resuming interrupted session ${interruptedSession.id}\n`);
2369
- logger$23.info({ sessionId: interruptedSession.id }, "session:resumed");
2369
+ logger$25.info({ sessionId: interruptedSession.id }, "session:resumed");
2370
2370
  const recovery = new CrashRecoveryManager({
2371
2371
  db: databaseService.db,
2372
2372
  gitWorktreeManager
@@ -2490,7 +2490,7 @@ async function runStartAction(options) {
2490
2490
  } catch (err) {
2491
2491
  const message = err instanceof Error ? err.message : String(err);
2492
2492
  process.stderr.write(`Error: ${message}\n`);
2493
- logger$23.error({ err }, "runStartAction failed");
2493
+ logger$25.error({ err }, "runStartAction failed");
2494
2494
  return START_EXIT_ERROR;
2495
2495
  } finally {
2496
2496
  try {
@@ -2648,7 +2648,7 @@ function renderTaskGraph(snapshot, tasks) {
2648
2648
 
2649
2649
  //#endregion
2650
2650
  //#region src/cli/commands/status.ts
2651
- const logger$22 = createLogger("status-cmd");
2651
+ const logger$24 = createLogger("status-cmd");
2652
2652
  const STATUS_EXIT_SUCCESS = 0;
2653
2653
  const STATUS_EXIT_ERROR = 1;
2654
2654
  const STATUS_EXIT_NOT_FOUND = 2;
@@ -2801,7 +2801,7 @@ async function runStatusAction(options) {
2801
2801
  } catch (err) {
2802
2802
  const message = err instanceof Error ? err.message : String(err);
2803
2803
  process.stderr.write(`Error: ${message}\n`);
2804
- logger$22.error({ err }, "runStatusAction failed");
2804
+ logger$24.error({ err }, "runStatusAction failed");
2805
2805
  return STATUS_EXIT_ERROR;
2806
2806
  } finally {
2807
2807
  if (wrapper !== null) try {
@@ -2834,7 +2834,7 @@ function registerStatusCommand(program, _version = "0.0.0", projectRoot = proces
2834
2834
 
2835
2835
  //#endregion
2836
2836
  //#region src/cli/commands/pause.ts
2837
- const logger$21 = createLogger("pause-cmd");
2837
+ const logger$23 = createLogger("pause-cmd");
2838
2838
  const PAUSE_EXIT_SUCCESS = 0;
2839
2839
  const PAUSE_EXIT_ERROR = 1;
2840
2840
  const PAUSE_EXIT_USAGE_ERROR = 2;
@@ -2903,7 +2903,7 @@ async function runPauseAction(options) {
2903
2903
  } catch (err) {
2904
2904
  const message = err instanceof Error ? err.message : String(err);
2905
2905
  process.stderr.write(`Error: ${message}\n`);
2906
- logger$21.error({ err }, "runPauseAction failed");
2906
+ logger$23.error({ err }, "runPauseAction failed");
2907
2907
  return PAUSE_EXIT_ERROR;
2908
2908
  } finally {
2909
2909
  if (wrapper !== null) try {
@@ -2933,7 +2933,7 @@ function registerPauseCommand(program, version = "0.0.0", projectRoot = process.
2933
2933
 
2934
2934
  //#endregion
2935
2935
  //#region src/cli/commands/resume.ts
2936
- const logger$20 = createLogger("resume-cmd");
2936
+ const logger$22 = createLogger("resume-cmd");
2937
2937
  const RESUME_EXIT_SUCCESS = 0;
2938
2938
  const RESUME_EXIT_ERROR = 1;
2939
2939
  const RESUME_EXIT_USAGE_ERROR = 2;
@@ -3018,7 +3018,7 @@ async function runResumeAction(options) {
3018
3018
  } catch (err) {
3019
3019
  const message = err instanceof Error ? err.message : String(err);
3020
3020
  process.stderr.write(`Error: ${message}\n`);
3021
- logger$20.error({ err }, "runResumeAction failed");
3021
+ logger$22.error({ err }, "runResumeAction failed");
3022
3022
  return RESUME_EXIT_ERROR;
3023
3023
  } finally {
3024
3024
  if (wrapper !== null) try {
@@ -3051,7 +3051,7 @@ function registerResumeCommand(program, version = "0.0.0", projectRoot = process
3051
3051
 
3052
3052
  //#endregion
3053
3053
  //#region src/cli/commands/cancel.ts
3054
- const logger$19 = createLogger("cancel-cmd");
3054
+ const logger$21 = createLogger("cancel-cmd");
3055
3055
  const CANCEL_EXIT_SUCCESS = 0;
3056
3056
  const CANCEL_EXIT_ERROR = 1;
3057
3057
  const CANCEL_EXIT_USAGE_ERROR = 2;
@@ -3148,7 +3148,7 @@ async function runCancelAction(options) {
3148
3148
  } catch (err) {
3149
3149
  const message = err instanceof Error ? err.message : String(err);
3150
3150
  process.stderr.write(`Error: ${message}\n`);
3151
- logger$19.error({ err }, "runCancelAction failed");
3151
+ logger$21.error({ err }, "runCancelAction failed");
3152
3152
  return CANCEL_EXIT_ERROR;
3153
3153
  } finally {
3154
3154
  if (wrapper !== null) try {
@@ -3263,7 +3263,7 @@ function renderFailedTasksJson(tasks) {
3263
3263
 
3264
3264
  //#endregion
3265
3265
  //#region src/cli/commands/retry.ts
3266
- const logger$18 = createLogger("retry-cmd");
3266
+ const logger$20 = createLogger("retry-cmd");
3267
3267
  const RETRY_EXIT_SUCCESS = 0;
3268
3268
  const RETRY_EXIT_PARTIAL_FAILURE = 1;
3269
3269
  const RETRY_EXIT_USAGE_ERROR = 2;
@@ -3368,7 +3368,7 @@ async function runRetryAction(options) {
3368
3368
  } catch (err) {
3369
3369
  const message = err instanceof Error ? err.message : String(err);
3370
3370
  process.stderr.write(`Error: ${message}\n`);
3371
- logger$18.error({ err }, "runRetryAction failed");
3371
+ logger$20.error({ err }, "runRetryAction failed");
3372
3372
  return RETRY_EXIT_USAGE_ERROR;
3373
3373
  } finally {
3374
3374
  if (wrapper !== null) try {
@@ -3497,11 +3497,11 @@ async function runFollowMode(opts) {
3497
3497
  });
3498
3498
  });
3499
3499
  const sigintHandler = () => {
3500
- logger$18.info("SIGINT received — initiating graceful shutdown");
3500
+ logger$20.info("SIGINT received — initiating graceful shutdown");
3501
3501
  taskGraphEngine.cancelAll();
3502
3502
  };
3503
3503
  const sigtermHandler = () => {
3504
- logger$18.info("SIGTERM received — initiating graceful shutdown");
3504
+ logger$20.info("SIGTERM received — initiating graceful shutdown");
3505
3505
  taskGraphEngine.cancelAll();
3506
3506
  };
3507
3507
  process.once("SIGINT", sigintHandler);
@@ -3514,7 +3514,7 @@ async function runFollowMode(opts) {
3514
3514
  } catch (err) {
3515
3515
  const message = err instanceof Error ? err.message : String(err);
3516
3516
  process.stderr.write(`Error: ${message}\n`);
3517
- logger$18.error({ err }, "runFollowMode failed");
3517
+ logger$20.error({ err }, "runFollowMode failed");
3518
3518
  return RETRY_EXIT_USAGE_ERROR;
3519
3519
  } finally {
3520
3520
  try {
@@ -3974,7 +3974,7 @@ function buildMultiAgentInstructionsSection(agentCount) {
3974
3974
 
3975
3975
  //#endregion
3976
3976
  //#region src/modules/plan-generator/plan-generator.ts
3977
- const logger$17 = createLogger("plan-generator");
3977
+ const logger$19 = createLogger("plan-generator");
3978
3978
  /**
3979
3979
  * Wrapper around execFile that immediately closes stdin on the child process.
3980
3980
  * Some CLI tools (e.g. Claude Code) wait for stdin to close before processing
@@ -4151,7 +4151,7 @@ var PlanGenerator = class {
4151
4151
  else {
4152
4152
  const slugified = dep.toLowerCase().replace(/[^a-z0-9]+/g, "-").replace(/^-|-$/g, "").slice(0, 64);
4153
4153
  if (taskKeys.has(slugified)) resolvedDeps.push(slugified);
4154
- else logger$17.warn({
4154
+ else logger$19.warn({
4155
4155
  taskKey,
4156
4156
  dep
4157
4157
  }, `depends_on reference '${dep}' not found in task keys; removing`);
@@ -4898,7 +4898,7 @@ function getLatestPlanVersion(db, planId) {
4898
4898
 
4899
4899
  //#endregion
4900
4900
  //#region src/modules/plan-generator/plan-refiner.ts
4901
- const logger$16 = createLogger("plan-refiner");
4901
+ const logger$18 = createLogger("plan-refiner");
4902
4902
  var PlanRefiner = class {
4903
4903
  db;
4904
4904
  planGenerator;
@@ -4941,7 +4941,7 @@ var PlanRefiner = class {
4941
4941
  newFeedback: feedback,
4942
4942
  availableAgents: this.availableAgents
4943
4943
  });
4944
- logger$16.info({
4944
+ logger$18.info({
4945
4945
  planId,
4946
4946
  currentVersion,
4947
4947
  feedbackRounds: feedbackHistory.length
@@ -4988,7 +4988,7 @@ var PlanRefiner = class {
4988
4988
  newVersion,
4989
4989
  taskCount
4990
4990
  });
4991
- logger$16.info({
4991
+ logger$18.info({
4992
4992
  planId,
4993
4993
  newVersion,
4994
4994
  taskCount
@@ -5070,7 +5070,7 @@ function normalizeForDiff(value) {
5070
5070
 
5071
5071
  //#endregion
5072
5072
  //#region src/cli/commands/plan-refine.ts
5073
- const logger$15 = createLogger("plan-refine-cmd");
5073
+ const logger$17 = createLogger("plan-refine-cmd");
5074
5074
  const REFINE_EXIT_SUCCESS = 0;
5075
5075
  const REFINE_EXIT_ERROR = 1;
5076
5076
  const REFINE_EXIT_USAGE_ERROR = 2;
@@ -5112,7 +5112,7 @@ async function runPlanRefineAction(options) {
5112
5112
  let result;
5113
5113
  try {
5114
5114
  result = await refiner.refine(planId, feedback, (event, payload) => {
5115
- logger$15.info({
5115
+ logger$17.info({
5116
5116
  event,
5117
5117
  payload
5118
5118
  }, "Plan refinement event");
@@ -5155,7 +5155,7 @@ async function runPlanRefineAction(options) {
5155
5155
  } catch (err) {
5156
5156
  const message = err instanceof Error ? err.message : String(err);
5157
5157
  process.stderr.write(`Error: ${message}\n`);
5158
- logger$15.error({ err }, "runPlanRefineAction failed");
5158
+ logger$17.error({ err }, "runPlanRefineAction failed");
5159
5159
  return REFINE_EXIT_ERROR;
5160
5160
  } finally {
5161
5161
  dbWrapper.close();
@@ -5180,7 +5180,7 @@ function registerPlanRefineCommand(planCmd, _version = "0.0.0", projectRoot = pr
5180
5180
 
5181
5181
  //#endregion
5182
5182
  //#region src/cli/commands/plan-diff.ts
5183
- const logger$14 = createLogger("plan-diff-cmd");
5183
+ const logger$16 = createLogger("plan-diff-cmd");
5184
5184
  const DIFF_EXIT_SUCCESS = 0;
5185
5185
  const DIFF_EXIT_ERROR = 1;
5186
5186
  const DIFF_EXIT_NOT_FOUND = 2;
@@ -5223,7 +5223,7 @@ async function runPlanDiffAction(options) {
5223
5223
  } catch (err) {
5224
5224
  const message = err instanceof Error ? err.message : String(err);
5225
5225
  process.stderr.write(`Error: ${message}\n`);
5226
- logger$14.error({ err }, "runPlanDiffAction failed");
5226
+ logger$16.error({ err }, "runPlanDiffAction failed");
5227
5227
  return DIFF_EXIT_ERROR;
5228
5228
  } finally {
5229
5229
  dbWrapper.close();
@@ -5271,7 +5271,7 @@ function registerPlanDiffCommand(planCmd, _version = "0.0.0", projectRoot = proc
5271
5271
 
5272
5272
  //#endregion
5273
5273
  //#region src/cli/commands/plan-rollback.ts
5274
- const logger$13 = createLogger("plan-rollback-cmd");
5274
+ const logger$15 = createLogger("plan-rollback-cmd");
5275
5275
  const ROLLBACK_EXIT_SUCCESS = 0;
5276
5276
  const ROLLBACK_EXIT_ERROR = 1;
5277
5277
  const ROLLBACK_EXIT_USAGE_ERROR = 2;
@@ -5319,7 +5319,7 @@ async function runPlanRollbackAction(options, onEvent) {
5319
5319
  toVersion,
5320
5320
  newVersion
5321
5321
  });
5322
- logger$13.info({
5322
+ logger$15.info({
5323
5323
  planId,
5324
5324
  fromVersion,
5325
5325
  toVersion,
@@ -5360,7 +5360,7 @@ async function runPlanRollbackAction(options, onEvent) {
5360
5360
  } catch (err) {
5361
5361
  const message = err instanceof Error ? err.message : String(err);
5362
5362
  process.stderr.write(`Error: ${message}\n`);
5363
- logger$13.error({ err }, "runPlanRollbackAction failed");
5363
+ logger$15.error({ err }, "runPlanRollbackAction failed");
5364
5364
  return ROLLBACK_EXIT_ERROR;
5365
5365
  } finally {
5366
5366
  dbWrapper.close();
@@ -5554,7 +5554,7 @@ function validatePlan(raw, adapterRegistry, options) {
5554
5554
 
5555
5555
  //#endregion
5556
5556
  //#region src/cli/commands/plan.ts
5557
- const logger$12 = createLogger("plan-cmd");
5557
+ const logger$14 = createLogger("plan-cmd");
5558
5558
  const PLAN_EXIT_SUCCESS = 0;
5559
5559
  const PLAN_EXIT_ERROR = 1;
5560
5560
  const PLAN_EXIT_USAGE_ERROR = 2;
@@ -5698,7 +5698,7 @@ async function runPlanReviewAction(options) {
5698
5698
  }
5699
5699
  const message = err instanceof Error ? err.message : String(err);
5700
5700
  process.stderr.write(`Error: ${message}\n`);
5701
- logger$12.error({ err }, "runPlanReviewAction failed");
5701
+ logger$14.error({ err }, "runPlanReviewAction failed");
5702
5702
  return PLAN_EXIT_ERROR;
5703
5703
  }
5704
5704
  if (dryRun) {
@@ -5724,7 +5724,7 @@ async function runPlanReviewAction(options) {
5724
5724
  if (ext.endsWith(".yaml") || ext.endsWith(".yml")) taskGraph = load(planYaml);
5725
5725
  else taskGraph = JSON.parse(planYaml);
5726
5726
  } catch {
5727
- logger$12.warn("Could not read generated plan file for DB storage");
5727
+ logger$14.warn("Could not read generated plan file for DB storage");
5728
5728
  }
5729
5729
  if (outputFormat === "json") {
5730
5730
  const envelope = {
@@ -6561,6 +6561,129 @@ const PIPELINE_EVENT_METADATA = [
6561
6561
  description: "Milliseconds since last progress event."
6562
6562
  }
6563
6563
  ]
6564
+ },
6565
+ {
6566
+ type: "supervisor:kill",
6567
+ description: "Emitted by the supervisor when it kills a stalled pipeline process tree.",
6568
+ when: "When the supervisor detects a STALLED verdict and staleness exceeds the stall threshold.",
6569
+ fields: [
6570
+ {
6571
+ name: "ts",
6572
+ type: "string",
6573
+ description: "ISO-8601 timestamp generated at emit time."
6574
+ },
6575
+ {
6576
+ name: "run_id",
6577
+ type: "string|null",
6578
+ description: "Pipeline run ID that was killed."
6579
+ },
6580
+ {
6581
+ name: "reason",
6582
+ type: "stall",
6583
+ description: "Reason for the kill — always \"stall\" for threshold-triggered kills."
6584
+ },
6585
+ {
6586
+ name: "staleness_seconds",
6587
+ type: "number",
6588
+ description: "Seconds the pipeline had been stalled."
6589
+ },
6590
+ {
6591
+ name: "pids",
6592
+ type: "number[]",
6593
+ description: "PIDs that were killed (orchestrator + child processes)."
6594
+ }
6595
+ ]
6596
+ },
6597
+ {
6598
+ type: "supervisor:restart",
6599
+ description: "Emitted by the supervisor when it restarts a killed pipeline via auto resume.",
6600
+ when: "Immediately after killing a stalled pipeline, when the restart count is within the max limit.",
6601
+ fields: [
6602
+ {
6603
+ name: "ts",
6604
+ type: "string",
6605
+ description: "ISO-8601 timestamp generated at emit time."
6606
+ },
6607
+ {
6608
+ name: "run_id",
6609
+ type: "string|null",
6610
+ description: "Pipeline run ID being resumed."
6611
+ },
6612
+ {
6613
+ name: "attempt",
6614
+ type: "number",
6615
+ description: "Restart attempt number (1-based)."
6616
+ }
6617
+ ]
6618
+ },
6619
+ {
6620
+ type: "supervisor:abort",
6621
+ description: "Emitted by the supervisor when it exceeds the maximum restart limit and gives up.",
6622
+ when: "When the restart count reaches or exceeds --max-restarts and another stall is detected.",
6623
+ fields: [
6624
+ {
6625
+ name: "ts",
6626
+ type: "string",
6627
+ description: "ISO-8601 timestamp generated at emit time."
6628
+ },
6629
+ {
6630
+ name: "run_id",
6631
+ type: "string|null",
6632
+ description: "Pipeline run ID that was abandoned."
6633
+ },
6634
+ {
6635
+ name: "reason",
6636
+ type: "max_restarts_exceeded",
6637
+ description: "Always \"max_restarts_exceeded\"."
6638
+ },
6639
+ {
6640
+ name: "attempts",
6641
+ type: "number",
6642
+ description: "Number of restart attempts that were made."
6643
+ }
6644
+ ]
6645
+ },
6646
+ {
6647
+ type: "supervisor:summary",
6648
+ description: "Emitted by the supervisor when the pipeline reaches a terminal state.",
6649
+ when: "When the supervisor detects a NO_PIPELINE_RUNNING verdict (completed, failed, or stopped).",
6650
+ fields: [
6651
+ {
6652
+ name: "ts",
6653
+ type: "string",
6654
+ description: "ISO-8601 timestamp generated at emit time."
6655
+ },
6656
+ {
6657
+ name: "run_id",
6658
+ type: "string|null",
6659
+ description: "Pipeline run ID."
6660
+ },
6661
+ {
6662
+ name: "elapsed_seconds",
6663
+ type: "number",
6664
+ description: "Total elapsed seconds from supervisor start to terminal state."
6665
+ },
6666
+ {
6667
+ name: "succeeded",
6668
+ type: "string[]",
6669
+ description: "Story keys that completed successfully."
6670
+ },
6671
+ {
6672
+ name: "failed",
6673
+ type: "string[]",
6674
+ description: "Story keys that failed (non-COMPLETE, non-PENDING phases)."
6675
+ },
6676
+ {
6677
+ name: "escalated",
6678
+ type: "string[]",
6679
+ description: "Story keys that were escalated."
6680
+ },
6681
+ {
6682
+ name: "restarts",
6683
+ type: "number",
6684
+ description: "Number of restart cycles performed by the supervisor."
6685
+ }
6686
+ ]
6564
6687
  }
6565
6688
  ];
6566
6689
  /**
@@ -6778,12 +6901,29 @@ async function resolveMainRepoRoot(cwd = process.cwd()) {
6778
6901
 
6779
6902
  //#endregion
6780
6903
  //#region src/modules/methodology-pack/schemas.ts
6904
+ /**
6905
+ * A reference to a context value to inject into a step prompt.
6906
+ * Sources can be params (runtime parameters) or decisions (from the decision store).
6907
+ */
6908
+ const ContextRefSchema = z.object({
6909
+ placeholder: z.string().min(1),
6910
+ source: z.string().min(1)
6911
+ });
6912
+ /**
6913
+ * A single step within a multi-step phase decomposition.
6914
+ */
6915
+ const StepDefinitionSchema = z.object({
6916
+ name: z.string().min(1),
6917
+ template: z.string().min(1),
6918
+ context: z.array(ContextRefSchema).default([])
6919
+ });
6781
6920
  const PhaseDefinitionSchema = z.object({
6782
6921
  name: z.string().min(1),
6783
6922
  description: z.string().min(1),
6784
6923
  entryGates: z.array(z.string()),
6785
6924
  exitGates: z.array(z.string()),
6786
- artifacts: z.array(z.string())
6925
+ artifacts: z.array(z.string()),
6926
+ steps: z.array(StepDefinitionSchema).optional()
6787
6927
  });
6788
6928
  const PackManifestSchema = z.object({
6789
6929
  name: z.string().min(1),
@@ -7041,7 +7181,7 @@ function truncateToTokens(text, maxTokens) {
7041
7181
 
7042
7182
  //#endregion
7043
7183
  //#region src/modules/context-compiler/context-compiler-impl.ts
7044
- const logger$11 = createLogger("context-compiler");
7184
+ const logger$13 = createLogger("context-compiler");
7045
7185
  /**
7046
7186
  * Fraction of the original token budget that must remain (after required +
7047
7187
  * important sections) before an optional section is included.
@@ -7133,7 +7273,7 @@ var ContextCompilerImpl = class {
7133
7273
  includedParts.push(truncated);
7134
7274
  remainingBudget -= truncatedTokens;
7135
7275
  anyTruncated = true;
7136
- logger$11.warn({
7276
+ logger$13.warn({
7137
7277
  section: section.name,
7138
7278
  originalTokens: tokens,
7139
7279
  budgetTokens: truncatedTokens
@@ -7147,7 +7287,7 @@ var ContextCompilerImpl = class {
7147
7287
  });
7148
7288
  } else {
7149
7289
  anyTruncated = true;
7150
- logger$11.warn({
7290
+ logger$13.warn({
7151
7291
  section: section.name,
7152
7292
  tokens
7153
7293
  }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -7174,7 +7314,7 @@ var ContextCompilerImpl = class {
7174
7314
  } else {
7175
7315
  if (tokens > 0) {
7176
7316
  anyTruncated = true;
7177
- logger$11.warn({
7317
+ logger$13.warn({
7178
7318
  section: section.name,
7179
7319
  tokens,
7180
7320
  budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -7275,7 +7415,17 @@ const DEFAULT_TIMEOUTS = {
7275
7415
  "dev-story": 18e5,
7276
7416
  "code-review": 9e5,
7277
7417
  "minor-fixes": 6e5,
7278
- "major-rework": 9e5
7418
+ "major-rework": 9e5,
7419
+ "analysis-vision": 18e4,
7420
+ "analysis-scope": 18e4,
7421
+ "planning-classification": 18e4,
7422
+ "planning-frs": 24e4,
7423
+ "planning-nfrs": 24e4,
7424
+ "arch-context": 18e4,
7425
+ "arch-decisions": 24e4,
7426
+ "arch-patterns": 24e4,
7427
+ "story-epics": 24e4,
7428
+ "story-stories": 3e5
7279
7429
  };
7280
7430
  /**
7281
7431
  * Default max agentic turns per task type.
@@ -7294,7 +7444,17 @@ const DEFAULT_MAX_TURNS = {
7294
7444
  "major-rework": 50,
7295
7445
  "code-review": 25,
7296
7446
  "create-story": 20,
7297
- "minor-fixes": 25
7447
+ "minor-fixes": 25,
7448
+ "analysis-vision": 8,
7449
+ "analysis-scope": 10,
7450
+ "planning-classification": 8,
7451
+ "planning-frs": 12,
7452
+ "planning-nfrs": 12,
7453
+ "arch-context": 10,
7454
+ "arch-decisions": 15,
7455
+ "arch-patterns": 12,
7456
+ "story-epics": 15,
7457
+ "story-stories": 20
7298
7458
  };
7299
7459
  /**
7300
7460
  * Error thrown when dispatch is attempted on a shutting-down dispatcher.
@@ -7420,7 +7580,7 @@ function parseYamlResult(yamlText, schema) {
7420
7580
 
7421
7581
  //#endregion
7422
7582
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
7423
- const logger$10 = createLogger("agent-dispatch");
7583
+ const logger$12 = createLogger("agent-dispatch");
7424
7584
  const SHUTDOWN_GRACE_MS = 1e4;
7425
7585
  const SHUTDOWN_MAX_WAIT_MS = 3e4;
7426
7586
  const CHARS_PER_TOKEN = 4;
@@ -7489,7 +7649,7 @@ var DispatcherImpl = class {
7489
7649
  resolve: typedResolve,
7490
7650
  reject
7491
7651
  });
7492
- logger$10.debug({
7652
+ logger$12.debug({
7493
7653
  id,
7494
7654
  queueLength: this._queue.length
7495
7655
  }, "Dispatch queued");
@@ -7519,7 +7679,7 @@ var DispatcherImpl = class {
7519
7679
  }
7520
7680
  async shutdown() {
7521
7681
  this._shuttingDown = true;
7522
- logger$10.info({
7682
+ logger$12.info({
7523
7683
  running: this._running.size,
7524
7684
  queued: this._queue.length
7525
7685
  }, "Dispatcher shutting down");
@@ -7552,13 +7712,13 @@ var DispatcherImpl = class {
7552
7712
  }
7553
7713
  }, 50);
7554
7714
  });
7555
- logger$10.info("Dispatcher shutdown complete");
7715
+ logger$12.info("Dispatcher shutdown complete");
7556
7716
  }
7557
7717
  async _startDispatch(id, request, resolve$2) {
7558
7718
  const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
7559
7719
  const adapter = this._adapterRegistry.get(agent);
7560
7720
  if (adapter === void 0) {
7561
- logger$10.warn({
7721
+ logger$12.warn({
7562
7722
  id,
7563
7723
  agent
7564
7724
  }, "No adapter found for agent");
@@ -7602,7 +7762,7 @@ var DispatcherImpl = class {
7602
7762
  });
7603
7763
  const startedAt = Date.now();
7604
7764
  proc.on("error", (err) => {
7605
- logger$10.error({
7765
+ logger$12.error({
7606
7766
  id,
7607
7767
  binary: cmd.binary,
7608
7768
  error: err.message
@@ -7610,7 +7770,7 @@ var DispatcherImpl = class {
7610
7770
  });
7611
7771
  if (proc.stdin !== null) {
7612
7772
  proc.stdin.on("error", (err) => {
7613
- if (err.code !== "EPIPE") logger$10.warn({
7773
+ if (err.code !== "EPIPE") logger$12.warn({
7614
7774
  id,
7615
7775
  error: err.message
7616
7776
  }, "stdin write error");
@@ -7652,7 +7812,7 @@ var DispatcherImpl = class {
7652
7812
  agent,
7653
7813
  taskType
7654
7814
  });
7655
- logger$10.debug({
7815
+ logger$12.debug({
7656
7816
  id,
7657
7817
  agent,
7658
7818
  taskType,
@@ -7669,7 +7829,7 @@ var DispatcherImpl = class {
7669
7829
  dispatchId: id,
7670
7830
  timeoutMs
7671
7831
  });
7672
- logger$10.warn({
7832
+ logger$12.warn({
7673
7833
  id,
7674
7834
  agent,
7675
7835
  taskType,
@@ -7723,7 +7883,7 @@ var DispatcherImpl = class {
7723
7883
  exitCode: code,
7724
7884
  output: stdout
7725
7885
  });
7726
- logger$10.debug({
7886
+ logger$12.debug({
7727
7887
  id,
7728
7888
  agent,
7729
7889
  taskType,
@@ -7749,7 +7909,7 @@ var DispatcherImpl = class {
7749
7909
  error: stderr || `Process exited with code ${String(code)}`,
7750
7910
  exitCode: code
7751
7911
  });
7752
- logger$10.debug({
7912
+ logger$12.debug({
7753
7913
  id,
7754
7914
  agent,
7755
7915
  taskType,
@@ -7801,7 +7961,7 @@ var DispatcherImpl = class {
7801
7961
  const next = this._queue.shift();
7802
7962
  if (next === void 0) return;
7803
7963
  next.handle.status = "running";
7804
- logger$10.debug({
7964
+ logger$12.debug({
7805
7965
  id: next.id,
7806
7966
  queueLength: this._queue.length
7807
7967
  }, "Dequeued dispatch");
@@ -8162,7 +8322,7 @@ function getTokenUsageSummary(db, runId) {
8162
8322
 
8163
8323
  //#endregion
8164
8324
  //#region src/modules/compiled-workflows/prompt-assembler.ts
8165
- const logger$9 = createLogger("compiled-workflows:prompt-assembler");
8325
+ const logger$11 = createLogger("compiled-workflows:prompt-assembler");
8166
8326
  /**
8167
8327
  * Assemble a final prompt from a template and sections map.
8168
8328
  *
@@ -8187,7 +8347,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8187
8347
  tokenCount,
8188
8348
  truncated: false
8189
8349
  };
8190
- logger$9.warn({
8350
+ logger$11.warn({
8191
8351
  tokenCount,
8192
8352
  ceiling: tokenCeiling
8193
8353
  }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -8203,10 +8363,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8203
8363
  const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
8204
8364
  if (targetSectionTokens === 0) {
8205
8365
  contentMap[section.name] = "";
8206
- logger$9.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
8366
+ logger$11.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
8207
8367
  } else {
8208
8368
  contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
8209
- logger$9.warn({
8369
+ logger$11.warn({
8210
8370
  sectionName: section.name,
8211
8371
  targetSectionTokens
8212
8372
  }, "Section truncated to fit token budget");
@@ -8217,7 +8377,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
8217
8377
  }
8218
8378
  if (tokenCount <= tokenCeiling) break;
8219
8379
  }
8220
- if (tokenCount > tokenCeiling) logger$9.warn({
8380
+ if (tokenCount > tokenCeiling) logger$11.warn({
8221
8381
  tokenCount,
8222
8382
  ceiling: tokenCeiling
8223
8383
  }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -8358,7 +8518,7 @@ const CodeReviewResultSchema = z.object({
8358
8518
 
8359
8519
  //#endregion
8360
8520
  //#region src/modules/compiled-workflows/create-story.ts
8361
- const logger$8 = createLogger("compiled-workflows:create-story");
8521
+ const logger$10 = createLogger("compiled-workflows:create-story");
8362
8522
  /**
8363
8523
  * Hard ceiling for the assembled create-story prompt.
8364
8524
  */
@@ -8382,7 +8542,7 @@ const TOKEN_CEILING$2 = 3e3;
8382
8542
  */
8383
8543
  async function runCreateStory(deps, params) {
8384
8544
  const { epicId, storyKey, pipelineRunId } = params;
8385
- logger$8.debug({
8545
+ logger$10.debug({
8386
8546
  epicId,
8387
8547
  storyKey,
8388
8548
  pipelineRunId
@@ -8392,7 +8552,7 @@ async function runCreateStory(deps, params) {
8392
8552
  template = await deps.pack.getPrompt("create-story");
8393
8553
  } catch (err) {
8394
8554
  const error = err instanceof Error ? err.message : String(err);
8395
- logger$8.error({ error }, "Failed to retrieve create-story prompt template");
8555
+ logger$10.error({ error }, "Failed to retrieve create-story prompt template");
8396
8556
  return {
8397
8557
  result: "failed",
8398
8558
  error: `Failed to retrieve prompt template: ${error}`,
@@ -8434,7 +8594,7 @@ async function runCreateStory(deps, params) {
8434
8594
  priority: "important"
8435
8595
  }
8436
8596
  ], TOKEN_CEILING$2);
8437
- logger$8.debug({
8597
+ logger$10.debug({
8438
8598
  tokenCount,
8439
8599
  truncated,
8440
8600
  tokenCeiling: TOKEN_CEILING$2
@@ -8451,7 +8611,7 @@ async function runCreateStory(deps, params) {
8451
8611
  dispatchResult = await handle.result;
8452
8612
  } catch (err) {
8453
8613
  const error = err instanceof Error ? err.message : String(err);
8454
- logger$8.error({
8614
+ logger$10.error({
8455
8615
  epicId,
8456
8616
  storyKey,
8457
8617
  error
@@ -8472,7 +8632,7 @@ async function runCreateStory(deps, params) {
8472
8632
  if (dispatchResult.status === "failed") {
8473
8633
  const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
8474
8634
  const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
8475
- logger$8.warn({
8635
+ logger$10.warn({
8476
8636
  epicId,
8477
8637
  storyKey,
8478
8638
  exitCode: dispatchResult.exitCode
@@ -8484,7 +8644,7 @@ async function runCreateStory(deps, params) {
8484
8644
  };
8485
8645
  }
8486
8646
  if (dispatchResult.status === "timeout") {
8487
- logger$8.warn({
8647
+ logger$10.warn({
8488
8648
  epicId,
8489
8649
  storyKey
8490
8650
  }, "Create-story dispatch timed out");
@@ -8497,7 +8657,7 @@ async function runCreateStory(deps, params) {
8497
8657
  if (dispatchResult.parsed === null) {
8498
8658
  const details = dispatchResult.parseError ?? "No YAML block found in output";
8499
8659
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
8500
- logger$8.warn({
8660
+ logger$10.warn({
8501
8661
  epicId,
8502
8662
  storyKey,
8503
8663
  details,
@@ -8513,7 +8673,7 @@ async function runCreateStory(deps, params) {
8513
8673
  const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
8514
8674
  if (!parseResult.success) {
8515
8675
  const details = parseResult.error.message;
8516
- logger$8.warn({
8676
+ logger$10.warn({
8517
8677
  epicId,
8518
8678
  storyKey,
8519
8679
  details
@@ -8526,7 +8686,7 @@ async function runCreateStory(deps, params) {
8526
8686
  };
8527
8687
  }
8528
8688
  const parsed = parseResult.data;
8529
- logger$8.info({
8689
+ logger$10.info({
8530
8690
  epicId,
8531
8691
  storyKey,
8532
8692
  storyFile: parsed.story_file,
@@ -8548,7 +8708,7 @@ function getImplementationDecisions(deps) {
8548
8708
  try {
8549
8709
  return getDecisionsByPhase(deps.db, "implementation");
8550
8710
  } catch (err) {
8551
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
8711
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
8552
8712
  return [];
8553
8713
  }
8554
8714
  }
@@ -8564,13 +8724,13 @@ function getEpicShard(decisions, epicId, projectRoot) {
8564
8724
  if (projectRoot) {
8565
8725
  const fallback = readEpicShardFromFile(projectRoot, epicId);
8566
8726
  if (fallback) {
8567
- logger$8.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
8727
+ logger$10.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
8568
8728
  return fallback;
8569
8729
  }
8570
8730
  }
8571
8731
  return "";
8572
8732
  } catch (err) {
8573
- logger$8.warn({
8733
+ logger$10.warn({
8574
8734
  epicId,
8575
8735
  error: err instanceof Error ? err.message : String(err)
8576
8736
  }, "Failed to retrieve epic shard");
@@ -8587,7 +8747,7 @@ function getPrevDevNotes(decisions, epicId) {
8587
8747
  if (devNotes.length === 0) return "";
8588
8748
  return devNotes[devNotes.length - 1].value;
8589
8749
  } catch (err) {
8590
- logger$8.warn({
8750
+ logger$10.warn({
8591
8751
  epicId,
8592
8752
  error: err instanceof Error ? err.message : String(err)
8593
8753
  }, "Failed to retrieve prev dev notes");
@@ -8607,13 +8767,13 @@ function getArchConstraints$1(deps) {
8607
8767
  if (deps.projectRoot) {
8608
8768
  const fallback = readArchConstraintsFromFile(deps.projectRoot);
8609
8769
  if (fallback) {
8610
- logger$8.info("Using file-based fallback for architecture constraints (decisions table empty)");
8770
+ logger$10.info("Using file-based fallback for architecture constraints (decisions table empty)");
8611
8771
  return fallback;
8612
8772
  }
8613
8773
  }
8614
8774
  return "";
8615
8775
  } catch (err) {
8616
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
8776
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
8617
8777
  return "";
8618
8778
  }
8619
8779
  }
@@ -8633,7 +8793,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
8633
8793
  const match = pattern.exec(content);
8634
8794
  return match ? match[0].trim() : "";
8635
8795
  } catch (err) {
8636
- logger$8.warn({
8796
+ logger$10.warn({
8637
8797
  epicId,
8638
8798
  error: err instanceof Error ? err.message : String(err)
8639
8799
  }, "File-based epic shard fallback failed");
@@ -8656,7 +8816,7 @@ function readArchConstraintsFromFile(projectRoot) {
8656
8816
  const content = readFileSync$1(archPath, "utf-8");
8657
8817
  return content.slice(0, 1500);
8658
8818
  } catch (err) {
8659
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
8819
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
8660
8820
  return "";
8661
8821
  }
8662
8822
  }
@@ -8669,14 +8829,14 @@ async function getStoryTemplate(deps) {
8669
8829
  try {
8670
8830
  return await deps.pack.getTemplate("story");
8671
8831
  } catch (err) {
8672
- logger$8.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
8832
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
8673
8833
  return "";
8674
8834
  }
8675
8835
  }
8676
8836
 
8677
8837
  //#endregion
8678
8838
  //#region src/modules/compiled-workflows/git-helpers.ts
8679
- const logger$7 = createLogger("compiled-workflows:git-helpers");
8839
+ const logger$9 = createLogger("compiled-workflows:git-helpers");
8680
8840
  /**
8681
8841
  * Capture the full git diff for HEAD (working tree vs current commit).
8682
8842
  *
@@ -8800,7 +8960,7 @@ async function runGitCommand(args, cwd, logLabel) {
8800
8960
  stderr += chunk.toString("utf-8");
8801
8961
  });
8802
8962
  proc.on("error", (err) => {
8803
- logger$7.warn({
8963
+ logger$9.warn({
8804
8964
  label: logLabel,
8805
8965
  cwd,
8806
8966
  error: err.message
@@ -8809,7 +8969,7 @@ async function runGitCommand(args, cwd, logLabel) {
8809
8969
  });
8810
8970
  proc.on("close", (code) => {
8811
8971
  if (code !== 0) {
8812
- logger$7.warn({
8972
+ logger$9.warn({
8813
8973
  label: logLabel,
8814
8974
  cwd,
8815
8975
  code,
@@ -8825,7 +8985,7 @@ async function runGitCommand(args, cwd, logLabel) {
8825
8985
 
8826
8986
  //#endregion
8827
8987
  //#region src/modules/compiled-workflows/dev-story.ts
8828
- const logger$6 = createLogger("compiled-workflows:dev-story");
8988
+ const logger$8 = createLogger("compiled-workflows:dev-story");
8829
8989
  /** Hard token ceiling for the assembled dev-story prompt */
8830
8990
  const TOKEN_CEILING$1 = 24e3;
8831
8991
  /** Default timeout for dev-story dispatches in milliseconds (30 min) */
@@ -8847,7 +9007,7 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
8847
9007
  */
8848
9008
  async function runDevStory(deps, params) {
8849
9009
  const { storyKey, storyFilePath, taskScope, priorFiles } = params;
8850
- logger$6.info({
9010
+ logger$8.info({
8851
9011
  storyKey,
8852
9012
  storyFilePath
8853
9013
  }, "Starting compiled dev-story workflow");
@@ -8889,10 +9049,10 @@ async function runDevStory(deps, params) {
8889
9049
  let template;
8890
9050
  try {
8891
9051
  template = await deps.pack.getPrompt("dev-story");
8892
- logger$6.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
9052
+ logger$8.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
8893
9053
  } catch (err) {
8894
9054
  const error = err instanceof Error ? err.message : String(err);
8895
- logger$6.error({
9055
+ logger$8.error({
8896
9056
  storyKey,
8897
9057
  error
8898
9058
  }, "Failed to retrieve dev-story prompt template");
@@ -8903,14 +9063,14 @@ async function runDevStory(deps, params) {
8903
9063
  storyContent = await readFile$2(storyFilePath, "utf-8");
8904
9064
  } catch (err) {
8905
9065
  if (err.code === "ENOENT") {
8906
- logger$6.error({
9066
+ logger$8.error({
8907
9067
  storyKey,
8908
9068
  storyFilePath
8909
9069
  }, "Story file not found");
8910
9070
  return makeFailureResult("story_file_not_found");
8911
9071
  }
8912
9072
  const error = err instanceof Error ? err.message : String(err);
8913
- logger$6.error({
9073
+ logger$8.error({
8914
9074
  storyKey,
8915
9075
  storyFilePath,
8916
9076
  error
@@ -8918,7 +9078,7 @@ async function runDevStory(deps, params) {
8918
9078
  return makeFailureResult(`story_file_read_error: ${error}`);
8919
9079
  }
8920
9080
  if (storyContent.trim().length === 0) {
8921
- logger$6.error({
9081
+ logger$8.error({
8922
9082
  storyKey,
8923
9083
  storyFilePath
8924
9084
  }, "Story file is empty");
@@ -8930,17 +9090,17 @@ async function runDevStory(deps, params) {
8930
9090
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
8931
9091
  if (testPatternDecisions.length > 0) {
8932
9092
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
8933
- logger$6.debug({
9093
+ logger$8.debug({
8934
9094
  storyKey,
8935
9095
  count: testPatternDecisions.length
8936
9096
  }, "Loaded test patterns from decision store");
8937
9097
  } else {
8938
9098
  testPatternsContent = DEFAULT_VITEST_PATTERNS;
8939
- logger$6.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
9099
+ logger$8.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
8940
9100
  }
8941
9101
  } catch (err) {
8942
9102
  const error = err instanceof Error ? err.message : String(err);
8943
- logger$6.warn({
9103
+ logger$8.warn({
8944
9104
  storyKey,
8945
9105
  error
8946
9106
  }, "Failed to load test patterns — using defaults");
@@ -8983,7 +9143,7 @@ async function runDevStory(deps, params) {
8983
9143
  }
8984
9144
  ];
8985
9145
  const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING$1);
8986
- logger$6.info({
9146
+ logger$8.info({
8987
9147
  storyKey,
8988
9148
  tokenCount,
8989
9149
  ceiling: TOKEN_CEILING$1,
@@ -9002,7 +9162,7 @@ async function runDevStory(deps, params) {
9002
9162
  dispatchResult = await handle.result;
9003
9163
  } catch (err) {
9004
9164
  const error = err instanceof Error ? err.message : String(err);
9005
- logger$6.error({
9165
+ logger$8.error({
9006
9166
  storyKey,
9007
9167
  error
9008
9168
  }, "Dispatch threw an unexpected error");
@@ -9013,11 +9173,11 @@ async function runDevStory(deps, params) {
9013
9173
  output: dispatchResult.tokenEstimate.output
9014
9174
  };
9015
9175
  if (dispatchResult.status === "timeout") {
9016
- logger$6.error({
9176
+ logger$8.error({
9017
9177
  storyKey,
9018
9178
  durationMs: dispatchResult.durationMs
9019
9179
  }, "Dev-story dispatch timed out");
9020
- if (dispatchResult.output.length > 0) logger$6.info({
9180
+ if (dispatchResult.output.length > 0) logger$8.info({
9021
9181
  storyKey,
9022
9182
  partialOutput: dispatchResult.output.slice(0, 500)
9023
9183
  }, "Partial output before timeout");
@@ -9027,12 +9187,12 @@ async function runDevStory(deps, params) {
9027
9187
  };
9028
9188
  }
9029
9189
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
9030
- logger$6.error({
9190
+ logger$8.error({
9031
9191
  storyKey,
9032
9192
  exitCode: dispatchResult.exitCode,
9033
9193
  status: dispatchResult.status
9034
9194
  }, "Dev-story dispatch failed");
9035
- if (dispatchResult.output.length > 0) logger$6.info({
9195
+ if (dispatchResult.output.length > 0) logger$8.info({
9036
9196
  storyKey,
9037
9197
  partialOutput: dispatchResult.output.slice(0, 500)
9038
9198
  }, "Partial output from failed dispatch");
@@ -9044,7 +9204,7 @@ async function runDevStory(deps, params) {
9044
9204
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
9045
9205
  const details = dispatchResult.parseError ?? "parsed result was null";
9046
9206
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
9047
- logger$6.error({
9207
+ logger$8.error({
9048
9208
  storyKey,
9049
9209
  parseError: details,
9050
9210
  rawOutputSnippet: rawSnippet
@@ -9052,12 +9212,12 @@ async function runDevStory(deps, params) {
9052
9212
  let filesModified = [];
9053
9213
  try {
9054
9214
  filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
9055
- if (filesModified.length > 0) logger$6.info({
9215
+ if (filesModified.length > 0) logger$8.info({
9056
9216
  storyKey,
9057
9217
  fileCount: filesModified.length
9058
9218
  }, "Recovered files_modified from git status (YAML fallback)");
9059
9219
  } catch (err) {
9060
- logger$6.warn({
9220
+ logger$8.warn({
9061
9221
  storyKey,
9062
9222
  error: err instanceof Error ? err.message : String(err)
9063
9223
  }, "Failed to recover files_modified from git");
@@ -9074,7 +9234,7 @@ async function runDevStory(deps, params) {
9074
9234
  };
9075
9235
  }
9076
9236
  const parsed = dispatchResult.parsed;
9077
- logger$6.info({
9237
+ logger$8.info({
9078
9238
  storyKey,
9079
9239
  result: parsed.result,
9080
9240
  acMet: parsed.ac_met.length
@@ -9213,7 +9373,7 @@ function extractFilesInScope(storyContent) {
9213
9373
 
9214
9374
  //#endregion
9215
9375
  //#region src/modules/compiled-workflows/code-review.ts
9216
- const logger$5 = createLogger("compiled-workflows:code-review");
9376
+ const logger$7 = createLogger("compiled-workflows:code-review");
9217
9377
  /**
9218
9378
  * Hard token ceiling for the assembled code-review prompt (50,000 tokens).
9219
9379
  * Quality reviews require seeing actual code diffs, not just file names.
@@ -9253,7 +9413,7 @@ function defaultFailResult(error, tokenUsage) {
9253
9413
  async function runCodeReview(deps, params) {
9254
9414
  const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
9255
9415
  const cwd = workingDirectory ?? process.cwd();
9256
- logger$5.debug({
9416
+ logger$7.debug({
9257
9417
  storyKey,
9258
9418
  storyFilePath,
9259
9419
  cwd,
@@ -9264,7 +9424,7 @@ async function runCodeReview(deps, params) {
9264
9424
  template = await deps.pack.getPrompt("code-review");
9265
9425
  } catch (err) {
9266
9426
  const error = err instanceof Error ? err.message : String(err);
9267
- logger$5.error({ error }, "Failed to retrieve code-review prompt template");
9427
+ logger$7.error({ error }, "Failed to retrieve code-review prompt template");
9268
9428
  return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
9269
9429
  input: 0,
9270
9430
  output: 0
@@ -9275,7 +9435,7 @@ async function runCodeReview(deps, params) {
9275
9435
  storyContent = await readFile$2(storyFilePath, "utf-8");
9276
9436
  } catch (err) {
9277
9437
  const error = err instanceof Error ? err.message : String(err);
9278
- logger$5.error({
9438
+ logger$7.error({
9279
9439
  storyFilePath,
9280
9440
  error
9281
9441
  }, "Failed to read story file");
@@ -9295,12 +9455,12 @@ async function runCodeReview(deps, params) {
9295
9455
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
9296
9456
  if (scopedTotal <= TOKEN_CEILING) {
9297
9457
  gitDiffContent = scopedDiff;
9298
- logger$5.debug({
9458
+ logger$7.debug({
9299
9459
  fileCount: filesModified.length,
9300
9460
  tokenCount: scopedTotal
9301
9461
  }, "Using scoped file diff");
9302
9462
  } else {
9303
- logger$5.warn({
9463
+ logger$7.warn({
9304
9464
  estimatedTotal: scopedTotal,
9305
9465
  ceiling: TOKEN_CEILING,
9306
9466
  fileCount: filesModified.length
@@ -9314,7 +9474,7 @@ async function runCodeReview(deps, params) {
9314
9474
  const fullTotal = nonDiffTokens + countTokens(fullDiff);
9315
9475
  if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
9316
9476
  else {
9317
- logger$5.warn({
9477
+ logger$7.warn({
9318
9478
  estimatedTotal: fullTotal,
9319
9479
  ceiling: TOKEN_CEILING
9320
9480
  }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -9352,11 +9512,11 @@ async function runCodeReview(deps, params) {
9352
9512
  }
9353
9513
  ];
9354
9514
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
9355
- if (assembleResult.truncated) logger$5.warn({
9515
+ if (assembleResult.truncated) logger$7.warn({
9356
9516
  storyKey,
9357
9517
  tokenCount: assembleResult.tokenCount
9358
9518
  }, "Code-review prompt truncated to fit token ceiling");
9359
- logger$5.debug({
9519
+ logger$7.debug({
9360
9520
  storyKey,
9361
9521
  tokenCount: assembleResult.tokenCount,
9362
9522
  truncated: assembleResult.truncated
@@ -9374,7 +9534,7 @@ async function runCodeReview(deps, params) {
9374
9534
  dispatchResult = await handle.result;
9375
9535
  } catch (err) {
9376
9536
  const error = err instanceof Error ? err.message : String(err);
9377
- logger$5.error({
9537
+ logger$7.error({
9378
9538
  storyKey,
9379
9539
  error
9380
9540
  }, "Code-review dispatch threw unexpected error");
@@ -9390,7 +9550,7 @@ async function runCodeReview(deps, params) {
9390
9550
  const rawOutput = dispatchResult.output ?? void 0;
9391
9551
  if (dispatchResult.status === "failed") {
9392
9552
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
9393
- logger$5.warn({
9553
+ logger$7.warn({
9394
9554
  storyKey,
9395
9555
  exitCode: dispatchResult.exitCode
9396
9556
  }, "Code-review dispatch failed");
@@ -9400,7 +9560,7 @@ async function runCodeReview(deps, params) {
9400
9560
  };
9401
9561
  }
9402
9562
  if (dispatchResult.status === "timeout") {
9403
- logger$5.warn({ storyKey }, "Code-review dispatch timed out");
9563
+ logger$7.warn({ storyKey }, "Code-review dispatch timed out");
9404
9564
  return {
9405
9565
  ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
9406
9566
  rawOutput
@@ -9408,7 +9568,7 @@ async function runCodeReview(deps, params) {
9408
9568
  }
9409
9569
  if (dispatchResult.parsed === null) {
9410
9570
  const details = dispatchResult.parseError ?? "No YAML block found in output";
9411
- logger$5.warn({
9571
+ logger$7.warn({
9412
9572
  storyKey,
9413
9573
  details
9414
9574
  }, "Code-review output schema validation failed");
@@ -9425,7 +9585,7 @@ async function runCodeReview(deps, params) {
9425
9585
  const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
9426
9586
  if (!parseResult.success) {
9427
9587
  const details = parseResult.error.message;
9428
- logger$5.warn({
9588
+ logger$7.warn({
9429
9589
  storyKey,
9430
9590
  details
9431
9591
  }, "Code-review output failed schema validation");
@@ -9440,13 +9600,13 @@ async function runCodeReview(deps, params) {
9440
9600
  };
9441
9601
  }
9442
9602
  const parsed = parseResult.data;
9443
- if (parsed.agentVerdict !== parsed.verdict) logger$5.info({
9603
+ if (parsed.agentVerdict !== parsed.verdict) logger$7.info({
9444
9604
  storyKey,
9445
9605
  agentVerdict: parsed.agentVerdict,
9446
9606
  pipelineVerdict: parsed.verdict,
9447
9607
  issues: parsed.issues
9448
9608
  }, "Pipeline overrode agent verdict based on issue severities");
9449
- logger$5.info({
9609
+ logger$7.info({
9450
9610
  storyKey,
9451
9611
  verdict: parsed.verdict,
9452
9612
  issues: parsed.issues
@@ -9471,7 +9631,7 @@ function getArchConstraints(deps) {
9471
9631
  if (constraints.length === 0) return "";
9472
9632
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
9473
9633
  } catch (err) {
9474
- logger$5.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
9634
+ logger$7.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
9475
9635
  return "";
9476
9636
  }
9477
9637
  }
@@ -9803,7 +9963,7 @@ function detectConflictGroups(storyKeys, config) {
9803
9963
 
9804
9964
  //#endregion
9805
9965
  //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
9806
- const logger$4 = createLogger("implementation-orchestrator:seed");
9966
+ const logger$6 = createLogger("implementation-orchestrator:seed");
9807
9967
  /** Max chars for the architecture summary seeded into decisions */
9808
9968
  const MAX_ARCH_CHARS = 6e3;
9809
9969
  /** Max chars per epic shard */
@@ -9837,12 +9997,12 @@ function seedMethodologyContext(db, projectRoot) {
9837
9997
  const testCount = seedTestPatterns(db, projectRoot);
9838
9998
  if (testCount === -1) result.skippedCategories.push("test-patterns");
9839
9999
  else result.decisionsCreated += testCount;
9840
- logger$4.info({
10000
+ logger$6.info({
9841
10001
  decisionsCreated: result.decisionsCreated,
9842
10002
  skippedCategories: result.skippedCategories
9843
10003
  }, "Methodology context seeding complete");
9844
10004
  } catch (err) {
9845
- logger$4.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
10005
+ logger$6.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
9846
10006
  }
9847
10007
  return result;
9848
10008
  }
@@ -9886,7 +10046,7 @@ function seedArchitecture(db, projectRoot) {
9886
10046
  });
9887
10047
  count = 1;
9888
10048
  }
9889
- logger$4.debug({ count }, "Seeded architecture decisions");
10049
+ logger$6.debug({ count }, "Seeded architecture decisions");
9890
10050
  return count;
9891
10051
  }
9892
10052
  /**
@@ -9914,7 +10074,7 @@ function seedEpicShards(db, projectRoot) {
9914
10074
  });
9915
10075
  count++;
9916
10076
  }
9917
- logger$4.debug({ count }, "Seeded epic shard decisions");
10077
+ logger$6.debug({ count }, "Seeded epic shard decisions");
9918
10078
  return count;
9919
10079
  }
9920
10080
  /**
@@ -9935,7 +10095,7 @@ function seedTestPatterns(db, projectRoot) {
9935
10095
  value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
9936
10096
  rationale: "Detected from project configuration at orchestrator startup"
9937
10097
  });
9938
- logger$4.debug("Seeded test patterns decision");
10098
+ logger$6.debug("Seeded test patterns decision");
9939
10099
  return 1;
9940
10100
  }
9941
10101
  /**
@@ -10127,7 +10287,7 @@ function createPauseGate() {
10127
10287
  */
10128
10288
  function createImplementationOrchestrator(deps) {
10129
10289
  const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot } = deps;
10130
- const logger$31 = createLogger("implementation-orchestrator");
10290
+ const logger$33 = createLogger("implementation-orchestrator");
10131
10291
  let _state = "IDLE";
10132
10292
  let _startedAt;
10133
10293
  let _completedAt;
@@ -10168,7 +10328,7 @@ function createImplementationOrchestrator(deps) {
10168
10328
  token_usage_json: serialized
10169
10329
  });
10170
10330
  } catch (err) {
10171
- logger$31.warn("Failed to persist orchestrator state", { err });
10331
+ logger$33.warn("Failed to persist orchestrator state", { err });
10172
10332
  }
10173
10333
  }
10174
10334
  function recordProgress() {
@@ -10193,7 +10353,7 @@ function createImplementationOrchestrator(deps) {
10193
10353
  const elapsed = Date.now() - _lastProgressTs;
10194
10354
  if (elapsed >= WATCHDOG_TIMEOUT_MS) {
10195
10355
  for (const [key, s] of _stories) if (s.phase !== "PENDING" && s.phase !== "COMPLETE" && s.phase !== "ESCALATED") {
10196
- logger$31.warn({
10356
+ logger$33.warn({
10197
10357
  storyKey: key,
10198
10358
  phase: s.phase,
10199
10359
  elapsedMs: elapsed
@@ -10229,7 +10389,7 @@ function createImplementationOrchestrator(deps) {
10229
10389
  * exhausted retries the story is ESCALATED.
10230
10390
  */
10231
10391
  async function processStory(storyKey) {
10232
- logger$31.info("Processing story", { storyKey });
10392
+ logger$33.info("Processing story", { storyKey });
10233
10393
  await waitIfPaused();
10234
10394
  if (_state !== "RUNNING") return;
10235
10395
  updateStory(storyKey, {
@@ -10243,7 +10403,7 @@ function createImplementationOrchestrator(deps) {
10243
10403
  const match = files.find((f) => f.startsWith(`${storyKey}-`) && f.endsWith(".md"));
10244
10404
  if (match) {
10245
10405
  storyFilePath = join$1(artifactsDir, match);
10246
- logger$31.info({
10406
+ logger$33.info({
10247
10407
  storyKey,
10248
10408
  storyFilePath
10249
10409
  }, "Found existing story file — skipping create-story");
@@ -10337,7 +10497,7 @@ function createImplementationOrchestrator(deps) {
10337
10497
  try {
10338
10498
  storyContentForAnalysis = await readFile$2(storyFilePath ?? "", "utf-8");
10339
10499
  } catch (err) {
10340
- logger$31.error({
10500
+ logger$33.error({
10341
10501
  storyKey,
10342
10502
  storyFilePath,
10343
10503
  error: err instanceof Error ? err.message : String(err)
@@ -10345,7 +10505,7 @@ function createImplementationOrchestrator(deps) {
10345
10505
  }
10346
10506
  const analysis = analyzeStoryComplexity(storyContentForAnalysis);
10347
10507
  const batches = planTaskBatches(analysis);
10348
- logger$31.info({
10508
+ logger$33.info({
10349
10509
  storyKey,
10350
10510
  estimatedScope: analysis.estimatedScope,
10351
10511
  batchCount: batches.length,
@@ -10363,7 +10523,7 @@ function createImplementationOrchestrator(deps) {
10363
10523
  if (_state !== "RUNNING") break;
10364
10524
  const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
10365
10525
  const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
10366
- logger$31.info({
10526
+ logger$33.info({
10367
10527
  storyKey,
10368
10528
  batchIndex: batch.batchIndex,
10369
10529
  taskCount: batch.taskIds.length
@@ -10386,7 +10546,7 @@ function createImplementationOrchestrator(deps) {
10386
10546
  });
10387
10547
  } catch (batchErr) {
10388
10548
  const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
10389
- logger$31.warn({
10549
+ logger$33.warn({
10390
10550
  storyKey,
10391
10551
  batchIndex: batch.batchIndex,
10392
10552
  error: errMsg
@@ -10406,7 +10566,7 @@ function createImplementationOrchestrator(deps) {
10406
10566
  filesModified: batchFilesModified,
10407
10567
  result: batchResult.result === "success" ? "success" : "failed"
10408
10568
  };
10409
- logger$31.info(batchMetrics, "Batch dev-story metrics");
10569
+ logger$33.info(batchMetrics, "Batch dev-story metrics");
10410
10570
  for (const f of batchFilesModified) allFilesModified.add(f);
10411
10571
  if (batchFilesModified.length > 0) batchFileGroups.push({
10412
10572
  batchIndex: batch.batchIndex,
@@ -10428,13 +10588,13 @@ function createImplementationOrchestrator(deps) {
10428
10588
  })
10429
10589
  });
10430
10590
  } catch (tokenErr) {
10431
- logger$31.warn({
10591
+ logger$33.warn({
10432
10592
  storyKey,
10433
10593
  batchIndex: batch.batchIndex,
10434
10594
  err: tokenErr
10435
10595
  }, "Failed to record batch token usage");
10436
10596
  }
10437
- if (batchResult.result === "failed") logger$31.warn({
10597
+ if (batchResult.result === "failed") logger$33.warn({
10438
10598
  storyKey,
10439
10599
  batchIndex: batch.batchIndex,
10440
10600
  error: batchResult.error
@@ -10466,7 +10626,7 @@ function createImplementationOrchestrator(deps) {
10466
10626
  result: devResult
10467
10627
  });
10468
10628
  persistState();
10469
- if (devResult.result === "failed") logger$31.warn("Dev-story reported failure, proceeding to code review", {
10629
+ if (devResult.result === "failed") logger$33.warn("Dev-story reported failure, proceeding to code review", {
10470
10630
  storyKey,
10471
10631
  error: devResult.error,
10472
10632
  filesModified: devFilesModified.length
@@ -10519,7 +10679,7 @@ function createImplementationOrchestrator(deps) {
10519
10679
  "NEEDS_MAJOR_REWORK": 2
10520
10680
  };
10521
10681
  for (const group of batchFileGroups) {
10522
- logger$31.info({
10682
+ logger$33.info({
10523
10683
  storyKey,
10524
10684
  batchIndex: group.batchIndex,
10525
10685
  fileCount: group.files.length
@@ -10555,7 +10715,7 @@ function createImplementationOrchestrator(deps) {
10555
10715
  rawOutput: lastRawOutput,
10556
10716
  tokenUsage: aggregateTokens
10557
10717
  };
10558
- logger$31.info({
10718
+ logger$33.info({
10559
10719
  storyKey,
10560
10720
  batchCount: batchFileGroups.length,
10561
10721
  verdict: worstVerdict,
@@ -10578,7 +10738,7 @@ function createImplementationOrchestrator(deps) {
10578
10738
  const isPhantomReview = reviewResult.verdict !== "SHIP_IT" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
10579
10739
  if (isPhantomReview && !timeoutRetried) {
10580
10740
  timeoutRetried = true;
10581
- logger$31.warn({
10741
+ logger$33.warn({
10582
10742
  storyKey,
10583
10743
  reviewCycles,
10584
10744
  error: reviewResult.error
@@ -10588,7 +10748,7 @@ function createImplementationOrchestrator(deps) {
10588
10748
  verdict = reviewResult.verdict;
10589
10749
  issueList = reviewResult.issue_list ?? [];
10590
10750
  if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
10591
- logger$31.info({
10751
+ logger$33.info({
10592
10752
  storyKey,
10593
10753
  originalVerdict: verdict,
10594
10754
  issuesBefore: previousIssueList.length,
@@ -10624,7 +10784,7 @@ function createImplementationOrchestrator(deps) {
10624
10784
  if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
10625
10785
  parts.push(`${fileCount} files`);
10626
10786
  parts.push(`${totalTokensK} tokens`);
10627
- logger$31.info({
10787
+ logger$33.info({
10628
10788
  storyKey,
10629
10789
  verdict,
10630
10790
  agentVerdict: reviewResult.agentVerdict
@@ -10676,7 +10836,7 @@ function createImplementationOrchestrator(deps) {
10676
10836
  persistState();
10677
10837
  return;
10678
10838
  }
10679
- logger$31.info({
10839
+ logger$33.info({
10680
10840
  storyKey,
10681
10841
  reviewCycles: finalReviewCycles,
10682
10842
  issueCount: issueList.length
@@ -10726,7 +10886,7 @@ function createImplementationOrchestrator(deps) {
10726
10886
  fixPrompt = assembled.prompt;
10727
10887
  } catch {
10728
10888
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
10729
- logger$31.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
10889
+ logger$33.warn("Failed to assemble auto-approve fix prompt, using fallback", { storyKey });
10730
10890
  }
10731
10891
  const handle = dispatcher.dispatch({
10732
10892
  prompt: fixPrompt,
@@ -10743,9 +10903,9 @@ function createImplementationOrchestrator(deps) {
10743
10903
  output: fixResult.tokenEstimate.output
10744
10904
  } : void 0 }
10745
10905
  });
10746
- if (fixResult.status === "timeout") logger$31.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
10906
+ if (fixResult.status === "timeout") logger$33.warn("Auto-approve fix timed out — approving anyway (issues were minor)", { storyKey });
10747
10907
  } catch (err) {
10748
- logger$31.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
10908
+ logger$33.warn("Auto-approve fix dispatch failed — approving anyway (issues were minor)", {
10749
10909
  storyKey,
10750
10910
  err
10751
10911
  });
@@ -10815,7 +10975,7 @@ function createImplementationOrchestrator(deps) {
10815
10975
  fixPrompt = assembled.prompt;
10816
10976
  } catch {
10817
10977
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
10818
- logger$31.warn("Failed to assemble fix prompt, using fallback", {
10978
+ logger$33.warn("Failed to assemble fix prompt, using fallback", {
10819
10979
  storyKey,
10820
10980
  taskType
10821
10981
  });
@@ -10837,7 +10997,7 @@ function createImplementationOrchestrator(deps) {
10837
10997
  } : void 0 }
10838
10998
  });
10839
10999
  if (fixResult.status === "timeout") {
10840
- logger$31.warn("Fix dispatch timed out — escalating story", {
11000
+ logger$33.warn("Fix dispatch timed out — escalating story", {
10841
11001
  storyKey,
10842
11002
  taskType
10843
11003
  });
@@ -10855,13 +11015,13 @@ function createImplementationOrchestrator(deps) {
10855
11015
  persistState();
10856
11016
  return;
10857
11017
  }
10858
- if (fixResult.status === "failed") logger$31.warn("Fix dispatch failed", {
11018
+ if (fixResult.status === "failed") logger$33.warn("Fix dispatch failed", {
10859
11019
  storyKey,
10860
11020
  taskType,
10861
11021
  exitCode: fixResult.exitCode
10862
11022
  });
10863
11023
  } catch (err) {
10864
- logger$31.warn("Fix dispatch failed, continuing to next review", {
11024
+ logger$33.warn("Fix dispatch failed, continuing to next review", {
10865
11025
  storyKey,
10866
11026
  taskType,
10867
11027
  err
@@ -10914,11 +11074,11 @@ function createImplementationOrchestrator(deps) {
10914
11074
  }
10915
11075
  async function run(storyKeys) {
10916
11076
  if (_state === "RUNNING" || _state === "PAUSED") {
10917
- logger$31.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
11077
+ logger$33.warn("run() called while orchestrator is already running or paused — ignoring", { state: _state });
10918
11078
  return getStatus();
10919
11079
  }
10920
11080
  if (_state === "COMPLETE") {
10921
- logger$31.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
11081
+ logger$33.warn("run() called on a COMPLETE orchestrator — ignoring", { state: _state });
10922
11082
  return getStatus();
10923
11083
  }
10924
11084
  _state = "RUNNING";
@@ -10936,13 +11096,13 @@ function createImplementationOrchestrator(deps) {
10936
11096
  startHeartbeat();
10937
11097
  if (projectRoot !== void 0) {
10938
11098
  const seedResult = seedMethodologyContext(db, projectRoot);
10939
- if (seedResult.decisionsCreated > 0) logger$31.info({
11099
+ if (seedResult.decisionsCreated > 0) logger$33.info({
10940
11100
  decisionsCreated: seedResult.decisionsCreated,
10941
11101
  skippedCategories: seedResult.skippedCategories
10942
11102
  }, "Methodology context seeded from planning artifacts");
10943
11103
  }
10944
11104
  const groups = detectConflictGroups(storyKeys);
10945
- logger$31.info("Orchestrator starting", {
11105
+ logger$33.info("Orchestrator starting", {
10946
11106
  storyCount: storyKeys.length,
10947
11107
  groupCount: groups.length,
10948
11108
  maxConcurrency: config.maxConcurrency
@@ -10954,7 +11114,7 @@ function createImplementationOrchestrator(deps) {
10954
11114
  _state = "FAILED";
10955
11115
  _completedAt = new Date().toISOString();
10956
11116
  persistState();
10957
- logger$31.error("Orchestrator failed with unhandled error", { err });
11117
+ logger$33.error("Orchestrator failed with unhandled error", { err });
10958
11118
  return getStatus();
10959
11119
  }
10960
11120
  stopHeartbeat();
@@ -10981,7 +11141,7 @@ function createImplementationOrchestrator(deps) {
10981
11141
  _pauseGate = createPauseGate();
10982
11142
  _state = "PAUSED";
10983
11143
  eventBus.emit("orchestrator:paused", {});
10984
- logger$31.info("Orchestrator paused");
11144
+ logger$33.info("Orchestrator paused");
10985
11145
  }
10986
11146
  function resume() {
10987
11147
  if (_state !== "PAUSED") return;
@@ -10992,7 +11152,7 @@ function createImplementationOrchestrator(deps) {
10992
11152
  }
10993
11153
  _state = "RUNNING";
10994
11154
  eventBus.emit("orchestrator:resumed", {});
10995
- logger$31.info("Orchestrator resumed");
11155
+ logger$33.info("Orchestrator resumed");
10996
11156
  }
10997
11157
  return {
10998
11158
  run,
@@ -11462,72 +11622,528 @@ function createPhaseOrchestrator(deps) {
11462
11622
  }
11463
11623
 
11464
11624
  //#endregion
11465
- //#region src/modules/phase-orchestrator/phases/schemas.ts
11466
- /**
11467
- * Zod schema for the ProductBrief structure emitted by the analysis agent.
11468
- * Validates that all required fields are present and non-empty.
11469
- */
11470
- const ProductBriefSchema = z.object({
11471
- problem_statement: z.string().min(10),
11472
- target_users: z.array(z.string().min(1)).min(1),
11473
- core_features: z.array(z.string().min(1)).min(1),
11474
- success_metrics: z.array(z.string().min(1)).min(1),
11475
- constraints: z.array(z.string()).default([])
11476
- });
11625
+ //#region src/modules/phase-orchestrator/budget-utils.ts
11477
11626
  /**
11478
- * Zod schema for the full YAML output emitted by the analysis agent.
11479
- * The agent must emit a YAML block with `result` and `product_brief` fields.
11627
+ * Shared utilities for dynamic prompt token budget calculation
11628
+ * and decision summarization.
11629
+ *
11630
+ * Extracted from phases/solutioning.ts to avoid inappropriate dependency
11631
+ * direction (step-runner.ts importing from a phase-specific module).
11480
11632
  */
11481
- const AnalysisOutputSchema = z.object({
11482
- result: z.enum(["success", "failed"]),
11483
- product_brief: ProductBriefSchema
11484
- });
11633
+ /** Absolute maximum prompt tokens (model context safety margin) */
11634
+ const ABSOLUTE_MAX_PROMPT_TOKENS = 12e3;
11635
+ /** Additional tokens per architecture decision injected into story generation prompt */
11636
+ const TOKENS_PER_DECISION = 100;
11637
+ /** Priority order for decision categories when summarizing (higher priority kept first) */
11638
+ const DECISION_CATEGORY_PRIORITY = [
11639
+ "data",
11640
+ "auth",
11641
+ "api",
11642
+ "frontend",
11643
+ "infra",
11644
+ "observability",
11645
+ "ci"
11646
+ ];
11485
11647
  /**
11486
- * Zod schema for a single functional requirement.
11648
+ * Calculate the dynamic prompt token budget based on the number of decisions
11649
+ * that will be injected into the prompt.
11650
+ *
11651
+ * Formula: base_budget + (decision_count * tokens_per_decision)
11652
+ * Capped at ABSOLUTE_MAX_PROMPT_TOKENS.
11653
+ *
11654
+ * @param baseBudget - Base token budget for the phase
11655
+ * @param decisionCount - Number of decisions to inject
11656
+ * @returns Calculated token budget, capped at ABSOLUTE_MAX_PROMPT_TOKENS
11487
11657
  */
11488
- const FunctionalRequirementSchema = z.object({
11489
- description: z.string().min(5),
11490
- priority: z.enum([
11491
- "must",
11492
- "should",
11493
- "could"
11494
- ]).default("must")
11495
- });
11658
+ function calculateDynamicBudget(baseBudget, decisionCount) {
11659
+ const budget = baseBudget + decisionCount * TOKENS_PER_DECISION;
11660
+ return Math.min(budget, ABSOLUTE_MAX_PROMPT_TOKENS);
11661
+ }
11496
11662
  /**
11497
- * Zod schema for a single non-functional requirement.
11663
+ * Summarize architecture decisions into compact key:value one-liners,
11664
+ * dropping rationale and optionally dropping lower-priority categories
11665
+ * to fit within a character budget.
11666
+ *
11667
+ * Strategy:
11668
+ * 1. Sort decisions by priority (known categories first, then alphabetical)
11669
+ * 2. For each decision, produce a compact `key: value` one-liner (drop rationale)
11670
+ * 3. If still over budget, drop lower-priority categories
11671
+ * 4. Return the compact summary string
11672
+ *
11673
+ * @param decisions - Full architecture decisions from the decision store
11674
+ * @param maxChars - Maximum character budget for the summarized output
11675
+ * @returns Compact summary string
11498
11676
  */
11499
- const NonFunctionalRequirementSchema = z.object({
11500
- description: z.string().min(5),
11501
- category: z.string().min(1)
11502
- });
11677
+ function summarizeDecisions(decisions, maxChars) {
11678
+ const sorted = [...decisions].sort((a, b) => {
11679
+ const aCat = (a.category ?? "").toLowerCase();
11680
+ const bCat = (b.category ?? "").toLowerCase();
11681
+ const aIdx = DECISION_CATEGORY_PRIORITY.indexOf(aCat);
11682
+ const bIdx = DECISION_CATEGORY_PRIORITY.indexOf(bCat);
11683
+ const aPri = aIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : aIdx;
11684
+ const bPri = bIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : bIdx;
11685
+ return aPri - bPri;
11686
+ });
11687
+ const header = "## Architecture Decisions (Summarized)";
11688
+ const lines = [header];
11689
+ let currentLength = header.length;
11690
+ for (const d of sorted) {
11691
+ const truncatedValue = d.value.length > 120 ? d.value.slice(0, 117) + "..." : d.value;
11692
+ const line = `- ${d.key}: ${truncatedValue}`;
11693
+ if (currentLength + line.length + 1 > maxChars) break;
11694
+ lines.push(line);
11695
+ currentLength += line.length + 1;
11696
+ }
11697
+ return lines.join("\n");
11698
+ }
11699
+
11700
+ //#endregion
11701
+ //#region src/modules/phase-orchestrator/step-runner.ts
11702
+ const logger$5 = createLogger("step-runner");
11503
11703
  /**
11504
- * Zod schema for a single user story.
11505
- */
11506
- const UserStorySchema = z.object({
11507
- title: z.string().min(3),
11508
- description: z.string().min(5)
11509
- });
11704
+ * Format an array of decision records into a markdown section for injection.
11705
+ *
11706
+ * @param decisions - Decision records from the store
11707
+ * @param sectionTitle - Title for the markdown section
11708
+ * @returns Formatted markdown string
11709
+ */
11710
+ function formatDecisionsForInjection(decisions, sectionTitle) {
11711
+ if (decisions.length === 0) return "";
11712
+ const parts = [];
11713
+ if (sectionTitle) parts.push(`## ${sectionTitle}`);
11714
+ for (const d of decisions) {
11715
+ const rationale = d.rationale ? ` (${d.rationale})` : "";
11716
+ try {
11717
+ const parsed = JSON.parse(d.value);
11718
+ if (Array.isArray(parsed)) {
11719
+ parts.push(`### ${d.key.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase())}`);
11720
+ for (const item of parsed) parts.push(`- ${String(item)}`);
11721
+ } else if (typeof parsed === "object" && parsed !== null) parts.push(`- **${d.key}**: ${JSON.stringify(parsed)}${rationale}`);
11722
+ else parts.push(`- **${d.key}**: ${String(parsed)}${rationale}`);
11723
+ } catch {
11724
+ parts.push(`- **${d.key}**: ${d.value}${rationale}`);
11725
+ }
11726
+ }
11727
+ return parts.join("\n");
11728
+ }
11510
11729
  /**
11511
- * Zod schema for the full YAML output emitted by the planning agent.
11512
- * The agent must emit a YAML block with all PRD fields.
11513
- */
11514
- const PlanningOutputSchema = z.object({
11515
- result: z.enum(["success", "failed"]),
11516
- functional_requirements: z.array(FunctionalRequirementSchema).min(3),
11517
- non_functional_requirements: z.array(NonFunctionalRequirementSchema).min(2),
11518
- user_stories: z.array(UserStorySchema).min(1),
11519
- tech_stack: z.record(z.string(), z.string()),
11520
- domain_model: z.record(z.string(), z.unknown()),
11521
- out_of_scope: z.array(z.string()).default([])
11522
- });
11730
+ * Resolve a single context reference to a string value.
11731
+ *
11732
+ * @param ref - The context reference to resolve
11733
+ * @param deps - Phase dependencies (for DB access)
11734
+ * @param runId - Pipeline run ID
11735
+ * @param params - Runtime parameters map
11736
+ * @param stepOutputs - Map of step name → raw parsed output from prior steps
11737
+ * @returns Resolved string value
11738
+ */
11739
+ function resolveContext(ref, deps, runId, params, stepOutputs) {
11740
+ const { source } = ref;
11741
+ if (source.startsWith("param:")) {
11742
+ const key = source.slice(6);
11743
+ return params[key] ?? "";
11744
+ }
11745
+ if (source.startsWith("decision:")) {
11746
+ const path$1 = source.slice(9);
11747
+ const [phase, category] = path$1.split(".");
11748
+ if (!phase || !category) return "";
11749
+ const decisions = getDecisionsByPhaseForRun(deps.db, runId, phase);
11750
+ const filtered = decisions.filter((d) => d.category === category);
11751
+ return formatDecisionsForInjection(filtered.map((d) => ({
11752
+ key: d.key,
11753
+ value: d.value,
11754
+ rationale: d.rationale ?? null
11755
+ })), category.replace(/-/g, " ").replace(/\b\w/g, (c) => c.toUpperCase()));
11756
+ }
11757
+ if (source.startsWith("step:")) {
11758
+ const stepName = source.slice(5);
11759
+ const output = stepOutputs.get(stepName);
11760
+ if (!output) return "";
11761
+ const parts = [];
11762
+ for (const [key, value] of Object.entries(output)) {
11763
+ if (key === "result") continue;
11764
+ if (Array.isArray(value)) {
11765
+ parts.push(`### ${key.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase())}`);
11766
+ for (const item of value) if (typeof item === "object" && item !== null) parts.push(`- ${JSON.stringify(item)}`);
11767
+ else parts.push(`- ${String(item)}`);
11768
+ } else if (typeof value === "object" && value !== null) parts.push(`- **${key}**: ${JSON.stringify(value)}`);
11769
+ else parts.push(`- **${key}**: ${String(value)}`);
11770
+ }
11771
+ return parts.join("\n");
11772
+ }
11773
+ return "";
11774
+ }
11523
11775
  /**
11524
- * Zod schema for a single architecture decision emitted by the architecture agent.
11776
+ * Execute a sequence of steps, accumulating context and persisting results.
11777
+ *
11778
+ * Halts on the first step that fails. Each step's output is available to
11779
+ * subsequent steps via the stepOutputs map and decision store.
11780
+ *
11781
+ * @param steps - Ordered list of step definitions to execute
11782
+ * @param deps - Shared phase dependencies
11783
+ * @param runId - Pipeline run ID
11784
+ * @param phase - Phase name (for decision store persistence)
11785
+ * @param params - Runtime parameters map (concept, product_brief, etc.)
11786
+ * @returns Aggregated multi-step result
11525
11787
  */
11526
- const ArchitectureDecisionSchema = z.object({
11527
- category: z.string().min(1),
11528
- key: z.string().min(1),
11529
- value: z.string().min(1),
11530
- rationale: z.string().optional()
11788
+ async function runSteps(steps, deps, runId, phase, params) {
11789
+ const stepResults = [];
11790
+ const stepOutputs = new Map();
11791
+ let totalInput = 0;
11792
+ let totalOutput = 0;
11793
+ for (const step of steps) try {
11794
+ const template = await deps.pack.getPrompt(step.name);
11795
+ let prompt = template;
11796
+ for (const ref of step.context) {
11797
+ const value = resolveContext(ref, deps, runId, params, stepOutputs);
11798
+ prompt = prompt.replace(`{{${ref.placeholder}}}`, value);
11799
+ }
11800
+ const allDecisions = getDecisionsByPhaseForRun(deps.db, runId, phase);
11801
+ const budgetTokens = calculateDynamicBudget(4e3, allDecisions.length);
11802
+ let estimatedTokens = Math.ceil(prompt.length / 4);
11803
+ if (estimatedTokens > budgetTokens) {
11804
+ const decisionRefs = step.context.filter((ref) => ref.source.startsWith("decision:"));
11805
+ if (decisionRefs.length > 0) {
11806
+ logger$5.warn({
11807
+ step: step.name,
11808
+ estimatedTokens,
11809
+ budgetTokens
11810
+ }, "Prompt exceeds budget — attempting decision summarization");
11811
+ let summarizedPrompt = template;
11812
+ for (const ref of step.context) {
11813
+ let value;
11814
+ if (ref.source.startsWith("decision:")) {
11815
+ const path$1 = ref.source.slice(9);
11816
+ const [decPhase, decCategory] = path$1.split(".");
11817
+ if (decPhase && decCategory) {
11818
+ const decisions = getDecisionsByPhaseForRun(deps.db, runId, decPhase);
11819
+ const filtered = decisions.filter((d) => d.category === decCategory);
11820
+ const budgetChars = budgetTokens * 4;
11821
+ const availableChars = Math.max(200, Math.floor(budgetChars / decisionRefs.length));
11822
+ value = summarizeDecisions(filtered.map((d) => ({
11823
+ key: d.key,
11824
+ value: d.value,
11825
+ category: d.category
11826
+ })), availableChars);
11827
+ } else value = resolveContext(ref, deps, runId, params, stepOutputs);
11828
+ } else value = resolveContext(ref, deps, runId, params, stepOutputs);
11829
+ summarizedPrompt = summarizedPrompt.replace(`{{${ref.placeholder}}}`, value);
11830
+ }
11831
+ prompt = summarizedPrompt;
11832
+ estimatedTokens = Math.ceil(prompt.length / 4);
11833
+ if (estimatedTokens <= budgetTokens) logger$5.info({
11834
+ step: step.name,
11835
+ estimatedTokens,
11836
+ budgetTokens
11837
+ }, "Decision summarization brought prompt within budget");
11838
+ }
11839
+ if (estimatedTokens > budgetTokens) {
11840
+ const errorMsg = `Step '${step.name}' prompt exceeds token budget after summarization: ${estimatedTokens} tokens (max ${budgetTokens})`;
11841
+ stepResults.push({
11842
+ name: step.name,
11843
+ success: false,
11844
+ parsed: null,
11845
+ error: errorMsg,
11846
+ tokenUsage: {
11847
+ input: 0,
11848
+ output: 0
11849
+ }
11850
+ });
11851
+ return {
11852
+ success: false,
11853
+ steps: stepResults,
11854
+ tokenUsage: {
11855
+ input: totalInput,
11856
+ output: totalOutput
11857
+ },
11858
+ error: errorMsg
11859
+ };
11860
+ }
11861
+ }
11862
+ const handle = deps.dispatcher.dispatch({
11863
+ prompt,
11864
+ agent: "claude-code",
11865
+ taskType: step.taskType,
11866
+ outputSchema: step.outputSchema
11867
+ });
11868
+ const dispatchResult = await handle.result;
11869
+ const tokenUsage = {
11870
+ input: dispatchResult.tokenEstimate.input,
11871
+ output: dispatchResult.tokenEstimate.output
11872
+ };
11873
+ totalInput += tokenUsage.input;
11874
+ totalOutput += tokenUsage.output;
11875
+ if (dispatchResult.status === "timeout") {
11876
+ const errorMsg = `Step '${step.name}' timed out after ${dispatchResult.durationMs}ms`;
11877
+ stepResults.push({
11878
+ name: step.name,
11879
+ success: false,
11880
+ parsed: null,
11881
+ error: errorMsg,
11882
+ tokenUsage
11883
+ });
11884
+ return {
11885
+ success: false,
11886
+ steps: stepResults,
11887
+ tokenUsage: {
11888
+ input: totalInput,
11889
+ output: totalOutput
11890
+ },
11891
+ error: errorMsg
11892
+ };
11893
+ }
11894
+ if (dispatchResult.status === "failed") {
11895
+ const errorMsg = `Step '${step.name}' dispatch failed: ${dispatchResult.parseError ?? dispatchResult.output}`;
11896
+ stepResults.push({
11897
+ name: step.name,
11898
+ success: false,
11899
+ parsed: null,
11900
+ error: errorMsg,
11901
+ tokenUsage
11902
+ });
11903
+ return {
11904
+ success: false,
11905
+ steps: stepResults,
11906
+ tokenUsage: {
11907
+ input: totalInput,
11908
+ output: totalOutput
11909
+ },
11910
+ error: errorMsg
11911
+ };
11912
+ }
11913
+ if (dispatchResult.parsed === null || dispatchResult.parseError !== null) {
11914
+ const errorMsg = `Step '${step.name}' schema validation failed: ${dispatchResult.parseError ?? "No parsed output"}`;
11915
+ stepResults.push({
11916
+ name: step.name,
11917
+ success: false,
11918
+ parsed: null,
11919
+ error: errorMsg,
11920
+ tokenUsage
11921
+ });
11922
+ return {
11923
+ success: false,
11924
+ steps: stepResults,
11925
+ tokenUsage: {
11926
+ input: totalInput,
11927
+ output: totalOutput
11928
+ },
11929
+ error: errorMsg
11930
+ };
11931
+ }
11932
+ const parsed = dispatchResult.parsed;
11933
+ if (parsed.result === "failed") {
11934
+ const errorMsg = `Step '${step.name}' agent reported failure`;
11935
+ stepResults.push({
11936
+ name: step.name,
11937
+ success: false,
11938
+ parsed: null,
11939
+ error: errorMsg,
11940
+ tokenUsage
11941
+ });
11942
+ return {
11943
+ success: false,
11944
+ steps: stepResults,
11945
+ tokenUsage: {
11946
+ input: totalInput,
11947
+ output: totalOutput
11948
+ },
11949
+ error: errorMsg
11950
+ };
11951
+ }
11952
+ stepOutputs.set(step.name, parsed);
11953
+ for (const mapping of step.persist) {
11954
+ const fieldValue = parsed[mapping.field];
11955
+ if (fieldValue === void 0) continue;
11956
+ if (mapping.key === "array" && Array.isArray(fieldValue)) for (const [index, item] of fieldValue.entries()) upsertDecision(deps.db, {
11957
+ pipeline_run_id: runId,
11958
+ phase,
11959
+ category: mapping.category,
11960
+ key: `${step.name}-${index}`,
11961
+ value: typeof item === "object" ? JSON.stringify(item) : String(item)
11962
+ });
11963
+ else if (typeof fieldValue === "object" && fieldValue !== null) upsertDecision(deps.db, {
11964
+ pipeline_run_id: runId,
11965
+ phase,
11966
+ category: mapping.category,
11967
+ key: mapping.key,
11968
+ value: JSON.stringify(fieldValue)
11969
+ });
11970
+ else upsertDecision(deps.db, {
11971
+ pipeline_run_id: runId,
11972
+ phase,
11973
+ category: mapping.category,
11974
+ key: mapping.key,
11975
+ value: String(fieldValue)
11976
+ });
11977
+ }
11978
+ let artifactId;
11979
+ if (step.registerArtifact) {
11980
+ const artifact = registerArtifact(deps.db, {
11981
+ pipeline_run_id: runId,
11982
+ phase,
11983
+ type: step.registerArtifact.type,
11984
+ path: step.registerArtifact.path,
11985
+ summary: step.registerArtifact.summarize(parsed)
11986
+ });
11987
+ artifactId = artifact.id;
11988
+ }
11989
+ const stepResult = {
11990
+ name: step.name,
11991
+ success: true,
11992
+ parsed,
11993
+ error: null,
11994
+ tokenUsage
11995
+ };
11996
+ if (artifactId !== void 0) stepResult.artifactId = artifactId;
11997
+ stepResults.push(stepResult);
11998
+ } catch (err) {
11999
+ const message = err instanceof Error ? err.message : String(err);
12000
+ const errorMsg = `Step '${step.name}' unexpected error: ${message}`;
12001
+ stepResults.push({
12002
+ name: step.name,
12003
+ success: false,
12004
+ parsed: null,
12005
+ error: errorMsg,
12006
+ tokenUsage: {
12007
+ input: 0,
12008
+ output: 0
12009
+ }
12010
+ });
12011
+ return {
12012
+ success: false,
12013
+ steps: stepResults,
12014
+ tokenUsage: {
12015
+ input: totalInput,
12016
+ output: totalOutput
12017
+ },
12018
+ error: errorMsg
12019
+ };
12020
+ }
12021
+ return {
12022
+ success: true,
12023
+ steps: stepResults,
12024
+ tokenUsage: {
12025
+ input: totalInput,
12026
+ output: totalOutput
12027
+ }
12028
+ };
12029
+ }
12030
+
12031
+ //#endregion
12032
+ //#region src/modules/phase-orchestrator/phases/schemas.ts
12033
+ /**
12034
+ * Zod schema for the ProductBrief structure emitted by the analysis agent.
12035
+ * Validates that all required fields are present and non-empty.
12036
+ */
12037
+ const ProductBriefSchema = z.object({
12038
+ problem_statement: z.string().min(10),
12039
+ target_users: z.array(z.string().min(1)).min(1),
12040
+ core_features: z.array(z.string().min(1)).min(1),
12041
+ success_metrics: z.array(z.string().min(1)).min(1),
12042
+ constraints: z.array(z.string()).default([])
12043
+ });
12044
+ /**
12045
+ * Zod schema for the full YAML output emitted by the analysis agent.
12046
+ * The agent must emit a YAML block with `result` and `product_brief` fields.
12047
+ */
12048
+ const AnalysisOutputSchema = z.object({
12049
+ result: z.enum(["success", "failed"]),
12050
+ product_brief: ProductBriefSchema
12051
+ });
12052
+ /**
12053
+ * Step 1 output: Vision & problem analysis.
12054
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12055
+ */
12056
+ const AnalysisVisionOutputSchema = z.object({
12057
+ result: z.enum(["success", "failed"]),
12058
+ problem_statement: z.string().min(10).optional(),
12059
+ target_users: z.array(z.string().min(1)).min(1).optional()
12060
+ });
12061
+ /**
12062
+ * Step 2 output: Scope & features (builds on vision output).
12063
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12064
+ */
12065
+ const AnalysisScopeOutputSchema = z.object({
12066
+ result: z.enum(["success", "failed"]),
12067
+ core_features: z.array(z.string().min(1)).min(1).optional(),
12068
+ success_metrics: z.array(z.string().min(1)).min(1).optional(),
12069
+ constraints: z.array(z.string()).default([])
12070
+ });
12071
+ /**
12072
+ * Zod schema for a single functional requirement.
12073
+ */
12074
+ const FunctionalRequirementSchema = z.object({
12075
+ description: z.string().min(5),
12076
+ priority: z.enum([
12077
+ "must",
12078
+ "should",
12079
+ "could"
12080
+ ]).default("must")
12081
+ });
12082
+ /**
12083
+ * Zod schema for a single non-functional requirement.
12084
+ */
12085
+ const NonFunctionalRequirementSchema = z.object({
12086
+ description: z.string().min(5),
12087
+ category: z.string().min(1)
12088
+ });
12089
+ /**
12090
+ * Zod schema for a single user story.
12091
+ */
12092
+ const UserStorySchema = z.object({
12093
+ title: z.string().min(3),
12094
+ description: z.string().min(5)
12095
+ });
12096
+ /**
12097
+ * Zod schema for the full YAML output emitted by the planning agent.
12098
+ * The agent must emit a YAML block with all PRD fields.
12099
+ */
12100
+ const PlanningOutputSchema = z.object({
12101
+ result: z.enum(["success", "failed"]),
12102
+ functional_requirements: z.array(FunctionalRequirementSchema).min(3),
12103
+ non_functional_requirements: z.array(NonFunctionalRequirementSchema).min(2),
12104
+ user_stories: z.array(UserStorySchema).min(1),
12105
+ tech_stack: z.record(z.string(), z.string()),
12106
+ domain_model: z.record(z.string(), z.unknown()),
12107
+ out_of_scope: z.array(z.string()).default([])
12108
+ });
12109
+ /**
12110
+ * Step 1 output: Project classification & vision.
12111
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12112
+ */
12113
+ const PlanningClassificationOutputSchema = z.object({
12114
+ result: z.enum(["success", "failed"]),
12115
+ project_type: z.string().min(1).optional(),
12116
+ vision: z.string().min(10).optional(),
12117
+ key_goals: z.array(z.string().min(1)).min(1).optional()
12118
+ });
12119
+ /**
12120
+ * Step 2 output: Functional requirements & user stories.
12121
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12122
+ */
12123
+ const PlanningFRsOutputSchema = z.object({
12124
+ result: z.enum(["success", "failed"]),
12125
+ functional_requirements: z.array(FunctionalRequirementSchema).min(3).optional(),
12126
+ user_stories: z.array(UserStorySchema).min(1).optional()
12127
+ });
12128
+ /**
12129
+ * Step 3 output: NFRs, tech stack, domain model, out-of-scope.
12130
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12131
+ */
12132
+ const PlanningNFRsOutputSchema = z.object({
12133
+ result: z.enum(["success", "failed"]),
12134
+ non_functional_requirements: z.array(NonFunctionalRequirementSchema).min(2).optional(),
12135
+ tech_stack: z.record(z.string(), z.string()).optional(),
12136
+ domain_model: z.record(z.string(), z.unknown()).optional(),
12137
+ out_of_scope: z.array(z.string()).default([])
12138
+ });
12139
+ /**
12140
+ * Zod schema for a single architecture decision emitted by the architecture agent.
12141
+ */
12142
+ const ArchitectureDecisionSchema = z.object({
12143
+ category: z.string().min(1),
12144
+ key: z.string().min(1),
12145
+ value: z.string().min(1),
12146
+ rationale: z.string().optional()
11531
12147
  });
11532
12148
  /**
11533
12149
  * Zod schema for a single story definition emitted by the story generation agent.
@@ -11565,6 +12181,26 @@ const StoryGenerationOutputSchema = z.object({
11565
12181
  result: z.enum(["success", "failed"]),
11566
12182
  epics: z.array(EpicDefinitionSchema).min(1)
11567
12183
  });
12184
+ /**
12185
+ * Architecture Step 1 output: Context analysis — initial architecture decisions.
12186
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12187
+ */
12188
+ const ArchContextOutputSchema = z.object({
12189
+ result: z.enum(["success", "failed"]),
12190
+ architecture_decisions: z.array(ArchitectureDecisionSchema).min(1).optional()
12191
+ });
12192
+ /**
12193
+ * Epic Design Step output: Epic structure with FR coverage mapping.
12194
+ * Content fields are optional to allow `{result: 'failed'}` without Zod rejection.
12195
+ */
12196
+ const EpicDesignOutputSchema = z.object({
12197
+ result: z.enum(["success", "failed"]),
12198
+ epics: z.array(z.object({
12199
+ title: z.string().min(3),
12200
+ description: z.string().min(5),
12201
+ fr_coverage: z.array(z.string()).default([])
12202
+ })).min(1).optional()
12203
+ });
11568
12204
 
11569
12205
  //#endregion
11570
12206
  //#region src/modules/phase-orchestrator/phases/analysis.ts
@@ -11590,11 +12226,120 @@ const BRIEF_FIELDS$1 = [
11590
12226
  "constraints"
11591
12227
  ];
11592
12228
  /**
12229
+ * Build step definitions for 2-step analysis decomposition.
12230
+ */
12231
/**
 * Build step definitions for the 2-step analysis decomposition.
 *
 * Step 1 extracts the vision (problem statement, target users) from the raw
 * concept; step 2 derives scope (core features, success metrics, constraints)
 * from the concept plus step 1's output and registers the product-brief artifact.
 */
function buildAnalysisSteps() {
	// Fresh context object per step (avoids sharing a mutable reference).
	const conceptContext = () => ({
		placeholder: "concept",
		source: "param:concept"
	});
	// Persist an output field into the product-brief category under the same key.
	const briefField = (field) => ({
		field,
		category: "product-brief",
		key: field
	});
	const visionStep = {
		name: "analysis-step-1-vision",
		taskType: "analysis-vision",
		outputSchema: AnalysisVisionOutputSchema,
		context: [conceptContext()],
		persist: [briefField("problem_statement"), briefField("target_users")]
	};
	const scopeStep = {
		name: "analysis-step-2-scope",
		taskType: "analysis-scope",
		outputSchema: AnalysisScopeOutputSchema,
		context: [conceptContext(), {
			placeholder: "vision_output",
			source: "step:analysis-step-1-vision"
		}],
		persist: [
			briefField("core_features"),
			briefField("success_metrics"),
			briefField("constraints")
		],
		registerArtifact: {
			type: "product-brief",
			path: "decision-store://analysis/product-brief",
			summarize: (parsed) => {
				const features = parsed.core_features;
				return features ? `${features.length} core features defined` : "Product brief complete";
			}
		}
	};
	return [visionStep, scopeStep];
}
12287
+ /**
12288
+ * Run analysis phase using multi-step decomposition (2 steps).
12289
+ */
12290
/**
 * Run the analysis phase using multi-step decomposition (2 steps).
 *
 * Executes the vision and scope steps via runSteps, assembles the product
 * brief from both step outputs, and reports aggregated token usage. Any
 * thrown error is converted into a `{result: 'failed'}` return value.
 *
 * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
 * @param params - Phase parameters (runId, concept)
 */
async function runAnalysisMultiStep(deps, params) {
	const emptyUsage = {
		input: 0,
		output: 0
	};
	try {
		// Truncate oversized concepts so the prompt stays within budget.
		const concept = params.concept.length > MAX_CONCEPT_CHARS
			? params.concept.slice(0, MAX_CONCEPT_CHARS) + "..."
			: params.concept;
		const outcome = await runSteps(buildAnalysisSteps(), deps, params.runId, "analysis", { concept });
		if (!outcome.success) return {
			result: "failed",
			error: outcome.error ?? "multi_step_failed",
			details: outcome.error ?? "Multi-step analysis failed",
			tokenUsage: outcome.tokenUsage
		};
		const vision = outcome.steps[0]?.parsed;
		const scope = outcome.steps[1]?.parsed;
		if (!vision || !scope) return {
			result: "failed",
			error: "incomplete_steps",
			details: "Not all analysis steps produced output",
			tokenUsage: outcome.tokenUsage
		};
		const response = {
			result: "success",
			product_brief: {
				problem_statement: vision.problem_statement,
				target_users: vision.target_users,
				core_features: scope.core_features,
				success_metrics: scope.success_metrics,
				constraints: scope.constraints ?? []
			},
			tokenUsage: outcome.tokenUsage
		};
		// Attach artifact_id only when the scope step actually registered one.
		const artifactId = outcome.steps[1]?.artifactId;
		if (artifactId !== void 0) response.artifact_id = artifactId;
		return response;
	} catch (err) {
		return {
			result: "failed",
			error: err instanceof Error ? err.message : String(err),
			tokenUsage: emptyUsage
		};
	}
}
12338
+ /**
11593
12339
  * Execute the analysis phase of the BMAD pipeline.
11594
12340
  *
11595
- * Retrieves the compiled analysis prompt, injects the user concept,
11596
- * dispatches to a claude-code agent, validates the output, and persists
11597
- * the product brief to the decision store.
12341
+ * If the manifest defines steps for the analysis phase, uses multi-step
12342
+ * decomposition. Otherwise falls back to the single-dispatch code path.
11598
12343
  *
11599
12344
  * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
11600
12345
  * @param params - Phase parameters (runId, concept)
@@ -11603,6 +12348,8 @@ const BRIEF_FIELDS$1 = [
11603
12348
  async function runAnalysisPhase(deps, params) {
11604
12349
  const { db, pack, dispatcher } = deps;
11605
12350
  const { runId, concept, amendmentContext } = params;
12351
+ const analysisPhase = pack.manifest.phases?.find((p) => p.name === "analysis");
12352
+ if (analysisPhase?.steps && analysisPhase.steps.length > 0 && !amendmentContext) return runAnalysisMultiStep(deps, params);
11606
12353
  const zeroTokenUsage = {
11607
12354
  input: 0,
11608
12355
  output: 0
@@ -11744,14 +12491,202 @@ function formatProductBriefFromDecisions(decisions) {
11744
12491
  }
11745
12492
  parts.push(`### ${fieldLabel}\n${displayValue}`);
11746
12493
  }
11747
- return parts.join("\n\n");
12494
+ return parts.join("\n\n");
12495
+ }
12496
+ /**
12497
+ * Build step definitions for 3-step planning decomposition.
12498
+ */
12499
/**
 * Build step definitions for the 3-step planning decomposition:
 * classification → functional requirements → NFRs/tech-stack, where each
 * later step receives earlier step outputs as context. The final step
 * registers the PRD artifact.
 */
function buildPlanningSteps() {
	// Fresh context objects per step (avoids sharing mutable references).
	const productBriefCtx = () => ({
		placeholder: "product_brief",
		source: "decision:analysis.product-brief"
	});
	const classificationCtx = () => ({
		placeholder: "classification",
		source: "step:planning-step-1-classification"
	});
	// Persist a classification output field under the same key.
	const classificationField = (field) => ({
		field,
		category: "classification",
		key: field
	});
	const classificationStep = {
		name: "planning-step-1-classification",
		taskType: "planning-classification",
		outputSchema: PlanningClassificationOutputSchema,
		context: [productBriefCtx()],
		persist: [
			classificationField("project_type"),
			classificationField("vision"),
			classificationField("key_goals")
		]
	};
	const frsStep = {
		name: "planning-step-2-frs",
		taskType: "planning-frs",
		outputSchema: PlanningFRsOutputSchema,
		context: [productBriefCtx(), classificationCtx()],
		persist: [{
			field: "functional_requirements",
			category: "functional-requirements",
			key: "array"
		}, {
			field: "user_stories",
			category: "user-stories",
			key: "array"
		}]
	};
	const nfrsStep = {
		name: "planning-step-3-nfrs",
		taskType: "planning-nfrs",
		outputSchema: PlanningNFRsOutputSchema,
		context: [productBriefCtx(), classificationCtx(), {
			placeholder: "functional_requirements",
			source: "step:planning-step-2-frs"
		}],
		persist: [{
			field: "non_functional_requirements",
			category: "non-functional-requirements",
			key: "array"
		}, {
			field: "tech_stack",
			category: "tech-stack",
			key: "tech_stack"
		}, {
			field: "domain_model",
			category: "domain-model",
			key: "entities"
		}, {
			field: "out_of_scope",
			category: "out-of-scope",
			key: "items"
		}],
		registerArtifact: {
			type: "prd",
			path: "decision-store://planning/prd",
			summarize: (parsed) => {
				const nfrs = parsed.non_functional_requirements;
				return `Planning complete: ${nfrs?.length ?? 0} NFRs, tech stack defined`;
			}
		}
	};
	return [classificationStep, frsStep, nfrsStep];
}
12599
+ /**
12600
+ * Run planning phase using multi-step decomposition (3 steps).
12601
+ */
12602
/**
 * Run the planning phase using multi-step decomposition (3 steps).
 *
 * Requires product-brief decisions from the analysis phase; executes the
 * classification/FRs/NFRs steps, creates requirement records for each FR and
 * NFR, and reports aggregated token usage. Any thrown error is converted into
 * a `{result: 'failed'}` return value.
 *
 * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
 * @param params - Phase parameters (runId)
 */
async function runPlanningMultiStep(deps, params) {
	const { db } = deps;
	const { runId } = params;
	const emptyUsage = {
		input: 0,
		output: 0
	};
	try {
		// Planning needs an upstream product brief; bail out early when absent.
		const briefDecisions = getDecisionsByPhaseForRun(db, runId, "analysis").filter((d) => d.category === "product-brief");
		if (briefDecisions.length === 0) return {
			result: "failed",
			error: "missing_product_brief",
			details: "No product brief decisions found in the analysis phase.",
			tokenUsage: emptyUsage
		};
		const outcome = await runSteps(buildPlanningSteps(), deps, runId, "planning", {});
		if (!outcome.success) return {
			result: "failed",
			error: outcome.error ?? "multi_step_failed",
			details: outcome.error ?? "Multi-step planning failed",
			tokenUsage: outcome.tokenUsage
		};
		const frsOutput = outcome.steps[1]?.parsed;
		const nfrsOutput = outcome.steps[2]?.parsed;
		if (!frsOutput || !nfrsOutput) return {
			result: "failed",
			error: "incomplete_steps",
			details: "Not all planning steps produced output",
			tokenUsage: outcome.tokenUsage
		};
		const frs = frsOutput.functional_requirements;
		const nfrs = nfrsOutput.non_functional_requirements;
		if (!frs?.length) return {
			result: "failed",
			error: "missing_functional_requirements",
			details: "FRs step did not return functional_requirements",
			tokenUsage: outcome.tokenUsage
		};
		if (!nfrs?.length) return {
			result: "failed",
			error: "missing_non_functional_requirements",
			details: "NFRs step did not return non_functional_requirements",
			tokenUsage: outcome.tokenUsage
		};
		// Record each requirement row; NFRs carry a fixed "should" priority.
		for (const fr of frs) createRequirement(db, {
			pipeline_run_id: runId,
			source: "planning-phase",
			type: "functional",
			description: fr.description,
			priority: fr.priority
		});
		for (const nfr of nfrs) createRequirement(db, {
			pipeline_run_id: runId,
			source: "planning-phase",
			type: "non_functional",
			description: nfr.description,
			priority: "should"
		});
		const response = {
			result: "success",
			requirements_count: frs.length + nfrs.length,
			user_stories_count: frsOutput.user_stories?.length ?? 0,
			tokenUsage: outcome.tokenUsage
		};
		// Attach artifact_id only when the NFRs step actually registered one.
		const artifactId = outcome.steps[2]?.artifactId;
		if (artifactId !== void 0) response.artifact_id = artifactId;
		return response;
	} catch (err) {
		return {
			result: "failed",
			error: err instanceof Error ? err.message : String(err),
			tokenUsage: emptyUsage
		};
	}
}
11749
12685
  /**
11750
12686
  * Execute the planning phase of the BMAD pipeline.
11751
12687
  *
11752
- * Retrieves the compiled planning prompt, injects the product brief from the
11753
- * analysis phase decision store, dispatches to a claude-code agent, validates
11754
- * the output, creates requirement records, and persists planning decisions.
12688
+ * If the manifest defines steps for the planning phase, uses multi-step
12689
+ * decomposition. Otherwise falls back to the single-dispatch code path.
11755
12690
  *
11756
12691
  * @param deps - Shared phase dependencies (db, pack, contextCompiler, dispatcher)
11757
12692
  * @param params - Phase parameters (runId)
@@ -11760,6 +12695,8 @@ function formatProductBriefFromDecisions(decisions) {
11760
12695
  async function runPlanningPhase(deps, params) {
11761
12696
  const { db, pack, dispatcher } = deps;
11762
12697
  const { runId, amendmentContext } = params;
12698
+ const planningPhase = pack.manifest.phases?.find((p) => p.name === "planning");
12699
+ if (planningPhase?.steps && planningPhase.steps.length > 0 && !amendmentContext) return runPlanningMultiStep(deps, params);
11763
12700
  const zeroTokenUsage = {
11764
12701
  input: 0,
11765
12702
  output: 0
@@ -12001,14 +12938,11 @@ function createQualityGate(config) {
12001
12938
 
12002
12939
  //#endregion
12003
12940
  //#region src/modules/phase-orchestrator/phases/solutioning.ts
12941
+ const logger$4 = createLogger("solutioning");
12004
12942
  /** Base token budget for architecture generation (covers template + requirements) */
12005
12943
  const BASE_ARCH_PROMPT_TOKENS = 3e3;
12006
12944
  /** Base token budget for story generation (covers template + requirements + architecture) */
12007
12945
  const BASE_STORY_PROMPT_TOKENS = 4e3;
12008
- /** Additional tokens per architecture decision injected into story generation prompt */
12009
- const TOKENS_PER_DECISION = 100;
12010
- /** Absolute maximum prompt tokens (model context safety margin) */
12011
- const ABSOLUTE_MAX_PROMPT_TOKENS = 12e3;
12012
12946
  /** Placeholder in architecture prompt template */
12013
12947
  const REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
12014
12948
  /** Amendment context framing block prefix */
@@ -12022,58 +12956,6 @@ const STORY_REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
12022
12956
  const STORY_ARCHITECTURE_PLACEHOLDER = "{{architecture_decisions}}";
12023
12957
  /** Gap analysis placeholder used in retry prompt */
12024
12958
  const GAP_ANALYSIS_PLACEHOLDER = "{{gap_analysis}}";
12025
- /** Priority order for decision categories when summarizing (higher priority kept first) */
12026
- const DECISION_CATEGORY_PRIORITY = [
12027
- "data",
12028
- "auth",
12029
- "api",
12030
- "frontend",
12031
- "infra",
12032
- "observability",
12033
- "ci"
12034
- ];
12035
- /**
12036
- * Calculate the dynamic prompt token budget based on the number of decisions
12037
- * that will be injected into the prompt.
12038
- *
12039
- * @param baseBudget - Base token budget for the phase
12040
- * @param decisionCount - Number of decisions to inject
12041
- * @returns Calculated token budget, capped at ABSOLUTE_MAX_PROMPT_TOKENS
12042
- */
12043
- function calculateDynamicBudget(baseBudget, decisionCount) {
12044
- const budget = baseBudget + decisionCount * TOKENS_PER_DECISION;
12045
- return Math.min(budget, ABSOLUTE_MAX_PROMPT_TOKENS);
12046
- }
12047
- /**
12048
- * Summarize architecture decisions into compact key:value one-liners,
12049
- * dropping rationale and optionally dropping lower-priority categories
12050
- * to fit within a character budget.
12051
- *
12052
- * @param decisions - Full architecture decisions from the decision store
12053
- * @param maxChars - Maximum character budget for the summarized output
12054
- * @returns Compact summary string
12055
- */
12056
- function summarizeDecisions(decisions, maxChars) {
12057
- const sorted = [...decisions].sort((a, b) => {
12058
- const aCat = (a.category ?? "").toLowerCase();
12059
- const bCat = (b.category ?? "").toLowerCase();
12060
- const aIdx = DECISION_CATEGORY_PRIORITY.indexOf(aCat);
12061
- const bIdx = DECISION_CATEGORY_PRIORITY.indexOf(bCat);
12062
- const aPri = aIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : aIdx;
12063
- const bPri = bIdx === -1 ? DECISION_CATEGORY_PRIORITY.length : bIdx;
12064
- return aPri - bPri;
12065
- });
12066
- const lines = ["## Architecture Decisions (Summarized)"];
12067
- let currentLength = lines[0].length;
12068
- for (const d of sorted) {
12069
- const truncatedValue = d.value.length > 120 ? d.value.slice(0, 117) + "..." : d.value;
12070
- const line = `- ${d.key}: ${truncatedValue}`;
12071
- if (currentLength + line.length + 1 > maxChars) break;
12072
- lines.push(line);
12073
- currentLength += line.length + 1;
12074
- }
12075
- return lines.join("\n");
12076
- }
12077
12959
  /**
12078
12960
  * Format functional and non-functional requirements from the planning phase
12079
12961
  * into a compact text block suitable for prompt injection.
@@ -12415,6 +13297,211 @@ async function runReadinessCheck(deps, runId) {
12415
13297
  };
12416
13298
  }
12417
13299
  /**
13300
+ * Build step definitions for 3-step architecture decomposition.
13301
+ */
13302
/**
 * Build step definitions for the 3-step architecture decomposition:
 * context analysis → concrete decisions → patterns. All three steps share
 * the ArchContextOutputSchema shape and persist into the "architecture"
 * decision category; the final step registers the architecture artifact.
 */
function buildArchitectureSteps() {
	// Fresh objects per step (avoids sharing mutable references).
	const requirementsCtx = () => ({
		placeholder: "requirements",
		source: "decision:planning.functional-requirements"
	});
	const persistDecisions = () => [{
		field: "architecture_decisions",
		category: "architecture",
		key: "array"
	}];
	const contextStep = {
		name: "architecture-step-1-context",
		taskType: "arch-context",
		outputSchema: ArchContextOutputSchema,
		context: [requirementsCtx(), {
			placeholder: "nfr",
			source: "decision:planning.non-functional-requirements"
		}],
		persist: persistDecisions()
	};
	const decisionsStep = {
		name: "architecture-step-2-decisions",
		taskType: "arch-decisions",
		outputSchema: ArchContextOutputSchema,
		context: [requirementsCtx(), {
			placeholder: "starter_decisions",
			source: "step:architecture-step-1-context"
		}],
		persist: persistDecisions()
	};
	const patternsStep = {
		name: "architecture-step-3-patterns",
		taskType: "arch-patterns",
		outputSchema: ArchContextOutputSchema,
		context: [{
			placeholder: "architecture_decisions",
			source: "decision:solutioning.architecture"
		}],
		persist: persistDecisions(),
		registerArtifact: {
			type: "architecture",
			path: "decision-store://solutioning/architecture",
			summarize: (parsed) => {
				const decisions = parsed.architecture_decisions;
				return `${decisions?.length ?? 0} pattern decisions (multi-step)`;
			}
		}
	};
	return [contextStep, decisionsStep, patternsStep];
}
13362
+ /**
13363
+ * Run architecture generation using multi-step decomposition (3 steps).
13364
+ */
13365
/**
 * Run architecture generation using multi-step decomposition (3 steps).
 *
 * After the steps complete, the authoritative decision list is re-read from
 * the decision store (not from step outputs) so all three steps' persisted
 * decisions are included. Returns `{error}` on failure, otherwise
 * `{decisions, artifactId, tokenUsage}`.
 */
async function runArchitectureGenerationMultiStep(deps, params) {
	const outcome = await runSteps(buildArchitectureSteps(), deps, params.runId, "solutioning", {});
	if (!outcome.success) return {
		error: outcome.error ?? "multi_step_arch_failed",
		tokenUsage: outcome.tokenUsage
	};
	// Decision values are persisted as JSON strings; fall back to the raw row
	// columns when the value is not valid JSON.
	const toDecision = (row) => {
		try {
			const parsed = JSON.parse(row.value);
			return {
				category: parsed.category ?? row.category,
				key: parsed.key ?? row.key,
				value: parsed.value ?? row.value,
				rationale: parsed.rationale ?? row.rationale ?? ""
			};
		} catch {
			return {
				category: row.category,
				key: row.key,
				value: row.value,
				rationale: row.rationale ?? ""
			};
		}
	};
	const decisions = getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning").filter((row) => row.category === "architecture").map(toDecision);
	// The artifact is registered by the last step (patterns).
	const lastStep = outcome.steps[outcome.steps.length - 1];
	return {
		decisions,
		artifactId: lastStep?.artifactId ?? "",
		tokenUsage: outcome.tokenUsage
	};
}
13398
+ /**
13399
+ * Build step definitions for 2-step story decomposition.
13400
+ */
13401
/**
 * Build step definitions for the 2-step story decomposition:
 * epic design (with FR coverage) → full story generation. The second step
 * persists nothing directly (its output is written by the caller) but
 * registers the stories artifact.
 */
function buildStorySteps() {
	// Fresh objects per step (avoids sharing mutable references).
	const requirementsCtx = () => ({
		placeholder: "requirements",
		source: "decision:planning.functional-requirements"
	});
	const architectureCtx = () => ({
		placeholder: "architecture_decisions",
		source: "decision:solutioning.architecture"
	});
	const epicsStep = {
		name: "stories-step-1-epics",
		taskType: "story-epics",
		outputSchema: EpicDesignOutputSchema,
		context: [requirementsCtx(), architectureCtx()],
		persist: [{
			field: "epics",
			category: "epic-design",
			key: "array"
		}]
	};
	const storiesStep = {
		name: "stories-step-2-stories",
		taskType: "story-stories",
		outputSchema: StoryGenerationOutputSchema,
		context: [{
			placeholder: "epic_structure",
			source: "step:stories-step-1-epics"
		}, requirementsCtx(), architectureCtx()],
		persist: [],
		registerArtifact: {
			type: "stories",
			path: "decision-store://solutioning/stories",
			summarize: (parsed) => {
				const epics = parsed.epics;
				const totalStories = epics?.reduce((sum, e) => sum + (e.stories?.length ?? 0), 0) ?? 0;
				return `${epics?.length ?? 0} epics, ${totalStories} stories (multi-step)`;
			}
		}
	};
	return [epicsStep, storiesStep];
}
13448
+ /**
13449
+ * Run story generation using multi-step decomposition (2 steps).
13450
+ */
13451
/**
 * Run story generation using multi-step decomposition (2 steps).
 *
 * Persists every epic and story as a decision (epic first, then its stories),
 * then mirrors every story into the requirements table. Returns `{error}` on
 * failure, otherwise `{epics, artifactId, tokenUsage}`.
 */
async function runStoryGenerationMultiStep(deps, params) {
	const outcome = await runSteps(buildStorySteps(), deps, params.runId, "solutioning", {});
	if (!outcome.success) return {
		error: outcome.error ?? "multi_step_story_failed",
		tokenUsage: outcome.tokenUsage
	};
	const storyStep = outcome.steps.find((s) => s.name === "stories-step-2-stories");
	const storyOutput = storyStep?.parsed;
	if (!storyOutput || !storyOutput.epics) return {
		error: "Story generation step produced no epics",
		tokenUsage: outcome.tokenUsage
	};
	const epics = storyOutput.epics;
	// Persist each epic (keyed epic-1, epic-2, …) followed by its stories.
	for (let epicIdx = 0; epicIdx < epics.length; epicIdx++) {
		const epic = epics[epicIdx];
		upsertDecision(deps.db, {
			pipeline_run_id: params.runId,
			phase: "solutioning",
			category: "epics",
			key: `epic-${epicIdx + 1}`,
			value: JSON.stringify({
				title: epic.title,
				description: epic.description
			})
		});
		for (const story of epic.stories) upsertDecision(deps.db, {
			pipeline_run_id: params.runId,
			phase: "solutioning",
			category: "stories",
			key: story.key,
			value: JSON.stringify({
				key: story.key,
				title: story.title,
				description: story.description,
				ac: story.acceptance_criteria,
				priority: story.priority
			})
		});
	}
	// After all decisions are written, create one requirement row per story.
	for (const epic of epics) for (const story of epic.stories) createRequirement(deps.db, {
		pipeline_run_id: params.runId,
		source: "solutioning-phase",
		type: "functional",
		description: `${story.title}: ${story.description}`,
		priority: story.priority
	});
	return {
		epics,
		artifactId: storyStep?.artifactId ?? "",
		tokenUsage: outcome.tokenUsage
	};
}
13504
+ /**
12418
13505
  * Execute the solutioning phase of the BMAD pipeline.
12419
13506
  *
12420
13507
  * Orchestrates the two-phase dispatch strategy:
@@ -12436,12 +13523,20 @@ async function runSolutioningPhase(deps, params) {
12436
13523
  let totalInput = 0;
12437
13524
  let totalOutput = 0;
12438
13525
  try {
13526
+ const solutioningPhase = deps.pack.manifest.phases?.find((p) => p.name === "solutioning");
13527
+ const hasSteps = solutioningPhase?.steps && solutioningPhase.steps.length > 0 && !params.amendmentContext;
12439
13528
  const existingArchArtifact = getArtifactByTypeForRun(deps.db, params.runId, "solutioning", "architecture");
12440
13529
  let archResult;
12441
13530
  if (existingArchArtifact) {
12442
13531
  const existingDecisions = getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning").filter((d) => d.category === "architecture");
13532
+ logger$4.info({
13533
+ runId: params.runId,
13534
+ artifactId: existingArchArtifact.id,
13535
+ decisionCount: existingDecisions.length
13536
+ }, "Architecture artifact already exists — skipping architecture sub-phase, transitioning to story generation");
12443
13537
  archResult = {
12444
13538
  decisions: existingDecisions.map((d) => ({
13539
+ category: d.category,
12445
13540
  key: d.key,
12446
13541
  value: d.value,
12447
13542
  rationale: d.rationale ?? ""
@@ -12452,7 +13547,8 @@ async function runSolutioningPhase(deps, params) {
12452
13547
  output: 0
12453
13548
  }
12454
13549
  };
12455
- } else archResult = await runArchitectureGeneration(deps, params);
13550
+ } else if (hasSteps) archResult = await runArchitectureGenerationMultiStep(deps, params);
13551
+ else archResult = await runArchitectureGeneration(deps, params);
12456
13552
  totalInput += archResult.tokenUsage.input;
12457
13553
  totalOutput += archResult.tokenUsage.output;
12458
13554
  if ("error" in archResult) return {
@@ -12464,7 +13560,12 @@ async function runSolutioningPhase(deps, params) {
12464
13560
  output: totalOutput
12465
13561
  }
12466
13562
  };
12467
- const storyResult = await runStoryGeneration(deps, params);
13563
+ logger$4.info({
13564
+ runId: params.runId,
13565
+ decisionCount: archResult.decisions.length,
13566
+ mode: hasSteps ? "multi-step" : "single-dispatch"
13567
+ }, "Architecture sub-phase complete — transitioning to story generation");
13568
+ const storyResult = hasSteps ? await runStoryGenerationMultiStep(deps, params) : await runStoryGeneration(deps, params);
12468
13569
  totalInput += storyResult.tokenUsage.input;
12469
13570
  totalOutput += storyResult.tokenUsage.output;
12470
13571
  if ("error" in storyResult) return {
@@ -14815,6 +15916,228 @@ async function runAutoHealth(options) {
14815
15916
  }
14816
15917
  }
14817
15918
  /**
15919
+ * Fetch pipeline health data as a structured object without any stdout side-effects.
15920
+ * Used by runAutoSupervisor to poll health without formatting overhead.
15921
+ *
15922
+ * Returns a NO_PIPELINE_RUNNING health object for all graceful "no data" cases
15923
+ * (missing DB, missing run, terminal run status). Throws only on unexpected errors.
15924
+ */
15925
async function getAutoHealthData(options) {
	const { runId, projectRoot } = options;
	// The health DB lives under the main repo root (not a worktree).
	const dbRoot = await resolveMainRepoRoot(projectRoot);
	const dbPath = join(dbRoot, ".substrate", "substrate.db");
	// Sentinel returned for every graceful "no data" case: missing DB,
	// missing run, or a run in a terminal status.
	const NO_PIPELINE = {
		verdict: "NO_PIPELINE_RUNNING",
		run_id: null,
		status: null,
		current_phase: null,
		staleness_seconds: 0,
		last_activity: "",
		process: {
			orchestrator_pid: null,
			child_pids: [],
			zombies: []
		},
		stories: {
			active: 0,
			completed: 0,
			escalated: 0,
			details: {}
		}
	};
	if (!existsSync(dbPath)) return NO_PIPELINE;
	const dbWrapper = new DatabaseWrapper(dbPath);
	try {
		dbWrapper.open();
		const db = dbWrapper.db;
		// Explicit runId wins; otherwise inspect the most recent run.
		let run;
		if (runId !== void 0) run = getPipelineRunById(db, runId);
		else run = getLatestRun(db);
		if (run === void 0) return NO_PIPELINE;
		// Staleness = seconds since the run row was last touched.
		const updatedAt = new Date(run.updated_at);
		const stalenessSeconds = Math.round((Date.now() - updatedAt.getTime()) / 1e3);
		let storyDetails = {};
		let active = 0;
		let completed = 0;
		let escalated = 0;
		// NOTE(review): per-story state is read out of `token_usage_json` —
		// the column name appears historical; confirm this is where the
		// orchestrator stores its `stories` map. Parse errors are deliberately
		// swallowed (health must degrade gracefully, never throw here).
		try {
			if (run.token_usage_json) {
				const state = JSON.parse(run.token_usage_json);
				if (state.stories) for (const [key, s] of Object.entries(state.stories)) {
					storyDetails[key] = {
						phase: s.phase,
						review_cycles: s.reviewCycles
					};
					// Anything that is neither COMPLETE, ESCALATED nor PENDING counts as active.
					if (s.phase === "COMPLETE") completed++;
					else if (s.phase === "ESCALATED") escalated++;
					else if (s.phase !== "PENDING") active++;
				}
			}
		} catch {}
		const processInfo = inspectProcessTree();
		// Verdict: a "running" run is STALLED when zombies exist, when the row
		// has been untouched for >600s (hard-coded floor, independent of any
		// caller-supplied threshold), or when the orchestrator is alive with
		// active stories but no child workers; otherwise HEALTHY. Dangling-else
		// note: the inner if/else chain binds as a unit, and the trailing
		// `else if` pairs with the OUTER `if (run.status === "running")`.
		let verdict = "NO_PIPELINE_RUNNING";
		if (run.status === "running") if (processInfo.zombies.length > 0) verdict = "STALLED";
		else if (stalenessSeconds > 600) verdict = "STALLED";
		else if (processInfo.orchestrator_pid !== null && processInfo.child_pids.length === 0 && active > 0) verdict = "STALLED";
		else verdict = "HEALTHY";
		else if (run.status === "completed" || run.status === "failed" || run.status === "stopped") verdict = "NO_PIPELINE_RUNNING";
		return {
			verdict,
			run_id: run.id,
			status: run.status,
			current_phase: run.current_phase,
			staleness_seconds: stalenessSeconds,
			last_activity: run.updated_at,
			process: processInfo,
			stories: {
				active,
				completed,
				escalated,
				details: storyDetails
			}
		};
	} finally {
		// Always release the DB handle; close errors are non-fatal.
		try {
			dbWrapper.close();
		} catch {}
	}
}
16005
/**
 * Default production wiring for the supervisor's injectable dependencies.
 * Callers of runAutoSupervisor may override any member via its `deps` argument.
 */
function defaultSupervisorDeps() {
	const killPid = (pid, signal) => {
		process.kill(pid, signal);
	};
	const sleep = (ms) => new Promise((wake) => setTimeout(wake, ms));
	return {
		getHealth: getAutoHealthData,
		killPid,
		resumePipeline: runAutoResume,
		sleep
	};
}
16015
+ /**
16016
+ * Run the pipeline supervisor — a long-running watchdog that polls pipeline health
16017
+ * and automatically kills and restarts stalled pipelines.
16018
+ *
16019
+ * State machine: POLLING → (stall detected) → KILLING → RESTARTING → POLLING
16020
+ *
16021
+ * Exit codes:
16022
+ * 0 — pipeline reached terminal state with no failures
16023
+ * 1 — pipeline completed with failures or escalations
16024
+ * 2 — max restarts exceeded (safety valve triggered)
16025
+ */
16026
async function runAutoSupervisor(options, deps = {}) {
	const { pollInterval, stallThreshold, maxRestarts, outputFormat, projectRoot, runId, pack } = options;
	// Injected deps override the production defaults (test seam).
	const { getHealth, killPid, resumePipeline, sleep } = {
		...defaultSupervisorDeps(),
		...deps
	};
	let restartCount = 0;
	const startTime = Date.now();
	// JSON mode: one timestamped event object per line on stdout.
	function emitEvent$1(event) {
		if (outputFormat === "json") {
			const stamped = {
				...event,
				ts: new Date().toISOString()
			};
			process.stdout.write(JSON.stringify(stamped) + "\n");
		}
	}
	// Human mode: plain log lines on stdout.
	function log(message) {
		if (outputFormat === "human") process.stdout.write(message + "\n");
	}
	// Main watchdog loop: poll → (terminal? summarize+exit) → (stalled? kill+restart) → sleep.
	while (true) {
		const health = await getHealth({
			runId,
			projectRoot
		});
		const ts = new Date().toISOString();
		log(`[${ts}] Health: ${health.verdict} | staleness=${health.staleness_seconds}s | stories: active=${health.stories.active} completed=${health.stories.completed} escalated=${health.stories.escalated}`);
		// Terminal state: emit summary and exit 0 (clean) or 1 (any failures).
		if (health.verdict === "NO_PIPELINE_RUNNING") {
			const elapsedSeconds = Math.round((Date.now() - startTime) / 1e3);
			const succeeded = Object.entries(health.stories.details).filter(([, s]) => s.phase === "COMPLETE").map(([k]) => k);
			// "failed" = any non-COMPLETE, non-PENDING phase (this includes ESCALATED).
			const failed = Object.entries(health.stories.details).filter(([, s]) => s.phase !== "COMPLETE" && s.phase !== "PENDING").map(([k]) => k);
			const escalated = Object.entries(health.stories.details).filter(([, s]) => s.phase === "ESCALATED").map(([k]) => k);
			emitEvent$1({
				type: "supervisor:summary",
				run_id: health.run_id,
				elapsed_seconds: elapsedSeconds,
				succeeded,
				failed,
				escalated,
				restarts: restartCount
			});
			log(`\nPipeline reached terminal state. Elapsed: ${elapsedSeconds}s | succeeded: ${succeeded.length} | failed: ${failed.length} | restarts: ${restartCount}`);
			return failed.length > 0 ? 1 : 0;
		}
		// Stall path: SIGTERM, grace period, SIGKILL, verify death, then restart.
		if (health.staleness_seconds >= stallThreshold) {
			const pids = [...health.process.orchestrator_pid !== null ? [health.process.orchestrator_pid] : [], ...health.process.child_pids];
			emitEvent$1({
				type: "supervisor:kill",
				run_id: health.run_id,
				reason: "stall",
				staleness_seconds: health.staleness_seconds,
				pids
			});
			log(`Supervisor: Stall confirmed (${health.staleness_seconds}s ≥ ${stallThreshold}s threshold). Killing PIDs: ${pids.join(", ") || "none"}`);
			// Polite shutdown first; individual kill failures are ignored
			// (process may already be gone).
			for (const pid of pids) try {
				killPid(pid, "SIGTERM");
			} catch {}
			await sleep(5e3);
			for (const pid of pids) try {
				killPid(pid, "SIGKILL");
			} catch {}
			if (pids.length > 0) {
				// Verify death: signal 0 probes existence; up to 5 × 1s checks.
				// Uses process.kill directly (not the injected killPid).
				let allDead = false;
				for (let attempt = 0; attempt < 5; attempt++) {
					await sleep(1e3);
					allDead = pids.every((pid) => {
						try {
							process.kill(pid, 0);
							return false;
						} catch {
							return true;
						}
					});
					if (allDead) break;
				}
				if (!allDead) log(`Supervisor: Warning: Some PIDs may still be alive after SIGKILL`);
			}
			// Safety valve: refuse to restart beyond maxRestarts (exit code 2).
			if (restartCount >= maxRestarts) {
				emitEvent$1({
					type: "supervisor:abort",
					run_id: health.run_id,
					reason: "max_restarts_exceeded",
					attempts: restartCount
				});
				log(`Supervisor: Max restarts (${maxRestarts}) exceeded. Aborting.`);
				return 2;
			}
			restartCount++;
			emitEvent$1({
				type: "supervisor:restart",
				run_id: health.run_id,
				attempt: restartCount
			});
			log(`Supervisor: Restarting pipeline (attempt ${restartCount}/${maxRestarts})`);
			// Fire-and-forget: the resumed pipeline runs concurrently while the
			// supervisor keeps polling; rejections are logged, never thrown.
			// NOTE(review): staleness is derived from the run row's updated_at —
			// if the resumed pipeline has not touched the row by the next poll,
			// the ≥ stallThreshold branch may fire again immediately; confirm
			// runAutoResume bumps updated_at promptly.
			resumePipeline({
				runId: health.run_id ?? void 0,
				outputFormat,
				projectRoot,
				concurrency: 3,
				pack
			}).catch((err) => {
				const message = err instanceof Error ? err.message : String(err);
				log(`Supervisor: Resume error: ${message}`);
				if (outputFormat === "json") process.stderr.write(JSON.stringify({
					type: "supervisor:error",
					reason: "resume_failed",
					message,
					ts: new Date().toISOString()
				}) + "\n");
			});
		}
		await sleep(pollInterval * 1e3);
	}
}
16140
+ /**
14818
16141
  * Detect and apply supersessions after a phase completes in an amendment run.
14819
16142
  *
14820
16143
  * Compares new decisions from the amendment run for the given phase against
@@ -15191,6 +16514,19 @@ function registerAutoCommand(program, _version = "0.0.0", projectRoot = process.
15191
16514
  });
15192
16515
  process.exitCode = exitCode;
15193
16516
  });
16517
+ auto.command("supervisor").description("Monitor a pipeline run and automatically recover from stalls").option("--poll-interval <seconds>", "Health poll interval in seconds", (v) => parseInt(v, 10), 60).option("--stall-threshold <seconds>", "Staleness in seconds before killing a stalled pipeline", (v) => parseInt(v, 10), 600).option("--max-restarts <n>", "Maximum automatic restarts before aborting", (v) => parseInt(v, 10), 3).option("--run-id <id>", "Pipeline run ID to monitor (defaults to latest)").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
16518
+ const outputFormat = opts.outputFormat === "json" ? "json" : "human";
16519
+ const exitCode = await runAutoSupervisor({
16520
+ pollInterval: opts.pollInterval,
16521
+ stallThreshold: opts.stallThreshold,
16522
+ maxRestarts: opts.maxRestarts,
16523
+ runId: opts.runId,
16524
+ pack: opts.pack,
16525
+ outputFormat,
16526
+ projectRoot: opts.projectRoot
16527
+ });
16528
+ process.exitCode = exitCode;
16529
+ });
15194
16530
  auto.command("amend").description("Run an amendment pipeline against a completed run and an existing run").option("--concept <text>", "Amendment concept description (inline)").option("--concept-file <path>", "Path to concept file").option("--run-id <id>", "Parent run ID (defaults to latest completed run)").option("--stop-after <phase>", "Stop pipeline after this phase completes").option("--from <phase>", "Start pipeline from this phase").option("--pack <name>", "Methodology pack name", "bmad").option("--project-root <path>", "Project root directory", projectRoot).option("--output-format <format>", "Output format: human (default) or json", "human").action(async (opts) => {
15195
16531
  const exitCode = await runAmendCommand({
15196
16532
  concept: opts.concept,