substrate-ai 0.19.8 → 0.19.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,7 +1,7 @@
1
1
  import { BMAD_BASELINE_TOKENS_FULL, DoltMergeConflict, FileStateStore, STOP_AFTER_VALID_PHASES, STORY_KEY_PATTERN, VALID_PHASES, WorkGraphRepository, __commonJS, __require, __toESM, buildPipelineStatusOutput, createDatabaseAdapter, detectCycles, formatOutput, formatPipelineSummary, formatTokenTelemetry, inspectProcessTree, parseDbTimestampAsUtc, resolveMainRepoRoot, validateStoryKey } from "./health-DJgGZhW-.js";
2
2
  import { createLogger } from "./logger-KeHncl-f.js";
3
3
  import { TypedEventBusImpl, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-CElYrONe.js";
4
- import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-adzGUKPc.js";
4
+ import { ADVISORY_NOTES, Categorizer, ConsumerAnalyzer, DEFAULT_GLOBAL_SETTINGS, DispatcherImpl, DoltClient, ESCALATION_DIAGNOSIS, EXPERIMENT_RESULT, EfficiencyScorer, IngestionServer, LogTurnAnalyzer, OPERATIONAL_FINDING, Recommender, RoutingRecommender, RoutingResolver, RoutingTelemetry, RoutingTokenAccumulator, RoutingTuner, STORY_METRICS, STORY_OUTCOME, SubstrateConfigSchema, TEST_EXPANSION_FINDING, TEST_PLAN, TelemetryNormalizer, TelemetryPipeline, TurnAnalyzer, addTokenUsage, aggregateTokenUsageForRun, aggregateTokenUsageForStory, callLLM, createConfigSystem, createDatabaseAdapter$1, createDecision, createPipelineRun, createRequirement, detectInterfaceChanges, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getStoryMetricsForRun, getTokenUsageSummary, initSchema, listRequirements, loadModelRoutingConfig, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision, writeRunMetrics, writeStoryMetrics } from "./dist-adzGUKPc.js";
5
5
  import { basename, dirname, extname, join } from "path";
6
6
  import { access, readFile, readdir, stat } from "fs/promises";
7
7
  import { EventEmitter } from "node:events";
@@ -9,7 +9,7 @@ import yaml from "js-yaml";
9
9
  import * as actualFS from "node:fs";
10
10
  import { accessSync, existsSync, mkdirSync, readFileSync, readdirSync, rmSync, unwatchFile, watchFile, writeFileSync } from "node:fs";
11
11
  import { exec, execFile, execSync, spawn } from "node:child_process";
12
- import path, { dirname as dirname$1, extname as extname$1, join as join$1, posix, resolve as resolve$1, win32 } from "node:path";
12
+ import path, { dirname as dirname$1, extname as extname$1, isAbsolute, join as join$1, posix, resolve as resolve$1, win32 } from "node:path";
13
13
  import { tmpdir } from "node:os";
14
14
  import { createHash, randomUUID } from "node:crypto";
15
15
  import { z } from "zod";
@@ -1950,7 +1950,7 @@ function truncateToTokens(text, maxTokens) {
1950
1950
 
1951
1951
  //#endregion
1952
1952
  //#region src/modules/context-compiler/context-compiler-impl.ts
1953
- const logger$20 = createLogger("context-compiler");
1953
+ const logger$21 = createLogger("context-compiler");
1954
1954
  /**
1955
1955
  * Fraction of the original token budget that must remain (after required +
1956
1956
  * important sections) before an optional section is included.
@@ -2011,7 +2011,7 @@ var ContextCompilerImpl = class {
2011
2011
  }
2012
2012
  _applyExclusionFilter(text, sectionName) {
2013
2013
  for (const excludedPath of this._excludedPaths) if (text.includes(excludedPath)) {
2014
- logger$20.warn({
2014
+ logger$21.warn({
2015
2015
  sectionName,
2016
2016
  excludedPath
2017
2017
  }, "ContextCompiler: section excluded — contains path from exclusion list");
@@ -2069,7 +2069,7 @@ var ContextCompilerImpl = class {
2069
2069
  includedParts.push(truncated);
2070
2070
  remainingBudget -= truncatedTokens;
2071
2071
  anyTruncated = true;
2072
- logger$20.warn({
2072
+ logger$21.warn({
2073
2073
  section: section.name,
2074
2074
  originalTokens: tokens,
2075
2075
  budgetTokens: truncatedTokens
@@ -2083,7 +2083,7 @@ var ContextCompilerImpl = class {
2083
2083
  });
2084
2084
  } else {
2085
2085
  anyTruncated = true;
2086
- logger$20.warn({
2086
+ logger$21.warn({
2087
2087
  section: section.name,
2088
2088
  tokens
2089
2089
  }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -2110,7 +2110,7 @@ var ContextCompilerImpl = class {
2110
2110
  } else {
2111
2111
  if (tokens > 0) {
2112
2112
  anyTruncated = true;
2113
- logger$20.warn({
2113
+ logger$21.warn({
2114
2114
  section: section.name,
2115
2115
  tokens,
2116
2116
  budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -2204,8 +2204,8 @@ var GrammarLoader = class {
2204
2204
  _extensionMap;
2205
2205
  _cache = new Map();
2206
2206
  _unavailable = false;
2207
- constructor(logger$21) {
2208
- this._logger = logger$21;
2207
+ constructor(logger$22) {
2208
+ this._logger = logger$22;
2209
2209
  this._extensionMap = new Map([
2210
2210
  [".ts", "tree-sitter-typescript/typescript"],
2211
2211
  [".tsx", "tree-sitter-typescript/tsx"],
@@ -2291,9 +2291,9 @@ const ERR_REPO_MAP_GIT_FAILED = "ERR_REPO_MAP_GIT_FAILED";
2291
2291
  var SymbolParser = class {
2292
2292
  _grammarLoader;
2293
2293
  _logger;
2294
- constructor(grammarLoader, logger$21) {
2294
+ constructor(grammarLoader, logger$22) {
2295
2295
  this._grammarLoader = grammarLoader;
2296
- this._logger = logger$21;
2296
+ this._logger = logger$22;
2297
2297
  }
2298
2298
  async parseFile(filePath) {
2299
2299
  const ext$1 = extname$1(filePath);
@@ -2438,9 +2438,9 @@ async function computeFileHash(filePath) {
2438
2438
  var DoltSymbolRepository = class {
2439
2439
  _client;
2440
2440
  _logger;
2441
- constructor(client, logger$21) {
2441
+ constructor(client, logger$22) {
2442
2442
  this._client = client;
2443
- this._logger = logger$21;
2443
+ this._logger = logger$22;
2444
2444
  }
2445
2445
  /**
2446
2446
  * Atomically replace all symbols for filePath.
@@ -2646,11 +2646,11 @@ var RepoMapStorage = class {
2646
2646
  _metaRepo;
2647
2647
  _gitClient;
2648
2648
  _logger;
2649
- constructor(symbolRepo, metaRepo, gitClient, logger$21) {
2649
+ constructor(symbolRepo, metaRepo, gitClient, logger$22) {
2650
2650
  this._symbolRepo = symbolRepo;
2651
2651
  this._metaRepo = metaRepo;
2652
2652
  this._gitClient = gitClient;
2653
- this._logger = logger$21;
2653
+ this._logger = logger$22;
2654
2654
  }
2655
2655
  /**
2656
2656
  * Returns true if the file's current content hash differs from the stored hash.
@@ -2767,8 +2767,8 @@ function runGit(args, cwd) {
2767
2767
  */
2768
2768
  var GitClient = class {
2769
2769
  _logger;
2770
- constructor(logger$21) {
2771
- this._logger = logger$21;
2770
+ constructor(logger$22) {
2771
+ this._logger = logger$22;
2772
2772
  }
2773
2773
  /**
2774
2774
  * Returns the current HEAD commit SHA.
@@ -4124,9 +4124,9 @@ var RepoMapQueryEngine = class {
4124
4124
  repo;
4125
4125
  logger;
4126
4126
  telemetry;
4127
- constructor(repo, logger$21, telemetry) {
4127
+ constructor(repo, logger$22, telemetry) {
4128
4128
  this.repo = repo;
4129
- this.logger = logger$21;
4129
+ this.logger = logger$22;
4130
4130
  this.telemetry = telemetry;
4131
4131
  }
4132
4132
  async query(q) {
@@ -4346,9 +4346,9 @@ var RepoMapFormatter = class {
4346
4346
  var RepoMapTelemetry = class {
4347
4347
  _telemetry;
4348
4348
  _logger;
4349
- constructor(telemetry, logger$21) {
4349
+ constructor(telemetry, logger$22) {
4350
4350
  this._telemetry = telemetry;
4351
- this._logger = logger$21;
4351
+ this._logger = logger$22;
4352
4352
  }
4353
4353
  /**
4354
4354
  * Emit a `repo_map.query` span.
@@ -4373,9 +4373,9 @@ var RepoMapTelemetry = class {
4373
4373
  var RepoMapModule = class {
4374
4374
  _metaRepo;
4375
4375
  _logger;
4376
- constructor(metaRepo, logger$21) {
4376
+ constructor(metaRepo, logger$22) {
4377
4377
  this._metaRepo = metaRepo;
4378
- this._logger = logger$21;
4378
+ this._logger = logger$22;
4379
4379
  }
4380
4380
  /**
4381
4381
  * Check whether the stored repo-map is stale relative to the current HEAD commit.
@@ -4419,9 +4419,9 @@ var RepoMapModule = class {
4419
4419
  var RepoMapInjector = class {
4420
4420
  _queryEngine;
4421
4421
  _logger;
4422
- constructor(queryEngine, logger$21) {
4422
+ constructor(queryEngine, logger$22) {
4423
4423
  this._queryEngine = queryEngine;
4424
- this._logger = logger$21;
4424
+ this._logger = logger$22;
4425
4425
  }
4426
4426
  /**
4427
4427
  * Build repo-map context by extracting file references from the story content,
@@ -4502,7 +4502,7 @@ const DEFAULT_TIMEOUTS = {
4502
4502
 
4503
4503
  //#endregion
4504
4504
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
4505
- const logger$19 = createLogger("agent-dispatch");
4505
+ const logger$20 = createLogger("agent-dispatch");
4506
4506
  /**
4507
4507
  * Create a new Dispatcher instance.
4508
4508
  *
@@ -4646,7 +4646,7 @@ function runBuildVerification(options) {
4646
4646
  let cmd;
4647
4647
  if (verifyCommand === void 0) {
4648
4648
  const detection = detectPackageManager(projectRoot);
4649
- logger$19.info({
4649
+ logger$20.info({
4650
4650
  packageManager: detection.packageManager,
4651
4651
  lockfile: detection.lockfile,
4652
4652
  resolvedCommand: detection.command
@@ -4658,7 +4658,7 @@ function runBuildVerification(options) {
4658
4658
  const filters = deriveTurboFilters(changedFiles, projectRoot);
4659
4659
  if (filters.length > 0) {
4660
4660
  cmd = `${cmd} ${filters.join(" ")}`;
4661
- logger$19.info({
4661
+ logger$20.info({
4662
4662
  filters,
4663
4663
  originalCmd: options.verifyCommand ?? "(auto-detected)"
4664
4664
  }, "Build verification: scoped turbo build to affected packages");
@@ -4694,7 +4694,7 @@ function runBuildVerification(options) {
4694
4694
  };
4695
4695
  const missingScriptPattern = /Missing script[:\s]|No script found|Command "build" not found/i;
4696
4696
  if (missingScriptPattern.test(combinedOutput)) {
4697
- logger$19.warn("Build script not found — skipping pre-flight (greenfield repo)");
4697
+ logger$20.warn("Build script not found — skipping pre-flight (greenfield repo)");
4698
4698
  return {
4699
4699
  status: "skipped",
4700
4700
  exitCode,
@@ -4878,7 +4878,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
4878
4878
 
4879
4879
  //#endregion
4880
4880
  //#region src/modules/compiled-workflows/prompt-assembler.ts
4881
- const logger$18 = createLogger("compiled-workflows:prompt-assembler");
4881
+ const logger$19 = createLogger("compiled-workflows:prompt-assembler");
4882
4882
  /**
4883
4883
  * Assemble a final prompt from a template and sections map.
4884
4884
  *
@@ -4903,7 +4903,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4903
4903
  tokenCount,
4904
4904
  truncated: false
4905
4905
  };
4906
- logger$18.warn({
4906
+ logger$19.warn({
4907
4907
  tokenCount,
4908
4908
  ceiling: tokenCeiling
4909
4909
  }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -4919,10 +4919,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4919
4919
  const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
4920
4920
  if (targetSectionTokens === 0) {
4921
4921
  contentMap[section.name] = "";
4922
- logger$18.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
4922
+ logger$19.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
4923
4923
  } else {
4924
4924
  contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
4925
- logger$18.warn({
4925
+ logger$19.warn({
4926
4926
  sectionName: section.name,
4927
4927
  targetSectionTokens
4928
4928
  }, "Section truncated to fit token budget");
@@ -4933,7 +4933,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4933
4933
  }
4934
4934
  if (tokenCount <= tokenCeiling) break;
4935
4935
  }
4936
- if (tokenCount > tokenCeiling) logger$18.warn({
4936
+ if (tokenCount > tokenCeiling) logger$19.warn({
4937
4937
  tokenCount,
4938
4938
  ceiling: tokenCeiling
4939
4939
  }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -5235,7 +5235,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
5235
5235
 
5236
5236
  //#endregion
5237
5237
  //#region src/modules/compiled-workflows/create-story.ts
5238
- const logger$17 = createLogger("compiled-workflows:create-story");
5238
+ const logger$18 = createLogger("compiled-workflows:create-story");
5239
5239
  /**
5240
5240
  * Execute the compiled create-story workflow.
5241
5241
  *
@@ -5255,13 +5255,13 @@ const logger$17 = createLogger("compiled-workflows:create-story");
5255
5255
  */
5256
5256
  async function runCreateStory(deps, params) {
5257
5257
  const { epicId, storyKey, pipelineRunId } = params;
5258
- logger$17.debug({
5258
+ logger$18.debug({
5259
5259
  epicId,
5260
5260
  storyKey,
5261
5261
  pipelineRunId
5262
5262
  }, "Starting create-story workflow");
5263
5263
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
5264
- logger$17.info({
5264
+ logger$18.info({
5265
5265
  workflow: "create-story",
5266
5266
  ceiling: TOKEN_CEILING,
5267
5267
  source: tokenCeilingSource
@@ -5271,7 +5271,7 @@ async function runCreateStory(deps, params) {
5271
5271
  template = await deps.pack.getPrompt("create-story");
5272
5272
  } catch (err) {
5273
5273
  const error = err instanceof Error ? err.message : String(err);
5274
- logger$17.error({ error }, "Failed to retrieve create-story prompt template");
5274
+ logger$18.error({ error }, "Failed to retrieve create-story prompt template");
5275
5275
  return {
5276
5276
  result: "failed",
5277
5277
  error: `Failed to retrieve prompt template: ${error}`,
@@ -5313,7 +5313,7 @@ async function runCreateStory(deps, params) {
5313
5313
  priority: "important"
5314
5314
  }
5315
5315
  ], TOKEN_CEILING);
5316
- logger$17.debug({
5316
+ logger$18.debug({
5317
5317
  tokenCount,
5318
5318
  truncated,
5319
5319
  tokenCeiling: TOKEN_CEILING
@@ -5333,7 +5333,7 @@ async function runCreateStory(deps, params) {
5333
5333
  dispatchResult = await handle.result;
5334
5334
  } catch (err) {
5335
5335
  const error = err instanceof Error ? err.message : String(err);
5336
- logger$17.error({
5336
+ logger$18.error({
5337
5337
  epicId,
5338
5338
  storyKey,
5339
5339
  error
@@ -5354,7 +5354,7 @@ async function runCreateStory(deps, params) {
5354
5354
  if (dispatchResult.status === "failed") {
5355
5355
  const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
5356
5356
  const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
5357
- logger$17.warn({
5357
+ logger$18.warn({
5358
5358
  epicId,
5359
5359
  storyKey,
5360
5360
  exitCode: dispatchResult.exitCode,
@@ -5367,7 +5367,7 @@ async function runCreateStory(deps, params) {
5367
5367
  };
5368
5368
  }
5369
5369
  if (dispatchResult.status === "timeout") {
5370
- logger$17.warn({
5370
+ logger$18.warn({
5371
5371
  epicId,
5372
5372
  storyKey
5373
5373
  }, "Create-story dispatch timed out");
@@ -5380,7 +5380,7 @@ async function runCreateStory(deps, params) {
5380
5380
  if (dispatchResult.parsed === null) {
5381
5381
  const details = dispatchResult.parseError ?? "No YAML block found in output";
5382
5382
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
5383
- logger$17.warn({
5383
+ logger$18.warn({
5384
5384
  epicId,
5385
5385
  storyKey,
5386
5386
  details,
@@ -5396,7 +5396,7 @@ async function runCreateStory(deps, params) {
5396
5396
  const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
5397
5397
  if (!parseResult.success) {
5398
5398
  const details = parseResult.error.message;
5399
- logger$17.warn({
5399
+ logger$18.warn({
5400
5400
  epicId,
5401
5401
  storyKey,
5402
5402
  details
@@ -5409,7 +5409,7 @@ async function runCreateStory(deps, params) {
5409
5409
  };
5410
5410
  }
5411
5411
  const parsed = parseResult.data;
5412
- logger$17.info({
5412
+ logger$18.info({
5413
5413
  epicId,
5414
5414
  storyKey,
5415
5415
  storyFile: parsed.story_file,
@@ -5431,7 +5431,7 @@ async function getImplementationDecisions(deps) {
5431
5431
  try {
5432
5432
  return await getDecisionsByPhase(deps.db, "implementation");
5433
5433
  } catch (err) {
5434
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
5434
+ logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
5435
5435
  return [];
5436
5436
  }
5437
5437
  }
@@ -5479,7 +5479,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5479
5479
  if (storyKey) {
5480
5480
  const perStoryShard = decisions.find((d) => d.category === "epic-shard" && d.key === storyKey);
5481
5481
  if (perStoryShard?.value) {
5482
- logger$17.debug({
5482
+ logger$18.debug({
5483
5483
  epicId,
5484
5484
  storyKey
5485
5485
  }, "Found per-story epic shard (direct lookup)");
@@ -5492,13 +5492,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5492
5492
  if (storyKey) {
5493
5493
  const storySection = extractStorySection(shardContent, storyKey);
5494
5494
  if (storySection) {
5495
- logger$17.debug({
5495
+ logger$18.debug({
5496
5496
  epicId,
5497
5497
  storyKey
5498
5498
  }, "Extracted per-story section from epic shard (pre-37-0 fallback)");
5499
5499
  return storySection;
5500
5500
  }
5501
- logger$17.debug({
5501
+ logger$18.debug({
5502
5502
  epicId,
5503
5503
  storyKey
5504
5504
  }, "No matching story section found — using full epic shard");
@@ -5508,11 +5508,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5508
5508
  if (projectRoot) {
5509
5509
  const fallback = readEpicShardFromFile(projectRoot, epicId);
5510
5510
  if (fallback) {
5511
- logger$17.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
5511
+ logger$18.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
5512
5512
  if (storyKey) {
5513
5513
  const storySection = extractStorySection(fallback, storyKey);
5514
5514
  if (storySection) {
5515
- logger$17.debug({
5515
+ logger$18.debug({
5516
5516
  epicId,
5517
5517
  storyKey
5518
5518
  }, "Extracted per-story section from file-based epic shard");
@@ -5524,7 +5524,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5524
5524
  }
5525
5525
  return "";
5526
5526
  } catch (err) {
5527
- logger$17.warn({
5527
+ logger$18.warn({
5528
5528
  epicId,
5529
5529
  error: err instanceof Error ? err.message : String(err)
5530
5530
  }, "Failed to retrieve epic shard");
@@ -5541,7 +5541,7 @@ function getPrevDevNotes(decisions, epicId) {
5541
5541
  if (devNotes.length === 0) return "";
5542
5542
  return devNotes[devNotes.length - 1].value;
5543
5543
  } catch (err) {
5544
- logger$17.warn({
5544
+ logger$18.warn({
5545
5545
  epicId,
5546
5546
  error: err instanceof Error ? err.message : String(err)
5547
5547
  }, "Failed to retrieve prev dev notes");
@@ -5574,7 +5574,7 @@ async function getArchConstraints$3(deps) {
5574
5574
  const truncatedBody = body.length > 300 ? body.slice(0, 297) + "..." : body;
5575
5575
  return `${header}\n${truncatedBody}`;
5576
5576
  }).join("\n\n");
5577
- logger$17.info({
5577
+ logger$18.info({
5578
5578
  fullLength: full.length,
5579
5579
  summarizedLength: summarized.length,
5580
5580
  decisions: constraints.length
@@ -5584,13 +5584,13 @@ async function getArchConstraints$3(deps) {
5584
5584
  if (deps.projectRoot) {
5585
5585
  const fallback = readArchConstraintsFromFile(deps.projectRoot);
5586
5586
  if (fallback) {
5587
- logger$17.info("Using file-based fallback for architecture constraints (decisions table empty)");
5587
+ logger$18.info("Using file-based fallback for architecture constraints (decisions table empty)");
5588
5588
  return fallback.length > ARCH_CONSTRAINT_MAX_CHARS ? fallback.slice(0, ARCH_CONSTRAINT_MAX_CHARS) + "\n\n[truncated for token budget]" : fallback;
5589
5589
  }
5590
5590
  }
5591
5591
  return "";
5592
5592
  } catch (err) {
5593
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
5593
+ logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
5594
5594
  return "";
5595
5595
  }
5596
5596
  }
@@ -5618,7 +5618,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
5618
5618
  const endIdx = endMatch ? endMatch.index : content.length;
5619
5619
  return content.slice(startIdx, endIdx).trim();
5620
5620
  } catch (err) {
5621
- logger$17.warn({
5621
+ logger$18.warn({
5622
5622
  epicId,
5623
5623
  error: err instanceof Error ? err.message : String(err)
5624
5624
  }, "File-based epic shard fallback failed");
@@ -5641,7 +5641,7 @@ function readArchConstraintsFromFile(projectRoot) {
5641
5641
  const content = readFileSync(archPath, "utf-8");
5642
5642
  return content.slice(0, 1500);
5643
5643
  } catch (err) {
5644
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
5644
+ logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
5645
5645
  return "";
5646
5646
  }
5647
5647
  }
@@ -5654,7 +5654,7 @@ async function getStoryTemplate(deps) {
5654
5654
  try {
5655
5655
  return await deps.pack.getTemplate("story");
5656
5656
  } catch (err) {
5657
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
5657
+ logger$18.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
5658
5658
  return "";
5659
5659
  }
5660
5660
  }
@@ -5691,7 +5691,7 @@ async function isValidStoryFile(filePath) {
5691
5691
 
5692
5692
  //#endregion
5693
5693
  //#region src/modules/compiled-workflows/git-helpers.ts
5694
- const logger$16 = createLogger("compiled-workflows:git-helpers");
5694
+ const logger$17 = createLogger("compiled-workflows:git-helpers");
5695
5695
  /**
5696
5696
  * Check whether the repo at `cwd` has at least one commit (HEAD resolves).
5697
5697
  * Returns false for fresh repos with no commits, avoiding `fatal: bad revision 'HEAD'`.
@@ -5728,7 +5728,7 @@ function hasCommits(cwd) {
5728
5728
  */
5729
5729
  async function getGitDiffSummary(workingDirectory = process.cwd()) {
5730
5730
  if (!hasCommits(workingDirectory)) {
5731
- logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff");
5731
+ logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff");
5732
5732
  return "";
5733
5733
  }
5734
5734
  return runGitCommand(["diff", "HEAD"], workingDirectory, "git-diff-summary");
@@ -5745,7 +5745,7 @@ async function getGitDiffSummary(workingDirectory = process.cwd()) {
5745
5745
  */
5746
5746
  async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
5747
5747
  if (!hasCommits(workingDirectory)) {
5748
- logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat");
5748
+ logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat");
5749
5749
  return "";
5750
5750
  }
5751
5751
  return runGitCommand([
@@ -5771,7 +5771,7 @@ async function getGitDiffStatSummary(workingDirectory = process.cwd()) {
5771
5771
  async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
5772
5772
  if (files.length === 0) return "";
5773
5773
  if (!hasCommits(workingDirectory)) {
5774
- logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff for files");
5774
+ logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty diff for files");
5775
5775
  return "";
5776
5776
  }
5777
5777
  await stageIntentToAdd(files, workingDirectory);
@@ -5798,7 +5798,7 @@ async function getGitDiffForFiles(files, workingDirectory = process.cwd()) {
5798
5798
  async function getGitDiffStatForFiles(files, workingDirectory = process.cwd()) {
5799
5799
  if (files.length === 0) return "";
5800
5800
  if (!hasCommits(workingDirectory)) {
5801
- logger$16.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat for files");
5801
+ logger$17.debug({ cwd: workingDirectory }, "No commits in repo — returning empty stat for files");
5802
5802
  return "";
5803
5803
  }
5804
5804
  return runGitCommand([
@@ -5847,7 +5847,7 @@ async function stageIntentToAdd(files, workingDirectory) {
5847
5847
  if (files.length === 0) return;
5848
5848
  const existing = files.filter((f$1) => {
5849
5849
  const exists = existsSync(f$1);
5850
- if (!exists) logger$16.debug({ file: f$1 }, "Skipping nonexistent file in stageIntentToAdd");
5850
+ if (!exists) logger$17.debug({ file: f$1 }, "Skipping nonexistent file in stageIntentToAdd");
5851
5851
  return exists;
5852
5852
  });
5853
5853
  if (existing.length === 0) return;
@@ -5881,7 +5881,7 @@ async function runGitCommand(args, cwd, logLabel) {
5881
5881
  stderr += chunk.toString("utf-8");
5882
5882
  });
5883
5883
  proc$1.on("error", (err) => {
5884
- logger$16.warn({
5884
+ logger$17.warn({
5885
5885
  label: logLabel,
5886
5886
  cwd,
5887
5887
  error: err.message
@@ -5890,7 +5890,7 @@ async function runGitCommand(args, cwd, logLabel) {
5890
5890
  });
5891
5891
  proc$1.on("close", (code) => {
5892
5892
  if (code !== 0) {
5893
- logger$16.warn({
5893
+ logger$17.warn({
5894
5894
  label: logLabel,
5895
5895
  cwd,
5896
5896
  code,
@@ -5906,7 +5906,7 @@ async function runGitCommand(args, cwd, logLabel) {
5906
5906
 
5907
5907
  //#endregion
5908
5908
  //#region src/modules/implementation-orchestrator/project-findings.ts
5909
- const logger$15 = createLogger("project-findings");
5909
+ const logger$16 = createLogger("project-findings");
5910
5910
  /** Maximum character length for the findings summary */
5911
5911
  const MAX_CHARS = 2e3;
5912
5912
  /**
@@ -5972,7 +5972,7 @@ async function getProjectFindings(db) {
5972
5972
  if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
5973
5973
  return summary;
5974
5974
  } catch (err) {
5975
- logger$15.warn({ err }, "Failed to query project findings (graceful fallback)");
5975
+ logger$16.warn({ err }, "Failed to query project findings (graceful fallback)");
5976
5976
  return "";
5977
5977
  }
5978
5978
  }
@@ -5995,7 +5995,7 @@ function extractRecurringPatterns(outcomes) {
5995
5995
 
5996
5996
  //#endregion
5997
5997
  //#region src/modules/compiled-workflows/story-complexity.ts
5998
- const logger$14 = createLogger("compiled-workflows:story-complexity");
5998
+ const logger$15 = createLogger("compiled-workflows:story-complexity");
5999
5999
  /**
6000
6000
  * Compute a complexity score from story markdown content.
6001
6001
  *
@@ -6047,7 +6047,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
6047
6047
  * @param resolvedMaxTurns - Turn limit resolved for this dispatch
6048
6048
  */
6049
6049
  function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
6050
- logger$14.info({
6050
+ logger$15.info({
6051
6051
  storyKey,
6052
6052
  taskCount: complexity.taskCount,
6053
6053
  subtaskCount: complexity.subtaskCount,
@@ -6303,7 +6303,7 @@ function resolveInstallCommand(projectRoot) {
6303
6303
 
6304
6304
  //#endregion
6305
6305
  //#region src/modules/compiled-workflows/dev-story.ts
6306
- const logger$13 = createLogger("compiled-workflows:dev-story");
6306
+ const logger$14 = createLogger("compiled-workflows:dev-story");
6307
6307
  /** Default timeout for dev-story dispatches in milliseconds (30 min) */
6308
6308
  const DEFAULT_TIMEOUT_MS$1 = 18e5;
6309
6309
  /**
@@ -6315,12 +6315,12 @@ const DEFAULT_TIMEOUT_MS$1 = 18e5;
6315
6315
  */
6316
6316
  async function runDevStory(deps, params) {
6317
6317
  const { storyKey, storyFilePath, taskScope, priorFiles } = params;
6318
- logger$13.info({
6318
+ logger$14.info({
6319
6319
  storyKey,
6320
6320
  storyFilePath
6321
6321
  }, "Starting compiled dev-story workflow");
6322
6322
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
6323
- logger$13.info({
6323
+ logger$14.info({
6324
6324
  workflow: "dev-story",
6325
6325
  ceiling: TOKEN_CEILING,
6326
6326
  source: tokenCeilingSource
@@ -6363,10 +6363,10 @@ async function runDevStory(deps, params) {
6363
6363
  let template;
6364
6364
  try {
6365
6365
  template = await deps.pack.getPrompt("dev-story");
6366
- logger$13.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
6366
+ logger$14.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
6367
6367
  } catch (err) {
6368
6368
  const error = err instanceof Error ? err.message : String(err);
6369
- logger$13.error({
6369
+ logger$14.error({
6370
6370
  storyKey,
6371
6371
  error
6372
6372
  }, "Failed to retrieve dev-story prompt template");
@@ -6377,14 +6377,14 @@ async function runDevStory(deps, params) {
6377
6377
  storyContent = await readFile$1(storyFilePath, "utf-8");
6378
6378
  } catch (err) {
6379
6379
  if (err.code === "ENOENT") {
6380
- logger$13.error({
6380
+ logger$14.error({
6381
6381
  storyKey,
6382
6382
  storyFilePath
6383
6383
  }, "Story file not found");
6384
6384
  return makeFailureResult("story_file_not_found");
6385
6385
  }
6386
6386
  const error = err instanceof Error ? err.message : String(err);
6387
- logger$13.error({
6387
+ logger$14.error({
6388
6388
  storyKey,
6389
6389
  storyFilePath,
6390
6390
  error
@@ -6392,7 +6392,7 @@ async function runDevStory(deps, params) {
6392
6392
  return makeFailureResult(`story_file_read_error: ${error}`);
6393
6393
  }
6394
6394
  if (storyContent.trim().length === 0) {
6395
- logger$13.error({
6395
+ logger$14.error({
6396
6396
  storyKey,
6397
6397
  storyFilePath
6398
6398
  }, "Story file is empty");
@@ -6400,7 +6400,7 @@ async function runDevStory(deps, params) {
6400
6400
  }
6401
6401
  const staleStatus = detectDeprecatedStatusField(storyContent);
6402
6402
  if (staleStatus !== null) {
6403
- logger$13.warn({
6403
+ logger$14.warn({
6404
6404
  storyFilePath,
6405
6405
  staleStatus
6406
6406
  }, "Story spec contains deprecated Status field — stripped before dispatch (status is managed by Dolt work graph)");
@@ -6415,17 +6415,17 @@ async function runDevStory(deps, params) {
6415
6415
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
6416
6416
  if (testPatternDecisions.length > 0) {
6417
6417
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
6418
- logger$13.debug({
6418
+ logger$14.debug({
6419
6419
  storyKey,
6420
6420
  count: testPatternDecisions.length
6421
6421
  }, "Loaded test patterns from decision store");
6422
6422
  } else {
6423
6423
  testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
6424
- logger$13.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
6424
+ logger$14.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
6425
6425
  }
6426
6426
  } catch (err) {
6427
6427
  const error = err instanceof Error ? err.message : String(err);
6428
- logger$13.warn({
6428
+ logger$14.warn({
6429
6429
  storyKey,
6430
6430
  error
6431
6431
  }, "Failed to load test patterns — using defaults");
@@ -6439,7 +6439,7 @@ async function runDevStory(deps, params) {
6439
6439
  if (deps.repoMapInjector !== void 0) {
6440
6440
  const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
6441
6441
  repoContextContent = injection.text;
6442
- logger$13.info({
6442
+ logger$14.info({
6443
6443
  storyKey,
6444
6444
  repoMapTokens: Math.ceil(injection.text.length / 4),
6445
6445
  symbolCount: injection.symbolCount,
@@ -6451,7 +6451,7 @@ async function runDevStory(deps, params) {
6451
6451
  const findings = await getProjectFindings(deps.db);
6452
6452
  if (findings.length > 0) {
6453
6453
  priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
6454
- logger$13.debug({
6454
+ logger$14.debug({
6455
6455
  storyKey,
6456
6456
  findingsLen: findings.length
6457
6457
  }, "Injecting prior findings into dev-story prompt");
@@ -6471,7 +6471,7 @@ async function runDevStory(deps, params) {
6471
6471
  if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
6472
6472
  if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
6473
6473
  testPlanContent = parts.join("\n");
6474
- logger$13.debug({ storyKey }, "Injecting test plan into dev-story prompt");
6474
+ logger$14.debug({ storyKey }, "Injecting test plan into dev-story prompt");
6475
6475
  }
6476
6476
  } catch {}
6477
6477
  const sections = [
@@ -6532,7 +6532,7 @@ async function runDevStory(deps, params) {
6532
6532
  }
6533
6533
  ];
6534
6534
  const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
6535
- logger$13.info({
6535
+ logger$14.info({
6536
6536
  storyKey,
6537
6537
  tokenCount,
6538
6538
  ceiling: TOKEN_CEILING,
@@ -6556,7 +6556,7 @@ async function runDevStory(deps, params) {
6556
6556
  dispatchResult = await handle.result;
6557
6557
  } catch (err) {
6558
6558
  const error = err instanceof Error ? err.message : String(err);
6559
- logger$13.error({
6559
+ logger$14.error({
6560
6560
  storyKey,
6561
6561
  error
6562
6562
  }, "Dispatch threw an unexpected error");
@@ -6567,11 +6567,11 @@ async function runDevStory(deps, params) {
6567
6567
  output: dispatchResult.tokenEstimate.output
6568
6568
  };
6569
6569
  if (dispatchResult.status === "timeout") {
6570
- logger$13.error({
6570
+ logger$14.error({
6571
6571
  storyKey,
6572
6572
  durationMs: dispatchResult.durationMs
6573
6573
  }, "Dev-story dispatch timed out");
6574
- if (dispatchResult.output.length > 0) logger$13.info({
6574
+ if (dispatchResult.output.length > 0) logger$14.info({
6575
6575
  storyKey,
6576
6576
  partialOutput: dispatchResult.output.slice(0, 500)
6577
6577
  }, "Partial output before timeout");
@@ -6581,12 +6581,12 @@ async function runDevStory(deps, params) {
6581
6581
  };
6582
6582
  }
6583
6583
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
6584
- logger$13.error({
6584
+ logger$14.error({
6585
6585
  storyKey,
6586
6586
  exitCode: dispatchResult.exitCode,
6587
6587
  status: dispatchResult.status
6588
6588
  }, "Dev-story dispatch failed");
6589
- if (dispatchResult.output.length > 0) logger$13.info({
6589
+ if (dispatchResult.output.length > 0) logger$14.info({
6590
6590
  storyKey,
6591
6591
  partialOutput: dispatchResult.output.slice(0, 500)
6592
6592
  }, "Partial output from failed dispatch");
@@ -6598,7 +6598,7 @@ async function runDevStory(deps, params) {
6598
6598
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
6599
6599
  const details = dispatchResult.parseError ?? "parsed result was null";
6600
6600
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
6601
- logger$13.error({
6601
+ logger$14.error({
6602
6602
  storyKey,
6603
6603
  parseError: details,
6604
6604
  rawOutputSnippet: rawSnippet
@@ -6606,12 +6606,12 @@ async function runDevStory(deps, params) {
6606
6606
  let filesModified = [];
6607
6607
  try {
6608
6608
  filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
6609
- if (filesModified.length > 0) logger$13.info({
6609
+ if (filesModified.length > 0) logger$14.info({
6610
6610
  storyKey,
6611
6611
  fileCount: filesModified.length
6612
6612
  }, "Recovered files_modified from git status (YAML fallback)");
6613
6613
  } catch (err) {
6614
- logger$13.warn({
6614
+ logger$14.warn({
6615
6615
  storyKey,
6616
6616
  error: err instanceof Error ? err.message : String(err)
6617
6617
  }, "Failed to recover files_modified from git");
@@ -6628,7 +6628,7 @@ async function runDevStory(deps, params) {
6628
6628
  };
6629
6629
  }
6630
6630
  const parsed = dispatchResult.parsed;
6631
- logger$13.info({
6631
+ logger$14.info({
6632
6632
  storyKey,
6633
6633
  result: parsed.result,
6634
6634
  acMet: parsed.ac_met.length
@@ -6767,7 +6767,7 @@ function extractFilesInScope(storyContent) {
6767
6767
 
6768
6768
  //#endregion
6769
6769
  //#region src/modules/compiled-workflows/code-review.ts
6770
- const logger$12 = createLogger("compiled-workflows:code-review");
6770
+ const logger$13 = createLogger("compiled-workflows:code-review");
6771
6771
  /**
6772
6772
  * Default fallback result when dispatch fails or times out.
6773
6773
  * Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
@@ -6842,14 +6842,14 @@ async function countTestMetrics(filesModified, cwd) {
6842
6842
  async function runCodeReview(deps, params) {
6843
6843
  const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
6844
6844
  const cwd = workingDirectory ?? process.cwd();
6845
- logger$12.debug({
6845
+ logger$13.debug({
6846
6846
  storyKey,
6847
6847
  storyFilePath,
6848
6848
  cwd,
6849
6849
  pipelineRunId
6850
6850
  }, "Starting code-review workflow");
6851
6851
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
6852
- logger$12.info({
6852
+ logger$13.info({
6853
6853
  workflow: "code-review",
6854
6854
  ceiling: TOKEN_CEILING,
6855
6855
  source: tokenCeilingSource
@@ -6859,7 +6859,7 @@ async function runCodeReview(deps, params) {
6859
6859
  template = await deps.pack.getPrompt("code-review");
6860
6860
  } catch (err) {
6861
6861
  const error = err instanceof Error ? err.message : String(err);
6862
- logger$12.error({ error }, "Failed to retrieve code-review prompt template");
6862
+ logger$13.error({ error }, "Failed to retrieve code-review prompt template");
6863
6863
  return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
6864
6864
  input: 0,
6865
6865
  output: 0
@@ -6870,7 +6870,7 @@ async function runCodeReview(deps, params) {
6870
6870
  storyContent = await readFile$1(storyFilePath, "utf-8");
6871
6871
  } catch (err) {
6872
6872
  const error = err instanceof Error ? err.message : String(err);
6873
- logger$12.error({
6873
+ logger$13.error({
6874
6874
  storyFilePath,
6875
6875
  error
6876
6876
  }, "Failed to read story file");
@@ -6890,12 +6890,12 @@ async function runCodeReview(deps, params) {
6890
6890
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
6891
6891
  if (scopedTotal <= TOKEN_CEILING) {
6892
6892
  gitDiffContent = scopedDiff;
6893
- logger$12.debug({
6893
+ logger$13.debug({
6894
6894
  fileCount: filesModified.length,
6895
6895
  tokenCount: scopedTotal
6896
6896
  }, "Using scoped file diff");
6897
6897
  } else {
6898
- logger$12.warn({
6898
+ logger$13.warn({
6899
6899
  estimatedTotal: scopedTotal,
6900
6900
  ceiling: TOKEN_CEILING,
6901
6901
  fileCount: filesModified.length
@@ -6909,7 +6909,7 @@ async function runCodeReview(deps, params) {
6909
6909
  const fullTotal = nonDiffTokens + countTokens(fullDiff);
6910
6910
  if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
6911
6911
  else {
6912
- logger$12.warn({
6912
+ logger$13.warn({
6913
6913
  estimatedTotal: fullTotal,
6914
6914
  ceiling: TOKEN_CEILING
6915
6915
  }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -6917,7 +6917,7 @@ async function runCodeReview(deps, params) {
6917
6917
  }
6918
6918
  }
6919
6919
  if (gitDiffContent.trim().length === 0) {
6920
- logger$12.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
6920
+ logger$13.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
6921
6921
  return {
6922
6922
  verdict: "SHIP_IT",
6923
6923
  issues: 0,
@@ -6933,7 +6933,7 @@ async function runCodeReview(deps, params) {
6933
6933
  if (deps.repoMapInjector !== void 0) {
6934
6934
  const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
6935
6935
  repoContextContent = injection.text;
6936
- logger$12.info({
6936
+ logger$13.info({
6937
6937
  storyKey,
6938
6938
  repoMapTokens: Math.ceil(injection.text.length / 4),
6939
6939
  symbolCount: injection.symbolCount,
@@ -6953,14 +6953,14 @@ async function runCodeReview(deps, params) {
6953
6953
  const findings = await getProjectFindings(deps.db);
6954
6954
  if (findings.length > 0) {
6955
6955
  priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
6956
- logger$12.debug({
6956
+ logger$13.debug({
6957
6957
  storyKey,
6958
6958
  findingsLen: findings.length
6959
6959
  }, "Injecting prior findings into code-review prompt");
6960
6960
  }
6961
6961
  } catch {}
6962
6962
  const testMetricsContent = await countTestMetrics(filesModified, cwd);
6963
- if (testMetricsContent) logger$12.debug({ storyKey }, "Injecting verified test-count metrics into code-review context");
6963
+ if (testMetricsContent) logger$13.debug({ storyKey }, "Injecting verified test-count metrics into code-review context");
6964
6964
  const sections = [
6965
6965
  {
6966
6966
  name: "story_content",
@@ -6999,11 +6999,11 @@ async function runCodeReview(deps, params) {
6999
6999
  }
7000
7000
  ];
7001
7001
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
7002
- if (assembleResult.truncated) logger$12.warn({
7002
+ if (assembleResult.truncated) logger$13.warn({
7003
7003
  storyKey,
7004
7004
  tokenCount: assembleResult.tokenCount
7005
7005
  }, "Code-review prompt truncated to fit token ceiling");
7006
- logger$12.debug({
7006
+ logger$13.debug({
7007
7007
  storyKey,
7008
7008
  tokenCount: assembleResult.tokenCount,
7009
7009
  truncated: assembleResult.truncated
@@ -7024,7 +7024,7 @@ async function runCodeReview(deps, params) {
7024
7024
  dispatchResult = await handle.result;
7025
7025
  } catch (err) {
7026
7026
  const error = err instanceof Error ? err.message : String(err);
7027
- logger$12.error({
7027
+ logger$13.error({
7028
7028
  storyKey,
7029
7029
  error
7030
7030
  }, "Code-review dispatch threw unexpected error");
@@ -7040,7 +7040,7 @@ async function runCodeReview(deps, params) {
7040
7040
  const rawOutput = dispatchResult.output ?? void 0;
7041
7041
  if (dispatchResult.status === "failed") {
7042
7042
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
7043
- logger$12.warn({
7043
+ logger$13.warn({
7044
7044
  storyKey,
7045
7045
  exitCode: dispatchResult.exitCode
7046
7046
  }, "Code-review dispatch failed");
@@ -7050,7 +7050,7 @@ async function runCodeReview(deps, params) {
7050
7050
  };
7051
7051
  }
7052
7052
  if (dispatchResult.status === "timeout") {
7053
- logger$12.warn({ storyKey }, "Code-review dispatch timed out");
7053
+ logger$13.warn({ storyKey }, "Code-review dispatch timed out");
7054
7054
  return {
7055
7055
  ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
7056
7056
  rawOutput
@@ -7058,7 +7058,7 @@ async function runCodeReview(deps, params) {
7058
7058
  }
7059
7059
  if (dispatchResult.parsed === null) {
7060
7060
  const details = dispatchResult.parseError ?? "No YAML block found in output";
7061
- logger$12.warn({
7061
+ logger$13.warn({
7062
7062
  storyKey,
7063
7063
  details
7064
7064
  }, "Code-review output schema validation failed");
@@ -7075,7 +7075,7 @@ async function runCodeReview(deps, params) {
7075
7075
  const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
7076
7076
  if (!parseResult.success) {
7077
7077
  const details = parseResult.error.message;
7078
- logger$12.warn({
7078
+ logger$13.warn({
7079
7079
  storyKey,
7080
7080
  details
7081
7081
  }, "Code-review output failed schema validation");
@@ -7090,13 +7090,13 @@ async function runCodeReview(deps, params) {
7090
7090
  };
7091
7091
  }
7092
7092
  const parsed = parseResult.data;
7093
- if (parsed.agentVerdict !== parsed.verdict) logger$12.info({
7093
+ if (parsed.agentVerdict !== parsed.verdict) logger$13.info({
7094
7094
  storyKey,
7095
7095
  agentVerdict: parsed.agentVerdict,
7096
7096
  pipelineVerdict: parsed.verdict,
7097
7097
  issues: parsed.issues
7098
7098
  }, "Pipeline overrode agent verdict based on issue severities");
7099
- logger$12.info({
7099
+ logger$13.info({
7100
7100
  storyKey,
7101
7101
  verdict: parsed.verdict,
7102
7102
  issues: parsed.issues
@@ -7121,14 +7121,14 @@ async function getArchConstraints$2(deps) {
7121
7121
  if (constraints.length === 0) return "";
7122
7122
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
7123
7123
  } catch (err) {
7124
- logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
7124
+ logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
7125
7125
  return "";
7126
7126
  }
7127
7127
  }
7128
7128
 
7129
7129
  //#endregion
7130
7130
  //#region src/modules/compiled-workflows/test-plan.ts
7131
- const logger$11 = createLogger("compiled-workflows:test-plan");
7131
+ const logger$12 = createLogger("compiled-workflows:test-plan");
7132
7132
  /** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
7133
7133
  const DEFAULT_TIMEOUT_MS = 3e5;
7134
7134
  /**
@@ -7140,12 +7140,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
7140
7140
  */
7141
7141
  async function runTestPlan(deps, params) {
7142
7142
  const { storyKey, storyFilePath, pipelineRunId } = params;
7143
- logger$11.info({
7143
+ logger$12.info({
7144
7144
  storyKey,
7145
7145
  storyFilePath
7146
7146
  }, "Starting compiled test-plan workflow");
7147
7147
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
7148
- logger$11.info({
7148
+ logger$12.info({
7149
7149
  workflow: "test-plan",
7150
7150
  ceiling: TOKEN_CEILING,
7151
7151
  source: tokenCeilingSource
@@ -7153,10 +7153,10 @@ async function runTestPlan(deps, params) {
7153
7153
  let template;
7154
7154
  try {
7155
7155
  template = await deps.pack.getPrompt("test-plan");
7156
- logger$11.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
7156
+ logger$12.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
7157
7157
  } catch (err) {
7158
7158
  const error = err instanceof Error ? err.message : String(err);
7159
- logger$11.warn({
7159
+ logger$12.warn({
7160
7160
  storyKey,
7161
7161
  error
7162
7162
  }, "Failed to retrieve test-plan prompt template");
@@ -7167,14 +7167,14 @@ async function runTestPlan(deps, params) {
7167
7167
  storyContent = await readFile$1(storyFilePath, "utf-8");
7168
7168
  } catch (err) {
7169
7169
  if (err.code === "ENOENT") {
7170
- logger$11.warn({
7170
+ logger$12.warn({
7171
7171
  storyKey,
7172
7172
  storyFilePath
7173
7173
  }, "Story file not found for test planning");
7174
7174
  return makeTestPlanFailureResult("story_file_not_found");
7175
7175
  }
7176
7176
  const error = err instanceof Error ? err.message : String(err);
7177
- logger$11.warn({
7177
+ logger$12.warn({
7178
7178
  storyKey,
7179
7179
  storyFilePath,
7180
7180
  error
@@ -7188,13 +7188,13 @@ async function runTestPlan(deps, params) {
7188
7188
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
7189
7189
  if (testPatternDecisions.length > 0) {
7190
7190
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
7191
- logger$11.debug({
7191
+ logger$12.debug({
7192
7192
  storyKey,
7193
7193
  count: testPatternDecisions.length
7194
7194
  }, "Loaded test patterns from decision store");
7195
7195
  } else {
7196
7196
  testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
7197
- logger$11.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
7197
+ logger$12.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
7198
7198
  }
7199
7199
  } catch {
7200
7200
  testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
@@ -7216,7 +7216,7 @@ async function runTestPlan(deps, params) {
7216
7216
  priority: "optional"
7217
7217
  }
7218
7218
  ], TOKEN_CEILING);
7219
- logger$11.info({
7219
+ logger$12.info({
7220
7220
  storyKey,
7221
7221
  tokenCount,
7222
7222
  ceiling: TOKEN_CEILING,
@@ -7237,7 +7237,7 @@ async function runTestPlan(deps, params) {
7237
7237
  dispatchResult = await handle.result;
7238
7238
  } catch (err) {
7239
7239
  const error = err instanceof Error ? err.message : String(err);
7240
- logger$11.warn({
7240
+ logger$12.warn({
7241
7241
  storyKey,
7242
7242
  error
7243
7243
  }, "Test-plan dispatch threw an unexpected error");
@@ -7248,7 +7248,7 @@ async function runTestPlan(deps, params) {
7248
7248
  output: dispatchResult.tokenEstimate.output
7249
7249
  };
7250
7250
  if (dispatchResult.status === "timeout") {
7251
- logger$11.warn({
7251
+ logger$12.warn({
7252
7252
  storyKey,
7253
7253
  durationMs: dispatchResult.durationMs
7254
7254
  }, "Test-plan dispatch timed out");
@@ -7258,7 +7258,7 @@ async function runTestPlan(deps, params) {
7258
7258
  };
7259
7259
  }
7260
7260
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
7261
- logger$11.warn({
7261
+ logger$12.warn({
7262
7262
  storyKey,
7263
7263
  exitCode: dispatchResult.exitCode,
7264
7264
  status: dispatchResult.status
@@ -7270,7 +7270,7 @@ async function runTestPlan(deps, params) {
7270
7270
  }
7271
7271
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
7272
7272
  const details = dispatchResult.parseError ?? "parsed result was null";
7273
- logger$11.warn({
7273
+ logger$12.warn({
7274
7274
  storyKey,
7275
7275
  parseError: details
7276
7276
  }, "Test-plan YAML schema validation failed");
@@ -7293,19 +7293,19 @@ async function runTestPlan(deps, params) {
7293
7293
  }),
7294
7294
  rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
7295
7295
  });
7296
- logger$11.info({
7296
+ logger$12.info({
7297
7297
  storyKey,
7298
7298
  fileCount: parsed.test_files.length,
7299
7299
  categories: parsed.test_categories
7300
7300
  }, "Test plan stored in decision store");
7301
7301
  } catch (err) {
7302
7302
  const error = err instanceof Error ? err.message : String(err);
7303
- logger$11.warn({
7303
+ logger$12.warn({
7304
7304
  storyKey,
7305
7305
  error
7306
7306
  }, "Failed to store test plan in decision store — proceeding anyway");
7307
7307
  }
7308
- logger$11.info({
7308
+ logger$12.info({
7309
7309
  storyKey,
7310
7310
  result: parsed.result
7311
7311
  }, "Test-plan workflow completed");
@@ -7345,14 +7345,14 @@ async function getArchConstraints$1(deps) {
7345
7345
  if (constraints.length === 0) return "";
7346
7346
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
7347
7347
  } catch (err) {
7348
- logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
7348
+ logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
7349
7349
  return "";
7350
7350
  }
7351
7351
  }
7352
7352
 
7353
7353
  //#endregion
7354
7354
  //#region src/modules/compiled-workflows/test-expansion.ts
7355
- const logger$10 = createLogger("compiled-workflows:test-expansion");
7355
+ const logger$11 = createLogger("compiled-workflows:test-expansion");
7356
7356
  function defaultFallbackResult(error, tokenUsage) {
7357
7357
  return {
7358
7358
  expansion_priority: "low",
@@ -7382,14 +7382,14 @@ function defaultFallbackResult(error, tokenUsage) {
7382
7382
  async function runTestExpansion(deps, params) {
7383
7383
  const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
7384
7384
  const cwd = workingDirectory ?? process.cwd();
7385
- logger$10.debug({
7385
+ logger$11.debug({
7386
7386
  storyKey,
7387
7387
  storyFilePath,
7388
7388
  cwd,
7389
7389
  pipelineRunId
7390
7390
  }, "Starting test-expansion workflow");
7391
7391
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
7392
- logger$10.info({
7392
+ logger$11.info({
7393
7393
  workflow: "test-expansion",
7394
7394
  ceiling: TOKEN_CEILING,
7395
7395
  source: tokenCeilingSource
@@ -7399,7 +7399,7 @@ async function runTestExpansion(deps, params) {
7399
7399
  template = await deps.pack.getPrompt("test-expansion");
7400
7400
  } catch (err) {
7401
7401
  const error = err instanceof Error ? err.message : String(err);
7402
- logger$10.warn({ error }, "Failed to retrieve test-expansion prompt template");
7402
+ logger$11.warn({ error }, "Failed to retrieve test-expansion prompt template");
7403
7403
  return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
7404
7404
  input: 0,
7405
7405
  output: 0
@@ -7410,7 +7410,7 @@ async function runTestExpansion(deps, params) {
7410
7410
  storyContent = await readFile$1(storyFilePath, "utf-8");
7411
7411
  } catch (err) {
7412
7412
  const error = err instanceof Error ? err.message : String(err);
7413
- logger$10.warn({
7413
+ logger$11.warn({
7414
7414
  storyFilePath,
7415
7415
  error
7416
7416
  }, "Failed to read story file");
@@ -7426,13 +7426,13 @@ async function runTestExpansion(deps, params) {
7426
7426
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
7427
7427
  if (testPatternDecisions.length > 0) {
7428
7428
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
7429
- logger$10.debug({
7429
+ logger$11.debug({
7430
7430
  storyKey,
7431
7431
  count: testPatternDecisions.length
7432
7432
  }, "Loaded test patterns from decision store");
7433
7433
  } else {
7434
7434
  testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
7435
- logger$10.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
7435
+ logger$11.debug({ storyKey }, "No test-pattern decisions — using stack-aware defaults");
7436
7436
  }
7437
7437
  } catch {
7438
7438
  testPatternsContent = resolveDefaultTestPatterns(deps.projectRoot);
@@ -7447,12 +7447,12 @@ async function runTestExpansion(deps, params) {
7447
7447
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
7448
7448
  if (scopedTotal <= TOKEN_CEILING) {
7449
7449
  gitDiffContent = scopedDiff;
7450
- logger$10.debug({
7450
+ logger$11.debug({
7451
7451
  fileCount: filesModified.length,
7452
7452
  tokenCount: scopedTotal
7453
7453
  }, "Using scoped file diff");
7454
7454
  } else {
7455
- logger$10.warn({
7455
+ logger$11.warn({
7456
7456
  estimatedTotal: scopedTotal,
7457
7457
  ceiling: TOKEN_CEILING,
7458
7458
  fileCount: filesModified.length
@@ -7460,7 +7460,7 @@ async function runTestExpansion(deps, params) {
7460
7460
  gitDiffContent = await getGitDiffStatForFiles(filesModified, cwd);
7461
7461
  }
7462
7462
  } catch (err) {
7463
- logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
7463
+ logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
7464
7464
  }
7465
7465
  const sections = [
7466
7466
  {
@@ -7485,11 +7485,11 @@ async function runTestExpansion(deps, params) {
7485
7485
  }
7486
7486
  ];
7487
7487
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
7488
- if (assembleResult.truncated) logger$10.warn({
7488
+ if (assembleResult.truncated) logger$11.warn({
7489
7489
  storyKey,
7490
7490
  tokenCount: assembleResult.tokenCount
7491
7491
  }, "Test-expansion prompt truncated to fit token ceiling");
7492
- logger$10.debug({
7492
+ logger$11.debug({
7493
7493
  storyKey,
7494
7494
  tokenCount: assembleResult.tokenCount,
7495
7495
  truncated: assembleResult.truncated
@@ -7509,7 +7509,7 @@ async function runTestExpansion(deps, params) {
7509
7509
  dispatchResult = await handle.result;
7510
7510
  } catch (err) {
7511
7511
  const error = err instanceof Error ? err.message : String(err);
7512
- logger$10.warn({
7512
+ logger$11.warn({
7513
7513
  storyKey,
7514
7514
  error
7515
7515
  }, "Test-expansion dispatch threw unexpected error");
@@ -7524,19 +7524,19 @@ async function runTestExpansion(deps, params) {
7524
7524
  };
7525
7525
  if (dispatchResult.status === "failed") {
7526
7526
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
7527
- logger$10.warn({
7527
+ logger$11.warn({
7528
7528
  storyKey,
7529
7529
  exitCode: dispatchResult.exitCode
7530
7530
  }, "Test-expansion dispatch failed");
7531
7531
  return defaultFallbackResult(errorMsg, tokenUsage);
7532
7532
  }
7533
7533
  if (dispatchResult.status === "timeout") {
7534
- logger$10.warn({ storyKey }, "Test-expansion dispatch timed out");
7534
+ logger$11.warn({ storyKey }, "Test-expansion dispatch timed out");
7535
7535
  return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
7536
7536
  }
7537
7537
  if (dispatchResult.parsed === null) {
7538
7538
  const details = dispatchResult.parseError ?? "No YAML block found in output";
7539
- logger$10.warn({
7539
+ logger$11.warn({
7540
7540
  storyKey,
7541
7541
  details
7542
7542
  }, "Test-expansion output has no parseable YAML");
@@ -7545,14 +7545,14 @@ async function runTestExpansion(deps, params) {
7545
7545
  const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
7546
7546
  if (!parseResult.success) {
7547
7547
  const details = parseResult.error.message;
7548
- logger$10.warn({
7548
+ logger$11.warn({
7549
7549
  storyKey,
7550
7550
  details
7551
7551
  }, "Test-expansion output failed schema validation");
7552
7552
  return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
7553
7553
  }
7554
7554
  const parsed = parseResult.data;
7555
- logger$10.info({
7555
+ logger$11.info({
7556
7556
  storyKey,
7557
7557
  expansion_priority: parsed.expansion_priority,
7558
7558
  coverage_gaps: parsed.coverage_gaps.length,
@@ -7577,7 +7577,7 @@ async function getArchConstraints(deps) {
7577
7577
  if (constraints.length === 0) return "";
7578
7578
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
7579
7579
  } catch (err) {
7580
- logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
7580
+ logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
7581
7581
  return "";
7582
7582
  }
7583
7583
  }
@@ -7885,6 +7885,16 @@ function detectConflictGroups(storyKeys, config) {
7885
7885
  if (existing !== void 0) existing.push(key);
7886
7886
  else moduleToStories.set(module$1, [key]);
7887
7887
  }
7888
+ if (moduleToStories.size === 1 && storyKeys.length >= 4) {
7889
+ const epicGroups = new Map();
7890
+ for (const key of storyKeys) {
7891
+ const epicNum = key.split("-")[0] ?? key;
7892
+ const existing = epicGroups.get(epicNum);
7893
+ if (existing !== void 0) existing.push(key);
7894
+ else epicGroups.set(epicNum, [key]);
7895
+ }
7896
+ if (epicGroups.size > 1) return Array.from(epicGroups.values());
7897
+ }
7888
7898
  return Array.from(moduleToStories.values());
7889
7899
  }
7890
7900
  /**
@@ -8009,7 +8019,7 @@ function detectConflictGroupsWithContracts(storyKeys, config, declarations) {
8009
8019
 
8010
8020
  //#endregion
8011
8021
  //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
8012
- const logger$9 = createLogger("implementation-orchestrator:seed");
8022
+ const logger$10 = createLogger("implementation-orchestrator:seed");
8013
8023
  /** Max chars for the architecture summary seeded into decisions */
8014
8024
  const MAX_ARCH_CHARS = 6e3;
8015
8025
  /** Max chars per epic-shard decision value (per-story or per-epic fallback) */
@@ -8043,12 +8053,12 @@ async function seedMethodologyContext(db, projectRoot) {
8043
8053
  const testCount = await seedTestPatterns(db, projectRoot);
8044
8054
  if (testCount === -1) result.skippedCategories.push("test-patterns");
8045
8055
  else result.decisionsCreated += testCount;
8046
- logger$9.info({
8056
+ logger$10.info({
8047
8057
  decisionsCreated: result.decisionsCreated,
8048
8058
  skippedCategories: result.skippedCategories
8049
8059
  }, "Methodology context seeding complete");
8050
8060
  } catch (err) {
8051
- logger$9.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
8061
+ logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
8052
8062
  }
8053
8063
  return result;
8054
8064
  }
@@ -8092,7 +8102,7 @@ async function seedArchitecture(db, projectRoot) {
8092
8102
  });
8093
8103
  count = 1;
8094
8104
  }
8095
- logger$9.debug({ count }, "Seeded architecture decisions");
8105
+ logger$10.debug({ count }, "Seeded architecture decisions");
8096
8106
  return count;
8097
8107
  }
8098
8108
  /**
@@ -8116,11 +8126,11 @@ async function seedEpicShards(db, projectRoot) {
8116
8126
  const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
8117
8127
  const storedHash = storedHashDecision?.value;
8118
8128
  if (storedHash === currentHash) {
8119
- logger$9.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
8129
+ logger$10.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
8120
8130
  return -1;
8121
8131
  }
8122
8132
  if (implementationDecisions.some((d) => d.category === "epic-shard")) {
8123
- logger$9.debug({
8133
+ logger$10.debug({
8124
8134
  storedHash,
8125
8135
  currentHash
8126
8136
  }, "Epics file changed — deleting stale epic-shard decisions");
@@ -8151,7 +8161,7 @@ async function seedEpicShards(db, projectRoot) {
8151
8161
  value: currentHash,
8152
8162
  rationale: "SHA-256 hash of epics file content for change detection"
8153
8163
  });
8154
- logger$9.debug({
8164
+ logger$10.debug({
8155
8165
  count,
8156
8166
  hash: currentHash
8157
8167
  }, "Seeded epic shard decisions");
@@ -8175,7 +8185,7 @@ async function seedTestPatterns(db, projectRoot) {
8175
8185
  value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
8176
8186
  rationale: "Detected from project configuration at orchestrator startup"
8177
8187
  });
8178
- logger$9.debug("Seeded test patterns decision");
8188
+ logger$10.debug("Seeded test patterns decision");
8179
8189
  return 1;
8180
8190
  }
8181
8191
  /**
@@ -8632,7 +8642,7 @@ function findArtifact(projectRoot, candidates) {
8632
8642
 
8633
8643
  //#endregion
8634
8644
  //#region src/modules/implementation-orchestrator/package-snapshot.ts
8635
- const logger$8 = createLogger("package-snapshot");
8645
+ const logger$9 = createLogger("package-snapshot");
8636
8646
  /**
8637
8647
  * Discover all package.json paths in a workspace monorepo.
8638
8648
  * Checks the `workspaces` field in root package.json,
@@ -8724,7 +8734,7 @@ function restorePackageSnapshot(snapshot, options) {
8724
8734
  writeFileSync(filePath, content, "utf-8");
8725
8735
  filesRestored++;
8726
8736
  } catch (err) {
8727
- logger$8.warn({
8737
+ logger$9.warn({
8728
8738
  filePath,
8729
8739
  err
8730
8740
  }, "Failed to restore file from snapshot");
@@ -8735,7 +8745,7 @@ function restorePackageSnapshot(snapshot, options) {
8735
8745
  encoding: "utf-8",
8736
8746
  stdio: "pipe"
8737
8747
  });
8738
- logger$8.info({
8748
+ logger$9.info({
8739
8749
  filesRestored,
8740
8750
  installCommand: snapshot.installCommand
8741
8751
  }, "Package snapshot restored successfully");
@@ -8746,7 +8756,7 @@ function restorePackageSnapshot(snapshot, options) {
8746
8756
  };
8747
8757
  } catch (err) {
8748
8758
  const exitCode = err.status ?? 1;
8749
- logger$8.warn({
8759
+ logger$9.warn({
8750
8760
  filesRestored,
8751
8761
  exitCode,
8752
8762
  err
@@ -9092,7 +9102,7 @@ const RecommendationSchema = z.object({
9092
9102
 
9093
9103
  //#endregion
9094
9104
  //#region src/modules/telemetry/adapter-persistence.ts
9095
- const logger$7 = createLogger("telemetry:adapter-persistence");
9105
+ const logger$8 = createLogger("telemetry:adapter-persistence");
9096
9106
  /**
9097
9107
  * Concrete DatabaseAdapter-backed telemetry persistence.
9098
9108
  *
@@ -9267,7 +9277,7 @@ var AdapterTelemetryPersistence = class {
9267
9277
  ]);
9268
9278
  }
9269
9279
  });
9270
- logger$7.debug({
9280
+ logger$8.debug({
9271
9281
  storyKey,
9272
9282
  count: turns.length
9273
9283
  }, "Stored turn analysis");
@@ -9339,7 +9349,7 @@ var AdapterTelemetryPersistence = class {
9339
9349
  score.taskType ?? null,
9340
9350
  score.phase ?? null
9341
9351
  ]);
9342
- logger$7.debug({
9352
+ logger$8.debug({
9343
9353
  storyKey: score.storyKey,
9344
9354
  compositeScore: score.compositeScore
9345
9355
  }, "Stored efficiency score");
@@ -9407,7 +9417,7 @@ var AdapterTelemetryPersistence = class {
9407
9417
  ]);
9408
9418
  }
9409
9419
  });
9410
- logger$7.debug({
9420
+ logger$8.debug({
9411
9421
  storyKey,
9412
9422
  count: recs.length
9413
9423
  }, "Saved recommendations");
@@ -9486,7 +9496,7 @@ var AdapterTelemetryPersistence = class {
9486
9496
  ]);
9487
9497
  } catch {}
9488
9498
  });
9489
- logger$7.debug({
9499
+ logger$8.debug({
9490
9500
  storyKey,
9491
9501
  count: stats.length
9492
9502
  }, "Stored category stats");
@@ -9531,7 +9541,7 @@ var AdapterTelemetryPersistence = class {
9531
9541
  ]);
9532
9542
  } catch {}
9533
9543
  });
9534
- logger$7.debug({
9544
+ logger$8.debug({
9535
9545
  storyKey,
9536
9546
  count: consumers.length
9537
9547
  }, "Stored consumer stats");
@@ -9563,14 +9573,14 @@ var AdapterTelemetryPersistence = class {
9563
9573
  await adapter.query("DELETE FROM category_stats WHERE story_key = ?", [storyKey]);
9564
9574
  await adapter.query("DELETE FROM consumer_stats WHERE story_key = ?", [storyKey]);
9565
9575
  });
9566
- logger$7.debug({ storyKey }, "Purged stale telemetry data for story");
9576
+ logger$8.debug({ storyKey }, "Purged stale telemetry data for story");
9567
9577
  }
9568
9578
  /**
9569
9579
  * Record a named span with arbitrary attributes.
9570
9580
  * Currently logs the span at debug level; no DB persistence.
9571
9581
  */
9572
9582
  recordSpan(span) {
9573
- logger$7.debug({ span }, "recordSpan");
9583
+ logger$8.debug({ span }, "recordSpan");
9574
9584
  }
9575
9585
  };
9576
9586
 
@@ -9649,7 +9659,7 @@ var TelemetryPersistence = class {
9649
9659
 
9650
9660
  //#endregion
9651
9661
  //#region src/modules/telemetry/telemetry-advisor.ts
9652
- const logger$6 = createLogger("telemetry-advisor");
9662
+ const logger$7 = createLogger("telemetry-advisor");
9653
9663
  /**
9654
9664
  * Reads telemetry efficiency data to support retry gate decisions.
9655
9665
  */
@@ -9671,7 +9681,7 @@ var TelemetryAdvisor = class {
9671
9681
  try {
9672
9682
  const score = await this._persistence.getEfficiencyScore(storyKey);
9673
9683
  if (score === null) {
9674
- logger$6.debug({ storyKey }, "No efficiency score found for story");
9684
+ logger$7.debug({ storyKey }, "No efficiency score found for story");
9675
9685
  return null;
9676
9686
  }
9677
9687
  return {
@@ -9686,7 +9696,7 @@ var TelemetryAdvisor = class {
9686
9696
  coldStartTurnsExcluded: score.coldStartTurnsExcluded ?? 0
9687
9697
  };
9688
9698
  } catch (err) {
9689
- logger$6.warn({
9699
+ logger$7.warn({
9690
9700
  err,
9691
9701
  storyKey
9692
9702
  }, "Failed to retrieve efficiency score");
@@ -9723,7 +9733,7 @@ var TelemetryAdvisor = class {
9723
9733
  merged.sort((a, b) => (severityOrder[a.severity] ?? 3) - (severityOrder[b.severity] ?? 3));
9724
9734
  return merged;
9725
9735
  } catch (err) {
9726
- logger$6.warn({ err }, "Failed to retrieve recommendations for run — returning empty");
9736
+ logger$7.warn({ err }, "Failed to retrieve recommendations for run — returning empty");
9727
9737
  return [];
9728
9738
  }
9729
9739
  }
@@ -9753,7 +9763,7 @@ var TelemetryAdvisor = class {
9753
9763
  const lines = actionable.map((r) => `OPTIMIZATION (${r.severity}): ${r.title}. ${r.description}`);
9754
9764
  const full = lines.join("\n");
9755
9765
  if (full.length <= MAX_CHARS$1) {
9756
- logger$6.debug({
9766
+ logger$7.debug({
9757
9767
  count: actionable.length,
9758
9768
  chars: full.length
9759
9769
  }, "Formatting optimization directives");
@@ -9761,7 +9771,7 @@ var TelemetryAdvisor = class {
9761
9771
  }
9762
9772
  const cutAt = full.lastIndexOf(" ", MAX_CHARS$1);
9763
9773
  const truncated = (cutAt > 0 ? full.slice(0, cutAt) : full.slice(0, MAX_CHARS$1)) + "…";
9764
- logger$6.debug({
9774
+ logger$7.debug({
9765
9775
  count: actionable.length,
9766
9776
  chars: truncated.length
9767
9777
  }, "Optimization directives truncated to budget");
@@ -10517,7 +10527,7 @@ function checkProfileStaleness(projectRoot) {
10517
10527
  */
10518
10528
  function createImplementationOrchestrator(deps) {
10519
10529
  const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer, repoMapInjector, maxRepoMapTokens } = deps;
10520
- const logger$21 = createLogger("implementation-orchestrator");
10530
+ const logger$22 = createLogger("implementation-orchestrator");
10521
10531
  const telemetryAdvisor = db !== void 0 ? createTelemetryAdvisor({ db }) : void 0;
10522
10532
  const wgRepo = new WorkGraphRepository(db);
10523
10533
  const _wgInProgressWritten = new Set();
@@ -10568,7 +10578,7 @@ function createImplementationOrchestrator(deps) {
10568
10578
  const nowMs = Date.now();
10569
10579
  for (const [phase, startMs] of starts) {
10570
10580
  const endMs = ends?.get(phase);
10571
- if (endMs === void 0) logger$21.warn({
10581
+ if (endMs === void 0) logger$22.warn({
10572
10582
  storyKey,
10573
10583
  phase
10574
10584
  }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
@@ -10615,7 +10625,7 @@ function createImplementationOrchestrator(deps) {
10615
10625
  recordedAt: completedAt,
10616
10626
  timestamp: completedAt
10617
10627
  }).catch((storeErr) => {
10618
- logger$21.warn({
10628
+ logger$22.warn({
10619
10629
  err: storeErr,
10620
10630
  storyKey
10621
10631
  }, "Failed to record metric to StateStore (best-effort)");
@@ -10637,7 +10647,7 @@ function createImplementationOrchestrator(deps) {
10637
10647
  rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
10638
10648
  });
10639
10649
  } catch (decisionErr) {
10640
- logger$21.warn({
10650
+ logger$22.warn({
10641
10651
  err: decisionErr,
10642
10652
  storyKey
10643
10653
  }, "Failed to write story-metrics decision (best-effort)");
@@ -10665,13 +10675,13 @@ function createImplementationOrchestrator(deps) {
10665
10675
  dispatches: _storyDispatches.get(storyKey) ?? 0
10666
10676
  });
10667
10677
  } catch (emitErr) {
10668
- logger$21.warn({
10678
+ logger$22.warn({
10669
10679
  err: emitErr,
10670
10680
  storyKey
10671
10681
  }, "Failed to emit story:metrics event (best-effort)");
10672
10682
  }
10673
10683
  } catch (err) {
10674
- logger$21.warn({
10684
+ logger$22.warn({
10675
10685
  err,
10676
10686
  storyKey
10677
10687
  }, "Failed to write story metrics (best-effort)");
@@ -10700,7 +10710,7 @@ function createImplementationOrchestrator(deps) {
10700
10710
  rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
10701
10711
  });
10702
10712
  } catch (err) {
10703
- logger$21.warn({
10713
+ logger$22.warn({
10704
10714
  err,
10705
10715
  storyKey
10706
10716
  }, "Failed to write story-outcome decision (best-effort)");
@@ -10726,7 +10736,7 @@ function createImplementationOrchestrator(deps) {
10726
10736
  rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
10727
10737
  });
10728
10738
  } catch (err) {
10729
- logger$21.warn({
10739
+ logger$22.warn({
10730
10740
  err,
10731
10741
  storyKey: payload.storyKey
10732
10742
  }, "Failed to persist escalation diagnosis (best-effort)");
@@ -10776,7 +10786,7 @@ function createImplementationOrchestrator(deps) {
10776
10786
  const existing = _stories.get(storyKey);
10777
10787
  if (existing !== void 0) {
10778
10788
  Object.assign(existing, updates);
10779
- persistStoryState(storyKey, existing).catch((err) => logger$21.warn({
10789
+ persistStoryState(storyKey, existing).catch((err) => logger$22.warn({
10780
10790
  err,
10781
10791
  storyKey
10782
10792
  }, "StateStore write failed after updateStory"));
@@ -10785,12 +10795,12 @@ function createImplementationOrchestrator(deps) {
10785
10795
  storyKey,
10786
10796
  conflict: err
10787
10797
  });
10788
- else logger$21.warn({
10798
+ else logger$22.warn({
10789
10799
  err,
10790
10800
  storyKey
10791
10801
  }, "mergeStory failed");
10792
10802
  });
10793
- else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$21.warn({
10803
+ else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$22.warn({
10794
10804
  err,
10795
10805
  storyKey
10796
10806
  }, "rollbackStory failed — branch may persist"));
@@ -10802,7 +10812,7 @@ function createImplementationOrchestrator(deps) {
10802
10812
  ...updates
10803
10813
  };
10804
10814
  const opts = targetStatus === "complete" || targetStatus === "escalated" ? { completedAt: fullUpdated.completedAt } : void 0;
10805
- wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$21.warn({
10815
+ wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$22.warn({
10806
10816
  err,
10807
10817
  storyKey
10808
10818
  }, "wg_stories status update failed (best-effort)"));
@@ -10833,7 +10843,7 @@ function createImplementationOrchestrator(deps) {
10833
10843
  };
10834
10844
  await stateStore.setStoryState(storyKey, record);
10835
10845
  } catch (err) {
10836
- logger$21.warn({
10846
+ logger$22.warn({
10837
10847
  err,
10838
10848
  storyKey
10839
10849
  }, "StateStore.setStoryState failed (best-effort)");
@@ -10849,7 +10859,7 @@ function createImplementationOrchestrator(deps) {
10849
10859
  token_usage_json: serialized
10850
10860
  });
10851
10861
  } catch (err) {
10852
- logger$21.warn({ err }, "Failed to persist orchestrator state");
10862
+ logger$22.warn({ err }, "Failed to persist orchestrator state");
10853
10863
  }
10854
10864
  }
10855
10865
  function recordProgress() {
@@ -10876,7 +10886,7 @@ function createImplementationOrchestrator(deps) {
10876
10886
  queuedDispatches: queued
10877
10887
  });
10878
10888
  if (config.pipelineRunId !== void 0) updatePipelineRun(db, config.pipelineRunId, { current_phase: "implementation" }).catch((err) => {
10879
- logger$21.debug({ err }, "Heartbeat: failed to touch updated_at (non-fatal)");
10889
+ logger$22.debug({ err }, "Heartbeat: failed to touch updated_at (non-fatal)");
10880
10890
  });
10881
10891
  const elapsed = Date.now() - _lastProgressTs;
10882
10892
  let childPids = [];
@@ -10898,7 +10908,7 @@ function createImplementationOrchestrator(deps) {
10898
10908
  }
10899
10909
  if (childActive) {
10900
10910
  _lastProgressTs = Date.now();
10901
- logger$21.debug({
10911
+ logger$22.debug({
10902
10912
  storyKey: key,
10903
10913
  phase: s$1.phase,
10904
10914
  childPids
@@ -10907,7 +10917,7 @@ function createImplementationOrchestrator(deps) {
10907
10917
  }
10908
10918
  _stalledStories.add(key);
10909
10919
  _storiesWithStall.add(key);
10910
- logger$21.warn({
10920
+ logger$22.warn({
10911
10921
  storyKey: key,
10912
10922
  phase: s$1.phase,
10913
10923
  elapsedMs: elapsed,
@@ -10952,7 +10962,7 @@ function createImplementationOrchestrator(deps) {
10952
10962
  for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
10953
10963
  const memState = dispatcher.getMemoryState();
10954
10964
  if (!memState.isPressured) return true;
10955
- logger$21.warn({
10965
+ logger$22.warn({
10956
10966
  storyKey,
10957
10967
  freeMB: memState.freeMB,
10958
10968
  thresholdMB: memState.thresholdMB,
@@ -10972,11 +10982,11 @@ function createImplementationOrchestrator(deps) {
10972
10982
  * exhausted retries the story is ESCALATED.
10973
10983
  */
10974
10984
  async function processStory(storyKey, storyOptions) {
10975
- logger$21.info({ storyKey }, "Processing story");
10985
+ logger$22.info({ storyKey }, "Processing story");
10976
10986
  {
10977
10987
  const memoryOk = await checkMemoryPressure(storyKey);
10978
10988
  if (!memoryOk) {
10979
- logger$21.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
10989
+ logger$22.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
10980
10990
  const memPressureState = {
10981
10991
  phase: "ESCALATED",
10982
10992
  reviewCycles: 0,
@@ -10985,7 +10995,7 @@ function createImplementationOrchestrator(deps) {
10985
10995
  completedAt: new Date().toISOString()
10986
10996
  };
10987
10997
  _stories.set(storyKey, memPressureState);
10988
- persistStoryState(storyKey, memPressureState).catch((err) => logger$21.warn({
10998
+ persistStoryState(storyKey, memPressureState).catch((err) => logger$22.warn({
10989
10999
  err,
10990
11000
  storyKey
10991
11001
  }, "StateStore write failed after memory-pressure escalation"));
@@ -11002,7 +11012,7 @@ function createImplementationOrchestrator(deps) {
11002
11012
  }
11003
11013
  await waitIfPaused();
11004
11014
  if (_state !== "RUNNING") return;
11005
- stateStore?.branchForStory(storyKey).catch((err) => logger$21.warn({
11015
+ stateStore?.branchForStory(storyKey).catch((err) => logger$22.warn({
11006
11016
  err,
11007
11017
  storyKey
11008
11018
  }, "branchForStory failed — continuing without branch isolation"));
@@ -11019,14 +11029,14 @@ function createImplementationOrchestrator(deps) {
11019
11029
  if (match$1) {
11020
11030
  const candidatePath = join$1(artifactsDir, match$1);
11021
11031
  const validation = await isValidStoryFile(candidatePath);
11022
- if (!validation.valid) logger$21.warn({
11032
+ if (!validation.valid) logger$22.warn({
11023
11033
  storyKey,
11024
11034
  storyFilePath: candidatePath,
11025
11035
  reason: validation.reason
11026
11036
  }, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
11027
11037
  else {
11028
11038
  storyFilePath = candidatePath;
11029
- logger$21.info({
11039
+ logger$22.info({
11030
11040
  storyKey,
11031
11041
  storyFilePath
11032
11042
  }, "Found existing story file — skipping create-story");
@@ -11045,7 +11055,7 @@ function createImplementationOrchestrator(deps) {
11045
11055
  }
11046
11056
  } catch {}
11047
11057
  if (storyFilePath === void 0 && projectRoot && isImplicitlyCovered(storyKey, projectRoot)) {
11048
- logger$21.info({ storyKey }, `Story ${storyKey} appears implicitly covered — all expected new files already exist. Skipping create-story.`);
11058
+ logger$22.info({ storyKey }, `Story ${storyKey} appears implicitly covered — all expected new files already exist. Skipping create-story.`);
11049
11059
  endPhase(storyKey, "create-story");
11050
11060
  eventBus.emit("orchestrator:story-phase-complete", {
11051
11061
  storyKey,
@@ -11094,7 +11104,7 @@ function createImplementationOrchestrator(deps) {
11094
11104
  metadata: JSON.stringify({ storyKey })
11095
11105
  });
11096
11106
  } catch (tokenErr) {
11097
- logger$21.warn({
11107
+ logger$22.warn({
11098
11108
  storyKey,
11099
11109
  err: tokenErr
11100
11110
  }, "Failed to record create-story token usage");
@@ -11103,7 +11113,7 @@ function createImplementationOrchestrator(deps) {
11103
11113
  if (createResult.result === "failed") {
11104
11114
  const errMsg = createResult.error ?? "create-story failed";
11105
11115
  const stderrSnippet = errMsg.includes("--- stderr ---") ? errMsg.slice(errMsg.indexOf("--- stderr ---") + 15, errMsg.indexOf("--- stderr ---") + 515) : errMsg.slice(0, 500);
11106
- logger$21.error({
11116
+ logger$22.error({
11107
11117
  storyKey,
11108
11118
  stderrSnippet
11109
11119
  }, `Create-story failed: ${stderrSnippet.split("\n")[0]}`);
@@ -11156,7 +11166,7 @@ function createImplementationOrchestrator(deps) {
11156
11166
  const overlap = computeTitleOverlap(expectedTitle, createResult.story_title);
11157
11167
  if (overlap < TITLE_OVERLAP_WARNING_THRESHOLD) {
11158
11168
  const msg = `Story title mismatch: expected "${expectedTitle}" but got "${createResult.story_title}" (word overlap: ${Math.round(overlap * 100)}%). This may indicate the create-story agent received truncated context.`;
11159
- logger$21.warn({
11169
+ logger$22.warn({
11160
11170
  storyKey,
11161
11171
  expectedTitle,
11162
11172
  generatedTitle: createResult.story_title,
@@ -11166,7 +11176,7 @@ function createImplementationOrchestrator(deps) {
11166
11176
  storyKey,
11167
11177
  msg
11168
11178
  });
11169
- } else logger$21.debug({
11179
+ } else logger$22.debug({
11170
11180
  storyKey,
11171
11181
  expectedTitle,
11172
11182
  generatedTitle: createResult.story_title,
@@ -11175,7 +11185,7 @@ function createImplementationOrchestrator(deps) {
11175
11185
  }
11176
11186
  }
11177
11187
  } catch (titleValidationErr) {
11178
- logger$21.debug({
11188
+ logger$22.debug({
11179
11189
  storyKey,
11180
11190
  err: titleValidationErr
11181
11191
  }, "Story title validation skipped due to error");
@@ -11223,14 +11233,14 @@ function createImplementationOrchestrator(deps) {
11223
11233
  ...contract.transport !== void 0 ? { transport: contract.transport } : {}
11224
11234
  })
11225
11235
  });
11226
- logger$21.info({
11236
+ logger$22.info({
11227
11237
  storyKey,
11228
11238
  contractCount: contracts.length,
11229
11239
  contracts
11230
11240
  }, "Stored interface contract declarations");
11231
11241
  }
11232
11242
  } catch (err) {
11233
- logger$21.warn({
11243
+ logger$22.warn({
11234
11244
  storyKey,
11235
11245
  error: err instanceof Error ? err.message : String(err)
11236
11246
  }, "Failed to parse interface contracts — continuing without contract declarations");
@@ -11258,10 +11268,10 @@ function createImplementationOrchestrator(deps) {
11258
11268
  });
11259
11269
  testPlanPhaseResult = testPlanResult.result;
11260
11270
  testPlanTokenUsage = testPlanResult.tokenUsage;
11261
- if (testPlanResult.result === "success") logger$21.info({ storyKey }, "Test plan generated successfully");
11262
- else logger$21.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
11271
+ if (testPlanResult.result === "success") logger$22.info({ storyKey }, "Test plan generated successfully");
11272
+ else logger$22.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
11263
11273
  } catch (err) {
11264
- logger$21.warn({
11274
+ logger$22.warn({
11265
11275
  storyKey,
11266
11276
  err
11267
11277
  }, "Test planning failed — proceeding to dev-story without test plan");
@@ -11277,7 +11287,7 @@ function createImplementationOrchestrator(deps) {
11277
11287
  metadata: JSON.stringify({ storyKey })
11278
11288
  });
11279
11289
  } catch (tokenErr) {
11280
- logger$21.warn({
11290
+ logger$22.warn({
11281
11291
  storyKey,
11282
11292
  err: tokenErr
11283
11293
  }, "Failed to record test-plan token usage");
@@ -11313,7 +11323,7 @@ function createImplementationOrchestrator(deps) {
11313
11323
  try {
11314
11324
  storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
11315
11325
  } catch (err) {
11316
- logger$21.error({
11326
+ logger$22.error({
11317
11327
  storyKey,
11318
11328
  storyFilePath,
11319
11329
  error: err instanceof Error ? err.message : String(err)
@@ -11321,7 +11331,7 @@ function createImplementationOrchestrator(deps) {
11321
11331
  }
11322
11332
  const analysis = analyzeStoryComplexity(storyContentForAnalysis);
11323
11333
  const batches = planTaskBatches(analysis);
11324
- logger$21.info({
11334
+ logger$22.info({
11325
11335
  storyKey,
11326
11336
  estimatedScope: analysis.estimatedScope,
11327
11337
  batchCount: batches.length,
@@ -11339,7 +11349,7 @@ function createImplementationOrchestrator(deps) {
11339
11349
  if (_state !== "RUNNING") break;
11340
11350
  const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
11341
11351
  const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
11342
- logger$21.info({
11352
+ logger$22.info({
11343
11353
  storyKey,
11344
11354
  batchIndex: batch.batchIndex,
11345
11355
  taskCount: batch.taskIds.length
@@ -11369,7 +11379,7 @@ function createImplementationOrchestrator(deps) {
11369
11379
  });
11370
11380
  } catch (batchErr) {
11371
11381
  const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
11372
- logger$21.warn({
11382
+ logger$22.warn({
11373
11383
  storyKey,
11374
11384
  batchIndex: batch.batchIndex,
11375
11385
  error: errMsg
@@ -11389,7 +11399,7 @@ function createImplementationOrchestrator(deps) {
11389
11399
  filesModified: batchFilesModified,
11390
11400
  result: batchResult.result === "success" ? "success" : "failed"
11391
11401
  };
11392
- logger$21.info(batchMetrics, "Batch dev-story metrics");
11402
+ logger$22.info(batchMetrics, "Batch dev-story metrics");
11393
11403
  for (const f$1 of batchFilesModified) allFilesModified.add(f$1);
11394
11404
  if (batchFilesModified.length > 0) batchFileGroups.push({
11395
11405
  batchIndex: batch.batchIndex,
@@ -11411,13 +11421,13 @@ function createImplementationOrchestrator(deps) {
11411
11421
  })
11412
11422
  });
11413
11423
  } catch (tokenErr) {
11414
- logger$21.warn({
11424
+ logger$22.warn({
11415
11425
  storyKey,
11416
11426
  batchIndex: batch.batchIndex,
11417
11427
  err: tokenErr
11418
11428
  }, "Failed to record batch token usage");
11419
11429
  }
11420
- if (batchResult.result === "failed") logger$21.warn({
11430
+ if (batchResult.result === "failed") logger$22.warn({
11421
11431
  storyKey,
11422
11432
  batchIndex: batch.batchIndex,
11423
11433
  error: batchResult.error
@@ -11461,7 +11471,7 @@ function createImplementationOrchestrator(deps) {
11461
11471
  metadata: JSON.stringify({ storyKey })
11462
11472
  });
11463
11473
  } catch (tokenErr) {
11464
- logger$21.warn({
11474
+ logger$22.warn({
11465
11475
  storyKey,
11466
11476
  err: tokenErr
11467
11477
  }, "Failed to record dev-story token usage");
@@ -11477,7 +11487,7 @@ function createImplementationOrchestrator(deps) {
11477
11487
  endPhase(storyKey, "dev-story");
11478
11488
  const timeoutFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
11479
11489
  if (timeoutFiles.length === 0) {
11480
- logger$21.warn({ storyKey }, "Dev-story timeout with zero modified files — escalating immediately (no checkpoint)");
11490
+ logger$22.warn({ storyKey }, "Dev-story timeout with zero modified files — escalating immediately (no checkpoint)");
11481
11491
  updateStory(storyKey, {
11482
11492
  phase: "ESCALATED",
11483
11493
  error: "timeout-no-files",
@@ -11493,7 +11503,7 @@ function createImplementationOrchestrator(deps) {
11493
11503
  await persistState();
11494
11504
  return;
11495
11505
  }
11496
- logger$21.info({
11506
+ logger$22.info({
11497
11507
  storyKey,
11498
11508
  filesCount: timeoutFiles.length
11499
11509
  }, "Dev-story timeout with partial files — capturing checkpoint");
@@ -11510,7 +11520,7 @@ function createImplementationOrchestrator(deps) {
11510
11520
  ]
11511
11521
  }).trim();
11512
11522
  } catch (diffErr) {
11513
- logger$21.warn({
11523
+ logger$22.warn({
11514
11524
  storyKey,
11515
11525
  error: diffErr instanceof Error ? diffErr.message : String(diffErr)
11516
11526
  }, "Failed to capture git diff for checkpoint — proceeding with empty diff");
@@ -11537,7 +11547,7 @@ function createImplementationOrchestrator(deps) {
11537
11547
  recordedAt: new Date().toISOString(),
11538
11548
  sprint: config.sprint
11539
11549
  }).catch((storeErr) => {
11540
- logger$21.warn({
11550
+ logger$22.warn({
11541
11551
  err: storeErr,
11542
11552
  storyKey
11543
11553
  }, "Failed to record timeout metric to StateStore (best-effort)");
@@ -11596,9 +11606,9 @@ function createImplementationOrchestrator(deps) {
11596
11606
  checkpointRetryPrompt = assembled.prompt;
11597
11607
  } catch {
11598
11608
  checkpointRetryPrompt = `Continue story ${storyKey} from checkpoint. Your prior attempt timed out. Do not redo completed work.`;
11599
- logger$21.warn({ storyKey }, "Failed to assemble checkpoint retry prompt — using fallback");
11609
+ logger$22.warn({ storyKey }, "Failed to assemble checkpoint retry prompt — using fallback");
11600
11610
  }
11601
- logger$21.info({
11611
+ logger$22.info({
11602
11612
  storyKey,
11603
11613
  filesCount: checkpointData.filesModified.length
11604
11614
  }, "Dispatching checkpoint retry for timed-out story");
@@ -11627,7 +11637,7 @@ function createImplementationOrchestrator(deps) {
11627
11637
  } : void 0 }
11628
11638
  });
11629
11639
  if (checkpointRetryResult.status === "timeout") {
11630
- logger$21.warn({ storyKey }, "Checkpoint retry dispatch timed out — escalating story");
11640
+ logger$22.warn({ storyKey }, "Checkpoint retry dispatch timed out — escalating story");
11631
11641
  updateStory(storyKey, {
11632
11642
  phase: "ESCALATED",
11633
11643
  error: "checkpoint-retry-timeout",
@@ -11646,7 +11656,7 @@ function createImplementationOrchestrator(deps) {
11646
11656
  const retryParsed = checkpointRetryResult.parsed;
11647
11657
  devFilesModified = retryParsed?.files_modified ?? checkGitDiffFiles(projectRoot ?? process.cwd());
11648
11658
  if (checkpointRetryResult.status === "completed" && retryParsed?.result === "success") devStoryWasSuccess = true;
11649
- else logger$21.warn({
11659
+ else logger$22.warn({
11650
11660
  storyKey,
11651
11661
  status: checkpointRetryResult.status
11652
11662
  }, "Checkpoint retry completed with failure — proceeding to code review");
@@ -11654,13 +11664,13 @@ function createImplementationOrchestrator(deps) {
11654
11664
  }
11655
11665
  if (!checkpointHandled) if (devResult.result === "success") devStoryWasSuccess = true;
11656
11666
  else {
11657
- logger$21.warn({
11667
+ logger$22.warn({
11658
11668
  storyKey,
11659
11669
  error: devResult.error,
11660
11670
  filesModified: devFilesModified.length
11661
11671
  }, "Dev-story reported failure, proceeding to code review");
11662
11672
  if (!devResult.error?.startsWith("dispatch_timeout")) {
11663
- logger$21.warn({
11673
+ logger$22.warn({
11664
11674
  storyKey,
11665
11675
  error: devResult.error
11666
11676
  }, "Agent process failure (non-timeout) — story will proceed to code review with partial work");
@@ -11707,12 +11717,12 @@ function createImplementationOrchestrator(deps) {
11707
11717
  }).trim();
11708
11718
  hasNewCommits = currentHead !== baselineHeadSha;
11709
11719
  } catch {}
11710
- if (hasNewCommits) logger$21.info({
11720
+ if (hasNewCommits) logger$22.info({
11711
11721
  storyKey,
11712
11722
  baselineHeadSha
11713
11723
  }, "Working tree clean but new commits detected since dispatch — skipping zero-diff escalation");
11714
11724
  else {
11715
- logger$21.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes and no new commits");
11725
+ logger$22.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes and no new commits");
11716
11726
  eventBus.emit("orchestrator:zero-diff-escalation", {
11717
11727
  storyKey,
11718
11728
  reason: "zero-diff-on-complete"
@@ -11761,10 +11771,10 @@ function createImplementationOrchestrator(deps) {
11761
11771
  "pipe"
11762
11772
  ]
11763
11773
  });
11764
- logger$21.info({ storyKey }, "Secondary typecheck (tsc --noEmit) passed");
11774
+ logger$22.info({ storyKey }, "Secondary typecheck (tsc --noEmit) passed");
11765
11775
  } catch (tscErr) {
11766
11776
  const tscOutput = tscErr instanceof Error && "stdout" in tscErr ? String(tscErr.stdout ?? "").slice(0, 2e3) : "";
11767
- logger$21.warn({
11777
+ logger$22.warn({
11768
11778
  storyKey,
11769
11779
  tscOutput
11770
11780
  }, "Secondary typecheck (tsc --noEmit) failed — treating as build failure");
@@ -11778,7 +11788,7 @@ function createImplementationOrchestrator(deps) {
11778
11788
  }
11779
11789
  if (buildVerifyResult.status === "passed") {
11780
11790
  eventBus.emit("story:build-verification-passed", { storyKey });
11781
- logger$21.info({ storyKey }, "Build verification passed");
11791
+ logger$22.info({ storyKey }, "Build verification passed");
11782
11792
  } else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
11783
11793
  const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
11784
11794
  const reason = buildVerifyResult.reason ?? "build-verification-failed";
@@ -11787,7 +11797,7 @@ function createImplementationOrchestrator(deps) {
11787
11797
  const resolvedRoot = projectRoot ?? process.cwd();
11788
11798
  const hasChanges = detectPackageChanges(_packageSnapshot, resolvedRoot);
11789
11799
  if (hasChanges) {
11790
- logger$21.warn({ storyKey }, "Package files changed since snapshot — restoring to prevent cascade");
11800
+ logger$22.warn({ storyKey }, "Package files changed since snapshot — restoring to prevent cascade");
11791
11801
  const restoreResult = restorePackageSnapshot(_packageSnapshot, { projectRoot: resolvedRoot });
11792
11802
  if (restoreResult.restored) {
11793
11803
  const retryAfterRestore = runBuildVerification({
@@ -11799,11 +11809,11 @@ function createImplementationOrchestrator(deps) {
11799
11809
  if (retryAfterRestore.status === "passed") {
11800
11810
  retryPassed = true;
11801
11811
  eventBus.emit("story:build-verification-passed", { storyKey });
11802
- logger$21.warn({
11812
+ logger$22.warn({
11803
11813
  storyKey,
11804
11814
  filesRestored: restoreResult.filesRestored
11805
11815
  }, "Build passed after package snapshot restore — cross-story pollution detected and cleaned");
11806
- } else logger$21.warn({
11816
+ } else logger$22.warn({
11807
11817
  storyKey,
11808
11818
  filesRestored: restoreResult.filesRestored
11809
11819
  }, "Build still fails after snapshot restore — story has its own build errors");
@@ -11815,7 +11825,7 @@ function createImplementationOrchestrator(deps) {
11815
11825
  if (missingPkgMatch && buildVerifyResult.status !== "timeout") {
11816
11826
  const missingPkg = missingPkgMatch[1].replace(/^(@[^/]+\/[^/]+)\/.*$/, "$1").replace(/^([^@][^/]*)\/.*$/, "$1");
11817
11827
  const resolvedRoot = projectRoot ?? process.cwd();
11818
- logger$21.warn({
11828
+ logger$22.warn({
11819
11829
  storyKey,
11820
11830
  missingPkg
11821
11831
  }, "Build-fix retry: detected missing npm package — attempting npm install");
@@ -11826,7 +11836,7 @@ function createImplementationOrchestrator(deps) {
11826
11836
  encoding: "utf-8",
11827
11837
  stdio: "pipe"
11828
11838
  });
11829
- logger$21.warn({
11839
+ logger$22.warn({
11830
11840
  storyKey,
11831
11841
  missingPkg
11832
11842
  }, "Build-fix retry: npm install succeeded — retrying build verification");
@@ -11839,18 +11849,18 @@ function createImplementationOrchestrator(deps) {
11839
11849
  if (retryResult.status === "passed") {
11840
11850
  retryPassed = true;
11841
11851
  eventBus.emit("story:build-verification-passed", { storyKey });
11842
- logger$21.warn({
11852
+ logger$22.warn({
11843
11853
  storyKey,
11844
11854
  missingPkg
11845
11855
  }, "Build-fix retry: build verification passed after installing missing package");
11846
- } else logger$21.warn({
11856
+ } else logger$22.warn({
11847
11857
  storyKey,
11848
11858
  missingPkg,
11849
11859
  retryStatus: retryResult.status
11850
11860
  }, "Build-fix retry: build still fails after installing missing package — escalating");
11851
11861
  } catch (installErr) {
11852
11862
  const installMsg = installErr instanceof Error ? installErr.message : String(installErr);
11853
- logger$21.warn({
11863
+ logger$22.warn({
11854
11864
  storyKey,
11855
11865
  missingPkg,
11856
11866
  error: installMsg
@@ -11860,7 +11870,7 @@ function createImplementationOrchestrator(deps) {
11860
11870
  if (!retryPassed) {
11861
11871
  let buildFixPassed = false;
11862
11872
  if (buildVerifyResult.status === "failed" && storyFilePath !== void 0) try {
11863
- logger$21.info({ storyKey }, "Dispatching build-fix agent");
11873
+ logger$22.info({ storyKey }, "Dispatching build-fix agent");
11864
11874
  startPhase(storyKey, "build-fix");
11865
11875
  const storyContent = await readFile$1(storyFilePath, "utf-8");
11866
11876
  let buildFixTemplate;
@@ -11896,11 +11906,11 @@ function createImplementationOrchestrator(deps) {
11896
11906
  if (retryAfterFix.status === "passed") {
11897
11907
  buildFixPassed = true;
11898
11908
  eventBus.emit("story:build-verification-passed", { storyKey });
11899
- logger$21.info({ storyKey }, "Build passed after build-fix dispatch");
11900
- } else logger$21.warn({ storyKey }, "Build still fails after build-fix dispatch — escalating");
11909
+ logger$22.info({ storyKey }, "Build passed after build-fix dispatch");
11910
+ } else logger$22.warn({ storyKey }, "Build still fails after build-fix dispatch — escalating");
11901
11911
  } catch (fixErr) {
11902
11912
  const fixMsg = fixErr instanceof Error ? fixErr.message : String(fixErr);
11903
- logger$21.warn({
11913
+ logger$22.warn({
11904
11914
  storyKey,
11905
11915
  error: fixMsg
11906
11916
  }, "Build-fix dispatch failed — escalating");
@@ -11911,7 +11921,7 @@ function createImplementationOrchestrator(deps) {
11911
11921
  exitCode: buildVerifyResult.exitCode ?? 1,
11912
11922
  output: truncatedOutput
11913
11923
  });
11914
- logger$21.warn({
11924
+ logger$22.warn({
11915
11925
  storyKey,
11916
11926
  reason,
11917
11927
  exitCode: buildVerifyResult.exitCode
@@ -11943,7 +11953,7 @@ function createImplementationOrchestrator(deps) {
11943
11953
  storyKey
11944
11954
  });
11945
11955
  if (icResult.potentiallyAffectedTests.length > 0) {
11946
- logger$21.warn({
11956
+ logger$22.warn({
11947
11957
  storyKey,
11948
11958
  modifiedInterfaces: icResult.modifiedInterfaces,
11949
11959
  potentiallyAffectedTests: icResult.potentiallyAffectedTests
@@ -11989,7 +11999,7 @@ function createImplementationOrchestrator(deps) {
11989
11999
  "NEEDS_MAJOR_REWORK": 2
11990
12000
  };
11991
12001
  for (const group of batchFileGroups) {
11992
- logger$21.info({
12002
+ logger$22.info({
11993
12003
  storyKey,
11994
12004
  batchIndex: group.batchIndex,
11995
12005
  fileCount: group.files.length
@@ -12031,7 +12041,7 @@ function createImplementationOrchestrator(deps) {
12031
12041
  rawOutput: lastRawOutput,
12032
12042
  tokenUsage: aggregateTokens
12033
12043
  };
12034
- logger$21.info({
12044
+ logger$22.info({
12035
12045
  storyKey,
12036
12046
  batchCount: batchFileGroups.length,
12037
12047
  verdict: worstVerdict,
@@ -12072,7 +12082,7 @@ function createImplementationOrchestrator(deps) {
12072
12082
  })
12073
12083
  });
12074
12084
  } catch (tokenErr) {
12075
- logger$21.warn({
12085
+ logger$22.warn({
12076
12086
  storyKey,
12077
12087
  err: tokenErr
12078
12088
  }, "Failed to record code-review token usage");
@@ -12080,7 +12090,7 @@ function createImplementationOrchestrator(deps) {
12080
12090
  const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
12081
12091
  if (isPhantomReview && !timeoutRetried) {
12082
12092
  timeoutRetried = true;
12083
- logger$21.warn({
12093
+ logger$22.warn({
12084
12094
  storyKey,
12085
12095
  reviewCycles,
12086
12096
  error: reviewResult.error
@@ -12088,7 +12098,7 @@ function createImplementationOrchestrator(deps) {
12088
12098
  continue;
12089
12099
  }
12090
12100
  if (isPhantomReview && timeoutRetried) {
12091
- logger$21.warn({
12101
+ logger$22.warn({
12092
12102
  storyKey,
12093
12103
  reviewCycles,
12094
12104
  error: reviewResult.error
@@ -12112,7 +12122,7 @@ function createImplementationOrchestrator(deps) {
12112
12122
  verdict = reviewResult.verdict;
12113
12123
  issueList = reviewResult.issue_list ?? [];
12114
12124
  if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
12115
- logger$21.info({
12125
+ logger$22.info({
12116
12126
  storyKey,
12117
12127
  originalVerdict: verdict,
12118
12128
  issuesBefore: previousIssueList.length,
@@ -12148,7 +12158,7 @@ function createImplementationOrchestrator(deps) {
12148
12158
  if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
12149
12159
  parts.push(`${fileCount} files`);
12150
12160
  parts.push(`${totalTokensK} tokens`);
12151
- logger$21.info({
12161
+ logger$22.info({
12152
12162
  storyKey,
12153
12163
  verdict,
12154
12164
  agentVerdict: reviewResult.agentVerdict
@@ -12197,9 +12207,9 @@ function createImplementationOrchestrator(deps) {
12197
12207
  }),
12198
12208
  rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
12199
12209
  });
12200
- logger$21.info({ storyKey }, "Advisory notes persisted to decision store");
12210
+ logger$22.info({ storyKey }, "Advisory notes persisted to decision store");
12201
12211
  } catch (advisoryErr) {
12202
- logger$21.warn({
12212
+ logger$22.warn({
12203
12213
  storyKey,
12204
12214
  error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
12205
12215
  }, "Failed to persist advisory notes (best-effort)");
@@ -12207,27 +12217,27 @@ function createImplementationOrchestrator(deps) {
12207
12217
  if (telemetryPersistence !== void 0) try {
12208
12218
  const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
12209
12219
  if (turns.length > 0) {
12210
- const scorer = new EfficiencyScorer(logger$21);
12220
+ const scorer = new EfficiencyScorer(logger$22);
12211
12221
  const effScore = scorer.score(storyKey, turns);
12212
12222
  await telemetryPersistence.storeEfficiencyScore(effScore);
12213
- logger$21.info({
12223
+ logger$22.info({
12214
12224
  storyKey,
12215
12225
  compositeScore: effScore.compositeScore,
12216
12226
  modelCount: effScore.perModelBreakdown.length
12217
12227
  }, "Efficiency score computed and persisted");
12218
- } else logger$21.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
12228
+ } else logger$22.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
12219
12229
  } catch (effErr) {
12220
- logger$21.warn({
12230
+ logger$22.warn({
12221
12231
  storyKey,
12222
12232
  error: effErr instanceof Error ? effErr.message : String(effErr)
12223
12233
  }, "Efficiency scoring failed — story verdict unchanged");
12224
12234
  }
12225
12235
  if (telemetryPersistence !== void 0) try {
12226
12236
  const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
12227
- if (turns.length === 0) logger$21.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
12237
+ if (turns.length === 0) logger$22.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
12228
12238
  else {
12229
- const categorizer = new Categorizer(logger$21);
12230
- const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$21);
12239
+ const categorizer = new Categorizer(logger$22);
12240
+ const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$22);
12231
12241
  const categoryStats = categorizer.computeCategoryStatsFromTurns(turns);
12232
12242
  const consumerStats = consumerAnalyzer.analyzeFromTurns(turns);
12233
12243
  await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
@@ -12235,7 +12245,7 @@ function createImplementationOrchestrator(deps) {
12235
12245
  const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
12236
12246
  const topCategory = categoryStats[0]?.category ?? "none";
12237
12247
  const topConsumer = consumerStats[0]?.consumerKey ?? "none";
12238
- logger$21.info({
12248
+ logger$22.info({
12239
12249
  storyKey,
12240
12250
  topCategory,
12241
12251
  topConsumer,
@@ -12243,7 +12253,7 @@ function createImplementationOrchestrator(deps) {
12243
12253
  }, "Semantic categorization and consumer analysis complete");
12244
12254
  }
12245
12255
  } catch (catErr) {
12246
- logger$21.warn({
12256
+ logger$22.warn({
12247
12257
  storyKey,
12248
12258
  error: catErr instanceof Error ? catErr.message : String(catErr)
12249
12259
  }, "Semantic categorization failed — story verdict unchanged");
@@ -12264,7 +12274,7 @@ function createImplementationOrchestrator(deps) {
12264
12274
  filesModified: devFilesModified,
12265
12275
  workingDirectory: projectRoot
12266
12276
  });
12267
- logger$21.debug({
12277
+ logger$22.debug({
12268
12278
  storyKey,
12269
12279
  expansion_priority: expansionResult.expansion_priority,
12270
12280
  coverage_gaps: expansionResult.coverage_gaps.length
@@ -12277,7 +12287,7 @@ function createImplementationOrchestrator(deps) {
12277
12287
  value: JSON.stringify(expansionResult)
12278
12288
  });
12279
12289
  } catch (expansionErr) {
12280
- logger$21.warn({
12290
+ logger$22.warn({
12281
12291
  storyKey,
12282
12292
  error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
12283
12293
  }, "Test expansion failed — story verdict unchanged");
@@ -12304,7 +12314,7 @@ function createImplementationOrchestrator(deps) {
12304
12314
  await persistState();
12305
12315
  return;
12306
12316
  }
12307
- logger$21.info({
12317
+ logger$22.info({
12308
12318
  storyKey,
12309
12319
  reviewCycles: finalReviewCycles,
12310
12320
  issueCount: issueList.length
@@ -12364,7 +12374,7 @@ function createImplementationOrchestrator(deps) {
12364
12374
  fixPrompt = assembled.prompt;
12365
12375
  } catch {
12366
12376
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
12367
- logger$21.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
12377
+ logger$22.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
12368
12378
  }
12369
12379
  const handle = dispatcher.dispatch({
12370
12380
  prompt: fixPrompt,
@@ -12385,9 +12395,9 @@ function createImplementationOrchestrator(deps) {
12385
12395
  output: fixResult.tokenEstimate.output
12386
12396
  } : void 0 }
12387
12397
  });
12388
- if (fixResult.status === "timeout") logger$21.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
12398
+ if (fixResult.status === "timeout") logger$22.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
12389
12399
  } catch (err) {
12390
- logger$21.warn({
12400
+ logger$22.warn({
12391
12401
  storyKey,
12392
12402
  err
12393
12403
  }, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
@@ -12519,7 +12529,7 @@ function createImplementationOrchestrator(deps) {
12519
12529
  fixPrompt = assembled.prompt;
12520
12530
  } catch {
12521
12531
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
12522
- logger$21.warn({
12532
+ logger$22.warn({
12523
12533
  storyKey,
12524
12534
  taskType
12525
12535
  }, "Failed to assemble fix prompt, using fallback");
@@ -12555,7 +12565,7 @@ function createImplementationOrchestrator(deps) {
12555
12565
  } : void 0 }
12556
12566
  });
12557
12567
  if (fixResult.status === "timeout") {
12558
- logger$21.warn({
12568
+ logger$22.warn({
12559
12569
  storyKey,
12560
12570
  taskType
12561
12571
  }, "Fix dispatch timed out — escalating story");
@@ -12577,7 +12587,7 @@ function createImplementationOrchestrator(deps) {
12577
12587
  }
12578
12588
  if (fixResult.status === "failed") {
12579
12589
  if (isMajorRework) {
12580
- logger$21.warn({
12590
+ logger$22.warn({
12581
12591
  storyKey,
12582
12592
  exitCode: fixResult.exitCode
12583
12593
  }, "Major rework dispatch failed — escalating story");
@@ -12597,14 +12607,14 @@ function createImplementationOrchestrator(deps) {
12597
12607
  await persistState();
12598
12608
  return;
12599
12609
  }
12600
- logger$21.warn({
12610
+ logger$22.warn({
12601
12611
  storyKey,
12602
12612
  taskType,
12603
12613
  exitCode: fixResult.exitCode
12604
12614
  }, "Fix dispatch failed");
12605
12615
  }
12606
12616
  } catch (err) {
12607
- logger$21.warn({
12617
+ logger$22.warn({
12608
12618
  storyKey,
12609
12619
  taskType,
12610
12620
  err
@@ -12638,13 +12648,13 @@ function createImplementationOrchestrator(deps) {
12638
12648
  const directives = telemetryAdvisor.formatOptimizationDirectives(recs);
12639
12649
  if (directives.length > 0) {
12640
12650
  optimizationDirectives = directives;
12641
- logger$21.debug({
12651
+ logger$22.debug({
12642
12652
  storyKey,
12643
12653
  directiveCount: recs.filter((r) => r.severity !== "info").length
12644
12654
  }, "Optimization directives ready for dispatch");
12645
12655
  }
12646
12656
  } catch (err) {
12647
- logger$21.debug({
12657
+ logger$22.debug({
12648
12658
  err,
12649
12659
  storyKey
12650
12660
  }, "Failed to fetch optimization directives — proceeding without");
@@ -12682,11 +12692,11 @@ function createImplementationOrchestrator(deps) {
12682
12692
  }
12683
12693
  async function run(storyKeys) {
12684
12694
  if (_state === "RUNNING" || _state === "PAUSED") {
12685
- logger$21.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
12695
+ logger$22.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
12686
12696
  return getStatus();
12687
12697
  }
12688
12698
  if (_state === "COMPLETE") {
12689
- logger$21.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
12699
+ logger$22.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
12690
12700
  return getStatus();
12691
12701
  }
12692
12702
  _state = "RUNNING";
@@ -12710,7 +12720,7 @@ function createImplementationOrchestrator(deps) {
12710
12720
  const seedStart = Date.now();
12711
12721
  const seedResult = await seedMethodologyContext(db, projectRoot);
12712
12722
  _startupTimings.seedMethodologyMs = Date.now() - seedStart;
12713
- if (seedResult.decisionsCreated > 0) logger$21.info({
12723
+ if (seedResult.decisionsCreated > 0) logger$22.info({
12714
12724
  decisionsCreated: seedResult.decisionsCreated,
12715
12725
  skippedCategories: seedResult.skippedCategories,
12716
12726
  durationMs: _startupTimings.seedMethodologyMs
@@ -12720,12 +12730,12 @@ function createImplementationOrchestrator(deps) {
12720
12730
  const ingestStart = Date.now();
12721
12731
  try {
12722
12732
  const ingestResult = await autoIngestEpicsDependencies(db, projectRoot);
12723
- if (ingestResult.storiesIngested > 0 || ingestResult.dependenciesIngested > 0) logger$21.info({
12733
+ if (ingestResult.storiesIngested > 0 || ingestResult.dependenciesIngested > 0) logger$22.info({
12724
12734
  ...ingestResult,
12725
12735
  durationMs: Date.now() - ingestStart
12726
12736
  }, "Auto-ingested stories and dependencies from epics document");
12727
12737
  } catch (err) {
12728
- logger$21.debug({ err }, "Auto-ingest from epics document skipped — work graph may be unavailable");
12738
+ logger$22.debug({ err }, "Auto-ingest from epics document skipped — work graph may be unavailable");
12729
12739
  }
12730
12740
  }
12731
12741
  try {
@@ -12735,7 +12745,7 @@ function createImplementationOrchestrator(deps) {
12735
12745
  _startupTimings.stateStoreInitMs = Date.now() - stateStoreInitStart;
12736
12746
  for (const key of storyKeys) {
12737
12747
  const pendingState = _stories.get(key);
12738
- if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$21.warn({
12748
+ if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$22.warn({
12739
12749
  err,
12740
12750
  storyKey: key
12741
12751
  }, "StateStore write failed during PENDING init"));
@@ -12746,12 +12756,12 @@ function createImplementationOrchestrator(deps) {
12746
12756
  _startupTimings.queryStoriesMs = Date.now() - queryStoriesStart;
12747
12757
  for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
12748
12758
  } catch (err) {
12749
- logger$21.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
12759
+ logger$22.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
12750
12760
  }
12751
12761
  }
12752
12762
  if (ingestionServer !== void 0) {
12753
12763
  if (telemetryPersistence !== void 0) try {
12754
- const pipelineLogger = logger$21;
12764
+ const pipelineLogger = logger$22;
12755
12765
  const telemetryPipeline = new TelemetryPipeline({
12756
12766
  normalizer: new TelemetryNormalizer(pipelineLogger),
12757
12767
  turnAnalyzer: new TurnAnalyzer(pipelineLogger),
@@ -12763,14 +12773,14 @@ function createImplementationOrchestrator(deps) {
12763
12773
  persistence: telemetryPersistence
12764
12774
  });
12765
12775
  ingestionServer.setPipeline(telemetryPipeline);
12766
- logger$21.info("TelemetryPipeline wired to IngestionServer");
12776
+ logger$22.info("TelemetryPipeline wired to IngestionServer");
12767
12777
  } catch (pipelineErr) {
12768
- logger$21.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
12778
+ logger$22.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
12769
12779
  }
12770
- await ingestionServer.start().catch((err) => logger$21.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
12780
+ await ingestionServer.start().catch((err) => logger$22.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
12771
12781
  try {
12772
12782
  _otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
12773
- logger$21.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
12783
+ logger$22.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
12774
12784
  } catch {}
12775
12785
  }
12776
12786
  let contractDeclarations = [];
@@ -12810,12 +12820,12 @@ function createImplementationOrchestrator(deps) {
12810
12820
  const conflictDetectStart = Date.now();
12811
12821
  const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
12812
12822
  _startupTimings.conflictDetectMs = Date.now() - conflictDetectStart;
12813
- if (contractEdges.length > 0) logger$21.info({
12823
+ if (contractEdges.length > 0) logger$22.info({
12814
12824
  contractEdges,
12815
12825
  edgeCount: contractEdges.length
12816
12826
  }, "Contract dependency edges detected — applying contract-aware dispatch ordering");
12817
- wgRepo.addContractDependencies(contractEdges).catch((err) => logger$21.warn({ err }, "contract dep persistence failed (best-effort)"));
12818
- logger$21.info({
12827
+ wgRepo.addContractDependencies(contractEdges).catch((err) => logger$22.warn({ err }, "contract dep persistence failed (best-effort)"));
12828
+ logger$22.info({
12819
12829
  storyCount: storyKeys.length,
12820
12830
  groupCount: batches.reduce((sum, b) => sum + b.length, 0),
12821
12831
  batchCount: batches.length,
@@ -12825,7 +12835,7 @@ function createImplementationOrchestrator(deps) {
12825
12835
  groups: batch.map((g) => g.join(","))
12826
12836
  }))
12827
12837
  }, "Orchestrator starting");
12828
- logger$21.info({
12838
+ logger$22.info({
12829
12839
  storyCount: storyKeys.length,
12830
12840
  conflictGroups: batches.length,
12831
12841
  maxConcurrency: config.maxConcurrency
@@ -12846,7 +12856,7 @@ function createImplementationOrchestrator(deps) {
12846
12856
  exitCode,
12847
12857
  output: truncatedOutput
12848
12858
  });
12849
- logger$21.error({
12859
+ logger$22.error({
12850
12860
  exitCode,
12851
12861
  reason: preFlightResult.reason
12852
12862
  }, "Pre-flight build check failed — aborting pipeline before any story dispatch");
@@ -12855,19 +12865,19 @@ function createImplementationOrchestrator(deps) {
12855
12865
  await persistState();
12856
12866
  return getStatus();
12857
12867
  }
12858
- if (preFlightResult.status !== "skipped") logger$21.info("Pre-flight build check passed");
12868
+ if (preFlightResult.status !== "skipped") logger$22.info("Pre-flight build check passed");
12859
12869
  }
12860
- logger$21.info(_startupTimings, "Orchestrator startup timings (ms)");
12870
+ logger$22.info(_startupTimings, "Orchestrator startup timings (ms)");
12861
12871
  const totalGroups = batches.reduce((sum, b) => sum + b.length, 0);
12862
12872
  const actualConcurrency = Math.min(config.maxConcurrency, totalGroups);
12863
12873
  if (actualConcurrency > 1 && projectRoot !== void 0) try {
12864
12874
  _packageSnapshot = capturePackageSnapshot({ projectRoot });
12865
- logger$21.info({
12875
+ logger$22.info({
12866
12876
  fileCount: _packageSnapshot.files.size,
12867
12877
  installCommand: _packageSnapshot.installCommand
12868
12878
  }, "Package snapshot captured for concurrent story protection");
12869
12879
  } catch (snapErr) {
12870
- logger$21.warn({ err: snapErr }, "Failed to capture package snapshot — continuing without protection");
12880
+ logger$22.warn({ err: snapErr }, "Failed to capture package snapshot — continuing without protection");
12871
12881
  }
12872
12882
  try {
12873
12883
  for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
@@ -12876,7 +12886,7 @@ function createImplementationOrchestrator(deps) {
12876
12886
  _state = "FAILED";
12877
12887
  _completedAt = new Date().toISOString();
12878
12888
  await persistState();
12879
- logger$21.error({ err }, "Orchestrator failed with unhandled error");
12889
+ logger$22.error({ err }, "Orchestrator failed with unhandled error");
12880
12890
  return getStatus();
12881
12891
  }
12882
12892
  stopHeartbeat();
@@ -12886,7 +12896,7 @@ function createImplementationOrchestrator(deps) {
12886
12896
  const totalDeclarations = contractDeclarations.length;
12887
12897
  const currentSprintDeclarations = contractDeclarations.filter((d) => storyKeys.includes(d.storyKey));
12888
12898
  const stalePruned = totalDeclarations - currentSprintDeclarations.length;
12889
- if (stalePruned > 0) logger$21.info({
12899
+ if (stalePruned > 0) logger$22.info({
12890
12900
  stalePruned,
12891
12901
  remaining: currentSprintDeclarations.length
12892
12902
  }, "Pruned stale contract declarations from previous epics");
@@ -12900,11 +12910,11 @@ function createImplementationOrchestrator(deps) {
12900
12910
  contractName: mismatch.contractName,
12901
12911
  mismatchDescription: mismatch.mismatchDescription
12902
12912
  });
12903
- logger$21.warn({
12913
+ logger$22.warn({
12904
12914
  mismatchCount: mismatches.length,
12905
12915
  mismatches
12906
12916
  }, "Post-sprint contract verification found mismatches — manual review required");
12907
- } else if (currentSprintDeclarations.length > 0) logger$21.info("Post-sprint contract verification passed — all declared contracts satisfied");
12917
+ } else if (currentSprintDeclarations.length > 0) logger$22.info("Post-sprint contract verification passed — all declared contracts satisfied");
12908
12918
  eventBus.emit("pipeline:contract-verification-summary", {
12909
12919
  verified: currentSprintDeclarations.length,
12910
12920
  stalePruned,
@@ -12939,12 +12949,12 @@ function createImplementationOrchestrator(deps) {
12939
12949
  });
12940
12950
  await stateStore.setContractVerification(sk, records);
12941
12951
  }
12942
- logger$21.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
12952
+ logger$22.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
12943
12953
  } catch (persistErr) {
12944
- logger$21.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
12954
+ logger$22.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
12945
12955
  }
12946
12956
  } catch (err) {
12947
- logger$21.error({ err }, "Post-sprint contract verification threw an error — skipping");
12957
+ logger$22.error({ err }, "Post-sprint contract verification threw an error — skipping");
12948
12958
  }
12949
12959
  if (projectRoot !== void 0) try {
12950
12960
  const indicators = checkProfileStaleness(projectRoot);
@@ -12954,10 +12964,10 @@ function createImplementationOrchestrator(deps) {
12954
12964
  message,
12955
12965
  indicators
12956
12966
  });
12957
- logger$21.warn({ indicators }, message);
12967
+ logger$22.warn({ indicators }, message);
12958
12968
  }
12959
12969
  } catch (err) {
12960
- logger$21.debug({ err }, "Profile staleness check failed (best-effort)");
12970
+ logger$22.debug({ err }, "Profile staleness check failed (best-effort)");
12961
12971
  }
12962
12972
  let completed = 0;
12963
12973
  let escalated = 0;
@@ -12974,8 +12984,8 @@ function createImplementationOrchestrator(deps) {
12974
12984
  await persistState();
12975
12985
  return getStatus();
12976
12986
  } finally {
12977
- if (stateStore !== void 0) await stateStore.close().catch((err) => logger$21.warn({ err }, "StateStore.close() failed (best-effort)"));
12978
- if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$21.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
12987
+ if (stateStore !== void 0) await stateStore.close().catch((err) => logger$22.warn({ err }, "StateStore.close() failed (best-effort)"));
12988
+ if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$22.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
12979
12989
  }
12980
12990
  }
12981
12991
  function pause() {
@@ -12984,7 +12994,7 @@ function createImplementationOrchestrator(deps) {
12984
12994
  _pauseGate = createPauseGate();
12985
12995
  _state = "PAUSED";
12986
12996
  eventBus.emit("orchestrator:paused", {});
12987
- logger$21.info("Orchestrator paused");
12997
+ logger$22.info("Orchestrator paused");
12988
12998
  }
12989
12999
  function resume() {
12990
13000
  if (_state !== "PAUSED") return;
@@ -12995,7 +13005,7 @@ function createImplementationOrchestrator(deps) {
12995
13005
  }
12996
13006
  _state = "RUNNING";
12997
13007
  eventBus.emit("orchestrator:resumed", {});
12998
- logger$21.info("Orchestrator resumed");
13008
+ logger$22.info("Orchestrator resumed");
12999
13009
  }
13000
13010
  return {
13001
13011
  run,
@@ -13674,7 +13684,7 @@ const CritiqueOutputSchema = z.object({
13674
13684
 
13675
13685
  //#endregion
13676
13686
  //#region src/modules/phase-orchestrator/critique-loop.ts
13677
- const logger$5 = createLogger("critique-loop");
13687
+ const logger$6 = createLogger("critique-loop");
13678
13688
  /**
13679
13689
  * Maps a phase name to the critique prompt template name.
13680
13690
  * Falls back to `critique-${phase}` for unknown phases.
@@ -13728,7 +13738,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13728
13738
  critiquePrompt = critiqueTemplate.replace("{{artifact_content}}", currentArtifact).replace("{{project_context}}", projectContext);
13729
13739
  } catch (err) {
13730
13740
  const message = err instanceof Error ? err.message : String(err);
13731
- logger$5.warn({
13741
+ logger$6.warn({
13732
13742
  phaseId,
13733
13743
  promptName: critiquePromptName,
13734
13744
  err: message
@@ -13756,7 +13766,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13756
13766
  critiqueTokens.output += result.tokenEstimate.output;
13757
13767
  if (result.status !== "completed" || result.parsed === null) {
13758
13768
  const errMsg = result.parseError ?? `Critique dispatch ended with status '${result.status}'`;
13759
- logger$5.warn({
13769
+ logger$6.warn({
13760
13770
  phaseId,
13761
13771
  iteration: i + 1,
13762
13772
  err: errMsg
@@ -13775,7 +13785,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13775
13785
  lastCritiqueOutput = critiqueOutput;
13776
13786
  } catch (err) {
13777
13787
  const message = err instanceof Error ? err.message : String(err);
13778
- logger$5.warn({
13788
+ logger$6.warn({
13779
13789
  phaseId,
13780
13790
  iteration: i + 1,
13781
13791
  err: message
@@ -13815,14 +13825,14 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13815
13825
  });
13816
13826
  } catch (err) {
13817
13827
  const message = err instanceof Error ? err.message : String(err);
13818
- logger$5.warn({
13828
+ logger$6.warn({
13819
13829
  phaseId,
13820
13830
  iteration: i + 1,
13821
13831
  err: message
13822
13832
  }, "Critique loop: failed to store critique decision — continuing");
13823
13833
  }
13824
13834
  if (critiqueOutput.verdict === "pass") {
13825
- logger$5.info({
13835
+ logger$6.info({
13826
13836
  phaseId,
13827
13837
  iteration: i + 1
13828
13838
  }, "Critique loop: artifact passed critique — loop complete");
@@ -13835,7 +13845,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13835
13845
  totalMs: Date.now() - startMs
13836
13846
  };
13837
13847
  }
13838
- logger$5.info({
13848
+ logger$6.info({
13839
13849
  phaseId,
13840
13850
  iteration: i + 1,
13841
13851
  issueCount: critiqueOutput.issue_count
@@ -13848,7 +13858,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13848
13858
  refinePrompt = refineTemplate.replace("{{original_artifact}}", currentArtifact).replace("{{critique_issues}}", issuesText).replace("{{phase_context}}", phaseContext);
13849
13859
  } catch (err) {
13850
13860
  const message = err instanceof Error ? err.message : String(err);
13851
- logger$5.warn({
13861
+ logger$6.warn({
13852
13862
  phaseId,
13853
13863
  iteration: i + 1,
13854
13864
  err: message
@@ -13869,7 +13879,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13869
13879
  const originalLength = currentArtifact.length;
13870
13880
  const refinedLength = refineResult.output.length;
13871
13881
  const delta = refinedLength - originalLength;
13872
- logger$5.info({
13882
+ logger$6.info({
13873
13883
  phaseId,
13874
13884
  iteration: i + 1,
13875
13885
  originalLength,
@@ -13878,7 +13888,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13878
13888
  }, "Critique loop: refinement complete");
13879
13889
  currentArtifact = refineResult.output;
13880
13890
  } else {
13881
- logger$5.warn({
13891
+ logger$6.warn({
13882
13892
  phaseId,
13883
13893
  iteration: i + 1,
13884
13894
  status: refineResult.status
@@ -13887,7 +13897,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13887
13897
  }
13888
13898
  } catch (err) {
13889
13899
  const message = err instanceof Error ? err.message : String(err);
13890
- logger$5.warn({
13900
+ logger$6.warn({
13891
13901
  phaseId,
13892
13902
  iteration: i + 1,
13893
13903
  err: message
@@ -13898,12 +13908,12 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13898
13908
  }
13899
13909
  const remainingIssues = lastCritiqueOutput?.issues ?? [];
13900
13910
  if (remainingIssues.length > 0) {
13901
- logger$5.warn({
13911
+ logger$6.warn({
13902
13912
  phaseId,
13903
13913
  maxIterations,
13904
13914
  issueCount: remainingIssues.length
13905
13915
  }, "Critique loop: max iterations reached with unresolved issues");
13906
- for (const issue of remainingIssues) logger$5.warn({
13916
+ for (const issue of remainingIssues) logger$6.warn({
13907
13917
  phaseId,
13908
13918
  severity: issue.severity,
13909
13919
  category: issue.category,
@@ -13922,7 +13932,7 @@ async function runCritiqueLoop(artifact, phaseId, runId, phase, deps, options =
13922
13932
 
13923
13933
  //#endregion
13924
13934
  //#region src/modules/phase-orchestrator/elicitation-selector.ts
13925
- const logger$4 = createLogger("elicitation-selector");
13935
+ const logger$5 = createLogger("elicitation-selector");
13926
13936
  /**
13927
13937
  * Affinity scores (0.0–1.0) for each category per content type.
13928
13938
  *
@@ -14044,10 +14054,10 @@ function loadElicitationMethods() {
14044
14054
  try {
14045
14055
  const content = readFileSync$1(csvPath, "utf-8");
14046
14056
  const methods = parseMethodsCsv(content);
14047
- logger$4.debug({ count: methods.length }, "Loaded elicitation methods");
14057
+ logger$5.debug({ count: methods.length }, "Loaded elicitation methods");
14048
14058
  return methods;
14049
14059
  } catch (err) {
14050
- logger$4.warn({
14060
+ logger$5.warn({
14051
14061
  csvPath,
14052
14062
  err
14053
14063
  }, "Failed to load elicitation methods CSV");
@@ -14367,7 +14377,7 @@ const ElicitationOutputSchema = z.object({
14367
14377
 
14368
14378
  //#endregion
14369
14379
  //#region src/modules/phase-orchestrator/step-runner.ts
14370
- const logger$3 = createLogger("step-runner");
14380
+ const logger$4 = createLogger("step-runner");
14371
14381
  /**
14372
14382
  * Format an array of decision records into a markdown section for injection.
14373
14383
  *
@@ -14474,7 +14484,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14474
14484
  if (estimatedTokens > budgetTokens) {
14475
14485
  const decisionRefs = step.context.filter((ref) => ref.source.startsWith("decision:"));
14476
14486
  if (decisionRefs.length > 0) {
14477
- logger$3.warn({
14487
+ logger$4.warn({
14478
14488
  step: step.name,
14479
14489
  estimatedTokens,
14480
14490
  budgetTokens
@@ -14501,7 +14511,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14501
14511
  }
14502
14512
  prompt = summarizedPrompt;
14503
14513
  estimatedTokens = Math.ceil(prompt.length / 4);
14504
- if (estimatedTokens <= budgetTokens) logger$3.info({
14514
+ if (estimatedTokens <= budgetTokens) logger$4.info({
14505
14515
  step: step.name,
14506
14516
  estimatedTokens,
14507
14517
  budgetTokens
@@ -14682,7 +14692,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14682
14692
  const critiqueResult = await runCritiqueLoop(artifactContent, phase, runId, phase, deps);
14683
14693
  totalInput += critiqueResult.critiqueTokens.input + critiqueResult.refinementTokens.input;
14684
14694
  totalOutput += critiqueResult.critiqueTokens.output + critiqueResult.refinementTokens.output;
14685
- logger$3.info({
14695
+ logger$4.info({
14686
14696
  step: step.name,
14687
14697
  verdict: critiqueResult.verdict,
14688
14698
  iterations: critiqueResult.iterations,
@@ -14690,7 +14700,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14690
14700
  }, "Step critique loop complete");
14691
14701
  } catch (critiqueErr) {
14692
14702
  const critiqueMsg = critiqueErr instanceof Error ? critiqueErr.message : String(critiqueErr);
14693
- logger$3.warn({
14703
+ logger$4.warn({
14694
14704
  step: step.name,
14695
14705
  err: critiqueMsg
14696
14706
  }, "Step critique loop threw an error — continuing without critique");
@@ -14700,7 +14710,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14700
14710
  const contentType = deriveContentType(phase, step.name);
14701
14711
  const selectedMethods = selectMethods({ content_type: contentType }, usedElicitationMethods);
14702
14712
  if (selectedMethods.length > 0) {
14703
- logger$3.info({
14713
+ logger$4.info({
14704
14714
  step: step.name,
14705
14715
  methods: selectedMethods.map((m) => m.name),
14706
14716
  contentType
@@ -14739,13 +14749,13 @@ async function runSteps(steps, deps, runId, phase, params) {
14739
14749
  key: `${phase}-round-${roundIndex}-insights`,
14740
14750
  value: elicitParsed.insights
14741
14751
  });
14742
- logger$3.info({
14752
+ logger$4.info({
14743
14753
  step: step.name,
14744
14754
  method: method.name,
14745
14755
  roundIndex
14746
14756
  }, "Elicitation insights stored in decision store");
14747
14757
  }
14748
- } else logger$3.warn({
14758
+ } else logger$4.warn({
14749
14759
  step: step.name,
14750
14760
  method: method.name,
14751
14761
  status: elicitResult.status
@@ -14761,7 +14771,7 @@ async function runSteps(steps, deps, runId, phase, params) {
14761
14771
  }
14762
14772
  } catch (elicitErr) {
14763
14773
  const elicitMsg = elicitErr instanceof Error ? elicitErr.message : String(elicitErr);
14764
- logger$3.warn({
14774
+ logger$4.warn({
14765
14775
  step: step.name,
14766
14776
  err: elicitMsg
14767
14777
  }, "Step elicitation threw an error — continuing without elicitation");
@@ -15129,7 +15139,7 @@ async function runAnalysisPhase(deps, params) {
15129
15139
 
15130
15140
  //#endregion
15131
15141
  //#region src/modules/phase-orchestrator/phases/planning.ts
15132
- const logger$2 = createLogger("planning-phase");
15142
+ const logger$3 = createLogger("planning-phase");
15133
15143
  /** Maximum total prompt length in tokens (3,500 tokens × 4 chars/token = 14,000 chars) */
15134
15144
  const MAX_PROMPT_TOKENS = 3500;
15135
15145
  const MAX_PROMPT_CHARS = MAX_PROMPT_TOKENS * 4;
@@ -15183,7 +15193,7 @@ function formatProductBriefFromDecisions(decisions) {
15183
15193
  for (const field of BRIEF_FIELDS) {
15184
15194
  const rawValue = briefMap[field];
15185
15195
  if (rawValue === void 0) continue;
15186
- const fieldLabel = field.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
15196
+ const fieldLabel$1 = field.replace(/_/g, " ").replace(/\b\w/g, (c) => c.toUpperCase());
15187
15197
  let displayValue;
15188
15198
  try {
15189
15199
  const parsed = JSON.parse(rawValue);
@@ -15192,7 +15202,7 @@ function formatProductBriefFromDecisions(decisions) {
15192
15202
  } catch {
15193
15203
  displayValue = rawValue;
15194
15204
  }
15195
- parts.push(`### ${fieldLabel}\n${displayValue}`);
15205
+ parts.push(`### ${fieldLabel$1}\n${displayValue}`);
15196
15206
  }
15197
15207
  return parts.join("\n\n");
15198
15208
  }
@@ -15356,7 +15366,7 @@ async function runPlanningMultiStep(deps, params) {
15356
15366
  const techConstraintDecisions = allAnalysisDecisions.filter((d) => d.category === "technology-constraints");
15357
15367
  const violation = detectTechStackViolation(techStack, techConstraintDecisions);
15358
15368
  if (violation) {
15359
- logger$2.warn({ violation }, "Tech stack constraint violation detected — retrying step 3 with correction");
15369
+ logger$3.warn({ violation }, "Tech stack constraint violation detected — retrying step 3 with correction");
15360
15370
  const correctionPrefix = `CRITICAL CORRECTION: Your previous output was rejected because it violates the stated technology constraints.\n\nViolation: ${violation}\n\nYou MUST NOT use TypeScript, JavaScript, or Node.js for ANY backend service. Choose from Go, Kotlin/JVM, or Rust as stated in the technology constraints.\n\nRe-generate your output with a compliant tech stack. Everything else (NFRs, domain model, out-of-scope) can remain the same.\n\n---\n\n`;
15361
15371
  const step3Template = await deps.pack.getPrompt("planning-step-3-nfrs");
15362
15372
  const stepOutputs = new Map();
@@ -15383,10 +15393,10 @@ async function runPlanningMultiStep(deps, params) {
15383
15393
  const retryTechStack = retryParsed.tech_stack;
15384
15394
  const retryViolation = retryTechStack ? detectTechStackViolation(retryTechStack, techConstraintDecisions) : null;
15385
15395
  if (!retryViolation) {
15386
- logger$2.info("Retry produced compliant tech stack — using corrected output");
15396
+ logger$3.info("Retry produced compliant tech stack — using corrected output");
15387
15397
  nfrsOutput = retryParsed;
15388
- } else logger$2.warn({ retryViolation }, "Retry still violates constraints — using original output");
15389
- } else logger$2.warn("Retry dispatch failed — using original output");
15398
+ } else logger$3.warn({ retryViolation }, "Retry still violates constraints — using original output");
15399
+ } else logger$3.warn("Retry dispatch failed — using original output");
15390
15400
  }
15391
15401
  }
15392
15402
  const frs = frsOutput.functional_requirements;
@@ -15672,7 +15682,7 @@ const ReadinessOutputSchema = z.object({
15672
15682
 
15673
15683
  //#endregion
15674
15684
  //#region src/modules/phase-orchestrator/phases/solutioning.ts
15675
- const logger$1 = createLogger("solutioning");
15685
+ const logger$2 = createLogger("solutioning");
15676
15686
  /** Base token budget for architecture generation (covers template + requirements) */
15677
15687
  const BASE_ARCH_PROMPT_TOKENS = 3e3;
15678
15688
  /** Base token budget for story generation (covers template + requirements + architecture) */
@@ -16081,7 +16091,7 @@ async function runReadinessCheck(deps, runId) {
16081
16091
  input: tokenEstimate.input,
16082
16092
  output: tokenEstimate.output
16083
16093
  };
16084
- logger$1.info({
16094
+ logger$2.info({
16085
16095
  runId,
16086
16096
  durationMs: dispatchResult.durationMs,
16087
16097
  tokens: tokenEstimate
@@ -16342,7 +16352,7 @@ async function runSolutioningPhase(deps, params) {
16342
16352
  let archResult;
16343
16353
  if (existingArchArtifact) {
16344
16354
  const existingDecisions = (await getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning")).filter((d) => d.category === "architecture");
16345
- logger$1.info({
16355
+ logger$2.info({
16346
16356
  runId: params.runId,
16347
16357
  artifactId: existingArchArtifact.id,
16348
16358
  decisionCount: existingDecisions.length
@@ -16373,7 +16383,7 @@ async function runSolutioningPhase(deps, params) {
16373
16383
  output: totalOutput
16374
16384
  }
16375
16385
  };
16376
- logger$1.info({
16386
+ logger$2.info({
16377
16387
  runId: params.runId,
16378
16388
  decisionCount: archResult.decisions.length,
16379
16389
  mode: hasSteps ? "multi-step" : "single-dispatch"
@@ -16395,7 +16405,7 @@ async function runSolutioningPhase(deps, params) {
16395
16405
  totalInput += readinessResult.tokenUsage.input;
16396
16406
  totalOutput += readinessResult.tokenUsage.output;
16397
16407
  if (readinessResult.verdict === "error") {
16398
- logger$1.error({
16408
+ logger$2.error({
16399
16409
  runId: params.runId,
16400
16410
  error: readinessResult.error
16401
16411
  }, "Readiness check agent failed");
@@ -16411,7 +16421,7 @@ async function runSolutioningPhase(deps, params) {
16411
16421
  }
16412
16422
  };
16413
16423
  }
16414
- logger$1.info({
16424
+ logger$2.info({
16415
16425
  runId: params.runId,
16416
16426
  verdict: readinessResult.verdict,
16417
16427
  coverageScore: readinessResult.coverageScore,
@@ -16427,7 +16437,7 @@ async function runSolutioningPhase(deps, params) {
16427
16437
  key: `finding-${i + 1}`,
16428
16438
  value: JSON.stringify(finding)
16429
16439
  });
16430
- logger$1.error({
16440
+ logger$2.error({
16431
16441
  runId: params.runId,
16432
16442
  verdict: "NOT_READY",
16433
16443
  coverageScore: readinessResult.coverageScore,
@@ -16493,7 +16503,7 @@ async function runSolutioningPhase(deps, params) {
16493
16503
  "",
16494
16504
  "Please generate additional or revised stories to specifically address each blocker above."
16495
16505
  ].join("\n");
16496
- logger$1.info({
16506
+ logger$2.info({
16497
16507
  runId: params.runId,
16498
16508
  blockerCount: blockers.length
16499
16509
  }, "Readiness NEEDS_WORK with blockers — retrying story generation with gap analysis");
@@ -16532,7 +16542,7 @@ async function runSolutioningPhase(deps, params) {
16532
16542
  };
16533
16543
  if (retryReadiness.verdict === "NOT_READY" || retryReadiness.verdict === "NEEDS_WORK") {
16534
16544
  const retryBlockers = retryReadiness.findings.filter((f$1) => f$1.severity === "blocker");
16535
- logger$1.error({
16545
+ logger$2.error({
16536
16546
  runId: params.runId,
16537
16547
  verdict: retryReadiness.verdict,
16538
16548
  retryBlockers: retryBlockers.length
@@ -16556,7 +16566,7 @@ async function runSolutioningPhase(deps, params) {
16556
16566
  }
16557
16567
  const retryStories = retryResult.epics.reduce((sum, epic) => sum + epic.stories.length, 0);
16558
16568
  const minorFindings$1 = retryReadiness.findings.filter((f$1) => f$1.severity === "minor");
16559
- if (minorFindings$1.length > 0) logger$1.warn({
16569
+ if (minorFindings$1.length > 0) logger$2.warn({
16560
16570
  runId: params.runId,
16561
16571
  minorFindings: minorFindings$1
16562
16572
  }, "Readiness READY with minor findings after retry");
@@ -16585,7 +16595,7 @@ async function runSolutioningPhase(deps, params) {
16585
16595
  };
16586
16596
  }
16587
16597
  const majorFindings = readinessResult.findings.filter((f$1) => f$1.severity === "major");
16588
- logger$1.warn({
16598
+ logger$2.warn({
16589
16599
  runId: params.runId,
16590
16600
  majorCount: majorFindings.length,
16591
16601
  findings: readinessResult.findings
@@ -16601,7 +16611,7 @@ async function runSolutioningPhase(deps, params) {
16601
16611
  const minorFindings = readinessResult.findings.filter((f$1) => f$1.severity === "minor");
16602
16612
  if (minorFindings.length > 0) {
16603
16613
  const verdictLabel = readinessResult.verdict === "READY" ? "READY" : "NEEDS_WORK (no blockers)";
16604
- logger$1.warn({
16614
+ logger$2.warn({
16605
16615
  runId: params.runId,
16606
16616
  verdict: readinessResult.verdict,
16607
16617
  minorFindings
@@ -17012,6 +17022,709 @@ async function runResearchPhase(deps, params) {
17012
17022
  }
17013
17023
  }
17014
17024
 
17025
+ //#endregion
17026
+ //#region src/modules/export/renderers.ts
17027
+ /** Fields from analysis/product-brief decisions to render, in display order */
17028
+ const PRODUCT_BRIEF_FIELDS = [
17029
+ "problem_statement",
17030
+ "target_users",
17031
+ "core_features",
17032
+ "success_metrics",
17033
+ "constraints",
17034
+ "technology_constraints"
17035
+ ];
17036
+ /**
17037
+ * Known acronyms that should appear fully uppercased when they are a standalone
17038
+ * word in a label (e.g. 'fr_coverage' → 'FR Coverage', 'api_style' → 'API Style').
17039
+ */
17040
+ const UPPERCASE_ACRONYMS = new Set([
17041
+ "fr",
17042
+ "nfr",
17043
+ "ux",
17044
+ "api",
17045
+ "db",
17046
+ "id",
17047
+ "url"
17048
+ ]);
17049
+ /**
17050
+ * Convert a snake_case key to Title Case for display headings.
17051
+ * Known acronyms (fr, nfr, ux, api, db, id, url) are rendered fully uppercased.
17052
+ */
17053
+ function fieldLabel(key) {
17054
+ return key.replace(/_/g, " ").replace(/\b\w+/g, (word) => {
17055
+ const lower = word.toLowerCase();
17056
+ if (UPPERCASE_ACRONYMS.has(lower)) return lower.toUpperCase();
17057
+ return word.charAt(0).toUpperCase() + word.slice(1).toLowerCase();
17058
+ });
17059
+ }
17060
+ /**
17061
+ * Safely parse a JSON string; returns the original string if parsing fails.
17062
+ */
17063
+ function safeParseJson(value) {
17064
+ try {
17065
+ return JSON.parse(value);
17066
+ } catch {
17067
+ return value;
17068
+ }
17069
+ }
17070
+ /**
17071
+ * Render a decision value to a markdown-friendly string.
17072
+ * - Arrays → bulleted list items
17073
+ * - Objects → key: value lines
17074
+ * - Primitives → plain string
17075
+ */
17076
+ function renderValue(rawValue) {
17077
+ const parsed = safeParseJson(rawValue);
17078
+ if (Array.isArray(parsed)) return parsed.map((item) => `- ${String(item)}`).join("\n");
17079
+ if (typeof parsed === "object" && parsed !== null) return Object.entries(parsed).map(([k, v]) => `- **${fieldLabel(k)}**: ${String(v)}`).join("\n");
17080
+ return String(parsed);
17081
+ }
17082
+ /**
17083
+ * Render analysis-phase decisions as a `product-brief.md` file.
17084
+ *
17085
+ * Merges `product-brief` category decisions with `technology-constraints`
17086
+ * category decisions (they are stored separately in the decision store).
17087
+ *
17088
+ * @param decisions - All decisions from the analysis phase (any category)
17089
+ * @returns Formatted markdown content for product-brief.md
17090
+ */
17091
+ function renderProductBrief(decisions) {
17092
+ const briefDecisions = decisions.filter((d) => d.category === "product-brief");
17093
+ const techConstraintDecisions = decisions.filter((d) => d.category === "technology-constraints");
17094
+ const briefMap = Object.fromEntries(briefDecisions.map((d) => [d.key, d.value]));
17095
+ if (techConstraintDecisions.length > 0 && briefMap["technology_constraints"] === void 0) {
17096
+ const tcBullets = techConstraintDecisions.flatMap((d) => {
17097
+ const parsed = safeParseJson(d.value);
17098
+ if (Array.isArray(parsed)) return parsed.map((item) => String(item));
17099
+ return [String(parsed)];
17100
+ });
17101
+ briefMap["technology_constraints"] = JSON.stringify(tcBullets);
17102
+ }
17103
+ if (briefDecisions.length === 0 && techConstraintDecisions.length === 0) return "";
17104
+ const parts = ["# Product Brief", ""];
17105
+ for (const field of PRODUCT_BRIEF_FIELDS) {
17106
+ const rawValue = briefMap[field];
17107
+ if (rawValue === void 0) continue;
17108
+ parts.push(`## ${fieldLabel(field)}`);
17109
+ parts.push("");
17110
+ parts.push(renderValue(rawValue));
17111
+ parts.push("");
17112
+ }
17113
+ return parts.join("\n");
17114
+ }
17115
+ /**
17116
+ * Render planning-phase decisions (and requirements table) as a `prd.md` file.
17117
+ *
17118
+ * Sections rendered (when data is present):
17119
+ * - Project Classification (classification decisions)
17120
+ * - Functional Requirements (functional-requirements decisions)
17121
+ * - Non-Functional Requirements (non-functional-requirements decisions)
17122
+ * - Domain Model (domain-model decisions)
17123
+ * - User Stories (user-stories decisions)
17124
+ * - Tech Stack (tech-stack decisions)
17125
+ * - Out of Scope (out-of-scope decisions)
17126
+ *
17127
+ * @param decisions - All decisions from the planning phase
17128
+ * @param requirements - Requirements records from the requirements table (optional)
17129
+ * @returns Formatted markdown content for prd.md
17130
+ */
17131
+ function renderPrd(decisions, requirements = []) {
17132
+ if (decisions.length === 0) return "";
17133
+ const parts = ["# Product Requirements Document", ""];
17134
+ const classificationDecisions = decisions.filter((d) => d.category === "classification");
17135
+ if (classificationDecisions.length > 0) {
17136
+ parts.push("## Project Classification");
17137
+ parts.push("");
17138
+ for (const d of classificationDecisions) {
17139
+ const parsed = safeParseJson(d.value);
17140
+ if (Array.isArray(parsed)) {
17141
+ parts.push(`**${fieldLabel(d.key)}**:`);
17142
+ for (const item of parsed) parts.push(`- ${String(item)}`);
17143
+ } else parts.push(`**${fieldLabel(d.key)}**: ${String(parsed)}`);
17144
+ }
17145
+ parts.push("");
17146
+ }
17147
+ const frDecisions = decisions.filter((d) => d.category === "functional-requirements");
17148
+ if (frDecisions.length > 0) {
17149
+ parts.push("## Functional Requirements");
17150
+ parts.push("");
17151
+ for (const d of frDecisions) {
17152
+ const parsed = safeParseJson(d.value);
17153
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
17154
+ const fr = parsed;
17155
+ const id = fr.id ?? d.key;
17156
+ const priority = fr.priority ? ` [${fr.priority.toUpperCase()}]` : "";
17157
+ parts.push(`- **${id}**${priority}: ${fr.description ?? d.value}`);
17158
+ if (fr.acceptance_criteria && fr.acceptance_criteria.length > 0) for (const ac of fr.acceptance_criteria) parts.push(` - ${ac}`);
17159
+ } else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
17160
+ }
17161
+ parts.push("");
17162
+ }
17163
+ const nfrDecisions = decisions.filter((d) => d.category === "non-functional-requirements");
17164
+ if (nfrDecisions.length > 0) {
17165
+ parts.push("## Non-Functional Requirements");
17166
+ parts.push("");
17167
+ for (const d of nfrDecisions) {
17168
+ const parsed = safeParseJson(d.value);
17169
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
17170
+ const nfr = parsed;
17171
+ const id = nfr.id ?? d.key;
17172
+ const cat = nfr.category ? ` [${nfr.category.toUpperCase()}]` : "";
17173
+ parts.push(`- **${id}**${cat}: ${nfr.description ?? d.value}`);
17174
+ } else parts.push(`- **${d.key}**: ${renderValue(d.value)}`);
17175
+ }
17176
+ parts.push("");
17177
+ }
17178
+ const domainDecisions = decisions.filter((d) => d.category === "domain-model");
17179
+ if (domainDecisions.length > 0) {
17180
+ parts.push("## Domain Model");
17181
+ parts.push("");
17182
+ for (const d of domainDecisions) parts.push(renderValue(d.value));
17183
+ parts.push("");
17184
+ }
17185
+ const userStoryDecisions = decisions.filter((d) => d.category === "user-stories");
17186
+ if (userStoryDecisions.length > 0) {
17187
+ parts.push("## User Stories");
17188
+ parts.push("");
17189
+ for (const d of userStoryDecisions) {
17190
+ const parsed = safeParseJson(d.value);
17191
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
17192
+ const us = parsed;
17193
+ if (us.title) {
17194
+ parts.push(`### ${us.title}`);
17195
+ parts.push("");
17196
+ if (us.description) {
17197
+ parts.push(us.description);
17198
+ parts.push("");
17199
+ }
17200
+ } else {
17201
+ parts.push(renderValue(d.value));
17202
+ parts.push("");
17203
+ }
17204
+ } else {
17205
+ parts.push(renderValue(d.value));
17206
+ parts.push("");
17207
+ }
17208
+ }
17209
+ }
17210
+ const techStackDecisions = decisions.filter((d) => d.category === "tech-stack");
17211
+ if (techStackDecisions.length > 0) {
17212
+ parts.push("## Tech Stack");
17213
+ parts.push("");
17214
+ for (const d of techStackDecisions) if (d.key === "tech_stack") {
17215
+ const parsed = safeParseJson(d.value);
17216
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) for (const [k, v] of Object.entries(parsed)) parts.push(`- **${fieldLabel(k)}**: ${String(v)}`);
17217
+ else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
17218
+ } else parts.push(`- **${fieldLabel(d.key)}**: ${d.value}`);
17219
+ parts.push("");
17220
+ }
17221
+ const outOfScopeDecisions = decisions.filter((d) => d.category === "out-of-scope");
17222
+ if (outOfScopeDecisions.length > 0) {
17223
+ parts.push("## Out of Scope");
17224
+ parts.push("");
17225
+ for (const d of outOfScopeDecisions) parts.push(renderValue(d.value));
17226
+ parts.push("");
17227
+ }
17228
+ const functionalReqs = requirements.filter((r) => r.type === "functional");
17229
+ const nonFunctionalReqs = requirements.filter((r) => r.type === "non_functional");
17230
+ if ((functionalReqs.length > 0 || nonFunctionalReqs.length > 0) && frDecisions.length === 0 && nfrDecisions.length === 0) {
17231
+ parts.push("## Requirements (from Requirements Table)");
17232
+ parts.push("");
17233
+ if (functionalReqs.length > 0) {
17234
+ parts.push("### Functional Requirements");
17235
+ parts.push("");
17236
+ for (const r of functionalReqs) {
17237
+ const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
17238
+ parts.push(`- ${r.source ?? ""}${priority}: ${r.description}`);
17239
+ }
17240
+ parts.push("");
17241
+ }
17242
+ if (nonFunctionalReqs.length > 0) {
17243
+ parts.push("### Non-Functional Requirements");
17244
+ parts.push("");
17245
+ for (const r of nonFunctionalReqs) {
17246
+ const priority = r.priority ? ` [${r.priority.toUpperCase()}]` : "";
17247
+ parts.push(`- ${priority}: ${r.description}`);
17248
+ }
17249
+ parts.push("");
17250
+ }
17251
+ }
17252
+ return parts.join("\n");
17253
+ }
17254
+ /**
17255
+ * Render solutioning-phase architecture decisions as an `architecture.md` file.
17256
+ *
17257
+ * Groups all architecture decisions into a single `## Architecture Decisions`
17258
+ * section, formatting each as `**key**: value` with italicised rationale where
17259
+ * present. The heading pattern matches the regex used by `seedMethodologyContext()`
17260
+ * so that the exported file can be round-tripped back into the decision store.
17261
+ *
17262
+ * @param decisions - All decisions from the solutioning phase (any category)
17263
+ * @returns Formatted markdown content for architecture.md, or '' if no data
17264
+ */
17265
+ function renderArchitecture(decisions) {
17266
+ const archDecisions = decisions.filter((d) => d.category === "architecture");
17267
+ if (archDecisions.length === 0) return "";
17268
+ const parts = ["# Architecture", ""];
17269
+ parts.push("## Architecture Decisions");
17270
+ parts.push("");
17271
+ for (const d of archDecisions) {
17272
+ const value = safeParseJson(d.value);
17273
+ let displayValue;
17274
+ if (typeof value === "object" && value !== null && !Array.isArray(value)) {
17275
+ displayValue = Object.entries(value).map(([k, v]) => ` - *${fieldLabel(k)}*: ${String(v)}`).join("\n");
17276
+ parts.push(`**${d.key}**:`);
17277
+ parts.push(displayValue);
17278
+ } else if (Array.isArray(value)) {
17279
+ displayValue = value.map((item) => ` - ${String(item)}`).join("\n");
17280
+ parts.push(`**${d.key}**:`);
17281
+ parts.push(displayValue);
17282
+ } else {
17283
+ displayValue = String(value);
17284
+ if (d.rationale) parts.push(`**${d.key}**: ${displayValue} *(${d.rationale})*`);
17285
+ else parts.push(`**${d.key}**: ${displayValue}`);
17286
+ }
17287
+ }
17288
+ parts.push("");
17289
+ return parts.join("\n");
17290
+ }
17291
+ /**
17292
+ * Render solutioning-phase epics and stories decisions as an `epics.md` file.
17293
+ *
17294
+ * Output format:
17295
+ * ```
17296
+ * ## Epic 1: Title
17297
+ * Description
17298
+ *
17299
+ * ### Story 1-1: Title
17300
+ * **Priority**: must
17301
+ * **Description**: ...
17302
+ * **Acceptance Criteria**:
17303
+ * - AC1
17304
+ * - AC2
17305
+ * ```
17306
+ *
17307
+ * The `## Epic N:` heading pattern is parsed by `parseEpicShards()` in
17308
+ * `seed-methodology-context.ts`, satisfying the round-trip contract (AC5).
17309
+ *
17310
+ * Stories are associated with their parent epic by the numeric prefix of the
17311
+ * story key (e.g., story key `2-3` → epic 2).
17312
+ *
17313
+ * @param decisions - All decisions from the solutioning phase (any category)
17314
+ * @returns Formatted markdown content for epics.md, or '' if no data
17315
+ */
17316
+ function renderEpics(decisions) {
17317
+ const epicDecisions = decisions.filter((d) => d.category === "epics");
17318
+ const storyDecisions = decisions.filter((d) => d.category === "stories");
17319
+ if (epicDecisions.length === 0 && storyDecisions.length === 0) return "";
17320
+ const epicMap = new Map();
17321
+ for (const d of epicDecisions) {
17322
+ const match$1 = /^epic-(\d+)$/i.exec(d.key);
17323
+ if (match$1 === null) continue;
17324
+ const epicNum = parseInt(match$1[1], 10);
17325
+ const parsed = safeParseJson(d.value);
17326
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
17327
+ const p = parsed;
17328
+ epicMap.set(epicNum, {
17329
+ num: epicNum,
17330
+ title: p.title ?? `Epic ${epicNum}`,
17331
+ description: p.description ?? ""
17332
+ });
17333
+ } else epicMap.set(epicNum, {
17334
+ num: epicNum,
17335
+ title: String(parsed),
17336
+ description: ""
17337
+ });
17338
+ }
17339
+ const storyMap = new Map();
17340
+ for (const d of storyDecisions) {
17341
+ const parsed = safeParseJson(d.value);
17342
+ let story;
17343
+ if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
17344
+ const p = parsed;
17345
+ const storyKey = p.key ?? d.key;
17346
+ const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
17347
+ if (keyMatch === null) continue;
17348
+ const epicNum = parseInt(keyMatch[1], 10);
17349
+ const storyNum = parseInt(keyMatch[2], 10);
17350
+ story = {
17351
+ key: storyKey,
17352
+ epicNum,
17353
+ storyNum,
17354
+ title: p.title ?? `Story ${storyKey}`,
17355
+ description: p.description ?? "",
17356
+ ac: p.acceptance_criteria ?? p.ac ?? [],
17357
+ priority: p.priority ?? "must"
17358
+ };
17359
+ } else {
17360
+ const storyKey = d.key;
17361
+ const keyMatch = /^(\d+)-(\d+)/.exec(storyKey);
17362
+ if (keyMatch === null) continue;
17363
+ const epicNum = parseInt(keyMatch[1], 10);
17364
+ const storyNum = parseInt(keyMatch[2], 10);
17365
+ story = {
17366
+ key: storyKey,
17367
+ epicNum,
17368
+ storyNum,
17369
+ title: `Story ${storyKey}`,
17370
+ description: String(parsed),
17371
+ ac: [],
17372
+ priority: "must"
17373
+ };
17374
+ }
17375
+ if (!storyMap.has(story.epicNum)) storyMap.set(story.epicNum, []);
17376
+ storyMap.get(story.epicNum).push(story);
17377
+ }
17378
+ for (const stories of storyMap.values()) stories.sort((a, b) => a.storyNum - b.storyNum);
17379
+ const allEpicNums = new Set([...epicMap.keys(), ...storyMap.keys()]);
17380
+ const sortedEpicNums = [...allEpicNums].sort((a, b) => a - b);
17381
+ const parts = ["# Epics and Stories", ""];
17382
+ for (const epicNum of sortedEpicNums) {
17383
+ const epic = epicMap.get(epicNum);
17384
+ const epicTitle = epic?.title ?? `Epic ${epicNum}`;
17385
+ const epicDescription = epic?.description ?? "";
17386
+ parts.push(`## Epic ${epicNum}: ${epicTitle}`);
17387
+ parts.push("");
17388
+ if (epicDescription) {
17389
+ parts.push(epicDescription);
17390
+ parts.push("");
17391
+ }
17392
+ const stories = storyMap.get(epicNum) ?? [];
17393
+ for (const story of stories) {
17394
+ parts.push(`### Story ${story.key}: ${story.title}`);
17395
+ parts.push("");
17396
+ parts.push(`**Priority**: ${story.priority}`);
17397
+ if (story.description) parts.push(`**Description**: ${story.description}`);
17398
+ if (story.ac.length > 0) {
17399
+ parts.push("**Acceptance Criteria**:");
17400
+ for (const ac of story.ac) parts.push(`- ${ac}`);
17401
+ }
17402
+ parts.push("");
17403
+ }
17404
+ }
17405
+ return parts.join("\n");
17406
+ }
17407
/**
 * Render `operational-finding` category decisions as an "Operational Findings" section.
 *
 * Groups findings by run key (for run-summary decisions) and stall key (for stall decisions).
 * Returns '' if no matching decisions are found.
 *
 * @param decisions - Decisions of any category; filters for 'operational-finding'
 * @returns Formatted markdown content, or '' if empty
 */
function renderOperationalFindings(decisions) {
	const operational = decisions.filter((d) => d.category === "operational-finding");
	if (operational.length === 0) return "";
	// Partition by key prefix: run summaries, stall events, and everything else.
	const isRunSummary = (d) => d.key.startsWith("run-summary:");
	const isStall = (d) => d.key.startsWith("stall:");
	const summaries = operational.filter(isRunSummary);
	const stalls = operational.filter(isStall);
	const misc = operational.filter((d) => !isRunSummary(d) && !isStall(d));
	const lines = ["## Operational Findings", ""];
	if (summaries.length > 0) {
		lines.push("### Run Summaries");
		lines.push("");
		for (const finding of summaries) {
			const runId = finding.key.replace("run-summary:", "");
			const value = safeParseJson(finding.value);
			if (typeof value === "object" && value !== null && !Array.isArray(value)) {
				lines.push(`**Run: ${runId}**`);
				lines.push(`- Succeeded: ${(value.succeeded ?? []).join(", ") || "none"}`);
				lines.push(`- Failed: ${(value.failed ?? []).join(", ") || "none"}`);
				lines.push(`- Escalated: ${(value.escalated ?? []).join(", ") || "none"}`);
				lines.push(`- Total restarts: ${value.total_restarts ?? 0}`);
				lines.push(`- Elapsed: ${value.elapsed_seconds ?? 0}s`);
				lines.push(`- Tokens: ${value.total_input_tokens ?? 0} in / ${value.total_output_tokens ?? 0} out`);
			} else {
				// Non-object values are rendered verbatim.
				lines.push(`**Run: ${runId}**: ${String(value)}`);
			}
			lines.push("");
		}
	}
	if (stalls.length > 0) {
		lines.push("### Stall Events");
		lines.push("");
		for (const finding of stalls) {
			const value = safeParseJson(finding.value);
			if (typeof value === "object" && value !== null && !Array.isArray(value)) {
				const outcome = value.outcome ?? "unknown";
				lines.push(`- **${finding.key}**: phase=${value.phase ?? "?"} staleness=${value.staleness_secs ?? 0}s attempt=${value.attempt ?? 0} outcome=${outcome}`);
			} else {
				lines.push(`- **${finding.key}**: ${String(value)}`);
			}
		}
		lines.push("");
	}
	if (misc.length > 0) {
		// Findings with unrecognized keys get a flat bullet list, no heading.
		for (const finding of misc) lines.push(`- **${finding.key}**: ${renderValue(finding.value)}`);
		lines.push("");
	}
	return lines.join("\n");
}
/**
 * Render `experiment-result` category decisions as an "Experiments" section.
 *
 * Lists each experiment with its verdict, metric delta, and branch name.
 * Returns '' if no matching decisions are found.
 *
 * @param decisions - Decisions of any category; filters for 'experiment-result'
 * @returns Formatted markdown content, or '' if empty
 */
function renderExperiments(decisions) {
	const experiments = decisions.filter((d) => d.category === "experiment-result");
	if (experiments.length === 0) return "";
	// Parse each decision value exactly once; both the verdict tally and the
	// per-experiment lines below need the parsed form. (Previously every value
	// was re-parsed four times: once per verdict filter plus the render loop.)
	const parsedExperiments = experiments.map((d) => safeParseJson(d.value));
	const countVerdict = (verdict) => parsedExperiments.filter((p) => typeof p === "object" && p !== null && p["verdict"] === verdict).length;
	const parts = ["## Experiments", ""];
	parts.push(`**Total**: ${experiments.length} | **Improved**: ${countVerdict("IMPROVED")} | **Mixed**: ${countVerdict("MIXED")} | **Regressed**: ${countVerdict("REGRESSED")}`);
	parts.push("");
	for (const parsed of parsedExperiments) {
		if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
			const e = parsed;
			const verdict = e.verdict ?? "UNKNOWN";
			const metric = e.target_metric ?? "unknown";
			// Branch suffix is only rendered when the experiment produced a branch.
			const branch = e.branch_name ? ` → \`${e.branch_name}\`` : "";
			parts.push(`- **[${verdict}]** ${metric}: before=${e.before ?? "?"} after=${e.after ?? "?"}${branch}`);
		} else parts.push(`- ${String(parsed)}`);
	}
	parts.push("");
	return parts.join("\n");
}
/**
 * Render solutioning-phase readiness-findings decisions as a `readiness-report.md`.
 *
 * Groups findings by category, shows severity per finding, and emits an
 * overall pass/fail verdict based on whether any blockers were found.
 *
 * @param decisions - All decisions from the solutioning phase (any category)
 * @returns Formatted markdown content for readiness-report.md, or '' if no data
 */
function renderReadinessReport(decisions) {
	const raw = decisions.filter((d) => d.category === "readiness-findings");
	if (raw.length === 0) return "";
	// Normalize every decision value into a uniform finding record.
	const findings = raw.map((d) => {
		const parsed = safeParseJson(d.value);
		if (typeof parsed === "object" && parsed !== null && !Array.isArray(parsed)) {
			return {
				category: parsed.category ?? "general",
				severity: parsed.severity ?? "minor",
				description: parsed.description ?? String(parsed),
				affected_items: parsed.affected_items ?? []
			};
		}
		return {
			category: "general",
			severity: "minor",
			description: String(parsed),
			affected_items: []
		};
	});
	const countBySeverity = (severity) => findings.filter((f) => f.severity === severity).length;
	// Any blocker- or major-severity finding fails the report overall.
	const failed = findings.some((f) => f.severity === "blocker" || f.severity === "major");
	const lines = ["# Readiness Report", ""];
	lines.push(`**Overall Verdict**: ${failed ? "FAIL" : "PASS"}`);
	lines.push("");
	lines.push(`**Total Findings**: ${findings.length}`);
	lines.push(`**Blockers**: ${countBySeverity("blocker")}`);
	lines.push(`**Major**: ${countBySeverity("major")}`);
	lines.push(`**Minor**: ${countBySeverity("minor")}`);
	lines.push("");
	const grouped = new Map();
	for (const finding of findings) {
		const bucket = grouped.get(finding.category);
		if (bucket === void 0) grouped.set(finding.category, [finding]);
		else bucket.push(finding);
	}
	// Known categories render first, in this fixed order; unknown ones sort last.
	const categoryOrder = [
		"fr_coverage",
		"architecture_compliance",
		"story_quality",
		"ux_alignment",
		"dependency_validity",
		"general"
	];
	const rank = (category) => {
		const idx = categoryOrder.indexOf(category);
		return idx === -1 ? 999 : idx;
	};
	const orderedCategories = [...grouped.keys()].sort((a, b) => rank(a) - rank(b));
	for (const category of orderedCategories) {
		lines.push(`## ${fieldLabel(category)}`);
		lines.push("");
		for (const finding of grouped.get(category)) {
			lines.push(`- [${finding.severity.toUpperCase()}] ${finding.description}`);
			if (finding.affected_items.length > 0) lines.push(` - *Affected*: ${finding.affected_items.join(", ")}`);
		}
		lines.push("");
	}
	return lines.join("\n");
}

//#endregion
//#region src/cli/commands/export.ts
// Module-scoped logger for the export command; the `$1` suffix is a bundler
// rename to avoid clashing with the other `logger` bindings in this bundle.
const logger$1 = createLogger("export-cmd");
/**
 * Execute the export action.
 * Returns an exit code (0 = success, 1 = error).
 *
 * Reads decisions for one pipeline run out of the decision store and writes a
 * markdown artifact per phase into the output directory: product-brief.md
 * (analysis), prd.md (planning), architecture.md / epics.md /
 * readiness-report.md (solutioning), plus operational-findings.md and
 * experiments.md from category-wide decision queries.
 *
 * @param options - { runId, outputDir, projectRoot, outputFormat }
 *   runId: run to export; undefined or '' selects the latest run.
 *   outputDir: target directory; relative paths resolve against projectRoot.
 *   outputFormat: 'json' emits one machine-readable result (or error) object on
 *     stdout; 'human' prints per-file progress and errors to stderr.
 * @returns Promise resolving to the process exit code (0 = success, 1 = error)
 */
async function runExportAction(options) {
	const { runId, outputDir, projectRoot, outputFormat } = options;
	let adapter;
	try {
		const dbRoot = await resolveMainRepoRoot(projectRoot);
		// Two possible backends live under .substrate: a sqlite file or a dolt dir.
		const dbPath = join$1(dbRoot, ".substrate", "substrate.db");
		const doltDir = join$1(dbRoot, ".substrate", "state", ".dolt");
		if (!existsSync(dbPath) && !existsSync(doltDir)) {
			const errorMsg = `Decision store not initialized. Run 'substrate init' first.`;
			if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: errorMsg }) + "\n");
			else process.stderr.write(`Error: ${errorMsg}\n`);
			return 1;
		}
		// backend "auto": presumably picks sqlite vs dolt based on what exists — confirm in createDatabaseAdapter.
		adapter = createDatabaseAdapter({
			backend: "auto",
			basePath: dbRoot
		});
		await initSchema(adapter);
		// Resolve the run: explicit --run-id wins, otherwise fall back to the latest run.
		let run;
		if (runId !== void 0 && runId !== "") run = await getPipelineRunById(adapter, runId);
		else run = await getLatestRun(adapter);
		if (run === void 0) {
			const errorMsg = runId !== void 0 ? `Pipeline run '${runId}' not found.` : "No pipeline runs found. Run `substrate run` first.";
			if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: errorMsg }) + "\n");
			else process.stderr.write(`Error: ${errorMsg}\n`);
			return 1;
		}
		const activeRunId = run.id;
		const resolvedOutputDir = isAbsolute(outputDir) ? outputDir : join$1(projectRoot, outputDir);
		if (!existsSync(resolvedOutputDir)) mkdirSync(resolvedOutputDir, { recursive: true });
		const filesWritten = [];
		const phasesExported = [];
		// --- analysis phase → product-brief.md ---
		const analysisDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "analysis");
		if (analysisDecisions.length > 0) {
			const content = renderProductBrief(analysisDecisions);
			// Renderers return '' when there is nothing worth writing; skip the file then.
			if (content !== "") {
				const filePath = join$1(resolvedOutputDir, "product-brief.md");
				writeFileSync(filePath, content, "utf-8");
				filesWritten.push(filePath);
				phasesExported.push("analysis");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
		}
		// --- planning phase → prd.md (decisions plus this run's requirements) ---
		const planningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "planning");
		if (planningDecisions.length > 0) {
			const requirements = (await listRequirements(adapter)).filter((r) => r.pipeline_run_id === activeRunId);
			const content = renderPrd(planningDecisions, requirements);
			if (content !== "") {
				const filePath = join$1(resolvedOutputDir, "prd.md");
				writeFileSync(filePath, content, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("planning")) phasesExported.push("planning");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
		}
		// --- solutioning phase → architecture.md, epics.md, readiness-report.md ---
		const solutioningDecisions = await getDecisionsByPhaseForRun(adapter, activeRunId, "solutioning");
		if (solutioningDecisions.length > 0) {
			const archContent = renderArchitecture(solutioningDecisions);
			if (archContent !== "") {
				const filePath = join$1(resolvedOutputDir, "architecture.md");
				writeFileSync(filePath, archContent, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
			const epicsContent = renderEpics(solutioningDecisions);
			if (epicsContent !== "") {
				const filePath = join$1(resolvedOutputDir, "epics.md");
				writeFileSync(filePath, epicsContent, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
			const readinessContent = renderReadinessReport(solutioningDecisions);
			if (readinessContent !== "") {
				const filePath = join$1(resolvedOutputDir, "readiness-report.md");
				writeFileSync(filePath, readinessContent, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("solutioning")) phasesExported.push("solutioning");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
		}
		// --- cross-run categories: operational findings and experiments.
		// NOTE(review): these category queries are NOT filtered by activeRunId,
		// unlike the phase queries above — confirm that is intentional.
		const operationalDecisions = await getDecisionsByCategory(adapter, OPERATIONAL_FINDING);
		if (operationalDecisions.length > 0) {
			const operationalContent = renderOperationalFindings(operationalDecisions);
			if (operationalContent !== "") {
				const filePath = join$1(resolvedOutputDir, "operational-findings.md");
				writeFileSync(filePath, operationalContent, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("operational")) phasesExported.push("operational");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
		}
		// NOTE(review): `EXPERIMENT_RESULT` is not among this module's visible
		// imports (unlike `OPERATIONAL_FINDING`) — confirm it is defined elsewhere
		// in this bundle, otherwise this line throws a ReferenceError at runtime.
		const experimentDecisions = await getDecisionsByCategory(adapter, EXPERIMENT_RESULT);
		if (experimentDecisions.length > 0) {
			const experimentsContent = renderExperiments(experimentDecisions);
			if (experimentsContent !== "") {
				const filePath = join$1(resolvedOutputDir, "experiments.md");
				writeFileSync(filePath, experimentsContent, "utf-8");
				filesWritten.push(filePath);
				if (!phasesExported.includes("operational")) phasesExported.push("operational");
				if (outputFormat === "human") process.stdout.write(` Written: ${filePath}\n`);
			}
		}
		// --- final report: one JSON object, or a human summary with skipped phases ---
		if (outputFormat === "json") {
			const result = {
				files_written: filesWritten,
				run_id: activeRunId,
				phases_exported: phasesExported
			};
			process.stdout.write(JSON.stringify(result) + "\n");
		} else {
			if (filesWritten.length === 0) process.stdout.write(`No data found for run ${activeRunId}. The pipeline may not have completed any phases.\n`);
			else process.stdout.write(`\nExported ${filesWritten.length} file(s) from run ${activeRunId}.\n`);
			const skippedPhases = [];
			if (!phasesExported.includes("analysis")) skippedPhases.push("analysis");
			if (!phasesExported.includes("planning")) skippedPhases.push("planning");
			if (!phasesExported.includes("solutioning")) skippedPhases.push("solutioning");
			if (skippedPhases.length > 0) process.stdout.write(`Phases with no data (skipped): ${skippedPhases.join(", ")}\n`);
		}
		return 0;
	} catch (err) {
		const msg = err instanceof Error ? err.message : String(err);
		if (outputFormat === "json") process.stdout.write(JSON.stringify({ error: msg }) + "\n");
		else process.stderr.write(`Error: ${msg}\n`);
		logger$1.error({ err }, "export action failed");
		return 1;
	} finally {
		// Best-effort close; errors while closing the adapter are deliberately ignored.
		if (adapter !== void 0) try {
			await adapter.close();
		} catch {}
	}
}
/**
 * Register the `export` subcommand on the given commander program.
 *
 * @param program - Commander program to attach the subcommand to
 * @param _version - Unused; kept for signature parity with the other register functions
 * @param projectRoot - Default for --project-root (defaults to the current working directory)
 */
function registerExportCommand(program, _version = "0.0.0", projectRoot = process.cwd()) {
	const command = program.command("export");
	command.description("Export decision store contents as human-readable markdown files");
	command.option("--run-id <id>", "Pipeline run ID to export (defaults to latest run)");
	command.option("--output-dir <path>", "Directory to write exported files to", "_bmad-output/planning-artifacts/");
	command.option("--project-root <path>", "Project root directory", projectRoot);
	command.option("--output-format <format>", "Output format: human (default) or json", "human");
	command.action(async (opts) => {
		// Anything other than the two supported formats warns and falls back to human output.
		if (opts.outputFormat !== "json" && opts.outputFormat !== "human") process.stderr.write(`Warning: unknown --output-format '${opts.outputFormat}', defaulting to 'human'\n`);
		const outputFormat = opts.outputFormat === "json" ? "json" : "human";
		process.exitCode = await runExportAction({
			runId: opts.runId,
			outputDir: opts.outputDir,
			projectRoot: opts.projectRoot,
			outputFormat
		});
	});
}
+
17015
17728
  //#endregion
17016
17729
  //#region packages/sdlc/dist/handlers/sdlc-create-story-handler.js
17017
17730
  /**
@@ -29549,16 +30262,16 @@ var require_ajv = __commonJS({ "node_modules/ajv/lib/ajv.js"(exports, module) {
29549
30262
  return metaOpts;
29550
30263
  }
29551
30264
  function setLogger(self) {
29552
- var logger$21 = self._opts.logger;
29553
- if (logger$21 === false) self.logger = {
30265
+ var logger$22 = self._opts.logger;
30266
+ if (logger$22 === false) self.logger = {
29554
30267
  log: noop,
29555
30268
  warn: noop,
29556
30269
  error: noop
29557
30270
  };
29558
30271
  else {
29559
- if (logger$21 === void 0) logger$21 = console;
29560
- if (!(typeof logger$21 == "object" && logger$21.log && logger$21.warn && logger$21.error)) throw new Error("logger must implement log, warn and error methods");
29561
- self.logger = logger$21;
30272
+ if (logger$22 === void 0) logger$22 = console;
30273
+ if (!(typeof logger$22 == "object" && logger$22.log && logger$22.warn && logger$22.error)) throw new Error("logger must implement log, warn and error methods");
30274
+ self.logger = logger$22;
29562
30275
  }
29563
30276
  }
29564
30277
  function noop() {}
@@ -40187,7 +40900,8 @@ async function runRunAction(options) {
40187
40900
  ts: new Date().toISOString(),
40188
40901
  run_id: pipelineRun.id,
40189
40902
  stories: storyKeys,
40190
- concurrency
40903
+ concurrency,
40904
+ engine: resolvedEngine
40191
40905
  });
40192
40906
  wireNdjsonEmitter(eventBus, ndjsonEmitter);
40193
40907
  }
@@ -40487,6 +41201,13 @@ async function runFullPipeline(options) {
40487
41201
  db: adapter,
40488
41202
  pack
40489
41203
  });
41204
+ try {
41205
+ const staleRuns = await getRunningPipelineRuns(adapter) ?? [];
41206
+ if (staleRuns.length > 0) {
41207
+ for (const stale of staleRuns) await updatePipelineRun(adapter, stale.id, { status: "failed" });
41208
+ logger.info({ count: staleRuns.length }, "Swept stale pipeline run(s) from crashed orchestrator");
41209
+ }
41210
+ } catch {}
40490
41211
  const startedAt = Date.now();
40491
41212
  const runId = await phaseOrchestrator.startRun(concept ?? "", startPhase);
40492
41213
  const runIdFilePath = join(dbDir, "current-run-id");
@@ -40771,6 +41492,19 @@ async function runFullPipeline(options) {
40771
41492
  ts: new Date().toISOString(),
40772
41493
  phase: currentPhase
40773
41494
  });
41495
+ if ([
41496
+ "analysis",
41497
+ "planning",
41498
+ "solutioning"
41499
+ ].includes(currentPhase)) try {
41500
+ const exportDir = join(projectRoot, "_bmad-output", "planning-artifacts");
41501
+ await runExportAction({
41502
+ runId,
41503
+ outputDir: exportDir,
41504
+ projectRoot,
41505
+ outputFormat: "json"
41506
+ });
41507
+ } catch {}
40774
41508
  if (stopAfter !== void 0 && currentPhase === stopAfter) {
40775
41509
  const gate = createStopAfterGate(stopAfter);
40776
41510
  if (gate.shouldHalt()) {
@@ -40885,5 +41619,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
40885
41619
  }
40886
41620
 
40887
41621
  //#endregion
40888
- export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
40889
- //# sourceMappingURL=run-9CV1Feo5.js.map
41622
+ export { AdapterTelemetryPersistence, AppError, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, EpicIngester, GitClient, GrammarLoader, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SymbolParser, createContextCompiler, createDispatcher, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, createTelemetryAdvisor, formatPhaseCompletionSummary, getFactoryRunSummaries, getScenarioResultsForRun, getTwinRunsForRun, listGraphRuns, normalizeGraphSummaryToStatus, registerExportCommand, registerFactoryCommand, registerRunCommand, registerScenariosCommand, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
41623
+ //# sourceMappingURL=run-Dg_BEJB6.js.map