substrate-ai 0.5.3 → 0.5.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -704,7 +704,7 @@ function createDoltClient(options) {
 
  //#endregion
  //#region src/persistence/adapter.ts
- const logger$25 = createLogger("persistence:adapter");
+ const logger$26 = createLogger("persistence:adapter");
  /** Type guard: check if a DatabaseAdapter also implements SyncAdapter. */
  function isSyncAdapter(adapter) {
  return typeof adapter.querySync === "function";
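The `logger$NN` churn that dominates this diff is mechanical: the bundler hoists every module's top-level `const logger = createLogger(...)` into a single scope and disambiguates the name collisions with numeric suffixes in declaration order. Version 0.5.4 adds one more module-level logger, so every suffix after it shifts up by one. A minimal sketch of the mechanism (the `createLogger` stub below is a stand-in, not the package's implementation):

```ts
// Each source module declares its own top-level `logger`. After scope
// hoisting, colliding names become logger$24, logger$25, ... in declaration
// order, so inserting one new logger renames every later one by +1.
type Logger = { debug: (msg: string) => void };
const createLogger = (ns: string): Logger => ({
  debug: (msg) => console.debug(`[${ns}] ${msg}`),
});

const logger$26 = createLogger("persistence:adapter"); // was logger$25 in 0.5.3
logger$26.debug("rename only; runtime behavior is unchanged");
```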
@@ -730,20 +730,20 @@ function createDatabaseAdapter(config = { backend: "auto" }) {
  const basePath = config.basePath ?? process.cwd();
  const doltRepoPath = join$1(basePath, ".substrate", "state");
  if (backend === "dolt") {
- logger$25.debug("Using DoltDatabaseAdapter (explicit config)");
+ logger$26.debug("Using DoltDatabaseAdapter (explicit config)");
  const client = new DoltClient({ repoPath: doltRepoPath });
  return new DoltDatabaseAdapter(client);
  }
  if (backend === "memory") {
- logger$25.debug("Using InMemoryDatabaseAdapter (explicit config)");
+ logger$26.debug("Using InMemoryDatabaseAdapter (explicit config)");
  return new InMemoryDatabaseAdapter();
  }
  if (isDoltAvailable(basePath)) {
- logger$25.debug("Dolt detected, using DoltDatabaseAdapter");
+ logger$26.debug("Dolt detected, using DoltDatabaseAdapter");
  const client = new DoltClient({ repoPath: doltRepoPath });
  return new DoltDatabaseAdapter(client);
  }
- logger$25.debug("Dolt not available, using InMemoryDatabaseAdapter");
+ logger$26.debug("Dolt not available, using InMemoryDatabaseAdapter");
  return new InMemoryDatabaseAdapter();
  }
 
@@ -1101,10 +1101,20 @@ async function initSchema(adapter) {
  tool_name VARCHAR(128),
  is_context_spike BOOLEAN NOT NULL DEFAULT 0,
  child_spans_json TEXT NOT NULL DEFAULT '[]',
+ task_type VARCHAR(64),
+ phase VARCHAR(64),
+ dispatch_id VARCHAR(64),
  PRIMARY KEY (story_key, span_id)
  )
  `);
  await adapter.exec("CREATE INDEX IF NOT EXISTS idx_turn_analysis_story ON turn_analysis (story_key, turn_number)");
+ for (const col of [
+ "task_type",
+ "phase",
+ "dispatch_id"
+ ]) try {
+ await adapter.exec(`ALTER TABLE turn_analysis ADD COLUMN ${col} VARCHAR(64)`);
+ } catch {}
  await adapter.exec(`
  CREATE TABLE IF NOT EXISTS efficiency_scores (
  story_key VARCHAR(64) NOT NULL,
@@ -1119,9 +1129,19 @@ async function initSchema(adapter) {
  total_turns INTEGER NOT NULL DEFAULT 0,
  per_model_json TEXT NOT NULL DEFAULT '[]',
  per_source_json TEXT NOT NULL DEFAULT '[]',
+ dispatch_id TEXT,
+ task_type TEXT,
+ phase TEXT,
  PRIMARY KEY (story_key, timestamp)
  )
  `);
+ for (const col of [
+ "dispatch_id",
+ "task_type",
+ "phase"
+ ]) try {
+ await adapter.exec(`ALTER TABLE efficiency_scores ADD COLUMN ${col} TEXT`);
+ } catch {}
  await adapter.exec(`
  CREATE TABLE IF NOT EXISTS recommendations (
  id VARCHAR(16) NOT NULL,
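The substantive change in this release is the schema migration above: `task_type`, `phase`, and `dispatch_id` are added to the `turn_analysis` and `efficiency_scores` CREATE TABLE statements for fresh databases, while best-effort `ALTER TABLE ... ADD COLUMN` calls wrapped in empty `catch` blocks upgrade databases created by 0.5.3 in place (a duplicate-column error just means the column already exists). A sketch of that idempotent pattern, with the `DatabaseAdapter` shape inferred from the calls in the diff:

```ts
interface DatabaseAdapter {
  exec(sql: string): Promise<void>;
}

// Best-effort, idempotent column add. On engines without
// ADD COLUMN IF NOT EXISTS, a duplicate-column error simply means the
// column is already present, so it is safe to swallow, mirroring the
// empty-catch behavior in initSchema above.
async function addColumnsIfMissing(
  adapter: DatabaseAdapter,
  table: string,
  columns: string[],
  type: string,
): Promise<void> {
  for (const col of columns) {
    try {
      await adapter.exec(`ALTER TABLE ${table} ADD COLUMN ${col} ${type}`);
    } catch {
      // Column already exists; nothing to do.
    }
  }
}

// Usage mirroring the 0.5.4 migration:
// await addColumnsIfMissing(adapter, "turn_analysis", ["task_type", "phase", "dispatch_id"], "VARCHAR(64)");
// await addColumnsIfMissing(adapter, "efficiency_scores", ["dispatch_id", "task_type", "phase"], "TEXT");
```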
@@ -2100,7 +2120,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
 
  //#endregion
  //#region src/modules/config/config-system-impl.ts
- const logger$24 = createLogger("config");
+ const logger$25 = createLogger("config");
  function deepMerge(base, override) {
  const result = { ...base };
  for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
@@ -2145,7 +2165,7 @@ function readEnvOverrides() {
  }
  const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
  if (!parsed.success) {
- logger$24.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
+ logger$25.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
  return {};
  }
  return parsed.data;
@@ -2209,7 +2229,7 @@ var ConfigSystemImpl = class {
  throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
  }
  this._config = result.data;
- logger$24.debug("Configuration loaded successfully");
+ logger$25.debug("Configuration loaded successfully");
  }
  getConfig() {
  if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
@@ -2272,7 +2292,7 @@ var ConfigSystemImpl = class {
  if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
  const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
  if (migrationOutput.result.success) {
- logger$24.info({
+ logger$25.info({
  from: version,
  to: CURRENT_CONFIG_FORMAT_VERSION,
  backup: migrationOutput.result.backupPath
@@ -3753,7 +3773,7 @@ function truncateToTokens(text, maxTokens) {
 
  //#endregion
  //#region src/modules/context-compiler/context-compiler-impl.ts
- const logger$23 = createLogger("context-compiler");
+ const logger$24 = createLogger("context-compiler");
  /**
  * Fraction of the original token budget that must remain (after required +
  * important sections) before an optional section is included.
@@ -3844,7 +3864,7 @@ var ContextCompilerImpl = class {
  includedParts.push(truncated);
  remainingBudget -= truncatedTokens;
  anyTruncated = true;
- logger$23.warn({
+ logger$24.warn({
  section: section.name,
  originalTokens: tokens,
  budgetTokens: truncatedTokens
@@ -3858,7 +3878,7 @@ var ContextCompilerImpl = class {
  });
  } else {
  anyTruncated = true;
- logger$23.warn({
+ logger$24.warn({
  section: section.name,
  tokens
  }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -3885,7 +3905,7 @@ var ContextCompilerImpl = class {
  } else {
  if (tokens > 0) {
  anyTruncated = true;
- logger$23.warn({
+ logger$24.warn({
  section: section.name,
  tokens,
  budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -3979,8 +3999,8 @@ var GrammarLoader = class {
  _extensionMap;
  _cache = new Map();
  _unavailable = false;
- constructor(logger$26) {
- this._logger = logger$26;
+ constructor(logger$27) {
+ this._logger = logger$27;
  this._extensionMap = new Map([
  [".ts", "tree-sitter-typescript/typescript"],
  [".tsx", "tree-sitter-typescript/tsx"],
@@ -4066,9 +4086,9 @@ const ERR_REPO_MAP_GIT_FAILED = "ERR_REPO_MAP_GIT_FAILED";
  var SymbolParser = class {
  _grammarLoader;
  _logger;
- constructor(grammarLoader, logger$26) {
+ constructor(grammarLoader, logger$27) {
  this._grammarLoader = grammarLoader;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  async parseFile(filePath) {
  const ext$1 = extname(filePath);
@@ -4213,9 +4233,9 @@ async function computeFileHash(filePath) {
  var DoltSymbolRepository = class {
  _client;
  _logger;
- constructor(client, logger$26) {
+ constructor(client, logger$27) {
  this._client = client;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  /**
  * Atomically replace all symbols for filePath.
@@ -4421,11 +4441,11 @@ var RepoMapStorage = class {
  _metaRepo;
  _gitClient;
  _logger;
- constructor(symbolRepo, metaRepo, gitClient, logger$26) {
+ constructor(symbolRepo, metaRepo, gitClient, logger$27) {
  this._symbolRepo = symbolRepo;
  this._metaRepo = metaRepo;
  this._gitClient = gitClient;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  /**
  * Returns true if the file's current content hash differs from the stored hash.
@@ -4542,8 +4562,8 @@ function runGit(args, cwd) {
  */
  var GitClient = class {
  _logger;
- constructor(logger$26) {
- this._logger = logger$26;
+ constructor(logger$27) {
+ this._logger = logger$27;
  }
  /**
  * Returns the current HEAD commit SHA.
@@ -5899,9 +5919,9 @@ var RepoMapQueryEngine = class {
  repo;
  logger;
  telemetry;
- constructor(repo, logger$26, telemetry) {
+ constructor(repo, logger$27, telemetry) {
  this.repo = repo;
- this.logger = logger$26;
+ this.logger = logger$27;
  this.telemetry = telemetry;
  }
  async query(q) {
@@ -6121,9 +6141,9 @@ var RepoMapFormatter = class {
  var RepoMapTelemetry = class {
  _telemetry;
  _logger;
- constructor(telemetry, logger$26) {
+ constructor(telemetry, logger$27) {
  this._telemetry = telemetry;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  /**
  * Emit a `repo_map.query` span.
@@ -6148,9 +6168,9 @@ var RepoMapTelemetry = class {
  var RepoMapModule = class {
  _metaRepo;
  _logger;
- constructor(metaRepo, logger$26) {
+ constructor(metaRepo, logger$27) {
  this._metaRepo = metaRepo;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  /**
  * Check whether the stored repo-map is stale relative to the current HEAD commit.
@@ -6194,9 +6214,9 @@ var RepoMapModule = class {
  var RepoMapInjector = class {
  _queryEngine;
  _logger;
- constructor(queryEngine, logger$26) {
+ constructor(queryEngine, logger$27) {
  this._queryEngine = queryEngine;
- this._logger = logger$26;
+ this._logger = logger$27;
  }
  /**
  * Build repo-map context by extracting file references from the story content,
@@ -6489,7 +6509,7 @@ function sanitizeYamlEscapes(yamlText) {
 
  //#endregion
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
- const logger$22 = createLogger("agent-dispatch");
+ const logger$23 = createLogger("agent-dispatch");
  const SHUTDOWN_GRACE_MS = 1e4;
  const SHUTDOWN_MAX_WAIT_MS = 3e4;
  const CHARS_PER_TOKEN = 4;
@@ -6534,7 +6554,7 @@ function getAvailableMemory() {
  }).trim(), 10);
  _lastKnownPressureLevel = pressureLevel;
  if (pressureLevel >= 4) {
- logger$22.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
+ logger$23.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
  return 0;
  }
  } catch {}
@@ -6549,7 +6569,7 @@ function getAvailableMemory() {
  const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
  const available = (free + purgeable + speculative) * pageSize;
  if (pressureLevel >= 2) {
- logger$22.warn({
+ logger$23.warn({
  pressureLevel,
  availableBeforeDiscount: available
  }, "macOS kernel reports memory pressure — discounting estimate");
@@ -6631,7 +6651,7 @@ var DispatcherImpl = class {
  resolve: typedResolve,
  reject
  });
- logger$22.debug({
+ logger$23.debug({
  id,
  queueLength: this._queue.length
  }, "Dispatch queued");
@@ -6662,7 +6682,7 @@ var DispatcherImpl = class {
  async shutdown() {
  this._shuttingDown = true;
  this._stopMemoryPressureTimer();
- logger$22.info({
+ logger$23.info({
  running: this._running.size,
  queued: this._queue.length
  }, "Dispatcher shutting down");
@@ -6695,10 +6715,10 @@ var DispatcherImpl = class {
  }
  }, 50);
  });
- logger$22.info("Dispatcher shutdown complete");
+ logger$23.info("Dispatcher shutdown complete");
  }
  async _startDispatch(id, request, resolve$2) {
- const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint, storyKey } = request;
+ const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, maxContextTokens, otlpEndpoint, storyKey, optimizationDirectives } = request;
  let effectiveModel = model;
  if (effectiveModel === void 0 && this._routingResolver !== null) {
  const resolution = this._routingResolver.resolveModel(taskType);
@@ -6711,13 +6731,13 @@ var DispatcherImpl = class {
  phase: resolution.phase,
  source: resolution.source
  });
- logger$22.debug({
+ logger$23.debug({
  id,
  taskType,
  model: resolution.model,
  routingSource: resolution.source
  }, "Routing resolved model");
- } else logger$22.debug({
+ } else logger$23.debug({
  id,
  taskType,
  routingSource: "fallback"
@@ -6725,7 +6745,7 @@ var DispatcherImpl = class {
  }
  const adapter = this._adapterRegistry.get(agent);
  if (adapter === void 0) {
- logger$22.warn({
+ logger$23.warn({
  id,
  agent
  }, "No adapter found for agent");
@@ -6753,8 +6773,10 @@ var DispatcherImpl = class {
  billingMode: "subscription",
  ...effectiveModel !== void 0 ? { model: effectiveModel } : {},
  ...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {},
+ ...maxContextTokens !== void 0 ? { maxContextTokens } : {},
  ...otlpEndpoint !== void 0 ? { otlpEndpoint } : {},
- ...storyKey !== void 0 ? { storyKey } : {}
+ ...storyKey !== void 0 ? { storyKey } : {},
+ ...optimizationDirectives !== void 0 ? { optimizationDirectives } : {}
  });
  const timeoutMs = timeout ?? this._config.defaultTimeouts[taskType] ?? DEFAULT_TIMEOUTS[taskType] ?? 3e5;
  const env = { ...process.env };
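The other functional change: two new optional dispatch-request fields, `maxContextTokens` and `optimizationDirectives`, are threaded into the adapter command with the conditional-spread idiom already used for `model` and `maxTurns`. Spreading `{ key }` only when the value is defined keeps absent options truly absent rather than set to an explicit `undefined`. A self-contained sketch of the idiom (the field names come from the diff; the surrounding types are illustrative):

```ts
interface DispatchCommandOptions {
  model?: string;
  maxContextTokens?: number;
  optimizationDirectives?: string[];
}

// `...{}` contributes nothing, so each field appears only when defined.
// That matters for `"key" in obj` checks and exactOptionalPropertyTypes.
function buildOptions(
  model?: string,
  maxContextTokens?: number,
  optimizationDirectives?: string[],
): DispatchCommandOptions {
  return {
    ...(model !== undefined ? { model } : {}),
    ...(maxContextTokens !== undefined ? { maxContextTokens } : {}),
    ...(optimizationDirectives !== undefined ? { optimizationDirectives } : {}),
  };
}

console.log(buildOptions("some-model", undefined, ["prefer-small-diffs"]));
// -> { model: "some-model", optimizationDirectives: ["prefer-small-diffs"] }
```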
@@ -6773,7 +6795,7 @@ var DispatcherImpl = class {
  });
  const startedAt = Date.now();
  proc.on("error", (err) => {
- logger$22.error({
+ logger$23.error({
  id,
  binary: cmd.binary,
  error: err.message
@@ -6781,7 +6803,7 @@ var DispatcherImpl = class {
  });
  if (proc.stdin !== null) {
  proc.stdin.on("error", (err) => {
- if (err.code !== "EPIPE") logger$22.warn({
+ if (err.code !== "EPIPE") logger$23.warn({
  id,
  error: err.message
  }, "stdin write error");
@@ -6823,7 +6845,7 @@ var DispatcherImpl = class {
  agent,
  taskType
  });
- logger$22.debug({
+ logger$23.debug({
  id,
  agent,
  taskType,
@@ -6840,7 +6862,7 @@ var DispatcherImpl = class {
  dispatchId: id,
  timeoutMs
  });
- logger$22.warn({
+ logger$23.warn({
  id,
  agent,
  taskType,
@@ -6896,7 +6918,7 @@ var DispatcherImpl = class {
  inputTokens,
  outputTokens: Math.ceil(stdout.length / CHARS_PER_TOKEN)
  });
- logger$22.debug({
+ logger$23.debug({
  id,
  agent,
  taskType,
@@ -6922,7 +6944,7 @@ var DispatcherImpl = class {
  error: stderr || `Process exited with code ${String(code)}`,
  exitCode: code
  });
- logger$22.debug({
+ logger$23.debug({
  id,
  agent,
  taskType,
@@ -6981,7 +7003,7 @@ var DispatcherImpl = class {
  const next = this._queue.shift();
  if (next === void 0) return;
  next.handle.status = "running";
- logger$22.debug({
+ logger$23.debug({
  id: next.id,
  queueLength: this._queue.length
  }, "Dequeued dispatch");
@@ -6994,7 +7016,7 @@ var DispatcherImpl = class {
  _isMemoryPressured() {
  const free = getAvailableMemory();
  if (free < MIN_FREE_MEMORY_BYTES) {
- logger$22.warn({
+ logger$23.warn({
  freeMB: Math.round(free / 1024 / 1024),
  thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024),
  pressureLevel: _lastKnownPressureLevel
@@ -7110,7 +7132,7 @@ function runBuildVerification(options) {
  let cmd;
  if (verifyCommand === void 0) {
  const detection = detectPackageManager(projectRoot);
- logger$22.info({
+ logger$23.info({
  packageManager: detection.packageManager,
  lockfile: detection.lockfile,
  resolvedCommand: detection.command
@@ -8206,7 +8228,7 @@ var DoltStateStore = class DoltStateStore {
 
  //#endregion
  //#region src/modules/state/index.ts
- const logger$21 = createLogger("state:factory");
+ const logger$22 = createLogger("state:factory");
  /**
  * Synchronously check whether Dolt is available and a Dolt repo exists at the
  * canonical state path under `basePath`.
@@ -8253,14 +8275,14 @@ function createStateStore(config = {}) {
  const repoPath = config.basePath ?? process.cwd();
  const detection = detectDoltAvailableSync(repoPath);
  if (detection.available) {
- logger$21.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
+ logger$22.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
  const client = new DoltClient({ repoPath });
  return new DoltStateStore({
  repoPath,
  client
  });
  } else {
- logger$21.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
+ logger$22.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
  return new FileStateStore({ basePath: config.basePath });
  }
  }
@@ -8354,7 +8376,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
 
  //#endregion
  //#region src/modules/compiled-workflows/prompt-assembler.ts
- const logger$20 = createLogger("compiled-workflows:prompt-assembler");
+ const logger$21 = createLogger("compiled-workflows:prompt-assembler");
  /**
  * Assemble a final prompt from a template and sections map.
  *
@@ -8379,7 +8401,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
  tokenCount,
  truncated: false
  };
- logger$20.warn({
+ logger$21.warn({
  tokenCount,
  ceiling: tokenCeiling
  }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -8395,10 +8417,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
  const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
  if (targetSectionTokens === 0) {
  contentMap[section.name] = "";
- logger$20.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
+ logger$21.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
  } else {
  contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
- logger$20.warn({
+ logger$21.warn({
  sectionName: section.name,
  targetSectionTokens
  }, "Section truncated to fit token budget");
@@ -8409,7 +8431,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
  }
  if (tokenCount <= tokenCeiling) break;
  }
- if (tokenCount > tokenCeiling) logger$20.warn({
+ if (tokenCount > tokenCeiling) logger$21.warn({
  tokenCount,
  ceiling: tokenCeiling
  }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -8707,7 +8729,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
 
  //#endregion
  //#region src/modules/compiled-workflows/create-story.ts
- const logger$19 = createLogger("compiled-workflows:create-story");
+ const logger$20 = createLogger("compiled-workflows:create-story");
  /**
  * Execute the compiled create-story workflow.
  *
@@ -8727,13 +8749,13 @@ const logger$19 = createLogger("compiled-workflows:create-story");
  */
  async function runCreateStory(deps, params) {
  const { epicId, storyKey, pipelineRunId } = params;
- logger$19.debug({
+ logger$20.debug({
  epicId,
  storyKey,
  pipelineRunId
  }, "Starting create-story workflow");
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
- logger$19.info({
+ logger$20.info({
  workflow: "create-story",
  ceiling: TOKEN_CEILING,
  source: tokenCeilingSource
@@ -8743,7 +8765,7 @@ async function runCreateStory(deps, params) {
  template = await deps.pack.getPrompt("create-story");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$19.error({ error }, "Failed to retrieve create-story prompt template");
+ logger$20.error({ error }, "Failed to retrieve create-story prompt template");
  return {
  result: "failed",
  error: `Failed to retrieve prompt template: ${error}`,
@@ -8785,7 +8807,7 @@ async function runCreateStory(deps, params) {
  priority: "important"
  }
  ], TOKEN_CEILING);
- logger$19.debug({
+ logger$20.debug({
  tokenCount,
  truncated,
  tokenCeiling: TOKEN_CEILING
@@ -8804,7 +8826,7 @@ async function runCreateStory(deps, params) {
  dispatchResult = await handle.result;
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$19.error({
+ logger$20.error({
  epicId,
  storyKey,
  error
@@ -8825,7 +8847,7 @@ async function runCreateStory(deps, params) {
  if (dispatchResult.status === "failed") {
  const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
  const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
- logger$19.warn({
+ logger$20.warn({
  epicId,
  storyKey,
  exitCode: dispatchResult.exitCode
@@ -8837,7 +8859,7 @@ async function runCreateStory(deps, params) {
  };
  }
  if (dispatchResult.status === "timeout") {
- logger$19.warn({
+ logger$20.warn({
  epicId,
  storyKey
  }, "Create-story dispatch timed out");
@@ -8850,7 +8872,7 @@ async function runCreateStory(deps, params) {
  if (dispatchResult.parsed === null) {
  const details = dispatchResult.parseError ?? "No YAML block found in output";
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
- logger$19.warn({
+ logger$20.warn({
  epicId,
  storyKey,
  details,
@@ -8866,7 +8888,7 @@ async function runCreateStory(deps, params) {
  const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
  if (!parseResult.success) {
  const details = parseResult.error.message;
- logger$19.warn({
+ logger$20.warn({
  epicId,
  storyKey,
  details
@@ -8879,7 +8901,7 @@ async function runCreateStory(deps, params) {
  };
  }
  const parsed = parseResult.data;
- logger$19.info({
+ logger$20.info({
  epicId,
  storyKey,
  storyFile: parsed.story_file,
@@ -8901,7 +8923,7 @@ async function getImplementationDecisions(deps) {
  try {
  return await getDecisionsByPhase(deps.db, "implementation");
  } catch (err) {
- logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
+ logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
  return [];
  }
  }
@@ -8944,13 +8966,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
  if (storyKey) {
  const storySection = extractStorySection(shardContent, storyKey);
  if (storySection) {
- logger$19.debug({
+ logger$20.debug({
  epicId,
  storyKey
  }, "Extracted per-story section from epic shard");
  return storySection;
  }
- logger$19.debug({
+ logger$20.debug({
  epicId,
  storyKey
  }, "No matching story section found — using full epic shard");
@@ -8960,11 +8982,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
  if (projectRoot) {
  const fallback = readEpicShardFromFile(projectRoot, epicId);
  if (fallback) {
- logger$19.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
+ logger$20.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
  if (storyKey) {
  const storySection = extractStorySection(fallback, storyKey);
  if (storySection) {
- logger$19.debug({
+ logger$20.debug({
  epicId,
  storyKey
  }, "Extracted per-story section from file-based epic shard");
@@ -8976,7 +8998,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
  }
  return "";
  } catch (err) {
- logger$19.warn({
+ logger$20.warn({
  epicId,
  error: err instanceof Error ? err.message : String(err)
  }, "Failed to retrieve epic shard");
@@ -8993,7 +9015,7 @@ function getPrevDevNotes(decisions, epicId) {
  if (devNotes.length === 0) return "";
  return devNotes[devNotes.length - 1].value;
  } catch (err) {
- logger$19.warn({
+ logger$20.warn({
  epicId,
  error: err instanceof Error ? err.message : String(err)
  }, "Failed to retrieve prev dev notes");
@@ -9013,13 +9035,13 @@ async function getArchConstraints$3(deps) {
  if (deps.projectRoot) {
  const fallback = readArchConstraintsFromFile(deps.projectRoot);
  if (fallback) {
- logger$19.info("Using file-based fallback for architecture constraints (decisions table empty)");
+ logger$20.info("Using file-based fallback for architecture constraints (decisions table empty)");
  return fallback;
  }
  }
  return "";
  } catch (err) {
- logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
+ logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
  return "";
  }
  }
@@ -9039,7 +9061,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
  const match$1 = pattern.exec(content);
  return match$1 ? match$1[0].trim() : "";
  } catch (err) {
- logger$19.warn({
+ logger$20.warn({
  epicId,
  error: err instanceof Error ? err.message : String(err)
  }, "File-based epic shard fallback failed");
@@ -9062,7 +9084,7 @@ function readArchConstraintsFromFile(projectRoot) {
  const content = readFileSync$1(archPath, "utf-8");
  return content.slice(0, 1500);
  } catch (err) {
- logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
+ logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
  return "";
  }
  }
@@ -9075,7 +9097,7 @@ async function getStoryTemplate(deps) {
  try {
  return await deps.pack.getTemplate("story");
  } catch (err) {
- logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
+ logger$20.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
  return "";
  }
  }
@@ -9112,7 +9134,7 @@ async function isValidStoryFile(filePath) {
 
  //#endregion
  //#region src/modules/compiled-workflows/git-helpers.ts
- const logger$18 = createLogger("compiled-workflows:git-helpers");
+ const logger$19 = createLogger("compiled-workflows:git-helpers");
  /**
  * Capture the full git diff for HEAD (working tree vs current commit).
  *
@@ -9208,7 +9230,7 @@ async function stageIntentToAdd(files, workingDirectory) {
  if (files.length === 0) return;
  const existing = files.filter((f) => {
  const exists = existsSync$1(f);
- if (!exists) logger$18.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
+ if (!exists) logger$19.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
  return exists;
  });
  if (existing.length === 0) return;
@@ -9242,7 +9264,7 @@ async function runGitCommand(args, cwd, logLabel) {
  stderr += chunk.toString("utf-8");
  });
  proc.on("error", (err) => {
- logger$18.warn({
+ logger$19.warn({
  label: logLabel,
  cwd,
  error: err.message
@@ -9251,7 +9273,7 @@ async function runGitCommand(args, cwd, logLabel) {
  });
  proc.on("close", (code) => {
  if (code !== 0) {
- logger$18.warn({
+ logger$19.warn({
  label: logLabel,
  cwd,
  code,
@@ -9267,7 +9289,7 @@ async function runGitCommand(args, cwd, logLabel) {
 
  //#endregion
  //#region src/modules/implementation-orchestrator/project-findings.ts
- const logger$17 = createLogger("project-findings");
+ const logger$18 = createLogger("project-findings");
  /** Maximum character length for the findings summary */
  const MAX_CHARS = 2e3;
  /**
@@ -9333,7 +9355,7 @@ async function getProjectFindings(db) {
  if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
  return summary;
  } catch (err) {
- logger$17.warn({ err }, "Failed to query project findings (graceful fallback)");
+ logger$18.warn({ err }, "Failed to query project findings (graceful fallback)");
  return "";
  }
  }
@@ -9356,7 +9378,7 @@ function extractRecurringPatterns(outcomes) {
 
  //#endregion
  //#region src/modules/compiled-workflows/story-complexity.ts
- const logger$16 = createLogger("compiled-workflows:story-complexity");
+ const logger$17 = createLogger("compiled-workflows:story-complexity");
  /**
  * Compute a complexity score from story markdown content.
  *
@@ -9408,7 +9430,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
  * @param resolvedMaxTurns - Turn limit resolved for this dispatch
  */
  function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
- logger$16.info({
+ logger$17.info({
  storyKey,
  taskCount: complexity.taskCount,
  subtaskCount: complexity.subtaskCount,
@@ -9502,7 +9524,7 @@ function detectDeprecatedStatusField(content) {
 
  //#endregion
  //#region src/modules/compiled-workflows/dev-story.ts
- const logger$15 = createLogger("compiled-workflows:dev-story");
+ const logger$16 = createLogger("compiled-workflows:dev-story");
  /** Default timeout for dev-story dispatches in milliseconds (30 min) */
  const DEFAULT_TIMEOUT_MS$1 = 18e5;
  /** Default Vitest test patterns injected when no test-pattern decisions exist */
@@ -9525,12 +9547,12 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
  */
  async function runDevStory(deps, params) {
  const { storyKey, storyFilePath, taskScope, priorFiles } = params;
- logger$15.info({
+ logger$16.info({
  storyKey,
  storyFilePath
  }, "Starting compiled dev-story workflow");
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
- logger$15.info({
+ logger$16.info({
  workflow: "dev-story",
  ceiling: TOKEN_CEILING,
  source: tokenCeilingSource
@@ -9573,10 +9595,10 @@ async function runDevStory(deps, params) {
  let template;
  try {
  template = await deps.pack.getPrompt("dev-story");
- logger$15.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
+ logger$16.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$15.error({
+ logger$16.error({
  storyKey,
  error
  }, "Failed to retrieve dev-story prompt template");
@@ -9587,14 +9609,14 @@ async function runDevStory(deps, params) {
  storyContent = await readFile$1(storyFilePath, "utf-8");
  } catch (err) {
  if (err.code === "ENOENT") {
- logger$15.error({
+ logger$16.error({
  storyKey,
  storyFilePath
  }, "Story file not found");
  return makeFailureResult("story_file_not_found");
  }
  const error = err instanceof Error ? err.message : String(err);
- logger$15.error({
+ logger$16.error({
  storyKey,
  storyFilePath,
  error
@@ -9602,7 +9624,7 @@ async function runDevStory(deps, params) {
  return makeFailureResult(`story_file_read_error: ${error}`);
  }
  if (storyContent.trim().length === 0) {
- logger$15.error({
+ logger$16.error({
  storyKey,
  storyFilePath
  }, "Story file is empty");
@@ -9610,7 +9632,7 @@ async function runDevStory(deps, params) {
  }
  const staleStatus = detectDeprecatedStatusField(storyContent);
  if (staleStatus !== null) {
- logger$15.warn({
+ logger$16.warn({
  storyFilePath,
  staleStatus
  }, "Story spec contains deprecated Status field — stripped before dispatch (status is managed by Dolt work graph)");
@@ -9625,17 +9647,17 @@ async function runDevStory(deps, params) {
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
  if (testPatternDecisions.length > 0) {
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
- logger$15.debug({
+ logger$16.debug({
  storyKey,
  count: testPatternDecisions.length
  }, "Loaded test patterns from decision store");
  } else {
  testPatternsContent = DEFAULT_VITEST_PATTERNS;
- logger$15.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
+ logger$16.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
  }
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$15.warn({
+ logger$16.warn({
  storyKey,
  error
  }, "Failed to load test patterns — using defaults");
@@ -9649,7 +9671,7 @@ async function runDevStory(deps, params) {
  if (deps.repoMapInjector !== void 0) {
  const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
  repoContextContent = injection.text;
- logger$15.info({
+ logger$16.info({
  storyKey,
  repoMapTokens: Math.ceil(injection.text.length / 4),
  symbolCount: injection.symbolCount,
@@ -9661,7 +9683,7 @@ async function runDevStory(deps, params) {
  const findings = await getProjectFindings(deps.db);
  if (findings.length > 0) {
  priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
- logger$15.debug({
+ logger$16.debug({
  storyKey,
  findingsLen: findings.length
  }, "Injecting prior findings into dev-story prompt");
@@ -9681,7 +9703,7 @@ async function runDevStory(deps, params) {
  if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
  if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
  testPlanContent = parts.join("\n");
- logger$15.debug({ storyKey }, "Injecting test plan into dev-story prompt");
+ logger$16.debug({ storyKey }, "Injecting test plan into dev-story prompt");
  }
  } catch {}
  const sections = [
@@ -9732,7 +9754,7 @@ async function runDevStory(deps, params) {
  }
  ];
  const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
- logger$15.info({
+ logger$16.info({
  storyKey,
  tokenCount,
  ceiling: TOKEN_CEILING,
@@ -9749,12 +9771,14 @@ async function runDevStory(deps, params) {
  maxTurns: resolvedMaxTurns,
  ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
  ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
+ ...deps.maxContextTokens !== void 0 ? { maxContextTokens: deps.maxContextTokens } : {},
+ ...deps.optimizationDirectives !== void 0 ? { optimizationDirectives: deps.optimizationDirectives } : {},
  storyKey
  });
  dispatchResult = await handle.result;
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$15.error({
+ logger$16.error({
  storyKey,
  error
  }, "Dispatch threw an unexpected error");
@@ -9765,11 +9789,11 @@ async function runDevStory(deps, params) {
  output: dispatchResult.tokenEstimate.output
  };
  if (dispatchResult.status === "timeout") {
- logger$15.error({
+ logger$16.error({
  storyKey,
  durationMs: dispatchResult.durationMs
  }, "Dev-story dispatch timed out");
- if (dispatchResult.output.length > 0) logger$15.info({
+ if (dispatchResult.output.length > 0) logger$16.info({
  storyKey,
  partialOutput: dispatchResult.output.slice(0, 500)
  }, "Partial output before timeout");
@@ -9779,12 +9803,12 @@ async function runDevStory(deps, params) {
  };
  }
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
- logger$15.error({
+ logger$16.error({
  storyKey,
  exitCode: dispatchResult.exitCode,
  status: dispatchResult.status
  }, "Dev-story dispatch failed");
- if (dispatchResult.output.length > 0) logger$15.info({
+ if (dispatchResult.output.length > 0) logger$16.info({
  storyKey,
  partialOutput: dispatchResult.output.slice(0, 500)
  }, "Partial output from failed dispatch");
@@ -9796,7 +9820,7 @@ async function runDevStory(deps, params) {
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
  const details = dispatchResult.parseError ?? "parsed result was null";
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
- logger$15.error({
+ logger$16.error({
  storyKey,
  parseError: details,
  rawOutputSnippet: rawSnippet
@@ -9804,12 +9828,12 @@ async function runDevStory(deps, params) {
  let filesModified = [];
  try {
  filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
- if (filesModified.length > 0) logger$15.info({
+ if (filesModified.length > 0) logger$16.info({
  storyKey,
  fileCount: filesModified.length
  }, "Recovered files_modified from git status (YAML fallback)");
  } catch (err) {
- logger$15.warn({
+ logger$16.warn({
  storyKey,
  error: err instanceof Error ? err.message : String(err)
  }, "Failed to recover files_modified from git");
@@ -9826,7 +9850,7 @@ async function runDevStory(deps, params) {
  };
  }
  const parsed = dispatchResult.parsed;
- logger$15.info({
+ logger$16.info({
  storyKey,
  result: parsed.result,
  acMet: parsed.ac_met.length
@@ -9965,7 +9989,7 @@ function extractFilesInScope(storyContent) {
 
  //#endregion
  //#region src/modules/compiled-workflows/code-review.ts
- const logger$14 = createLogger("compiled-workflows:code-review");
+ const logger$15 = createLogger("compiled-workflows:code-review");
  /**
  * Default fallback result when dispatch fails or times out.
  * Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
@@ -10003,14 +10027,14 @@ function defaultFailResult(error, tokenUsage) {
  async function runCodeReview(deps, params) {
  const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
  const cwd = workingDirectory ?? process.cwd();
- logger$14.debug({
+ logger$15.debug({
  storyKey,
  storyFilePath,
  cwd,
  pipelineRunId
  }, "Starting code-review workflow");
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
- logger$14.info({
+ logger$15.info({
  workflow: "code-review",
  ceiling: TOKEN_CEILING,
  source: tokenCeilingSource
@@ -10020,7 +10044,7 @@ async function runCodeReview(deps, params) {
  template = await deps.pack.getPrompt("code-review");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$14.error({ error }, "Failed to retrieve code-review prompt template");
+ logger$15.error({ error }, "Failed to retrieve code-review prompt template");
  return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
  input: 0,
  output: 0
@@ -10031,7 +10055,7 @@ async function runCodeReview(deps, params) {
  storyContent = await readFile$1(storyFilePath, "utf-8");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$14.error({
+ logger$15.error({
  storyFilePath,
  error
  }, "Failed to read story file");
@@ -10051,12 +10075,12 @@ async function runCodeReview(deps, params) {
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
  if (scopedTotal <= TOKEN_CEILING) {
  gitDiffContent = scopedDiff;
- logger$14.debug({
+ logger$15.debug({
  fileCount: filesModified.length,
  tokenCount: scopedTotal
  }, "Using scoped file diff");
  } else {
- logger$14.warn({
+ logger$15.warn({
  estimatedTotal: scopedTotal,
  ceiling: TOKEN_CEILING,
  fileCount: filesModified.length
@@ -10070,7 +10094,7 @@ async function runCodeReview(deps, params) {
  const fullTotal = nonDiffTokens + countTokens(fullDiff);
  if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
  else {
- logger$14.warn({
+ logger$15.warn({
  estimatedTotal: fullTotal,
  ceiling: TOKEN_CEILING
  }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -10078,7 +10102,7 @@ async function runCodeReview(deps, params) {
  }
  }
  if (gitDiffContent.trim().length === 0) {
- logger$14.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
+ logger$15.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
  return {
  verdict: "SHIP_IT",
  issues: 0,
@@ -10094,7 +10118,7 @@ async function runCodeReview(deps, params) {
  if (deps.repoMapInjector !== void 0) {
  const injection = await deps.repoMapInjector.buildContext(storyContent, deps.maxRepoMapTokens ?? 2e3);
  repoContextContent = injection.text;
- logger$14.info({
+ logger$15.info({
  storyKey,
  repoMapTokens: Math.ceil(injection.text.length / 4),
  symbolCount: injection.symbolCount,
@@ -10114,7 +10138,7 @@ async function runCodeReview(deps, params) {
  const findings = await getProjectFindings(deps.db);
  if (findings.length > 0) {
  priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
- logger$14.debug({
+ logger$15.debug({
  storyKey,
  findingsLen: findings.length
  }, "Injecting prior findings into code-review prompt");
@@ -10153,11 +10177,11 @@ async function runCodeReview(deps, params) {
  }
  ];
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
- if (assembleResult.truncated) logger$14.warn({
+ if (assembleResult.truncated) logger$15.warn({
  storyKey,
  tokenCount: assembleResult.tokenCount
  }, "Code-review prompt truncated to fit token ceiling");
- logger$14.debug({
+ logger$15.debug({
  storyKey,
  tokenCount: assembleResult.tokenCount,
  truncated: assembleResult.truncated
@@ -10170,6 +10194,7 @@ async function runCodeReview(deps, params) {
  outputSchema: CodeReviewResultSchema,
  workingDirectory: deps.projectRoot,
  ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {},
+ ...deps.maxContextTokens !== void 0 ? { maxContextTokens: deps.maxContextTokens } : {},
  storyKey
  });
  let dispatchResult;
@@ -10177,7 +10202,7 @@ async function runCodeReview(deps, params) {
  dispatchResult = await handle.result;
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$14.error({
+ logger$15.error({
  storyKey,
  error
  }, "Code-review dispatch threw unexpected error");
@@ -10193,7 +10218,7 @@ async function runCodeReview(deps, params) {
  const rawOutput = dispatchResult.output ?? void 0;
  if (dispatchResult.status === "failed") {
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
- logger$14.warn({
+ logger$15.warn({
  storyKey,
  exitCode: dispatchResult.exitCode
  }, "Code-review dispatch failed");
@@ -10203,7 +10228,7 @@ async function runCodeReview(deps, params) {
  };
  }
  if (dispatchResult.status === "timeout") {
- logger$14.warn({ storyKey }, "Code-review dispatch timed out");
+ logger$15.warn({ storyKey }, "Code-review dispatch timed out");
  return {
  ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
  rawOutput
@@ -10211,7 +10236,7 @@ async function runCodeReview(deps, params) {
  }
  if (dispatchResult.parsed === null) {
  const details = dispatchResult.parseError ?? "No YAML block found in output";
- logger$14.warn({
+ logger$15.warn({
  storyKey,
  details
  }, "Code-review output schema validation failed");
@@ -10228,7 +10253,7 @@ async function runCodeReview(deps, params) {
  const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
  if (!parseResult.success) {
  const details = parseResult.error.message;
- logger$14.warn({
+ logger$15.warn({
  storyKey,
  details
  }, "Code-review output failed schema validation");
@@ -10243,13 +10268,13 @@ async function runCodeReview(deps, params) {
  };
  }
  const parsed = parseResult.data;
- if (parsed.agentVerdict !== parsed.verdict) logger$14.info({
+ if (parsed.agentVerdict !== parsed.verdict) logger$15.info({
  storyKey,
  agentVerdict: parsed.agentVerdict,
  pipelineVerdict: parsed.verdict,
  issues: parsed.issues
  }, "Pipeline overrode agent verdict based on issue severities");
- logger$14.info({
+ logger$15.info({
  storyKey,
  verdict: parsed.verdict,
  issues: parsed.issues
@@ -10274,14 +10299,14 @@ async function getArchConstraints$2(deps) {
  if (constraints.length === 0) return "";
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
  } catch (err) {
- logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
+ logger$15.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
  return "";
  }
  }
 
  //#endregion
  //#region src/modules/compiled-workflows/test-plan.ts
- const logger$13 = createLogger("compiled-workflows:test-plan");
+ const logger$14 = createLogger("compiled-workflows:test-plan");
  /** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
  const DEFAULT_TIMEOUT_MS = 3e5;
  /**
@@ -10293,12 +10318,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
  */
  async function runTestPlan(deps, params) {
  const { storyKey, storyFilePath, pipelineRunId } = params;
- logger$13.info({
+ logger$14.info({
  storyKey,
  storyFilePath
  }, "Starting compiled test-plan workflow");
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
- logger$13.info({
+ logger$14.info({
  workflow: "test-plan",
  ceiling: TOKEN_CEILING,
  source: tokenCeilingSource
@@ -10306,10 +10331,10 @@ async function runTestPlan(deps, params) {
  let template;
  try {
  template = await deps.pack.getPrompt("test-plan");
- logger$13.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
+ logger$14.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  error
  }, "Failed to retrieve test-plan prompt template");
@@ -10320,14 +10345,14 @@ async function runTestPlan(deps, params) {
  storyContent = await readFile$1(storyFilePath, "utf-8");
  } catch (err) {
  if (err.code === "ENOENT") {
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  storyFilePath
  }, "Story file not found for test planning");
  return makeTestPlanFailureResult("story_file_not_found");
  }
  const error = err instanceof Error ? err.message : String(err);
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  storyFilePath,
  error
@@ -10344,7 +10369,7 @@ async function runTestPlan(deps, params) {
  content: archConstraintsContent,
  priority: "optional"
  }], TOKEN_CEILING);
- logger$13.info({
+ logger$14.info({
  storyKey,
  tokenCount,
  ceiling: TOKEN_CEILING,
@@ -10365,7 +10390,7 @@ async function runTestPlan(deps, params) {
  dispatchResult = await handle.result;
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  error
  }, "Test-plan dispatch threw an unexpected error");
@@ -10376,7 +10401,7 @@ async function runTestPlan(deps, params) {
  output: dispatchResult.tokenEstimate.output
  };
  if (dispatchResult.status === "timeout") {
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  durationMs: dispatchResult.durationMs
  }, "Test-plan dispatch timed out");
@@ -10386,7 +10411,7 @@ async function runTestPlan(deps, params) {
  };
  }
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  exitCode: dispatchResult.exitCode,
  status: dispatchResult.status
@@ -10398,7 +10423,7 @@ async function runTestPlan(deps, params) {
  }
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
  const details = dispatchResult.parseError ?? "parsed result was null";
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  parseError: details
  }, "Test-plan YAML schema validation failed");
@@ -10421,19 +10446,19 @@ async function runTestPlan(deps, params) {
  }),
  rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
  });
- logger$13.info({
+ logger$14.info({
  storyKey,
  fileCount: parsed.test_files.length,
  categories: parsed.test_categories
  }, "Test plan stored in decision store");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$13.warn({
+ logger$14.warn({
  storyKey,
  error
  }, "Failed to store test plan in decision store — proceeding anyway");
  }
- logger$13.info({
+ logger$14.info({
  storyKey,
  result: parsed.result
  }, "Test-plan workflow completed");
@@ -10473,14 +10498,14 @@ async function getArchConstraints$1(deps) {
  if (constraints.length === 0) return "";
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
  } catch (err) {
- logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
+ logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
  return "";
  }
  }
 
  //#endregion
  //#region src/modules/compiled-workflows/test-expansion.ts
- const logger$12 = createLogger("compiled-workflows:test-expansion");
+ const logger$13 = createLogger("compiled-workflows:test-expansion");
  function defaultFallbackResult(error, tokenUsage) {
  return {
  expansion_priority: "low",
@@ -10510,14 +10535,14 @@ function defaultFallbackResult(error, tokenUsage) {
  async function runTestExpansion(deps, params) {
  const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
  const cwd = workingDirectory ?? process.cwd();
- logger$12.debug({
+ logger$13.debug({
  storyKey,
  storyFilePath,
  cwd,
  pipelineRunId
  }, "Starting test-expansion workflow");
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
- logger$12.info({
+ logger$13.info({
  workflow: "test-expansion",
  ceiling: TOKEN_CEILING,
  source: tokenCeilingSource
@@ -10527,7 +10552,7 @@ async function runTestExpansion(deps, params) {
  template = await deps.pack.getPrompt("test-expansion");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$12.warn({ error }, "Failed to retrieve test-expansion prompt template");
+ logger$13.warn({ error }, "Failed to retrieve test-expansion prompt template");
  return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
  input: 0,
  output: 0
@@ -10538,7 +10563,7 @@ async function runTestExpansion(deps, params) {
  storyContent = await readFile$1(storyFilePath, "utf-8");
  } catch (err) {
  const error = err instanceof Error ? err.message : String(err);
- logger$12.warn({
10566
+ logger$13.warn({
10542
10567
  storyFilePath,
10543
10568
  error
10544
10569
  }, "Failed to read story file");
@@ -10558,12 +10583,12 @@ async function runTestExpansion(deps, params) {
10558
10583
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
10559
10584
  if (scopedTotal <= TOKEN_CEILING) {
10560
10585
  gitDiffContent = scopedDiff;
10561
- logger$12.debug({
10586
+ logger$13.debug({
10562
10587
  fileCount: filesModified.length,
10563
10588
  tokenCount: scopedTotal
10564
10589
  }, "Using scoped file diff");
10565
10590
  } else {
10566
- logger$12.warn({
10591
+ logger$13.warn({
10567
10592
  estimatedTotal: scopedTotal,
10568
10593
  ceiling: TOKEN_CEILING,
10569
10594
  fileCount: filesModified.length
@@ -10571,7 +10596,7 @@ async function runTestExpansion(deps, params) {
10571
10596
  gitDiffContent = await getGitDiffStatSummary(cwd);
10572
10597
  }
10573
10598
  } catch (err) {
10574
- logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
10599
+ logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
10575
10600
  }
10576
10601
  const sections = [
10577
10602
  {
@@ -10591,11 +10616,11 @@ async function runTestExpansion(deps, params) {
10591
10616
  }
10592
10617
  ];
10593
10618
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
10594
- if (assembleResult.truncated) logger$12.warn({
10619
+ if (assembleResult.truncated) logger$13.warn({
10595
10620
  storyKey,
10596
10621
  tokenCount: assembleResult.tokenCount
10597
10622
  }, "Test-expansion prompt truncated to fit token ceiling");
10598
- logger$12.debug({
10623
+ logger$13.debug({
10599
10624
  storyKey,
10600
10625
  tokenCount: assembleResult.tokenCount,
10601
10626
  truncated: assembleResult.truncated
@@ -10615,7 +10640,7 @@ async function runTestExpansion(deps, params) {
10615
10640
  dispatchResult = await handle.result;
10616
10641
  } catch (err) {
10617
10642
  const error = err instanceof Error ? err.message : String(err);
10618
- logger$12.warn({
10643
+ logger$13.warn({
10619
10644
  storyKey,
10620
10645
  error
10621
10646
  }, "Test-expansion dispatch threw unexpected error");
@@ -10630,19 +10655,19 @@ async function runTestExpansion(deps, params) {
10630
10655
  };
10631
10656
  if (dispatchResult.status === "failed") {
10632
10657
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
10633
- logger$12.warn({
10658
+ logger$13.warn({
10634
10659
  storyKey,
10635
10660
  exitCode: dispatchResult.exitCode
10636
10661
  }, "Test-expansion dispatch failed");
10637
10662
  return defaultFallbackResult(errorMsg, tokenUsage);
10638
10663
  }
10639
10664
  if (dispatchResult.status === "timeout") {
10640
- logger$12.warn({ storyKey }, "Test-expansion dispatch timed out");
10665
+ logger$13.warn({ storyKey }, "Test-expansion dispatch timed out");
10641
10666
  return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
10642
10667
  }
10643
10668
  if (dispatchResult.parsed === null) {
10644
10669
  const details = dispatchResult.parseError ?? "No YAML block found in output";
10645
- logger$12.warn({
10670
+ logger$13.warn({
10646
10671
  storyKey,
10647
10672
  details
10648
10673
  }, "Test-expansion output has no parseable YAML");
@@ -10651,14 +10676,14 @@ async function runTestExpansion(deps, params) {
10651
10676
  const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
10652
10677
  if (!parseResult.success) {
10653
10678
  const details = parseResult.error.message;
10654
- logger$12.warn({
10679
+ logger$13.warn({
10655
10680
  storyKey,
10656
10681
  details
10657
10682
  }, "Test-expansion output failed schema validation");
10658
10683
  return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
10659
10684
  }
10660
10685
  const parsed = parseResult.data;
10661
- logger$12.info({
10686
+ logger$13.info({
10662
10687
  storyKey,
10663
10688
  expansion_priority: parsed.expansion_priority,
10664
10689
  coverage_gaps: parsed.coverage_gaps.length,
@@ -10683,7 +10708,7 @@ async function getArchConstraints(deps) {
10683
10708
  if (constraints.length === 0) return "";
10684
10709
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
10685
10710
  } catch (err) {
10686
- logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
10711
+ logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
10687
10712
  return "";
10688
10713
  }
10689
10714
  }
@@ -11115,7 +11140,7 @@ function detectConflictGroupsWithContracts(storyKeys, config, declarations) {
11115
11140
 
11116
11141
  //#endregion
11117
11142
  //#region src/cli/commands/health.ts
11118
- const logger$11 = createLogger("health-cmd");
11143
+ const logger$12 = createLogger("health-cmd");
11119
11144
  /** Default stall threshold in seconds — also used by supervisor default */
11120
11145
  const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
11121
11146
  /**
@@ -11440,7 +11465,7 @@ async function runHealthAction(options) {
11440
11465
  const msg = err instanceof Error ? err.message : String(err);
11441
11466
  if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
11442
11467
  else process.stderr.write(`Error: ${msg}\n`);
11443
- logger$11.error({ err }, "health action failed");
11468
+ logger$12.error({ err }, "health action failed");
11444
11469
  return 1;
11445
11470
  }
11446
11471
  }
@@ -11487,7 +11512,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
11487
11512
 
11488
11513
  //#endregion
11489
11514
  //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
11490
- const logger$10 = createLogger("implementation-orchestrator:seed");
11515
+ const logger$11 = createLogger("implementation-orchestrator:seed");
11491
11516
  /** Max chars for the architecture summary seeded into decisions */
11492
11517
  const MAX_ARCH_CHARS = 6e3;
11493
11518
  /** Max chars per epic shard (fallback when per-story extraction returns null) */
@@ -11521,12 +11546,12 @@ async function seedMethodologyContext(db, projectRoot) {
11521
11546
  const testCount = await seedTestPatterns(db, projectRoot);
11522
11547
  if (testCount === -1) result.skippedCategories.push("test-patterns");
11523
11548
  else result.decisionsCreated += testCount;
11524
- logger$10.info({
11549
+ logger$11.info({
11525
11550
  decisionsCreated: result.decisionsCreated,
11526
11551
  skippedCategories: result.skippedCategories
11527
11552
  }, "Methodology context seeding complete");
11528
11553
  } catch (err) {
11529
- logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
11554
+ logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
11530
11555
  }
11531
11556
  return result;
11532
11557
  }
@@ -11570,7 +11595,7 @@ async function seedArchitecture(db, projectRoot) {
11570
11595
  });
11571
11596
  count = 1;
11572
11597
  }
11573
- logger$10.debug({ count }, "Seeded architecture decisions");
11598
+ logger$11.debug({ count }, "Seeded architecture decisions");
11574
11599
  return count;
11575
11600
  }
11576
11601
  /**
@@ -11594,11 +11619,11 @@ async function seedEpicShards(db, projectRoot) {
11594
11619
  const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
11595
11620
  const storedHash = storedHashDecision?.value;
11596
11621
  if (storedHash === currentHash) {
11597
- logger$10.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
11622
+ logger$11.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
11598
11623
  return -1;
11599
11624
  }
11600
11625
  if (implementationDecisions.some((d) => d.category === "epic-shard")) {
11601
- logger$10.debug({
11626
+ logger$11.debug({
11602
11627
  storedHash,
11603
11628
  currentHash
11604
11629
  }, "Epics file changed — deleting stale epic-shard decisions");
@@ -11626,7 +11651,7 @@ async function seedEpicShards(db, projectRoot) {
11626
11651
  value: currentHash,
11627
11652
  rationale: "SHA-256 hash of epics file content for change detection"
11628
11653
  });
11629
- logger$10.debug({
11654
+ logger$11.debug({
11630
11655
  count,
11631
11656
  hash: currentHash
11632
11657
  }, "Seeded epic shard decisions");
@@ -11650,7 +11675,7 @@ async function seedTestPatterns(db, projectRoot) {
11650
11675
  value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
11651
11676
  rationale: "Detected from project configuration at orchestrator startup"
11652
11677
  });
11653
- logger$10.debug("Seeded test patterns decision");
11678
+ logger$11.debug("Seeded test patterns decision");
11654
11679
  return 1;
11655
11680
  }
11656
11681
  /**
@@ -11823,7 +11848,7 @@ function findArtifact(projectRoot, candidates) {
11823
11848
 
11824
11849
  //#endregion
11825
11850
  //#region src/modules/agent-dispatch/interface-change-detector.ts
11826
- const logger$9 = createLogger("interface-change-detector");
11851
+ const logger$10 = createLogger("interface-change-detector");
11827
11852
  /**
11828
11853
  * Extract exported interface and type names from TypeScript source content.
11829
11854
  *
@@ -11870,7 +11895,7 @@ function detectInterfaceChanges(options) {
11870
11895
  for (const name of names) allNames.add(name);
11871
11896
  sourceDirs.push(dirname$1(relPath));
11872
11897
  } catch {
11873
- logger$9.debug({
11898
+ logger$10.debug({
11874
11899
  absPath,
11875
11900
  storyKey
11876
11901
  }, "Could not read modified file for interface extraction");
@@ -11911,7 +11936,7 @@ function detectInterfaceChanges(options) {
11911
11936
  potentiallyAffectedTests: Array.from(affectedTests)
11912
11937
  };
11913
11938
  } catch (err) {
11914
- logger$9.warn({
11939
+ logger$10.warn({
11915
11940
  err,
11916
11941
  storyKey: options.storyKey
11917
11942
  }, "Interface change detection failed — skipping");
@@ -12107,7 +12132,10 @@ const TurnAnalysisSchema = z.object({
12107
12132
  contextDelta: z.number(),
12108
12133
  toolName: z.string().optional(),
12109
12134
  isContextSpike: z.boolean(),
12110
- childSpans: z.array(ChildSpanSummarySchema)
12135
+ childSpans: z.array(ChildSpanSummarySchema),
12136
+ taskType: z.string().optional(),
12137
+ phase: z.string().optional(),
12138
+ dispatchId: z.string().optional()
12111
12139
  });
12112
12140
  const SemanticCategorySchema = z.enum([
12113
12141
  "tool_outputs",
@@ -12169,7 +12197,10 @@ const EfficiencyScoreSchema = z.object({
12169
12197
  contextSpikeCount: z.number().int().nonnegative(),
12170
12198
  totalTurns: z.number().int().nonnegative(),
12171
12199
  perModelBreakdown: z.array(ModelEfficiencySchema),
12172
- perSourceBreakdown: z.array(SourceEfficiencySchema)
12200
+ perSourceBreakdown: z.array(SourceEfficiencySchema),
12201
+ dispatchId: z.string().optional(),
12202
+ taskType: z.string().optional(),
12203
+ phase: z.string().optional()
12173
12204
  });
12174
12205
  const RuleIdSchema = z.enum([
12175
12206
  "biggest_consumers",
@@ -12179,7 +12210,8 @@ const RuleIdSchema = z.enum([
12179
12210
  "context_growth_spike",
12180
12211
  "growing_categories",
12181
12212
  "cache_efficiency",
12182
- "per_model_comparison"
12213
+ "per_model_comparison",
12214
+ "cache_delta_regression"
12183
12215
  ]);
12184
12216
  const RecommendationSeveritySchema = z.enum([
12185
12217
  "critical",
@@ -12202,7 +12234,7 @@ const RecommendationSchema = z.object({
12202
12234
 
12203
12235
  //#endregion
12204
12236
  //#region src/modules/telemetry/adapter-persistence.ts
12205
- const logger$8 = createLogger("telemetry:adapter-persistence");
12237
+ const logger$9 = createLogger("telemetry:adapter-persistence");
12206
12238
  /**
12207
12239
  * Concrete DatabaseAdapter-backed telemetry persistence.
12208
12240
  *
@@ -12239,6 +12271,9 @@ var AdapterTelemetryPersistence = class {
12239
12271
  tool_name VARCHAR(128),
12240
12272
  is_context_spike BOOLEAN NOT NULL DEFAULT 0,
12241
12273
  child_spans_json TEXT NOT NULL DEFAULT '[]',
12274
+ task_type VARCHAR(64),
12275
+ phase VARCHAR(64),
12276
+ dispatch_id VARCHAR(64),
12242
12277
  PRIMARY KEY (story_key, span_id)
12243
12278
  )
12244
12279
  `);
@@ -12260,6 +12295,9 @@ var AdapterTelemetryPersistence = class {
12260
12295
  total_turns INTEGER NOT NULL DEFAULT 0,
12261
12296
  per_model_json TEXT NOT NULL DEFAULT '[]',
12262
12297
  per_source_json TEXT NOT NULL DEFAULT '[]',
12298
+ dispatch_id TEXT,
12299
+ task_type TEXT,
12300
+ phase TEXT,
12263
12301
  PRIMARY KEY (story_key, timestamp)
12264
12302
  )
12265
12303
  `);
@@ -12329,11 +12367,13 @@ var AdapterTelemetryPersistence = class {
12329
12367
  story_key, span_id, turn_number, name, timestamp, source, model,
12330
12368
  input_tokens, output_tokens, cache_read_tokens, fresh_tokens,
12331
12369
  cache_hit_rate, cost_usd, duration_ms, context_size, context_delta,
12332
- tool_name, is_context_spike, child_spans_json
12370
+ tool_name, is_context_spike, child_spans_json,
12371
+ task_type, phase, dispatch_id
12333
12372
  ) VALUES (
12334
12373
  ?, ?, ?, ?, ?, ?, ?,
12335
12374
  ?, ?, ?, ?,
12336
12375
  ?, ?, ?, ?, ?,
12376
+ ?, ?, ?,
12337
12377
  ?, ?, ?
12338
12378
  )`, [
12339
12379
  storyKey,
@@ -12354,11 +12394,14 @@ var AdapterTelemetryPersistence = class {
12354
12394
  turn.contextDelta,
12355
12395
  turn.toolName ?? null,
12356
12396
  turn.isContextSpike ? 1 : 0,
12357
- JSON.stringify(turn.childSpans)
12397
+ JSON.stringify(turn.childSpans),
12398
+ turn.taskType ?? null,
12399
+ turn.phase ?? null,
12400
+ turn.dispatchId ?? null
12358
12401
  ]);
12359
12402
  }
12360
12403
  });
12361
- logger$8.debug({
12404
+ logger$9.debug({
12362
12405
  storyKey,
12363
12406
  count: turns.length
12364
12407
  }, "Stored turn analysis");
@@ -12385,7 +12428,10 @@ var AdapterTelemetryPersistence = class {
12385
12428
  contextDelta: row.context_delta,
12386
12429
  toolName: row.tool_name ?? void 0,
12387
12430
  isContextSpike: row.is_context_spike === 1,
12388
- childSpans: JSON.parse(row.child_spans_json)
12431
+ childSpans: JSON.parse(row.child_spans_json),
12432
+ taskType: row.task_type ?? void 0,
12433
+ phase: row.phase ?? void 0,
12434
+ dispatchId: row.dispatch_id ?? void 0
12389
12435
  };
12390
12436
  return TurnAnalysisSchema.parse(raw);
12391
12437
  });
@@ -12396,12 +12442,14 @@ var AdapterTelemetryPersistence = class {
12396
12442
  story_key, timestamp, composite_score,
12397
12443
  cache_hit_sub_score, io_ratio_sub_score, context_management_sub_score,
12398
12444
  avg_cache_hit_rate, avg_io_ratio, context_spike_count, total_turns,
12399
- per_model_json, per_source_json
12445
+ per_model_json, per_source_json,
12446
+ dispatch_id, task_type, phase
12400
12447
  ) VALUES (
12401
12448
  ?, ?, ?,
12402
12449
  ?, ?, ?,
12403
12450
  ?, ?, ?, ?,
12404
- ?, ?
12451
+ ?, ?,
12452
+ ?, ?, ?
12405
12453
  )`, [
12406
12454
  score.storyKey,
12407
12455
  score.timestamp,
@@ -12414,17 +12462,17 @@ var AdapterTelemetryPersistence = class {
12414
12462
  score.contextSpikeCount,
12415
12463
  score.totalTurns,
12416
12464
  JSON.stringify(score.perModelBreakdown),
12417
- JSON.stringify(score.perSourceBreakdown)
12465
+ JSON.stringify(score.perSourceBreakdown),
12466
+ score.dispatchId ?? null,
12467
+ score.taskType ?? null,
12468
+ score.phase ?? null
12418
12469
  ]);
12419
- logger$8.debug({
12470
+ logger$9.debug({
12420
12471
  storyKey: score.storyKey,
12421
12472
  compositeScore: score.compositeScore
12422
12473
  }, "Stored efficiency score");
12423
12474
  }
12424
- async getEfficiencyScore(storyKey) {
12425
- const rows = await this._adapter.query(`SELECT * FROM efficiency_scores WHERE story_key = ? ORDER BY timestamp DESC LIMIT 1`, [storyKey]);
12426
- if (rows.length === 0) return null;
12427
- const row = rows[0];
12475
+ _rowToEfficiencyScore(row) {
12428
12476
  const raw = {
12429
12477
  storyKey: row.story_key,
12430
12478
  timestamp: row.timestamp,
@@ -12437,30 +12485,27 @@ var AdapterTelemetryPersistence = class {
12437
12485
  contextSpikeCount: row.context_spike_count,
12438
12486
  totalTurns: row.total_turns,
12439
12487
  perModelBreakdown: JSON.parse(row.per_model_json),
12440
- perSourceBreakdown: JSON.parse(row.per_source_json)
12488
+ perSourceBreakdown: JSON.parse(row.per_source_json),
12489
+ ...row.dispatch_id != null && { dispatchId: row.dispatch_id },
12490
+ ...row.task_type != null && { taskType: row.task_type },
12491
+ ...row.phase != null && { phase: row.phase }
12441
12492
  };
12442
12493
  return EfficiencyScoreSchema.parse(raw);
12443
12494
  }
12495
+ async getEfficiencyScore(storyKey) {
12496
+ const rows = await this._adapter.query(`SELECT * FROM efficiency_scores WHERE story_key = ? AND dispatch_id IS NULL ORDER BY timestamp DESC LIMIT 1`, [storyKey]);
12497
+ if (rows.length === 0) return null;
12498
+ return this._rowToEfficiencyScore(rows[0]);
12499
+ }
12444
12500
  async getEfficiencyScores(limit = 20) {
12445
- const rows = await this._adapter.query(`SELECT * FROM efficiency_scores ORDER BY timestamp DESC LIMIT ?`, [limit]);
12501
+ const rows = await this._adapter.query(`SELECT * FROM efficiency_scores WHERE dispatch_id IS NULL ORDER BY timestamp DESC LIMIT ?`, [limit]);
12446
12502
  if (rows.length === 0) return [];
12447
- return rows.map((row) => {
12448
- const raw = {
12449
- storyKey: row.story_key,
12450
- timestamp: row.timestamp,
12451
- compositeScore: row.composite_score,
12452
- cacheHitSubScore: row.cache_hit_sub_score,
12453
- ioRatioSubScore: row.io_ratio_sub_score,
12454
- contextManagementSubScore: row.context_management_sub_score,
12455
- avgCacheHitRate: row.avg_cache_hit_rate,
12456
- avgIoRatio: row.avg_io_ratio,
12457
- contextSpikeCount: row.context_spike_count,
12458
- totalTurns: row.total_turns,
12459
- perModelBreakdown: JSON.parse(row.per_model_json),
12460
- perSourceBreakdown: JSON.parse(row.per_source_json)
12461
- };
12462
- return EfficiencyScoreSchema.parse(raw);
12463
- });
12503
+ return rows.map((row) => this._rowToEfficiencyScore(row));
12504
+ }
12505
+ async getDispatchEfficiencyScores(storyKey) {
12506
+ const rows = await this._adapter.query(`SELECT * FROM efficiency_scores WHERE story_key = ? AND dispatch_id IS NOT NULL ORDER BY timestamp ASC`, [storyKey]);
12507
+ if (rows.length === 0) return [];
12508
+ return rows.map((row) => this._rowToEfficiencyScore(row));
12464
12509
  }
12465
12510
  async saveRecommendations(storyKey, recs) {
12466
12511
  if (recs.length === 0) return;
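Story-level and per-dispatch scores now share the efficiency_scores table, with dispatch_id doubling as the row-type discriminator: story-level rows leave it NULL, per-dispatch rows set it, which is why the two getters above filter on IS NULL / IS NOT NULL. A usage sketch (the adapter instance and story key are hypothetical):

// Sketch only — not part of the published diff. `adapter` and "30-5" are hypothetical.
const persistence = new AdapterTelemetryPersistence(adapter);
const storyScore = await persistence.getEfficiencyScore("30-5");           // latest dispatch_id IS NULL row, or null
const perDispatch = await persistence.getDispatchEfficiencyScores("30-5"); // dispatch_id IS NOT NULL rows, oldest first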
@@ -12488,7 +12533,7 @@ var AdapterTelemetryPersistence = class {
12488
12533
  ]);
12489
12534
  }
12490
12535
  });
12491
- logger$8.debug({
12536
+ logger$9.debug({
12492
12537
  storyKey,
12493
12538
  count: recs.length
12494
12539
  }, "Saved recommendations");
@@ -12567,7 +12612,7 @@ var AdapterTelemetryPersistence = class {
12567
12612
  ]);
12568
12613
  } catch {}
12569
12614
  });
12570
- logger$8.debug({
12615
+ logger$9.debug({
12571
12616
  storyKey,
12572
12617
  count: stats.length
12573
12618
  }, "Stored category stats");
@@ -12612,7 +12657,7 @@ var AdapterTelemetryPersistence = class {
12612
12657
  ]);
12613
12658
  } catch {}
12614
12659
  });
12615
- logger$8.debug({
12660
+ logger$9.debug({
12616
12661
  storyKey,
12617
12662
  count: consumers.length
12618
12663
  }, "Stored consumer stats");
@@ -12637,7 +12682,77 @@ var AdapterTelemetryPersistence = class {
12637
12682
  * Currently logs the span at debug level; no DB persistence.
12638
12683
  */
12639
12684
  recordSpan(span) {
12640
- logger$8.debug({ span }, "recordSpan");
12685
+ logger$9.debug({ span }, "recordSpan");
12686
+ }
12687
+ };
12688
+
12689
+ //#endregion
12690
+ //#region src/modules/telemetry/persistence.ts
12691
+ /**
12692
+ * Concrete DatabaseAdapter-backed telemetry persistence.
12693
+ *
12694
+ * Accepts a DatabaseAdapter and delegates all operations to
12695
+ * AdapterTelemetryPersistence. Provides schema initialization via initSchema().
12696
+ *
12697
+ * All methods forward to the underlying AdapterTelemetryPersistence instance.
12698
+ */
12699
+ var TelemetryPersistence = class {
12700
+ _impl;
12701
+ constructor(adapter) {
12702
+ this._impl = new AdapterTelemetryPersistence(adapter);
12703
+ }
12704
+ /**
12705
+ * Apply the telemetry schema DDL to the database.
12706
+ * Idempotent — uses CREATE TABLE IF NOT EXISTS.
12707
+ */
12708
+ async initSchema() {
12709
+ await this._impl.initSchema();
12710
+ }
12711
+ async storeTurnAnalysis(storyKey, turns) {
12712
+ return this._impl.storeTurnAnalysis(storyKey, turns);
12713
+ }
12714
+ async getTurnAnalysis(storyKey) {
12715
+ return this._impl.getTurnAnalysis(storyKey);
12716
+ }
12717
+ async storeEfficiencyScore(score) {
12718
+ return this._impl.storeEfficiencyScore(score);
12719
+ }
12720
+ async getEfficiencyScore(storyKey) {
12721
+ return this._impl.getEfficiencyScore(storyKey);
12722
+ }
12723
+ async getEfficiencyScores(limit = 20) {
12724
+ return this._impl.getEfficiencyScores(limit);
12725
+ }
12726
+ async getDispatchEfficiencyScores(storyKey) {
12727
+ return this._impl.getDispatchEfficiencyScores(storyKey);
12728
+ }
12729
+ async saveRecommendations(storyKey, recs) {
12730
+ return this._impl.saveRecommendations(storyKey, recs);
12731
+ }
12732
+ async getRecommendations(storyKey) {
12733
+ return this._impl.getRecommendations(storyKey);
12734
+ }
12735
+ async getAllRecommendations(limit = 20) {
12736
+ return this._impl.getAllRecommendations(limit);
12737
+ }
12738
+ async storeCategoryStats(storyKey, stats) {
12739
+ return this._impl.storeCategoryStats(storyKey, stats);
12740
+ }
12741
+ async getCategoryStats(storyKey) {
12742
+ return this._impl.getCategoryStats(storyKey);
12743
+ }
12744
+ async storeConsumerStats(storyKey, consumers) {
12745
+ return this._impl.storeConsumerStats(storyKey, consumers);
12746
+ }
12747
+ async getConsumerStats(storyKey) {
12748
+ return this._impl.getConsumerStats(storyKey);
12749
+ }
12750
+ /**
12751
+ * Record a named span with arbitrary attributes.
12752
+ * Currently logs the span at debug level; no DB persistence.
12753
+ */
12754
+ recordSpan(span) {
12755
+ this._impl.recordSpan(span);
12641
12756
  }
12642
12757
  };
12643
12758
 
@@ -12671,6 +12786,14 @@ var BatchBuffer = class extends EventEmitter {
12671
12786
  if (typeof this._timer.unref === "function") this._timer.unref();
12672
12787
  }
12673
12788
  /**
12789
+ * Trigger an immediate flush of buffered items without stopping the interval timer.
12790
+ * Use this to force-flush between pipeline phases while keeping the timer active.
12791
+ * No-op when the buffer is empty.
12792
+ */
12793
+ flush() {
12794
+ this._flush();
12795
+ }
12796
+ /**
12674
12797
  * Stop the interval timer and flush any remaining items.
12675
12798
  * Safe to call multiple times — subsequent calls are ignored.
12676
12799
  */
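The new flush() complements the existing stop(): both drain the buffer, but only stop() cancels the interval timer. A usage sketch (option names inferred from the fields shown above; `payload` is hypothetical):

// Sketch only — not part of the published diff.
const buffer = new BatchBuffer({ batchSize: 100, flushIntervalMs: 5e3 });
buffer.start();
buffer.push(payload); // `payload` is a hypothetical raw OTLP payload
buffer.flush();       // drain now; the interval timer keeps running
buffer.stop();        // at shutdown: drain and cancel the timer; safe to call twice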
@@ -12754,7 +12877,7 @@ function detectSource(body) {
12754
12877
 
12755
12878
  //#endregion
12756
12879
  //#region src/modules/telemetry/ingestion-server.ts
12757
- const logger$7 = createLogger("telemetry:ingestion-server");
12880
+ const logger$8 = createLogger("telemetry:ingestion-server");
12758
12881
  /**
12759
12882
  * Error thrown by IngestionServer for server lifecycle violations.
12760
12883
  * Extends AppError to align with the project-standard error-handling pattern
@@ -12778,6 +12901,8 @@ var IngestionServer = class {
12778
12901
  _flushIntervalMs;
12779
12902
  _buffer;
12780
12903
  _pendingBatches = new Set();
12904
+ /** Map from storyKey → DispatchContext, tracking active dispatches. */
12905
+ _activeDispatches = new Map();
12781
12906
  constructor(options = {}) {
12782
12907
  this._port = options.port ?? 4318;
12783
12908
  this._batchSize = options.batchSize ?? 100;
@@ -12790,11 +12915,37 @@ var IngestionServer = class {
12790
12915
  */
12791
12916
  setPipeline(pipeline) {
12792
12917
  if (this._server !== null) {
12793
- logger$7.warn("IngestionServer.setPipeline() called after start() — ignoring");
12918
+ logger$8.warn("IngestionServer.setPipeline() called after start() — ignoring");
12794
12919
  return;
12795
12920
  }
12796
12921
  this._initPipeline(pipeline);
12797
12922
  }
12923
+ /**
12924
+ * Register an active dispatch context for a story.
12925
+ * All OTLP payloads received while this context is active will be stamped
12926
+ * with the dispatch context so per-phase analysis is possible.
12927
+ *
12928
+ * @param storyKey - The story key being dispatched
12929
+ * @param context - The dispatch context to associate with this story
12930
+ */
12931
+ setActiveDispatch(storyKey, context) {
12932
+ this._activeDispatches.set(storyKey, context);
12933
+ logger$8.debug({
12934
+ storyKey,
12935
+ taskType: context.taskType,
12936
+ phase: context.phase
12937
+ }, "IngestionServer: active dispatch registered");
12938
+ }
12939
+ /**
12940
+ * Clear the active dispatch context for a story.
12941
+ * Should be called after the dispatch completes (success or failure).
12942
+ *
12943
+ * @param storyKey - The story key whose dispatch context should be cleared
12944
+ */
12945
+ clearActiveDispatch(storyKey) {
12946
+ this._activeDispatches.delete(storyKey);
12947
+ logger$8.debug({ storyKey }, "IngestionServer: active dispatch cleared");
12948
+ }
12798
12949
  _initPipeline(pipeline) {
12799
12950
  this._buffer = new BatchBuffer({
12800
12951
  batchSize: this._batchSize,
@@ -12802,7 +12953,7 @@ var IngestionServer = class {
12802
12953
  });
12803
12954
  this._buffer.on("flush", (items) => {
12804
12955
  const pending = pipeline.processBatch(items).catch((err) => {
12805
- logger$7.warn({ err }, "TelemetryPipeline.processBatch failed (batch flush)");
12956
+ logger$8.warn({ err }, "TelemetryPipeline.processBatch failed (batch flush)");
12806
12957
  });
12807
12958
  this._pendingBatches.add(pending);
12808
12959
  pending.then(() => {
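setActiveDispatch() and clearActiveDispatch() are meant to bracket a dispatch so every OTLP payload arriving in between gets stamped. A lifecycle sketch (the story key, context values, and runDispatch are hypothetical):

// Sketch only — not part of the published diff.
server.setActiveDispatch("30-5", {
  taskType: "dev-story",
  phase: "implementation",
  dispatchId: "disp-001"
});
try {
  await runDispatch(); // hypothetical stand-in for the actual agent dispatch
} finally {
  server.clearActiveDispatch("30-5"); // clear on success or failure
}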
@@ -12811,24 +12962,36 @@ var IngestionServer = class {
12811
12962
  });
12812
12963
  }
12813
12964
  /**
12965
+ * Force-flush buffered OTLP payloads and await all in-flight processBatch() calls.
12966
+ * Call this between story dispatches to ensure story N's telemetry (including
12967
+ * recommendations) is fully persisted before story N+1 begins.
12968
+ *
12969
+ * No-op when no TelemetryPipeline is wired.
12970
+ */
12971
+ async flushAndAwait() {
12972
+ if (this._buffer === void 0) return;
12973
+ this._buffer.flush();
12974
+ if (this._pendingBatches.size > 0) await Promise.all([...this._pendingBatches]);
12975
+ }
12976
+ /**
12814
12977
  * Start the HTTP ingestion server.
12815
12978
  * Resolves when the server is listening and ready to accept connections.
12816
12979
  */
12817
12980
  async start() {
12818
12981
  if (this._server !== null) {
12819
- logger$7.warn("IngestionServer.start() called while already started — ignoring");
12982
+ logger$8.warn("IngestionServer.start() called while already started — ignoring");
12820
12983
  return;
12821
12984
  }
12822
12985
  return new Promise((resolve$2, reject) => {
12823
12986
  const server = createServer(this._handleRequest.bind(this));
12824
12987
  server.on("error", (err) => {
12825
- logger$7.error({ err }, "IngestionServer failed to start");
12988
+ logger$8.error({ err }, "IngestionServer failed to start");
12826
12989
  reject(err);
12827
12990
  });
12828
12991
  server.listen(this._port, "127.0.0.1", () => {
12829
12992
  this._server = server;
12830
12993
  const addr = server.address();
12831
- logger$7.info({ port: addr.port }, "IngestionServer listening");
12994
+ logger$8.info({ port: addr.port }, "IngestionServer listening");
12832
12995
  this._buffer?.start();
12833
12996
  resolve$2();
12834
12997
  });
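Calling flushAndAwait() at a story boundary closes the gap between buffered payloads and the next dispatch. A sketch:

// Sketch only — not part of the published diff.
await server.flushAndAwait(); // story N's batches drained and processBatch() settled
// story N's scores and recommendations are now readable; safe to start story N+1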
@@ -12849,7 +13012,7 @@ var IngestionServer = class {
12849
13012
  server.close((err) => {
12850
13013
  if (err !== void 0 && err !== null) reject(err);
12851
13014
  else {
12852
- logger$7.info("IngestionServer stopped");
13015
+ logger$8.info("IngestionServer stopped");
12853
13016
  resolve$2();
12854
13017
  }
12855
13018
  });
@@ -12872,6 +13035,34 @@ var IngestionServer = class {
12872
13035
  OTEL_EXPORTER_OTLP_ENDPOINT: endpoint
12873
13036
  };
12874
13037
  }
13038
+ /**
13039
+ * Extract the substrate.story_key attribute from a raw OTLP payload body.
13040
+ * Looks in resourceSpans[].resource.attributes and resourceLogs[].resource.attributes.
13041
+ */
13042
+ _extractStoryKeyFromPayload(body) {
13043
+ if (!body || typeof body !== "object") return void 0;
13044
+ const payload = body;
13045
+ const extractFromResources = (resources) => {
13046
+ if (!Array.isArray(resources)) return void 0;
13047
+ for (const entry of resources) {
13048
+ if (!entry || typeof entry !== "object") continue;
13049
+ const resource = entry.resource;
13050
+ if (!resource || typeof resource !== "object") continue;
13051
+ const attrs = resource.attributes;
13052
+ if (!Array.isArray(attrs)) continue;
13053
+ for (const attr of attrs) {
13054
+ if (!attr || typeof attr !== "object") continue;
13055
+ const a = attr;
13056
+ if (a.key === "substrate.story_key") {
13057
+ const val = a.value;
13058
+ if (val && typeof val.stringValue === "string") return val.stringValue;
13059
+ }
13060
+ }
13061
+ }
13062
+ return void 0;
13063
+ };
13064
+ return extractFromResources(payload.resourceSpans) ?? extractFromResources(payload.resourceLogs);
13065
+ }
12875
13066
  _handleRequest(req, res) {
12876
13067
  if (req.url === "/health" && req.method === "GET") {
12877
13068
  res.writeHead(200, { "Content-Type": "application/json" });
@@ -12887,21 +13078,24 @@ var IngestionServer = class {
12887
13078
  });
12888
13079
  stream.on("end", () => {
12889
13080
  const bodyStr = Buffer.concat(chunks).toString("utf-8");
12890
- logger$7.trace({
13081
+ logger$8.trace({
12891
13082
  url: req.url,
12892
13083
  bodyLength: bodyStr.length
12893
13084
  }, "OTLP payload received");
12894
13085
  if (this._buffer !== void 0) try {
12895
13086
  const body = JSON.parse(bodyStr);
12896
13087
  const source = detectSource(body);
13088
+ const storyKey = this._extractStoryKeyFromPayload(body);
13089
+ const dispatchContext = storyKey !== void 0 ? this._activeDispatches.get(storyKey) : void 0;
12897
13090
  const payload = {
12898
13091
  body,
12899
13092
  source,
12900
- receivedAt: Date.now()
13093
+ receivedAt: Date.now(),
13094
+ ...dispatchContext !== void 0 && { dispatchContext }
12901
13095
  };
12902
13096
  this._buffer.push(payload);
12903
13097
  } catch (err) {
12904
- logger$7.warn({
13098
+ logger$8.warn({
12905
13099
  err,
12906
13100
  url: req.url
12907
13101
  }, "Failed to parse OTLP payload JSON — discarding");
@@ -12910,7 +13104,7 @@ var IngestionServer = class {
12910
13104
  res.end("{}");
12911
13105
  });
12912
13106
  stream.on("error", (err) => {
12913
- logger$7.warn({ err }, "Error reading OTLP request body");
13107
+ logger$8.warn({ err }, "Error reading OTLP request body");
12914
13108
  if (!res.headersSent) {
12915
13109
  res.writeHead(400);
12916
13110
  res.end("Bad Request");
@@ -12923,8 +13117,8 @@ var IngestionServer = class {
12923
13117
  //#region src/modules/telemetry/efficiency-scorer.ts
12924
13118
  var EfficiencyScorer = class {
12925
13119
  _logger;
12926
- constructor(logger$26) {
12927
- this._logger = logger$26;
13120
+ constructor(logger$27) {
13121
+ this._logger = logger$27;
12928
13122
  }
12929
13123
  /**
12930
13124
  * Compute an efficiency score for a story given its turn analyses.
@@ -13111,6 +13305,19 @@ var EfficiencyScorer = class {
13111
13305
 
13112
13306
  //#endregion
13113
13307
  //#region src/modules/telemetry/categorizer.ts
13308
+ /**
13309
+ * Tier 0: task-type to semantic category mapping.
13310
+ *
13311
+ * When a TurnAnalysis has a known taskType, the category is determined
13312
+ * directly from this map without consulting lower tiers.
13313
+ */
13314
+ const TASK_TYPE_CATEGORY_MAP = new Map([
13315
+ ["create-story", "system_prompts"],
13316
+ ["dev-story", "tool_outputs"],
13317
+ ["code-review", "conversation_history"],
13318
+ ["test-plan", "system_prompts"],
13319
+ ["minor-fixes", "tool_outputs"]
13320
+ ]);
13114
13321
  const EXACT_CATEGORY_MAP = new Map([
13115
13322
  ["read_file", "file_reads"],
13116
13323
  ["write_file", "tool_outputs"],
@@ -13171,16 +13378,21 @@ const ALL_CATEGORIES = [
13171
13378
  ];
13172
13379
  var Categorizer = class {
13173
13380
  _logger;
13174
- constructor(logger$26) {
13175
- this._logger = logger$26;
13381
+ constructor(logger$27) {
13382
+ this._logger = logger$27;
13176
13383
  }
13177
13384
  /**
13178
- * Classify an operation into a SemanticCategory using three-tier logic.
13385
+ * Classify an operation into a SemanticCategory using tiered logic.
13179
13386
  *
13180
13387
  * @param operationName - Span operation name (e.g. 'read_file', 'bash')
13181
13388
  * @param toolName - Optional tool name; non-empty value overrides fallback to tool_outputs
13389
+ * @param taskType - Optional task type (e.g. 'dev-story', 'code-review'); acts as Tier 0 (highest priority)
13182
13390
  */
13183
- classify(operationName, toolName) {
13391
+ classify(operationName, toolName, taskType) {
13392
+ if (taskType !== void 0 && taskType.length > 0) {
13393
+ const taskCategory = TASK_TYPE_CATEGORY_MAP.get(taskType);
13394
+ if (taskCategory !== void 0) return taskCategory;
13395
+ }
13184
13396
  const exact = EXACT_CATEGORY_MAP.get(operationName);
13185
13397
  if (exact !== void 0) return exact;
13186
13398
  for (const { pattern, category } of PREFIX_PATTERNS) if (pattern.test(operationName)) return category;
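With the Tier 0 short-circuit, a known taskType now outranks both the exact-name map and the prefix patterns, while an unknown taskType falls through unchanged. Illustrative calls (the `categorizer` instance is hypothetical):

// Sketch only — not part of the published diff.
categorizer.classify("read_file");                         // "file_reads" via EXACT_CATEGORY_MAP
categorizer.classify("read_file", void 0, "code-review");  // "conversation_history" — Tier 0 wins
categorizer.classify("read_file", void 0, "mystery-task"); // unknown taskType, falls back to "file_reads"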
@@ -13256,7 +13468,7 @@ var Categorizer = class {
13256
13468
  const half = Math.floor(turns.length / 2);
13257
13469
  for (let i = 0; i < turns.length; i++) {
13258
13470
  const turn = turns[i];
13259
- const cat = this.classify(turn.name, turn.toolName);
13471
+ const cat = this.classify(turn.name, turn.toolName, turn.taskType);
13260
13472
  const bucket = buckets.get(cat);
13261
13473
  const tokens = turn.inputTokens + turn.outputTokens;
13262
13474
  bucket.total += tokens;
@@ -13368,9 +13580,9 @@ function extractToolNameFromSpan(span) {
13368
13580
  var ConsumerAnalyzer = class {
13369
13581
  _categorizer;
13370
13582
  _logger;
13371
- constructor(categorizer, logger$26) {
13583
+ constructor(categorizer, logger$27) {
13372
13584
  this._categorizer = categorizer;
13373
- this._logger = logger$26;
13585
+ this._logger = logger$27;
13374
13586
  }
13375
13587
  /**
13376
13588
  * Group spans by consumer key, rank by totalTokens descending, and return
@@ -13504,8 +13716,8 @@ var ConsumerAnalyzer = class {
13504
13716
  //#region src/modules/telemetry/recommender.ts
13505
13717
  var Recommender = class {
13506
13718
  _logger;
13507
- constructor(logger$26) {
13508
- this._logger = logger$26;
13719
+ constructor(logger$27) {
13720
+ this._logger = logger$27;
13509
13721
  }
13510
13722
  /**
13511
13723
  * Run all 8 rules against the given context and return sorted recommendations.
@@ -13521,7 +13733,8 @@ var Recommender = class {
13521
13733
  ...this._runContextGrowthSpikes(context),
13522
13734
  ...this._runGrowingCategories(context),
13523
13735
  ...this._runCacheEfficiency(context),
13524
- ...this._runModelComparison(context)
13736
+ ...this._runModelComparison(context),
13737
+ ...this._runCacheDeltaRegression(context)
13525
13738
  ];
13526
13739
  const severityOrder = {
13527
13740
  critical: 0,
@@ -13810,6 +14023,40 @@ var Recommender = class {
13810
14023
  generatedAt
13811
14024
  }];
13812
14025
  }
14026
+ /**
14027
+ * Detect significant cache hit rate drops between consecutive dispatches.
14028
+ * >30pp drop → warning; >50pp drop → critical.
14029
+ * Requires dispatchScores with at least 2 entries; otherwise returns [].
14030
+ */
14031
+ _runCacheDeltaRegression(ctx) {
14032
+ const { dispatchScores, storyKey, sprintId, generatedAt } = ctx;
14033
+ if (dispatchScores === void 0 || dispatchScores.length < 2) return [];
14034
+ const sorted = [...dispatchScores].sort((a, b) => a.timestamp - b.timestamp);
14035
+ const recs = [];
14036
+ for (let i = 0; i < sorted.length - 1; i++) {
14037
+ const prev = sorted[i];
14038
+ const curr = sorted[i + 1];
14039
+ const deltaPP = (prev.avgCacheHitRate - curr.avgCacheHitRate) * 100;
14040
+ if (deltaPP <= 30) continue;
14041
+ const severity = deltaPP > 50 ? "critical" : "warning";
14042
+ const prevId = prev.dispatchId ?? `dispatch-${i}`;
14043
+ const currId = curr.dispatchId ?? `dispatch-${i + 1}`;
14044
+ const pairKey = `${prevId}→${currId}`;
14045
+ const id = this._makeId("cache_delta_regression", storyKey, pairKey, i);
14046
+ recs.push({
14047
+ id,
14048
+ storyKey,
14049
+ sprintId,
14050
+ ruleId: "cache_delta_regression",
14051
+ severity,
14052
+ title: `Cache regression between dispatches: ${pairKey}`,
14053
+ description: `Cache hit rate dropped ${deltaPP.toFixed(1)} percentage points between dispatch "${prevId}" (${(prev.avgCacheHitRate * 100).toFixed(1)}%) and "${currId}" (${(curr.avgCacheHitRate * 100).toFixed(1)}%). This likely indicates a prompt prefix change broke cache alignment. Investigate whether the system prompt or context prefix was restructured between these dispatches.`,
14054
+ actionTarget: pairKey,
14055
+ generatedAt
14056
+ });
14057
+ }
14058
+ return recs;
14059
+ }
13813
14060
  _isToolNameMatch(span) {
13814
14061
  const toolName = span.attributes?.["tool.name"];
13815
14062
  return toolName === "bash" || toolName === "execute_command" || span.name === "bash" || span.name === "execute_command" || span.operationName === "bash" || span.operationName === "execute_command";
@@ -13820,8 +14067,8 @@ var Recommender = class {
13820
14067
  //#region src/modules/telemetry/turn-analyzer.ts
13821
14068
  var TurnAnalyzer = class {
13822
14069
  _logger;
13823
- constructor(logger$26) {
13824
- this._logger = logger$26;
14070
+ constructor(logger$27) {
14071
+ this._logger = logger$27;
13825
14072
  }
13826
14073
  /**
13827
14074
  * Analyze a list of NormalizedSpan records and produce TurnAnalysis[].
@@ -13890,8 +14137,8 @@ var TurnAnalyzer = class {
13890
14137
  //#region src/modules/telemetry/log-turn-analyzer.ts
13891
14138
  var LogTurnAnalyzer = class {
13892
14139
  _logger;
13893
- constructor(logger$26) {
13894
- this._logger = logger$26;
14140
+ constructor(logger$27) {
14141
+ this._logger = logger$27;
13895
14142
  }
13896
14143
  /**
13897
14144
  * Analyze a list of NormalizedLog records and produce TurnAnalysis[].
@@ -13963,7 +14210,10 @@ var LogTurnAnalyzer = class {
13963
14210
  contextDelta: runningContext - prevContext,
13964
14211
  toolName: log$2.toolName,
13965
14212
  isContextSpike: false,
13966
- childSpans: []
14213
+ childSpans: [],
14214
+ ...log$2.taskType !== void 0 && { taskType: log$2.taskType },
14215
+ ...log$2.phase !== void 0 && { phase: log$2.phase },
14216
+ ...log$2.dispatchId !== void 0 && { dispatchId: log$2.dispatchId }
13967
14217
  };
13968
14218
  });
13969
14219
  const avg = turns.reduce((sum, t) => sum + t.inputTokens, 0) / turns.length;
@@ -14373,8 +14623,8 @@ function generateLogId() {
14373
14623
  */
14374
14624
  var TelemetryNormalizer = class {
14375
14625
  _logger;
14376
- constructor(logger$26) {
14377
- this._logger = logger$26;
14626
+ constructor(logger$27) {
14627
+ this._logger = logger$27;
14378
14628
  }
14379
14629
  /**
14380
14630
  * Normalize a raw OTLP trace payload into an array of `NormalizedSpan`.
@@ -14464,17 +14714,18 @@ var TelemetryNormalizer = class {
14464
14714
  * Normalize a raw OTLP log payload into an array of `NormalizedLog`.
14465
14715
  *
14466
14716
  * @param raw - Raw OTLP log payload (resourceLogs structure)
14717
+ * @param dispatchContext - Optional dispatch context to stamp on each log (Story 30-1)
14467
14718
  * @returns Array of normalized logs; empty on error or empty input
14468
14719
  */
14469
- normalizeLog(raw) {
14720
+ normalizeLog(raw, dispatchContext) {
14470
14721
  try {
14471
- return this._normalizeLogInternal(raw);
14722
+ return this._normalizeLogInternal(raw, dispatchContext);
14472
14723
  } catch (err) {
14473
14724
  this._logger.warn({ err }, "TelemetryNormalizer.normalizeLog: unexpected error");
14474
14725
  return [];
14475
14726
  }
14476
14727
  }
14477
- _normalizeLogInternal(raw) {
14728
+ _normalizeLogInternal(raw, dispatchContext) {
14478
14729
  if (!raw || typeof raw !== "object") return [];
14479
14730
  const payload = raw;
14480
14731
  if (!Array.isArray(payload.resourceLogs)) return [];
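Callers opt in per payload: when a context is supplied, every log normalized from that payload carries the three dispatch fields. A sketch (the payload and context values are hypothetical):

// Sketch only — not part of the published diff.
const logs = normalizer.normalizeLog(otlpLogPayload, {
  taskType: "test-plan",
  phase: "verification",
  dispatchId: "disp-002"
});
// each NormalizedLog in `logs` now includes taskType, phase, and dispatchId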
@@ -14488,7 +14739,7 @@ var TelemetryNormalizer = class {
14488
14739
  for (const record of scopeLog.logRecords) {
14489
14740
  if (!record) continue;
14490
14741
  try {
14491
- const normalized = this._normalizeOneLog(record, resourceAttrs);
14742
+ const normalized = this._normalizeOneLog(record, resourceAttrs, dispatchContext);
14492
14743
  results.push(normalized);
14493
14744
  } catch (err) {
14494
14745
  this._logger.warn({ err }, "Failed to normalize log record — skipping");
@@ -14498,7 +14749,7 @@ var TelemetryNormalizer = class {
14498
14749
  }
14499
14750
  return results;
14500
14751
  }
14501
- _normalizeOneLog(record, resourceAttrs) {
14752
+ _normalizeOneLog(record, resourceAttrs, dispatchContext) {
14502
14753
  const logId = record.logRecordId ?? generateLogId();
14503
14754
  const timestamp = normalizeTimestamp(record.timeUnixNano);
14504
14755
  const bodyStr = extractBodyString(record.body);
@@ -14526,14 +14777,19 @@ var TelemetryNormalizer = class {
14526
14777
  cacheReadTokens: tokens.cacheRead,
14527
14778
  costUsd,
14528
14779
  model,
14529
- storyKey
14780
+ storyKey,
14781
+ ...dispatchContext !== void 0 && {
14782
+ taskType: dispatchContext.taskType,
14783
+ phase: dispatchContext.phase,
14784
+ dispatchId: dispatchContext.dispatchId
14785
+ }
14530
14786
  };
14531
14787
  }
14532
14788
  };
14533
14789
 
14534
14790
  //#endregion
14535
14791
  //#region src/modules/telemetry/telemetry-pipeline.ts
14536
- const logger$6 = createLogger("telemetry:pipeline");
14792
+ const logger$7 = createLogger("telemetry:pipeline");
14537
14793
  /**
14538
14794
  * Wires together the full OTLP analysis and persistence pipeline.
14539
14795
  *
@@ -14574,7 +14830,7 @@ var TelemetryPipeline = class {
14574
14830
  */
14575
14831
  async processBatch(items) {
14576
14832
  if (items.length === 0) return;
14577
- logger$6.debug({ count: items.length }, "TelemetryPipeline.processBatch start");
14833
+ logger$7.debug({ count: items.length }, "TelemetryPipeline.processBatch start");
14578
14834
  const allSpans = [];
14579
14835
  const allLogs = [];
14580
14836
  for (const item of items) {
@@ -14582,21 +14838,21 @@ var TelemetryPipeline = class {
14582
14838
  const spans = this._normalizer.normalizeSpan(item.body);
14583
14839
  allSpans.push(...spans);
14584
14840
  } catch (err) {
14585
- logger$6.warn({ err }, "TelemetryPipeline: normalizeSpan failed — skipping payload");
14841
+ logger$7.warn({ err }, "TelemetryPipeline: normalizeSpan failed — skipping payload");
14586
14842
  }
14587
14843
  try {
14588
- const logs = this._normalizer.normalizeLog(item.body);
14844
+ const logs = this._normalizer.normalizeLog(item.body, item.dispatchContext);
14589
14845
  allLogs.push(...logs);
14590
14846
  } catch (err) {
14591
- logger$6.warn({ err }, "TelemetryPipeline: normalizeLog failed — skipping payload");
14847
+ logger$7.warn({ err }, "TelemetryPipeline: normalizeLog failed — skipping payload");
14592
14848
  }
14593
14849
  }
14594
- logger$6.debug({
14850
+ logger$7.debug({
14595
14851
  spans: allSpans.length,
14596
14852
  logs: allLogs.length
14597
14853
  }, "TelemetryPipeline: normalized batch");
14598
14854
  if (allSpans.length === 0 && allLogs.length === 0) {
14599
- logger$6.debug("TelemetryPipeline: no spans or logs normalized from batch");
14855
+ logger$7.debug("TelemetryPipeline: no spans or logs normalized from batch");
14600
14856
  return;
14601
14857
  }
14602
14858
  const unknownStoryKey = "__unknown__";
@@ -14621,7 +14877,7 @@ var TelemetryPipeline = class {
14621
14877
  if (storyKey === unknownStoryKey) {
14622
14878
  const spanCount = spansByStory.get(unknownStoryKey)?.length ?? 0;
14623
14879
  const logCount = logsByStory.get(unknownStoryKey)?.length ?? 0;
14624
- logger$6.debug({
14880
+ logger$7.debug({
14625
14881
  spanCount,
14626
14882
  logCount
14627
14883
  }, "TelemetryPipeline: data without storyKey — skipping analysis");
@@ -14636,13 +14892,27 @@ var TelemetryPipeline = class {
14636
14892
  if (spans.length > 0) await this._processStory(storyKey, spans, mergedTurns);
14637
14893
  else await this._processStoryFromTurns(storyKey, mergedTurns);
14638
14894
  } catch (err) {
14639
- logger$6.warn({
14895
+ logger$7.warn({
14640
14896
  err,
14641
14897
  storyKey
14642
14898
  }, "TelemetryPipeline: story processing failed — skipping");
14643
14899
  }
14644
14900
  }
14645
- logger$6.debug({ storyCount: allStoryKeys.size }, "TelemetryPipeline.processBatch complete");
14901
+ logger$7.debug({ storyCount: allStoryKeys.size }, "TelemetryPipeline.processBatch complete");
14902
+ }
14903
+ /**
14904
+ * Group turns by dispatchId for per-dispatch scoring.
14905
+ * Only turns with a non-empty dispatchId are included.
14906
+ */
14907
+ _groupTurnsByDispatchId(turns) {
14908
+ const groups = new Map();
14909
+ for (const turn of turns) {
14910
+ if (turn.dispatchId === void 0 || turn.dispatchId === "") continue;
14911
+ const existing = groups.get(turn.dispatchId);
14912
+ if (existing !== void 0) existing.push(turn);
14913
+ else groups.set(turn.dispatchId, [turn]);
14914
+ }
14915
+ return groups;
14646
14916
  }
14647
14917
  /**
14648
14918
  * Merge span-derived and log-derived turns, deduplicating by spanId.
@@ -14667,7 +14937,24 @@ var TelemetryPipeline = class {
14667
14937
  const turns = mergedTurns;
14668
14938
  const categories = this._categorizer.computeCategoryStats(spans, turns);
14669
14939
  const consumers = this._consumerAnalyzer.analyze(spans);
14670
- const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
14940
+ const baseTimestamp = Date.now();
14941
+ const storyScore = this._efficiencyScorer.score(storyKey, turns);
14942
+ const efficiencyScore = {
14943
+ ...storyScore,
14944
+ timestamp: baseTimestamp
14945
+ };
14946
+ const dispatchGroups = this._groupTurnsByDispatchId(turns);
14947
+ const dispatchScores = Array.from(dispatchGroups.entries()).map(([dispatchId, dispatchTurns], idx) => {
14948
+ const firstTurn = dispatchTurns[0];
14949
+ const scored = this._efficiencyScorer.score(storyKey, dispatchTurns);
14950
+ return {
14951
+ ...scored,
14952
+ timestamp: baseTimestamp + 1 + idx,
14953
+ dispatchId,
14954
+ taskType: firstTurn?.taskType,
14955
+ phase: firstTurn?.phase
14956
+ };
14957
+ });
14671
14958
  const generatedAt = new Date().toISOString();
14672
14959
  const context = {
14673
14960
  storyKey,
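The timestamp offsets here are synthetic bookkeeping, not wall-clock readings: bumping each per-dispatch row by baseTimestamp + 1 + idx keeps every row unique under the (story_key, timestamp) primary key while preserving dispatch order. For example:

// Sketch only — not part of the published diff; dispatch ids are hypothetical.
// story-level row:           timestamp = baseTimestamp
// dispatch "disp-a" (idx 0): timestamp = baseTimestamp + 1
// dispatch "disp-b" (idx 1): timestamp = baseTimestamp + 2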
@@ -14676,69 +14963,236 @@ var TelemetryPipeline = class {
14676
14963
  categories,
14677
14964
  consumers,
14678
14965
  efficiencyScore,
14679
- allSpans: spans
14966
+ allSpans: spans,
14967
+ dispatchScores
14680
14968
  };
14681
14969
  const recommendations = this._recommender.analyze(context);
14682
- await Promise.all([
14683
- turns.length > 0 ? this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$6.warn({
14684
- err,
14685
- storyKey
14686
- }, "Failed to store turn analysis")) : Promise.resolve(),
14687
- categories.length > 0 ? this._persistence.storeCategoryStats(storyKey, categories).catch((err) => logger$6.warn({
14688
- err,
14689
- storyKey
14690
- }, "Failed to store category stats")) : Promise.resolve(),
14691
- consumers.length > 0 ? this._persistence.storeConsumerStats(storyKey, consumers).catch((err) => logger$6.warn({
14692
- err,
14693
- storyKey
14694
- }, "Failed to store consumer stats")) : Promise.resolve(),
14695
- this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$6.warn({
14696
- err,
14697
- storyKey
14698
- }, "Failed to store efficiency score")),
14699
- recommendations.length > 0 ? this._persistence.saveRecommendations(storyKey, recommendations).catch((err) => logger$6.warn({
14700
- err,
14701
- storyKey
14702
- }, "Failed to save recommendations")) : Promise.resolve()
14703
- ]);
14704
- logger$6.info({
14970
+ await this._persistStoryData(storyKey, {
14971
+ turns,
14972
+ efficiencyScore,
14973
+ categoryStats: categories,
14974
+ consumerStats: consumers,
14975
+ recommendations,
14976
+ dispatchScores
14977
+ });
14978
+ logger$7.info({
14705
14979
  storyKey,
14706
14980
  turns: turns.length,
14707
14981
  compositeScore: efficiencyScore.compositeScore,
14708
- recommendations: recommendations.length
14982
+ recommendations: recommendations.length,
14983
+ dispatchScores: dispatchScores.length
14709
14984
  }, "TelemetryPipeline: story analysis complete");
14710
14985
  }
14711
14986
  /**
14712
- * Log-only analysis path (AC3, AC6): processes turns from LogTurnAnalyzer
14713
- * through efficiency scoring, category stats, and persistence.
14987
+ * Log-only analysis path: processes turns from LogTurnAnalyzer through full
14988
+ * analysis; persistence mirrors the span path via _persistStoryData (Story 30-4).
14714
14989
  */
14715
14990
  async _processStoryFromTurns(storyKey, turns) {
14716
14991
  if (turns.length === 0) return;
14717
- const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
14992
+ const baseTimestamp = Date.now();
14993
+ const storyScore = this._efficiencyScorer.score(storyKey, turns);
14994
+ const efficiencyScore = {
14995
+ ...storyScore,
14996
+ timestamp: baseTimestamp
14997
+ };
14718
14998
  const categoryStats = this._categorizer.computeCategoryStatsFromTurns(turns);
14999
+ const consumerStats = this._consumerAnalyzer.analyzeFromTurns(turns);
15000
+ const dispatchGroups = this._groupTurnsByDispatchId(turns);
15001
+ const dispatchScores = Array.from(dispatchGroups.entries()).map(([dispatchId, dispatchTurns], idx) => {
15002
+ const firstTurn = dispatchTurns[0];
15003
+ const scored = this._efficiencyScorer.score(storyKey, dispatchTurns);
15004
+ return {
15005
+ ...scored,
15006
+ timestamp: baseTimestamp + 1 + idx,
15007
+ dispatchId,
15008
+ taskType: firstTurn?.taskType,
15009
+ phase: firstTurn?.phase
15010
+ };
15011
+ });
15012
+ const generatedAt = new Date().toISOString();
15013
+ const context = {
15014
+ storyKey,
15015
+ generatedAt,
15016
+ turns,
15017
+ categories: categoryStats,
15018
+ consumers: consumerStats,
15019
+ efficiencyScore,
15020
+ allSpans: [],
15021
+ dispatchScores
15022
+ };
15023
+ const recommendations = this._recommender.analyze(context);
15024
+ await this._persistStoryData(storyKey, {
15025
+ turns,
15026
+ efficiencyScore,
15027
+ categoryStats,
15028
+ consumerStats,
15029
+ recommendations,
15030
+ dispatchScores
15031
+ });
15032
+ logger$7.info({
15033
+ storyKey,
15034
+ turns: turns.length,
15035
+ compositeScore: efficiencyScore.compositeScore,
15036
+ categories: categoryStats.length,
15037
+ recommendations: recommendations.length,
15038
+ dispatchScores: dispatchScores.length
15039
+ }, "TelemetryPipeline: story analysis from turns complete");
15040
+ }
15041
+ /**
15042
+ * Shared persistence helper — called by both _processStory and _processStoryFromTurns.
15043
+ * All 5 persistence calls are made here with individual error guards so a single
15044
+ * failure does not abort the others (AC5).
15045
+ */
15046
+ async _persistStoryData(storyKey, data) {
15047
+ const { turns, efficiencyScore, categoryStats, consumerStats, recommendations, dispatchScores } = data;
14719
15048
  await Promise.all([
14720
- this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$6.warn({
15049
+ turns.length > 0 ? this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$7.warn({
14721
15050
  err,
14722
15051
  storyKey
14723
- }, "Failed to store turn analysis")),
14724
- this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$6.warn({
15052
+ }, "Failed to store turn analysis")) : Promise.resolve(),
15053
+ this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$7.warn({
14725
15054
  err,
14726
15055
  storyKey
14727
15056
  }, "Failed to store efficiency score")),
14728
- categoryStats.length > 0 ? this._persistence.storeCategoryStats(storyKey, categoryStats).catch((err) => logger$6.warn({
15057
+ categoryStats.length > 0 ? this._persistence.storeCategoryStats(storyKey, categoryStats).catch((err) => logger$7.warn({
15058
+ err,
15059
+ storyKey
15060
+ }, "Failed to store category stats")) : Promise.resolve(),
15061
+ consumerStats.length > 0 ? this._persistence.storeConsumerStats(storyKey, consumerStats).catch((err) => logger$7.warn({
15062
+ err,
15063
+ storyKey
15064
+ }, "Failed to store consumer stats")) : Promise.resolve(),
15065
+ recommendations.length > 0 ? this._persistence.saveRecommendations(storyKey, recommendations).catch((err) => logger$7.warn({
14729
15066
  err,
14730
15067
  storyKey
14731
- }, "Failed to store category stats")) : Promise.resolve()
15068
+ }, "Failed to save recommendations")) : Promise.resolve(),
15069
+ ...dispatchScores.map((ds) => this._persistence.storeEfficiencyScore(ds).catch((err) => logger$7.warn({
15070
+ err,
15071
+ storyKey,
15072
+ dispatchId: ds.dispatchId
15073
+ }, "Failed to store dispatch efficiency score")))
14732
15074
  ]);
14733
- logger$6.info({
14734
- storyKey,
14735
- turns: turns.length,
14736
- compositeScore: efficiencyScore.compositeScore,
14737
- categories: categoryStats.length
14738
- }, "TelemetryPipeline: story analysis from turns complete");
14739
15075
  }
14740
15076
  };
14741
15077
 
15078
+ //#endregion
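The guard pattern in _persistStoryData is worth distilling: each write carries its own .catch, so Promise.all always settles and one failing store cannot sink its siblings. A minimal sketch of the same shape, with illustrative names (persistAll is not part of the package):

// Minimal sketch: independent error guards inside Promise.all.
async function persistAll(storyKey: string, writes: Array<Promise<void>>): Promise<void> {
  await Promise.all(writes.map((w) => w.catch((err) => {
    // Best-effort: log and continue; the remaining writes still run to completion.
    console.warn("persist step failed", { err, storyKey });
  })));
}

// Skipped writes become Promise.resolve(), as in the diff above:
// await persistAll("30-4", [
//   turns.length > 0 ? persistence.storeTurnAnalysis("30-4", turns) : Promise.resolve(),
//   persistence.storeEfficiencyScore(score),
// ]);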
15079
+ //#region src/modules/telemetry/telemetry-advisor.ts
15080
+ const logger$6 = createLogger("telemetry-advisor");
15081
+ /**
15082
+ * Reads telemetry efficiency data to support retry gate decisions.
15083
+ */
15084
+ var TelemetryAdvisor = class {
15085
+ _persistence;
15086
+ constructor(deps) {
15087
+ this._persistence = new TelemetryPersistence(deps.db);
15088
+ }
15089
+ /**
15090
+ * Retrieve the efficiency profile for a story.
15091
+ *
15092
+ * Returns null when no efficiency score has been persisted for the story
15093
+ * (e.g. first run, telemetry disabled, or no turns recorded).
15094
+ *
15095
+ * @param storyKey - The story identifier (e.g. "30-5")
15096
+ * @returns EfficiencyProfile or null
15097
+ */
15098
+ async getEfficiencyProfile(storyKey) {
15099
+ try {
15100
+ const score = await this._persistence.getEfficiencyScore(storyKey);
15101
+ if (score === null) {
15102
+ logger$6.debug({ storyKey }, "No efficiency score found for story");
15103
+ return null;
15104
+ }
15105
+ return {
15106
+ storyKey: score.storyKey,
15107
+ compositeScore: score.compositeScore,
15108
+ cacheHitSubScore: score.cacheHitSubScore,
15109
+ ioRatioSubScore: score.ioRatioSubScore,
15110
+ contextManagementSubScore: score.contextManagementSubScore,
15111
+ totalTurns: score.totalTurns,
15112
+ contextSpikeCount: score.contextSpikeCount
15113
+ };
15114
+ } catch (err) {
15115
+ logger$6.warn({
15116
+ err,
15117
+ storyKey
15118
+ }, "Failed to retrieve efficiency score");
15119
+ return null;
15120
+ }
15121
+ }
15122
+ /**
15123
+ * Aggregate recommendations across all completed stories in a run.
15124
+ *
15125
+ * Queries getRecommendations() for each storyKey in parallel, merges results,
15126
+ * deduplicates by recommendation id (first occurrence wins), and sorts by
15127
+ * severity: critical → warning → info.
15128
+ *
15129
+ * Returns an empty array when completedStoryKeys is empty or no recommendations exist.
15130
+ *
15131
+ * @param completedStoryKeys - Story keys that have already finished in this run
15132
+ * @returns Merged, deduplicated, sorted recommendations
15133
+ */
15134
+ async getRecommendationsForRun(completedStoryKeys) {
15135
+ if (completedStoryKeys.length === 0) return [];
15136
+ try {
15137
+ const results = await Promise.all(completedStoryKeys.map((key) => this._persistence.getRecommendations(key)));
15138
+ const seen = new Set();
15139
+ const merged = [];
15140
+ for (const recs of results) for (const rec of recs) if (!seen.has(rec.id)) {
15141
+ seen.add(rec.id);
15142
+ merged.push(rec);
15143
+ }
15144
+ const severityOrder = {
15145
+ critical: 0,
15146
+ warning: 1,
15147
+ info: 2
15148
+ };
15149
+ merged.sort((a, b) => (severityOrder[a.severity] ?? 3) - (severityOrder[b.severity] ?? 3));
15150
+ return merged;
15151
+ } catch (err) {
15152
+ logger$6.warn({ err }, "Failed to retrieve recommendations for run — returning empty");
15153
+ return [];
15154
+ }
15155
+ }
15156
+ /**
15157
+ * Format a list of recommendations as optimization directives for prompt injection.
15158
+ *
15159
+ * Filters to only critical and warning items, formats each as a natural-language
15160
+ * OPTIMIZATION line, and truncates to a maximum of 2000 characters at a word boundary.
15161
+ *
15162
+ * Returns an empty string when no critical or warning recommendations are present.
15163
+ *
15164
+ * @param recommendations - Recommendations to format (typically from getRecommendationsForRun)
15165
+ * @returns Formatted directives string, or "" if nothing actionable
15166
+ */
15167
+ formatOptimizationDirectives(recommendations) {
15168
+ const MAX_CHARS$1 = 2e3;
15169
+ const actionable = recommendations.filter((r) => r.severity === "critical" || r.severity === "warning");
15170
+ if (actionable.length === 0) return "";
15171
+ const lines = actionable.map((r) => `OPTIMIZATION (${r.severity}): ${r.title}. ${r.description}`);
15172
+ const full = lines.join("\n");
15173
+ if (full.length <= MAX_CHARS$1) {
15174
+ logger$6.debug({
15175
+ count: actionable.length,
15176
+ chars: full.length
15177
+ }, "Formatting optimization directives");
15178
+ return full;
15179
+ }
15180
+ const cutAt = full.lastIndexOf(" ", MAX_CHARS$1);
15181
+ const truncated = (cutAt > 0 ? full.slice(0, cutAt) : full.slice(0, MAX_CHARS$1)) + "…";
15182
+ logger$6.debug({
15183
+ count: actionable.length,
15184
+ chars: truncated.length
15185
+ }, "Optimization directives truncated to budget");
15186
+ return truncated;
15187
+ }
15188
+ };
15189
+ /**
15190
+ * Create a TelemetryAdvisor for the given database adapter.
15191
+ */
15192
+ function createTelemetryAdvisor(deps) {
15193
+ return new TelemetryAdvisor(deps);
15194
+ }
15195
+
14742
15196
  //#endregion
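Taken together, the new TelemetryAdvisor surface is small. The sketch below shows the intended flow under stated assumptions: that the package's public entry re-exports createDatabaseAdapter and createTelemetryAdvisor (as the updated export list at the end of this diff suggests) and that story keys like "30-1" exist.

// Sketch, not package documentation: wire an advisor over the database adapter.
import { createDatabaseAdapter, createTelemetryAdvisor } from "substrate-ai";

const db = createDatabaseAdapter({ backend: "auto" });
const advisor = createTelemetryAdvisor({ db });

// Null on first run, when telemetry is disabled, or when no turns were recorded.
const profile = await advisor.getEfficiencyProfile("30-5");
if (profile !== null) console.log("composite:", profile.compositeScore);

// Merged across stories, deduplicated by id, sorted critical -> warning -> info.
const recs = await advisor.getRecommendationsForRun(["30-1", "30-2"]);

// "" when nothing is critical/warning; otherwise OPTIMIZATION lines capped near 2000 chars.
const directives = advisor.formatOptimizationDirectives(recs);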
14743
15197
  //#region src/modules/implementation-orchestrator/orchestrator-impl.ts
14744
15198
  function createPauseGate() {
@@ -14797,7 +15251,8 @@ function wgStatusForPhase(phase) {
14797
15251
  */
14798
15252
  function createImplementationOrchestrator(deps) {
14799
15253
  const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer, repoMapInjector, maxRepoMapTokens } = deps;
14800
- const logger$26 = createLogger("implementation-orchestrator");
15254
+ const logger$27 = createLogger("implementation-orchestrator");
15255
+ const telemetryAdvisor = db !== void 0 ? createTelemetryAdvisor({ db }) : void 0;
14801
15256
  const wgRepo = new WorkGraphRepository(db);
14802
15257
  const _wgInProgressWritten = new Set();
14803
15258
  let _state = "IDLE";
@@ -14845,7 +15300,7 @@ function createImplementationOrchestrator(deps) {
14845
15300
  const nowMs = Date.now();
14846
15301
  for (const [phase, startMs] of starts) {
14847
15302
  const endMs = ends?.get(phase);
14848
- if (endMs === void 0) logger$26.warn({
15303
+ if (endMs === void 0) logger$27.warn({
14849
15304
  storyKey,
14850
15305
  phase
14851
15306
  }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
@@ -14892,7 +15347,7 @@ function createImplementationOrchestrator(deps) {
14892
15347
  recordedAt: completedAt,
14893
15348
  timestamp: completedAt
14894
15349
  }).catch((storeErr) => {
14895
- logger$26.warn({
15350
+ logger$27.warn({
14896
15351
  err: storeErr,
14897
15352
  storyKey
14898
15353
  }, "Failed to record metric to StateStore (best-effort)");
@@ -14914,7 +15369,7 @@ function createImplementationOrchestrator(deps) {
14914
15369
  rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
14915
15370
  });
14916
15371
  } catch (decisionErr) {
14917
- logger$26.warn({
15372
+ logger$27.warn({
14918
15373
  err: decisionErr,
14919
15374
  storyKey
14920
15375
  }, "Failed to write story-metrics decision (best-effort)");
@@ -14942,13 +15397,13 @@ function createImplementationOrchestrator(deps) {
14942
15397
  dispatches: _storyDispatches.get(storyKey) ?? 0
14943
15398
  });
14944
15399
  } catch (emitErr) {
14945
- logger$26.warn({
15400
+ logger$27.warn({
14946
15401
  err: emitErr,
14947
15402
  storyKey
14948
15403
  }, "Failed to emit story:metrics event (best-effort)");
14949
15404
  }
14950
15405
  } catch (err) {
14951
- logger$26.warn({
15406
+ logger$27.warn({
14952
15407
  err,
14953
15408
  storyKey
14954
15409
  }, "Failed to write story metrics (best-effort)");
@@ -14977,7 +15432,7 @@ function createImplementationOrchestrator(deps) {
14977
15432
  rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
14978
15433
  });
14979
15434
  } catch (err) {
14980
- logger$26.warn({
15435
+ logger$27.warn({
14981
15436
  err,
14982
15437
  storyKey
14983
15438
  }, "Failed to write story-outcome decision (best-effort)");
@@ -15003,7 +15458,7 @@ function createImplementationOrchestrator(deps) {
15003
15458
  rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
15004
15459
  });
15005
15460
  } catch (err) {
15006
- logger$26.warn({
15461
+ logger$27.warn({
15007
15462
  err,
15008
15463
  storyKey: payload.storyKey
15009
15464
  }, "Failed to persist escalation diagnosis (best-effort)");
@@ -15052,7 +15507,7 @@ function createImplementationOrchestrator(deps) {
15052
15507
  const existing = _stories.get(storyKey);
15053
15508
  if (existing !== void 0) {
15054
15509
  Object.assign(existing, updates);
15055
- persistStoryState(storyKey, existing).catch((err) => logger$26.warn({
15510
+ persistStoryState(storyKey, existing).catch((err) => logger$27.warn({
15056
15511
  err,
15057
15512
  storyKey
15058
15513
  }, "StateStore write failed after updateStory"));
@@ -15061,12 +15516,12 @@ function createImplementationOrchestrator(deps) {
15061
15516
  storyKey,
15062
15517
  conflict: err
15063
15518
  });
15064
- else logger$26.warn({
15519
+ else logger$27.warn({
15065
15520
  err,
15066
15521
  storyKey
15067
15522
  }, "mergeStory failed");
15068
15523
  });
15069
- else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$26.warn({
15524
+ else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$27.warn({
15070
15525
  err,
15071
15526
  storyKey
15072
15527
  }, "rollbackStory failed — branch may persist"));
@@ -15078,7 +15533,7 @@ function createImplementationOrchestrator(deps) {
15078
15533
  ...updates
15079
15534
  };
15080
15535
  const opts = targetStatus === "complete" || targetStatus === "escalated" ? { completedAt: fullUpdated.completedAt } : void 0;
15081
- wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$26.warn({
15536
+ wgRepo.updateStoryStatus(storyKey, targetStatus, opts).catch((err) => logger$27.warn({
15082
15537
  err,
15083
15538
  storyKey
15084
15539
  }, "wg_stories status update failed (best-effort)"));
@@ -15108,7 +15563,7 @@ function createImplementationOrchestrator(deps) {
15108
15563
  };
15109
15564
  await stateStore.setStoryState(storyKey, record);
15110
15565
  } catch (err) {
15111
- logger$26.warn({
15566
+ logger$27.warn({
15112
15567
  err,
15113
15568
  storyKey
15114
15569
  }, "StateStore.setStoryState failed (best-effort)");
@@ -15124,7 +15579,7 @@ function createImplementationOrchestrator(deps) {
15124
15579
  token_usage_json: serialized
15125
15580
  });
15126
15581
  } catch (err) {
15127
- logger$26.warn({ err }, "Failed to persist orchestrator state");
15582
+ logger$27.warn({ err }, "Failed to persist orchestrator state");
15128
15583
  }
15129
15584
  }
15130
15585
  function recordProgress() {
@@ -15171,7 +15626,7 @@ function createImplementationOrchestrator(deps) {
15171
15626
  }
15172
15627
  if (childActive) {
15173
15628
  _lastProgressTs = Date.now();
15174
- logger$26.debug({
15629
+ logger$27.debug({
15175
15630
  storyKey: key,
15176
15631
  phase: s.phase,
15177
15632
  childPids
@@ -15180,7 +15635,7 @@ function createImplementationOrchestrator(deps) {
15180
15635
  }
15181
15636
  _stalledStories.add(key);
15182
15637
  _storiesWithStall.add(key);
15183
- logger$26.warn({
15638
+ logger$27.warn({
15184
15639
  storyKey: key,
15185
15640
  phase: s.phase,
15186
15641
  elapsedMs: elapsed,
@@ -15225,7 +15680,7 @@ function createImplementationOrchestrator(deps) {
15225
15680
  for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
15226
15681
  const memState = dispatcher.getMemoryState();
15227
15682
  if (!memState.isPressured) return true;
15228
- logger$26.warn({
15683
+ logger$27.warn({
15229
15684
  storyKey,
15230
15685
  freeMB: memState.freeMB,
15231
15686
  thresholdMB: memState.thresholdMB,
@@ -15244,12 +15699,12 @@ function createImplementationOrchestrator(deps) {
15244
15699
  * to maxReviewCycles). On SHIP_IT the story is marked COMPLETE. On
15245
15700
  * exhausted retries the story is ESCALATED.
15246
15701
  */
15247
- async function processStory(storyKey) {
15248
- logger$26.info({ storyKey }, "Processing story");
15702
+ async function processStory(storyKey, storyOptions) {
15703
+ logger$27.info({ storyKey }, "Processing story");
15249
15704
  {
15250
15705
  const memoryOk = await checkMemoryPressure(storyKey);
15251
15706
  if (!memoryOk) {
15252
- logger$26.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
15707
+ logger$27.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
15253
15708
  const memPressureState = {
15254
15709
  phase: "ESCALATED",
15255
15710
  reviewCycles: 0,
@@ -15258,7 +15713,7 @@ function createImplementationOrchestrator(deps) {
15258
15713
  completedAt: new Date().toISOString()
15259
15714
  };
15260
15715
  _stories.set(storyKey, memPressureState);
15261
- persistStoryState(storyKey, memPressureState).catch((err) => logger$26.warn({
15716
+ persistStoryState(storyKey, memPressureState).catch((err) => logger$27.warn({
15262
15717
  err,
15263
15718
  storyKey
15264
15719
  }, "StateStore write failed after memory-pressure escalation"));
@@ -15275,7 +15730,7 @@ function createImplementationOrchestrator(deps) {
15275
15730
  }
15276
15731
  await waitIfPaused();
15277
15732
  if (_state !== "RUNNING") return;
15278
- stateStore?.branchForStory(storyKey).catch((err) => logger$26.warn({
15733
+ stateStore?.branchForStory(storyKey).catch((err) => logger$27.warn({
15279
15734
  err,
15280
15735
  storyKey
15281
15736
  }, "branchForStory failed — continuing without branch isolation"));
@@ -15292,14 +15747,14 @@ function createImplementationOrchestrator(deps) {
15292
15747
  if (match$1) {
15293
15748
  const candidatePath = join$1(artifactsDir, match$1);
15294
15749
  const validation = await isValidStoryFile(candidatePath);
15295
- if (!validation.valid) logger$26.warn({
15750
+ if (!validation.valid) logger$27.warn({
15296
15751
  storyKey,
15297
15752
  storyFilePath: candidatePath,
15298
15753
  reason: validation.reason
15299
15754
  }, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
15300
15755
  else {
15301
15756
  storyFilePath = candidatePath;
15302
- logger$26.info({
15757
+ logger$27.info({
15303
15758
  storyKey,
15304
15759
  storyFilePath
15305
15760
  }, "Found existing story file — skipping create-story");
@@ -15348,7 +15803,7 @@ function createImplementationOrchestrator(deps) {
15348
15803
  metadata: JSON.stringify({ storyKey })
15349
15804
  });
15350
15805
  } catch (tokenErr) {
15351
- logger$26.warn({
15806
+ logger$27.warn({
15352
15807
  storyKey,
15353
15808
  err: tokenErr
15354
15809
  }, "Failed to record create-story token usage");
@@ -15432,14 +15887,14 @@ function createImplementationOrchestrator(deps) {
15432
15887
  ...contract.transport !== void 0 ? { transport: contract.transport } : {}
15433
15888
  })
15434
15889
  });
15435
- logger$26.info({
15890
+ logger$27.info({
15436
15891
  storyKey,
15437
15892
  contractCount: contracts.length,
15438
15893
  contracts
15439
15894
  }, "Stored interface contract declarations");
15440
15895
  }
15441
15896
  } catch (err) {
15442
- logger$26.warn({
15897
+ logger$27.warn({
15443
15898
  storyKey,
15444
15899
  error: err instanceof Error ? err.message : String(err)
15445
15900
  }, "Failed to parse interface contracts — continuing without contract declarations");
@@ -15467,10 +15922,10 @@ function createImplementationOrchestrator(deps) {
15467
15922
  });
15468
15923
  testPlanPhaseResult = testPlanResult.result;
15469
15924
  testPlanTokenUsage = testPlanResult.tokenUsage;
15470
- if (testPlanResult.result === "success") logger$26.info({ storyKey }, "Test plan generated successfully");
15471
- else logger$26.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
15925
+ if (testPlanResult.result === "success") logger$27.info({ storyKey }, "Test plan generated successfully");
15926
+ else logger$27.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
15472
15927
  } catch (err) {
15473
- logger$26.warn({
15928
+ logger$27.warn({
15474
15929
  storyKey,
15475
15930
  err
15476
15931
  }, "Test planning failed — proceeding to dev-story without test plan");
@@ -15486,7 +15941,7 @@ function createImplementationOrchestrator(deps) {
15486
15941
  metadata: JSON.stringify({ storyKey })
15487
15942
  });
15488
15943
  } catch (tokenErr) {
15489
- logger$26.warn({
15944
+ logger$27.warn({
15490
15945
  storyKey,
15491
15946
  err: tokenErr
15492
15947
  }, "Failed to record test-plan token usage");
@@ -15509,7 +15964,7 @@ function createImplementationOrchestrator(deps) {
15509
15964
  try {
15510
15965
  storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
15511
15966
  } catch (err) {
15512
- logger$26.error({
15967
+ logger$27.error({
15513
15968
  storyKey,
15514
15969
  storyFilePath,
15515
15970
  error: err instanceof Error ? err.message : String(err)
@@ -15517,7 +15972,7 @@ function createImplementationOrchestrator(deps) {
15517
15972
  }
15518
15973
  const analysis = analyzeStoryComplexity(storyContentForAnalysis);
15519
15974
  const batches = planTaskBatches(analysis);
15520
- logger$26.info({
15975
+ logger$27.info({
15521
15976
  storyKey,
15522
15977
  estimatedScope: analysis.estimatedScope,
15523
15978
  batchCount: batches.length,
@@ -15535,7 +15990,7 @@ function createImplementationOrchestrator(deps) {
15535
15990
  if (_state !== "RUNNING") break;
15536
15991
  const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
15537
15992
  const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
15538
- logger$26.info({
15993
+ logger$27.info({
15539
15994
  storyKey,
15540
15995
  batchIndex: batch.batchIndex,
15541
15996
  taskCount: batch.taskIds.length
@@ -15553,7 +16008,9 @@ function createImplementationOrchestrator(deps) {
15553
16008
  tokenCeilings,
15554
16009
  otlpEndpoint: _otlpEndpoint,
15555
16010
  repoMapInjector,
15556
- maxRepoMapTokens
16011
+ maxRepoMapTokens,
16012
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {},
16013
+ ...storyOptions?.optimizationDirectives !== void 0 ? { optimizationDirectives: storyOptions.optimizationDirectives } : {}
15557
16014
  }, {
15558
16015
  storyKey,
15559
16016
  storyFilePath: storyFilePath ?? "",
@@ -15563,7 +16020,7 @@ function createImplementationOrchestrator(deps) {
15563
16020
  });
15564
16021
  } catch (batchErr) {
15565
16022
  const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
15566
- logger$26.warn({
16023
+ logger$27.warn({
15567
16024
  storyKey,
15568
16025
  batchIndex: batch.batchIndex,
15569
16026
  error: errMsg
@@ -15583,7 +16040,7 @@ function createImplementationOrchestrator(deps) {
15583
16040
  filesModified: batchFilesModified,
15584
16041
  result: batchResult.result === "success" ? "success" : "failed"
15585
16042
  };
15586
- logger$26.info(batchMetrics, "Batch dev-story metrics");
16043
+ logger$27.info(batchMetrics, "Batch dev-story metrics");
15587
16044
  for (const f of batchFilesModified) allFilesModified.add(f);
15588
16045
  if (batchFilesModified.length > 0) batchFileGroups.push({
15589
16046
  batchIndex: batch.batchIndex,
@@ -15605,13 +16062,13 @@ function createImplementationOrchestrator(deps) {
15605
16062
  })
15606
16063
  });
15607
16064
  } catch (tokenErr) {
15608
- logger$26.warn({
16065
+ logger$27.warn({
15609
16066
  storyKey,
15610
16067
  batchIndex: batch.batchIndex,
15611
16068
  err: tokenErr
15612
16069
  }, "Failed to record batch token usage");
15613
16070
  }
15614
- if (batchResult.result === "failed") logger$26.warn({
16071
+ if (batchResult.result === "failed") logger$27.warn({
15615
16072
  storyKey,
15616
16073
  batchIndex: batch.batchIndex,
15617
16074
  error: batchResult.error
@@ -15636,7 +16093,9 @@ function createImplementationOrchestrator(deps) {
15636
16093
  tokenCeilings,
15637
16094
  otlpEndpoint: _otlpEndpoint,
15638
16095
  repoMapInjector,
15639
- maxRepoMapTokens
16096
+ maxRepoMapTokens,
16097
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {},
16098
+ ...storyOptions?.optimizationDirectives !== void 0 ? { optimizationDirectives: storyOptions.optimizationDirectives } : {}
15640
16099
  }, {
15641
16100
  storyKey,
15642
16101
  storyFilePath: storyFilePath ?? "",
@@ -15653,7 +16112,7 @@ function createImplementationOrchestrator(deps) {
15653
16112
  metadata: JSON.stringify({ storyKey })
15654
16113
  });
15655
16114
  } catch (tokenErr) {
15656
- logger$26.warn({
16115
+ logger$27.warn({
15657
16116
  storyKey,
15658
16117
  err: tokenErr
15659
16118
  }, "Failed to record dev-story token usage");
@@ -15665,7 +16124,7 @@ function createImplementationOrchestrator(deps) {
15665
16124
  });
15666
16125
  await persistState();
15667
16126
  if (devResult.result === "success") devStoryWasSuccess = true;
15668
- else logger$26.warn({
16127
+ else logger$27.warn({
15669
16128
  storyKey,
15670
16129
  error: devResult.error,
15671
16130
  filesModified: devFilesModified.length
@@ -15693,7 +16152,7 @@ function createImplementationOrchestrator(deps) {
15693
16152
  if (devStoryWasSuccess) {
15694
16153
  gitDiffFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
15695
16154
  if (gitDiffFiles.length === 0) {
15696
- logger$26.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
16155
+ logger$27.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
15697
16156
  eventBus.emit("orchestrator:zero-diff-escalation", {
15698
16157
  storyKey,
15699
16158
  reason: "zero-diff-on-complete"
@@ -15724,7 +16183,7 @@ function createImplementationOrchestrator(deps) {
15724
16183
  });
15725
16184
  if (buildVerifyResult.status === "passed") {
15726
16185
  eventBus.emit("story:build-verification-passed", { storyKey });
15727
- logger$26.info({ storyKey }, "Build verification passed");
16186
+ logger$27.info({ storyKey }, "Build verification passed");
15728
16187
  } else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
15729
16188
  const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
15730
16189
  const reason = buildVerifyResult.reason ?? "build-verification-failed";
@@ -15733,7 +16192,7 @@ function createImplementationOrchestrator(deps) {
15733
16192
  exitCode: buildVerifyResult.exitCode ?? 1,
15734
16193
  output: truncatedOutput
15735
16194
  });
15736
- logger$26.warn({
16195
+ logger$27.warn({
15737
16196
  storyKey,
15738
16197
  reason,
15739
16198
  exitCode: buildVerifyResult.exitCode
@@ -15763,7 +16222,7 @@ function createImplementationOrchestrator(deps) {
15763
16222
  storyKey
15764
16223
  });
15765
16224
  if (icResult.potentiallyAffectedTests.length > 0) {
15766
- logger$26.warn({
16225
+ logger$27.warn({
15767
16226
  storyKey,
15768
16227
  modifiedInterfaces: icResult.modifiedInterfaces,
15769
16228
  potentiallyAffectedTests: icResult.potentiallyAffectedTests
@@ -15809,7 +16268,7 @@ function createImplementationOrchestrator(deps) {
15809
16268
  "NEEDS_MAJOR_REWORK": 2
15810
16269
  };
15811
16270
  for (const group of batchFileGroups) {
15812
- logger$26.info({
16271
+ logger$27.info({
15813
16272
  storyKey,
15814
16273
  batchIndex: group.batchIndex,
15815
16274
  fileCount: group.files.length
@@ -15824,7 +16283,8 @@ function createImplementationOrchestrator(deps) {
15824
16283
  tokenCeilings,
15825
16284
  otlpEndpoint: _otlpEndpoint,
15826
16285
  repoMapInjector,
15827
- maxRepoMapTokens
16286
+ maxRepoMapTokens,
16287
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {}
15828
16288
  }, {
15829
16289
  storyKey,
15830
16290
  storyFilePath: storyFilePath ?? "",
@@ -15850,7 +16310,7 @@ function createImplementationOrchestrator(deps) {
15850
16310
  rawOutput: lastRawOutput,
15851
16311
  tokenUsage: aggregateTokens
15852
16312
  };
15853
- logger$26.info({
16313
+ logger$27.info({
15854
16314
  storyKey,
15855
16315
  batchCount: batchFileGroups.length,
15856
16316
  verdict: worstVerdict,
@@ -15867,7 +16327,8 @@ function createImplementationOrchestrator(deps) {
15867
16327
  tokenCeilings,
15868
16328
  otlpEndpoint: _otlpEndpoint,
15869
16329
  repoMapInjector,
15870
- maxRepoMapTokens
16330
+ maxRepoMapTokens,
16331
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {}
15871
16332
  }, {
15872
16333
  storyKey,
15873
16334
  storyFilePath: storyFilePath ?? "",
@@ -15890,7 +16351,7 @@ function createImplementationOrchestrator(deps) {
15890
16351
  })
15891
16352
  });
15892
16353
  } catch (tokenErr) {
15893
- logger$26.warn({
16354
+ logger$27.warn({
15894
16355
  storyKey,
15895
16356
  err: tokenErr
15896
16357
  }, "Failed to record code-review token usage");
@@ -15898,7 +16359,7 @@ function createImplementationOrchestrator(deps) {
15898
16359
  const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
15899
16360
  if (isPhantomReview && !timeoutRetried) {
15900
16361
  timeoutRetried = true;
15901
- logger$26.warn({
16362
+ logger$27.warn({
15902
16363
  storyKey,
15903
16364
  reviewCycles,
15904
16365
  error: reviewResult.error
@@ -15908,7 +16369,7 @@ function createImplementationOrchestrator(deps) {
15908
16369
  verdict = reviewResult.verdict;
15909
16370
  issueList = reviewResult.issue_list ?? [];
15910
16371
  if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
15911
- logger$26.info({
16372
+ logger$27.info({
15912
16373
  storyKey,
15913
16374
  originalVerdict: verdict,
15914
16375
  issuesBefore: previousIssueList.length,
@@ -15944,7 +16405,7 @@ function createImplementationOrchestrator(deps) {
15944
16405
  if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
15945
16406
  parts.push(`${fileCount} files`);
15946
16407
  parts.push(`${totalTokensK} tokens`);
15947
- logger$26.info({
16408
+ logger$27.info({
15948
16409
  storyKey,
15949
16410
  verdict,
15950
16411
  agentVerdict: reviewResult.agentVerdict
@@ -15993,9 +16454,9 @@ function createImplementationOrchestrator(deps) {
15993
16454
  }),
15994
16455
  rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
15995
16456
  });
15996
- logger$26.info({ storyKey }, "Advisory notes persisted to decision store");
16457
+ logger$27.info({ storyKey }, "Advisory notes persisted to decision store");
15997
16458
  } catch (advisoryErr) {
15998
- logger$26.warn({
16459
+ logger$27.warn({
15999
16460
  storyKey,
16000
16461
  error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
16001
16462
  }, "Failed to persist advisory notes (best-effort)");
@@ -16003,27 +16464,27 @@ function createImplementationOrchestrator(deps) {
16003
16464
  if (telemetryPersistence !== void 0) try {
16004
16465
  const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
16005
16466
  if (turns.length > 0) {
16006
- const scorer = new EfficiencyScorer(logger$26);
16467
+ const scorer = new EfficiencyScorer(logger$27);
16007
16468
  const effScore = scorer.score(storyKey, turns);
16008
16469
  await telemetryPersistence.storeEfficiencyScore(effScore);
16009
- logger$26.info({
16470
+ logger$27.info({
16010
16471
  storyKey,
16011
16472
  compositeScore: effScore.compositeScore,
16012
16473
  modelCount: effScore.perModelBreakdown.length
16013
16474
  }, "Efficiency score computed and persisted");
16014
- } else logger$26.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
16475
+ } else logger$27.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
16015
16476
  } catch (effErr) {
16016
- logger$26.warn({
16477
+ logger$27.warn({
16017
16478
  storyKey,
16018
16479
  error: effErr instanceof Error ? effErr.message : String(effErr)
16019
16480
  }, "Efficiency scoring failed — story verdict unchanged");
16020
16481
  }
16021
16482
  if (telemetryPersistence !== void 0) try {
16022
16483
  const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
16023
- if (turns.length === 0) logger$26.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
16484
+ if (turns.length === 0) logger$27.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
16024
16485
  else {
16025
- const categorizer = new Categorizer(logger$26);
16026
- const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$26);
16486
+ const categorizer = new Categorizer(logger$27);
16487
+ const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$27);
16027
16488
  const categoryStats = categorizer.computeCategoryStatsFromTurns(turns);
16028
16489
  const consumerStats = consumerAnalyzer.analyzeFromTurns(turns);
16029
16490
  await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
@@ -16031,7 +16492,7 @@ function createImplementationOrchestrator(deps) {
16031
16492
  const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
16032
16493
  const topCategory = categoryStats[0]?.category ?? "none";
16033
16494
  const topConsumer = consumerStats[0]?.consumerKey ?? "none";
16034
- logger$26.info({
16495
+ logger$27.info({
16035
16496
  storyKey,
16036
16497
  topCategory,
16037
16498
  topConsumer,
@@ -16039,7 +16500,7 @@ function createImplementationOrchestrator(deps) {
16039
16500
  }, "Semantic categorization and consumer analysis complete");
16040
16501
  }
16041
16502
  } catch (catErr) {
16042
- logger$26.warn({
16503
+ logger$27.warn({
16043
16504
  storyKey,
16044
16505
  error: catErr instanceof Error ? catErr.message : String(catErr)
16045
16506
  }, "Semantic categorization failed — story verdict unchanged");
@@ -16060,7 +16521,7 @@ function createImplementationOrchestrator(deps) {
16060
16521
  filesModified: devFilesModified,
16061
16522
  workingDirectory: projectRoot
16062
16523
  });
16063
- logger$26.debug({
16524
+ logger$27.debug({
16064
16525
  storyKey,
16065
16526
  expansion_priority: expansionResult.expansion_priority,
16066
16527
  coverage_gaps: expansionResult.coverage_gaps.length
@@ -16073,7 +16534,7 @@ function createImplementationOrchestrator(deps) {
16073
16534
  value: JSON.stringify(expansionResult)
16074
16535
  });
16075
16536
  } catch (expansionErr) {
16076
- logger$26.warn({
16537
+ logger$27.warn({
16077
16538
  storyKey,
16078
16539
  error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
16079
16540
  }, "Test expansion failed — story verdict unchanged");
@@ -16100,7 +16561,7 @@ function createImplementationOrchestrator(deps) {
16100
16561
  await persistState();
16101
16562
  return;
16102
16563
  }
16103
- logger$26.info({
16564
+ logger$27.info({
16104
16565
  storyKey,
16105
16566
  reviewCycles: finalReviewCycles,
16106
16567
  issueCount: issueList.length
@@ -16160,7 +16621,7 @@ function createImplementationOrchestrator(deps) {
16160
16621
  fixPrompt = assembled.prompt;
16161
16622
  } catch {
16162
16623
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
16163
- logger$26.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
16624
+ logger$27.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
16164
16625
  }
16165
16626
  const handle = dispatcher.dispatch({
16166
16627
  prompt: fixPrompt,
@@ -16168,6 +16629,7 @@ function createImplementationOrchestrator(deps) {
16168
16629
  taskType: "minor-fixes",
16169
16630
  workingDirectory: projectRoot,
16170
16631
  ...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {},
16632
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {},
16171
16633
  ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {},
16172
16634
  storyKey
16173
16635
  });
@@ -16180,9 +16642,9 @@ function createImplementationOrchestrator(deps) {
16180
16642
  output: fixResult.tokenEstimate.output
16181
16643
  } : void 0 }
16182
16644
  });
16183
- if (fixResult.status === "timeout") logger$26.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
16645
+ if (fixResult.status === "timeout") logger$27.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
16184
16646
  } catch (err) {
16185
- logger$26.warn({
16647
+ logger$27.warn({
16186
16648
  storyKey,
16187
16649
  err
16188
16650
  }, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
@@ -16299,7 +16761,7 @@ function createImplementationOrchestrator(deps) {
16299
16761
  fixPrompt = assembled.prompt;
16300
16762
  } catch {
16301
16763
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
16302
- logger$26.warn({
16764
+ logger$27.warn({
16303
16765
  storyKey,
16304
16766
  taskType
16305
16767
  }, "Failed to assemble fix prompt, using fallback");
@@ -16312,6 +16774,7 @@ function createImplementationOrchestrator(deps) {
16312
16774
  ...fixModel !== void 0 ? { model: fixModel } : {},
16313
16775
  outputSchema: DevStoryResultSchema,
16314
16776
  ...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
16777
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {},
16315
16778
  ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
16316
16779
  ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
16317
16780
  }) : dispatcher.dispatch({
@@ -16320,6 +16783,7 @@ function createImplementationOrchestrator(deps) {
16320
16783
  taskType,
16321
16784
  ...fixModel !== void 0 ? { model: fixModel } : {},
16322
16785
  ...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
16786
+ ...config.perStoryContextCeilings?.[storyKey] !== void 0 ? { maxContextTokens: config.perStoryContextCeilings[storyKey] } : {},
16323
16787
  ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
16324
16788
  ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
16325
16789
  });
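A note on the conditional spreads added throughout these dispatch calls: spreading {} when perStoryContextCeilings has no entry for the story keeps maxContextTokens absent from the options object, rather than present with the value undefined as a plain property assignment would leave it. A small self-contained illustration:

// Sketch: conditional spread leaves optional keys out entirely.
interface DispatchOptions {
  prompt: string;
  maxContextTokens?: number;
}

function buildOptions(prompt: string, ceiling?: number): DispatchOptions {
  return {
    prompt,
    // Spread an empty object when there is no ceiling: the key never appears.
    ...(ceiling !== undefined ? { maxContextTokens: ceiling } : {}),
  };
}

console.log("maxContextTokens" in buildOptions("p"));       // false
console.log("maxContextTokens" in buildOptions("p", 8e3));  // true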
@@ -16333,7 +16797,7 @@ function createImplementationOrchestrator(deps) {
16333
16797
  } : void 0 }
16334
16798
  });
16335
16799
  if (fixResult.status === "timeout") {
16336
- logger$26.warn({
16800
+ logger$27.warn({
16337
16801
  storyKey,
16338
16802
  taskType
16339
16803
  }, "Fix dispatch timed out — escalating story");
@@ -16355,7 +16819,7 @@ function createImplementationOrchestrator(deps) {
16355
16819
  }
16356
16820
  if (fixResult.status === "failed") {
16357
16821
  if (isMajorRework) {
16358
- logger$26.warn({
16822
+ logger$27.warn({
16359
16823
  storyKey,
16360
16824
  exitCode: fixResult.exitCode
16361
16825
  }, "Major rework dispatch failed — escalating story");
@@ -16375,14 +16839,14 @@ function createImplementationOrchestrator(deps) {
16375
16839
  await persistState();
16376
16840
  return;
16377
16841
  }
16378
- logger$26.warn({
16842
+ logger$27.warn({
16379
16843
  storyKey,
16380
16844
  taskType,
16381
16845
  exitCode: fixResult.exitCode
16382
16846
  }, "Fix dispatch failed");
16383
16847
  }
16384
16848
  } catch (err) {
16385
- logger$26.warn({
16849
+ logger$27.warn({
16386
16850
  storyKey,
16387
16851
  taskType,
16388
16852
  err
@@ -16408,8 +16872,27 @@ function createImplementationOrchestrator(deps) {
16408
16872
  * story dispatch (Story 23-8, AC2).
16409
16873
  */
16410
16874
  async function processConflictGroup(group) {
16875
+ const completedStoryKeys = [];
16411
16876
  for (const storyKey of group) {
16412
- await processStory(storyKey);
16877
+ let optimizationDirectives;
16878
+ if (telemetryAdvisor !== void 0 && completedStoryKeys.length > 0) try {
16879
+ const recs = await telemetryAdvisor.getRecommendationsForRun(completedStoryKeys);
16880
+ const directives = telemetryAdvisor.formatOptimizationDirectives(recs);
16881
+ if (directives.length > 0) {
16882
+ optimizationDirectives = directives;
16883
+ logger$27.debug({
16884
+ storyKey,
16885
+ directiveCount: recs.filter((r) => r.severity !== "info").length
16886
+ }, "Optimization directives ready for dispatch");
16887
+ }
16888
+ } catch (err) {
16889
+ logger$27.debug({
16890
+ err,
16891
+ storyKey
16892
+ }, "Failed to fetch optimization directives — proceeding without");
16893
+ }
16894
+ await processStory(storyKey, { optimizationDirectives });
16895
+ completedStoryKeys.push(storyKey);
16413
16896
  globalThis.gc?.();
16414
16897
  const gcPauseMs = config.gcPauseMs ?? 2e3;
16415
16898
  await sleep(gcPauseMs);
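The shape of the new processConflictGroup loop is easier to see stripped of logging: every story after the first can receive directives distilled from the stories that already finished in the group, and any telemetry failure degrades to a dispatch without directives. A condensed sketch with the collaborators passed in explicitly (this signature is illustrative, not the package's):

type Rec = { id: string; severity: "critical" | "warning" | "info"; title: string; description: string };

interface Advisor {
  getRecommendationsForRun(keys: string[]): Promise<Rec[]>;
  formatOptimizationDirectives(recs: Rec[]): string;
}

async function processGroup(
  group: string[],
  advisor: Advisor | undefined,
  processStory: (key: string, opts: { optimizationDirectives?: string }) => Promise<void>,
): Promise<void> {
  const done: string[] = [];
  for (const key of group) {
    let optimizationDirectives: string | undefined;
    if (advisor !== undefined && done.length > 0) {
      try {
        const text = advisor.formatOptimizationDirectives(await advisor.getRecommendationsForRun(done));
        if (text.length > 0) optimizationDirectives = text;
      } catch {
        // Best-effort: a telemetry read failure never blocks the story.
      }
    }
    await processStory(key, { optimizationDirectives });
    done.push(key);
  }
}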
@@ -16445,11 +16928,11 @@ function createImplementationOrchestrator(deps) {
16445
16928
  }
16446
16929
  async function run(storyKeys) {
16447
16930
  if (_state === "RUNNING" || _state === "PAUSED") {
16448
- logger$26.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
16931
+ logger$27.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
16449
16932
  return getStatus();
16450
16933
  }
16451
16934
  if (_state === "COMPLETE") {
16452
- logger$26.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
16935
+ logger$27.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
16453
16936
  return getStatus();
16454
16937
  }
16455
16938
  _state = "RUNNING";
@@ -16473,7 +16956,7 @@ function createImplementationOrchestrator(deps) {
16473
16956
  const seedStart = Date.now();
16474
16957
  const seedResult = await seedMethodologyContext(db, projectRoot);
16475
16958
  _startupTimings.seedMethodologyMs = Date.now() - seedStart;
16476
- if (seedResult.decisionsCreated > 0) logger$26.info({
16959
+ if (seedResult.decisionsCreated > 0) logger$27.info({
16477
16960
  decisionsCreated: seedResult.decisionsCreated,
16478
16961
  skippedCategories: seedResult.skippedCategories,
16479
16962
  durationMs: _startupTimings.seedMethodologyMs
@@ -16486,7 +16969,7 @@ function createImplementationOrchestrator(deps) {
16486
16969
  _startupTimings.stateStoreInitMs = Date.now() - stateStoreInitStart;
16487
16970
  for (const key of storyKeys) {
16488
16971
  const pendingState = _stories.get(key);
16489
- if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$26.warn({
16972
+ if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$27.warn({
16490
16973
  err,
16491
16974
  storyKey: key
16492
16975
  }, "StateStore write failed during PENDING init"));
@@ -16497,12 +16980,12 @@ function createImplementationOrchestrator(deps) {
16497
16980
  _startupTimings.queryStoriesMs = Date.now() - queryStoriesStart;
16498
16981
  for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
16499
16982
  } catch (err) {
16500
- logger$26.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
16983
+ logger$27.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
16501
16984
  }
16502
16985
  }
16503
16986
  if (ingestionServer !== void 0) {
16504
16987
  if (telemetryPersistence !== void 0) try {
16505
- const pipelineLogger = logger$26;
16988
+ const pipelineLogger = logger$27;
16506
16989
  const telemetryPipeline = new TelemetryPipeline({
16507
16990
  normalizer: new TelemetryNormalizer(pipelineLogger),
16508
16991
  turnAnalyzer: new TurnAnalyzer(pipelineLogger),
@@ -16514,14 +16997,14 @@ function createImplementationOrchestrator(deps) {
16514
16997
  persistence: telemetryPersistence
16515
16998
  });
16516
16999
  ingestionServer.setPipeline(telemetryPipeline);
16517
- logger$26.info("TelemetryPipeline wired to IngestionServer");
17000
+ logger$27.info("TelemetryPipeline wired to IngestionServer");
16518
17001
  } catch (pipelineErr) {
16519
- logger$26.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
17002
+ logger$27.warn({ err: pipelineErr }, "Failed to create TelemetryPipeline — continuing without analysis pipeline");
16520
17003
  }
16521
- await ingestionServer.start().catch((err) => logger$26.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
17004
+ await ingestionServer.start().catch((err) => logger$27.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
16522
17005
  try {
16523
17006
  _otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
16524
- logger$26.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
17007
+ logger$27.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
16525
17008
  } catch {}
16526
17009
  }
16527
17010
  let contractDeclarations = [];
@@ -16561,12 +17044,12 @@ function createImplementationOrchestrator(deps) {
16561
17044
  const conflictDetectStart = Date.now();
16562
17045
  const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
16563
17046
  _startupTimings.conflictDetectMs = Date.now() - conflictDetectStart;
16564
- if (contractEdges.length > 0) logger$26.info({
17047
+ if (contractEdges.length > 0) logger$27.info({
16565
17048
  contractEdges,
16566
17049
  edgeCount: contractEdges.length
16567
17050
  }, "Contract dependency edges detected — applying contract-aware dispatch ordering");
16568
- wgRepo.addContractDependencies(contractEdges).catch((err) => logger$26.warn({ err }, "contract dep persistence failed (best-effort)"));
16569
- logger$26.info({
17051
+ wgRepo.addContractDependencies(contractEdges).catch((err) => logger$27.warn({ err }, "contract dep persistence failed (best-effort)"));
17052
+ logger$27.info({
16570
17053
  storyCount: storyKeys.length,
16571
17054
  groupCount: batches.reduce((sum, b) => sum + b.length, 0),
16572
17055
  batchCount: batches.length,
@@ -16588,7 +17071,7 @@ function createImplementationOrchestrator(deps) {
16588
17071
  exitCode,
16589
17072
  output: truncatedOutput
16590
17073
  });
16591
- logger$26.error({
17074
+ logger$27.error({
16592
17075
  exitCode,
16593
17076
  reason: preFlightResult.reason
16594
17077
  }, "Pre-flight build check failed — aborting pipeline before any story dispatch");
@@ -16597,9 +17080,9 @@ function createImplementationOrchestrator(deps) {
16597
17080
  await persistState();
16598
17081
  return getStatus();
16599
17082
  }
16600
- if (preFlightResult.status !== "skipped") logger$26.info("Pre-flight build check passed");
17083
+ if (preFlightResult.status !== "skipped") logger$27.info("Pre-flight build check passed");
16601
17084
  }
16602
- logger$26.info(_startupTimings, "Orchestrator startup timings (ms)");
17085
+ logger$27.info(_startupTimings, "Orchestrator startup timings (ms)");
16603
17086
  try {
16604
17087
  for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
16605
17088
  } catch (err) {
@@ -16607,7 +17090,7 @@ function createImplementationOrchestrator(deps) {
16607
17090
  _state = "FAILED";
16608
17091
  _completedAt = new Date().toISOString();
16609
17092
  await persistState();
16610
- logger$26.error({ err }, "Orchestrator failed with unhandled error");
17093
+ logger$27.error({ err }, "Orchestrator failed with unhandled error");
16611
17094
  return getStatus();
16612
17095
  }
16613
17096
  stopHeartbeat();
@@ -16617,7 +17100,7 @@ function createImplementationOrchestrator(deps) {
16617
17100
  const totalDeclarations = contractDeclarations.length;
16618
17101
  const currentSprintDeclarations = contractDeclarations.filter((d) => storyKeys.includes(d.storyKey));
16619
17102
  const stalePruned = totalDeclarations - currentSprintDeclarations.length;
16620
- if (stalePruned > 0) logger$26.info({
17103
+ if (stalePruned > 0) logger$27.info({
16621
17104
  stalePruned,
16622
17105
  remaining: currentSprintDeclarations.length
16623
17106
  }, "Pruned stale contract declarations from previous epics");
@@ -16631,11 +17114,11 @@ function createImplementationOrchestrator(deps) {
16631
17114
  contractName: mismatch.contractName,
16632
17115
  mismatchDescription: mismatch.mismatchDescription
16633
17116
  });
16634
- logger$26.warn({
17117
+ logger$27.warn({
16635
17118
  mismatchCount: mismatches.length,
16636
17119
  mismatches
16637
17120
  }, "Post-sprint contract verification found mismatches — manual review required");
16638
- } else if (currentSprintDeclarations.length > 0) logger$26.info("Post-sprint contract verification passed — all declared contracts satisfied");
17121
+ } else if (currentSprintDeclarations.length > 0) logger$27.info("Post-sprint contract verification passed — all declared contracts satisfied");
16639
17122
  eventBus.emit("pipeline:contract-verification-summary", {
16640
17123
  verified: currentSprintDeclarations.length,
16641
17124
  stalePruned,
@@ -16670,12 +17153,12 @@ function createImplementationOrchestrator(deps) {
16670
17153
  });
16671
17154
  await stateStore.setContractVerification(sk, records);
16672
17155
  }
16673
- logger$26.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
17156
+ logger$27.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
16674
17157
  } catch (persistErr) {
16675
- logger$26.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
17158
+ logger$27.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
16676
17159
  }
16677
17160
  } catch (err) {
16678
- logger$26.error({ err }, "Post-sprint contract verification threw an error — skipping");
17161
+ logger$27.error({ err }, "Post-sprint contract verification threw an error — skipping");
16679
17162
  }
16680
17163
  let completed = 0;
16681
17164
  let escalated = 0;
@@ -16692,8 +17175,8 @@ function createImplementationOrchestrator(deps) {
16692
17175
  await persistState();
16693
17176
  return getStatus();
16694
17177
  } finally {
16695
- if (stateStore !== void 0) await stateStore.close().catch((err) => logger$26.warn({ err }, "StateStore.close() failed (best-effort)"));
16696
- if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$26.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
17178
+ if (stateStore !== void 0) await stateStore.close().catch((err) => logger$27.warn({ err }, "StateStore.close() failed (best-effort)"));
17179
+ if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$27.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
16697
17180
  }
16698
17181
  }
16699
17182
  function pause() {
@@ -16702,7 +17185,7 @@ function createImplementationOrchestrator(deps) {
16702
17185
  _pauseGate = createPauseGate();
16703
17186
  _state = "PAUSED";
16704
17187
  eventBus.emit("orchestrator:paused", {});
16705
- logger$26.info("Orchestrator paused");
17188
+ logger$27.info("Orchestrator paused");
16706
17189
  }
16707
17190
  function resume() {
16708
17191
  if (_state !== "PAUSED") return;
@@ -16713,7 +17196,7 @@ function createImplementationOrchestrator(deps) {
16713
17196
  }
16714
17197
  _state = "RUNNING";
16715
17198
  eventBus.emit("orchestrator:resumed", {});
16716
- logger$26.info("Orchestrator resumed");
17199
+ logger$27.info("Orchestrator resumed");
16717
17200
  }
16718
17201
  return {
16719
17202
  run,
@@ -21163,7 +21646,8 @@ async function runRunAction(options) {
21163
21646
  start_phase: "implementation",
21164
21647
  config_json: JSON.stringify({
21165
21648
  storyKeys,
21166
- concurrency
21649
+ concurrency,
21650
+ ...parsedStoryKeys.length > 0 ? { explicitStories: parsedStoryKeys } : {}
21167
21651
  })
21168
21652
  });
21169
21653
  const eventBus = createEventBus();
@@ -21814,6 +22298,16 @@ async function runFullPipeline(options) {
21814
22298
  });
21815
22299
  const startedAt = Date.now();
21816
22300
  const runId = await phaseOrchestrator.startRun(concept ?? "", startPhase);
22301
+ if (explicitStories !== void 0 && explicitStories.length > 0 || options.epic !== void 0) {
22302
+ const existingRun = (await adapter.query("SELECT config_json FROM pipeline_runs WHERE id = ?", [runId]))[0];
22303
+ const existing = JSON.parse(existingRun?.config_json ?? "{}");
22304
+ const updated = {
22305
+ ...existing,
22306
+ ...explicitStories !== void 0 && explicitStories.length > 0 ? { explicitStories } : {},
22307
+ ...options.epic !== void 0 ? { epic: options.epic } : {}
22308
+ };
22309
+ await adapter.query("UPDATE pipeline_runs SET config_json = ? WHERE id = ?", [JSON.stringify(updated), runId]);
22310
+ }
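The config_json backfill above is a plain read-modify-write against the pipeline_runs row. Condensed, with the adapter's query signature assumed from the surrounding code:

// Sketch: merge extra keys into a JSON config column without clobbering it.
type Adapter = { query(sql: string, params: unknown[]): Promise<Array<{ config_json?: string }>> };

async function patchRunConfig(adapter: Adapter, runId: string, patch: Record<string, unknown>): Promise<void> {
  const row = (await adapter.query("SELECT config_json FROM pipeline_runs WHERE id = ?", [runId]))[0];
  const merged = { ...JSON.parse(row?.config_json ?? "{}"), ...patch };
  await adapter.query("UPDATE pipeline_runs SET config_json = ? WHERE id = ?", [JSON.stringify(merged), runId]);
}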
21817
22311
  if (outputFormat === "human") {
21818
22312
  process.stdout.write(`Starting full pipeline from phase: ${startPhase}\n`);
21819
22313
  process.stdout.write(`Pipeline run ID: ${runId}\n`);
@@ -22120,5 +22614,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
22120
22614
  }
22121
22615
 
22122
22616
  //#endregion
22123
- export { AdapterTelemetryPersistence, AppError, DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDatabaseAdapter, createDispatcher, createDoltClient, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, detectCycles, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initSchema, initializeDolt, isSyncAdapter, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
22124
- //# sourceMappingURL=run-D7a-qzk9.js.map
22617
+ export { AdapterTelemetryPersistence, AppError, DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, ERR_REPO_MAP_STORAGE_WRITE, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, VALID_PHASES, WorkGraphRepository, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDatabaseAdapter, createDispatcher, createDoltClient, createEventEmitter, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, createTelemetryAdvisor, detectCycles, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initSchema, initializeDolt, isSyncAdapter, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
22618
+ //# sourceMappingURL=run-BVqGAkUO.js.map