substrate-ai 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  import { createLogger, deepMask } from "./logger-D2fS2ccL.js";
2
- import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-DSi8KhQC.js";
2
+ import { CURRENT_CONFIG_FORMAT_VERSION, PartialSubstrateConfigSchema, SUPPORTED_CONFIG_FORMAT_VERSIONS, SubstrateConfigSchema, defaultConfigMigrator } from "./config-migrator-CQmBdKeG.js";
3
3
  import { ConfigError, ConfigIncompatibleFormatError, createEventBus, createTuiApp, isTuiCapable, printNonTtyWarning, sleep } from "./helpers-RL22dYtn.js";
4
4
  import { addTokenUsage, createDecision, createPipelineRun, createRequirement, getArtifactByTypeForRun, getArtifactsByRun, getDecisionsByCategory, getDecisionsByPhase, getDecisionsByPhaseForRun, getLatestRun, getPipelineRunById, getRunningPipelineRuns, getTokenUsageSummary, registerArtifact, updatePipelineRun, updatePipelineRunConfig, upsertDecision } from "./decisions-Dq4cAA2L.js";
5
5
  import { ADVISORY_NOTES, ESCALATION_DIAGNOSIS, OPERATIONAL_FINDING, STORY_METRICS, STORY_OUTCOME, TEST_EXPANSION_FINDING, TEST_PLAN, aggregateTokenUsageForRun, aggregateTokenUsageForStory, getStoryMetricsForRun, writeRunMetrics, writeStoryMetrics } from "./operational-Bovj4fS-.js";
@@ -19,6 +19,7 @@ import { existsSync as existsSync$1, readFileSync as readFileSync$1, readdirSync
19
19
  import { homedir } from "os";
20
20
  import { freemem, platform } from "node:os";
21
21
  import { createHash, randomUUID } from "node:crypto";
22
+ import { createServer } from "node:http";
22
23
 
23
24
  //#region rolldown:runtime
24
25
  var __require = /* @__PURE__ */ createRequire(import.meta.url);
@@ -605,7 +606,7 @@ const migration010RunMetrics = {
605
606
 
606
607
  //#endregion
607
608
  //#region src/persistence/migrations/index.ts
608
- const logger$23 = createLogger("persistence:migrations");
609
+ const logger$25 = createLogger("persistence:migrations");
609
610
  const MIGRATIONS = [
610
611
  initialSchemaMigration,
611
612
  costTrackerSchemaMigration,
@@ -623,7 +624,7 @@ const MIGRATIONS = [
623
624
  * Safe to call multiple times — already-applied migrations are skipped.
624
625
  */
625
626
  function runMigrations(db) {
626
- logger$23.info("Starting migration runner");
627
+ logger$25.info("Starting migration runner");
627
628
  db.exec(`
628
629
  CREATE TABLE IF NOT EXISTS schema_migrations (
629
630
  version INTEGER PRIMARY KEY,
@@ -634,12 +635,12 @@ function runMigrations(db) {
634
635
  const appliedVersions = new Set(db.prepare("SELECT version FROM schema_migrations").all().map((row) => row.version));
635
636
  const pending = MIGRATIONS.filter((m) => !appliedVersions.has(m.version)).sort((a, b) => a.version - b.version);
636
637
  if (pending.length === 0) {
637
- logger$23.info("No pending migrations");
638
+ logger$25.info("No pending migrations");
638
639
  return;
639
640
  }
640
641
  const insertMigration = db.prepare("INSERT INTO schema_migrations (version, name) VALUES (?, ?)");
641
642
  for (const migration of pending) {
642
- logger$23.info({
643
+ logger$25.info({
643
644
  version: migration.version,
644
645
  name: migration.name
645
646
  }, "Applying migration");
@@ -653,14 +654,14 @@ function runMigrations(db) {
653
654
  });
654
655
  applyMigration();
655
656
  }
656
- logger$23.info({ version: migration.version }, "Migration applied successfully");
657
+ logger$25.info({ version: migration.version }, "Migration applied successfully");
657
658
  }
658
- logger$23.info({ count: pending.length }, "All pending migrations applied");
659
+ logger$25.info({ count: pending.length }, "All pending migrations applied");
659
660
  }
660
661
 
661
662
  //#endregion
662
663
  //#region src/persistence/database.ts
663
- const logger$22 = createLogger("persistence:database");
664
+ const logger$24 = createLogger("persistence:database");
664
665
  /**
665
666
  * Thin wrapper that opens a SQLite database, applies required PRAGMAs,
666
667
  * and exposes the raw BetterSqlite3 instance.
@@ -677,14 +678,14 @@ var DatabaseWrapper = class {
677
678
  */
678
679
  open() {
679
680
  if (this._db !== null) return;
680
- logger$22.info({ path: this._path }, "Opening SQLite database");
681
+ logger$24.info({ path: this._path }, "Opening SQLite database");
681
682
  this._db = new Database(this._path);
682
683
  const walResult = this._db.pragma("journal_mode = WAL");
683
- if (walResult?.[0]?.journal_mode !== "wal") logger$22.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
684
+ if (walResult?.[0]?.journal_mode !== "wal") logger$24.warn({ result: walResult?.[0]?.journal_mode }, "WAL pragma did not return expected \"wal\" — journal_mode may be \"memory\" or unsupported");
684
685
  this._db.pragma("busy_timeout = 5000");
685
686
  this._db.pragma("synchronous = NORMAL");
686
687
  this._db.pragma("foreign_keys = ON");
687
- logger$22.info({ path: this._path }, "SQLite database opened with WAL mode");
688
+ logger$24.info({ path: this._path }, "SQLite database opened with WAL mode");
688
689
  }
689
690
  /**
690
691
  * Close the database. Idempotent — calling close() when already closed is a no-op.
@@ -693,7 +694,7 @@ var DatabaseWrapper = class {
693
694
  if (this._db === null) return;
694
695
  this._db.close();
695
696
  this._db = null;
696
- logger$22.info({ path: this._path }, "SQLite database closed");
697
+ logger$24.info({ path: this._path }, "SQLite database closed");
697
698
  }
698
699
  /**
699
700
  * Return the raw BetterSqlite3 instance.
@@ -1610,7 +1611,7 @@ function formatUnsupportedVersionError(formatType, version, supported) {
1610
1611
 
1611
1612
  //#endregion
1612
1613
  //#region src/modules/config/config-system-impl.ts
1613
- const logger$21 = createLogger("config");
1614
+ const logger$23 = createLogger("config");
1614
1615
  function deepMerge(base, override) {
1615
1616
  const result = { ...base };
1616
1617
  for (const [key, val] of Object.entries(override)) if (val !== null && val !== void 0 && typeof val === "object" && !Array.isArray(val) && typeof result[key] === "object" && result[key] !== null && !Array.isArray(result[key])) result[key] = deepMerge(result[key], val);
@@ -1655,7 +1656,7 @@ function readEnvOverrides() {
1655
1656
  }
1656
1657
  const parsed = PartialSubstrateConfigSchema.safeParse(overrides);
1657
1658
  if (!parsed.success) {
1658
- logger$21.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
1659
+ logger$23.warn({ errors: parsed.error.issues }, "Invalid environment variable overrides ignored");
1659
1660
  return {};
1660
1661
  }
1661
1662
  return parsed.data;
@@ -1719,7 +1720,7 @@ var ConfigSystemImpl = class {
1719
1720
  throw new ConfigError(`Configuration validation failed:\n${issues}`, { issues: result.error.issues });
1720
1721
  }
1721
1722
  this._config = result.data;
1722
- logger$21.debug("Configuration loaded successfully");
1723
+ logger$23.debug("Configuration loaded successfully");
1723
1724
  }
1724
1725
  getConfig() {
1725
1726
  if (this._config === null) throw new ConfigError("Configuration has not been loaded. Call load() before getConfig().", {});
@@ -1782,7 +1783,7 @@ var ConfigSystemImpl = class {
1782
1783
  if (version !== void 0 && typeof version === "string" && !isVersionSupported(version, SUPPORTED_CONFIG_FORMAT_VERSIONS)) if (defaultConfigMigrator.canMigrate(version, CURRENT_CONFIG_FORMAT_VERSION)) {
1783
1784
  const migrationOutput = defaultConfigMigrator.migrate(rawObj, version, CURRENT_CONFIG_FORMAT_VERSION, filePath);
1784
1785
  if (migrationOutput.result.success) {
1785
- logger$21.info({
1786
+ logger$23.info({
1786
1787
  from: version,
1787
1788
  to: CURRENT_CONFIG_FORMAT_VERSION,
1788
1789
  backup: migrationOutput.result.backupPath
@@ -3191,7 +3192,7 @@ function truncateToTokens(text, maxTokens) {
3191
3192
 
3192
3193
  //#endregion
3193
3194
  //#region src/modules/context-compiler/context-compiler-impl.ts
3194
- const logger$20 = createLogger("context-compiler");
3195
+ const logger$22 = createLogger("context-compiler");
3195
3196
  /**
3196
3197
  * Fraction of the original token budget that must remain (after required +
3197
3198
  * important sections) before an optional section is included.
@@ -3283,7 +3284,7 @@ var ContextCompilerImpl = class {
3283
3284
  includedParts.push(truncated);
3284
3285
  remainingBudget -= truncatedTokens;
3285
3286
  anyTruncated = true;
3286
- logger$20.warn({
3287
+ logger$22.warn({
3287
3288
  section: section.name,
3288
3289
  originalTokens: tokens,
3289
3290
  budgetTokens: truncatedTokens
@@ -3297,7 +3298,7 @@ var ContextCompilerImpl = class {
3297
3298
  });
3298
3299
  } else {
3299
3300
  anyTruncated = true;
3300
- logger$20.warn({
3301
+ logger$22.warn({
3301
3302
  section: section.name,
3302
3303
  tokens
3303
3304
  }, "Context compiler: omitted \"important\" section — no budget remaining");
@@ -3324,7 +3325,7 @@ var ContextCompilerImpl = class {
3324
3325
  } else {
3325
3326
  if (tokens > 0) {
3326
3327
  anyTruncated = true;
3327
- logger$20.warn({
3328
+ logger$22.warn({
3328
3329
  section: section.name,
3329
3330
  tokens,
3330
3331
  budgetFractionRemaining: budgetFractionRemaining.toFixed(2)
@@ -3609,7 +3610,7 @@ function parseYamlResult(yamlText, schema) {
3609
3610
 
3610
3611
  //#endregion
3611
3612
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
3612
- const logger$19 = createLogger("agent-dispatch");
3613
+ const logger$21 = createLogger("agent-dispatch");
3613
3614
  const SHUTDOWN_GRACE_MS = 1e4;
3614
3615
  const SHUTDOWN_MAX_WAIT_MS = 3e4;
3615
3616
  const CHARS_PER_TOKEN = 4;
@@ -3654,7 +3655,7 @@ function getAvailableMemory() {
3654
3655
  }).trim(), 10);
3655
3656
  _lastKnownPressureLevel = pressureLevel;
3656
3657
  if (pressureLevel >= 4) {
3657
- logger$19.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
3658
+ logger$21.warn({ pressureLevel }, "macOS kernel reports critical memory pressure");
3658
3659
  return 0;
3659
3660
  }
3660
3661
  } catch {}
@@ -3669,7 +3670,7 @@ function getAvailableMemory() {
3669
3670
  const speculative = parseInt(vmstat.match(/Pages speculative:\s+(\d+)/)?.[1] ?? "0", 10);
3670
3671
  const available = (free + purgeable + speculative) * pageSize;
3671
3672
  if (pressureLevel >= 2) {
3672
- logger$19.warn({
3673
+ logger$21.warn({
3673
3674
  pressureLevel,
3674
3675
  availableBeforeDiscount: available
3675
3676
  }, "macOS kernel reports memory pressure — discounting estimate");
@@ -3749,7 +3750,7 @@ var DispatcherImpl = class {
3749
3750
  resolve: typedResolve,
3750
3751
  reject
3751
3752
  });
3752
- logger$19.debug({
3753
+ logger$21.debug({
3753
3754
  id,
3754
3755
  queueLength: this._queue.length
3755
3756
  }, "Dispatch queued");
@@ -3780,7 +3781,7 @@ var DispatcherImpl = class {
3780
3781
  async shutdown() {
3781
3782
  this._shuttingDown = true;
3782
3783
  this._stopMemoryPressureTimer();
3783
- logger$19.info({
3784
+ logger$21.info({
3784
3785
  running: this._running.size,
3785
3786
  queued: this._queue.length
3786
3787
  }, "Dispatcher shutting down");
@@ -3813,13 +3814,13 @@ var DispatcherImpl = class {
3813
3814
  }
3814
3815
  }, 50);
3815
3816
  });
3816
- logger$19.info("Dispatcher shutdown complete");
3817
+ logger$21.info("Dispatcher shutdown complete");
3817
3818
  }
3818
3819
  async _startDispatch(id, request, resolve$2) {
3819
- const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns } = request;
3820
+ const { prompt, agent, taskType, timeout, outputSchema, workingDirectory, model, maxTurns, otlpEndpoint } = request;
3820
3821
  const adapter = this._adapterRegistry.get(agent);
3821
3822
  if (adapter === void 0) {
3822
- logger$19.warn({
3823
+ logger$21.warn({
3823
3824
  id,
3824
3825
  agent
3825
3826
  }, "No adapter found for agent");
@@ -3846,7 +3847,8 @@ var DispatcherImpl = class {
3846
3847
  worktreePath,
3847
3848
  billingMode: "subscription",
3848
3849
  ...model !== void 0 ? { model } : {},
3849
- ...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {}
3850
+ ...resolvedMaxTurns !== void 0 ? { maxTurns: resolvedMaxTurns } : {},
3851
+ ...otlpEndpoint !== void 0 ? { otlpEndpoint } : {}
3850
3852
  });
3851
3853
  const timeoutMs = timeout ?? this._config.defaultTimeouts[taskType] ?? DEFAULT_TIMEOUTS[taskType] ?? 3e5;
3852
3854
  const env = { ...process.env };
@@ -3865,7 +3867,7 @@ var DispatcherImpl = class {
3865
3867
  });
3866
3868
  const startedAt = Date.now();
3867
3869
  proc.on("error", (err) => {
3868
- logger$19.error({
3870
+ logger$21.error({
3869
3871
  id,
3870
3872
  binary: cmd.binary,
3871
3873
  error: err.message
@@ -3873,7 +3875,7 @@ var DispatcherImpl = class {
3873
3875
  });
3874
3876
  if (proc.stdin !== null) {
3875
3877
  proc.stdin.on("error", (err) => {
3876
- if (err.code !== "EPIPE") logger$19.warn({
3878
+ if (err.code !== "EPIPE") logger$21.warn({
3877
3879
  id,
3878
3880
  error: err.message
3879
3881
  }, "stdin write error");
@@ -3915,7 +3917,7 @@ var DispatcherImpl = class {
3915
3917
  agent,
3916
3918
  taskType
3917
3919
  });
3918
- logger$19.debug({
3920
+ logger$21.debug({
3919
3921
  id,
3920
3922
  agent,
3921
3923
  taskType,
@@ -3932,7 +3934,7 @@ var DispatcherImpl = class {
3932
3934
  dispatchId: id,
3933
3935
  timeoutMs
3934
3936
  });
3935
- logger$19.warn({
3937
+ logger$21.warn({
3936
3938
  id,
3937
3939
  agent,
3938
3940
  taskType,
@@ -3986,7 +3988,7 @@ var DispatcherImpl = class {
3986
3988
  exitCode: code,
3987
3989
  output: stdout
3988
3990
  });
3989
- logger$19.debug({
3991
+ logger$21.debug({
3990
3992
  id,
3991
3993
  agent,
3992
3994
  taskType,
@@ -4012,7 +4014,7 @@ var DispatcherImpl = class {
4012
4014
  error: stderr || `Process exited with code ${String(code)}`,
4013
4015
  exitCode: code
4014
4016
  });
4015
- logger$19.debug({
4017
+ logger$21.debug({
4016
4018
  id,
4017
4019
  agent,
4018
4020
  taskType,
@@ -4071,7 +4073,7 @@ var DispatcherImpl = class {
4071
4073
  const next = this._queue.shift();
4072
4074
  if (next === void 0) return;
4073
4075
  next.handle.status = "running";
4074
- logger$19.debug({
4076
+ logger$21.debug({
4075
4077
  id: next.id,
4076
4078
  queueLength: this._queue.length
4077
4079
  }, "Dequeued dispatch");
@@ -4084,7 +4086,7 @@ var DispatcherImpl = class {
4084
4086
  _isMemoryPressured() {
4085
4087
  const free = getAvailableMemory();
4086
4088
  if (free < MIN_FREE_MEMORY_BYTES) {
4087
- logger$19.warn({
4089
+ logger$21.warn({
4088
4090
  freeMB: Math.round(free / 1024 / 1024),
4089
4091
  thresholdMB: Math.round(MIN_FREE_MEMORY_BYTES / 1024 / 1024),
4090
4092
  pressureLevel: _lastKnownPressureLevel
@@ -4200,7 +4202,7 @@ function runBuildVerification(options) {
4200
4202
  let cmd;
4201
4203
  if (verifyCommand === void 0) {
4202
4204
  const detection = detectPackageManager(projectRoot);
4203
- logger$19.info({
4205
+ logger$21.info({
4204
4206
  packageManager: detection.packageManager,
4205
4207
  lockfile: detection.lockfile,
4206
4208
  resolvedCommand: detection.command
@@ -4399,7 +4401,7 @@ function pickRecommendation(distribution, profile, totalIssues, reviewCycles, la
4399
4401
 
4400
4402
  //#endregion
4401
4403
  //#region src/modules/compiled-workflows/prompt-assembler.ts
4402
- const logger$18 = createLogger("compiled-workflows:prompt-assembler");
4404
+ const logger$20 = createLogger("compiled-workflows:prompt-assembler");
4403
4405
  /**
4404
4406
  * Assemble a final prompt from a template and sections map.
4405
4407
  *
@@ -4424,7 +4426,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4424
4426
  tokenCount,
4425
4427
  truncated: false
4426
4428
  };
4427
- logger$18.warn({
4429
+ logger$20.warn({
4428
4430
  tokenCount,
4429
4431
  ceiling: tokenCeiling
4430
4432
  }, "Prompt exceeds token ceiling — truncating optional sections");
@@ -4440,10 +4442,10 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4440
4442
  const targetSectionTokens = Math.max(0, currentSectionTokens - overBy);
4441
4443
  if (targetSectionTokens === 0) {
4442
4444
  contentMap[section.name] = "";
4443
- logger$18.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
4445
+ logger$20.warn({ sectionName: section.name }, "Section eliminated to fit token budget");
4444
4446
  } else {
4445
4447
  contentMap[section.name] = truncateToTokens(section.content, targetSectionTokens);
4446
- logger$18.warn({
4448
+ logger$20.warn({
4447
4449
  sectionName: section.name,
4448
4450
  targetSectionTokens
4449
4451
  }, "Section truncated to fit token budget");
@@ -4454,7 +4456,7 @@ function assemblePrompt(template, sections, tokenCeiling = 2200) {
4454
4456
  }
4455
4457
  if (tokenCount <= tokenCeiling) break;
4456
4458
  }
4457
- if (tokenCount > tokenCeiling) logger$18.warn({
4459
+ if (tokenCount > tokenCeiling) logger$20.warn({
4458
4460
  tokenCount,
4459
4461
  ceiling: tokenCeiling
4460
4462
  }, "Required sections alone exceed token ceiling — returning over-budget prompt");
@@ -4752,7 +4754,7 @@ function getTokenCeiling(workflowType, tokenCeilings) {
4752
4754
 
4753
4755
  //#endregion
4754
4756
  //#region src/modules/compiled-workflows/create-story.ts
4755
- const logger$17 = createLogger("compiled-workflows:create-story");
4757
+ const logger$19 = createLogger("compiled-workflows:create-story");
4756
4758
  /**
4757
4759
  * Execute the compiled create-story workflow.
4758
4760
  *
@@ -4772,13 +4774,13 @@ const logger$17 = createLogger("compiled-workflows:create-story");
4772
4774
  */
4773
4775
  async function runCreateStory(deps, params) {
4774
4776
  const { epicId, storyKey, pipelineRunId } = params;
4775
- logger$17.debug({
4777
+ logger$19.debug({
4776
4778
  epicId,
4777
4779
  storyKey,
4778
4780
  pipelineRunId
4779
4781
  }, "Starting create-story workflow");
4780
4782
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("create-story", deps.tokenCeilings);
4781
- logger$17.info({
4783
+ logger$19.info({
4782
4784
  workflow: "create-story",
4783
4785
  ceiling: TOKEN_CEILING,
4784
4786
  source: tokenCeilingSource
@@ -4788,7 +4790,7 @@ async function runCreateStory(deps, params) {
4788
4790
  template = await deps.pack.getPrompt("create-story");
4789
4791
  } catch (err) {
4790
4792
  const error = err instanceof Error ? err.message : String(err);
4791
- logger$17.error({ error }, "Failed to retrieve create-story prompt template");
4793
+ logger$19.error({ error }, "Failed to retrieve create-story prompt template");
4792
4794
  return {
4793
4795
  result: "failed",
4794
4796
  error: `Failed to retrieve prompt template: ${error}`,
@@ -4830,7 +4832,7 @@ async function runCreateStory(deps, params) {
4830
4832
  priority: "important"
4831
4833
  }
4832
4834
  ], TOKEN_CEILING);
4833
- logger$17.debug({
4835
+ logger$19.debug({
4834
4836
  tokenCount,
4835
4837
  truncated,
4836
4838
  tokenCeiling: TOKEN_CEILING
@@ -4840,14 +4842,15 @@ async function runCreateStory(deps, params) {
4840
4842
  agent: "claude-code",
4841
4843
  taskType: "create-story",
4842
4844
  outputSchema: CreateStoryResultSchema,
4843
- ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
4845
+ ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
4846
+ ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
4844
4847
  });
4845
4848
  let dispatchResult;
4846
4849
  try {
4847
4850
  dispatchResult = await handle.result;
4848
4851
  } catch (err) {
4849
4852
  const error = err instanceof Error ? err.message : String(err);
4850
- logger$17.error({
4853
+ logger$19.error({
4851
4854
  epicId,
4852
4855
  storyKey,
4853
4856
  error
@@ -4868,7 +4871,7 @@ async function runCreateStory(deps, params) {
4868
4871
  if (dispatchResult.status === "failed") {
4869
4872
  const errorMsg = dispatchResult.parseError ?? `Dispatch failed with exit code ${dispatchResult.exitCode}`;
4870
4873
  const stderrDetail = dispatchResult.output ? ` Output: ${dispatchResult.output}` : "";
4871
- logger$17.warn({
4874
+ logger$19.warn({
4872
4875
  epicId,
4873
4876
  storyKey,
4874
4877
  exitCode: dispatchResult.exitCode
@@ -4880,7 +4883,7 @@ async function runCreateStory(deps, params) {
4880
4883
  };
4881
4884
  }
4882
4885
  if (dispatchResult.status === "timeout") {
4883
- logger$17.warn({
4886
+ logger$19.warn({
4884
4887
  epicId,
4885
4888
  storyKey
4886
4889
  }, "Create-story dispatch timed out");
@@ -4893,7 +4896,7 @@ async function runCreateStory(deps, params) {
4893
4896
  if (dispatchResult.parsed === null) {
4894
4897
  const details = dispatchResult.parseError ?? "No YAML block found in output";
4895
4898
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
4896
- logger$17.warn({
4899
+ logger$19.warn({
4897
4900
  epicId,
4898
4901
  storyKey,
4899
4902
  details,
@@ -4909,7 +4912,7 @@ async function runCreateStory(deps, params) {
4909
4912
  const parseResult = CreateStoryResultSchema.safeParse(dispatchResult.parsed);
4910
4913
  if (!parseResult.success) {
4911
4914
  const details = parseResult.error.message;
4912
- logger$17.warn({
4915
+ logger$19.warn({
4913
4916
  epicId,
4914
4917
  storyKey,
4915
4918
  details
@@ -4922,7 +4925,7 @@ async function runCreateStory(deps, params) {
4922
4925
  };
4923
4926
  }
4924
4927
  const parsed = parseResult.data;
4925
- logger$17.info({
4928
+ logger$19.info({
4926
4929
  epicId,
4927
4930
  storyKey,
4928
4931
  storyFile: parsed.story_file,
@@ -4944,7 +4947,7 @@ function getImplementationDecisions(deps) {
4944
4947
  try {
4945
4948
  return getDecisionsByPhase(deps.db, "implementation");
4946
4949
  } catch (err) {
4947
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
4950
+ logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve implementation decisions");
4948
4951
  return [];
4949
4952
  }
4950
4953
  }
@@ -4987,13 +4990,13 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
4987
4990
  if (storyKey) {
4988
4991
  const storySection = extractStorySection(shardContent, storyKey);
4989
4992
  if (storySection) {
4990
- logger$17.debug({
4993
+ logger$19.debug({
4991
4994
  epicId,
4992
4995
  storyKey
4993
4996
  }, "Extracted per-story section from epic shard");
4994
4997
  return storySection;
4995
4998
  }
4996
- logger$17.debug({
4999
+ logger$19.debug({
4997
5000
  epicId,
4998
5001
  storyKey
4999
5002
  }, "No matching story section found — using full epic shard");
@@ -5003,11 +5006,11 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5003
5006
  if (projectRoot) {
5004
5007
  const fallback = readEpicShardFromFile(projectRoot, epicId);
5005
5008
  if (fallback) {
5006
- logger$17.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
5009
+ logger$19.info({ epicId }, "Using file-based fallback for epic shard (decisions table empty)");
5007
5010
  if (storyKey) {
5008
5011
  const storySection = extractStorySection(fallback, storyKey);
5009
5012
  if (storySection) {
5010
- logger$17.debug({
5013
+ logger$19.debug({
5011
5014
  epicId,
5012
5015
  storyKey
5013
5016
  }, "Extracted per-story section from file-based epic shard");
@@ -5019,7 +5022,7 @@ function getEpicShard(decisions, epicId, projectRoot, storyKey) {
5019
5022
  }
5020
5023
  return "";
5021
5024
  } catch (err) {
5022
- logger$17.warn({
5025
+ logger$19.warn({
5023
5026
  epicId,
5024
5027
  error: err instanceof Error ? err.message : String(err)
5025
5028
  }, "Failed to retrieve epic shard");
@@ -5036,7 +5039,7 @@ function getPrevDevNotes(decisions, epicId) {
5036
5039
  if (devNotes.length === 0) return "";
5037
5040
  return devNotes[devNotes.length - 1].value;
5038
5041
  } catch (err) {
5039
- logger$17.warn({
5042
+ logger$19.warn({
5040
5043
  epicId,
5041
5044
  error: err instanceof Error ? err.message : String(err)
5042
5045
  }, "Failed to retrieve prev dev notes");
@@ -5056,13 +5059,13 @@ function getArchConstraints$3(deps) {
5056
5059
  if (deps.projectRoot) {
5057
5060
  const fallback = readArchConstraintsFromFile(deps.projectRoot);
5058
5061
  if (fallback) {
5059
- logger$17.info("Using file-based fallback for architecture constraints (decisions table empty)");
5062
+ logger$19.info("Using file-based fallback for architecture constraints (decisions table empty)");
5060
5063
  return fallback;
5061
5064
  }
5062
5065
  }
5063
5066
  return "";
5064
5067
  } catch (err) {
5065
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
5068
+ logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
5066
5069
  return "";
5067
5070
  }
5068
5071
  }
@@ -5082,7 +5085,7 @@ function readEpicShardFromFile(projectRoot, epicId) {
5082
5085
  const match = pattern.exec(content);
5083
5086
  return match ? match[0].trim() : "";
5084
5087
  } catch (err) {
5085
- logger$17.warn({
5088
+ logger$19.warn({
5086
5089
  epicId,
5087
5090
  error: err instanceof Error ? err.message : String(err)
5088
5091
  }, "File-based epic shard fallback failed");
@@ -5105,7 +5108,7 @@ function readArchConstraintsFromFile(projectRoot) {
5105
5108
  const content = readFileSync$1(archPath, "utf-8");
5106
5109
  return content.slice(0, 1500);
5107
5110
  } catch (err) {
5108
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
5111
+ logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "File-based architecture fallback failed");
5109
5112
  return "";
5110
5113
  }
5111
5114
  }
@@ -5118,7 +5121,7 @@ async function getStoryTemplate(deps) {
5118
5121
  try {
5119
5122
  return await deps.pack.getTemplate("story");
5120
5123
  } catch (err) {
5121
- logger$17.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
5124
+ logger$19.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve story template from pack");
5122
5125
  return "";
5123
5126
  }
5124
5127
  }
@@ -5155,7 +5158,7 @@ async function isValidStoryFile(filePath) {
5155
5158
 
5156
5159
  //#endregion
5157
5160
  //#region src/modules/compiled-workflows/git-helpers.ts
5158
- const logger$16 = createLogger("compiled-workflows:git-helpers");
5161
+ const logger$18 = createLogger("compiled-workflows:git-helpers");
5159
5162
  /**
5160
5163
  * Capture the full git diff for HEAD (working tree vs current commit).
5161
5164
  *
@@ -5251,7 +5254,7 @@ async function stageIntentToAdd(files, workingDirectory) {
5251
5254
  if (files.length === 0) return;
5252
5255
  const existing = files.filter((f) => {
5253
5256
  const exists = existsSync$1(f);
5254
- if (!exists) logger$16.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
5257
+ if (!exists) logger$18.debug({ file: f }, "Skipping nonexistent file in stageIntentToAdd");
5255
5258
  return exists;
5256
5259
  });
5257
5260
  if (existing.length === 0) return;
@@ -5285,7 +5288,7 @@ async function runGitCommand(args, cwd, logLabel) {
5285
5288
  stderr += chunk.toString("utf-8");
5286
5289
  });
5287
5290
  proc.on("error", (err) => {
5288
- logger$16.warn({
5291
+ logger$18.warn({
5289
5292
  label: logLabel,
5290
5293
  cwd,
5291
5294
  error: err.message
@@ -5294,7 +5297,7 @@ async function runGitCommand(args, cwd, logLabel) {
5294
5297
  });
5295
5298
  proc.on("close", (code) => {
5296
5299
  if (code !== 0) {
5297
- logger$16.warn({
5300
+ logger$18.warn({
5298
5301
  label: logLabel,
5299
5302
  cwd,
5300
5303
  code,
@@ -5310,7 +5313,7 @@ async function runGitCommand(args, cwd, logLabel) {
5310
5313
 
5311
5314
  //#endregion
5312
5315
  //#region src/modules/implementation-orchestrator/project-findings.ts
5313
- const logger$15 = createLogger("project-findings");
5316
+ const logger$17 = createLogger("project-findings");
5314
5317
  /** Maximum character length for the findings summary */
5315
5318
  const MAX_CHARS = 2e3;
5316
5319
  /**
@@ -5376,7 +5379,7 @@ function getProjectFindings(db) {
5376
5379
  if (summary.length > MAX_CHARS) summary = summary.slice(0, MAX_CHARS - 3) + "...";
5377
5380
  return summary;
5378
5381
  } catch (err) {
5379
- logger$15.warn({ err }, "Failed to query project findings (graceful fallback)");
5382
+ logger$17.warn({ err }, "Failed to query project findings (graceful fallback)");
5380
5383
  return "";
5381
5384
  }
5382
5385
  }
@@ -5399,7 +5402,7 @@ function extractRecurringPatterns(outcomes) {
5399
5402
 
5400
5403
  //#endregion
5401
5404
  //#region src/modules/compiled-workflows/story-complexity.ts
5402
- const logger$14 = createLogger("compiled-workflows:story-complexity");
5405
+ const logger$16 = createLogger("compiled-workflows:story-complexity");
5403
5406
  /**
5404
5407
  * Compute a complexity score from story markdown content.
5405
5408
  *
@@ -5451,7 +5454,7 @@ function resolveFixStoryMaxTurns(complexityScore) {
5451
5454
  * @param resolvedMaxTurns - Turn limit resolved for this dispatch
5452
5455
  */
5453
5456
  function logComplexityResult(storyKey, complexity, resolvedMaxTurns) {
5454
- logger$14.info({
5457
+ logger$16.info({
5455
5458
  storyKey,
5456
5459
  taskCount: complexity.taskCount,
5457
5460
  subtaskCount: complexity.subtaskCount,
@@ -5509,7 +5512,7 @@ function countFilesInLayout(content) {
5509
5512
 
5510
5513
  //#endregion
5511
5514
  //#region src/modules/compiled-workflows/dev-story.ts
5512
- const logger$13 = createLogger("compiled-workflows:dev-story");
5515
+ const logger$15 = createLogger("compiled-workflows:dev-story");
5513
5516
  /** Default timeout for dev-story dispatches in milliseconds (30 min) */
5514
5517
  const DEFAULT_TIMEOUT_MS$1 = 18e5;
5515
5518
  /** Default Vitest test patterns injected when no test-pattern decisions exist */
@@ -5532,12 +5535,12 @@ const DEFAULT_VITEST_PATTERNS = `## Test Patterns (defaults)
5532
5535
  */
5533
5536
  async function runDevStory(deps, params) {
5534
5537
  const { storyKey, storyFilePath, taskScope, priorFiles } = params;
5535
- logger$13.info({
5538
+ logger$15.info({
5536
5539
  storyKey,
5537
5540
  storyFilePath
5538
5541
  }, "Starting compiled dev-story workflow");
5539
5542
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("dev-story", deps.tokenCeilings);
5540
- logger$13.info({
5543
+ logger$15.info({
5541
5544
  workflow: "dev-story",
5542
5545
  ceiling: TOKEN_CEILING,
5543
5546
  source: tokenCeilingSource
@@ -5580,10 +5583,10 @@ async function runDevStory(deps, params) {
5580
5583
  let template;
5581
5584
  try {
5582
5585
  template = await deps.pack.getPrompt("dev-story");
5583
- logger$13.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
5586
+ logger$15.debug({ storyKey }, "Retrieved dev-story prompt template from pack");
5584
5587
  } catch (err) {
5585
5588
  const error = err instanceof Error ? err.message : String(err);
5586
- logger$13.error({
5589
+ logger$15.error({
5587
5590
  storyKey,
5588
5591
  error
5589
5592
  }, "Failed to retrieve dev-story prompt template");
@@ -5594,14 +5597,14 @@ async function runDevStory(deps, params) {
5594
5597
  storyContent = await readFile$1(storyFilePath, "utf-8");
5595
5598
  } catch (err) {
5596
5599
  if (err.code === "ENOENT") {
5597
- logger$13.error({
5600
+ logger$15.error({
5598
5601
  storyKey,
5599
5602
  storyFilePath
5600
5603
  }, "Story file not found");
5601
5604
  return makeFailureResult("story_file_not_found");
5602
5605
  }
5603
5606
  const error = err instanceof Error ? err.message : String(err);
5604
- logger$13.error({
5607
+ logger$15.error({
5605
5608
  storyKey,
5606
5609
  storyFilePath,
5607
5610
  error
@@ -5609,7 +5612,7 @@ async function runDevStory(deps, params) {
5609
5612
  return makeFailureResult(`story_file_read_error: ${error}`);
5610
5613
  }
5611
5614
  if (storyContent.trim().length === 0) {
5612
- logger$13.error({
5615
+ logger$15.error({
5613
5616
  storyKey,
5614
5617
  storyFilePath
5615
5618
  }, "Story file is empty");
@@ -5624,17 +5627,17 @@ async function runDevStory(deps, params) {
5624
5627
  const testPatternDecisions = solutioningDecisions.filter((d) => d.category === "test-patterns");
5625
5628
  if (testPatternDecisions.length > 0) {
5626
5629
  testPatternsContent = "## Test Patterns\n" + testPatternDecisions.map((d) => `- ${d.key}: ${d.value}`).join("\n");
5627
- logger$13.debug({
5630
+ logger$15.debug({
5628
5631
  storyKey,
5629
5632
  count: testPatternDecisions.length
5630
5633
  }, "Loaded test patterns from decision store");
5631
5634
  } else {
5632
5635
  testPatternsContent = DEFAULT_VITEST_PATTERNS;
5633
- logger$13.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
5636
+ logger$15.debug({ storyKey }, "No test-pattern decisions found — using default Vitest patterns");
5634
5637
  }
5635
5638
  } catch (err) {
5636
5639
  const error = err instanceof Error ? err.message : String(err);
5637
- logger$13.warn({
5640
+ logger$15.warn({
5638
5641
  storyKey,
5639
5642
  error
5640
5643
  }, "Failed to load test patterns — using defaults");
@@ -5649,7 +5652,7 @@ async function runDevStory(deps, params) {
5649
5652
  const findings = getProjectFindings(deps.db);
5650
5653
  if (findings.length > 0) {
5651
5654
  priorFindingsContent = "Previous pipeline runs encountered these issues — avoid repeating them:\n\n" + findings;
5652
- logger$13.debug({
5655
+ logger$15.debug({
5653
5656
  storyKey,
5654
5657
  findingsLen: findings.length
5655
5658
  }, "Injecting prior findings into dev-story prompt");
@@ -5669,7 +5672,7 @@ async function runDevStory(deps, params) {
5669
5672
  if (plan.test_categories && plan.test_categories.length > 0) parts.push(`\n### Categories: ${plan.test_categories.join(", ")}`);
5670
5673
  if (plan.coverage_notes) parts.push(`\n### Coverage Notes\n${plan.coverage_notes}`);
5671
5674
  testPlanContent = parts.join("\n");
5672
- logger$13.debug({ storyKey }, "Injecting test plan into dev-story prompt");
5675
+ logger$15.debug({ storyKey }, "Injecting test plan into dev-story prompt");
5673
5676
  }
5674
5677
  } catch {}
5675
5678
  const sections = [
@@ -5715,7 +5718,7 @@ async function runDevStory(deps, params) {
5715
5718
  }
5716
5719
  ];
5717
5720
  const { prompt, tokenCount, truncated } = assemblePrompt(template, sections, TOKEN_CEILING);
5718
- logger$13.info({
5721
+ logger$15.info({
5719
5722
  storyKey,
5720
5723
  tokenCount,
5721
5724
  ceiling: TOKEN_CEILING,
@@ -5730,12 +5733,13 @@ async function runDevStory(deps, params) {
5730
5733
  timeout: DEFAULT_TIMEOUT_MS$1,
5731
5734
  outputSchema: DevStoryResultSchema,
5732
5735
  maxTurns: resolvedMaxTurns,
5733
- ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
5736
+ ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
5737
+ ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
5734
5738
  });
5735
5739
  dispatchResult = await handle.result;
5736
5740
  } catch (err) {
5737
5741
  const error = err instanceof Error ? err.message : String(err);
5738
- logger$13.error({
5742
+ logger$15.error({
5739
5743
  storyKey,
5740
5744
  error
5741
5745
  }, "Dispatch threw an unexpected error");
@@ -5746,11 +5750,11 @@ async function runDevStory(deps, params) {
5746
5750
  output: dispatchResult.tokenEstimate.output
5747
5751
  };
5748
5752
  if (dispatchResult.status === "timeout") {
5749
- logger$13.error({
5753
+ logger$15.error({
5750
5754
  storyKey,
5751
5755
  durationMs: dispatchResult.durationMs
5752
5756
  }, "Dev-story dispatch timed out");
5753
- if (dispatchResult.output.length > 0) logger$13.info({
5757
+ if (dispatchResult.output.length > 0) logger$15.info({
5754
5758
  storyKey,
5755
5759
  partialOutput: dispatchResult.output.slice(0, 500)
5756
5760
  }, "Partial output before timeout");
@@ -5760,12 +5764,12 @@ async function runDevStory(deps, params) {
5760
5764
  };
5761
5765
  }
5762
5766
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
5763
- logger$13.error({
5767
+ logger$15.error({
5764
5768
  storyKey,
5765
5769
  exitCode: dispatchResult.exitCode,
5766
5770
  status: dispatchResult.status
5767
5771
  }, "Dev-story dispatch failed");
5768
- if (dispatchResult.output.length > 0) logger$13.info({
5772
+ if (dispatchResult.output.length > 0) logger$15.info({
5769
5773
  storyKey,
5770
5774
  partialOutput: dispatchResult.output.slice(0, 500)
5771
5775
  }, "Partial output from failed dispatch");
@@ -5777,7 +5781,7 @@ async function runDevStory(deps, params) {
5777
5781
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
5778
5782
  const details = dispatchResult.parseError ?? "parsed result was null";
5779
5783
  const rawSnippet = dispatchResult.output ? dispatchResult.output.slice(0, 1e3) : "(empty)";
5780
- logger$13.error({
5784
+ logger$15.error({
5781
5785
  storyKey,
5782
5786
  parseError: details,
5783
5787
  rawOutputSnippet: rawSnippet
@@ -5785,12 +5789,12 @@ async function runDevStory(deps, params) {
5785
5789
  let filesModified = [];
5786
5790
  try {
5787
5791
  filesModified = await getGitChangedFiles(deps.projectRoot ?? process.cwd());
5788
- if (filesModified.length > 0) logger$13.info({
5792
+ if (filesModified.length > 0) logger$15.info({
5789
5793
  storyKey,
5790
5794
  fileCount: filesModified.length
5791
5795
  }, "Recovered files_modified from git status (YAML fallback)");
5792
5796
  } catch (err) {
5793
- logger$13.warn({
5797
+ logger$15.warn({
5794
5798
  storyKey,
5795
5799
  error: err instanceof Error ? err.message : String(err)
5796
5800
  }, "Failed to recover files_modified from git");
@@ -5807,7 +5811,7 @@ async function runDevStory(deps, params) {
5807
5811
  };
5808
5812
  }
5809
5813
  const parsed = dispatchResult.parsed;
5810
- logger$13.info({
5814
+ logger$15.info({
5811
5815
  storyKey,
5812
5816
  result: parsed.result,
5813
5817
  acMet: parsed.ac_met.length
@@ -5946,7 +5950,7 @@ function extractFilesInScope(storyContent) {
5946
5950
 
5947
5951
  //#endregion
5948
5952
  //#region src/modules/compiled-workflows/code-review.ts
5949
- const logger$12 = createLogger("compiled-workflows:code-review");
5953
+ const logger$14 = createLogger("compiled-workflows:code-review");
5950
5954
  /**
5951
5955
  * Default fallback result when dispatch fails or times out.
5952
5956
  * Uses NEEDS_MINOR_FIXES (not NEEDS_MAJOR_REWORK) so a parse/schema failure
@@ -5984,14 +5988,14 @@ function defaultFailResult(error, tokenUsage) {
5984
5988
  async function runCodeReview(deps, params) {
5985
5989
  const { storyKey, storyFilePath, workingDirectory, pipelineRunId, filesModified, previousIssues } = params;
5986
5990
  const cwd = workingDirectory ?? process.cwd();
5987
- logger$12.debug({
5991
+ logger$14.debug({
5988
5992
  storyKey,
5989
5993
  storyFilePath,
5990
5994
  cwd,
5991
5995
  pipelineRunId
5992
5996
  }, "Starting code-review workflow");
5993
5997
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("code-review", deps.tokenCeilings);
5994
- logger$12.info({
5998
+ logger$14.info({
5995
5999
  workflow: "code-review",
5996
6000
  ceiling: TOKEN_CEILING,
5997
6001
  source: tokenCeilingSource
@@ -6001,7 +6005,7 @@ async function runCodeReview(deps, params) {
6001
6005
  template = await deps.pack.getPrompt("code-review");
6002
6006
  } catch (err) {
6003
6007
  const error = err instanceof Error ? err.message : String(err);
6004
- logger$12.error({ error }, "Failed to retrieve code-review prompt template");
6008
+ logger$14.error({ error }, "Failed to retrieve code-review prompt template");
6005
6009
  return defaultFailResult(`Failed to retrieve prompt template: ${error}`, {
6006
6010
  input: 0,
6007
6011
  output: 0
@@ -6012,7 +6016,7 @@ async function runCodeReview(deps, params) {
6012
6016
  storyContent = await readFile$1(storyFilePath, "utf-8");
6013
6017
  } catch (err) {
6014
6018
  const error = err instanceof Error ? err.message : String(err);
6015
- logger$12.error({
6019
+ logger$14.error({
6016
6020
  storyFilePath,
6017
6021
  error
6018
6022
  }, "Failed to read story file");
@@ -6032,12 +6036,12 @@ async function runCodeReview(deps, params) {
6032
6036
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
6033
6037
  if (scopedTotal <= TOKEN_CEILING) {
6034
6038
  gitDiffContent = scopedDiff;
6035
- logger$12.debug({
6039
+ logger$14.debug({
6036
6040
  fileCount: filesModified.length,
6037
6041
  tokenCount: scopedTotal
6038
6042
  }, "Using scoped file diff");
6039
6043
  } else {
6040
- logger$12.warn({
6044
+ logger$14.warn({
6041
6045
  estimatedTotal: scopedTotal,
6042
6046
  ceiling: TOKEN_CEILING,
6043
6047
  fileCount: filesModified.length
@@ -6051,7 +6055,7 @@ async function runCodeReview(deps, params) {
6051
6055
  const fullTotal = nonDiffTokens + countTokens(fullDiff);
6052
6056
  if (fullTotal <= TOKEN_CEILING) gitDiffContent = fullDiff;
6053
6057
  else {
6054
- logger$12.warn({
6058
+ logger$14.warn({
6055
6059
  estimatedTotal: fullTotal,
6056
6060
  ceiling: TOKEN_CEILING
6057
6061
  }, "Full git diff would exceed token ceiling — using stat-only summary");
@@ -6059,7 +6063,7 @@ async function runCodeReview(deps, params) {
6059
6063
  }
6060
6064
  }
6061
6065
  if (gitDiffContent.trim().length === 0) {
6062
- logger$12.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
6066
+ logger$14.info({ storyKey }, "Empty git diff — skipping review with SHIP_IT");
6063
6067
  return {
6064
6068
  verdict: "SHIP_IT",
6065
6069
  issues: 0,
@@ -6084,7 +6088,7 @@ async function runCodeReview(deps, params) {
6084
6088
  const findings = getProjectFindings(deps.db);
6085
6089
  if (findings.length > 0) {
6086
6090
  priorFindingsContent = "Previous reviews found these recurring patterns — pay special attention:\n\n" + findings;
6087
- logger$12.debug({
6091
+ logger$14.debug({
6088
6092
  storyKey,
6089
6093
  findingsLen: findings.length
6090
6094
  }, "Injecting prior findings into code-review prompt");
@@ -6118,11 +6122,11 @@ async function runCodeReview(deps, params) {
6118
6122
  }
6119
6123
  ];
6120
6124
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
6121
- if (assembleResult.truncated) logger$12.warn({
6125
+ if (assembleResult.truncated) logger$14.warn({
6122
6126
  storyKey,
6123
6127
  tokenCount: assembleResult.tokenCount
6124
6128
  }, "Code-review prompt truncated to fit token ceiling");
6125
- logger$12.debug({
6129
+ logger$14.debug({
6126
6130
  storyKey,
6127
6131
  tokenCount: assembleResult.tokenCount,
6128
6132
  truncated: assembleResult.truncated
@@ -6133,14 +6137,15 @@ async function runCodeReview(deps, params) {
6133
6137
  agent: "claude-code",
6134
6138
  taskType: "code-review",
6135
6139
  outputSchema: CodeReviewResultSchema,
6136
- workingDirectory: deps.projectRoot
6140
+ workingDirectory: deps.projectRoot,
6141
+ ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
6137
6142
  });
6138
6143
  let dispatchResult;
6139
6144
  try {
6140
6145
  dispatchResult = await handle.result;
6141
6146
  } catch (err) {
6142
6147
  const error = err instanceof Error ? err.message : String(err);
6143
- logger$12.error({
6148
+ logger$14.error({
6144
6149
  storyKey,
6145
6150
  error
6146
6151
  }, "Code-review dispatch threw unexpected error");
@@ -6156,7 +6161,7 @@ async function runCodeReview(deps, params) {
6156
6161
  const rawOutput = dispatchResult.output ?? void 0;
6157
6162
  if (dispatchResult.status === "failed") {
6158
6163
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""} ${dispatchResult.output ? `Stderr: ${dispatchResult.output}` : ""}`.trim();
6159
- logger$12.warn({
6164
+ logger$14.warn({
6160
6165
  storyKey,
6161
6166
  exitCode: dispatchResult.exitCode
6162
6167
  }, "Code-review dispatch failed");
@@ -6166,7 +6171,7 @@ async function runCodeReview(deps, params) {
6166
6171
  };
6167
6172
  }
6168
6173
  if (dispatchResult.status === "timeout") {
6169
- logger$12.warn({ storyKey }, "Code-review dispatch timed out");
6174
+ logger$14.warn({ storyKey }, "Code-review dispatch timed out");
6170
6175
  return {
6171
6176
  ...defaultFailResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage),
6172
6177
  rawOutput
@@ -6174,7 +6179,7 @@ async function runCodeReview(deps, params) {
6174
6179
  }
6175
6180
  if (dispatchResult.parsed === null) {
6176
6181
  const details = dispatchResult.parseError ?? "No YAML block found in output";
6177
- logger$12.warn({
6182
+ logger$14.warn({
6178
6183
  storyKey,
6179
6184
  details
6180
6185
  }, "Code-review output schema validation failed");
@@ -6191,7 +6196,7 @@ async function runCodeReview(deps, params) {
6191
6196
  const parseResult = CodeReviewResultSchema.safeParse(dispatchResult.parsed);
6192
6197
  if (!parseResult.success) {
6193
6198
  const details = parseResult.error.message;
6194
- logger$12.warn({
6199
+ logger$14.warn({
6195
6200
  storyKey,
6196
6201
  details
6197
6202
  }, "Code-review output failed schema validation");
@@ -6206,13 +6211,13 @@ async function runCodeReview(deps, params) {
6206
6211
  };
6207
6212
  }
6208
6213
  const parsed = parseResult.data;
6209
- if (parsed.agentVerdict !== parsed.verdict) logger$12.info({
6214
+ if (parsed.agentVerdict !== parsed.verdict) logger$14.info({
6210
6215
  storyKey,
6211
6216
  agentVerdict: parsed.agentVerdict,
6212
6217
  pipelineVerdict: parsed.verdict,
6213
6218
  issues: parsed.issues
6214
6219
  }, "Pipeline overrode agent verdict based on issue severities");
6215
- logger$12.info({
6220
+ logger$14.info({
6216
6221
  storyKey,
6217
6222
  verdict: parsed.verdict,
6218
6223
  issues: parsed.issues
@@ -6237,14 +6242,14 @@ function getArchConstraints$2(deps) {
6237
6242
  if (constraints.length === 0) return "";
6238
6243
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
6239
6244
  } catch (err) {
6240
- logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
6245
+ logger$14.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
6241
6246
  return "";
6242
6247
  }
6243
6248
  }
6244
6249
 
6245
6250
  //#endregion
6246
6251
  //#region src/modules/compiled-workflows/test-plan.ts
6247
- const logger$11 = createLogger("compiled-workflows:test-plan");
6252
+ const logger$13 = createLogger("compiled-workflows:test-plan");
6248
6253
  /** Default timeout for test-plan dispatches in milliseconds (5 min — lightweight call) */
6249
6254
  const DEFAULT_TIMEOUT_MS = 3e5;
6250
6255
  /**
@@ -6256,12 +6261,12 @@ const DEFAULT_TIMEOUT_MS = 3e5;
6256
6261
  */
6257
6262
  async function runTestPlan(deps, params) {
6258
6263
  const { storyKey, storyFilePath, pipelineRunId } = params;
6259
- logger$11.info({
6264
+ logger$13.info({
6260
6265
  storyKey,
6261
6266
  storyFilePath
6262
6267
  }, "Starting compiled test-plan workflow");
6263
6268
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-plan", deps.tokenCeilings);
6264
- logger$11.info({
6269
+ logger$13.info({
6265
6270
  workflow: "test-plan",
6266
6271
  ceiling: TOKEN_CEILING,
6267
6272
  source: tokenCeilingSource
@@ -6269,10 +6274,10 @@ async function runTestPlan(deps, params) {
6269
6274
  let template;
6270
6275
  try {
6271
6276
  template = await deps.pack.getPrompt("test-plan");
6272
- logger$11.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
6277
+ logger$13.debug({ storyKey }, "Retrieved test-plan prompt template from pack");
6273
6278
  } catch (err) {
6274
6279
  const error = err instanceof Error ? err.message : String(err);
6275
- logger$11.warn({
6280
+ logger$13.warn({
6276
6281
  storyKey,
6277
6282
  error
6278
6283
  }, "Failed to retrieve test-plan prompt template");
@@ -6283,14 +6288,14 @@ async function runTestPlan(deps, params) {
6283
6288
  storyContent = await readFile$1(storyFilePath, "utf-8");
6284
6289
  } catch (err) {
6285
6290
  if (err.code === "ENOENT") {
6286
- logger$11.warn({
6291
+ logger$13.warn({
6287
6292
  storyKey,
6288
6293
  storyFilePath
6289
6294
  }, "Story file not found for test planning");
6290
6295
  return makeTestPlanFailureResult("story_file_not_found");
6291
6296
  }
6292
6297
  const error = err instanceof Error ? err.message : String(err);
6293
- logger$11.warn({
6298
+ logger$13.warn({
6294
6299
  storyKey,
6295
6300
  storyFilePath,
6296
6301
  error
@@ -6307,7 +6312,7 @@ async function runTestPlan(deps, params) {
6307
6312
  content: archConstraintsContent,
6308
6313
  priority: "optional"
6309
6314
  }], TOKEN_CEILING);
6310
- logger$11.info({
6315
+ logger$13.info({
6311
6316
  storyKey,
6312
6317
  tokenCount,
6313
6318
  ceiling: TOKEN_CEILING,
@@ -6321,12 +6326,13 @@ async function runTestPlan(deps, params) {
6321
6326
  taskType: "test-plan",
6322
6327
  timeout: DEFAULT_TIMEOUT_MS,
6323
6328
  outputSchema: TestPlanResultSchema,
6324
- ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {}
6329
+ ...deps.projectRoot !== void 0 ? { workingDirectory: deps.projectRoot } : {},
6330
+ ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
6325
6331
  });
6326
6332
  dispatchResult = await handle.result;
6327
6333
  } catch (err) {
6328
6334
  const error = err instanceof Error ? err.message : String(err);
6329
- logger$11.warn({
6335
+ logger$13.warn({
6330
6336
  storyKey,
6331
6337
  error
6332
6338
  }, "Test-plan dispatch threw an unexpected error");
@@ -6337,7 +6343,7 @@ async function runTestPlan(deps, params) {
6337
6343
  output: dispatchResult.tokenEstimate.output
6338
6344
  };
6339
6345
  if (dispatchResult.status === "timeout") {
6340
- logger$11.warn({
6346
+ logger$13.warn({
6341
6347
  storyKey,
6342
6348
  durationMs: dispatchResult.durationMs
6343
6349
  }, "Test-plan dispatch timed out");
@@ -6347,7 +6353,7 @@ async function runTestPlan(deps, params) {
6347
6353
  };
6348
6354
  }
6349
6355
  if (dispatchResult.status === "failed" || dispatchResult.exitCode !== 0) {
6350
- logger$11.warn({
6356
+ logger$13.warn({
6351
6357
  storyKey,
6352
6358
  exitCode: dispatchResult.exitCode,
6353
6359
  status: dispatchResult.status
@@ -6359,7 +6365,7 @@ async function runTestPlan(deps, params) {
6359
6365
  }
6360
6366
  if (dispatchResult.parseError !== null || dispatchResult.parsed === null) {
6361
6367
  const details = dispatchResult.parseError ?? "parsed result was null";
6362
- logger$11.warn({
6368
+ logger$13.warn({
6363
6369
  storyKey,
6364
6370
  parseError: details
6365
6371
  }, "Test-plan YAML schema validation failed");
@@ -6382,19 +6388,19 @@ async function runTestPlan(deps, params) {
6382
6388
  }),
6383
6389
  rationale: `Test plan for ${storyKey}: ${parsed.test_files.length} test files, categories: ${parsed.test_categories.join(", ")}`
6384
6390
  });
6385
- logger$11.info({
6391
+ logger$13.info({
6386
6392
  storyKey,
6387
6393
  fileCount: parsed.test_files.length,
6388
6394
  categories: parsed.test_categories
6389
6395
  }, "Test plan stored in decision store");
6390
6396
  } catch (err) {
6391
6397
  const error = err instanceof Error ? err.message : String(err);
6392
- logger$11.warn({
6398
+ logger$13.warn({
6393
6399
  storyKey,
6394
6400
  error
6395
6401
  }, "Failed to store test plan in decision store — proceeding anyway");
6396
6402
  }
6397
- logger$11.info({
6403
+ logger$13.info({
6398
6404
  storyKey,
6399
6405
  result: parsed.result
6400
6406
  }, "Test-plan workflow completed");
@@ -6434,14 +6440,14 @@ function getArchConstraints$1(deps) {
6434
6440
  if (constraints.length === 0) return "";
6435
6441
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
6436
6442
  } catch (err) {
6437
- logger$11.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
6443
+ logger$13.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints for test-plan — proceeding without them");
6438
6444
  return "";
6439
6445
  }
6440
6446
  }
6441
6447
 
6442
6448
  //#endregion
6443
6449
  //#region src/modules/compiled-workflows/test-expansion.ts
6444
- const logger$10 = createLogger("compiled-workflows:test-expansion");
6450
+ const logger$12 = createLogger("compiled-workflows:test-expansion");
6445
6451
  function defaultFallbackResult(error, tokenUsage) {
6446
6452
  return {
6447
6453
  expansion_priority: "low",
@@ -6471,14 +6477,14 @@ function defaultFallbackResult(error, tokenUsage) {
6471
6477
  async function runTestExpansion(deps, params) {
6472
6478
  const { storyKey, storyFilePath, pipelineRunId, filesModified, workingDirectory } = params;
6473
6479
  const cwd = workingDirectory ?? process.cwd();
6474
- logger$10.debug({
6480
+ logger$12.debug({
6475
6481
  storyKey,
6476
6482
  storyFilePath,
6477
6483
  cwd,
6478
6484
  pipelineRunId
6479
6485
  }, "Starting test-expansion workflow");
6480
6486
  const { ceiling: TOKEN_CEILING, source: tokenCeilingSource } = getTokenCeiling("test-expansion", deps.tokenCeilings);
6481
- logger$10.info({
6487
+ logger$12.info({
6482
6488
  workflow: "test-expansion",
6483
6489
  ceiling: TOKEN_CEILING,
6484
6490
  source: tokenCeilingSource
@@ -6488,7 +6494,7 @@ async function runTestExpansion(deps, params) {
6488
6494
  template = await deps.pack.getPrompt("test-expansion");
6489
6495
  } catch (err) {
6490
6496
  const error = err instanceof Error ? err.message : String(err);
6491
- logger$10.warn({ error }, "Failed to retrieve test-expansion prompt template");
6497
+ logger$12.warn({ error }, "Failed to retrieve test-expansion prompt template");
6492
6498
  return defaultFallbackResult(`Failed to retrieve prompt template: ${error}`, {
6493
6499
  input: 0,
6494
6500
  output: 0
@@ -6499,7 +6505,7 @@ async function runTestExpansion(deps, params) {
6499
6505
  storyContent = await readFile$1(storyFilePath, "utf-8");
6500
6506
  } catch (err) {
6501
6507
  const error = err instanceof Error ? err.message : String(err);
6502
- logger$10.warn({
6508
+ logger$12.warn({
6503
6509
  storyFilePath,
6504
6510
  error
6505
6511
  }, "Failed to read story file");
@@ -6519,12 +6525,12 @@ async function runTestExpansion(deps, params) {
6519
6525
  const scopedTotal = nonDiffTokens + countTokens(scopedDiff);
6520
6526
  if (scopedTotal <= TOKEN_CEILING) {
6521
6527
  gitDiffContent = scopedDiff;
6522
- logger$10.debug({
6528
+ logger$12.debug({
6523
6529
  fileCount: filesModified.length,
6524
6530
  tokenCount: scopedTotal
6525
6531
  }, "Using scoped file diff");
6526
6532
  } else {
6527
- logger$10.warn({
6533
+ logger$12.warn({
6528
6534
  estimatedTotal: scopedTotal,
6529
6535
  ceiling: TOKEN_CEILING,
6530
6536
  fileCount: filesModified.length
@@ -6532,7 +6538,7 @@ async function runTestExpansion(deps, params) {
6532
6538
  gitDiffContent = await getGitDiffStatSummary(cwd);
6533
6539
  }
6534
6540
  } catch (err) {
6535
- logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
6541
+ logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to get git diff — proceeding with empty diff");
6536
6542
  }
6537
6543
  const sections = [
6538
6544
  {
@@ -6552,11 +6558,11 @@ async function runTestExpansion(deps, params) {
6552
6558
  }
6553
6559
  ];
6554
6560
  const assembleResult = assemblePrompt(template, sections, TOKEN_CEILING);
6555
- if (assembleResult.truncated) logger$10.warn({
6561
+ if (assembleResult.truncated) logger$12.warn({
6556
6562
  storyKey,
6557
6563
  tokenCount: assembleResult.tokenCount
6558
6564
  }, "Test-expansion prompt truncated to fit token ceiling");
6559
- logger$10.debug({
6565
+ logger$12.debug({
6560
6566
  storyKey,
6561
6567
  tokenCount: assembleResult.tokenCount,
6562
6568
  truncated: assembleResult.truncated
@@ -6567,14 +6573,15 @@ async function runTestExpansion(deps, params) {
6567
6573
  agent: "claude-code",
6568
6574
  taskType: "test-expansion",
6569
6575
  outputSchema: TestExpansionResultSchema,
6570
- workingDirectory: deps.projectRoot
6576
+ workingDirectory: deps.projectRoot,
6577
+ ...deps.otlpEndpoint !== void 0 ? { otlpEndpoint: deps.otlpEndpoint } : {}
6571
6578
  });
6572
6579
  let dispatchResult;
6573
6580
  try {
6574
6581
  dispatchResult = await handle.result;
6575
6582
  } catch (err) {
6576
6583
  const error = err instanceof Error ? err.message : String(err);
6577
- logger$10.warn({
6584
+ logger$12.warn({
6578
6585
  storyKey,
6579
6586
  error
6580
6587
  }, "Test-expansion dispatch threw unexpected error");
@@ -6589,19 +6596,19 @@ async function runTestExpansion(deps, params) {
6589
6596
  };
6590
6597
  if (dispatchResult.status === "failed") {
6591
6598
  const errorMsg = `Dispatch status: failed. Exit code: ${dispatchResult.exitCode}. ${dispatchResult.parseError ?? ""}`.trim();
6592
- logger$10.warn({
6599
+ logger$12.warn({
6593
6600
  storyKey,
6594
6601
  exitCode: dispatchResult.exitCode
6595
6602
  }, "Test-expansion dispatch failed");
6596
6603
  return defaultFallbackResult(errorMsg, tokenUsage);
6597
6604
  }
6598
6605
  if (dispatchResult.status === "timeout") {
6599
- logger$10.warn({ storyKey }, "Test-expansion dispatch timed out");
6606
+ logger$12.warn({ storyKey }, "Test-expansion dispatch timed out");
6600
6607
  return defaultFallbackResult("Dispatch status: timeout. The agent did not complete within the allowed time.", tokenUsage);
6601
6608
  }
6602
6609
  if (dispatchResult.parsed === null) {
6603
6610
  const details = dispatchResult.parseError ?? "No YAML block found in output";
6604
- logger$10.warn({
6611
+ logger$12.warn({
6605
6612
  storyKey,
6606
6613
  details
6607
6614
  }, "Test-expansion output has no parseable YAML");
@@ -6610,14 +6617,14 @@ async function runTestExpansion(deps, params) {
6610
6617
  const parseResult = TestExpansionResultSchema.safeParse(dispatchResult.parsed);
6611
6618
  if (!parseResult.success) {
6612
6619
  const details = parseResult.error.message;
6613
- logger$10.warn({
6620
+ logger$12.warn({
6614
6621
  storyKey,
6615
6622
  details
6616
6623
  }, "Test-expansion output failed schema validation");
6617
6624
  return defaultFallbackResult(`schema_validation_failed: ${details}`, tokenUsage);
6618
6625
  }
6619
6626
  const parsed = parseResult.data;
6620
- logger$10.info({
6627
+ logger$12.info({
6621
6628
  storyKey,
6622
6629
  expansion_priority: parsed.expansion_priority,
6623
6630
  coverage_gaps: parsed.coverage_gaps.length,
@@ -6642,7 +6649,7 @@ function getArchConstraints(deps) {
6642
6649
  if (constraints.length === 0) return "";
6643
6650
  return constraints.map((d) => `${d.key}: ${d.value}`).join("\n");
6644
6651
  } catch (err) {
6645
- logger$10.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
6652
+ logger$12.warn({ error: err instanceof Error ? err.message : String(err) }, "Failed to retrieve architecture constraints");
6646
6653
  return "";
6647
6654
  }
6648
6655
  }
@@ -7930,7 +7937,7 @@ function createDoltClient(options) {
7930
7937
 
7931
7938
  //#endregion
7932
7939
  //#region src/modules/state/index.ts
7933
- const logger$9 = createLogger("state:factory");
7940
+ const logger$11 = createLogger("state:factory");
7934
7941
  /**
7935
7942
  * Synchronously check whether Dolt is available and a Dolt repo exists at the
7936
7943
  * canonical state path under `basePath`.
@@ -7977,14 +7984,14 @@ function createStateStore(config = {}) {
7977
7984
  const repoPath = config.basePath ?? process.cwd();
7978
7985
  const detection = detectDoltAvailableSync(repoPath);
7979
7986
  if (detection.available) {
7980
- logger$9.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
7987
+ logger$11.debug(`Dolt detected, using DoltStateStore (state path: ${join$1(repoPath, ".substrate", "state")})`);
7981
7988
  const client = new DoltClient({ repoPath });
7982
7989
  return new DoltStateStore({
7983
7990
  repoPath,
7984
7991
  client
7985
7992
  });
7986
7993
  } else {
7987
- logger$9.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
7994
+ logger$11.debug(`Dolt not found, using FileStateStore (reason: ${detection.reason})`);
7988
7995
  return new FileStateStore({ basePath: config.basePath });
7989
7996
  }
7990
7997
  }
@@ -7993,7 +8000,7 @@ function createStateStore(config = {}) {
7993
8000
 
7994
8001
  //#endregion
7995
8002
  //#region src/cli/commands/health.ts
7996
- const logger$8 = createLogger("health-cmd");
8003
+ const logger$10 = createLogger("health-cmd");
7997
8004
  /** Default stall threshold in seconds — also used by supervisor default */
7998
8005
  const DEFAULT_STALL_THRESHOLD_SECONDS = 600;
7999
8006
  /**
@@ -8295,7 +8302,7 @@ async function runHealthAction(options) {
8295
8302
  const msg = err instanceof Error ? err.message : String(err);
8296
8303
  if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, msg) + "\n");
8297
8304
  else process.stderr.write(`Error: ${msg}\n`);
8298
- logger$8.error({ err }, "health action failed");
8305
+ logger$10.error({ err }, "health action failed");
8299
8306
  return 1;
8300
8307
  }
8301
8308
  }
@@ -8342,7 +8349,7 @@ function registerHealthCommand(program, _version = "0.0.0", projectRoot = proces
8342
8349
 
8343
8350
  //#endregion
8344
8351
  //#region src/modules/implementation-orchestrator/seed-methodology-context.ts
8345
- const logger$7 = createLogger("implementation-orchestrator:seed");
8352
+ const logger$9 = createLogger("implementation-orchestrator:seed");
8346
8353
  /** Max chars for the architecture summary seeded into decisions */
8347
8354
  const MAX_ARCH_CHARS = 6e3;
8348
8355
  /** Max chars per epic shard (fallback when per-story extraction returns null) */
@@ -8376,12 +8383,12 @@ function seedMethodologyContext(db, projectRoot) {
8376
8383
  const testCount = seedTestPatterns(db, projectRoot);
8377
8384
  if (testCount === -1) result.skippedCategories.push("test-patterns");
8378
8385
  else result.decisionsCreated += testCount;
8379
- logger$7.info({
8386
+ logger$9.info({
8380
8387
  decisionsCreated: result.decisionsCreated,
8381
8388
  skippedCategories: result.skippedCategories
8382
8389
  }, "Methodology context seeding complete");
8383
8390
  } catch (err) {
8384
- logger$7.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
8391
+ logger$9.warn({ error: err instanceof Error ? err.message : String(err) }, "Methodology context seeding failed (non-fatal)");
8385
8392
  }
8386
8393
  return result;
8387
8394
  }
@@ -8425,7 +8432,7 @@ function seedArchitecture(db, projectRoot) {
8425
8432
  });
8426
8433
  count = 1;
8427
8434
  }
8428
- logger$7.debug({ count }, "Seeded architecture decisions");
8435
+ logger$9.debug({ count }, "Seeded architecture decisions");
8429
8436
  return count;
8430
8437
  }
8431
8438
  /**
@@ -8449,11 +8456,11 @@ function seedEpicShards(db, projectRoot) {
8449
8456
  const storedHashDecision = implementationDecisions.find((d) => d.category === "epic-shard-hash" && d.key === "epics-file");
8450
8457
  const storedHash = storedHashDecision?.value;
8451
8458
  if (storedHash === currentHash) {
8452
- logger$7.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
8459
+ logger$9.debug({ hash: currentHash }, "Epic shards up-to-date (hash unchanged) — skipping re-seed");
8453
8460
  return -1;
8454
8461
  }
8455
8462
  if (implementationDecisions.some((d) => d.category === "epic-shard")) {
8456
- logger$7.debug({
8463
+ logger$9.debug({
8457
8464
  storedHash,
8458
8465
  currentHash
8459
8466
  }, "Epics file changed — deleting stale epic-shard decisions");
@@ -8481,7 +8488,7 @@ function seedEpicShards(db, projectRoot) {
8481
8488
  value: currentHash,
8482
8489
  rationale: "SHA-256 hash of epics file content for change detection"
8483
8490
  });
8484
- logger$7.debug({
8491
+ logger$9.debug({
8485
8492
  count,
8486
8493
  hash: currentHash
8487
8494
  }, "Seeded epic shard decisions");
@@ -8505,7 +8512,7 @@ function seedTestPatterns(db, projectRoot) {
8505
8512
  value: patterns.slice(0, MAX_TEST_PATTERNS_CHARS),
8506
8513
  rationale: "Detected from project configuration at orchestrator startup"
8507
8514
  });
8508
- logger$7.debug("Seeded test patterns decision");
8515
+ logger$9.debug("Seeded test patterns decision");
8509
8516
  return 1;
8510
8517
  }
8511
8518
  /**
@@ -8678,7 +8685,7 @@ function findArtifact(projectRoot, candidates) {
8678
8685
 
8679
8686
  //#endregion
8680
8687
  //#region src/modules/agent-dispatch/interface-change-detector.ts
8681
- const logger$6 = createLogger("interface-change-detector");
8688
+ const logger$8 = createLogger("interface-change-detector");
8682
8689
  /**
8683
8690
  * Extract exported interface and type names from TypeScript source content.
8684
8691
  *
@@ -8725,7 +8732,7 @@ function detectInterfaceChanges(options) {
8725
8732
  for (const name of names) allNames.add(name);
8726
8733
  sourceDirs.push(dirname$1(relPath));
8727
8734
  } catch {
8728
- logger$6.debug({
8735
+ logger$8.debug({
8729
8736
  absPath,
8730
8737
  storyKey
8731
8738
  }, "Could not read modified file for interface extraction");
@@ -8766,7 +8773,7 @@ function detectInterfaceChanges(options) {
8766
8773
  potentiallyAffectedTests: Array.from(affectedTests)
8767
8774
  };
8768
8775
  } catch (err) {
8769
- logger$6.warn({
8776
+ logger$8.warn({
8770
8777
  err,
8771
8778
  storyKey: options.storyKey
8772
8779
  }, "Interface change detection failed — skipping");
@@ -8934,6 +8941,1159 @@ function verifyContracts(declarations, projectRoot) {
8934
8941
  return mismatches;
8935
8942
  }
8936
8943
 
8944
+ //#endregion
8945
+ //#region src/modules/telemetry/types.ts
8946
/** Summary of a child span nested under an API-call turn (e.g. a tool invocation). */
const ChildSpanSummarySchema = z.object({
	spanId: z.string(),
	name: z.string(),
	toolName: z.string().optional(),
	inputTokens: z.number(),
	outputTokens: z.number(),
	durationMs: z.number()
});
/**
 * One analyzed conversation turn: token accounting, cost, timing, and
 * context-window evolution, plus summaries of its child spans.
 */
const TurnAnalysisSchema = z.object({
	spanId: z.string(),
	// 1-based position of the turn within the story's conversation
	turnNumber: z.number().int().positive(),
	name: z.string(),
	timestamp: z.number(),
	source: z.string(),
	model: z.string().optional(),
	inputTokens: z.number(),
	outputTokens: z.number(),
	cacheReadTokens: z.number(),
	// tokens NOT served from cache on this turn
	freshTokens: z.number(),
	cacheHitRate: z.number(),
	costUsd: z.number(),
	durationMs: z.number(),
	contextSize: z.number(),
	// change in context size relative to the previous turn
	contextDelta: z.number(),
	toolName: z.string().optional(),
	// flagged when contextDelta is judged anomalous by the analyzer
	isContextSpike: z.boolean(),
	childSpans: z.array(ChildSpanSummarySchema)
});
/** Semantic bucket a token-consuming event is classified into. */
const SemanticCategorySchema = z.enum([
	"tool_outputs",
	"file_reads",
	"system_prompts",
	"conversation_history",
	"user_prompts",
	"other"
]);
/** Direction of a category's token usage over time. */
const TrendSchema = z.enum([
	"growing",
	"stable",
	"shrinking"
]);
/** A single high-cost invocation attributed to a consumer. */
const TopInvocationSchema = z.object({
	spanId: z.string(),
	name: z.string(),
	toolName: z.string().optional(),
	totalTokens: z.number(),
	inputTokens: z.number(),
	outputTokens: z.number()
});
/** Aggregated token statistics for one semantic category. */
const CategoryStatsSchema = z.object({
	category: SemanticCategorySchema,
	totalTokens: z.number(),
	// share of the story's total tokens, as a percentage
	percentage: z.number(),
	eventCount: z.number(),
	avgTokensPerEvent: z.number(),
	trend: TrendSchema
});
/** Aggregated token statistics for one concrete consumer (e.g. a specific tool). */
const ConsumerStatsSchema = z.object({
	consumerKey: z.string(),
	category: SemanticCategorySchema,
	totalTokens: z.number(),
	percentage: z.number(),
	eventCount: z.number(),
	// capped at 20 entries to bound row size when serialized to JSON
	topInvocations: z.array(TopInvocationSchema).max(20)
});
/** Per-model efficiency metrics within one story. */
const ModelEfficiencySchema = z.object({
	model: z.string(),
	cacheHitRate: z.number(),
	avgIoRatio: z.number(),
	costPer1KOutputTokens: z.number()
});
/** Per-source efficiency metrics within one story. */
const SourceEfficiencySchema = z.object({
	source: z.string(),
	compositeScore: z.number(),
	turnCount: z.number()
});
/**
 * Overall efficiency score for a story: composite 0-100 plus the three
 * sub-scores it is blended from, raw averages, and per-model/per-source detail.
 */
const EfficiencyScoreSchema = z.object({
	storyKey: z.string(),
	timestamp: z.number(),
	compositeScore: z.number().int().min(0).max(100),
	cacheHitSubScore: z.number().min(0).max(100),
	ioRatioSubScore: z.number().min(0).max(100),
	contextManagementSubScore: z.number().min(0).max(100),
	avgCacheHitRate: z.number(),
	avgIoRatio: z.number(),
	contextSpikeCount: z.number().int().nonnegative(),
	totalTurns: z.number().int().nonnegative(),
	perModelBreakdown: z.array(ModelEfficiencySchema),
	perSourceBreakdown: z.array(SourceEfficiencySchema)
});
/** Identifiers of the recommendation rules the analyzer can emit. */
const RuleIdSchema = z.enum([
	"biggest_consumers",
	"large_file_reads",
	"expensive_bash",
	"repeated_tool_calls",
	"context_growth_spike",
	"growing_categories",
	"cache_efficiency",
	"per_model_comparison"
]);
/** Severity ladder for recommendations (critical sorts first in queries). */
const RecommendationSeveritySchema = z.enum([
	"critical",
	"warning",
	"info"
]);
/**
 * A single actionable token-savings recommendation.
 * `id` is a fixed-length 16-char hash — presumably deterministic per
 * (story, rule, target) so INSERT OR REPLACE is idempotent; confirm with the generator.
 */
const RecommendationSchema = z.object({
	id: z.string().length(16),
	storyKey: z.string(),
	sprintId: z.string().optional(),
	ruleId: RuleIdSchema,
	severity: RecommendationSeveritySchema,
	title: z.string(),
	description: z.string(),
	potentialSavingsTokens: z.number().optional(),
	potentialSavingsUsd: z.number().optional(),
	actionTarget: z.string().optional(),
	generatedAt: z.string()
});
9064
+
9065
+ //#endregion
9066
+ //#region src/modules/telemetry/persistence.ts
9067
const logger$7 = createLogger("telemetry:persistence");
/**
 * Concrete SQLite-backed telemetry persistence.
 *
 * All prepared statements are compiled once at construction time.
 * Call `initSchema()` before using if the tables may not exist yet.
 *
 * NOTE(review): prepare() compiles eagerly in the constructor — if the
 * telemetry tables do not exist yet, construction itself will throw before
 * initSchema() can be called. Confirm callers create the schema first.
 *
 * Methods are `async` for a Promise-based call signature even though the
 * underlying driver calls here are synchronous (better-sqlite3 style
 * prepare/exec/transaction — confirm the concrete driver).
 */
var TelemetryPersistence = class {
	// Database handle providing prepare/exec/transaction.
	_db;
	// Prepared statements, compiled once in the constructor.
	_insertTurnAnalysis;
	_getTurnAnalysis;
	_insertEfficiencyScore;
	_getEfficiencyScore;
	_getEfficiencyScores;
	_insertRecommendation;
	_getRecommendations;
	_getAllRecommendations;
	_insertCategoryStats;
	_getCategoryStats;
	_getAllCategoryStats;
	_insertConsumerStats;
	_getConsumerStats;
	/**
	 * @param db - Open SQLite database handle. The telemetry tables must
	 *             already exist (see class-level note about eager prepare).
	 */
	constructor(db) {
		this._db = db;
		// Turn analysis: one row per (story_key, span_id), replaced on re-analysis.
		this._insertTurnAnalysis = this._db.prepare(`
			INSERT OR REPLACE INTO turn_analysis (
				story_key, span_id, turn_number, name, timestamp, source, model,
				input_tokens, output_tokens, cache_read_tokens, fresh_tokens,
				cache_hit_rate, cost_usd, duration_ms, context_size, context_delta,
				tool_name, is_context_spike, child_spans_json
			) VALUES (
				?, ?, ?, ?, ?, ?, ?,
				?, ?, ?, ?,
				?, ?, ?, ?, ?,
				?, ?, ?
			)
		`);
		this._getTurnAnalysis = this._db.prepare(`
			SELECT * FROM turn_analysis
			WHERE story_key = ?
			ORDER BY turn_number ASC
		`);
		// Efficiency scores: keyed by (story_key, timestamp) so history is kept.
		this._insertEfficiencyScore = this._db.prepare(`
			INSERT OR REPLACE INTO efficiency_scores (
				story_key, timestamp, composite_score,
				cache_hit_sub_score, io_ratio_sub_score, context_management_sub_score,
				avg_cache_hit_rate, avg_io_ratio, context_spike_count, total_turns,
				per_model_json, per_source_json
			) VALUES (
				?, ?, ?,
				?, ?, ?,
				?, ?, ?, ?,
				?, ?
			)
		`);
		// Latest score only (timestamp DESC LIMIT 1).
		this._getEfficiencyScore = this._db.prepare(`
			SELECT * FROM efficiency_scores
			WHERE story_key = ?
			ORDER BY timestamp DESC
			LIMIT 1
		`);
		this._getEfficiencyScores = this._db.prepare(`
			SELECT * FROM efficiency_scores
			ORDER BY timestamp DESC
			LIMIT ?
		`);
		this._insertRecommendation = this._db.prepare(`
			INSERT OR REPLACE INTO recommendations (
				id, story_key, sprint_id, rule_id, severity, title, description,
				potential_savings_tokens, potential_savings_usd, action_target, generated_at
			) VALUES (
				?, ?, ?, ?, ?, ?, ?,
				?, ?, ?, ?
			)
		`);
		// Order: critical > warning > info, then biggest savings first.
		this._getRecommendations = this._db.prepare(`
			SELECT * FROM recommendations
			WHERE story_key = ?
			ORDER BY
				CASE severity
					WHEN 'critical' THEN 1
					WHEN 'warning' THEN 2
					ELSE 3
				END,
				COALESCE(potential_savings_tokens, 0) DESC
		`);
		this._getAllRecommendations = this._db.prepare(`
			SELECT * FROM recommendations
			ORDER BY
				CASE severity
					WHEN 'critical' THEN 1
					WHEN 'warning' THEN 2
					ELSE 3
				END,
				COALESCE(potential_savings_tokens, 0) DESC
			LIMIT ?
		`);
		// OR IGNORE: first write for a (story_key, category) wins.
		this._insertCategoryStats = this._db.prepare(`
			INSERT OR IGNORE INTO category_stats (
				story_key, category, total_tokens, percentage, event_count,
				avg_tokens_per_event, trend
			) VALUES (?, ?, ?, ?, ?, ?, ?)
		`);
		this._getCategoryStats = this._db.prepare(`
			SELECT * FROM category_stats
			WHERE story_key = ?
			ORDER BY total_tokens DESC
		`);
		// Cross-story rollup. NOTE(review): MAX(trend) picks the lexicographically
		// largest trend string per category — confirm that is the intended blend.
		this._getAllCategoryStats = this._db.prepare(`
			SELECT category, SUM(total_tokens) AS total_tokens,
				AVG(percentage) AS percentage,
				SUM(event_count) AS event_count,
				AVG(avg_tokens_per_event) AS avg_tokens_per_event,
				MAX(trend) AS trend
			FROM category_stats
			GROUP BY category
			ORDER BY total_tokens DESC
		`);
		this._insertConsumerStats = this._db.prepare(`
			INSERT OR IGNORE INTO consumer_stats (
				story_key, consumer_key, category, total_tokens, percentage,
				event_count, top_invocations_json
			) VALUES (?, ?, ?, ?, ?, ?, ?)
		`);
		this._getConsumerStats = this._db.prepare(`
			SELECT * FROM consumer_stats
			WHERE story_key = ?
			ORDER BY total_tokens DESC
		`);
	}
	/**
	 * Apply the telemetry schema DDL to the database.
	 * Idempotent — uses CREATE TABLE IF NOT EXISTS.
	 */
	initSchema() {
		this._db.exec(`
			CREATE TABLE IF NOT EXISTS turn_analysis (
				story_key VARCHAR(64) NOT NULL,
				span_id VARCHAR(128) NOT NULL,
				turn_number INTEGER NOT NULL,
				name VARCHAR(255) NOT NULL DEFAULT '',
				timestamp BIGINT NOT NULL DEFAULT 0,
				source VARCHAR(32) NOT NULL DEFAULT '',
				model VARCHAR(64),
				input_tokens INTEGER NOT NULL DEFAULT 0,
				output_tokens INTEGER NOT NULL DEFAULT 0,
				cache_read_tokens INTEGER NOT NULL DEFAULT 0,
				fresh_tokens INTEGER NOT NULL DEFAULT 0,
				cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
				cost_usd DOUBLE NOT NULL DEFAULT 0,
				duration_ms INTEGER NOT NULL DEFAULT 0,
				context_size INTEGER NOT NULL DEFAULT 0,
				context_delta INTEGER NOT NULL DEFAULT 0,
				tool_name VARCHAR(128),
				is_context_spike BOOLEAN NOT NULL DEFAULT 0,
				child_spans_json TEXT NOT NULL DEFAULT '[]',
				PRIMARY KEY (story_key, span_id)
			);

			CREATE INDEX IF NOT EXISTS idx_turn_analysis_story
				ON turn_analysis (story_key, turn_number);

			CREATE TABLE IF NOT EXISTS efficiency_scores (
				story_key VARCHAR(64) NOT NULL,
				timestamp BIGINT NOT NULL,
				composite_score INTEGER NOT NULL DEFAULT 0,
				cache_hit_sub_score DOUBLE NOT NULL DEFAULT 0,
				io_ratio_sub_score DOUBLE NOT NULL DEFAULT 0,
				context_management_sub_score DOUBLE NOT NULL DEFAULT 0,
				avg_cache_hit_rate DOUBLE NOT NULL DEFAULT 0,
				avg_io_ratio DOUBLE NOT NULL DEFAULT 0,
				context_spike_count INTEGER NOT NULL DEFAULT 0,
				total_turns INTEGER NOT NULL DEFAULT 0,
				per_model_json TEXT NOT NULL DEFAULT '[]',
				per_source_json TEXT NOT NULL DEFAULT '[]',
				PRIMARY KEY (story_key, timestamp)
			);

			CREATE INDEX IF NOT EXISTS idx_efficiency_story
				ON efficiency_scores (story_key, timestamp DESC);

			CREATE TABLE IF NOT EXISTS recommendations (
				id VARCHAR(16) NOT NULL,
				story_key VARCHAR(64) NOT NULL,
				sprint_id VARCHAR(64),
				rule_id VARCHAR(64) NOT NULL,
				severity VARCHAR(16) NOT NULL,
				title TEXT NOT NULL,
				description TEXT NOT NULL,
				potential_savings_tokens INTEGER,
				potential_savings_usd DOUBLE,
				action_target TEXT,
				generated_at VARCHAR(32) NOT NULL,
				PRIMARY KEY (id)
			);

			CREATE INDEX IF NOT EXISTS idx_recommendations_story
				ON recommendations (story_key, severity);

			CREATE TABLE IF NOT EXISTS category_stats (
				story_key VARCHAR(100) NOT NULL,
				category VARCHAR(30) NOT NULL,
				total_tokens BIGINT NOT NULL DEFAULT 0,
				percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
				event_count INTEGER NOT NULL DEFAULT 0,
				avg_tokens_per_event DECIMAL(12,2) NOT NULL DEFAULT 0,
				trend VARCHAR(10) NOT NULL DEFAULT 'stable',
				PRIMARY KEY (story_key, category)
			);

			CREATE INDEX IF NOT EXISTS idx_category_stats_story
				ON category_stats (story_key, total_tokens);

			CREATE TABLE IF NOT EXISTS consumer_stats (
				story_key VARCHAR(100) NOT NULL,
				consumer_key VARCHAR(300) NOT NULL,
				category VARCHAR(30) NOT NULL,
				total_tokens BIGINT NOT NULL DEFAULT 0,
				percentage DECIMAL(6,3) NOT NULL DEFAULT 0,
				event_count INTEGER NOT NULL DEFAULT 0,
				top_invocations_json TEXT,
				PRIMARY KEY (story_key, consumer_key)
			);

			CREATE INDEX IF NOT EXISTS idx_consumer_stats_story
				ON consumer_stats (story_key, total_tokens);
		`);
	}
	/**
	 * Batch-insert turn analyses for a story in a single transaction.
	 * INSERT OR REPLACE keyed on (story_key, span_id) makes re-analysis idempotent.
	 * No-op when `turns` is empty.
	 */
	async storeTurnAnalysis(storyKey, turns) {
		if (turns.length === 0) return;
		const insertAll = this._db.transaction((rows) => {
			for (const turn of rows) this._insertTurnAnalysis.run(storyKey, turn.spanId, turn.turnNumber, turn.name, turn.timestamp, turn.source, turn.model ?? null, turn.inputTokens, turn.outputTokens, turn.cacheReadTokens, turn.freshTokens, turn.cacheHitRate, turn.costUsd, turn.durationMs, turn.contextSize, turn.contextDelta, turn.toolName ?? null, turn.isContextSpike ? 1 : 0, JSON.stringify(turn.childSpans));
		});
		insertAll(turns);
		logger$7.debug({
			storyKey,
			count: turns.length
		}, "Stored turn analysis");
	}
	/**
	 * Retrieve turn analyses for a story ordered by turn_number ascending.
	 * Rows are mapped from snake_case columns and validated with
	 * TurnAnalysisSchema.parse(). Returns [] when no rows exist.
	 */
	async getTurnAnalysis(storyKey) {
		const rows = this._getTurnAnalysis.all(storyKey);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				spanId: row.span_id,
				turnNumber: row.turn_number,
				name: row.name,
				timestamp: row.timestamp,
				source: row.source,
				model: row.model ?? void 0,
				inputTokens: row.input_tokens,
				outputTokens: row.output_tokens,
				cacheReadTokens: row.cache_read_tokens,
				freshTokens: row.fresh_tokens,
				cacheHitRate: row.cache_hit_rate,
				costUsd: row.cost_usd,
				durationMs: row.duration_ms,
				contextSize: row.context_size,
				contextDelta: row.context_delta,
				toolName: row.tool_name ?? void 0,
				// stored as 0/1 — convert back to boolean
				isContextSpike: row.is_context_spike === 1,
				childSpans: JSON.parse(row.child_spans_json)
			};
			return TurnAnalysisSchema.parse(raw);
		});
	}
	/**
	 * Persist one efficiency score snapshot.
	 * Breakdowns are serialized to JSON columns.
	 */
	async storeEfficiencyScore(score) {
		this._insertEfficiencyScore.run(score.storyKey, score.timestamp, score.compositeScore, score.cacheHitSubScore, score.ioRatioSubScore, score.contextManagementSubScore, score.avgCacheHitRate, score.avgIoRatio, score.contextSpikeCount, score.totalTurns, JSON.stringify(score.perModelBreakdown), JSON.stringify(score.perSourceBreakdown));
		logger$7.debug({
			storyKey: score.storyKey,
			compositeScore: score.compositeScore
		}, "Stored efficiency score");
	}
	/**
	 * Retrieve the most recent efficiency score for a story, or null when
	 * none exists. Validated with EfficiencyScoreSchema.parse().
	 */
	async getEfficiencyScore(storyKey) {
		const row = this._getEfficiencyScore.get(storyKey);
		if (row === void 0) return null;
		const raw = {
			storyKey: row.story_key,
			timestamp: row.timestamp,
			compositeScore: row.composite_score,
			cacheHitSubScore: row.cache_hit_sub_score,
			ioRatioSubScore: row.io_ratio_sub_score,
			contextManagementSubScore: row.context_management_sub_score,
			avgCacheHitRate: row.avg_cache_hit_rate,
			avgIoRatio: row.avg_io_ratio,
			contextSpikeCount: row.context_spike_count,
			totalTurns: row.total_turns,
			perModelBreakdown: JSON.parse(row.per_model_json),
			perSourceBreakdown: JSON.parse(row.per_source_json)
		};
		return EfficiencyScoreSchema.parse(raw);
	}
	/**
	 * Retrieve multiple efficiency scores ordered by timestamp DESC.
	 * Returns up to `limit` records (default 20).
	 */
	async getEfficiencyScores(limit = 20) {
		const rows = this._getEfficiencyScores.all(limit);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				storyKey: row.story_key,
				timestamp: row.timestamp,
				compositeScore: row.composite_score,
				cacheHitSubScore: row.cache_hit_sub_score,
				ioRatioSubScore: row.io_ratio_sub_score,
				contextManagementSubScore: row.context_management_sub_score,
				avgCacheHitRate: row.avg_cache_hit_rate,
				avgIoRatio: row.avg_io_ratio,
				contextSpikeCount: row.context_spike_count,
				totalTurns: row.total_turns,
				perModelBreakdown: JSON.parse(row.per_model_json),
				perSourceBreakdown: JSON.parse(row.per_source_json)
			};
			return EfficiencyScoreSchema.parse(raw);
		});
	}
	/**
	 * Batch-insert all recommendations for a story in a single transaction.
	 * Uses INSERT OR REPLACE for idempotency (IDs are deterministic hashes).
	 */
	async saveRecommendations(storyKey, recs) {
		if (recs.length === 0) return;
		const insertAll = this._db.transaction((rows) => {
			for (const rec of rows) this._insertRecommendation.run(rec.id, rec.storyKey, rec.sprintId ?? null, rec.ruleId, rec.severity, rec.title, rec.description, rec.potentialSavingsTokens ?? null, rec.potentialSavingsUsd ?? null, rec.actionTarget ?? null, rec.generatedAt);
		});
		insertAll(recs);
		logger$7.debug({
			storyKey,
			count: recs.length
		}, "Saved recommendations");
	}
	/**
	 * Retrieve recommendations for a story ordered by severity (critical first)
	 * then by potentialSavingsTokens descending.
	 * Each row is validated with RecommendationSchema.parse().
	 */
	async getRecommendations(storyKey) {
		const rows = this._getRecommendations.all(storyKey);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				id: row.id,
				storyKey: row.story_key,
				sprintId: row.sprint_id ?? void 0,
				ruleId: row.rule_id,
				severity: row.severity,
				title: row.title,
				description: row.description,
				// Number() guards against BIGINT columns surfacing as bigint
				potentialSavingsTokens: row.potential_savings_tokens != null ? Number(row.potential_savings_tokens) : void 0,
				potentialSavingsUsd: row.potential_savings_usd != null ? Number(row.potential_savings_usd) : void 0,
				actionTarget: row.action_target ?? void 0,
				generatedAt: row.generated_at
			};
			return RecommendationSchema.parse(raw);
		});
	}
	/**
	 * Retrieve all recommendations across all stories, ordered by severity (critical first)
	 * then by potentialSavingsTokens descending. Returns up to `limit` records (default 20).
	 */
	async getAllRecommendations(limit = 20) {
		const rows = this._getAllRecommendations.all(limit);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				id: row.id,
				storyKey: row.story_key,
				sprintId: row.sprint_id ?? void 0,
				ruleId: row.rule_id,
				severity: row.severity,
				title: row.title,
				description: row.description,
				potentialSavingsTokens: row.potential_savings_tokens != null ? Number(row.potential_savings_tokens) : void 0,
				potentialSavingsUsd: row.potential_savings_usd != null ? Number(row.potential_savings_usd) : void 0,
				actionTarget: row.action_target ?? void 0,
				generatedAt: row.generated_at
			};
			return RecommendationSchema.parse(raw);
		});
	}
	/**
	 * Batch-insert category stats for a story.
	 * Uses INSERT OR IGNORE — existing rows for the same (story_key, category) are preserved.
	 */
	async storeCategoryStats(storyKey, stats) {
		if (stats.length === 0) return;
		const insertAll = this._db.transaction((rows) => {
			for (const stat$2 of rows) this._insertCategoryStats.run(storyKey, stat$2.category, stat$2.totalTokens, stat$2.percentage, stat$2.eventCount, stat$2.avgTokensPerEvent, stat$2.trend);
		});
		insertAll(stats);
		logger$7.debug({
			storyKey,
			count: stats.length
		}, "Stored category stats");
	}
	/**
	 * Retrieve category stats for a story ordered by total_tokens descending.
	 * An empty storyKey ("") returns the cross-story aggregate per category instead.
	 * Each row is validated with CategoryStatsSchema.parse().
	 * Returns [] when no rows exist for the given storyKey.
	 */
	async getCategoryStats(storyKey) {
		const rows = storyKey === "" ? this._getAllCategoryStats.all() : this._getCategoryStats.all(storyKey);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				category: row.category,
				totalTokens: Number(row.total_tokens),
				percentage: Number(row.percentage),
				eventCount: Number(row.event_count),
				avgTokensPerEvent: Number(row.avg_tokens_per_event),
				trend: row.trend
			};
			return CategoryStatsSchema.parse(raw);
		});
	}
	/**
	 * Batch-insert consumer stats for a story.
	 * topInvocations is serialized to JSON.
	 * Uses INSERT OR IGNORE — existing rows for the same (story_key, consumer_key) are preserved.
	 */
	async storeConsumerStats(storyKey, consumers) {
		if (consumers.length === 0) return;
		const insertAll = this._db.transaction((rows) => {
			for (const consumer of rows) this._insertConsumerStats.run(storyKey, consumer.consumerKey, consumer.category, consumer.totalTokens, consumer.percentage, consumer.eventCount, JSON.stringify(consumer.topInvocations));
		});
		insertAll(consumers);
		logger$7.debug({
			storyKey,
			count: consumers.length
		}, "Stored consumer stats");
	}
	/**
	 * Retrieve consumer stats for a story ordered by total_tokens descending.
	 * Deserializes top_invocations_json back to TopInvocation[].
	 * Each row is validated with ConsumerStatsSchema.parse().
	 * Returns [] when no rows exist for the given storyKey.
	 */
	async getConsumerStats(storyKey) {
		const rows = this._getConsumerStats.all(storyKey);
		if (rows.length === 0) return [];
		return rows.map((row) => {
			const raw = {
				consumerKey: row.consumer_key,
				category: row.category,
				totalTokens: Number(row.total_tokens),
				percentage: Number(row.percentage),
				eventCount: Number(row.event_count),
				// column is nullable — treat NULL as an empty list
				topInvocations: JSON.parse(row.top_invocations_json ?? "[]")
			};
			return ConsumerStatsSchema.parse(raw);
		});
	}
};
9521
+
9522
+ //#endregion
9523
+ //#region src/errors/app-error.ts
9524
+ /**
9525
+ * AppError — base error class for substrate with machine-readable error codes.
9526
+ *
9527
+ * Architecture decision: AppError base class with numeric exit codes
9528
+ * (process.exit codes: 0 success, 1 user error, 2 internal error).
9529
+ * All structured errors in substrate should extend this class.
9530
+ */
9531
/**
 * Base error class for substrate carrying a machine-readable error code
 * and the process exit code a CLI entry point should terminate with.
 *
 * @example
 * throw new AppError('ERR_TELEMETRY_NOT_STARTED', 2, 'IngestionServer is not started')
 */
var AppError = class extends Error {
	/** Machine-readable error code (e.g. ERR_DB_LOCKED, ERR_INVALID_INPUT) */
	code;
	/** Process exit code: 0 success, 1 user error, 2 internal error */
	exitCode;
	/**
	 * @param code - machine-readable error identifier
	 * @param exitCode - numeric process exit code for CLI handlers
	 * @param message - human-readable description
	 */
	constructor(code, exitCode, message) {
		super(message);
		Object.assign(this, {
			name: "AppError",
			code,
			exitCode
		});
	}
};
9549
+
9550
+ //#endregion
9551
+ //#region src/modules/telemetry/ingestion-server.ts
9552
const logger$6 = createLogger("telemetry:ingestion-server");
/**
 * Error thrown by IngestionServer for server lifecycle violations.
 * Extends AppError to align with the project-standard error-handling pattern
 * (AppError base class with numeric exit codes).
 */
var TelemetryError = class extends AppError {
	/**
	 * @param code - machine-readable error code (e.g. ERR_TELEMETRY_NOT_STARTED)
	 * @param message - human-readable description
	 */
	constructor(code, message) {
		// Exit code is fixed at 2 (internal error) for all telemetry failures.
		super(code, 2, message);
		this.name = "TelemetryError";
	}
};
9564
/**
 * Local HTTP server that accepts OTLP payloads from Claude Code sub-agents.
 *
 * Binds to `port` (default 4318) on the IPv4 loopback interface only.
 * Use port 0 in tests for an OS-assigned port.
 */
var IngestionServer = class {
	/** Live node:http server while started, null otherwise. */
	_server = null;
	/** Port requested at construction time (0 = OS-assigned). */
	_port;
	/**
	 * @param options.port - TCP port to bind (default 4318, the OTLP/HTTP default)
	 */
	constructor(options = {}) {
		this._port = options.port ?? 4318;
	}
	/**
	 * Start the HTTP ingestion server.
	 * Resolves when the server is listening and ready to accept connections.
	 * Calling start() while already started is a logged no-op.
	 * Rejects with the underlying error (e.g. EADDRINUSE) if binding fails.
	 */
	async start() {
		if (this._server !== null) {
			logger$6.warn("IngestionServer.start() called while already started — ignoring");
			return;
		}
		return new Promise((resolve$2, reject) => {
			const server = createServer(this._handleRequest.bind(this));
			// Startup-only handler: fires for bind failures before "listening".
			const onStartupError = (err) => {
				logger$6.error({ err }, "IngestionServer failed to start");
				reject(err);
			};
			server.once("error", onStartupError);
			server.listen(this._port, "127.0.0.1", () => {
				// Fix: drop the startup handler once listening, so a later socket
				// error neither rejects an already-settled promise nor logs a
				// misleading "failed to start" message.
				server.removeListener("error", onStartupError);
				server.on("error", (err) => {
					logger$6.error({ err }, "IngestionServer runtime error");
				});
				this._server = server;
				const addr = server.address();
				logger$6.info({ port: addr.port }, "IngestionServer listening");
				resolve$2();
			});
		});
	}
	/**
	 * Stop the HTTP ingestion server.
	 * Resolves when the server has closed all connections.
	 * A no-op when the server was never started (or already stopped).
	 */
	async stop() {
		const server = this._server;
		if (server === null) return;
		// Clear the handle first so concurrent stop() calls are no-ops.
		this._server = null;
		return new Promise((resolve$2, reject) => {
			server.close((err) => {
				if (err !== void 0 && err !== null) reject(err);
				else {
					logger$6.info("IngestionServer stopped");
					resolve$2();
				}
			});
		});
	}
	/**
	 * Return the 5 OTLP environment variables to inject into sub-agent processes.
	 *
	 * @throws {TelemetryError} ERR_TELEMETRY_NOT_STARTED if the server is not started.
	 */
	getOtlpEnvVars() {
		const addr = this._server?.address();
		// address() returns null before listening and a string for pipe/UDS binds.
		if (addr === null || addr === void 0 || typeof addr === "string") throw new TelemetryError("ERR_TELEMETRY_NOT_STARTED", "IngestionServer is not started — call start() before getOtlpEnvVars()");
		// Fix: advertise the exact loopback address we bound to. "localhost" can
		// resolve to ::1 on IPv6-preferring systems, leaving sub-agents unable to
		// reach a server that listens only on 127.0.0.1.
		const endpoint = `http://127.0.0.1:${addr.port}`;
		return {
			CLAUDE_CODE_ENABLE_TELEMETRY: "1",
			OTEL_LOGS_EXPORTER: "otlp",
			OTEL_METRICS_EXPORTER: "otlp",
			OTEL_EXPORTER_OTLP_PROTOCOL: "http/json",
			OTEL_EXPORTER_OTLP_ENDPOINT: endpoint
		};
	}
	/**
	 * Accept any OTLP POST, buffer the body, log its size at trace level, and
	 * acknowledge with an empty JSON object (payload parsing happens elsewhere).
	 */
	_handleRequest(req, res) {
		const chunks = [];
		req.on("data", (chunk) => {
			chunks.push(chunk);
		});
		req.on("end", () => {
			const body = Buffer.concat(chunks).toString("utf-8");
			logger$6.trace({
				url: req.url,
				bodyLength: body.length
			}, "OTLP payload received");
			res.writeHead(200, { "Content-Type": "application/json" });
			res.end("{}");
		});
		req.on("error", (err) => {
			logger$6.warn({ err }, "Error reading OTLP request body");
			res.writeHead(400);
			res.end("Bad Request");
		});
	}
};
9654
+
9655
+ //#endregion
9656
+ //#region src/modules/telemetry/efficiency-scorer.ts
9657
+ var EfficiencyScorer = class {
9658
+ _logger;
9659
+ constructor(logger$26) {
9660
+ this._logger = logger$26;
9661
+ }
9662
+ /**
9663
+ * Compute an efficiency score for a story given its turn analyses.
9664
+ *
9665
+ * Returns a zeroed `EfficiencyScore` immediately when `turns` is empty.
9666
+ *
9667
+ * @param storyKey - The story identifier (e.g. "27-6")
9668
+ * @param turns - Turn analysis records from `TurnAnalyzer.analyze()`
9669
+ */
9670
+ score(storyKey, turns) {
9671
+ if (turns.length === 0) return {
9672
+ storyKey,
9673
+ timestamp: Date.now(),
9674
+ compositeScore: 0,
9675
+ cacheHitSubScore: 0,
9676
+ ioRatioSubScore: 0,
9677
+ contextManagementSubScore: 0,
9678
+ avgCacheHitRate: 0,
9679
+ avgIoRatio: 0,
9680
+ contextSpikeCount: 0,
9681
+ totalTurns: 0,
9682
+ perModelBreakdown: [],
9683
+ perSourceBreakdown: []
9684
+ };
9685
+ const avgCacheHitRate = this._computeAvgCacheHitRate(turns);
9686
+ const avgIoRatio = this._computeAvgIoRatio(turns);
9687
+ const contextSpikeCount = turns.filter((t) => t.isContextSpike).length;
9688
+ const totalTurns = turns.length;
9689
+ const cacheHitSubScore = this._computeCacheHitSubScore(turns);
9690
+ const ioRatioSubScore = this._computeIoRatioSubScore(turns);
9691
+ const contextManagementSubScore = this._computeContextManagementSubScore(turns);
9692
+ const compositeScore = Math.round(cacheHitSubScore * .4 + ioRatioSubScore * .3 + contextManagementSubScore * .3);
9693
+ const perModelBreakdown = this._buildPerModelBreakdown(turns);
9694
+ const perSourceBreakdown = this._buildPerSourceBreakdown(turns);
9695
+ this._logger.info({
9696
+ storyKey,
9697
+ compositeScore,
9698
+ contextSpikeCount
9699
+ }, "Computed efficiency score");
9700
+ return {
9701
+ storyKey,
9702
+ timestamp: Date.now(),
9703
+ compositeScore,
9704
+ cacheHitSubScore,
9705
+ ioRatioSubScore,
9706
+ contextManagementSubScore,
9707
+ avgCacheHitRate,
9708
+ avgIoRatio,
9709
+ contextSpikeCount,
9710
+ totalTurns,
9711
+ perModelBreakdown,
9712
+ perSourceBreakdown
9713
+ };
9714
+ }
9715
+ /**
9716
+ * Average cache hit rate across all turns, clamped to [0, 100].
9717
+ * Formula: clamp(avgCacheHitRate × 100, 0, 100)
9718
+ */
9719
+ _computeCacheHitSubScore(turns) {
9720
+ const avg = this._computeAvgCacheHitRate(turns);
9721
+ return this._clamp(avg * 100, 0, 100);
9722
+ }
9723
+ /**
9724
+ * I/O ratio sub-score: lower ratio = better = higher score.
9725
+ * Formula: clamp(100 - (avgIoRatio - 1) × 20, 0, 100)
9726
+ *
9727
+ * At avgIoRatio=1: score=80 (equal input/output tokens)
9728
+ * At avgIoRatio=5: score=20
9729
+ * At avgIoRatio≥6: clamped to 0
9730
+ */
9731
+ _computeIoRatioSubScore(turns) {
9732
+ const avg = this._computeAvgIoRatio(turns);
9733
+ return this._clamp(100 - (avg - 1) * 20, 0, 100);
9734
+ }
9735
+ /**
9736
+ * Context management sub-score: penalizes context spike frequency.
9737
+ * Formula: clamp(100 - spikeRatio × 100, 0, 100)
9738
+ * where spikeRatio = contextSpikeCount / max(totalTurns, 1)
9739
+ */
9740
+ _computeContextManagementSubScore(turns) {
9741
+ const totalTurns = Math.max(turns.length, 1);
9742
+ const spikeCount = turns.filter((t) => t.isContextSpike).length;
9743
+ const spikeRatio = spikeCount / totalTurns;
9744
+ return this._clamp(100 - spikeRatio * 100, 0, 100);
9745
+ }
9746
+ _computeAvgCacheHitRate(turns) {
9747
+ if (turns.length === 0) return 0;
9748
+ const sum = turns.reduce((acc, t) => acc + t.cacheHitRate, 0);
9749
+ return sum / turns.length;
9750
+ }
9751
+ /**
9752
+ * Average I/O ratio: inputTokens / max(outputTokens, 1) per turn.
9753
+ */
9754
+ _computeAvgIoRatio(turns) {
9755
+ if (turns.length === 0) return 0;
9756
+ const sum = turns.reduce((acc, t) => acc + t.inputTokens / Math.max(t.outputTokens, 1), 0);
9757
+ return sum / turns.length;
9758
+ }
9759
+ /**
9760
+ * Group turns by model, computing per-group efficiency metrics.
9761
+ * Turns with null/undefined model are grouped under "unknown".
9762
+ */
9763
+ _buildPerModelBreakdown(turns) {
9764
+ const groups = new Map();
9765
+ for (const turn of turns) {
9766
+ const key = turn.model != null && turn.model !== "" ? turn.model : "unknown";
9767
+ const existing = groups.get(key);
9768
+ if (existing !== void 0) existing.push(turn);
9769
+ else groups.set(key, [turn]);
9770
+ }
9771
+ const result = [];
9772
+ for (const [model, groupTurns] of groups) {
9773
+ const cacheHitRate = groupTurns.reduce((acc, t) => acc + t.cacheHitRate, 0) / groupTurns.length;
9774
+ const avgIoRatio = groupTurns.reduce((acc, t) => acc + t.inputTokens / Math.max(t.outputTokens, 1), 0) / groupTurns.length;
9775
+ const totalCostUsd = groupTurns.reduce((acc, t) => acc + t.costUsd, 0);
9776
+ const totalOutputTokens = groupTurns.reduce((acc, t) => acc + t.outputTokens, 0);
9777
+ const costPer1KOutputTokens = totalCostUsd / Math.max(totalOutputTokens, 1) * 1e3;
9778
+ result.push({
9779
+ model,
9780
+ cacheHitRate,
9781
+ avgIoRatio,
9782
+ costPer1KOutputTokens
9783
+ });
9784
+ }
9785
+ return result;
9786
+ }
9787
+ /**
9788
+ * Group turns by source, computing a per-group composite score using the
9789
+ * same formula as the overall score. Sources with zero turns are excluded.
9790
+ */
9791
+ _buildPerSourceBreakdown(turns) {
9792
+ const groups = new Map();
9793
+ for (const turn of turns) {
9794
+ const key = turn.source;
9795
+ const existing = groups.get(key);
9796
+ if (existing !== void 0) existing.push(turn);
9797
+ else groups.set(key, [turn]);
9798
+ }
9799
+ const result = [];
9800
+ for (const [source, groupTurns] of groups) {
9801
+ if (groupTurns.length === 0) continue;
9802
+ const cacheHitSub = this._computeCacheHitSubScoreForGroup(groupTurns);
9803
+ const ioRatioSub = this._computeIoRatioSubScoreForGroup(groupTurns);
9804
+ const contextSub = this._computeContextManagementSubScoreForGroup(groupTurns);
9805
+ const compositeScore = Math.round(cacheHitSub * .4 + ioRatioSub * .3 + contextSub * .3);
9806
+ result.push({
9807
+ source,
9808
+ compositeScore,
9809
+ turnCount: groupTurns.length
9810
+ });
9811
+ }
9812
+ return result;
9813
+ }
9814
+ _computeCacheHitSubScoreForGroup(turns) {
9815
+ if (turns.length === 0) return 0;
9816
+ const avg = turns.reduce((acc, t) => acc + t.cacheHitRate, 0) / turns.length;
9817
+ return this._clamp(avg * 100, 0, 100);
9818
+ }
9819
+ _computeIoRatioSubScoreForGroup(turns) {
9820
+ if (turns.length === 0) return 0;
9821
+ const avg = turns.reduce((acc, t) => acc + t.inputTokens / Math.max(t.outputTokens, 1), 0) / turns.length;
9822
+ return this._clamp(100 - (avg - 1) * 20, 0, 100);
9823
+ }
9824
+ _computeContextManagementSubScoreForGroup(turns) {
9825
+ if (turns.length === 0) return 0;
9826
+ const spikeCount = turns.filter((t) => t.isContextSpike).length;
9827
+ const spikeRatio = spikeCount / turns.length;
9828
+ return this._clamp(100 - spikeRatio * 100, 0, 100);
9829
+ }
9830
+ _clamp(value, min, max) {
9831
+ return Math.max(min, Math.min(max, value));
9832
+ }
9833
+ };
9834
+
9835
+ //#endregion
9836
+ //#region src/modules/telemetry/categorizer.ts
9837
/** Tier-1 classification: exact operation-name → semantic-category lookups. */
const EXACT_CATEGORY_MAP = new Map([
	["read_file", "file_reads"],
	["write_file", "tool_outputs"],
	["bash", "tool_outputs"],
	["tool_use", "tool_outputs"],
	["tool_result", "tool_outputs"],
	["system_prompt", "system_prompts"],
	["human_turn", "user_prompts"],
	["user_message", "user_prompts"],
	["assistant_turn", "conversation_history"],
	["assistant_message", "conversation_history"],
	["search_files", "file_reads"],
	["list_files", "file_reads"],
	["run_command", "tool_outputs"],
	["memory_read", "system_prompts"],
	["web_fetch", "tool_outputs"]
]);
/** Tier-2 classification: prefix regexes, checked in declaration order. */
const PREFIX_PATTERNS = [
	{
		pattern: /^(bash|exec|run|spawn)/i,
		category: "tool_outputs"
	},
	{
		pattern: /^(read|open|cat|head|tail).*file/i,
		category: "file_reads"
	},
	{
		pattern: /^(list|glob|find).*file/i,
		category: "file_reads"
	},
	{
		pattern: /^tool/i,
		category: "tool_outputs"
	},
	{
		pattern: /^system/i,
		category: "system_prompts"
	},
	{
		pattern: /^(human|user)/i,
		category: "user_prompts"
	},
	{
		pattern: /^(assistant|ai|model)/i,
		category: "conversation_history"
	}
];
/** All six semantic categories in a stable order for zero-fill initialisation. */
const ALL_CATEGORIES = [
	"tool_outputs",
	"file_reads",
	"system_prompts",
	"conversation_history",
	"user_prompts",
	"other"
];
var Categorizer = class {
	_logger;
	constructor(logger$26) {
		this._logger = logger$26;
	}
	/**
	 * Classify an operation into a SemanticCategory using three-tier logic:
	 * exact map lookup, then prefix regexes, then keyword heuristics.
	 *
	 * @param operationName - Span operation name (e.g. 'read_file', 'bash');
	 *   a nullish value is treated as "unknown" instead of throwing
	 * @param toolName - Optional tool name; non-empty value overrides fallback to tool_outputs
	 */
	classify(operationName, toolName) {
		// FIX: computeCategoryStats passes `span.operationName ?? span.name`, which
		// is undefined when a span has neither field. Previously that value slipped
		// past the map/regex tiers and crashed at `.toLowerCase()`. Fall back to
		// "unknown", mirroring ConsumerAnalyzer's key-building fallback.
		const opName = operationName ?? "unknown";
		const exact = EXACT_CATEGORY_MAP.get(opName);
		if (exact !== void 0) return exact;
		for (const { pattern, category } of PREFIX_PATTERNS) if (pattern.test(opName)) return category;
		// Tier 3: keyword heuristics on the lowercased name, most specific first.
		const lower = opName.toLowerCase();
		if (lower.includes("file") && (lower.includes("read") || lower.includes("open"))) return "file_reads";
		if (lower.includes("system") || lower.includes("prompt")) return "system_prompts";
		if (lower.includes("bash") || lower.includes("exec") || lower.includes("tool")) return "tool_outputs";
		if (lower.includes("conversation") || lower.includes("history") || lower.includes("chat")) return "conversation_history";
		if (lower.includes("user") || lower.includes("human")) return "user_prompts";
		// A concrete tool name is strong evidence of a tool invocation.
		if (toolName !== void 0 && toolName.length > 0) return "tool_outputs";
		return "other";
	}
	/**
	 * Detect whether a category's token consumption is growing, stable, or shrinking
	 * by comparing first-half vs second-half turn attribution.
	 *
	 * @param categorySpans - Spans already classified into this category
	 * @param turns - Full turn sequence for the story
	 */
	computeTrend(categorySpans, turns) {
		if (turns.length < 2) return "stable";
		// Map each turn's span id (and its children's) to the turn index for O(1) attribution.
		const spanTurnMap = new Map();
		for (let i = 0; i < turns.length; i++) {
			const turn = turns[i];
			spanTurnMap.set(turn.spanId, i);
			for (const child of turn.childSpans) spanTurnMap.set(child.spanId, i);
		}
		const half = Math.floor(turns.length / 2);
		let firstHalfTokens = 0;
		let secondHalfTokens = 0;
		for (const span of categorySpans) {
			// Spans outside the turn tree fall back to timestamp-based attribution.
			const turnIdx = spanTurnMap.has(span.spanId) ? spanTurnMap.get(span.spanId) : attributeSpanToTurnIndex(span.startTime, turns);
			const tokens = span.inputTokens + span.outputTokens;
			if (turnIdx < half) firstHalfTokens += tokens;
			else secondHalfTokens += tokens;
		}
		if (firstHalfTokens === 0 && secondHalfTokens === 0) return "stable";
		if (firstHalfTokens === 0) return "growing";
		// ±20% band around the first-half total counts as stable.
		if (secondHalfTokens > 1.2 * firstHalfTokens) return "growing";
		if (secondHalfTokens < .8 * firstHalfTokens) return "shrinking";
		return "stable";
	}
	/**
	 * Compute per-category token statistics for a complete set of spans.
	 *
	 * All six SemanticCategory values are always present in the result (zero-token
	 * categories are included with totalTokens: 0). Results are sorted by
	 * totalTokens descending.
	 *
	 * @param spans - All NormalizedSpans for the story
	 * @param turns - TurnAnalysis sequence (may be empty)
	 */
	computeCategoryStats(spans, turns) {
		const grandTotal = spans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
		// Zero-fill every category so empty ones still appear in the output.
		const buckets = new Map();
		for (const cat of ALL_CATEGORIES) buckets.set(cat, []);
		for (const span of spans) {
			const toolName = extractToolNameFromSpan(span);
			const cat = this.classify(span.operationName ?? span.name, toolName);
			buckets.get(cat).push(span);
		}
		const results = ALL_CATEGORIES.map((category) => {
			const catSpans = buckets.get(category);
			const totalTokens = catSpans.reduce((sum, s) => sum + s.inputTokens + s.outputTokens, 0);
			const eventCount = catSpans.length;
			// Percentage of the grand total, rounded to 3 decimal places.
			const percentage = grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0;
			const avgTokensPerEvent = eventCount > 0 ? totalTokens / eventCount : 0;
			const trend = this.computeTrend(catSpans, turns);
			return {
				category,
				totalTokens,
				percentage,
				eventCount,
				avgTokensPerEvent,
				trend
			};
		});
		this._logger.debug({
			categories: results.length,
			grandTotal
		}, "Computed category stats");
		return results.sort((a, b) => b.totalTokens - a.totalTokens);
	}
};
9989
+ /**
9990
+ * Binary search: find the index of the last turn whose timestamp ≤ spanStartTime.
9991
+ * Returns 0 if no turn precedes the span.
9992
+ */
9993
/**
 * Binary search for the index of the last turn whose timestamp is ≤ `spanStartTime`.
 * Returns 0 when no turn precedes the span (including an empty turn list).
 */
function attributeSpanToTurnIndex(spanStartTime, turns) {
	let low = 0;
	let high = turns.length - 1;
	let best = 0;
	while (low <= high) {
		const mid = Math.floor((low + high) / 2);
		if (turns[mid].timestamp > spanStartTime) {
			high = mid - 1;
		} else {
			best = mid;
			low = mid + 1;
		}
	}
	return best;
}
10006
+ /**
10007
+ * Extract a tool name from a span's attributes, checking known attribute keys
10008
+ * in priority order.
10009
+ */
10010
/**
 * Extract a tool name from a span's attributes, checking known attribute keys
 * in priority order. Returns undefined when attributes are absent or every
 * candidate value is falsy (e.g. missing or empty string).
 */
function extractToolNameFromSpan(span) {
	const attrs = span.attributes;
	if (!attrs) return void 0;
	for (const key of ["tool.name", "llm.tool.name", "claude.tool_name"]) {
		const candidate = attrs[key];
		if (candidate) return candidate;
	}
	return void 0;
}
10016
+
10017
+ //#endregion
10018
+ //#region src/modules/telemetry/consumer-analyzer.ts
10019
var ConsumerAnalyzer = class {
	_categorizer;
	_logger;
	constructor(categorizer, logger$26) {
		this._categorizer = categorizer;
		this._logger = logger$26;
	}
	/**
	 * Group spans by consumer key, rank by totalTokens descending, and return
	 * ConsumerStats for each non-zero-token group.
	 *
	 * @param spans - All NormalizedSpans for the story
	 */
	analyze(spans) {
		if (spans.length === 0) return [];
		let grandTotal = 0;
		for (const span of spans) grandTotal += span.inputTokens + span.outputTokens;
		// Bucket spans by their consumer key, preserving first-seen order.
		const grouped = new Map();
		for (const span of spans) {
			const key = this._buildConsumerKey(span);
			if (grouped.has(key)) grouped.get(key).push(span);
			else grouped.set(key, [span]);
		}
		const stats = [];
		for (const [consumerKey, members] of grouped) {
			let totalTokens = 0;
			for (const s of members) totalTokens += s.inputTokens + s.outputTokens;
			// Zero-token groups carry no signal and are excluded.
			if (totalTokens === 0) continue;
			// The first span of the group is representative for classification.
			const representative = members[0];
			const category = this._categorizer.classify(representative.operationName ?? representative.name ?? "unknown", this._extractToolName(representative));
			// Rank the group's spans by combined token count, heaviest first.
			const ranked = [...members].sort((a, b) => b.inputTokens + b.outputTokens - (a.inputTokens + a.outputTokens));
			stats.push({
				consumerKey,
				category,
				totalTokens,
				percentage: grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0,
				eventCount: members.length,
				topInvocations: ranked.slice(0, 20).map((s) => ({
					spanId: s.spanId,
					name: s.name,
					toolName: this._extractToolName(s),
					totalTokens: s.inputTokens + s.outputTokens,
					inputTokens: s.inputTokens,
					outputTokens: s.outputTokens
				}))
			});
		}
		this._logger.debug({
			consumers: stats.length,
			grandTotal
		}, "Computed consumer stats");
		return stats.sort((a, b) => b.totalTokens - a.totalTokens);
	}
	/**
	 * Build a stable, collision-resistant consumer key from a span.
	 * Format: `operationName|toolName` (tool part is empty string if absent).
	 */
	_buildConsumerKey(span) {
		const operationPart = (span.operationName ?? span.name ?? "unknown").slice(0, 200);
		const toolPart = (this._extractToolName(span) ?? "").slice(0, 100);
		return `${operationPart}|${toolPart}`;
	}
	/**
	 * Extract a tool name from span attributes, checking three known attribute
	 * keys in priority order; undefined when absent or empty.
	 */
	_extractToolName(span) {
		const attrs = span.attributes;
		if (!attrs) return void 0;
		for (const key of ["tool.name", "llm.tool.name", "claude.tool_name"]) {
			if (attrs[key]) return attrs[key];
		}
		return void 0;
	}
};
10096
+
8937
10097
  //#endregion
8938
10098
  //#region src/modules/implementation-orchestrator/orchestrator-impl.ts
8939
10099
  function createPauseGate() {
@@ -8975,8 +10135,8 @@ function buildTargetedFilesContent(issueList) {
8975
10135
  * @returns A fully-configured ImplementationOrchestrator ready to call run()
8976
10136
  */
8977
10137
  function createImplementationOrchestrator(deps) {
8978
- const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore } = deps;
8979
- const logger$24 = createLogger("implementation-orchestrator");
10138
+ const { db, pack, contextCompiler, dispatcher, eventBus, config, projectRoot, tokenCeilings, stateStore, telemetryPersistence, ingestionServer } = deps;
10139
+ const logger$26 = createLogger("implementation-orchestrator");
8980
10140
  let _state = "IDLE";
8981
10141
  let _startedAt;
8982
10142
  let _completedAt;
@@ -8996,6 +10156,7 @@ function createImplementationOrchestrator(deps) {
8996
10156
  const _storyDispatches = new Map();
8997
10157
  let _maxConcurrentActual = 0;
8998
10158
  let _contractMismatches;
10159
+ let _otlpEndpoint;
8999
10160
  const _stateStoreCache = new Map();
9000
10161
  const MEMORY_PRESSURE_BACKOFF_MS = [
9001
10162
  3e4,
@@ -9021,7 +10182,7 @@ function createImplementationOrchestrator(deps) {
9021
10182
  const nowMs = Date.now();
9022
10183
  for (const [phase, startMs] of starts) {
9023
10184
  const endMs = ends?.get(phase);
9024
- if (endMs === void 0) logger$24.warn({
10185
+ if (endMs === void 0) logger$26.warn({
9025
10186
  storyKey,
9026
10187
  phase
9027
10188
  }, "Phase has no end time — story may have errored mid-phase. Duration capped to now() and may be inflated.");
@@ -9068,7 +10229,7 @@ function createImplementationOrchestrator(deps) {
9068
10229
  recordedAt: completedAt,
9069
10230
  timestamp: completedAt
9070
10231
  }).catch((storeErr) => {
9071
- logger$24.warn({
10232
+ logger$26.warn({
9072
10233
  err: storeErr,
9073
10234
  storyKey
9074
10235
  }, "Failed to record metric to StateStore (best-effort)");
@@ -9090,7 +10251,7 @@ function createImplementationOrchestrator(deps) {
9090
10251
  rationale: `Story ${storyKey} completed with result=${result} in ${wallClockSeconds}s. Tokens: ${tokenAgg.input}+${tokenAgg.output}. Review cycles: ${reviewCycles}.`
9091
10252
  });
9092
10253
  } catch (decisionErr) {
9093
- logger$24.warn({
10254
+ logger$26.warn({
9094
10255
  err: decisionErr,
9095
10256
  storyKey
9096
10257
  }, "Failed to write story-metrics decision (best-effort)");
@@ -9118,13 +10279,13 @@ function createImplementationOrchestrator(deps) {
9118
10279
  dispatches: _storyDispatches.get(storyKey) ?? 0
9119
10280
  });
9120
10281
  } catch (emitErr) {
9121
- logger$24.warn({
10282
+ logger$26.warn({
9122
10283
  err: emitErr,
9123
10284
  storyKey
9124
10285
  }, "Failed to emit story:metrics event (best-effort)");
9125
10286
  }
9126
10287
  } catch (err) {
9127
- logger$24.warn({
10288
+ logger$26.warn({
9128
10289
  err,
9129
10290
  storyKey
9130
10291
  }, "Failed to write story metrics (best-effort)");
@@ -9153,7 +10314,7 @@ function createImplementationOrchestrator(deps) {
9153
10314
  rationale: `Story ${storyKey} ${outcome} after ${reviewCycles} review cycle(s).`
9154
10315
  });
9155
10316
  } catch (err) {
9156
- logger$24.warn({
10317
+ logger$26.warn({
9157
10318
  err,
9158
10319
  storyKey
9159
10320
  }, "Failed to write story-outcome decision (best-effort)");
@@ -9179,7 +10340,7 @@ function createImplementationOrchestrator(deps) {
9179
10340
  rationale: `Escalation diagnosis for ${payload.storyKey}: ${diagnosis.recommendedAction} — ${diagnosis.rationale}`
9180
10341
  });
9181
10342
  } catch (err) {
9182
- logger$24.warn({
10343
+ logger$26.warn({
9183
10344
  err,
9184
10345
  storyKey: payload.storyKey
9185
10346
  }, "Failed to persist escalation diagnosis (best-effort)");
@@ -9228,7 +10389,7 @@ function createImplementationOrchestrator(deps) {
9228
10389
  const existing = _stories.get(storyKey);
9229
10390
  if (existing !== void 0) {
9230
10391
  Object.assign(existing, updates);
9231
- persistStoryState(storyKey, existing).catch((err) => logger$24.warn({
10392
+ persistStoryState(storyKey, existing).catch((err) => logger$26.warn({
9232
10393
  err,
9233
10394
  storyKey
9234
10395
  }, "StateStore write failed after updateStory"));
@@ -9237,12 +10398,12 @@ function createImplementationOrchestrator(deps) {
9237
10398
  storyKey,
9238
10399
  conflict: err
9239
10400
  });
9240
- else logger$24.warn({
10401
+ else logger$26.warn({
9241
10402
  err,
9242
10403
  storyKey
9243
10404
  }, "mergeStory failed");
9244
10405
  });
9245
- else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$24.warn({
10406
+ else if (updates.phase === "ESCALATED") stateStore?.rollbackStory(storyKey).catch((err) => logger$26.warn({
9246
10407
  err,
9247
10408
  storyKey
9248
10409
  }, "rollbackStory failed — branch may persist"));
@@ -9269,7 +10430,7 @@ function createImplementationOrchestrator(deps) {
9269
10430
  };
9270
10431
  await stateStore.setStoryState(storyKey, record);
9271
10432
  } catch (err) {
9272
- logger$24.warn({
10433
+ logger$26.warn({
9273
10434
  err,
9274
10435
  storyKey
9275
10436
  }, "StateStore.setStoryState failed (best-effort)");
@@ -9285,7 +10446,7 @@ function createImplementationOrchestrator(deps) {
9285
10446
  token_usage_json: serialized
9286
10447
  });
9287
10448
  } catch (err) {
9288
- logger$24.warn({ err }, "Failed to persist orchestrator state");
10449
+ logger$26.warn({ err }, "Failed to persist orchestrator state");
9289
10450
  }
9290
10451
  }
9291
10452
  function recordProgress() {
@@ -9332,7 +10493,7 @@ function createImplementationOrchestrator(deps) {
9332
10493
  }
9333
10494
  if (childActive) {
9334
10495
  _lastProgressTs = Date.now();
9335
- logger$24.debug({
10496
+ logger$26.debug({
9336
10497
  storyKey: key,
9337
10498
  phase: s.phase,
9338
10499
  childPids
@@ -9341,7 +10502,7 @@ function createImplementationOrchestrator(deps) {
9341
10502
  }
9342
10503
  _stalledStories.add(key);
9343
10504
  _storiesWithStall.add(key);
9344
- logger$24.warn({
10505
+ logger$26.warn({
9345
10506
  storyKey: key,
9346
10507
  phase: s.phase,
9347
10508
  elapsedMs: elapsed,
@@ -9386,7 +10547,7 @@ function createImplementationOrchestrator(deps) {
9386
10547
  for (let attempt = 0; attempt < MEMORY_PRESSURE_BACKOFF_MS.length; attempt++) {
9387
10548
  const memState = dispatcher.getMemoryState();
9388
10549
  if (!memState.isPressured) return true;
9389
- logger$24.warn({
10550
+ logger$26.warn({
9390
10551
  storyKey,
9391
10552
  freeMB: memState.freeMB,
9392
10553
  thresholdMB: memState.thresholdMB,
@@ -9406,11 +10567,11 @@ function createImplementationOrchestrator(deps) {
9406
10567
  * exhausted retries the story is ESCALATED.
9407
10568
  */
9408
10569
  async function processStory(storyKey) {
9409
- logger$24.info({ storyKey }, "Processing story");
10570
+ logger$26.info({ storyKey }, "Processing story");
9410
10571
  {
9411
10572
  const memoryOk = await checkMemoryPressure(storyKey);
9412
10573
  if (!memoryOk) {
9413
- logger$24.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
10574
+ logger$26.warn({ storyKey }, "Memory pressure exhausted — escalating story without dispatch");
9414
10575
  const memPressureState = {
9415
10576
  phase: "ESCALATED",
9416
10577
  reviewCycles: 0,
@@ -9419,7 +10580,7 @@ function createImplementationOrchestrator(deps) {
9419
10580
  completedAt: new Date().toISOString()
9420
10581
  };
9421
10582
  _stories.set(storyKey, memPressureState);
9422
- persistStoryState(storyKey, memPressureState).catch((err) => logger$24.warn({
10583
+ persistStoryState(storyKey, memPressureState).catch((err) => logger$26.warn({
9423
10584
  err,
9424
10585
  storyKey
9425
10586
  }, "StateStore write failed after memory-pressure escalation"));
@@ -9436,7 +10597,7 @@ function createImplementationOrchestrator(deps) {
9436
10597
  }
9437
10598
  await waitIfPaused();
9438
10599
  if (_state !== "RUNNING") return;
9439
- stateStore?.branchForStory(storyKey).catch((err) => logger$24.warn({
10600
+ stateStore?.branchForStory(storyKey).catch((err) => logger$26.warn({
9440
10601
  err,
9441
10602
  storyKey
9442
10603
  }, "branchForStory failed — continuing without branch isolation"));
@@ -9453,14 +10614,14 @@ function createImplementationOrchestrator(deps) {
9453
10614
  if (match) {
9454
10615
  const candidatePath = join$1(artifactsDir, match);
9455
10616
  const validation = await isValidStoryFile(candidatePath);
9456
- if (!validation.valid) logger$24.warn({
10617
+ if (!validation.valid) logger$26.warn({
9457
10618
  storyKey,
9458
10619
  storyFilePath: candidatePath,
9459
10620
  reason: validation.reason
9460
10621
  }, `Existing story file for ${storyKey} is invalid (${validation.reason}) — re-creating`);
9461
10622
  else {
9462
10623
  storyFilePath = candidatePath;
9463
- logger$24.info({
10624
+ logger$26.info({
9464
10625
  storyKey,
9465
10626
  storyFilePath
9466
10627
  }, "Found existing story file — skipping create-story");
@@ -9486,7 +10647,8 @@ function createImplementationOrchestrator(deps) {
9486
10647
  contextCompiler,
9487
10648
  dispatcher,
9488
10649
  projectRoot,
9489
- tokenCeilings
10650
+ tokenCeilings,
10651
+ otlpEndpoint: _otlpEndpoint
9490
10652
  }, {
9491
10653
  epicId: storyKey.split("-")[0] ?? storyKey,
9492
10654
  storyKey,
@@ -9577,14 +10739,14 @@ function createImplementationOrchestrator(deps) {
9577
10739
  ...contract.transport !== void 0 ? { transport: contract.transport } : {}
9578
10740
  })
9579
10741
  });
9580
- logger$24.info({
10742
+ logger$26.info({
9581
10743
  storyKey,
9582
10744
  contractCount: contracts.length,
9583
10745
  contracts
9584
10746
  }, "Stored interface contract declarations");
9585
10747
  }
9586
10748
  } catch (err) {
9587
- logger$24.warn({
10749
+ logger$26.warn({
9588
10750
  storyKey,
9589
10751
  error: err instanceof Error ? err.message : String(err)
9590
10752
  }, "Failed to parse interface contracts — continuing without contract declarations");
@@ -9602,17 +10764,18 @@ function createImplementationOrchestrator(deps) {
9602
10764
  contextCompiler,
9603
10765
  dispatcher,
9604
10766
  projectRoot,
9605
- tokenCeilings
10767
+ tokenCeilings,
10768
+ otlpEndpoint: _otlpEndpoint
9606
10769
  }, {
9607
10770
  storyKey,
9608
10771
  storyFilePath: storyFilePath ?? "",
9609
10772
  pipelineRunId: config.pipelineRunId ?? ""
9610
10773
  });
9611
10774
  testPlanPhaseResult = testPlanResult.result;
9612
- if (testPlanResult.result === "success") logger$24.info({ storyKey }, "Test plan generated successfully");
9613
- else logger$24.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
10775
+ if (testPlanResult.result === "success") logger$26.info({ storyKey }, "Test plan generated successfully");
10776
+ else logger$26.warn({ storyKey }, "Test planning returned failed result — proceeding to dev-story without test plan");
9614
10777
  } catch (err) {
9615
- logger$24.warn({
10778
+ logger$26.warn({
9616
10779
  storyKey,
9617
10780
  err
9618
10781
  }, "Test planning failed — proceeding to dev-story without test plan");
@@ -9636,7 +10799,7 @@ function createImplementationOrchestrator(deps) {
9636
10799
  try {
9637
10800
  storyContentForAnalysis = await readFile$1(storyFilePath ?? "", "utf-8");
9638
10801
  } catch (err) {
9639
- logger$24.error({
10802
+ logger$26.error({
9640
10803
  storyKey,
9641
10804
  storyFilePath,
9642
10805
  error: err instanceof Error ? err.message : String(err)
@@ -9644,7 +10807,7 @@ function createImplementationOrchestrator(deps) {
9644
10807
  }
9645
10808
  const analysis = analyzeStoryComplexity(storyContentForAnalysis);
9646
10809
  const batches = planTaskBatches(analysis);
9647
- logger$24.info({
10810
+ logger$26.info({
9648
10811
  storyKey,
9649
10812
  estimatedScope: analysis.estimatedScope,
9650
10813
  batchCount: batches.length,
@@ -9662,7 +10825,7 @@ function createImplementationOrchestrator(deps) {
9662
10825
  if (_state !== "RUNNING") break;
9663
10826
  const taskScope = batch.taskIds.map((id, i) => `T${id}: ${batch.taskTitles[i] ?? ""}`).join("\n");
9664
10827
  const priorFiles = allFilesModified.size > 0 ? Array.from(allFilesModified) : void 0;
9665
- logger$24.info({
10828
+ logger$26.info({
9666
10829
  storyKey,
9667
10830
  batchIndex: batch.batchIndex,
9668
10831
  taskCount: batch.taskIds.length
@@ -9677,7 +10840,8 @@ function createImplementationOrchestrator(deps) {
9677
10840
  contextCompiler,
9678
10841
  dispatcher,
9679
10842
  projectRoot,
9680
- tokenCeilings
10843
+ tokenCeilings,
10844
+ otlpEndpoint: _otlpEndpoint
9681
10845
  }, {
9682
10846
  storyKey,
9683
10847
  storyFilePath: storyFilePath ?? "",
@@ -9687,7 +10851,7 @@ function createImplementationOrchestrator(deps) {
9687
10851
  });
9688
10852
  } catch (batchErr) {
9689
10853
  const errMsg = batchErr instanceof Error ? batchErr.message : String(batchErr);
9690
- logger$24.warn({
10854
+ logger$26.warn({
9691
10855
  storyKey,
9692
10856
  batchIndex: batch.batchIndex,
9693
10857
  error: errMsg
@@ -9707,7 +10871,7 @@ function createImplementationOrchestrator(deps) {
9707
10871
  filesModified: batchFilesModified,
9708
10872
  result: batchResult.result === "success" ? "success" : "failed"
9709
10873
  };
9710
- logger$24.info(batchMetrics, "Batch dev-story metrics");
10874
+ logger$26.info(batchMetrics, "Batch dev-story metrics");
9711
10875
  for (const f of batchFilesModified) allFilesModified.add(f);
9712
10876
  if (batchFilesModified.length > 0) batchFileGroups.push({
9713
10877
  batchIndex: batch.batchIndex,
@@ -9729,13 +10893,13 @@ function createImplementationOrchestrator(deps) {
9729
10893
  })
9730
10894
  });
9731
10895
  } catch (tokenErr) {
9732
- logger$24.warn({
10896
+ logger$26.warn({
9733
10897
  storyKey,
9734
10898
  batchIndex: batch.batchIndex,
9735
10899
  err: tokenErr
9736
10900
  }, "Failed to record batch token usage");
9737
10901
  }
9738
- if (batchResult.result === "failed") logger$24.warn({
10902
+ if (batchResult.result === "failed") logger$26.warn({
9739
10903
  storyKey,
9740
10904
  batchIndex: batch.batchIndex,
9741
10905
  error: batchResult.error
@@ -9757,7 +10921,8 @@ function createImplementationOrchestrator(deps) {
9757
10921
  contextCompiler,
9758
10922
  dispatcher,
9759
10923
  projectRoot,
9760
- tokenCeilings
10924
+ tokenCeilings,
10925
+ otlpEndpoint: _otlpEndpoint
9761
10926
  }, {
9762
10927
  storyKey,
9763
10928
  storyFilePath: storyFilePath ?? "",
@@ -9771,7 +10936,7 @@ function createImplementationOrchestrator(deps) {
9771
10936
  });
9772
10937
  persistState();
9773
10938
  if (devResult.result === "success") devStoryWasSuccess = true;
9774
- else logger$24.warn({
10939
+ else logger$26.warn({
9775
10940
  storyKey,
9776
10941
  error: devResult.error,
9777
10942
  filesModified: devFilesModified.length
@@ -9799,7 +10964,7 @@ function createImplementationOrchestrator(deps) {
9799
10964
  if (devStoryWasSuccess) {
9800
10965
  gitDiffFiles = checkGitDiffFiles(projectRoot ?? process.cwd());
9801
10966
  if (gitDiffFiles.length === 0) {
9802
- logger$24.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
10967
+ logger$26.warn({ storyKey }, "Zero-diff detected after COMPLETE dev-story — no file changes in git working tree");
9803
10968
  eventBus.emit("orchestrator:zero-diff-escalation", {
9804
10969
  storyKey,
9805
10970
  reason: "zero-diff-on-complete"
@@ -9830,7 +10995,7 @@ function createImplementationOrchestrator(deps) {
9830
10995
  });
9831
10996
  if (buildVerifyResult.status === "passed") {
9832
10997
  eventBus.emit("story:build-verification-passed", { storyKey });
9833
- logger$24.info({ storyKey }, "Build verification passed");
10998
+ logger$26.info({ storyKey }, "Build verification passed");
9834
10999
  } else if (buildVerifyResult.status === "failed" || buildVerifyResult.status === "timeout") {
9835
11000
  const truncatedOutput = (buildVerifyResult.output ?? "").slice(0, 2e3);
9836
11001
  const reason = buildVerifyResult.reason ?? "build-verification-failed";
@@ -9839,7 +11004,7 @@ function createImplementationOrchestrator(deps) {
9839
11004
  exitCode: buildVerifyResult.exitCode ?? 1,
9840
11005
  output: truncatedOutput
9841
11006
  });
9842
- logger$24.warn({
11007
+ logger$26.warn({
9843
11008
  storyKey,
9844
11009
  reason,
9845
11010
  exitCode: buildVerifyResult.exitCode
@@ -9869,7 +11034,7 @@ function createImplementationOrchestrator(deps) {
9869
11034
  storyKey
9870
11035
  });
9871
11036
  if (icResult.potentiallyAffectedTests.length > 0) {
9872
- logger$24.warn({
11037
+ logger$26.warn({
9873
11038
  storyKey,
9874
11039
  modifiedInterfaces: icResult.modifiedInterfaces,
9875
11040
  potentiallyAffectedTests: icResult.potentiallyAffectedTests
@@ -9915,7 +11080,7 @@ function createImplementationOrchestrator(deps) {
9915
11080
  "NEEDS_MAJOR_REWORK": 2
9916
11081
  };
9917
11082
  for (const group of batchFileGroups) {
9918
- logger$24.info({
11083
+ logger$26.info({
9919
11084
  storyKey,
9920
11085
  batchIndex: group.batchIndex,
9921
11086
  fileCount: group.files.length
@@ -9927,7 +11092,8 @@ function createImplementationOrchestrator(deps) {
9927
11092
  contextCompiler,
9928
11093
  dispatcher,
9929
11094
  projectRoot,
9930
- tokenCeilings
11095
+ tokenCeilings,
11096
+ otlpEndpoint: _otlpEndpoint
9931
11097
  }, {
9932
11098
  storyKey,
9933
11099
  storyFilePath: storyFilePath ?? "",
@@ -9953,7 +11119,7 @@ function createImplementationOrchestrator(deps) {
9953
11119
  rawOutput: lastRawOutput,
9954
11120
  tokenUsage: aggregateTokens
9955
11121
  };
9956
- logger$24.info({
11122
+ logger$26.info({
9957
11123
  storyKey,
9958
11124
  batchCount: batchFileGroups.length,
9959
11125
  verdict: worstVerdict,
@@ -9967,7 +11133,8 @@ function createImplementationOrchestrator(deps) {
9967
11133
  contextCompiler,
9968
11134
  dispatcher,
9969
11135
  projectRoot,
9970
- tokenCeilings
11136
+ tokenCeilings,
11137
+ otlpEndpoint: _otlpEndpoint
9971
11138
  }, {
9972
11139
  storyKey,
9973
11140
  storyFilePath: storyFilePath ?? "",
@@ -9980,7 +11147,7 @@ function createImplementationOrchestrator(deps) {
9980
11147
  const isPhantomReview = reviewResult.dispatchFailed === true || reviewResult.verdict !== "SHIP_IT" && reviewResult.verdict !== "LGTM_WITH_NOTES" && (reviewResult.issue_list === void 0 || reviewResult.issue_list.length === 0) && reviewResult.error !== void 0;
9981
11148
  if (isPhantomReview && !timeoutRetried) {
9982
11149
  timeoutRetried = true;
9983
- logger$24.warn({
11150
+ logger$26.warn({
9984
11151
  storyKey,
9985
11152
  reviewCycles,
9986
11153
  error: reviewResult.error
@@ -9990,7 +11157,7 @@ function createImplementationOrchestrator(deps) {
9990
11157
  verdict = reviewResult.verdict;
9991
11158
  issueList = reviewResult.issue_list ?? [];
9992
11159
  if (verdict === "NEEDS_MAJOR_REWORK" && reviewCycles > 0 && previousIssueList.length > 0 && issueList.length < previousIssueList.length) {
9993
- logger$24.info({
11160
+ logger$26.info({
9994
11161
  storyKey,
9995
11162
  originalVerdict: verdict,
9996
11163
  issuesBefore: previousIssueList.length,
@@ -10026,7 +11193,7 @@ function createImplementationOrchestrator(deps) {
10026
11193
  if (_decomposition !== void 0) parts.push(`decomposed: ${_decomposition.batchCount} batches`);
10027
11194
  parts.push(`${fileCount} files`);
10028
11195
  parts.push(`${totalTokensK} tokens`);
10029
- logger$24.info({
11196
+ logger$26.info({
10030
11197
  storyKey,
10031
11198
  verdict,
10032
11199
  agentVerdict: reviewResult.agentVerdict
@@ -10075,13 +11242,58 @@ function createImplementationOrchestrator(deps) {
10075
11242
  }),
10076
11243
  rationale: `Advisory notes from LGTM_WITH_NOTES review of ${storyKey}`
10077
11244
  });
10078
- logger$24.info({ storyKey }, "Advisory notes persisted to decision store");
11245
+ logger$26.info({ storyKey }, "Advisory notes persisted to decision store");
10079
11246
  } catch (advisoryErr) {
10080
- logger$24.warn({
11247
+ logger$26.warn({
10081
11248
  storyKey,
10082
11249
  error: advisoryErr instanceof Error ? advisoryErr.message : String(advisoryErr)
10083
11250
  }, "Failed to persist advisory notes (best-effort)");
10084
11251
  }
11252
+ if (telemetryPersistence !== void 0) try {
11253
+ const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
11254
+ if (turns.length > 0) {
11255
+ const scorer = new EfficiencyScorer(logger$26);
11256
+ const effScore = scorer.score(storyKey, turns);
11257
+ await telemetryPersistence.storeEfficiencyScore(effScore);
11258
+ logger$26.info({
11259
+ storyKey,
11260
+ compositeScore: effScore.compositeScore,
11261
+ modelCount: effScore.perModelBreakdown.length
11262
+ }, "Efficiency score computed and persisted");
11263
+ } else logger$26.debug({ storyKey }, "No turn analysis data available — skipping efficiency scoring");
11264
+ } catch (effErr) {
11265
+ logger$26.warn({
11266
+ storyKey,
11267
+ error: effErr instanceof Error ? effErr.message : String(effErr)
11268
+ }, "Efficiency scoring failed — story verdict unchanged");
11269
+ }
11270
+ if (telemetryPersistence !== void 0) try {
11271
+ const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
11272
+ const spans = [];
11273
+ if (spans.length === 0) logger$26.debug({ storyKey }, "No spans for telemetry categorization — skipping");
11274
+ else {
11275
+ const categorizer = new Categorizer(logger$26);
11276
+ const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$26);
11277
+ const categoryStats = categorizer.computeCategoryStats(spans, turns);
11278
+ const consumerStats = consumerAnalyzer.analyze(spans);
11279
+ await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
11280
+ await telemetryPersistence.storeConsumerStats(storyKey, consumerStats);
11281
+ const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
11282
+ const topCategory = categoryStats[0]?.category ?? "none";
11283
+ const topConsumer = consumerStats[0]?.consumerKey ?? "none";
11284
+ logger$26.info({
11285
+ storyKey,
11286
+ topCategory,
11287
+ topConsumer,
11288
+ growingCount
11289
+ }, "Semantic categorization and consumer analysis complete");
11290
+ }
11291
+ } catch (catErr) {
11292
+ logger$26.warn({
11293
+ storyKey,
11294
+ error: catErr instanceof Error ? catErr.message : String(catErr)
11295
+ }, "Semantic categorization failed — story verdict unchanged");
11296
+ }
10085
11297
  try {
10086
11298
  const expansionResult = await runTestExpansion({
10087
11299
  db,
@@ -10089,7 +11301,8 @@ function createImplementationOrchestrator(deps) {
10089
11301
  contextCompiler,
10090
11302
  dispatcher,
10091
11303
  projectRoot,
10092
- tokenCeilings
11304
+ tokenCeilings,
11305
+ otlpEndpoint: _otlpEndpoint
10093
11306
  }, {
10094
11307
  storyKey,
10095
11308
  storyFilePath: storyFilePath ?? "",
@@ -10097,7 +11310,7 @@ function createImplementationOrchestrator(deps) {
10097
11310
  filesModified: devFilesModified,
10098
11311
  workingDirectory: projectRoot
10099
11312
  });
10100
- logger$24.debug({
11313
+ logger$26.debug({
10101
11314
  storyKey,
10102
11315
  expansion_priority: expansionResult.expansion_priority,
10103
11316
  coverage_gaps: expansionResult.coverage_gaps.length
@@ -10110,7 +11323,7 @@ function createImplementationOrchestrator(deps) {
10110
11323
  value: JSON.stringify(expansionResult)
10111
11324
  });
10112
11325
  } catch (expansionErr) {
10113
- logger$24.warn({
11326
+ logger$26.warn({
10114
11327
  storyKey,
10115
11328
  error: expansionErr instanceof Error ? expansionErr.message : String(expansionErr)
10116
11329
  }, "Test expansion failed — story verdict unchanged");
@@ -10137,7 +11350,7 @@ function createImplementationOrchestrator(deps) {
10137
11350
  persistState();
10138
11351
  return;
10139
11352
  }
10140
- logger$24.info({
11353
+ logger$26.info({
10141
11354
  storyKey,
10142
11355
  reviewCycles: finalReviewCycles,
10143
11356
  issueCount: issueList.length
@@ -10197,14 +11410,15 @@ function createImplementationOrchestrator(deps) {
10197
11410
  fixPrompt = assembled.prompt;
10198
11411
  } catch {
10199
11412
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, minor fixes needed`;
10200
- logger$24.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
11413
+ logger$26.warn({ storyKey }, "Failed to assemble auto-approve fix prompt, using fallback");
10201
11414
  }
10202
11415
  const handle = dispatcher.dispatch({
10203
11416
  prompt: fixPrompt,
10204
11417
  agent: "claude-code",
10205
11418
  taskType: "minor-fixes",
10206
11419
  workingDirectory: projectRoot,
10207
- ...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {}
11420
+ ...autoApproveMaxTurns !== void 0 ? { maxTurns: autoApproveMaxTurns } : {},
11421
+ ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
10208
11422
  });
10209
11423
  const fixResult = await handle.result;
10210
11424
  eventBus.emit("orchestrator:story-phase-complete", {
@@ -10215,9 +11429,9 @@ function createImplementationOrchestrator(deps) {
10215
11429
  output: fixResult.tokenEstimate.output
10216
11430
  } : void 0 }
10217
11431
  });
10218
- if (fixResult.status === "timeout") logger$24.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
11432
+ if (fixResult.status === "timeout") logger$26.warn({ storyKey }, "Auto-approve fix timed out — approving anyway (issues were minor)");
10219
11433
  } catch (err) {
10220
- logger$24.warn({
11434
+ logger$26.warn({
10221
11435
  storyKey,
10222
11436
  err
10223
11437
  }, "Auto-approve fix dispatch failed — approving anyway (issues were minor)");
@@ -10334,7 +11548,7 @@ function createImplementationOrchestrator(deps) {
10334
11548
  fixPrompt = assembled.prompt;
10335
11549
  } catch {
10336
11550
  fixPrompt = `Fix story ${storyKey}: verdict=${verdict}, taskType=${taskType}`;
10337
- logger$24.warn({
11551
+ logger$26.warn({
10338
11552
  storyKey,
10339
11553
  taskType
10340
11554
  }, "Failed to assemble fix prompt, using fallback");
@@ -10347,14 +11561,16 @@ function createImplementationOrchestrator(deps) {
10347
11561
  ...fixModel !== void 0 ? { model: fixModel } : {},
10348
11562
  outputSchema: DevStoryResultSchema,
10349
11563
  ...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
10350
- ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {}
11564
+ ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
11565
+ ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
10351
11566
  }) : dispatcher.dispatch({
10352
11567
  prompt: fixPrompt,
10353
11568
  agent: "claude-code",
10354
11569
  taskType,
10355
11570
  ...fixModel !== void 0 ? { model: fixModel } : {},
10356
11571
  ...fixMaxTurns !== void 0 ? { maxTurns: fixMaxTurns } : {},
10357
- ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {}
11572
+ ...projectRoot !== void 0 ? { workingDirectory: projectRoot } : {},
11573
+ ..._otlpEndpoint !== void 0 ? { otlpEndpoint: _otlpEndpoint } : {}
10358
11574
  });
10359
11575
  const fixResult = await handle.result;
10360
11576
  eventBus.emit("orchestrator:story-phase-complete", {
@@ -10366,7 +11582,7 @@ function createImplementationOrchestrator(deps) {
10366
11582
  } : void 0 }
10367
11583
  });
10368
11584
  if (fixResult.status === "timeout") {
10369
- logger$24.warn({
11585
+ logger$26.warn({
10370
11586
  storyKey,
10371
11587
  taskType
10372
11588
  }, "Fix dispatch timed out — escalating story");
@@ -10388,7 +11604,7 @@ function createImplementationOrchestrator(deps) {
10388
11604
  }
10389
11605
  if (fixResult.status === "failed") {
10390
11606
  if (isMajorRework) {
10391
- logger$24.warn({
11607
+ logger$26.warn({
10392
11608
  storyKey,
10393
11609
  exitCode: fixResult.exitCode
10394
11610
  }, "Major rework dispatch failed — escalating story");
@@ -10408,14 +11624,14 @@ function createImplementationOrchestrator(deps) {
10408
11624
  persistState();
10409
11625
  return;
10410
11626
  }
10411
- logger$24.warn({
11627
+ logger$26.warn({
10412
11628
  storyKey,
10413
11629
  taskType,
10414
11630
  exitCode: fixResult.exitCode
10415
11631
  }, "Fix dispatch failed");
10416
11632
  }
10417
11633
  } catch (err) {
10418
- logger$24.warn({
11634
+ logger$26.warn({
10419
11635
  storyKey,
10420
11636
  taskType,
10421
11637
  err
@@ -10478,11 +11694,11 @@ function createImplementationOrchestrator(deps) {
10478
11694
  }
10479
11695
  async function run(storyKeys) {
10480
11696
  if (_state === "RUNNING" || _state === "PAUSED") {
10481
- logger$24.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
11697
+ logger$26.warn({ state: _state }, "run() called while orchestrator is already running or paused — ignoring");
10482
11698
  return getStatus();
10483
11699
  }
10484
11700
  if (_state === "COMPLETE") {
10485
- logger$24.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
11701
+ logger$26.warn({ state: _state }, "run() called on a COMPLETE orchestrator — ignoring");
10486
11702
  return getStatus();
10487
11703
  }
10488
11704
  _state = "RUNNING";
@@ -10503,7 +11719,7 @@ function createImplementationOrchestrator(deps) {
10503
11719
  if (config.enableHeartbeat) startHeartbeat();
10504
11720
  if (projectRoot !== void 0) {
10505
11721
  const seedResult = seedMethodologyContext(db, projectRoot);
10506
- if (seedResult.decisionsCreated > 0) logger$24.info({
11722
+ if (seedResult.decisionsCreated > 0) logger$26.info({
10507
11723
  decisionsCreated: seedResult.decisionsCreated,
10508
11724
  skippedCategories: seedResult.skippedCategories
10509
11725
  }, "Methodology context seeded from planning artifacts");
@@ -10513,7 +11729,7 @@ function createImplementationOrchestrator(deps) {
10513
11729
  await stateStore.initialize();
10514
11730
  for (const key of storyKeys) {
10515
11731
  const pendingState = _stories.get(key);
10516
- if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$24.warn({
11732
+ if (pendingState !== void 0) persistStoryState(key, pendingState).catch((err) => logger$26.warn({
10517
11733
  err,
10518
11734
  storyKey: key
10519
11735
  }, "StateStore write failed during PENDING init"));
@@ -10522,9 +11738,16 @@ function createImplementationOrchestrator(deps) {
10522
11738
  const existingRecords = await stateStore.queryStories({});
10523
11739
  for (const record of existingRecords) _stateStoreCache.set(record.storyKey, record);
10524
11740
  } catch (err) {
10525
- logger$24.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
11741
+ logger$26.warn({ err }, "StateStore.queryStories() failed during init — status merge will be empty (best-effort)");
10526
11742
  }
10527
11743
  }
11744
+ if (ingestionServer !== void 0) {
11745
+ await ingestionServer.start().catch((err) => logger$26.warn({ err }, "IngestionServer.start() failed — continuing without telemetry (best-effort)"));
11746
+ try {
11747
+ _otlpEndpoint = ingestionServer.getOtlpEnvVars().OTEL_EXPORTER_OTLP_ENDPOINT;
11748
+ logger$26.info({ otlpEndpoint: _otlpEndpoint }, "OTLP telemetry ingestion active");
11749
+ } catch {}
11750
+ }
10528
11751
  let contractDeclarations = [];
10529
11752
  if (stateStore !== void 0) {
10530
11753
  const allContractRecords = await stateStore.queryContracts();
@@ -10558,11 +11781,11 @@ function createImplementationOrchestrator(deps) {
10558
11781
  }).filter((d) => d !== null);
10559
11782
  }
10560
11783
  const { batches, edges: contractEdges } = detectConflictGroupsWithContracts(storyKeys, { moduleMap: pack.manifest.conflictGroups }, contractDeclarations);
10561
- if (contractEdges.length > 0) logger$24.info({
11784
+ if (contractEdges.length > 0) logger$26.info({
10562
11785
  contractEdges,
10563
11786
  edgeCount: contractEdges.length
10564
11787
  }, "Contract dependency edges detected — applying contract-aware dispatch ordering");
10565
- logger$24.info({
11788
+ logger$26.info({
10566
11789
  storyCount: storyKeys.length,
10567
11790
  groupCount: batches.reduce((sum, b) => sum + b.length, 0),
10568
11791
  batchCount: batches.length,
@@ -10582,7 +11805,7 @@ function createImplementationOrchestrator(deps) {
10582
11805
  exitCode,
10583
11806
  output: truncatedOutput
10584
11807
  });
10585
- logger$24.error({
11808
+ logger$26.error({
10586
11809
  exitCode,
10587
11810
  reason: preFlightResult.reason
10588
11811
  }, "Pre-flight build check failed — aborting pipeline before any story dispatch");
@@ -10591,7 +11814,7 @@ function createImplementationOrchestrator(deps) {
10591
11814
  persistState();
10592
11815
  return getStatus();
10593
11816
  }
10594
- if (preFlightResult.status !== "skipped") logger$24.info("Pre-flight build check passed");
11817
+ if (preFlightResult.status !== "skipped") logger$26.info("Pre-flight build check passed");
10595
11818
  }
10596
11819
  try {
10597
11820
  for (const batchGroups of batches) await runWithConcurrency(batchGroups, config.maxConcurrency);
@@ -10600,7 +11823,7 @@ function createImplementationOrchestrator(deps) {
10600
11823
  _state = "FAILED";
10601
11824
  _completedAt = new Date().toISOString();
10602
11825
  persistState();
10603
- logger$24.error({ err }, "Orchestrator failed with unhandled error");
11826
+ logger$26.error({ err }, "Orchestrator failed with unhandled error");
10604
11827
  return getStatus();
10605
11828
  }
10606
11829
  stopHeartbeat();
@@ -10616,11 +11839,11 @@ function createImplementationOrchestrator(deps) {
10616
11839
  contractName: mismatch.contractName,
10617
11840
  mismatchDescription: mismatch.mismatchDescription
10618
11841
  });
10619
- logger$24.warn({
11842
+ logger$26.warn({
10620
11843
  mismatchCount: mismatches.length,
10621
11844
  mismatches
10622
11845
  }, "Post-sprint contract verification found mismatches — manual review required");
10623
- } else logger$24.info("Post-sprint contract verification passed — all declared contracts satisfied");
11846
+ } else logger$26.info("Post-sprint contract verification passed — all declared contracts satisfied");
10624
11847
  if (stateStore !== void 0) try {
10625
11848
  const allContractsForVerification = await stateStore.queryContracts();
10626
11849
  const verifiedAt = new Date().toISOString();
@@ -10649,12 +11872,12 @@ function createImplementationOrchestrator(deps) {
10649
11872
  });
10650
11873
  await stateStore.setContractVerification(sk, records);
10651
11874
  }
10652
- logger$24.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
11875
+ logger$26.info({ storyCount: contractsByStory.size }, "Contract verification results persisted to StateStore");
10653
11876
  } catch (persistErr) {
10654
- logger$24.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
11877
+ logger$26.warn({ err: persistErr }, "Failed to persist contract verification results to StateStore");
10655
11878
  }
10656
11879
  } catch (err) {
10657
- logger$24.error({ err }, "Post-sprint contract verification threw an error — skipping");
11880
+ logger$26.error({ err }, "Post-sprint contract verification threw an error — skipping");
10658
11881
  }
10659
11882
  let completed = 0;
10660
11883
  let escalated = 0;
@@ -10671,7 +11894,8 @@ function createImplementationOrchestrator(deps) {
10671
11894
  persistState();
10672
11895
  return getStatus();
10673
11896
  } finally {
10674
- if (stateStore !== void 0) await stateStore.close().catch((err) => logger$24.warn({ err }, "StateStore.close() failed (best-effort)"));
11897
+ if (stateStore !== void 0) await stateStore.close().catch((err) => logger$26.warn({ err }, "StateStore.close() failed (best-effort)"));
11898
+ if (ingestionServer !== void 0) await ingestionServer.stop().catch((err) => logger$26.warn({ err }, "IngestionServer.stop() failed (best-effort)"));
10675
11899
  }
10676
11900
  }
10677
11901
  function pause() {
@@ -10680,7 +11904,7 @@ function createImplementationOrchestrator(deps) {
10680
11904
  _pauseGate = createPauseGate();
10681
11905
  _state = "PAUSED";
10682
11906
  eventBus.emit("orchestrator:paused", {});
10683
- logger$24.info("Orchestrator paused");
11907
+ logger$26.info("Orchestrator paused");
10684
11908
  }
10685
11909
  function resume() {
10686
11910
  if (_state !== "PAUSED") return;
@@ -10691,7 +11915,7 @@ function createImplementationOrchestrator(deps) {
10691
11915
  }
10692
11916
  _state = "RUNNING";
10693
11917
  eventBus.emit("orchestrator:resumed", {});
10694
- logger$24.info("Orchestrator resumed");
11918
+ logger$26.info("Orchestrator resumed");
10695
11919
  }
10696
11920
  return {
10697
11921
  run,
@@ -14965,12 +16189,19 @@ async function runRunAction(options) {
14965
16189
  });
14966
16190
  } catch {}
14967
16191
  let tokenCeilings;
16192
+ let telemetryEnabled = false;
16193
+ let telemetryPort = 4318;
14968
16194
  try {
14969
16195
  const configSystem = createConfigSystem({ projectConfigDir: dbDir });
14970
16196
  await configSystem.load();
14971
- tokenCeilings = configSystem.getConfig().token_ceilings;
16197
+ const cfg = configSystem.getConfig();
16198
+ tokenCeilings = cfg.token_ceilings;
16199
+ if (cfg.telemetry?.enabled === true) {
16200
+ telemetryEnabled = true;
16201
+ telemetryPort = cfg.telemetry.port ?? 4318;
16202
+ }
14972
16203
  } catch {
14973
- logger.debug("Config loading skipped — using default token ceilings");
16204
+ logger.debug("Config loading skipped — using default token ceilings and telemetry settings");
14974
16205
  }
14975
16206
  let parsedStoryKeys = [];
14976
16207
  if (storiesArg !== void 0 && storiesArg !== "") {
@@ -15428,6 +16659,7 @@ async function runRunAction(options) {
15428
16659
  });
15429
16660
  });
15430
16661
  }
16662
+ const ingestionServer = telemetryEnabled ? new IngestionServer({ port: telemetryPort }) : void 0;
15431
16663
  const orchestrator = createImplementationOrchestrator({
15432
16664
  db,
15433
16665
  pack,
@@ -15442,7 +16674,8 @@ async function runRunAction(options) {
15442
16674
  skipPreflight: skipPreflight === true
15443
16675
  },
15444
16676
  projectRoot,
15445
- tokenCeilings
16677
+ tokenCeilings,
16678
+ ...ingestionServer !== void 0 ? { ingestionServer } : {}
15446
16679
  });
15447
16680
  if (outputFormat === "human" && progressRenderer === void 0 && ndjsonEmitter === void 0) {
15448
16681
  process.stdout.write(`Starting pipeline: ${storyKeys.length} story/stories, concurrency=${concurrency}\n`);
@@ -15885,5 +17118,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
15885
17118
  }
15886
17119
 
15887
17120
  //#endregion
15888
- export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
15889
- //# sourceMappingURL=run-DNURadtJ.js.map
17121
+ export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltNotInstalled, FileStateStore, SUBSTRATE_OWNED_SETTINGS_KEYS, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
17122
+ //# sourceMappingURL=run-Fzhz3-mv.js.map