substrate-ai 0.4.5 → 0.4.7

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the differences between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env node
2
- import { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-B9G5NI16.js";
2
+ import { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-fWZd8vvq.js";
3
3
  import { createLogger } from "../logger-D2fS2ccL.js";
4
4
  import { AdapterRegistry } from "../adapter-registry-Cd-7lG5v.js";
5
5
  import { CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, PartialSubstrateConfigSchema } from "../config-migrator-DtZW1maj.js";
@@ -2709,7 +2709,7 @@ async function runSupervisorAction(options, deps = {}) {
2709
2709
  const expDb = expDbWrapper.db;
2710
2710
  const { runRunAction: runPipeline } = await import(
2711
2711
  /* @vite-ignore */
2712
- "../run-CXPqJdGe.js"
2712
+ "../run-RerrMpM1.js"
2713
2713
  );
2714
2714
  const runStoryFn = async (opts) => {
2715
2715
  const exitCode = await runPipeline({
@@ -1,4 +1,4 @@
1
- import { registerRunCommand, runRunAction } from "./run-B9G5NI16.js";
1
+ import { registerRunCommand, runRunAction } from "./run-fWZd8vvq.js";
2
2
  import "./logger-D2fS2ccL.js";
3
3
  import "./config-migrator-DtZW1maj.js";
4
4
  import "./helpers-BihqWgVe.js";
@@ -5975,7 +5975,7 @@ function containsAnchorKey(content) {
5975
5975
  function parseYamlResult(yamlText, schema) {
5976
5976
  let raw;
5977
5977
  try {
5978
- raw = yaml.load(yamlText);
5978
+ raw = yaml.load(sanitizeYamlEscapes(yamlText));
5979
5979
  } catch (err) {
5980
5980
  const message = err instanceof Error ? err.message : String(err);
5981
5981
  return {
@@ -6001,6 +6001,51 @@ function parseYamlResult(yamlText, schema) {
6001
6001
  error: `Schema validation error: ${result.error.message}`
6002
6002
  };
6003
6003
  }
6004
+ /**
6005
+ * Valid YAML escape sequences in double-quoted strings (YAML 1.2 spec).
6006
+ * Any backslash followed by a character NOT in this set is invalid.
6007
+ */
6008
+ const VALID_YAML_ESCAPES = new Set([
6009
+ "0",
6010
+ "a",
6011
+ "b",
6012
+ "t",
6013
+ " ",
6014
+ "n",
6015
+ "v",
6016
+ "f",
6017
+ "r",
6018
+ "e",
6019
+ " ",
6020
+ "\"",
6021
+ "/",
6022
+ "\\",
6023
+ "N",
6024
+ "_",
6025
+ "L",
6026
+ "P",
6027
+ "x",
6028
+ "u",
6029
+ "U"
6030
+ ]);
6031
+ /**
6032
+ * Sanitize invalid backslash escape sequences in YAML double-quoted strings.
6033
+ *
6034
+ * LLMs frequently emit invalid escapes like `\$` or `\#` inside double-quoted
6035
+ * YAML values (e.g., `vi.mock('\$lib/types/review')`). js-yaml rejects these.
6036
+ * This function strips the backslash from invalid sequences, turning `\$` → `$`.
6037
+ *
6038
+ * Only operates within double-quoted string regions to avoid corrupting
6039
+ * single-quoted strings, block scalars, or unquoted values.
6040
+ */
6041
+ function sanitizeYamlEscapes(yamlText) {
6042
+ return yamlText.replace(/"(?:[^"\\]|\\.)*"/g, (match$1) => {
6043
+ return match$1.replace(/\\(.)/g, (esc, ch) => {
6044
+ if (VALID_YAML_ESCAPES.has(ch)) return esc;
6045
+ return ch;
6046
+ });
6047
+ });
6048
+ }
6004
6049
 
6005
6050
  //#endregion
6006
6051
  //#region src/modules/agent-dispatch/dispatcher-impl.ts
@@ -12587,6 +12632,7 @@ var Categorizer = class {
12587
12632
  if (lower.includes("conversation") || lower.includes("history") || lower.includes("chat")) return "conversation_history";
12588
12633
  if (lower.includes("user") || lower.includes("human")) return "user_prompts";
12589
12634
  if (toolName !== void 0 && toolName.length > 0) return "tool_outputs";
12635
+ if (lower === "log_turn") return "conversation_history";
12590
12636
  return "other";
12591
12637
  }
12592
12638
  /**
@@ -12620,6 +12666,75 @@ var Categorizer = class {
12620
12666
  return "stable";
12621
12667
  }
12622
12668
  /**
12669
+ * Compute per-category token statistics from TurnAnalysis data (not raw spans).
12670
+ *
12671
+ * All six SemanticCategory values are always present in the result (zero-token
12672
+ * categories are included with totalTokens: 0). Results are sorted by
12673
+ * totalTokens descending.
12674
+ *
12675
+ * Trend is computed by comparing first-half vs second-half turn token attribution
12676
+ * for each category, using the same 1.2×/0.8× thresholds as computeTrend().
12677
+ *
12678
+ * @param turns - TurnAnalysis[] for the story
12679
+ */
12680
+ computeCategoryStatsFromTurns(turns) {
12681
+ if (turns.length === 0) return ALL_CATEGORIES.map((category) => ({
12682
+ category,
12683
+ totalTokens: 0,
12684
+ percentage: 0,
12685
+ eventCount: 0,
12686
+ avgTokensPerEvent: 0,
12687
+ trend: "stable"
12688
+ }));
12689
+ const grandTotal = turns.reduce((sum, t) => sum + t.inputTokens + t.outputTokens, 0);
12690
+ const buckets = new Map();
12691
+ for (const cat of ALL_CATEGORIES) buckets.set(cat, {
12692
+ total: 0,
12693
+ count: 0,
12694
+ first: 0,
12695
+ second: 0
12696
+ });
12697
+ const half = Math.floor(turns.length / 2);
12698
+ for (let i = 0; i < turns.length; i++) {
12699
+ const turn = turns[i];
12700
+ const cat = this.classify(turn.name, turn.toolName);
12701
+ const bucket = buckets.get(cat);
12702
+ const tokens = turn.inputTokens + turn.outputTokens;
12703
+ bucket.total += tokens;
12704
+ bucket.count += 1;
12705
+ if (i < half) bucket.first += tokens;
12706
+ else bucket.second += tokens;
12707
+ }
12708
+ const results = ALL_CATEGORIES.map((category) => {
12709
+ const bucket = buckets.get(category);
12710
+ const totalTokens = bucket.total;
12711
+ const eventCount = bucket.count;
12712
+ const percentage = grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0;
12713
+ const avgTokensPerEvent = eventCount > 0 ? totalTokens / eventCount : 0;
12714
+ let trend = "stable";
12715
+ if (turns.length >= 2) {
12716
+ const { first, second } = bucket;
12717
+ if (first === 0 && second === 0) trend = "stable";
12718
+ else if (first === 0) trend = "growing";
12719
+ else if (second > 1.2 * first) trend = "growing";
12720
+ else if (second < .8 * first) trend = "shrinking";
12721
+ }
12722
+ return {
12723
+ category,
12724
+ totalTokens,
12725
+ percentage,
12726
+ eventCount,
12727
+ avgTokensPerEvent,
12728
+ trend
12729
+ };
12730
+ });
12731
+ this._logger.debug({
12732
+ categories: results.length,
12733
+ grandTotal
12734
+ }, "Computed category stats from turns");
12735
+ return results.sort((a, b) => b.totalTokens - a.totalTokens);
12736
+ }
12737
+ /**
12623
12738
  * Compute per-category token statistics for a complete set of spans.
12624
12739
  *
12625
12740
  * All six SemanticCategory values are always present in the result (zero-token
@@ -12749,6 +12864,54 @@ var ConsumerAnalyzer = class {
12749
12864
  return results.sort((a, b) => b.totalTokens - a.totalTokens);
12750
12865
  }
12751
12866
  /**
12867
+ * Group turns by consumer key (model|toolName), rank by totalTokens descending,
12868
+ * and return ConsumerStats for each non-zero-token group.
12869
+ *
12870
+ * @param turns - All TurnAnalysis records for the story
12871
+ */
12872
+ analyzeFromTurns(turns) {
12873
+ if (turns.length === 0) return [];
12874
+ const grandTotal = turns.reduce((sum, t) => sum + t.inputTokens + t.outputTokens, 0);
12875
+ const groups = new Map();
12876
+ for (const turn of turns) {
12877
+ const key = this._buildConsumerKeyFromTurn(turn);
12878
+ const existing = groups.get(key);
12879
+ if (existing !== void 0) existing.push(turn);
12880
+ else groups.set(key, [turn]);
12881
+ }
12882
+ const results = [];
12883
+ for (const [consumerKey, groupTurns] of groups) {
12884
+ const totalTokens = groupTurns.reduce((sum, t) => sum + t.inputTokens + t.outputTokens, 0);
12885
+ if (totalTokens === 0) continue;
12886
+ const percentage = grandTotal > 0 ? Math.round(totalTokens / grandTotal * 100 * 1e3) / 1e3 : 0;
12887
+ const eventCount = groupTurns.length;
12888
+ const firstTurn = groupTurns[0];
12889
+ const category = this._categorizer.classify(firstTurn.name, firstTurn.toolName);
12890
+ const sorted = groupTurns.slice().sort((a, b) => b.inputTokens + b.outputTokens - (a.inputTokens + a.outputTokens));
12891
+ const topInvocations = sorted.slice(0, 20).map((t) => ({
12892
+ spanId: t.spanId,
12893
+ name: t.name,
12894
+ toolName: t.toolName,
12895
+ totalTokens: t.inputTokens + t.outputTokens,
12896
+ inputTokens: t.inputTokens,
12897
+ outputTokens: t.outputTokens
12898
+ }));
12899
+ results.push({
12900
+ consumerKey,
12901
+ category,
12902
+ totalTokens,
12903
+ percentage,
12904
+ eventCount,
12905
+ topInvocations
12906
+ });
12907
+ }
12908
+ this._logger.debug({
12909
+ consumers: results.length,
12910
+ grandTotal
12911
+ }, "Computed consumer stats from turns");
12912
+ return results.sort((a, b) => b.totalTokens - a.totalTokens);
12913
+ }
12914
+ /**
12752
12915
  * Build a stable, collision-resistant consumer key from a span.
12753
12916
  * Format: `operationName|toolName` (tool part is empty string if absent).
12754
12917
  */
@@ -12758,6 +12921,15 @@ var ConsumerAnalyzer = class {
12758
12921
  return `${operationPart}|${toolPart}`;
12759
12922
  }
12760
12923
  /**
12924
+ * Build a stable consumer key from a turn.
12925
+ * Format: `model|toolName` (tool part is empty string if absent).
12926
+ */
12927
+ _buildConsumerKeyFromTurn(turn) {
12928
+ const modelPart = (turn.model ?? "unknown").slice(0, 200);
12929
+ const toolPart = (turn.toolName ?? "").slice(0, 100);
12930
+ return `${modelPart}|${toolPart}`;
12931
+ }
12932
+ /**
12761
12933
  * Extract a tool name from span attributes, checking three known attribute keys
12762
12934
  * in priority order.
12763
12935
  */
@@ -13155,6 +13327,99 @@ var TurnAnalyzer = class {
13155
13327
  }
13156
13328
  };
13157
13329
 
13330
+ //#endregion
13331
+ //#region src/modules/telemetry/log-turn-analyzer.ts
13332
+ var LogTurnAnalyzer = class {
13333
+ _logger;
13334
+ constructor(logger$27) {
13335
+ this._logger = logger$27;
13336
+ }
13337
+ /**
13338
+ * Analyze a list of NormalizedLog records and produce TurnAnalysis[].
13339
+ *
13340
+ * Returns an empty array immediately when logs is empty or on any error.
13341
+ *
13342
+ * @param logs - All log records for a story
13343
+ */
13344
+ analyze(logs) {
13345
+ try {
13346
+ if (!Array.isArray(logs) || logs.length === 0) return [];
13347
+ const validLogs = logs.filter((log$2) => log$2 != null && typeof log$2 === "object" && ((log$2.inputTokens ?? 0) > 0 || (log$2.outputTokens ?? 0) > 0));
13348
+ if (validLogs.length === 0) {
13349
+ this._logger.debug("LogTurnAnalyzer: no LLM logs with tokens to analyze");
13350
+ return [];
13351
+ }
13352
+ const grouped = new Map();
13353
+ for (const log$2 of validLogs) {
13354
+ const key = log$2.traceId != null && log$2.spanId != null ? `${log$2.traceId}:${log$2.spanId}` : log$2.logId;
13355
+ const group = grouped.get(key) ?? [];
13356
+ group.push(log$2);
13357
+ grouped.set(key, group);
13358
+ }
13359
+ const merged = [];
13360
+ for (const group of grouped.values()) {
13361
+ const sorted = [...group].sort((a, b) => a.timestamp - b.timestamp);
13362
+ const representative = sorted[0];
13363
+ let inputTokens = 0;
13364
+ let outputTokens = 0;
13365
+ let cacheReadTokens = 0;
13366
+ let costUsd = 0;
13367
+ for (const log$2 of group) {
13368
+ inputTokens += log$2.inputTokens ?? 0;
13369
+ outputTokens += log$2.outputTokens ?? 0;
13370
+ cacheReadTokens += log$2.cacheReadTokens ?? 0;
13371
+ costUsd += log$2.costUsd ?? 0;
13372
+ }
13373
+ merged.push({
13374
+ representative,
13375
+ inputTokens,
13376
+ outputTokens,
13377
+ cacheReadTokens,
13378
+ costUsd
13379
+ });
13380
+ }
13381
+ merged.sort((a, b) => a.representative.timestamp - b.representative.timestamp);
13382
+ let runningContext = 0;
13383
+ const turns = merged.map(({ representative: log$2, inputTokens, outputTokens, cacheReadTokens, costUsd }, idx) => {
13384
+ const prevContext = runningContext;
13385
+ runningContext += inputTokens;
13386
+ const freshTokens = inputTokens - cacheReadTokens;
13387
+ const cacheHitRate = inputTokens > 0 ? cacheReadTokens / inputTokens : 0;
13388
+ return {
13389
+ spanId: log$2.spanId ?? log$2.logId,
13390
+ turnNumber: idx + 1,
13391
+ name: log$2.eventName ?? "log_turn",
13392
+ timestamp: log$2.timestamp,
13393
+ source: "claude-code",
13394
+ model: log$2.model,
13395
+ inputTokens,
13396
+ outputTokens,
13397
+ cacheReadTokens,
13398
+ freshTokens,
13399
+ cacheHitRate,
13400
+ costUsd,
13401
+ durationMs: 0,
13402
+ contextSize: runningContext,
13403
+ contextDelta: runningContext - prevContext,
13404
+ toolName: log$2.toolName,
13405
+ isContextSpike: false,
13406
+ childSpans: []
13407
+ };
13408
+ });
13409
+ const avg = turns.reduce((sum, t) => sum + t.inputTokens, 0) / turns.length;
13410
+ for (const turn of turns) turn.isContextSpike = avg > 0 && turn.inputTokens > 2 * avg;
13411
+ this._logger.debug({
13412
+ turnCount: turns.length,
13413
+ avg
13414
+ }, "LogTurnAnalyzer.analyze complete");
13415
+ return turns;
13416
+ } catch (err) {
13417
+ this._logger.warn({ err }, "LogTurnAnalyzer.analyze failed — returning empty array");
13418
+ return [];
13419
+ }
13420
+ }
13421
+ };
13422
+
13158
13423
  //#endregion
13159
13424
  //#region src/modules/telemetry/cost-table.ts
13160
13425
  /**
@@ -13719,6 +13984,7 @@ const logger$6 = createLogger("telemetry:pipeline");
13719
13984
  var TelemetryPipeline = class {
13720
13985
  _normalizer;
13721
13986
  _turnAnalyzer;
13987
+ _logTurnAnalyzer;
13722
13988
  _categorizer;
13723
13989
  _consumerAnalyzer;
13724
13990
  _efficiencyScorer;
@@ -13727,6 +13993,7 @@ var TelemetryPipeline = class {
13727
13993
  constructor(deps) {
13728
13994
  this._normalizer = deps.normalizer;
13729
13995
  this._turnAnalyzer = deps.turnAnalyzer;
13996
+ this._logTurnAnalyzer = deps.logTurnAnalyzer;
13730
13997
  this._categorizer = deps.categorizer;
13731
13998
  this._consumerAnalyzer = deps.consumerAnalyzer;
13732
13999
  this._efficiencyScorer = deps.efficiencyScorer;
@@ -13736,8 +14003,14 @@ var TelemetryPipeline = class {
13736
14003
  /**
13737
14004
  * Process a batch of raw OTLP payloads through the full analysis pipeline.
13738
14005
  *
13739
- * Each payload is normalized independently. Spans are then grouped by storyKey
13740
- * for per-story analysis. Items that fail normalization are skipped with a warning.
14006
+ * Each payload is normalized independently. Spans and logs are grouped by
14007
+ * storyKey for per-story analysis. Items that fail normalization are skipped
14008
+ * with a warning.
14009
+ *
14010
+ * Dual-track analysis (Story 27-15):
14011
+ * - Span-derived turns via TurnAnalyzer
14012
+ * - Log-derived turns via LogTurnAnalyzer
14013
+ * - Merged (deduplicated by spanId) before downstream analysis
13741
14014
  */
13742
14015
  async processBatch(items) {
13743
14016
  if (items.length === 0) return;
@@ -13762,25 +14035,46 @@ var TelemetryPipeline = class {
13762
14035
  spans: allSpans.length,
13763
14036
  logs: allLogs.length
13764
14037
  }, "TelemetryPipeline: normalized batch");
13765
- if (allSpans.length === 0) {
13766
- logger$6.debug("TelemetryPipeline: no spans normalized from batch");
14038
+ if (allSpans.length === 0 && allLogs.length === 0) {
14039
+ logger$6.debug("TelemetryPipeline: no spans or logs normalized from batch");
13767
14040
  return;
13768
14041
  }
13769
- const spansByStory = new Map();
13770
14042
  const unknownStoryKey = "__unknown__";
14043
+ const spansByStory = new Map();
13771
14044
  for (const span of allSpans) {
13772
14045
  const key = span.storyKey ?? unknownStoryKey;
13773
14046
  const existing = spansByStory.get(key);
13774
14047
  if (existing !== void 0) existing.push(span);
13775
14048
  else spansByStory.set(key, [span]);
13776
14049
  }
13777
- for (const [storyKey, spans] of spansByStory) {
14050
+ const logsByStory = new Map();
14051
+ for (const log$2 of allLogs) {
14052
+ const key = log$2.storyKey ?? unknownStoryKey;
14053
+ const existing = logsByStory.get(key);
14054
+ if (existing !== void 0) existing.push(log$2);
14055
+ else logsByStory.set(key, [log$2]);
14056
+ }
14057
+ const allStoryKeys = new Set();
14058
+ for (const key of spansByStory.keys()) allStoryKeys.add(key);
14059
+ for (const key of logsByStory.keys()) allStoryKeys.add(key);
14060
+ for (const storyKey of allStoryKeys) {
13778
14061
  if (storyKey === unknownStoryKey) {
13779
- logger$6.debug({ spanCount: spans.length }, "TelemetryPipeline: spans without storyKey — skipping analysis");
14062
+ const spanCount = spansByStory.get(unknownStoryKey)?.length ?? 0;
14063
+ const logCount = logsByStory.get(unknownStoryKey)?.length ?? 0;
14064
+ logger$6.debug({
14065
+ spanCount,
14066
+ logCount
14067
+ }, "TelemetryPipeline: data without storyKey — skipping analysis");
13780
14068
  continue;
13781
14069
  }
13782
14070
  try {
13783
- await this._processStory(storyKey, spans);
14071
+ const spans = spansByStory.get(storyKey) ?? [];
14072
+ const logs = logsByStory.get(storyKey) ?? [];
14073
+ const spanTurns = spans.length > 0 ? this._turnAnalyzer.analyze(spans) : [];
14074
+ const logTurns = logs.length > 0 ? this._logTurnAnalyzer.analyze(logs) : [];
14075
+ const mergedTurns = this._mergeTurns(spanTurns, logTurns);
14076
+ if (spans.length > 0) await this._processStory(storyKey, spans, mergedTurns);
14077
+ else await this._processStoryFromTurns(storyKey, mergedTurns);
13784
14078
  } catch (err) {
13785
14079
  logger$6.warn({
13786
14080
  err,
@@ -13788,10 +14082,29 @@ var TelemetryPipeline = class {
13788
14082
  }, "TelemetryPipeline: story processing failed — skipping");
13789
14083
  }
13790
14084
  }
13791
- logger$6.debug({ storyCount: spansByStory.size }, "TelemetryPipeline.processBatch complete");
14085
+ logger$6.debug({ storyCount: allStoryKeys.size }, "TelemetryPipeline.processBatch complete");
14086
+ }
14087
+ /**
14088
+ * Merge span-derived and log-derived turns, deduplicating by spanId.
14089
+ * When a span and a log share the same spanId, the span-derived turn is preferred
14090
+ * (richer data). The merged result is sorted chronologically and renumbered.
14091
+ */
14092
+ _mergeTurns(spanTurns, logTurns) {
14093
+ if (logTurns.length === 0) return spanTurns;
14094
+ if (spanTurns.length === 0) return logTurns;
14095
+ const spanTurnIds = new Set(spanTurns.map((t) => t.spanId));
14096
+ const uniqueLogTurns = logTurns.filter((t) => !spanTurnIds.has(t.spanId));
14097
+ return [...spanTurns, ...uniqueLogTurns].sort((a, b) => a.timestamp - b.timestamp).map((t, i) => ({
14098
+ ...t,
14099
+ turnNumber: i + 1
14100
+ }));
13792
14101
  }
13793
- async _processStory(storyKey, spans) {
13794
- const turns = this._turnAnalyzer.analyze(spans);
14102
+ /**
14103
+ * Full span-based analysis path (unchanged behavior when no logs present — AC4).
14104
+ * When mergedTurns is provided, uses those instead of computing from spans alone.
14105
+ */
14106
+ async _processStory(storyKey, spans, mergedTurns) {
14107
+ const turns = mergedTurns;
13795
14108
  const categories = this._categorizer.computeCategoryStats(spans, turns);
13796
14109
  const consumers = this._consumerAnalyzer.analyze(spans);
13797
14110
  const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
@@ -13835,6 +14148,28 @@ var TelemetryPipeline = class {
13835
14148
  recommendations: recommendations.length
13836
14149
  }, "TelemetryPipeline: story analysis complete");
13837
14150
  }
14151
+ /**
14152
+ * Log-only analysis path (AC3, AC6): processes turns from LogTurnAnalyzer
14153
+ * through efficiency scoring and persistence.
14154
+ *
14155
+ * Categorizer and consumer analyzer remain span-only for now (story 27-16).
14156
+ */
14157
+ async _processStoryFromTurns(storyKey, turns) {
14158
+ if (turns.length === 0) return;
14159
+ const efficiencyScore = this._efficiencyScorer.score(storyKey, turns);
14160
+ await Promise.all([this._persistence.storeTurnAnalysis(storyKey, turns).catch((err) => logger$6.warn({
14161
+ err,
14162
+ storyKey
14163
+ }, "Failed to store turn analysis")), this._persistence.storeEfficiencyScore(efficiencyScore).catch((err) => logger$6.warn({
14164
+ err,
14165
+ storyKey
14166
+ }, "Failed to store efficiency score"))]);
14167
+ logger$6.info({
14168
+ storyKey,
14169
+ turns: turns.length,
14170
+ compositeScore: efficiencyScore.compositeScore
14171
+ }, "TelemetryPipeline: story analysis from turns complete");
14172
+ }
13838
14173
  };
13839
14174
 
13840
14175
  //#endregion
@@ -15085,13 +15420,12 @@ function createImplementationOrchestrator(deps) {
15085
15420
  }
15086
15421
  if (telemetryPersistence !== void 0) try {
15087
15422
  const turns = await telemetryPersistence.getTurnAnalysis(storyKey);
15088
- const spans = [];
15089
- if (spans.length === 0) logger$27.debug({ storyKey }, "No spans for telemetry categorization — skipping");
15423
+ if (turns.length === 0) logger$27.debug({ storyKey }, "No turn analysis data for telemetry categorization — skipping");
15090
15424
  else {
15091
15425
  const categorizer = new Categorizer(logger$27);
15092
15426
  const consumerAnalyzer = new ConsumerAnalyzer(categorizer, logger$27);
15093
- const categoryStats = categorizer.computeCategoryStats(spans, turns);
15094
- const consumerStats = consumerAnalyzer.analyze(spans);
15427
+ const categoryStats = categorizer.computeCategoryStatsFromTurns(turns);
15428
+ const consumerStats = consumerAnalyzer.analyzeFromTurns(turns);
15095
15429
  await telemetryPersistence.storeCategoryStats(storyKey, categoryStats);
15096
15430
  await telemetryPersistence.storeConsumerStats(storyKey, consumerStats);
15097
15431
  const growingCount = categoryStats.filter((c) => c.trend === "growing").length;
@@ -15572,6 +15906,7 @@ function createImplementationOrchestrator(deps) {
15572
15906
  const telemetryPipeline = new TelemetryPipeline({
15573
15907
  normalizer: new TelemetryNormalizer(pipelineLogger),
15574
15908
  turnAnalyzer: new TurnAnalyzer(pipelineLogger),
15909
+ logTurnAnalyzer: new LogTurnAnalyzer(pipelineLogger),
15575
15910
  categorizer: new Categorizer(pipelineLogger),
15576
15911
  consumerAnalyzer: new ConsumerAnalyzer(new Categorizer(pipelineLogger), pipelineLogger),
15577
15912
  efficiencyScorer: new EfficiencyScorer(pipelineLogger),
@@ -21091,4 +21426,4 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
21091
21426
 
21092
21427
  //#endregion
21093
21428
  export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, DoltClient, DoltNotInstalled, DoltRepoMapMetaRepository, DoltSymbolRepository, FileStateStore, GitClient, GrammarLoader, IngestionServer, RepoMapInjector, RepoMapModule, RepoMapQueryEngine, RepoMapStorage, SUBSTRATE_OWNED_SETTINGS_KEYS, SymbolParser, TelemetryPersistence, VALID_PHASES, buildPipelineStatusOutput, checkDoltInstalled, createConfigSystem, createContextCompiler, createDispatcher, createDoltClient, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStateStore, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, initializeDolt, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
21094
- //# sourceMappingURL=run-B9G5NI16.js.map
21429
+ //# sourceMappingURL=run-fWZd8vvq.js.map
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "substrate-ai",
3
- "version": "0.4.5",
3
+ "version": "0.4.7",
4
4
  "description": "Substrate — multi-agent orchestration daemon for AI coding agents",
5
5
  "type": "module",
6
6
  "license": "MIT",