substrate-ai 0.2.34 → 0.2.36

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli/index.js CHANGED
@@ -1,5 +1,5 @@
1
1
  #!/usr/bin/env node
2
- import { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createConfigSystem, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runAnalysisPhase, runMigrations, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-XNRFAHEx.js";
2
+ import { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createConfigSystem, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runSolutioningPhase, validateStopAfterFromConflict } from "../run-B9OkdDIk.js";
3
3
  import { createLogger } from "../logger-D2fS2ccL.js";
4
4
  import { AdapterRegistry } from "../adapter-registry-PsWhP_1Q.js";
5
5
  import { CURRENT_CONFIG_FORMAT_VERSION, CURRENT_TASK_GRAPH_VERSION, PartialSubstrateConfigSchema } from "../config-migrator-DSi8KhQC.js";
@@ -1201,12 +1201,8 @@ async function runFullPipelineFromPhase(options) {
1201
1201
  logger$14.warn({ err }, "Failed to record token usage");
1202
1202
  }
1203
1203
  });
1204
- const storyDecisions = db.prepare(`SELECT description FROM requirements WHERE pipeline_run_id = ? AND source = 'solutioning-phase'`).all(runId);
1205
- const storyKeys = [];
1206
- for (const req of storyDecisions) {
1207
- const keyMatch = /^(\d+-\d+):/.exec(req.description);
1208
- if (keyMatch) storyKeys.push(keyMatch[1]);
1209
- }
1204
+ const storyKeys = resolveStoryKeys(db, projectRoot, { pipelineRunId: runId });
1205
+ if (storyKeys.length === 0 && outputFormat === "human") process.stdout.write("[IMPLEMENTATION] No stories found for this run. Check solutioning phase output.\n");
1210
1206
  await orchestrator.run(storyKeys);
1211
1207
  if (outputFormat === "human") process.stdout.write("[IMPLEMENTATION] Complete\n");
1212
1208
  }
@@ -2585,7 +2581,7 @@ async function runSupervisorAction(options, deps = {}) {
2585
2581
  const expDb = expDbWrapper.db;
2586
2582
  const { runRunAction: runPipeline } = await import(
2587
2583
  /* @vite-ignore */
2588
- "../run-Cd7sfXzo.js"
2584
+ "../run-LStYIzcb.js"
2589
2585
  );
2590
2586
  const runStoryFn = async (opts) => {
2591
2587
  const exitCode = await runPipeline({
@@ -1,22 +1,63 @@
1
1
  <!-- substrate:start -->
2
2
  ## Substrate Pipeline
3
3
 
4
- This project uses Substrate for automated implementation pipelines.
5
-
6
- ### Quick Start
7
- - Run `substrate run --help-agent` to get full pipeline interaction instructions
8
- - Run `substrate run --events` to execute the pipeline with structured event output
9
- - Run `substrate run --events --stories 7-1,7-2` to run specific stories
10
-
11
- ### Monitoring Pipeline Runs
12
- - **DO NOT use `Task Output` to monitor substrate** — Claude Code task IDs do not map to substrate's internal processes
13
- - Monitor progress with: `substrate status --output-format json`
14
- - For real-time output: redirect stdout to a file, then tail it: `substrate run --events > /tmp/substrate-out.log 2>&1 &` then `tail -f /tmp/substrate-out.log`
15
- - Check pipeline health: `substrate health --output-format json`
16
-
17
- ### Agent Behavior
18
- - On story escalation: read the flagged files and issues, propose a fix, ask the user before applying
19
- - On minor fix verdict: offer to fix automatically
20
- - Never re-run a failed story without explicit user confirmation
21
- - After pipeline completion: summarize results conversationally (X succeeded, Y failed, Z need attention)
4
+ This project uses Substrate for automated implementation pipelines. Substrate runs are long-running (5–40 minutes). Plan accordingly.
5
+
6
+ ### Running the Pipeline
7
+
8
+ **Preferred Supervisor mode** (handles stalls, auto-restarts, post-run analysis):
9
+ ```
10
+ substrate supervisor --output-format json --stories 7-1,7-2
11
+ ```
12
+
13
+ **Direct mode** (simpler, no auto-recovery):
14
+ ```
15
+ substrate run --events --stories 7-1,7-2
16
+ ```
17
+
18
+ **CRITICAL execution rules:**
19
+ - Pipeline runs take **5–40 minutes**. You MUST use `run_in_background: true` or `timeout: 600000` (10 min) when invoking via Bash tool. Default 2-minute timeout WILL kill the pipeline.
20
+ - **NEVER pipe substrate output** to `head`, `tail`, `grep`, or any command that may close the pipe early — this causes EPIPE stalls that hang the process.
21
+ - **DO NOT use `Task Output`** to monitor substrate — Claude Code task IDs do not map to substrate's internal processes.
22
+ - For full event protocol and command reference: `substrate run --help-agent`
23
+
24
+ ### Monitoring (while pipeline is running)
25
+
26
+ Poll status periodically (every 60–90s):
27
+ ```
28
+ substrate status --output-format json
29
+ ```
30
+
31
+ Check process health if pipeline seems quiet:
32
+ ```
33
+ substrate health --output-format json
34
+ ```
35
+
36
+ **Interpreting silence:** No output for 5 minutes = normal (agent is working). No output for 15+ minutes = likely stalled. Use `substrate health` to confirm, then consider killing and resuming.
37
+
38
+ ### After Pipeline Completes
39
+
40
+ 1. **Summarize results** conversationally: X succeeded, Y failed, Z escalated
41
+ 2. **Check metrics**: `substrate metrics --output-format json`
42
+ 3. **Read analysis** (if supervisor mode): `substrate metrics --analysis <run_id> --output-format json`
43
+
44
+ ### Handling Escalations and Failures
45
+
46
+ - **On story escalation**: read the flagged files and issues listed in the escalation event, propose a fix, ask the user before applying
47
+ - **On minor fix verdict** (`NEEDS_MINOR_FIXES`): offer to fix automatically
48
+ - **On build verification failure**: read the build output, diagnose the compiler error, propose a fix
49
+ - **On contract mismatch** (`pipeline:contract-mismatch`): cross-story interface conflict — read both stories' files, reconcile types manually
50
+ - **Never re-run a failed story** without explicit user confirmation
51
+
52
+ ### Key Commands Reference
53
+
54
+ | Command | Purpose |
55
+ |---|---|
56
+ | `substrate run --events` | Run pipeline with NDJSON event stream |
57
+ | `substrate supervisor --output-format json` | Run with auto-recovery and analysis |
58
+ | `substrate status --output-format json` | Poll current pipeline state |
59
+ | `substrate health --output-format json` | Check process health and stall detection |
60
+ | `substrate metrics --output-format json` | View historical run metrics |
61
+ | `substrate resume` | Resume an interrupted pipeline run |
62
+ | `substrate run --help-agent` | Full agent instruction reference (487 lines) |
22
63
  <!-- substrate:end -->
@@ -9404,6 +9404,44 @@ function createImplementationOrchestrator(deps) {
9404
9404
  //#endregion
9405
9405
  //#region src/modules/implementation-orchestrator/story-discovery.ts
9406
9406
  /**
9407
+ * Unified story key resolution with a 4-level fallback chain.
9408
+ *
9409
+ * 1. Explicit keys (from --stories flag) — returned as-is
9410
+ * 2. Decisions table (category='stories', phase='solutioning')
9411
+ * 3. Epic shard decisions (category='epic-shard') — parsed with parseStoryKeysFromEpics
9412
+ * 4. epics.md file on disk (via discoverPendingStoryKeys)
9413
+ *
9414
+ * Optionally filters out completed stories when filterCompleted is set.
9415
+ *
9416
+ * @returns Sorted, deduplicated array of story keys in "N-M" format
9417
+ */
9418
+ function resolveStoryKeys(db, projectRoot, opts) {
9419
+ if (opts?.explicit !== void 0 && opts.explicit.length > 0) return opts.explicit;
9420
+ let keys = [];
9421
+ try {
9422
+ const query = opts?.pipelineRunId !== void 0 ? `SELECT key FROM decisions WHERE phase = 'solutioning' AND category = 'stories' AND pipeline_run_id = ? ORDER BY created_at ASC` : `SELECT key FROM decisions WHERE phase = 'solutioning' AND category = 'stories' ORDER BY created_at ASC`;
9423
+ const params = opts?.pipelineRunId !== void 0 ? [opts.pipelineRunId] : [];
9424
+ const rows = db.prepare(query).all(...params);
9425
+ for (const row of rows) if (/^\d+-\d+/.test(row.key)) {
9426
+ const match = /^(\d+-\d+)/.exec(row.key);
9427
+ if (match !== null) keys.push(match[1]);
9428
+ }
9429
+ } catch {}
9430
+ if (keys.length === 0) try {
9431
+ const query = opts?.pipelineRunId !== void 0 ? `SELECT value FROM decisions WHERE category = 'epic-shard' AND pipeline_run_id = ? ORDER BY created_at ASC` : `SELECT value FROM decisions WHERE category = 'epic-shard' ORDER BY created_at ASC`;
9432
+ const params = opts?.pipelineRunId !== void 0 ? [opts.pipelineRunId] : [];
9433
+ const shardRows = db.prepare(query).all(...params);
9434
+ const allContent = shardRows.map((r) => r.value).join("\n");
9435
+ if (allContent.length > 0) keys = parseStoryKeysFromEpics(allContent);
9436
+ } catch {}
9437
+ if (keys.length === 0) keys = discoverPendingStoryKeys(projectRoot);
9438
+ if (opts?.filterCompleted === true && keys.length > 0) {
9439
+ const completedKeys = getCompletedStoryKeys(db);
9440
+ keys = keys.filter((k) => !completedKeys.has(k));
9441
+ }
9442
+ return sortStoryKeys([...new Set(keys)]);
9443
+ }
9444
+ /**
9407
9445
  * Extract all story keys (N-M format) from epics.md content.
9408
9446
  *
9409
9447
  * Supports three extraction patterns found in real epics.md files:
@@ -9490,6 +9528,24 @@ function collectExistingStoryKeys(projectRoot) {
9490
9528
  return existing;
9491
9529
  }
9492
9530
  /**
9531
+ * Collect story keys already completed in previous pipeline runs.
9532
+ * Scans pipeline_runs with status='completed' and extracts story keys
9533
+ * with phase='COMPLETE' from their token_usage_json state.
9534
+ */
9535
+ function getCompletedStoryKeys(db) {
9536
+ const completed = new Set();
9537
+ try {
9538
+ const rows = db.prepare(`SELECT token_usage_json FROM pipeline_runs WHERE status = 'completed' AND token_usage_json IS NOT NULL`).all();
9539
+ for (const row of rows) try {
9540
+ const state = JSON.parse(row.token_usage_json);
9541
+ if (state.stories !== void 0) {
9542
+ for (const [key, s] of Object.entries(state.stories)) if (s.phase === "COMPLETE") completed.add(key);
9543
+ }
9544
+ } catch {}
9545
+ } catch {}
9546
+ return completed;
9547
+ }
9548
+ /**
9493
9549
  * Sort story keys numerically by epic number (primary) then story number (secondary).
9494
9550
  * E.g. ["10-1", "1-2", "2-1"] → ["1-2", "2-1", "10-1"]
9495
9551
  */
@@ -12341,7 +12397,7 @@ async function runStoryGeneration(deps, params, gapAnalysis) {
12341
12397
  pipeline_run_id: runId,
12342
12398
  source: "solutioning-phase",
12343
12399
  type: "functional",
12344
- description: `${story.title}: ${story.description}`,
12400
+ description: `${story.key}: ${story.title}: ${story.description}`,
12345
12401
  priority: story.priority
12346
12402
  });
12347
12403
  const totalStories = epics.reduce((sum, epic) => sum + epic.stories.length, 0);
@@ -12696,7 +12752,7 @@ async function runStoryGenerationMultiStep(deps, params) {
12696
12752
  pipeline_run_id: params.runId,
12697
12753
  source: "solutioning-phase",
12698
12754
  type: "functional",
12699
- description: `${story.title}: ${story.description}`,
12755
+ description: `${story.key}: ${story.title}: ${story.description}`,
12700
12756
  priority: story.priority
12701
12757
  });
12702
12758
  const artifactId = storyStep?.artifactId ?? "";
@@ -13494,6 +13550,16 @@ async function runRunAction(options) {
13494
13550
  } catch {
13495
13551
  logger.debug("Config loading skipped — using default token ceilings");
13496
13552
  }
13553
+ let parsedStoryKeys = [];
13554
+ if (storiesArg !== void 0 && storiesArg !== "") {
13555
+ parsedStoryKeys = storiesArg.split(",").map((k) => k.trim()).filter((k) => k.length > 0);
13556
+ for (const key of parsedStoryKeys) if (!validateStoryKey(key)) {
13557
+ const errorMsg = `Story key '${key}' is not a valid format. Expected: <epic>-<story> (e.g., 10-1)`;
13558
+ if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
13559
+ else process.stderr.write(`Error: ${errorMsg}\n`);
13560
+ return 1;
13561
+ }
13562
+ }
13497
13563
  if (startPhase !== void 0) return runFullPipeline({
13498
13564
  packName,
13499
13565
  packPath,
@@ -13506,6 +13572,7 @@ async function runRunAction(options) {
13506
13572
  outputFormat,
13507
13573
  projectRoot,
13508
13574
  tokenCeilings,
13575
+ ...parsedStoryKeys.length > 0 ? { stories: parsedStoryKeys } : {},
13509
13576
  ...eventsFlag === true ? { events: true } : {},
13510
13577
  ...skipUx === true ? { skipUx: true } : {},
13511
13578
  ...researchFlag === true ? { research: true } : {},
@@ -13513,16 +13580,7 @@ async function runRunAction(options) {
13513
13580
  ...skipPreflight === true ? { skipPreflight: true } : {},
13514
13581
  ...injectedRegistry !== void 0 ? { registry: injectedRegistry } : {}
13515
13582
  });
13516
- let storyKeys = [];
13517
- if (storiesArg !== void 0 && storiesArg !== "") {
13518
- storyKeys = storiesArg.split(",").map((k) => k.trim()).filter((k) => k.length > 0);
13519
- for (const key of storyKeys) if (!validateStoryKey(key)) {
13520
- const errorMsg = `Story key '${key}' is not a valid format. Expected: <epic>-<story> (e.g., 10-1)`;
13521
- if (outputFormat === "json") process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
13522
- else process.stderr.write(`Error: ${errorMsg}\n`);
13523
- return 1;
13524
- }
13525
- }
13583
+ let storyKeys = [...parsedStoryKeys];
13526
13584
  if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
13527
13585
  const dbWrapper = new DatabaseWrapper(dbPath);
13528
13586
  try {
@@ -14029,7 +14087,7 @@ async function runRunAction(options) {
14029
14087
  }
14030
14088
  }
14031
14089
  async function runFullPipeline(options) {
14032
- const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings } = options;
14090
+ const { packName, packPath, dbDir, dbPath, startPhase, stopAfter, concept, concurrency, outputFormat, projectRoot, events: eventsFlag, skipUx, research: researchFlag, skipResearch: skipResearchFlag, skipPreflight, registry: injectedRegistry, tokenCeilings, stories: explicitStories } = options;
14033
14091
  if (!existsSync(dbDir)) mkdirSync(dbDir, { recursive: true });
14034
14092
  const dbWrapper = new DatabaseWrapper(dbPath);
14035
14093
  try {
@@ -14267,12 +14325,8 @@ async function runFullPipeline(options) {
14267
14325
  process.stdout.write(` [ESCALATED] ${payload.storyKey}: ${payload.lastVerdict}\n`);
14268
14326
  });
14269
14327
  }
14270
- const storyDecisions = db.prepare(`SELECT description FROM requirements WHERE status = 'active' AND source = 'solutioning-phase'`).all();
14271
- const storyKeys = [];
14272
- for (const req of storyDecisions) {
14273
- const keyMatch = /^(\d+-\d+):/.exec(req.description);
14274
- if (keyMatch) storyKeys.push(keyMatch[1]);
14275
- }
14328
+ const storyKeys = resolveStoryKeys(db, projectRoot, { explicit: explicitStories });
14329
+ if (storyKeys.length === 0 && outputFormat === "human") process.stdout.write("[IMPLEMENTATION] No stories found. Run solutioning first or pass --stories.\n");
14276
14330
  if (outputFormat === "human") process.stdout.write(`[IMPLEMENTATION] Starting ${storyKeys.length} stories with concurrency=${concurrency}\n`);
14277
14331
  await orchestrator.run(storyKeys);
14278
14332
  if (outputFormat === "human") process.stdout.write("[IMPLEMENTATION] Complete\n");
@@ -14376,5 +14430,5 @@ function registerRunCommand(program, _version = "0.0.0", projectRoot = process.c
14376
14430
  }
14377
14431
 
14378
14432
  //#endregion
14379
- export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createConfigSystem, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
14380
- //# sourceMappingURL=run-XNRFAHEx.js.map
14433
+ export { DEFAULT_CONFIG, DEFAULT_ROUTING_POLICY, DatabaseWrapper, SUBSTRATE_OWNED_SETTINGS_KEYS, VALID_PHASES, buildPipelineStatusOutput, createConfigSystem, createContextCompiler, createDispatcher, createImplementationOrchestrator, createPackLoader, createPhaseOrchestrator, createStopAfterGate, findPackageRoot, formatOutput, formatPhaseCompletionSummary, formatPipelineStatusHuman, formatPipelineSummary, formatTokenTelemetry, getAllDescendantPids, getAutoHealthData, getSubstrateDefaultSettings, parseDbTimestampAsUtc, registerHealthCommand, registerRunCommand, resolveBmadMethodSrcPath, resolveBmadMethodVersion, resolveMainRepoRoot, resolveStoryKeys, runAnalysisPhase, runMigrations, runPlanningPhase, runRunAction, runSolutioningPhase, validateStopAfterFromConflict };
14434
+ //# sourceMappingURL=run-B9OkdDIk.js.map
@@ -1,4 +1,4 @@
1
- import { registerRunCommand, runRunAction } from "./run-XNRFAHEx.js";
1
+ import { registerRunCommand, runRunAction } from "./run-B9OkdDIk.js";
2
2
  import "./logger-D2fS2ccL.js";
3
3
  import "./config-migrator-DSi8KhQC.js";
4
4
  import "./helpers-RL22dYtn.js";
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "substrate-ai",
3
- "version": "0.2.34",
3
+ "version": "0.2.36",
4
4
  "description": "Substrate — multi-agent orchestration daemon for AI coding agents",
5
5
  "type": "module",
6
6
  "license": "MIT",