substrate-ai 0.1.18 → 0.1.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -4,9 +4,9 @@
4
4
 
5
5
  # Substrate
6
6
 
7
- Autonomous software development pipeline powered by multi-agent orchestration. Substrate takes a project idea from concept through analysis, planning, architecture, implementation, and code review — coordinating CLI-based AI agents (Claude Code, Codex, Gemini CLI) to do the work.
7
+ Substrate is an autonomous software development pipeline. Describe your project in plain language and Substrate handles the rest — coordinating multiple AI coding agents (Claude Code, Codex, Gemini CLI) working in parallel across isolated branches to take your idea from concept through implementation and code review.
8
8
 
9
- Substrate follows a modular monolith pattern running as a single Node.js process. The orchestrator never calls LLMs directly all intelligent work is delegated to CLI agents running as child processes in isolated git worktrees. The autonomous pipeline compiles BMAD methodology workflows into token-efficient agent dispatches.
9
+ Unlike API-based orchestrators, Substrate routes work through the CLI tools you already have installed, maximizing your existing AI subscriptions before falling back to pay-per-token billing. Runs are persistent and resumable — no lost work, no re-execution waste, full cost visibility across every provider.
10
10
 
11
11
  ## Prerequisites
12
12
 
package/dist/cli/index.js CHANGED
@@ -7214,6 +7214,13 @@ const DEFAULT_TIMEOUTS = {
7214
7214
  * Only defined for task types that benefit from explicit turn budgets.
7215
7215
  */
7216
7216
  const DEFAULT_MAX_TURNS = {
7217
+ "analysis": 15,
7218
+ "planning": 20,
7219
+ "architecture": 25,
7220
+ "story-generation": 30,
7221
+ "readiness-check": 20,
7222
+ "elicitation": 15,
7223
+ "critique": 15,
7217
7224
  "dev-story": 75,
7218
7225
  "major-rework": 50,
7219
7226
  "code-review": 25,
@@ -7900,6 +7907,23 @@ function createDecision(db, input) {
7900
7907
  return row;
7901
7908
  }
7902
7909
  /**
7910
/**
 * Insert or update a decision record.
 * If a decision with the same pipeline_run_id, category, and key already exists,
 * update its value and rationale. Otherwise, insert a new record.
 *
 * @param db - better-sqlite3 database handle
 * @param input - decision payload validated against CreateDecisionInputSchema
 * @returns the inserted row, or the freshly re-read row after an update
 */
function upsertDecision(db, input) {
	const validated = CreateDecisionInputSchema.parse(input);
	// Use `IS ?` rather than `= ?`: a NULL pipeline_run_id never matches with
	// `=` in SQL, which would make every run-less upsert fall through to an
	// insert and silently accumulate duplicate rows. SQLite's `IS` is a
	// null-safe equality that behaves exactly like `=` for non-NULL values.
	const existing = db.prepare("SELECT * FROM decisions WHERE pipeline_run_id IS ? AND category = ? AND key = ? LIMIT 1").get(validated.pipeline_run_id ?? null, validated.category, validated.key);
	if (existing) {
		updateDecision(db, existing.id, {
			value: validated.value,
			rationale: validated.rationale ?? void 0
		});
		// Re-read so callers observe the post-update row (including updated_at).
		return db.prepare("SELECT * FROM decisions WHERE id = ?").get(existing.id);
	}
	return createDecision(db, input);
}
7926
+ /**
7903
7927
  * Get all decisions for a given phase, ordered by created_at ascending.
7904
7928
  */
7905
7929
  function getDecisionsByPhase(db, phase) {
@@ -7915,6 +7939,26 @@ function getDecisionsByPhaseForRun(db, runId, phase) {
7915
7939
  return stmt.all(runId, phase);
7916
7940
  }
7917
7941
  /**
7942
/**
 * Update a decision's value and/or rationale and refresh updated_at.
 * A no-op when neither field is supplied (no SQL is executed).
 */
function updateDecision(db, id, updates) {
	const assignments = [];
	const params = [];
	// Column order (value, then rationale) is fixed so the generated SQL
	// is deterministic regardless of the shape of `updates`.
	for (const column of ["value", "rationale"]) {
		if (updates[column] !== void 0) {
			assignments.push(`${column} = ?`);
			params.push(updates[column]);
		}
	}
	if (assignments.length === 0) return;
	assignments.push("updated_at = datetime('now')");
	params.push(id);
	db.prepare(`UPDATE decisions SET ${assignments.join(", ")} WHERE id = ?`).run(...params);
}
7961
+ /**
7918
7962
  * Insert a new requirement with status = 'active'.
7919
7963
  */
7920
7964
  function createRequirement(db, input) {
@@ -11835,12 +11879,14 @@ function createQualityGate(config) {
11835
11879
 
11836
11880
  //#endregion
11837
11881
  //#region src/modules/phase-orchestrator/phases/solutioning.ts
11838
- /** Maximum total prompt token budget for architecture generation (3,000 tokens × 4 chars = 12,000 chars) */
11839
- const MAX_ARCH_PROMPT_TOKENS = 3e3;
11840
- const MAX_ARCH_PROMPT_CHARS = MAX_ARCH_PROMPT_TOKENS * 4;
11841
- /** Maximum total prompt token budget for story generation (4,000 tokens × 4 chars = 16,000 chars) */
11842
- const MAX_STORY_PROMPT_TOKENS = 4e3;
11843
- const MAX_STORY_PROMPT_CHARS = MAX_STORY_PROMPT_TOKENS * 4;
11882
+ /** Base token budget for architecture generation (covers template + requirements) */
11883
+ const BASE_ARCH_PROMPT_TOKENS = 3e3;
11884
+ /** Base token budget for story generation (covers template + requirements + architecture) */
11885
+ const BASE_STORY_PROMPT_TOKENS = 4e3;
11886
+ /** Additional tokens per architecture decision injected into story generation prompt */
11887
+ const TOKENS_PER_DECISION = 100;
11888
+ /** Absolute maximum prompt tokens (model context safety margin) */
11889
+ const ABSOLUTE_MAX_PROMPT_TOKENS = 12e3;
11844
11890
  /** Placeholder in architecture prompt template */
11845
11891
  const REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
11846
11892
  /** Amendment context framing block prefix */
@@ -11854,6 +11900,58 @@ const STORY_REQUIREMENTS_PLACEHOLDER = "{{requirements}}";
11854
11900
  const STORY_ARCHITECTURE_PLACEHOLDER = "{{architecture_decisions}}";
11855
11901
  /** Gap analysis placeholder used in retry prompt */
11856
11902
  const GAP_ANALYSIS_PLACEHOLDER = "{{gap_analysis}}";
11903
+ /** Priority order for decision categories when summarizing (higher priority kept first) */
11904
+ const DECISION_CATEGORY_PRIORITY = [
11905
+ "data",
11906
+ "auth",
11907
+ "api",
11908
+ "frontend",
11909
+ "infra",
11910
+ "observability",
11911
+ "ci"
11912
+ ];
11913
/**
 * Calculate the dynamic prompt token budget based on the number of decisions
 * that will be injected into the prompt.
 *
 * @param baseBudget - Base token budget for the phase
 * @param decisionCount - Number of decisions to inject
 * @returns Calculated token budget, capped at ABSOLUTE_MAX_PROMPT_TOKENS
 */
function calculateDynamicBudget(baseBudget, decisionCount) {
	const requested = baseBudget + TOKENS_PER_DECISION * decisionCount;
	// Clamp to the model-context safety ceiling.
	return requested > ABSOLUTE_MAX_PROMPT_TOKENS ? ABSOLUTE_MAX_PROMPT_TOKENS : requested;
}
11925
/**
 * Summarize architecture decisions into compact "- key: value" one-liners,
 * dropping rationale and omitting lower-priority categories once the
 * character budget is exhausted.
 *
 * @param decisions - Architecture decisions ({ key, value, category })
 * @param maxChars - Maximum character budget for the summarized output
 * @returns Newline-joined summary beginning with a section header
 */
function summarizeDecisions(decisions, maxChars) {
	// Unknown/missing categories rank after every listed priority.
	const priorityOf = (decision) => {
		const idx = DECISION_CATEGORY_PRIORITY.indexOf((decision.category ?? "").toLowerCase());
		return idx === -1 ? DECISION_CATEGORY_PRIORITY.length : idx;
	};
	// Stable sort preserves the original relative order within a tier.
	const ordered = decisions.slice().sort((x, y) => priorityOf(x) - priorityOf(y));
	const header = "## Architecture Decisions (Summarized)";
	const output = [header];
	let used = header.length;
	for (const decision of ordered) {
		const value = decision.value.length > 120 ? `${decision.value.slice(0, 117)}...` : decision.value;
		const entry = `- ${decision.key}: ${value}`;
		// The +1 accounts for the newline that join() will insert.
		if (used + entry.length + 1 > maxChars) break;
		output.push(entry);
		used += entry.length + 1;
	}
	return output.join("\n");
}
11857
11955
  /**
11858
11956
  * Format functional and non-functional requirements from the planning phase
11859
11957
  * into a compact text block suitable for prompt injection.
@@ -11932,17 +12030,19 @@ async function runArchitectureGeneration(deps, params) {
11932
12030
  const template = await pack.getPrompt("architecture");
11933
12031
  const formattedRequirements = formatRequirements(db, runId);
11934
12032
  let prompt = template.replace(REQUIREMENTS_PLACEHOLDER, formattedRequirements);
12033
+ const dynamicBudgetTokens = calculateDynamicBudget(BASE_ARCH_PROMPT_TOKENS, 0);
12034
+ const dynamicBudgetChars = dynamicBudgetTokens * 4;
11935
12035
  if (amendmentContext !== void 0 && amendmentContext !== "") {
11936
12036
  const framingLen = AMENDMENT_CONTEXT_HEADER.length + AMENDMENT_CONTEXT_FOOTER.length;
11937
- const availableForContext = MAX_ARCH_PROMPT_CHARS - prompt.length - framingLen - TRUNCATED_MARKER.length;
12037
+ const availableForContext = dynamicBudgetChars - prompt.length - framingLen - TRUNCATED_MARKER.length;
11938
12038
  let contextToInject = amendmentContext;
11939
12039
  if (availableForContext <= 0) contextToInject = "";
11940
12040
  else if (amendmentContext.length > availableForContext) contextToInject = amendmentContext.slice(0, availableForContext) + TRUNCATED_MARKER;
11941
12041
  if (contextToInject !== "") prompt += AMENDMENT_CONTEXT_HEADER + contextToInject + AMENDMENT_CONTEXT_FOOTER;
11942
12042
  }
11943
12043
  const estimatedTokens = Math.ceil(prompt.length / 4);
11944
- if (estimatedTokens > MAX_ARCH_PROMPT_TOKENS) return {
11945
- error: `Architecture prompt exceeds token budget: ${estimatedTokens} tokens (max ${MAX_ARCH_PROMPT_TOKENS})`,
12044
+ if (estimatedTokens > dynamicBudgetTokens) return {
12045
+ error: `Architecture prompt exceeds token budget: ${estimatedTokens} tokens (max ${dynamicBudgetTokens})`,
11946
12046
  tokenUsage: zeroTokenUsage
11947
12047
  };
11948
12048
  const handle = dispatcher.dispatch({
@@ -11974,7 +12074,7 @@ async function runArchitectureGeneration(deps, params) {
11974
12074
  tokenUsage
11975
12075
  };
11976
12076
  const decisions = parsed.architecture_decisions;
11977
- for (const decision of decisions) createDecision(db, {
12077
+ for (const decision of decisions) upsertDecision(db, {
11978
12078
  pipeline_run_id: runId,
11979
12079
  phase: "solutioning",
11980
12080
  category: "architecture",
@@ -12017,20 +12117,34 @@ async function runStoryGeneration(deps, params, gapAnalysis) {
12017
12117
  };
12018
12118
  const template = await pack.getPrompt("story-generation");
12019
12119
  const formattedRequirements = formatRequirements(db, runId);
12020
- const formattedArchitecture = formatArchitectureDecisions(db, runId);
12120
+ const archDecisions = getDecisionsByPhaseForRun(db, runId, "solutioning").filter((d) => d.category === "architecture");
12121
+ const dynamicBudgetTokens = calculateDynamicBudget(BASE_STORY_PROMPT_TOKENS, archDecisions.length);
12122
+ const dynamicBudgetChars = dynamicBudgetTokens * 4;
12123
+ let formattedArchitecture = formatArchitectureDecisions(db, runId);
12021
12124
  let prompt = template.replace(STORY_REQUIREMENTS_PLACEHOLDER, formattedRequirements).replace(STORY_ARCHITECTURE_PLACEHOLDER, formattedArchitecture);
12022
12125
  if (gapAnalysis !== void 0) prompt = prompt.replace(GAP_ANALYSIS_PLACEHOLDER, gapAnalysis);
12023
12126
  if (amendmentContext !== void 0 && amendmentContext !== "") {
12024
12127
  const framingLen = AMENDMENT_CONTEXT_HEADER.length + AMENDMENT_CONTEXT_FOOTER.length;
12025
- const availableForContext = MAX_STORY_PROMPT_CHARS - prompt.length - framingLen - TRUNCATED_MARKER.length;
12128
+ const availableForContext = dynamicBudgetChars - prompt.length - framingLen - TRUNCATED_MARKER.length;
12026
12129
  let contextToInject = amendmentContext;
12027
12130
  if (availableForContext <= 0) contextToInject = "";
12028
12131
  else if (amendmentContext.length > availableForContext) contextToInject = amendmentContext.slice(0, availableForContext) + TRUNCATED_MARKER;
12029
12132
  if (contextToInject !== "") prompt += AMENDMENT_CONTEXT_HEADER + contextToInject + AMENDMENT_CONTEXT_FOOTER;
12030
12133
  }
12031
- const estimatedTokens = Math.ceil(prompt.length / 4);
12032
- if (estimatedTokens > MAX_STORY_PROMPT_TOKENS) return {
12033
- error: `Story generation prompt exceeds token budget: ${estimatedTokens} tokens (max ${MAX_STORY_PROMPT_TOKENS})`,
12134
+ let estimatedTokens = Math.ceil(prompt.length / 4);
12135
+ if (estimatedTokens > dynamicBudgetTokens) {
12136
+ const availableForDecisions = dynamicBudgetChars - (prompt.length - formattedArchitecture.length);
12137
+ formattedArchitecture = summarizeDecisions(archDecisions.map((d) => ({
12138
+ key: d.key,
12139
+ value: d.value,
12140
+ category: d.category
12141
+ })), Math.max(availableForDecisions, 200));
12142
+ prompt = template.replace(STORY_REQUIREMENTS_PLACEHOLDER, formattedRequirements).replace(STORY_ARCHITECTURE_PLACEHOLDER, formattedArchitecture);
12143
+ if (gapAnalysis !== void 0) prompt = prompt.replace(GAP_ANALYSIS_PLACEHOLDER, gapAnalysis);
12144
+ estimatedTokens = Math.ceil(prompt.length / 4);
12145
+ }
12146
+ if (estimatedTokens > dynamicBudgetTokens) return {
12147
+ error: `Story generation prompt exceeds token budget: ${estimatedTokens} tokens (max ${dynamicBudgetTokens})`,
12034
12148
  tokenUsage: zeroTokenUsage
12035
12149
  };
12036
12150
  const handle = dispatcher.dispatch({
@@ -12063,7 +12177,7 @@ async function runStoryGeneration(deps, params, gapAnalysis) {
12063
12177
  };
12064
12178
  const epics = parsed.epics;
12065
12179
  for (const [epicIndex, epic] of epics.entries()) {
12066
- createDecision(db, {
12180
+ upsertDecision(db, {
12067
12181
  pipeline_run_id: runId,
12068
12182
  phase: "solutioning",
12069
12183
  category: "epics",
@@ -12073,7 +12187,7 @@ async function runStoryGeneration(deps, params, gapAnalysis) {
12073
12187
  description: epic.description
12074
12188
  })
12075
12189
  });
12076
- for (const story of epic.stories) createDecision(db, {
12190
+ for (const story of epic.stories) upsertDecision(db, {
12077
12191
  pipeline_run_id: runId,
12078
12192
  phase: "solutioning",
12079
12193
  category: "stories",
@@ -12200,7 +12314,23 @@ async function runSolutioningPhase(deps, params) {
12200
12314
  let totalInput = 0;
12201
12315
  let totalOutput = 0;
12202
12316
  try {
12203
- const archResult = await runArchitectureGeneration(deps, params);
12317
+ const existingArchArtifact = getArtifactByTypeForRun(deps.db, params.runId, "solutioning", "architecture");
12318
+ let archResult;
12319
+ if (existingArchArtifact) {
12320
+ const existingDecisions = getDecisionsByPhaseForRun(deps.db, params.runId, "solutioning").filter((d) => d.category === "architecture");
12321
+ archResult = {
12322
+ decisions: existingDecisions.map((d) => ({
12323
+ key: d.key,
12324
+ value: d.value,
12325
+ rationale: d.rationale ?? ""
12326
+ })),
12327
+ artifactId: existingArchArtifact.id,
12328
+ tokenUsage: {
12329
+ input: 0,
12330
+ output: 0
12331
+ }
12332
+ };
12333
+ } else archResult = await runArchitectureGeneration(deps, params);
12204
12334
  totalInput += archResult.tokenUsage.input;
12205
12335
  totalOutput += archResult.tokenUsage.output;
12206
12336
  if ("error" in archResult) return {
@@ -13824,6 +13954,7 @@ async function runFullPipeline(options) {
13824
13954
  });
13825
13955
  }
13826
13956
  if (result.result === "failed") {
13957
+ updatePipelineRun(db, runId, { status: "failed" });
13827
13958
  const errorMsg = `Analysis phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}`;
13828
13959
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
13829
13960
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -13846,6 +13977,7 @@ async function runFullPipeline(options) {
13846
13977
  });
13847
13978
  }
13848
13979
  if (result.result === "failed") {
13980
+ updatePipelineRun(db, runId, { status: "failed" });
13849
13981
  const errorMsg = `Planning phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}`;
13850
13982
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
13851
13983
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -13868,6 +14000,7 @@ async function runFullPipeline(options) {
13868
14000
  });
13869
14001
  }
13870
14002
  if (result.result === "failed") {
14003
+ updatePipelineRun(db, runId, { status: "failed" });
13871
14004
  const errorMsg = `Solutioning phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}`;
13872
14005
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
13873
14006
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -14141,6 +14274,7 @@ async function runFullPipelineFromPhase(options) {
14141
14274
  });
14142
14275
  }
14143
14276
  if (result.result === "failed") {
14277
+ updatePipelineRun(db, runId, { status: "failed" });
14144
14278
  const errorMsg = `Analysis phase failed: ${result.error ?? "unknown error"}`;
14145
14279
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
14146
14280
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -14160,6 +14294,7 @@ async function runFullPipelineFromPhase(options) {
14160
14294
  });
14161
14295
  }
14162
14296
  if (result.result === "failed") {
14297
+ updatePipelineRun(db, runId, { status: "failed" });
14163
14298
  const errorMsg = `Planning phase failed: ${result.error ?? "unknown error"}`;
14164
14299
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
14165
14300
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -14179,6 +14314,7 @@ async function runFullPipelineFromPhase(options) {
14179
14314
  });
14180
14315
  }
14181
14316
  if (result.result === "failed") {
14317
+ updatePipelineRun(db, runId, { status: "failed" });
14182
14318
  const errorMsg = `Solutioning phase failed: ${result.error ?? "unknown error"}`;
14183
14319
  if (outputFormat === "human") process.stderr.write(`Error: ${errorMsg}\n`);
14184
14320
  else process.stdout.write(formatOutput(null, "json", false, errorMsg) + "\n");
@@ -14552,6 +14688,7 @@ async function runAmendCommand(options) {
14552
14688
  });
14553
14689
  }
14554
14690
  if (result.result === "failed") {
14691
+ updatePipelineRun(db, amendmentRunId, { status: "failed" });
14555
14692
  process.stderr.write(`Error: Analysis phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}\n`);
14556
14693
  return 1;
14557
14694
  }
@@ -14573,6 +14710,7 @@ async function runAmendCommand(options) {
14573
14710
  });
14574
14711
  }
14575
14712
  if (result.result === "failed") {
14713
+ updatePipelineRun(db, amendmentRunId, { status: "failed" });
14576
14714
  process.stderr.write(`Error: Planning phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}\n`);
14577
14715
  return 1;
14578
14716
  }
@@ -14594,6 +14732,7 @@ async function runAmendCommand(options) {
14594
14732
  });
14595
14733
  }
14596
14734
  if (result.result === "failed") {
14735
+ updatePipelineRun(db, amendmentRunId, { status: "failed" });
14597
14736
  process.stderr.write(`Error: Solutioning phase failed: ${result.error ?? "unknown error"}${result.details ? ` — ${result.details}` : ""}\n`);
14598
14737
  return 1;
14599
14738
  }
@@ -15757,6 +15896,12 @@ function registerMonitorCommand(program, version = "0.0.0", projectRoot = proces
15757
15896
  //#endregion
15758
15897
  //#region src/cli/index.ts
15759
15898
  process.setMaxListeners(30);
15899
+ process.stdout.on("error", (err) => {
15900
+ if (err.code === "EPIPE") process.exit(0);
15901
+ });
15902
+ process.stderr.on("error", (err) => {
15903
+ if (err.code === "EPIPE") process.exit(0);
15904
+ });
15760
15905
  const logger = createLogger("cli");
15761
15906
  /** Resolve the package.json path relative to this file */
15762
15907
  async function getPackageVersion() {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "substrate-ai",
3
- "version": "0.1.18",
3
+ "version": "0.1.20",
4
4
  "description": "Substrate — multi-agent orchestration daemon for AI coding agents",
5
5
  "type": "module",
6
6
  "license": "MIT",
@@ -64,6 +64,7 @@
64
64
  "js-yaml": "^4.1.1",
65
65
  "pino": "^9.6.0",
66
66
  "semver": "^7.6.3",
67
+ "substrate-ai": "^0.1.19",
67
68
  "zod": "^4.3.6"
68
69
  },
69
70
  "devDependencies": {