substrate-ai 0.1.9 → 0.1.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/package.json +4 -5
- package/packs/bmad/constraints/code-review.yaml +59 -0
- package/packs/bmad/constraints/create-story.yaml +99 -0
- package/packs/bmad/constraints/dev-story.yaml +64 -0
- package/packs/bmad/manifest.yaml +43 -0
- package/packs/bmad/prompts/analysis.md +61 -0
- package/packs/bmad/prompts/architecture.md +71 -0
- package/packs/bmad/prompts/code-review.md +80 -0
- package/packs/bmad/prompts/create-story.md +64 -0
- package/packs/bmad/prompts/dev-story.md +86 -0
- package/packs/bmad/prompts/fix-story.md +65 -0
- package/packs/bmad/prompts/planning.md +91 -0
- package/packs/bmad/prompts/story-generation.md +84 -0
- package/packs/bmad/templates/story.md +37 -0
- package/README.npm.md +0 -206
package/README.md
CHANGED
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
<p align="center">
|
|
2
|
-
<img src="https://
|
|
2
|
+
<img src="https://raw.githubusercontent.com/johnplanow/substrate/main/assets/substrate-header.png" alt="Substrate — Autonomous Software Development Pipeline" />
|
|
3
3
|
</p>
|
|
4
4
|
|
|
5
5
|
# Substrate
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "substrate-ai",
|
|
3
|
-
"version": "0.1.
|
|
3
|
+
"version": "0.1.11",
|
|
4
4
|
"description": "Substrate — multi-agent orchestration daemon for AI coding agents",
|
|
5
5
|
"type": "module",
|
|
6
6
|
"license": "MIT",
|
|
@@ -23,7 +23,6 @@
|
|
|
23
23
|
"bin": {
|
|
24
24
|
"substrate": "./dist/cli/index.js"
|
|
25
25
|
},
|
|
26
|
-
"readme": "README.npm.md",
|
|
27
26
|
"main": "./dist/index.js",
|
|
28
27
|
"exports": {
|
|
29
28
|
".": {
|
|
@@ -40,12 +39,12 @@
|
|
|
40
39
|
"dist/**/*.d.ts",
|
|
41
40
|
"dist/**/*.json",
|
|
42
41
|
"dist/cli/templates",
|
|
43
|
-
"
|
|
44
|
-
"README.
|
|
42
|
+
"packs",
|
|
43
|
+
"README.md"
|
|
45
44
|
],
|
|
46
45
|
"scripts": {
|
|
47
46
|
"build": "tsdown",
|
|
48
|
-
"postbuild": "cp -r src/cli/templates dist/cli/templates
|
|
47
|
+
"postbuild": "cp -r src/cli/templates dist/cli/templates",
|
|
49
48
|
"dev": "tsx watch src/cli/index.ts",
|
|
50
49
|
"test": "vitest run --coverage",
|
|
51
50
|
"test:watch": "vitest",
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
- name: minimum-three-issues
|
|
2
|
+
description: At least 3 specific issues must be found in every review
|
|
3
|
+
severity: required
|
|
4
|
+
check: issue_list contains at least 3 entries
|
|
5
|
+
|
|
6
|
+
- name: maximum-ten-issues
|
|
7
|
+
description: If fewer than 10 issues found, re-examine before finalizing
|
|
8
|
+
severity: recommended
|
|
9
|
+
check: issue_list contains between 3 and 10 entries or reviewer has documented re-examination
|
|
10
|
+
|
|
11
|
+
- name: adversarial-framing
|
|
12
|
+
description: Review must actively seek problems, not confirm correctness
|
|
13
|
+
severity: required
|
|
14
|
+
check: Review posture is "find what is wrong" not "validate what is right"
|
|
15
|
+
|
|
16
|
+
- name: ac-validation-required
|
|
17
|
+
description: Every acceptance criterion must be validated against implementation
|
|
18
|
+
severity: required
|
|
19
|
+
check: Each AC has IMPLEMENTED, PARTIAL, or MISSING determination
|
|
20
|
+
|
|
21
|
+
- name: task-completion-audit
|
|
22
|
+
description: Every task marked [x] must be verified as actually done
|
|
23
|
+
severity: required
|
|
24
|
+
check: Each [x] task has evidence of actual implementation
|
|
25
|
+
|
|
26
|
+
- name: git-reality-check
|
|
27
|
+
description: Story File List must be cross-referenced with actual git changes
|
|
28
|
+
severity: required
|
|
29
|
+
check: git status and git diff output compared to story File List
|
|
30
|
+
|
|
31
|
+
- name: false-claims-are-critical
|
|
32
|
+
description: Tasks or files claimed done/changed but with no git evidence are CRITICAL findings
|
|
33
|
+
severity: required
|
|
34
|
+
check: Any story claim without git evidence is flagged as high or critical
|
|
35
|
+
|
|
36
|
+
- name: test-quality-check
|
|
37
|
+
description: Tests must have real assertions, not placeholder expectations
|
|
38
|
+
severity: required
|
|
39
|
+
check: Test files reviewed for meaningful assertions vs placeholder stubs
|
|
40
|
+
|
|
41
|
+
- name: security-review
|
|
42
|
+
description: Code must be reviewed for injection risks, missing validation, auth issues
|
|
43
|
+
severity: required
|
|
44
|
+
check: Security audit performed on all implementation files
|
|
45
|
+
|
|
46
|
+
- name: verdict-criteria
|
|
47
|
+
description: Verdict must match issue severity distribution
|
|
48
|
+
severity: required
|
|
49
|
+
check: approve only if no CRITICAL/HIGH; changes_requested if MEDIUM/HIGH; blocked if CRITICAL
|
|
50
|
+
|
|
51
|
+
- name: no-bmad-folder-review
|
|
52
|
+
description: _bmad/ and _bmad-output/ folders must not be reviewed
|
|
53
|
+
severity: required
|
|
54
|
+
check: Review scope excludes _bmad/, _bmad-output/, .cursor/, .windsurf/, .claude/
|
|
55
|
+
|
|
56
|
+
- name: location-required-for-issues
|
|
57
|
+
description: Every issue must have a specific file and line reference
|
|
58
|
+
severity: recommended
|
|
59
|
+
check: Each issue in issue_list has a location field with file:line reference
|
|
@@ -0,0 +1,99 @@
|
|
|
1
|
+
- name: story-has-user-story
|
|
2
|
+
description: Story must have a user story statement with As a/I want/so that
|
|
3
|
+
severity: required
|
|
4
|
+
check: Story section contains As a, I want, so that
|
|
5
|
+
|
|
6
|
+
- name: acs-in-bdd-format
|
|
7
|
+
description: All acceptance criteria must use Given/When/Then format
|
|
8
|
+
severity: required
|
|
9
|
+
check: Each AC has Given, When, Then keywords
|
|
10
|
+
|
|
11
|
+
- name: at-least-three-acs
|
|
12
|
+
description: Story must have at least 3 acceptance criteria
|
|
13
|
+
severity: required
|
|
14
|
+
check: Count of ACs >= 3
|
|
15
|
+
|
|
16
|
+
- name: tasks-map-to-acs
|
|
17
|
+
description: Each task must reference at least one AC
|
|
18
|
+
severity: required
|
|
19
|
+
check: Each task has an AC annotation referencing an AC number
|
|
20
|
+
|
|
21
|
+
- name: tasks-are-granular
|
|
22
|
+
description: Tasks should be implementable in 2-4 hours with subtasks
|
|
23
|
+
severity: recommended
|
|
24
|
+
check: Tasks with significant scope have subtasks
|
|
25
|
+
|
|
26
|
+
- name: dev-notes-present
|
|
27
|
+
description: Dev Notes section must have architecture constraints
|
|
28
|
+
severity: required
|
|
29
|
+
check: Dev Notes section exists and is non-empty
|
|
30
|
+
|
|
31
|
+
- name: esm-imports-noted
|
|
32
|
+
description: Dev Notes must mention ESM import requirements if TypeScript project
|
|
33
|
+
severity: recommended
|
|
34
|
+
check: Dev Notes mentions .js extension requirement for ESM imports
|
|
35
|
+
|
|
36
|
+
- name: test-requirements-specified
|
|
37
|
+
description: Dev Notes should specify test framework and patterns
|
|
38
|
+
severity: recommended
|
|
39
|
+
check: Dev Notes has Testing Requirements or similar section
|
|
40
|
+
|
|
41
|
+
- name: file-structure-specified
|
|
42
|
+
description: Dev Notes should specify where new files go
|
|
43
|
+
severity: recommended
|
|
44
|
+
check: Dev Notes references file paths or directory structure
|
|
45
|
+
|
|
46
|
+
- name: no-vague-tasks
|
|
47
|
+
description: Tasks must be specific and implementable not vague
|
|
48
|
+
severity: required
|
|
49
|
+
check: No task descriptions contain only implement, add, create without specifics
|
|
50
|
+
|
|
51
|
+
- name: story-status-ready-for-dev
|
|
52
|
+
description: Story status must be set to ready-for-dev when complete
|
|
53
|
+
severity: required
|
|
54
|
+
check: Status field equals ready-for-dev
|
|
55
|
+
|
|
56
|
+
- name: sprint-status-updated
|
|
57
|
+
description: Sprint status yaml must be updated to ready-for-dev
|
|
58
|
+
severity: required
|
|
59
|
+
check: development_status story_key equals ready-for-dev in sprint-status.yaml
|
|
60
|
+
|
|
61
|
+
- name: no-copy-paste-from-epics
|
|
62
|
+
description: Story must add context beyond what is in epics
|
|
63
|
+
severity: required
|
|
64
|
+
check: Dev Notes contains architecture/technical content not present in epic source
|
|
65
|
+
|
|
66
|
+
- name: previous-story-learnings-applied
|
|
67
|
+
description: If previous story exists learnings should be reflected
|
|
68
|
+
severity: recommended
|
|
69
|
+
check: Dev Notes references previous story findings when story_num > 1
|
|
70
|
+
|
|
71
|
+
- name: acceptance-criteria-testable
|
|
72
|
+
description: ACs must be testable with a clear pass/fail condition
|
|
73
|
+
severity: required
|
|
74
|
+
check: Each AC has a concrete measurable outcome in Then clause
|
|
75
|
+
|
|
76
|
+
- name: no-ambiguous-requirements
|
|
77
|
+
description: Requirements must be specific enough to implement without clarification
|
|
78
|
+
severity: required
|
|
79
|
+
check: No AC or task contains etc or and so on without specification
|
|
80
|
+
|
|
81
|
+
- name: dev-agent-record-present
|
|
82
|
+
description: Story must have Dev Agent Record section
|
|
83
|
+
severity: required
|
|
84
|
+
check: Story has Dev Agent Record section with subsections
|
|
85
|
+
|
|
86
|
+
- name: file-list-section-present
|
|
87
|
+
description: Story must have File List section in Dev Agent Record
|
|
88
|
+
severity: required
|
|
89
|
+
check: Dev Agent Record has File List subsection
|
|
90
|
+
|
|
91
|
+
- name: change-log-section-present
|
|
92
|
+
description: Story must have Change Log section
|
|
93
|
+
severity: recommended
|
|
94
|
+
check: Story has Change Log section
|
|
95
|
+
|
|
96
|
+
- name: story-key-matches-filename
|
|
97
|
+
description: Story key derived from filename must match epic/story numbers
|
|
98
|
+
severity: required
|
|
99
|
+
check: Filename pattern N-N-title.md matches epic_num and story_num in content
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
- name: sequential-task-execution
|
|
2
|
+
description: Tasks must be executed in the exact order listed in the story file
|
|
3
|
+
severity: required
|
|
4
|
+
check: Implementation follows task order without skipping
|
|
5
|
+
|
|
6
|
+
- name: red-green-refactor
|
|
7
|
+
description: Tests must be written and fail before implementation code is written
|
|
8
|
+
severity: required
|
|
9
|
+
check: Tests exist and were confirmed failing before implementation
|
|
10
|
+
|
|
11
|
+
- name: tests-pass-before-complete
|
|
12
|
+
description: All tests must pass before marking a task complete
|
|
13
|
+
severity: required
|
|
14
|
+
check: Full test suite passes before checkbox is marked [x]
|
|
15
|
+
|
|
16
|
+
- name: no-regression-allowed
|
|
17
|
+
description: No existing tests may be broken by implementation
|
|
18
|
+
severity: required
|
|
19
|
+
check: Full test suite passes after each task implementation
|
|
20
|
+
|
|
21
|
+
- name: halt-on-new-dependency
|
|
22
|
+
description: New dependencies beyond story spec require user approval before proceeding
|
|
23
|
+
severity: required
|
|
24
|
+
check: Agent stops and requests approval when new dependency needed
|
|
25
|
+
|
|
26
|
+
- name: halt-on-repeated-failure
|
|
27
|
+
description: Three consecutive implementation failures require guidance
|
|
28
|
+
severity: required
|
|
29
|
+
check: Agent halts after 3 failures on same task
|
|
30
|
+
|
|
31
|
+
- name: permitted-sections-only
|
|
32
|
+
description: Only permitted story sections may be modified
|
|
33
|
+
severity: required
|
|
34
|
+
check: Only Tasks/Subtasks, Dev Agent Record, File List, Change Log, Status are modified
|
|
35
|
+
|
|
36
|
+
- name: no-extra-features
|
|
37
|
+
description: Implementation must match exactly what task specifies, no more
|
|
38
|
+
severity: required
|
|
39
|
+
check: No functionality implemented beyond task/subtask scope
|
|
40
|
+
|
|
41
|
+
- name: file-list-complete
|
|
42
|
+
description: File List must include every new, modified, or deleted file
|
|
43
|
+
severity: required
|
|
44
|
+
check: Every file touched is listed in Dev Agent Record -> File List
|
|
45
|
+
|
|
46
|
+
- name: completion-notes-required
|
|
47
|
+
description: Dev Agent Record must have completion notes describing what was implemented
|
|
48
|
+
severity: required
|
|
49
|
+
check: Completion Notes section is non-empty when story is complete
|
|
50
|
+
|
|
51
|
+
- name: all-acs-satisfied
|
|
52
|
+
description: Every acceptance criterion must be satisfied before status can be review
|
|
53
|
+
severity: required
|
|
54
|
+
check: All ACs verified against implementation before status = review
|
|
55
|
+
|
|
56
|
+
- name: status-review-on-completion
|
|
57
|
+
description: Story status must be set to review when all tasks done
|
|
58
|
+
severity: required
|
|
59
|
+
check: Status field equals "review" after completion
|
|
60
|
+
|
|
61
|
+
- name: change-log-entry
|
|
62
|
+
description: Change Log must have at least one entry describing the implementation
|
|
63
|
+
severity: required
|
|
64
|
+
check: Change Log section has at least one dated entry
|
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
name: bmad
|
|
2
|
+
version: 1.0.0
|
|
3
|
+
description: BMAD methodology for autonomous software development
|
|
4
|
+
|
|
5
|
+
phases:
|
|
6
|
+
- name: analysis
|
|
7
|
+
description: Product discovery and brief creation
|
|
8
|
+
entryGates: []
|
|
9
|
+
exitGates: [product-brief-complete]
|
|
10
|
+
artifacts: [product-brief]
|
|
11
|
+
- name: planning
|
|
12
|
+
description: PRD and requirements generation
|
|
13
|
+
entryGates: [product-brief-complete]
|
|
14
|
+
exitGates: [prd-complete]
|
|
15
|
+
artifacts: [prd]
|
|
16
|
+
- name: solutioning
|
|
17
|
+
description: Architecture and epic/story breakdown
|
|
18
|
+
entryGates: [prd-complete]
|
|
19
|
+
exitGates: [architecture-complete, stories-complete, readiness-check]
|
|
20
|
+
artifacts: [architecture, epics, stories]
|
|
21
|
+
- name: implementation
|
|
22
|
+
description: Code generation, testing, and review
|
|
23
|
+
entryGates: [stories-complete]
|
|
24
|
+
exitGates: [all-stories-shipped]
|
|
25
|
+
artifacts: [code, tests]
|
|
26
|
+
|
|
27
|
+
prompts:
|
|
28
|
+
analysis: prompts/analysis.md
|
|
29
|
+
planning: prompts/planning.md
|
|
30
|
+
architecture: prompts/architecture.md
|
|
31
|
+
story-generation: prompts/story-generation.md
|
|
32
|
+
create-story: prompts/create-story.md
|
|
33
|
+
dev-story: prompts/dev-story.md
|
|
34
|
+
code-review: prompts/code-review.md
|
|
35
|
+
fix-story: prompts/fix-story.md
|
|
36
|
+
|
|
37
|
+
constraints:
|
|
38
|
+
create-story: constraints/create-story.yaml
|
|
39
|
+
dev-story: constraints/dev-story.yaml
|
|
40
|
+
code-review: constraints/code-review.yaml
|
|
41
|
+
|
|
42
|
+
templates:
|
|
43
|
+
story: templates/story.md
|
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
# BMAD Compiled Analysis Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Project Concept
|
|
6
|
+
{{concept}}
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
## Mission
|
|
11
|
+
|
|
12
|
+
Analyze the project concept above and produce a structured **Product Brief** that captures the essential product definition. Think like a senior business analyst conducting market analysis, user research, and feasibility assessment.
|
|
13
|
+
|
|
14
|
+
## Instructions
|
|
15
|
+
|
|
16
|
+
1. **Analyze the concept deeply** before generating output:
|
|
17
|
+
- What problem does this solve? Who experiences this problem most acutely?
|
|
18
|
+
- What existing solutions exist? Why do they fall short?
|
|
19
|
+
- What are the real technical constraints and market dynamics?
|
|
20
|
+
- What would make this succeed vs. fail?
|
|
21
|
+
|
|
22
|
+
2. **Generate each field with research-grade depth:**
|
|
23
|
+
- `problem_statement`: A clear, specific articulation of the problem (minimum 2-3 sentences). Ground it in user pain, not technology. Include the impact of the problem remaining unsolved.
|
|
24
|
+
- `target_users`: Specific user segments (not generic labels). Include role, context, and why they care. Minimum 2 distinct segments.
|
|
25
|
+
- `core_features`: Capabilities that directly address the problem statement. Each feature should be a concrete capability, not a vague category. Prioritize by user impact.
|
|
26
|
+
- `success_metrics`: Measurable outcomes tied to user value and business objectives. Include both leading indicators (engagement, adoption) and lagging indicators (retention, revenue). Be specific enough to measure.
|
|
27
|
+
- `constraints`: Technical limitations, regulatory requirements, budget boundaries, timeline pressures, platform restrictions, or integration requirements. Omit if genuinely none exist.
|
|
28
|
+
|
|
29
|
+
3. **Quality bar**: Every field should contain enough detail that a product manager could begin writing a PRD from this brief alone. Avoid placeholder text, generic statements, or single-word items.
|
|
30
|
+
|
|
31
|
+
4. **Amendment awareness**: If amendment context from a parent run is provided below, refine and build upon existing decisions rather than starting from scratch. Identify what changes, what stays, and what new elements the amendment introduces.
|
|
32
|
+
|
|
33
|
+
## Output Contract
|
|
34
|
+
|
|
35
|
+
Emit ONLY this YAML block as your final output — no other text.
|
|
36
|
+
|
|
37
|
+
**CRITICAL**: All array items MUST be plain strings, NOT objects/maps. Write each item as a single descriptive string on one line.
|
|
38
|
+
|
|
39
|
+
```yaml
|
|
40
|
+
result: success
|
|
41
|
+
product_brief:
|
|
42
|
+
problem_statement: "A clear articulation of the problem in 2-3 sentences."
|
|
43
|
+
target_users:
|
|
44
|
+
- "Software developers who work in terminal environments and want habit tracking"
|
|
45
|
+
- "DevOps engineers who need to maintain daily operational checklists"
|
|
46
|
+
core_features:
|
|
47
|
+
- "CLI command to register, check-off, and view daily habits with streak tracking"
|
|
48
|
+
- "Local SQLite storage with export to JSON/CSV for portability"
|
|
49
|
+
success_metrics:
|
|
50
|
+
- "Daily active usage rate >60% among onboarded users within 30 days"
|
|
51
|
+
- "Streak completion rate >40% across all tracked habits"
|
|
52
|
+
constraints:
|
|
53
|
+
- "CLI-only interface limits audience to terminal-comfortable users"
|
|
54
|
+
- "Must work offline with local storage, no cloud dependency"
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
If you cannot produce a valid product brief:
|
|
58
|
+
|
|
59
|
+
```yaml
|
|
60
|
+
result: failed
|
|
61
|
+
```
|
|
@@ -0,0 +1,71 @@
|
|
|
1
|
+
# BMAD Compiled Architecture Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Requirements (from Planning Phase)
|
|
6
|
+
{{requirements}}
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
## Mission
|
|
11
|
+
|
|
12
|
+
Produce concrete **architecture decisions** that translate the requirements above into a buildable technical design. Think like a pragmatic senior architect — choose boring technology that ships, not cutting-edge technology that impresses.
|
|
13
|
+
|
|
14
|
+
## Instructions
|
|
15
|
+
|
|
16
|
+
1. **Make concrete decisions, not suggestions:**
|
|
17
|
+
- Each decision is a key-value pair capturing one architectural concern
|
|
18
|
+
- The `key` identifies WHAT is being decided (e.g., "api-style", "auth-strategy", "state-management", "deployment-target")
|
|
19
|
+
- The `value` states the CHOICE (e.g., "REST with OpenAPI 3.1", "JWT with refresh tokens", "React Context + useReducer", "Docker on AWS ECS")
|
|
20
|
+
- The `rationale` explains WHY this choice over alternatives (optional but strongly recommended)
|
|
21
|
+
|
|
22
|
+
2. **Cover these architectural concerns at minimum:**
|
|
23
|
+
- **System architecture**: Monolith, modular monolith, microservices, or serverless
|
|
24
|
+
- **API design**: REST, GraphQL, gRPC, or hybrid
|
|
25
|
+
- **Data storage**: Database engine, schema strategy, migration approach
|
|
26
|
+
- **Authentication/authorization**: Strategy and implementation approach
|
|
27
|
+
- **Project structure**: Directory layout, module boundaries, dependency rules
|
|
28
|
+
- **Error handling**: Strategy for errors, logging, monitoring
|
|
29
|
+
- **Testing strategy**: Unit/integration/E2E split, framework choices
|
|
30
|
+
|
|
31
|
+
3. **Align with requirements:**
|
|
32
|
+
- Every `must` functional requirement should be architecturally supportable
|
|
33
|
+
- NFRs (performance, security, scalability) should directly inform decisions
|
|
34
|
+
- Tech stack choices from planning should be respected unless there's a strong reason to deviate
|
|
35
|
+
|
|
36
|
+
4. **Use the `category` field** to group related decisions:
|
|
37
|
+
- `infrastructure`: deployment, hosting, CI/CD
|
|
38
|
+
- `backend`: API, database, auth, services
|
|
39
|
+
- `frontend`: UI framework, state, routing
|
|
40
|
+
- `crosscutting`: logging, error handling, testing, security
|
|
41
|
+
|
|
42
|
+
5. **Amendment awareness**: If amendment context from a parent run is provided below, build upon the existing architecture. Add new decisions for new capabilities, refine existing decisions where the amendment changes requirements, and preserve decisions that remain valid.
|
|
43
|
+
|
|
44
|
+
## Output Contract
|
|
45
|
+
|
|
46
|
+
Emit ONLY this YAML block as your final output — no other text.
|
|
47
|
+
|
|
48
|
+
**CRITICAL YAML RULES**: All string values MUST be quoted with double quotes. This prevents YAML parse errors from colons, special characters, or multi-line values. Keep each value on a single line.
|
|
49
|
+
|
|
50
|
+
```yaml
|
|
51
|
+
result: success
|
|
52
|
+
architecture_decisions:
|
|
53
|
+
- category: "backend"
|
|
54
|
+
key: "api-style"
|
|
55
|
+
value: "REST with OpenAPI 3.1 spec"
|
|
56
|
+
rationale: "Industry standard, excellent tooling, team familiarity"
|
|
57
|
+
- category: "backend"
|
|
58
|
+
key: "database"
|
|
59
|
+
value: "SQLite with better-sqlite3 driver"
|
|
60
|
+
rationale: "Zero-config local storage, perfect for CLI tools"
|
|
61
|
+
- category: "crosscutting"
|
|
62
|
+
key: "testing-strategy"
|
|
63
|
+
value: "Vitest for unit and integration, no E2E needed for CLI"
|
|
64
|
+
rationale: "Fast execution, native ESM support, compatible with TypeScript"
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
If you cannot produce valid architecture output:
|
|
68
|
+
|
|
69
|
+
```yaml
|
|
70
|
+
result: failed
|
|
71
|
+
```
|
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# BMAD Compiled Code-Review Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Story File Content
|
|
6
|
+
{{story_content}}
|
|
7
|
+
|
|
8
|
+
### Git Diff
|
|
9
|
+
{{git_diff}}
|
|
10
|
+
|
|
11
|
+
### Previous Review Findings
|
|
12
|
+
{{previous_findings}}
|
|
13
|
+
|
|
14
|
+
### Architecture Constraints
|
|
15
|
+
{{arch_constraints}}
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Mission
|
|
20
|
+
|
|
21
|
+
Adversarial code review. Find what's wrong. Validate story claims against actual implementation.
|
|
22
|
+
|
|
23
|
+
## Instructions
|
|
24
|
+
|
|
25
|
+
1. **Parse the story file** to extract:
|
|
26
|
+
- Acceptance Criteria (AC1, AC2, etc.)
|
|
27
|
+
- Tasks with their completion status (`[x]` or `[ ]`)
|
|
28
|
+
- Dev Notes and File List
|
|
29
|
+
|
|
30
|
+
2. **Review the git diff** for:
|
|
31
|
+
- Files changed vs files listed in the story File List
|
|
32
|
+
- Whether each AC is actually implemented
|
|
33
|
+
- Whether each `[x]` task is actually done
|
|
34
|
+
|
|
35
|
+
3. **Execute adversarial review** across 4 dimensions:
|
|
36
|
+
- **AC Validation** — Is each acceptance criterion implemented?
|
|
37
|
+
- **Task Audit** — Tasks marked `[x]` that aren't done are BLOCKER issues
|
|
38
|
+
- **Code Quality** — Security, error handling, edge cases, maintainability
|
|
39
|
+
- **Test Quality** — Real assertions, not placeholders or skipped tests
|
|
40
|
+
|
|
41
|
+
4. **Severity classification:**
|
|
42
|
+
- **blocker** — Task `[x]` but not implemented; security vulnerability; data loss risk
|
|
43
|
+
- **major** — AC not implemented; false claims; missing error handling on boundaries
|
|
44
|
+
- **minor** — Style; documentation gap; naming; low-risk edge case
|
|
45
|
+
|
|
46
|
+
## Output Contract
|
|
47
|
+
|
|
48
|
+
After completing the review, emit ONLY this YAML block — no other text:
|
|
49
|
+
|
|
50
|
+
```yaml
|
|
51
|
+
verdict: SHIP_IT
|
|
52
|
+
issues: 0
|
|
53
|
+
issue_list: []
|
|
54
|
+
```
|
|
55
|
+
|
|
56
|
+
Or if issues were found:
|
|
57
|
+
|
|
58
|
+
```yaml
|
|
59
|
+
verdict: NEEDS_MINOR_FIXES
|
|
60
|
+
issues: 3
|
|
61
|
+
issue_list:
|
|
62
|
+
- severity: major
|
|
63
|
+
description: "AC2 not implemented — getConstraints() always returns []"
|
|
64
|
+
file: "src/modules/foo/foo.ts"
|
|
65
|
+
line: 42
|
|
66
|
+
- severity: minor
|
|
67
|
+
description: "Missing JSDoc on exported function"
|
|
68
|
+
file: "src/modules/foo/foo.ts"
|
|
69
|
+
- severity: minor
|
|
70
|
+
description: "Variable name `d` should be more descriptive"
|
|
71
|
+
file: "src/modules/foo/foo.ts"
|
|
72
|
+
line: 15
|
|
73
|
+
```
|
|
74
|
+
|
|
75
|
+
**IMPORTANT**: `issues` must equal the number of items in `issue_list`.
|
|
76
|
+
|
|
77
|
+
**Verdict rules:**
|
|
78
|
+
- `SHIP_IT` — zero blocker/major issues (minor issues acceptable)
|
|
79
|
+
- `NEEDS_MINOR_FIXES` — minor issues only, or 1-2 major with no blockers
|
|
80
|
+
- `NEEDS_MAJOR_REWORK` — any blocker issue, or 3+ major issues
|
|
@@ -0,0 +1,64 @@
|
|
|
1
|
+
# BMAD Compiled Create-Story Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Epic Scope
|
|
6
|
+
{{epic_shard}}
|
|
7
|
+
|
|
8
|
+
### Architecture Constraints
|
|
9
|
+
{{arch_constraints}}
|
|
10
|
+
|
|
11
|
+
### Previous Story Dev Notes
|
|
12
|
+
{{prev_dev_notes}}
|
|
13
|
+
|
|
14
|
+
### Story File Template
|
|
15
|
+
{{story_template}}
|
|
16
|
+
|
|
17
|
+
---
|
|
18
|
+
|
|
19
|
+
## Mission
|
|
20
|
+
|
|
21
|
+
Using the context above, write a complete, implementation-ready story file for story **{{story_key}}**.
|
|
22
|
+
|
|
23
|
+
## Instructions
|
|
24
|
+
|
|
25
|
+
1. **Parse the epic scope** to understand what this epic is building and where this story fits
|
|
26
|
+
2. **Apply architecture constraints** — every constraint listed above is mandatory (file paths, import style, test framework, etc.)
|
|
27
|
+
3. **Use previous dev notes** as guardrails — don't repeat mistakes, build on patterns that worked
|
|
28
|
+
4. **Fill out the story template** with:
|
|
29
|
+
- A clear user story (As a / I want / So that)
|
|
30
|
+
- Acceptance criteria in BDD Given/When/Then format (minimum 3, maximum 8)
|
|
31
|
+
- Concrete tasks broken into 2–4 hour subtasks, each tied to specific ACs
|
|
32
|
+
- Dev Notes with file paths, import patterns, testing requirements
|
|
33
|
+
5. **Apply the scope cap** — see Scope Cap Guidance below
|
|
34
|
+
6. **Write the story file** to: `_bmad-output/implementation-artifacts/{{story_key}}-<kebab-title>.md`
|
|
35
|
+
- Status must be: `ready-for-dev`
|
|
36
|
+
- Dev Agent Record section must be present but left blank (to be filled by dev agent)
|
|
37
|
+
|
|
38
|
+
## Scope Cap Guidance
|
|
39
|
+
|
|
40
|
+
**Aim for 6-7 acceptance criteria and 7-8 tasks per story.**
|
|
41
|
+
|
|
42
|
+
Each story will be implemented by an AI agent in a single pass. Stories with more than 7 ACs tend to exceed agent capabilities and require decomposition, adding latency and complexity to the pipeline.
|
|
43
|
+
|
|
44
|
+
If the scope requires more than 7 ACs, split into multiple sequential stories (e.g., `7-1a: Core Setup`, `7-1b: Advanced Features`). Splitting is preferable to cramming too much scope into a single story.
|
|
45
|
+
|
|
46
|
+
This is guidance, not enforcement — if the scope genuinely fits in a slightly larger story, use your judgment. The goal is to avoid stories that will predictably fail during implementation.
|
|
47
|
+
|
|
48
|
+
## Output Contract
|
|
49
|
+
|
|
50
|
+
After writing the story file, emit ONLY this YAML block as your final message — no other text:
|
|
51
|
+
|
|
52
|
+
```yaml
|
|
53
|
+
result: success
|
|
54
|
+
story_file: <absolute path to the written story file>
|
|
55
|
+
story_key: {{story_key}}
|
|
56
|
+
story_title: <one-line title of the story>
|
|
57
|
+
```
|
|
58
|
+
|
|
59
|
+
If you cannot write the story file for any reason:
|
|
60
|
+
|
|
61
|
+
```yaml
|
|
62
|
+
result: failure
|
|
63
|
+
error: <reason>
|
|
64
|
+
```
|
|
@@ -0,0 +1,86 @@
|
|
|
1
|
+
# BMAD Compiled Dev-Story Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Story File Content
|
|
6
|
+
{{story_content}}
|
|
7
|
+
|
|
8
|
+
{{task_scope}}
|
|
9
|
+
|
|
10
|
+
{{prior_files}}
|
|
11
|
+
|
|
12
|
+
{{files_in_scope}}
|
|
13
|
+
|
|
14
|
+
{{project_context}}
|
|
15
|
+
|
|
16
|
+
### Test Patterns
|
|
17
|
+
{{test_patterns}}
|
|
18
|
+
|
|
19
|
+
---
|
|
20
|
+
|
|
21
|
+
## Mission
|
|
22
|
+
|
|
23
|
+
Implement the story above completely. Follow tasks in exact order. Do not stop until all tasks are done.
|
|
24
|
+
|
|
25
|
+
## Instructions
|
|
26
|
+
|
|
27
|
+
1. **Parse the story file** to understand:
|
|
28
|
+
- Acceptance Criteria (AC1, AC2, etc.)
|
|
29
|
+
- Tasks/Subtasks (ordered list with `[ ]` checkboxes)
|
|
30
|
+
- Dev Notes (file paths, import patterns, test requirements)
|
|
31
|
+
|
|
32
|
+
2. **Implement each task in order** (Red-Green-Refactor):
|
|
33
|
+
- Write failing tests first
|
|
34
|
+
- Make tests pass with minimal code
|
|
35
|
+
- Refactor while keeping tests green
|
|
36
|
+
|
|
37
|
+
3. **After each task**:
|
|
38
|
+
- Verify tests pass
|
|
39
|
+
- Run the full test suite to check for regressions
|
|
40
|
+
- Mark the task `[x]` in the story file
|
|
41
|
+
- Update the story File List with all new/modified files
|
|
42
|
+
|
|
43
|
+
4. **After all tasks complete**:
|
|
44
|
+
- Run the full test suite one final time
|
|
45
|
+
- Update story Status to `review`
|
|
46
|
+
|
|
47
|
+
## CRITICAL: Output Contract Emission
|
|
48
|
+
|
|
49
|
+
**You MUST emit the YAML output block (see Output Contract below) as the very last thing you produce.** The downstream pipeline depends on `files_modified` to generate scoped code-review diffs. If you exhaust your turns without emitting the YAML block, the pipeline cannot review your work properly.
|
|
50
|
+
|
|
51
|
+
- If you are running low on turns, **stop implementation and emit the YAML block immediately** with whatever progress you have made. A partial `files_modified` list is far more valuable than none at all.
|
|
52
|
+
- The YAML block must be the final output — no summary text, no emoji, no explanation after it.
|
|
53
|
+
- **Narrating about the YAML block is NOT the same as emitting it.** Do not say "the YAML has been emitted" — actually emit the literal YAML block starting with `result:`.
|
|
54
|
+
|
|
55
|
+
## HALT Conditions (stop and report as failed)
|
|
56
|
+
|
|
57
|
+
- New dependency required beyond story spec
|
|
58
|
+
- 3 consecutive implementation failures with no progress
|
|
59
|
+
- Story requirements are ambiguous with no way to resolve
|
|
60
|
+
|
|
61
|
+
## Output Contract
|
|
62
|
+
|
|
63
|
+
After completing all tasks (or hitting a HALT condition), emit ONLY this YAML block — no other text:
|
|
64
|
+
|
|
65
|
+
```yaml
|
|
66
|
+
result: success
|
|
67
|
+
ac_met:
|
|
68
|
+
- AC1
|
|
69
|
+
- AC2
|
|
70
|
+
ac_failures: []
|
|
71
|
+
files_modified:
|
|
72
|
+
- <absolute path to modified file>
|
|
73
|
+
tests: pass
|
|
74
|
+
```
|
|
75
|
+
|
|
76
|
+
If a HALT condition was hit:
|
|
77
|
+
|
|
78
|
+
```yaml
|
|
79
|
+
result: failed
|
|
80
|
+
ac_met: []
|
|
81
|
+
ac_failures:
|
|
82
|
+
- <which AC could not be met>
|
|
83
|
+
files_modified: []
|
|
84
|
+
tests: fail
|
|
85
|
+
notes: <reason for failure>
|
|
86
|
+
```
|
|
@@ -0,0 +1,65 @@
|
|
|
1
|
+
# BMAD Compiled Fix-Story Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Story File Content
|
|
6
|
+
{{story_content}}
|
|
7
|
+
|
|
8
|
+
### Review Feedback
|
|
9
|
+
{{review_feedback}}
|
|
10
|
+
|
|
11
|
+
### Architecture Constraints
|
|
12
|
+
{{arch_constraints}}
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## Mission
|
|
17
|
+
|
|
18
|
+
Fix the issues identified in the code review above. Address every issue listed in the review feedback.
|
|
19
|
+
|
|
20
|
+
## Instructions
|
|
21
|
+
|
|
22
|
+
1. **Parse the review feedback** to understand:
|
|
23
|
+
- The verdict (NEEDS_MINOR_FIXES or NEEDS_MAJOR_REWORK)
|
|
24
|
+
- Each issue's severity (blocker, major, minor)
|
|
25
|
+
- Each issue's description, file, and line number (if provided)
|
|
26
|
+
|
|
27
|
+
2. **Fix issues in severity order**: blockers first, then major, then minor.
|
|
28
|
+
|
|
29
|
+
3. **For each fix**:
|
|
30
|
+
- Make the code change
|
|
31
|
+
- Run relevant tests to verify the fix
|
|
32
|
+
- Ensure no regressions
|
|
33
|
+
|
|
34
|
+
4. **After all fixes**:
|
|
35
|
+
- Run the full test suite
|
|
36
|
+
- Verify all issues from the review have been addressed
|
|
37
|
+
|
|
38
|
+
## HALT Conditions (stop and report as failed)
|
|
39
|
+
|
|
40
|
+
- Contradictory requirements between story and review feedback
|
|
41
|
+
- 3 consecutive fix attempts with no progress
|
|
42
|
+
- Fix requires architectural changes beyond the story scope
|
|
43
|
+
|
|
44
|
+
## Output Contract
|
|
45
|
+
|
|
46
|
+
After all fixes are applied (or a HALT condition is hit), emit ONLY this YAML block:
|
|
47
|
+
|
|
48
|
+
```yaml
|
|
49
|
+
result: success
|
|
50
|
+
fixes_applied:
|
|
51
|
+
- <description of fix>
|
|
52
|
+
files_modified:
|
|
53
|
+
- <absolute path to modified file>
|
|
54
|
+
tests: pass
|
|
55
|
+
```
|
|
56
|
+
|
|
57
|
+
If a HALT condition was hit:
|
|
58
|
+
|
|
59
|
+
```yaml
|
|
60
|
+
result: failed
|
|
61
|
+
fixes_applied: []
|
|
62
|
+
files_modified: []
|
|
63
|
+
tests: fail
|
|
64
|
+
notes: <reason for failure>
|
|
65
|
+
```
|
|
@@ -0,0 +1,91 @@
|
|
|
1
|
+
# BMAD Compiled Planning Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Product Brief (from Analysis Phase)
|
|
6
|
+
{{product_brief}}
|
|
7
|
+
|
|
8
|
+
---
|
|
9
|
+
|
|
10
|
+
## Mission
|
|
11
|
+
|
|
12
|
+
Transform the product brief above into a structured **Product Requirements Document (PRD)** — the complete specification that drives architecture, epic creation, and implementation. Think like a veteran product manager who ships products, not one who writes documents.
|
|
13
|
+
|
|
14
|
+
## Instructions
|
|
15
|
+
|
|
16
|
+
1. **Derive functional requirements from the product brief:**
|
|
17
|
+
- Each FR must be specific, testable, and traceable to a core feature or user need
|
|
18
|
+
- Use MoSCoW prioritization: `must` (MVP-critical), `should` (high-value), `could` (nice-to-have)
|
|
19
|
+
- Minimum 3 FRs, but don't pad — every FR should earn its place
|
|
20
|
+
- Frame as capabilities, not implementation details ("Users can filter by date range" not "Add a date picker component")
|
|
21
|
+
|
|
22
|
+
2. **Define non-functional requirements:**
|
|
23
|
+
- Each NFR must have a category (performance, security, scalability, accessibility, reliability, etc.)
|
|
24
|
+
- Be concrete: "API responses under 200ms at p95" not "System should be fast"
|
|
25
|
+
- Minimum 2 NFRs covering different categories
|
|
26
|
+
|
|
27
|
+
3. **Write user stories:**
|
|
28
|
+
- Each story captures a user journey or interaction pattern
|
|
29
|
+
- Title should be scannable; description should explain the "why"
|
|
30
|
+
- Stories bridge the gap between requirements and implementation — they tell the human story behind the FRs
|
|
31
|
+
|
|
32
|
+
4. **Specify the tech stack:**
|
|
33
|
+
- Key-value pairs mapping technology concerns to specific choices
|
|
34
|
+
- Use real, current technologies — do not fabricate frameworks or versions
|
|
35
|
+
- Cover at minimum: language, framework, database, testing
|
|
36
|
+
- Choices should align with the product brief constraints
|
|
37
|
+
|
|
38
|
+
5. **Build the domain model:**
|
|
39
|
+
- Key entities and their relationships
|
|
40
|
+
- Each entity as a key with its attributes/relationships as the value
|
|
41
|
+
- This informs database design and API structure downstream
|
|
42
|
+
|
|
43
|
+
6. **Define out-of-scope items** to prevent scope creep — what this product explicitly does NOT do in its initial version.
|
|
44
|
+
|
|
45
|
+
7. **Amendment awareness**: If amendment context from a parent run is provided below, evolve the existing requirements rather than replacing them wholesale. Add new FRs for new scope, adjust priorities where the amendment changes emphasis, and note any FRs that the amendment renders obsolete.
|
|
46
|
+
|
|
47
|
+
## Output Contract
|
|
48
|
+
|
|
49
|
+
Emit ONLY this YAML block as your final output — no other text.
|
|
50
|
+
|
|
51
|
+
**CRITICAL YAML RULES**: All string values MUST be quoted with double quotes. This prevents YAML parse errors from colons or special characters. Keep each value on a single line. Items in simple string arrays (such as `out_of_scope`) must be plain quoted strings, NOT objects; the structured arrays (`functional_requirements`, `non_functional_requirements`, `user_stories`) must use exactly the object keys shown in the example below.
|
|
52
|
+
|
|
53
|
+
```yaml
|
|
54
|
+
result: success
|
|
55
|
+
functional_requirements:
|
|
56
|
+
- description: "Users can register new habits with a name and frequency"
|
|
57
|
+
priority: "must"
|
|
58
|
+
- description: "Users can view current streaks for all tracked habits"
|
|
59
|
+
priority: "must"
|
|
60
|
+
non_functional_requirements:
|
|
61
|
+
- description: "CLI commands complete within 200ms for local operations"
|
|
62
|
+
category: "performance"
|
|
63
|
+
- description: "All user data encrypted at rest using AES-256"
|
|
64
|
+
category: "security"
|
|
65
|
+
user_stories:
|
|
66
|
+
- title: "Habit Registration"
|
|
67
|
+
description: "As a developer, I want to register daily habits so I can track my consistency"
|
|
68
|
+
- title: "Streak Dashboard"
|
|
69
|
+
description: "As a user, I want to see my current streaks so I stay motivated"
|
|
70
|
+
tech_stack:
|
|
71
|
+
language: "TypeScript"
|
|
72
|
+
framework: "Node.js CLI with Commander"
|
|
73
|
+
database: "SQLite via better-sqlite3"
|
|
74
|
+
testing: "Vitest"
|
|
75
|
+
domain_model:
|
|
76
|
+
Habit:
|
|
77
|
+
attributes: ["name", "frequency", "created_at"]
|
|
78
|
+
relationships: ["has_many: Completions"]
|
|
79
|
+
Completion:
|
|
80
|
+
attributes: ["habit_id", "completed_at"]
|
|
81
|
+
relationships: ["belongs_to: Habit"]
|
|
82
|
+
out_of_scope:
|
|
83
|
+
- "Web or mobile interface"
|
|
84
|
+
- "Cloud sync or multi-device support"
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
If you cannot produce valid planning output:
|
|
88
|
+
|
|
89
|
+
```yaml
|
|
90
|
+
result: failed
|
|
91
|
+
```
|
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
# BMAD Compiled Story Generation Agent
|
|
2
|
+
|
|
3
|
+
## Context (pre-assembled by pipeline)
|
|
4
|
+
|
|
5
|
+
### Requirements (from Planning Phase)
|
|
6
|
+
{{requirements}}
|
|
7
|
+
|
|
8
|
+
### Architecture Decisions (from Solutioning Phase)
|
|
9
|
+
{{architecture_decisions}}
|
|
10
|
+
|
|
11
|
+
### Gap Analysis (retry context — may be empty)
|
|
12
|
+
{{gap_analysis}}
|
|
13
|
+
|
|
14
|
+
---
|
|
15
|
+
|
|
16
|
+
## Mission
|
|
17
|
+
|
|
18
|
+
Break down the requirements and architecture above into **epics and stories** — the work breakdown structure that drives implementation. Think like a product manager and scrum master working together: epics organized by user value, stories sized for a single developer in a single sprint.
|
|
19
|
+
|
|
20
|
+
## Instructions
|
|
21
|
+
|
|
22
|
+
1. **Organize epics by user value, never by technical layer:**
|
|
23
|
+
- GOOD: "User Authentication & Onboarding", "Dashboard & Analytics", "Content Management"
|
|
24
|
+
- BAD: "Database Setup", "API Development", "Frontend Components"
|
|
25
|
+
- Each epic must be independently valuable — a user should benefit from just that epic being complete
|
|
26
|
+
- No forward dependencies — Epic N should not require Epic N+1 to be useful
|
|
27
|
+
|
|
28
|
+
2. **Write implementation-ready stories:**
|
|
29
|
+
- `key`: Short identifier like "1-1" or "2-3" (epic number - story number)
|
|
30
|
+
- `title`: Clear, action-oriented (e.g., "User registration with email verification")
|
|
31
|
+
- `description`: What the developer needs to build and why it matters. Include enough context to start coding.
|
|
32
|
+
- `acceptance_criteria`: Specific, testable conditions. Minimum 1 per story. Use concrete language ("User sees error message when password is under 8 characters") not vague language ("Error handling works")
|
|
33
|
+
- `priority`: must (MVP-critical), should (high-value post-MVP), could (nice-to-have)
|
|
34
|
+
|
|
35
|
+
3. **Ensure full FR coverage:**
|
|
36
|
+
- Every functional requirement from the planning phase must be addressed by at least one story
|
|
37
|
+
- If gap analysis is provided above, it lists specific uncovered requirements — generate stories to cover them
|
|
38
|
+
- Cross-reference: scan each FR and verify you have a story that addresses it
|
|
39
|
+
|
|
40
|
+
4. **Respect architecture decisions:**
|
|
41
|
+
- Stories should reference the chosen tech stack and patterns
|
|
42
|
+
- If architecture specifies a project structure, Epic 1 Story 1 should be project scaffolding
|
|
43
|
+
- Database tables, API endpoints, and infrastructure are created in the epic where they are FIRST NEEDED
|
|
44
|
+
|
|
45
|
+
5. **Size stories appropriately:**
|
|
46
|
+
- Each story should be completable by one developer in 1-3 days
|
|
47
|
+
- If a story feels too large, split it into multiple stories within the same epic
|
|
48
|
+
- If an epic has more than 8 stories, consider splitting the epic
|
|
49
|
+
|
|
50
|
+
6. **Amendment awareness**: If amendment context from a parent run is provided below, generate stories for the NEW scope introduced by the amendment. Do not regenerate stories for unchanged requirements.
|
|
51
|
+
|
|
52
|
+
## Output Contract
|
|
53
|
+
|
|
54
|
+
Emit ONLY this YAML block as your final output — no other text.
|
|
55
|
+
|
|
56
|
+
**CRITICAL YAML RULES**: All string values MUST be quoted with double quotes. This prevents YAML parse errors from colons or special characters. Keep values on single lines. Acceptance criteria items must be plain strings.
|
|
57
|
+
|
|
58
|
+
```yaml
|
|
59
|
+
result: success
|
|
60
|
+
epics:
|
|
61
|
+
- title: "User Onboarding and Habit Management"
|
|
62
|
+
description: "Core habit tracking functionality that delivers immediate user value"
|
|
63
|
+
stories:
|
|
64
|
+
- key: "1-1"
|
|
65
|
+
title: "Project scaffolding and CLI setup"
|
|
66
|
+
description: "Initialize the project with TypeScript, Commander, and SQLite"
|
|
67
|
+
acceptance_criteria:
|
|
68
|
+
- "CLI binary runs and shows help text"
|
|
69
|
+
- "SQLite database is created on first run"
|
|
70
|
+
priority: "must"
|
|
71
|
+
- key: "1-2"
|
|
72
|
+
title: "Register and list habits"
|
|
73
|
+
description: "Users can create habits and view all tracked habits"
|
|
74
|
+
acceptance_criteria:
|
|
75
|
+
- "habit add command creates a new habit"
|
|
76
|
+
- "habit list command shows all habits with status"
|
|
77
|
+
priority: "must"
|
|
78
|
+
```
|
|
79
|
+
|
|
80
|
+
If you cannot produce valid story output:
|
|
81
|
+
|
|
82
|
+
```yaml
|
|
83
|
+
result: failed
|
|
84
|
+
```
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
# Story {epic_num}.{story_num}: {Title}
|
|
2
|
+
|
|
3
|
+
Status: draft
|
|
4
|
+
|
|
5
|
+
## Story
|
|
6
|
+
|
|
7
|
+
As a {role},
|
|
8
|
+
I want {capability},
|
|
9
|
+
so that {benefit}.
|
|
10
|
+
|
|
11
|
+
## Acceptance Criteria
|
|
12
|
+
|
|
13
|
+
### AC1: {Title}
|
|
14
|
+
**Given** {context}
|
|
15
|
+
**When** {action}
|
|
16
|
+
**Then** {outcome}
|
|
17
|
+
|
|
18
|
+
## Tasks / Subtasks
|
|
19
|
+
|
|
20
|
+
- [ ] Task 1: {description} (AC: #1)
|
|
21
|
+
- [ ] {subtask}
|
|
22
|
+
|
|
23
|
+
## Dev Notes
|
|
24
|
+
|
|
25
|
+
### Architecture Constraints
|
|
26
|
+
- {constraint}
|
|
27
|
+
|
|
28
|
+
### Testing Requirements
|
|
29
|
+
- {requirement}
|
|
30
|
+
|
|
31
|
+
## Dev Agent Record
|
|
32
|
+
|
|
33
|
+
### Agent Model Used
|
|
34
|
+
### Completion Notes List
|
|
35
|
+
### File List
|
|
36
|
+
|
|
37
|
+
## Change Log
|
package/README.npm.md
DELETED
|
@@ -1,206 +0,0 @@
|
|
|
1
|
-
<p align="center">
|
|
2
|
-
<img src="https://github.com/user-attachments/assets/622dd577-2814-4657-bb28-112ed272486d" alt="Substrate — Autonomous Software Development Pipeline" />
|
|
3
|
-
</p>
|
|
4
|
-
|
|
5
|
-
# Substrate
|
|
6
|
-
|
|
7
|
-
Autonomous software development pipeline powered by multi-agent orchestration. Substrate takes a project idea from concept through analysis, planning, architecture, implementation, and code review — coordinating CLI-based AI agents (Claude Code, Codex, Gemini CLI) to do the work.
|
|
8
|
-
|
|
9
|
-
Substrate follows a modular monolith pattern running as a single Node.js process. The orchestrator never calls LLMs directly — all intelligent work is delegated to CLI agents running as child processes in isolated git worktrees. The autonomous pipeline compiles BMAD methodology workflows into token-efficient agent dispatches.
|
|
10
|
-
|
|
11
|
-
## Prerequisites
|
|
12
|
-
|
|
13
|
-
- **Node.js** 22.0.0 or later
|
|
14
|
-
- **git** 2.20 or later
|
|
15
|
-
- At least one supported AI CLI agent installed:
|
|
16
|
-
- [Claude Code](https://docs.anthropic.com/en/docs/claude-code) (`claude`)
|
|
17
|
-
- [Codex CLI](https://github.com/openai/codex) (`codex`)
|
|
18
|
-
- Gemini CLI (`gemini`)
|
|
19
|
-
|
|
20
|
-
## Installation
|
|
21
|
-
|
|
22
|
-
Install as a project dependency:
|
|
23
|
-
|
|
24
|
-
```bash
|
|
25
|
-
npm install substrate-ai
|
|
26
|
-
```
|
|
27
|
-
|
|
28
|
-
Or install globally:
|
|
29
|
-
|
|
30
|
-
```bash
|
|
31
|
-
npm install -g substrate-ai
|
|
32
|
-
```
|
|
33
|
-
|
|
34
|
-
Verify the installation:
|
|
35
|
-
|
|
36
|
-
```bash
|
|
37
|
-
npx substrate --version # project install
|
|
38
|
-
substrate --version # global install
|
|
39
|
-
```
|
|
40
|
-
|
|
41
|
-
> Examples below use `[npx] substrate` — include `npx` for project installs, omit for global.
|
|
42
|
-
|
|
43
|
-
## Quick Start
|
|
44
|
-
|
|
45
|
-
### Autonomous Pipeline (recommended)
|
|
46
|
-
|
|
47
|
-
Got an idea? Substrate can take it from concept to working code.
|
|
48
|
-
|
|
49
|
-
1. **Brainstorm** — explore your idea with a multi-persona AI session:
|
|
50
|
-
|
|
51
|
-
```bash
|
|
52
|
-
[npx] substrate brainstorm
|
|
53
|
-
```
|
|
54
|
-
|
|
55
|
-
2. **Initialize the pipeline** — set up the methodology pack and decision store:
|
|
56
|
-
|
|
57
|
-
```bash
|
|
58
|
-
[npx] substrate auto init
|
|
59
|
-
```
|
|
60
|
-
|
|
61
|
-
3. **Run the full pipeline** — analysis, planning, solutioning, and implementation:
|
|
62
|
-
|
|
63
|
-
```bash
|
|
64
|
-
[npx] substrate auto run
|
|
65
|
-
```
|
|
66
|
-
|
|
67
|
-
Substrate walks through the entire software development lifecycle autonomously:
|
|
68
|
-
- **Analysis** — generates a product brief from your brainstorm
|
|
69
|
-
- **Planning** — creates a PRD with requirements
|
|
70
|
-
- **Solutioning** — produces architecture, epics, and stories
|
|
71
|
-
- **Implementation** — dispatches agents to build, test, and code-review each story
|
|
72
|
-
|
|
73
|
-
You can start from any phase or resume an interrupted run:
|
|
74
|
-
|
|
75
|
-
```bash
|
|
76
|
-
[npx] substrate auto run --from solutioning # Skip to a specific phase
|
|
77
|
-
[npx] substrate auto resume # Pick up where you left off
|
|
78
|
-
[npx] substrate auto status # Check pipeline progress
|
|
79
|
-
```
|
|
80
|
-
|
|
81
|
-
### Pick Up an Existing BMAD Project
|
|
82
|
-
|
|
83
|
-
Already have a project with BMAD artifacts (vanilla BMAD or the Beads-based ai-toolkit)? Substrate can pick up the remaining implementation work. It reads one directory — `_bmad-output/` — and doesn't care which tool created it.
|
|
84
|
-
|
|
85
|
-
**What Substrate needs from your project:**
|
|
86
|
-
|
|
87
|
-
| File | Required? | Purpose |
|
|
88
|
-
|------|-----------|---------|
|
|
89
|
-
| `_bmad-output/planning-artifacts/epics.md` | Yes | Parsed into per-epic context shards |
|
|
90
|
-
| `_bmad-output/planning-artifacts/architecture.md` | Yes | Tech stack and constraints for agents |
|
|
91
|
-
| `_bmad-output/implementation-artifacts/*.md` | Optional | Existing story files — Substrate skips create-story for any it finds |
|
|
92
|
-
| `package.json` | Optional | Test framework detection |
|
|
93
|
-
|
|
94
|
-
**Three commands:**
|
|
95
|
-
|
|
96
|
-
```bash
|
|
97
|
-
npm install substrate-ai
|
|
98
|
-
[npx] substrate auto init # Seeds context from _bmad-output/
|
|
99
|
-
[npx] substrate auto run --stories 5-3,5-4,6-1 # Only the unfinished story keys
|
|
100
|
-
```
|
|
101
|
-
|
|
102
|
-
For each story, Substrate runs: **create-story** (skipped if story file exists) → **dev-story** (implement) → **code-review** (adversarial review). Non-conflicting stories run in parallel automatically.
|
|
103
|
-
|
|
104
|
-
Substrate does not read `sprint-status.yaml` or `.beads/` — you decide what's left by choosing which story keys to pass.
|
|
105
|
-
|
|
106
|
-
## Supported Agents
|
|
107
|
-
|
|
108
|
-
| Agent ID | CLI Tool | Billing |
|
|
109
|
-
|----------|----------|---------|
|
|
110
|
-
| `claude-code` | Claude Code | Subscription (Max) or API key |
|
|
111
|
-
| `codex` | Codex CLI | Subscription (ChatGPT Plus/Pro) or API key |
|
|
112
|
-
| `gemini` | Gemini CLI | Subscription or API key |
|
|
113
|
-
|
|
114
|
-
## Commands
|
|
115
|
-
|
|
116
|
-
### Pipeline
|
|
117
|
-
|
|
118
|
-
| Command | Description |
|
|
119
|
-
|---------|-------------|
|
|
120
|
-
| `substrate brainstorm` | Interactive multi-persona ideation session |
|
|
121
|
-
| `substrate auto init` | Initialize methodology pack for autonomous pipeline |
|
|
122
|
-
| `substrate auto run` | Run the full pipeline (analysis → implement) |
|
|
123
|
-
| `substrate auto run --from <phase>` | Start from a specific phase |
|
|
124
|
-
| `substrate auto resume` | Resume an interrupted pipeline run |
|
|
125
|
-
| `substrate auto status` | Show pipeline run status |
|
|
126
|
-
|
|
127
|
-
### Monitoring
|
|
128
|
-
|
|
129
|
-
| Command | Description |
|
|
130
|
-
|---------|-------------|
|
|
131
|
-
| `substrate monitor status` | View task metrics and agent performance |
|
|
132
|
-
| `substrate monitor report` | Generate a detailed performance report |
|
|
133
|
-
| `substrate cost-report` | View cost and token usage summary |
|
|
134
|
-
|
|
135
|
-
### Setup
|
|
136
|
-
|
|
137
|
-
| Command | Description |
|
|
138
|
-
|---------|-------------|
|
|
139
|
-
| `substrate init` | Initialize project configuration |
|
|
140
|
-
| `substrate --help` | Show all available commands |
|
|
141
|
-
|
|
142
|
-
## Configuration
|
|
143
|
-
|
|
144
|
-
Substrate reads configuration from `.substrate/config.yaml` in your project root. Run `[npx] substrate init` to generate a default config.
|
|
145
|
-
|
|
146
|
-
## Development
|
|
147
|
-
|
|
148
|
-
```bash
|
|
149
|
-
# Clone and install
|
|
150
|
-
git clone https://github.com/johnplanow/substrate.git
|
|
151
|
-
cd substrate
|
|
152
|
-
npm install
|
|
153
|
-
|
|
154
|
-
# Build
|
|
155
|
-
npm run build
|
|
156
|
-
|
|
157
|
-
# Run tests
|
|
158
|
-
npm test
|
|
159
|
-
|
|
160
|
-
# Development mode (watch)
|
|
161
|
-
npm run dev
|
|
162
|
-
|
|
163
|
-
# Type check
|
|
164
|
-
npm run typecheck
|
|
165
|
-
|
|
166
|
-
# Lint
|
|
167
|
-
npm run lint
|
|
168
|
-
```
|
|
169
|
-
|
|
170
|
-
## Manual Task Graphs
|
|
171
|
-
|
|
172
|
-
For fine-grained control, you can define exactly what agents should do in a YAML task graph:
|
|
173
|
-
|
|
174
|
-
```yaml
|
|
175
|
-
version: "1"
|
|
176
|
-
session:
|
|
177
|
-
name: "my-tasks"
|
|
178
|
-
tasks:
|
|
179
|
-
write-tests:
|
|
180
|
-
name: "Write unit tests"
|
|
181
|
-
prompt: |
|
|
182
|
-
Look at the src/utils/ directory.
|
|
183
|
-
Write comprehensive unit tests for all exported functions.
|
|
184
|
-
type: testing
|
|
185
|
-
agent: claude-code
|
|
186
|
-
update-docs:
|
|
187
|
-
name: "Update README"
|
|
188
|
-
prompt: |
|
|
189
|
-
Read the README.md and verify it accurately describes
|
|
190
|
-
the project. Fix any inaccuracies.
|
|
191
|
-
type: docs
|
|
192
|
-
agent: codex
|
|
193
|
-
depends_on:
|
|
194
|
-
- write-tests
|
|
195
|
-
```
|
|
196
|
-
|
|
197
|
-
```bash
|
|
198
|
-
[npx] substrate start --graph tasks.yaml # Execute the graph
|
|
199
|
-
[npx] substrate plan --graph tasks.yaml # Preview without running
|
|
200
|
-
```
|
|
201
|
-
|
|
202
|
-
Tasks without dependencies run in parallel. Each agent gets its own isolated git worktree.
|
|
203
|
-
|
|
204
|
-
## License
|
|
205
|
-
|
|
206
|
-
MIT
|