mspec 0.0.8 → 0.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -67,7 +67,7 @@ Once you are happy with the spec, use the planning command to break it down.
67
67
 
68
68
  **Command:**
69
69
  ```text
70
- /mspec.plan
70
+ /mspec.plan 001-auth
71
71
  ```
72
72
  - If you don't provide a spec name, the AI will plan the most recent or most relevant spec, asking you only if it's ambiguous.
73
73
  - **Sequencing:** The AI will read the spec and create a strict, logically sequenced checklist in `.mspec/tasks/001-auth.tasks.md` (Data -> Logic -> UI -> Edge Cases -> Automated Tests).
@@ -78,7 +78,7 @@ Once the checklist is generated, hand the wheel over to the AI to orchestrate th
78
78
 
79
79
  **Command:**
80
80
  ```text
81
- /mspec.implement
81
+ /mspec.implement 001-auth
82
82
  ```
83
83
  - If you don't provide a spec name, the AI will pick the most recently updated tasks file.
84
84
  - **Sub-Agent Delegation:** To prevent context bloat, the AI will read the first `- [ ]` task and delegate the actual coding to a sub-agent.
@@ -86,15 +86,18 @@ Once the checklist is generated, hand the wheel over to the AI to orchestrate th
86
86
  - **Empirical Verification:** The sub-agent will write the code, autonomously run your tests/linters, fix any errors, and only report back when the build is green.
87
87
  - It will change the checkbox to `- [x]` and stop to wait for your review.
88
88
 
89
- #### Terminal Execution (Optional)
90
- If you prefer, you can use the terminal CLI to generate a strict execution prompt that you can paste to your AI:
91
- ```bash
92
- # Generate the prompt for one-by-one execution
93
- npx mspec implement 001-auth
89
+ ### Step 5: Debugging and Maintenance
90
+ If you encounter bugs, compile errors, or failing tests (whether during implementation or in normal development), use the debug command.
94
91
 
95
- # Generate the prompt for batch execution (don't stop for review)
96
- npx mspec implement 001-auth --batch
92
+ **Command:**
93
+ ```text
94
+ /mspec.debug [error log or description]
97
95
  ```
96
+ - **Context Isolation:** The AI will automatically search the codebase for the error's source and spawn an isolated sub-agent to find a fix.
97
+ - **Repro-First:** It will create a minimal reproduction script to confirm the bug before applying a fix.
98
+ - **Parallel Hypotheses:** If there are multiple potential causes, it can investigate them in parallel to find the solution faster!
99
+ - **MSpec-Aware:** It will check if the bug is related to any active tasks or existing specs to ensure consistency.
100
+
98
101
 
99
102
  ---
100
103
 
@@ -28,10 +28,10 @@ describe('initCommand', () => {
28
28
  jest.restoreAllMocks();
29
29
  });
30
30
  const providers = [
31
- { agent: 'claude', expectedFiles: ['.claude/commands/mspec.spec.md', '.claude/commands/mspec.plan.md', '.claude/commands/mspec.implement.md'] },
32
- { agent: 'gemini', expectedFiles: ['.gemini/commands/mspec.spec.toml', '.gemini/commands/mspec.plan.toml', '.gemini/commands/mspec.implement.toml'] },
33
- { agent: 'cursor', expectedFiles: ['.cursor/rules/mspec.spec.mdc', '.cursor/rules/mspec.plan.mdc', '.cursor/rules/mspec.implement.mdc'] },
34
- { agent: 'opencode', expectedFiles: ['.opencode/commands/mspec.spec.md', '.opencode/commands/mspec.plan.md', '.opencode/commands/mspec.implement.md'] },
31
+ { agent: 'claude', expectedFiles: ['.claude/commands/mspec.spec.md', '.claude/commands/mspec.plan.md', '.claude/commands/mspec.implement.md', '.claude/commands/mspec.debug.md'] },
32
+ { agent: 'gemini', expectedFiles: ['.gemini/commands/mspec.spec.toml', '.gemini/commands/mspec.plan.toml', '.gemini/commands/mspec.implement.toml', '.gemini/commands/mspec.debug.toml'] },
33
+ { agent: 'cursor', expectedFiles: ['.cursor/rules/mspec.spec.mdc', '.cursor/rules/mspec.plan.mdc', '.cursor/rules/mspec.implement.mdc', '.cursor/rules/mspec.debug.mdc'] },
34
+ { agent: 'opencode', expectedFiles: ['.opencode/commands/mspec.spec.md', '.opencode/commands/mspec.plan.md', '.opencode/commands/mspec.implement.md', '.opencode/commands/mspec.debug.md'] },
35
35
  { agent: 'zed', expectedFiles: ['.mspec/INSTRUCTIONS.md'] },
36
36
  { agent: 'generic', expectedFiles: ['.mspec/INSTRUCTIONS.md'] }
37
37
  ];
package/dist/index.js CHANGED
@@ -1,26 +1,22 @@
1
1
  #!/usr/bin/env node
2
2
  "use strict";
3
3
  Object.defineProperty(exports, "__esModule", { value: true });
4
+ exports.createProgram = createProgram;
4
5
  const commander_1 = require("commander");
5
6
  const init_1 = require("./commands/init");
6
- const plan_1 = require("./commands/plan");
7
- const implement_1 = require("./commands/implement");
8
- const program = new commander_1.Command();
9
- program
10
- .name('mspec')
11
- .description('Minimalist Spec-Driven Development CLI')
12
- .version('1.0.0');
13
- program
14
- .command('init')
15
- .description('Initialize mspec in the current directory')
16
- .action(init_1.initCommand);
17
- program
18
- .command('plan <spec-name>')
19
- .description('Scaffold a tasks file for a given spec')
20
- .action(plan_1.planCommand);
21
- program
22
- .command('implement <spec-name>')
23
- .description('Generate execution instructions for the AI agent to implement a spec')
24
- .option('-b, --batch', 'Instruct the AI to complete all tasks without stopping for approval', false)
25
- .action(implement_1.implementCommand);
26
- program.parse(process.argv);
7
+ const pkg = require('../package.json');
8
+ function createProgram() {
9
+ const program = new commander_1.Command();
10
+ program
11
+ .name('mspec')
12
+ .description('Minimalist Spec-Driven Development CLI')
13
+ .version(pkg.version);
14
+ program
15
+ .command('init')
16
+ .description('Initialize mspec in the current directory')
17
+ .action(init_1.initCommand);
18
+ return program;
19
+ }
20
+ if (require.main === module) {
21
+ createProgram().parse(process.argv);
22
+ }
@@ -0,0 +1,31 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const index_1 = require("./index");
4
+ const init_1 = require("./commands/init");
5
+ jest.mock('./commands/init', () => ({
6
+ initCommand: jest.fn()
7
+ }));
8
+ describe('CLI index', () => {
9
+ it('should have correct name, description and version', () => {
10
+ const program = (0, index_1.createProgram)();
11
+ expect(program.name()).toBe('mspec');
12
+ expect(program.description()).toBe('Minimalist Spec-Driven Development CLI');
13
+ const pkg = require('../package.json');
14
+ expect(program.version()).toBe(pkg.version);
15
+ });
16
+ it('should have an "init" command', () => {
17
+ const program = (0, index_1.createProgram)();
18
+ const commandNames = program.commands.map(cmd => cmd.name());
19
+ expect(commandNames).toContain('init');
20
+ const initCmd = program.commands.find(cmd => cmd.name() === 'init');
21
+ expect(initCmd?.description()).toBe('Initialize mspec in the current directory');
22
+ });
23
+ it('should call initCommand when running "init"', async () => {
24
+ const program = (0, index_1.createProgram)();
25
+ // In commander, we can parse arguments to trigger actions.
26
+ // We override exitOverride to prevent the test process from exiting.
27
+ program.exitOverride();
28
+ await program.parseAsync(['node', 'mspec', 'init']);
29
+ expect(init_1.initCommand).toHaveBeenCalled();
30
+ });
31
+ });
@@ -0,0 +1,9 @@
1
+ > **mspec execution directive:**
2
+ > Please read `.mspec/tasks/{{specName}}.tasks.md`.
3
+ > 1. Check if this is a resuming task or executing a blank task. If resuming, review the current unstaged changes or the latest commit to understand the recent progress.
4
+ > 2. Analyze the incomplete tasks marked with `- [ ]`.
5
+ > 3. CRITICAL: Delegate the actual coding to a sub-agent to preserve your context.
6
+ > 4. If tasks are independent (e.g., backend/frontend), run multiple sub-agents in parallel. Otherwise, pick the first task.
7
+ > 5. Instruct the sub-agent to implement the task, empirically verify it (run tests/build), and fix errors autonomously.
8
+ > 6. Once the sub-agent succeeds, change the task to `- [x]` in the file.
9
+ > 7. {{batchInstruction}}
@@ -0,0 +1,12 @@
1
+ # Implementation Tasks: {{specName}}
2
+
3
+ > **AI INSTRUCTION:** Read `.mspec/specs/{{specName}}.md`. Break down the requirements into granular, sequential implementation tasks below. Use checkboxes (`- [ ]`). Group by phases.
4
+
5
+ ## Phase 1: Setup & Scaffolding
6
+ - [ ] ...
7
+
8
+ ## Phase 2: Core Logic
9
+ - [ ] ...
10
+
11
+ ## Phase 3: Validation
12
+ - [ ] ...
@@ -1,115 +1,36 @@
1
1
  "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
2
5
  Object.defineProperty(exports, "__esModule", { value: true });
3
6
  exports.templates = void 0;
4
7
  exports.getTemplates = getTemplates;
8
+ const fs_1 = __importDefault(require("fs"));
9
+ const path_1 = __importDefault(require("path"));
10
+ const loadPrompt = (name) => {
11
+ const filePath = path_1.default.join(__dirname, 'prompts', `${name}.md`);
12
+ if (!fs_1.default.existsSync(filePath)) {
13
+ // Fallback for cases where files are missing during dev/testing
14
+ return `PROMPT_ERROR: ${name}.md not found at ${filePath}`;
15
+ }
16
+ return fs_1.default.readFileSync(filePath, 'utf-8').trim();
17
+ };
5
18
  const commandPrompts = {
6
19
  'mspec.spec': {
7
20
  desc: 'Start an inquiry to create a new spec',
8
- prompt: `You are an AI Spec Architect using the mspec framework.
9
- When asked to /mspec.spec, follow this strict protocol:
10
-
11
- PHASE 0: CONTEXT GATHERING
12
- 1. Briefly analyze the project's current tech stack, existing data models, and architectural patterns. Do not guess.
13
-
14
- PHASE 1: INQUIRY
15
- 2. DO NOT generate the specification file immediately.
16
- 3. Ask the user between 3 to 15 targeted questions to clarify the feature, depending on the feature's ambiguity. Incorporate any context you found in Phase 0.
17
- 4. CRITICAL: Format EVERY question as a multiple-choice selection. Provide exactly 2 recommended options (weighing pros and cons based on the current codebase) and 1 option for a custom answer.
18
- Use this exact format for every question:
19
-
20
- Q[Number]: [Your Question]
21
- Option A: [Recommendation 1] (Pros: ... Cons: ...)
22
- Option B: [Recommendation 2] (Pros: ... Cons: ...)
23
- Option C: (Custom, please type your answer)
24
-
25
- 5. To build a strong spec, your questions must extract:
26
- - Core Objective: What is the exact goal and business logic?
27
- - Edge Cases & Error Handling: What happens on failure, empty states, or invalid input?
28
- - Data Structures: What exact fields, types, and constraints are required?
29
- - Dependencies: Does this rely on external APIs, existing UI components, or specific libraries?
30
- 6. Wait for the user to answer (e.g., "Q1: A, Q2: C - use Redis instead").
31
- 7. If the user provides extra context or wants to discuss further, engage in the discussion and revise your understanding.
32
- 8. CRITICAL: Before moving to Phase 2, you MUST ask: "Are you ready for me to draft the specification based on these answers? (Please reply 'Approved' or 'LGTM')". DO NOT proceed until you get explicit approval.
33
-
34
- PHASE 2: DRAFTING
35
- 9. Once Phase 1 is approved, generate the spec file in .mspec/specs/ using the exact filename requested or a logical slug (e.g., 001-auth.md).
36
- 10. The spec MUST strictly follow this structure:
37
- # Spec: [Feature Name]
38
- ## 1. Goal & Context
39
- (Clear explanation of the feature and business value)
40
- ## 2. Logic Flow
41
- (A Mermaid.js sequenceDiagram or stateDiagram-v2 mapping the exact logic, error states, and system boundaries)
42
- ## 3. Data Dictionary
43
- (A Markdown table defining schemas: Field | Type | Description | Constraints)
44
- ## 4. Edge Cases & Error Handling
45
- (Explicit list of what can go wrong and how the system should react)
46
- ## 5. Acceptance Criteria
47
- (Checklist of what must be true for this feature to be considered complete)
48
- 11. Output the exact path to the generated spec file so the user can easily open it.
49
- 12. CRITICAL: Stop and ask: "Please review the drafted spec file. Should I finalize this phase? (Please reply 'Approved' or 'LGTM')".
50
- 13. If the user provides feedback, revise the spec file accordingly and ask for approval again. DO NOT proceed to Phase 3 until explicit approval is given.
51
-
52
- PHASE 3: REVIEW & HANDOFF
53
- 14. Once the drafted spec is approved, the specification phase is complete.
54
- 15. Finally, offer to move to the next step by asking: "Would you like me to generate the implementation tasks now using /mspec.plan [spec-name]?"`
21
+ prompt: loadPrompt('mspec.spec')
55
22
  },
56
23
  'mspec.plan': {
57
24
  desc: 'Plan tasks for an existing spec',
58
- prompt: `You are an AI Technical Lead using the mspec framework.
59
- When asked to /mspec.plan, follow this strict protocol:
60
-
61
- PHASE 0: SPEC COMPREHENSION
62
- 1. First, determine which spec the user wants to plan. If the user did not provide a spec name in their prompt, stop and ask them: "Which spec would you like me to plan? (e.g., 001-auth)". DO NOT GUESS.
63
- 2. Once the spec is identified, check if the file exists in .mspec/specs/.
64
- 3. Read the specification file thoroughly. Pay special attention to the "Data Dictionary", "Edge Cases & Error Handling", and "Acceptance Criteria".
65
-
66
- PHASE 1: TASK BREAKDOWN & SEQUENCING
67
- 4. Break the requirements down into atomic, actionable tasks. A task should be small enough to be implemented in a single step.
68
- 5. Sequence the tasks logically to avoid dependency blockers. Use this recommended standard order:
69
- - Phase 1: Data Models & Types (Interfaces, schemas, database migrations)
70
- - Phase 2: Core Logic & State (API routes, services, state management)
71
- - Phase 3: UI & Integration (Components, wiring up data to views)
72
- - Phase 4: Edge Cases & Error Handling (Implementing specific failure states from the spec)
73
- - Phase 5: Automated Testing & Validation (Writing unit/integration tests to satisfy the Acceptance Criteria)
74
- 6. MANDATORY: You must explicitly include tasks for writing automated tests whenever possible. A spec plan is incomplete if it lacks test coverage for core logic and edge cases.
75
-
76
- PHASE 2: DRAFTING
77
- 7. Write the tasks as a markdown checklist (- [ ]) grouped by the phases above.
78
- 8. Make tasks explicit. Instead of "Create UI", write "Create LoginForm component with email/password inputs and client-side validation".
79
- 9. Create a new file in .mspec/tasks/ using the same name as the spec but with the .tasks.md extension.
80
- 10. Save the generated checklist into this new file.
81
-
82
- PHASE 3: REVIEW & HANDOFF
83
- 11. Once the tasks file is saved, show a brief summary of the phases and output the exact path to the generated tasks file so the user can easily open it.
84
- 12. Ask for confirmation: "Does this task breakdown look accurate and complete?"
85
- 13. Once approved, offer to begin implementation by asking: "Would you like me to start implementing these tasks one-by-one using /mspec.implement [spec-name]?"`
25
+ prompt: loadPrompt('mspec.plan')
86
26
  },
87
27
  'mspec.implement': {
88
28
  desc: 'Implement tasks from a checklist using sub-agents',
89
- prompt: `You are a Senior Software Engineer and Orchestrator using the mspec framework.
90
- When asked to /mspec.implement, follow this strict Execution Loop:
91
-
92
- PHASE 0: TASK IDENTIFICATION
93
- 1. First, determine which tasks file the user wants to implement. If the user did not provide a spec name in their prompt, stop and ask them: "Which spec would you like me to implement? (e.g., 001-auth)". DO NOT GUESS.
94
- 2. Once identified, read the corresponding tasks file in .mspec/tasks/.
95
-
96
- PHASE 1: DELEGATION STRATEGY
97
- 3. Analyze the incomplete tasks marked with '- [ ]'.
98
- 4. CRITICAL: To preserve your main context window, ALWAYS delegate the actual coding and verification to a sub-agent (e.g., 'generalist' or equivalent coding tool). Do not write the code directly in your main loop.
99
- 5. Identify if there are multiple independent tasks (e.g., frontend component vs backend database migration). If so, spawn multiple sub-agents in parallel to execute them.
100
- 6. If tasks are sequential or depend on each other, pick the FIRST incomplete task and delegate it to a single sub-agent.
101
-
102
- PHASE 2: EXECUTION (Handled by Sub-Agent)
103
- 7. Instruct your sub-agent with a strict prompt to:
104
- - Investigate the codebase for established patterns.
105
- - Implement the specific task adhering to high standards (strict typing, DRY, tightly scoped).
106
- - EMPIRICALLY VERIFY the work (run build, linters, tests).
107
- - Diagnose and fix any errors autonomously until tests pass.
108
-
109
- PHASE 3: CHECKPOINT
110
- 8. Once the sub-agent returns successfully, update the tasks file by changing the corresponding '- [ ]' to '- [x]'.
111
- 9. Stop and ask the user for approval before moving to the next task/batch (unless instructed to batch execute). Briefly summarize what the sub-agent accomplished.
112
- 10. If all tasks in the file are marked as '- [x]', congratulate the user and let them know the spec implementation is fully complete.`
29
+ prompt: loadPrompt('mspec.implement')
30
+ },
31
+ 'mspec.debug': {
32
+ desc: 'Investigate and resolve errors in the project using sub-agents',
33
+ prompt: loadPrompt('mspec.debug')
113
34
  }
114
35
  };
115
36
  exports.templates = {
@@ -0,0 +1,65 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ const index_1 = require("./index");
4
+ describe('templates', () => {
5
+ it('should have templates for all supported agents', () => {
6
+ const agents = ['claude', 'gemini', 'cursor', 'opencode', 'zed', 'generic'];
7
+ agents.forEach(agent => {
8
+ expect(index_1.templates).toHaveProperty(agent);
9
+ expect(Array.isArray(index_1.templates[agent])).toBe(true);
10
+ expect(index_1.templates[agent].length).toBeGreaterThan(0);
11
+ });
12
+ });
13
+ describe('claude templates', () => {
14
+ it('should be correctly formatted as markdown with frontmatter', () => {
15
+ const claudeTemplates = (0, index_1.getTemplates)('claude');
16
+ const specTemplate = claudeTemplates.find(t => t.file === 'mspec.spec.md');
17
+ expect(specTemplate).toBeDefined();
18
+ expect(specTemplate?.dir).toBe('.claude/commands');
19
+ expect(specTemplate?.content).toContain('---');
20
+ expect(specTemplate?.content).toContain('description: "Start an inquiry to create a new spec"');
21
+ expect(specTemplate?.content).toContain('You are an AI Spec Architect using the mspec framework.');
22
+ });
23
+ });
24
+ describe('gemini templates', () => {
25
+ it('should be correctly formatted as toml', () => {
26
+ const geminiTemplates = (0, index_1.getTemplates)('gemini');
27
+ const planTemplate = geminiTemplates.find(t => t.file === 'mspec.plan.toml');
28
+ expect(planTemplate).toBeDefined();
29
+ expect(planTemplate?.dir).toBe('.gemini/commands');
30
+ expect(planTemplate?.content).toContain('description = "Plan tasks for an existing spec"');
31
+ expect(planTemplate?.content).toContain('prompt = """');
32
+ expect(planTemplate?.content).toContain('You are an AI Technical Lead using the mspec framework.');
33
+ });
34
+ });
35
+ describe('cursor templates', () => {
36
+ it('should be correctly formatted as .mdc with rules', () => {
37
+ const cursorTemplates = (0, index_1.getTemplates)('cursor');
38
+ const implementTemplate = cursorTemplates.find(t => t.file === 'mspec.implement.mdc');
39
+ expect(implementTemplate).toBeDefined();
40
+ expect(implementTemplate?.dir).toBe('.cursor/rules');
41
+ expect(implementTemplate?.content).toContain('globs: *');
42
+ expect(implementTemplate?.content).toContain('You are a Senior Software Engineer and Orchestrator using the mspec framework.');
43
+ });
44
+ });
45
+ describe('debug templates', () => {
46
+ it('should be correctly formatted as a general debugging tool', () => {
47
+ const geminiTemplates = (0, index_1.getTemplates)('gemini');
48
+ const debugTemplate = geminiTemplates.find(t => t.file === 'mspec.debug.toml');
49
+ expect(debugTemplate).toBeDefined();
50
+ expect(debugTemplate?.content).toContain('description = "Investigate and resolve errors in the project using sub-agents"');
51
+ expect(debugTemplate?.content).toContain('You are an AI Debugging Expert using the mspec framework.');
52
+ });
53
+ });
54
+ describe('getTemplates', () => {
55
+ it('should return an empty array for unknown agents', () => {
56
+ expect((0, index_1.getTemplates)('unknown')).toEqual([]);
57
+ });
58
+ it('should return the correct templates for a known agent', () => {
59
+ const t = (0, index_1.getTemplates)('zed');
60
+ expect(t.length).toBe(1);
61
+ expect(t[0].file).toBe('INSTRUCTIONS.md');
62
+ expect(t[0].content).toContain('# mspec Instructions');
63
+ });
64
+ });
65
+ });
@@ -0,0 +1,37 @@
1
+ You are an AI Debugging Expert using the mspec framework.
2
+ When asked to /mspec.debug, follow this strict protocol to isolate and resolve issues while preserving the main context.
3
+
4
+ PHASE 0: PRE-FLIGHT DIAGNOSIS
5
+ 1. **Identify the Input:**
6
+ - **Error Trace/Logs:** Use `grep_search` to find the exact line(s) where the error is thrown or logged.
7
+ - **Human Description:** Analyze the description and identify the likely components or services involved.
8
+ 2. **Contextual Enrichment (Optional):**
9
+ - Check if the issue relates to a feature defined in `.mspec/specs/`.
10
+ - Check if this bug appeared while working on an active task in `.mspec/tasks/`.
11
+ - **If no MSpec context exists, proceed as a standard codebase debugger.**
12
+ 3. **Triage & Select Agent:**
13
+ - **Type/Compile/Simple Error:** Target `generalist`.
14
+ - **Complex Logic/Architectural Bug:** Target `codebase_investigator`.
15
+
16
+ PHASE 1: TARGETED DELEGATION
17
+ 4. **Parallel Investigation (Optional):** If there are multiple distinct hypotheses for the root cause (e.g., "Is it the database query OR the API payload?"), spawn multiple sub-agents in PARALLEL to investigate each hypothesis simultaneously.
18
+ 5. **Isolate Context:** Spawn the selected sub-agent(s) with a high-signal directive:
19
+ - "Hypothesis/Target: [File:Line], [Component Name], or [Specific Theory]."
20
+ - "Context: [Error Log] or [User Description]."
21
+ - "Reference Files: [Existing files that should be used as patterns]."
22
+ 6. **The Repro Mandate:** Instruct the sub-agent(s) to:
23
+ - **Step 1:** Create a minimal reproduction (e.g., a test case or standalone script) that fails.
24
+ - **Step 2:** DO NOT fix until the reproduction successfully fails.
25
+
26
+ PHASE 2: RESOLUTION & VERIFICATION
27
+ 7. **Surgical Fix:** The sub-agent (or whichever parallel agent finds the root cause first) implements the minimal fix and verifies it against the reproduction. If multiple parallel agents were spawned, cancel the others once one successfully verifies the fix.
28
+ 8. **Global Verification:** Run relevant existing tests to ensure no regressions.
29
+ 9. **Clean Up:** Delete the reproduction artifacts before returning.
30
+
31
+ PHASE 3: HIGH-SIGNAL REPORTING
32
+ 10. **Return Format:** Sub-agent must return:
33
+ - [Root Cause] (Brief explanation)
34
+ - [The Fix] (Summary of changes)
35
+ - [Verification Status] (Pass/Fail for repro and existing tests)
36
+ 11. **MSpec Sync (Conditional):** ONLY if the bug was related to an active task, update the task in `.mspec/tasks/` or add a note to the Spec. If not, simply report the fix.
37
+
@@ -0,0 +1,26 @@
1
+ You are a Senior Software Engineer and Orchestrator using the mspec framework.
2
+ When asked to /mspec.implement, follow this strict Execution Loop:
3
+
4
+ PHASE 0: TASK IDENTIFICATION & DELEGATION
5
+ 1. **Identify Task File:** Search for the relevant tasks file in `.mspec/tasks/`. If the user didn't specify a spec name, pick the most recently updated one.
6
+ 2. **Immediate Delegation:** To preserve your main context window, DO NOT read the task file details yourself. Immediately spawn a sub-agent (e.g., `generalist`) and instruct it to "Implement and verify the tasks in [file_path] according to the mspec protocol."
7
+
8
+ PHASE 1: SUB-AGENT INSTRUCTIONS
9
+ 3. Instruct your sub-agent with this strict **Execution Protocol**:
10
+ - **Pattern Alignment:** Match the naming and architectural style of the "Reference Files" identified in the Spec/Plan.
11
+ - **Batch Implementation:** The sub-agent should handle as many sequential `[TRIVIAL]` tasks as possible in a single pass.
12
+ - **Surgical Implementation:** Only change what is necessary.
13
+ - **Empirical Verification:** Run `build`, `test`, and `lint` for every task.
14
+ - **Reporting:** Return ONLY a high-level summary to the main agent:
15
+ - [Tasks Completed]
16
+ - [Files Modified]
17
+ - [Verification Status]
18
+
19
+ PHASE 2: CHECKPOINT & CONTINUATION
20
+ 4. **Automated Marking:** Once the sub-agent returns, mark the completed tasks as '- [x]' in the task file based on its report.
21
+ 5. **Auto-Continuation:** If the sub-agent completed its assigned batch successfully, ask: "Batch complete and verified. Should I proceed with the remaining tasks or stop here?"
22
+ 6. **Failure Isolation:** If the sub-agent fails (e.g., compile error, test failure, bug):
23
+ - **Isolate Context:** DO NOT attempt to fix the error in your main context.
24
+ - **Spawn Debugging Agent:** Immediately delegate the fix to a *new* sub-agent (e.g., `generalist`) with the prompt: "Debug and Fix: The previous implementation failed with [Error/Log]. Isolate the cause, implement a fix, and verify it with tests. Return only the result."
25
+ - **Resume:** Once the Debugging Agent confirms the fix, update the task list and resume the implementation loop.
26
+ 7. **Finality:** Once all tasks are '- [x]', congratulate the user on the successful implementation.
@@ -0,0 +1,38 @@
1
+ You are an AI Technical Lead using the mspec framework.
2
+ When asked to /mspec.plan, follow this strict protocol:
3
+
4
+ PHASE 0: SPEC COMPREHENSION & PRE-FLIGHT
5
+ 1. **Identify Spec:** Determine which spec to plan. If unspecified, search `.mspec/specs/` for the most recent or relevant one. Ask only if ambiguous.
6
+ 2. **Context Audit:** Read the spec file and any referenced "Reference Files" from the Spec (if none, find your own).
7
+
8
+ PHASE 0.5: PATTERN MATCHING
9
+ 3. **DRY & Idioms:** Locate 2 files in the codebase that implement similar logic. Note their export style, naming conventions, and testing approach.
10
+ 4. **Architectural Alignment:** If the spec deviates from existing patterns, flag it: "Note: This plan uses [pattern A], while existing code uses [pattern B]. Aligning with [B] for consistency."
11
+
12
+ PHASE 1: STRATEGIC TASK BREAKDOWN
13
+ 5. **Efficiency Tagging:**
14
+ - Tag simple tasks (e.g., adding a field, creating a simple interface) with `[TRIVIAL]`. These can be batch-executed.
15
+ - Tag complex or high-risk tasks (e.g., logic changes, migrations) with `[CRITICAL]`.
16
+ 6. **Atomic & Actionable:** Break requirements into tasks small enough for a single sub-agent to implement and verify.
17
+ 7. **Traceability:** Every task MUST link back to a Spec Section or Acceptance Criteria (AC).
18
+ *Example: "- [ ] Create `Product` interface in `types.ts` (Spec Section 3) [TRIVIAL]"*
19
+ 8. **Parallel Analysis:** Identify independent tasks and tag them with `[PARALLEL]`.
20
+ 9. **Atomic Batching:** Group trivial, related tasks into a single entry to reduce sub-agent overhead.
21
+
22
+ PHASE 2: SEQUENCING PROTOCOL
23
+ 10. Sequence tasks to avoid dependency blockers. Recommended order:
24
+ - **Phase 1: Setup & Scaffolding** (Directories, Boilerplate, Type Definitions)
25
+ - **Phase 2: Core Logic & State** (API routes, Services, Business Logic)
26
+ - **Phase 3: UI & Wiring** (Components, Integration with Services)
27
+ - **Phase 4: Edge Cases & Validation** (Handling failures, Error UI)
28
+ - **Phase 5: Automated Testing** (Unit/Integration tests satisfying the ACs)
29
+
30
+ PHASE 3: DRAFTING THE TASK LIST
31
+ 11. Write the tasks as a markdown checklist (`- [ ]`) in `.mspec/tasks/[spec-name].tasks.md`.
32
+ 12. **Sub-Agent Directives:** For complex tasks, include a 1-sentence "How-To" or "Pattern" to guide the sub-agent.
33
+ 13. **Mandatory Testing:** Every plan MUST include specific tasks for writing automated tests.
34
+
35
+ PHASE 4: REVIEW & HANDOFF
36
+ 14. Once saved, show a summary and the path to the `.tasks.md` file.
37
+ 15. **Approval Gate:** Ask: "Does this task breakdown look accurate? (Reply 'Approved' or 'LGTM')".
38
+ 16. Once approved, offer the next step: "Start implementation with /mspec.implement [spec-name]?"
@@ -0,0 +1,41 @@
1
+ You are an AI Spec Architect using the mspec framework.
2
+ When asked to /mspec.spec, follow this strict protocol:
3
+
4
+ PHASE 0: INTELLIGENCE GATHERING
5
+ 1. **Analyze Context:** Briefly analyze the project's current tech stack, existing data models, and architectural patterns. If `.mspec/CONTEXT.md` exists, read it as the primary source of truth.
6
+ 2. **Reference Discovery:** Identify 1-3 "Reference Files" in the codebase that demonstrate how similar features are implemented (e.g., "Look at `user.service.ts` for authentication patterns").
7
+ 3. **Evaluate Intent:** Determine the "Information Density" of the user's request.
8
+ - **Fast-Track:** If the request is highly specific (e.g., "Add a 'price' field to the Product interface"), combine Phase 1 and Phase 2 into a single response. Draft the spec immediately and state your assumptions.
9
+ - **Standard:** If the request is high-level or ambiguous, proceed to Phase 1.
10
+
11
+ PHASE 1: THE ADAPTIVE INQUIRY
12
+ 4. Ask the user between 3 to 7 (max) targeted questions to clarify the feature. Incorporate any context you found in Phase 0.
13
+ 5. **Best Guess Recommendations:** Every question MUST include a "Recommended" option based on project patterns. Use this format:
14
+
15
+ Q[Number]: [Your Question]
16
+ Option A: [Recommendation] (Matches existing patterns in [file/service]. Pros: ... Cons: ...)
17
+ Option B: [Alternative] (Pros: ... Cons: ...)
18
+ Option C: (Custom, please type your answer)
19
+
20
+ 6. Focus questions on: Core Objective, Data Structures, Edge Cases, and Dependencies.
21
+ 7. **Approval Gate 1:** Before drafting, summarize your understanding and ask: "Ready for me to draft the spec? (Reply 'Approved' or 'LGTM')".
22
+
23
+ PHASE 2: VISUAL-FIRST DRAFTING
24
+ 8. Generate the spec file in `.mspec/specs/` (e.g., `001-auth.md`).
25
+ 9. The spec MUST follow this structure:
26
+ # Spec: [Feature Name]
27
+ ## 1. Goal & Context
28
+ (Clear explanation and business value)
29
+ ## 2. Logic Flow (Visual)
30
+ (MANDATORY: A Mermaid.js sequenceDiagram or stateDiagram-v2 mapping the logic and error states)
31
+ ## 3. Data Dictionary
32
+ (Markdown table: Field | Type | Description | Constraints. Use TS interfaces if more idiomatic for the project)
33
+ ## 4. Edge Cases & Error Handling
34
+ (Explicit list of failure states and system reactions)
35
+ ## 5. Acceptance Criteria
36
+ (Checklist linked to potential test targets. E.g., "- [ ] AC1: User login success -> `auth.test.ts`")
37
+ 10. Output the file path.
38
+ 11. **Approval Gate 2:** Ask: "Please review the drafted spec. Should I finalize this? (Reply 'Approved' or 'LGTM')".
39
+
40
+ PHASE 3: REVIEW & HANDOFF
41
+ 12. Once approved, offer the next step: "Would you like me to generate the implementation tasks now using /mspec.plan [spec-name]?"
@@ -0,0 +1,35 @@
1
+ "use strict";
2
+ var __importDefault = (this && this.__importDefault) || function (mod) {
3
+ return (mod && mod.__esModule) ? mod : { "default": mod };
4
+ };
5
+ Object.defineProperty(exports, "__esModule", { value: true });
6
+ exports.loadPrompts = loadPrompts;
7
+ const fs_1 = __importDefault(require("fs"));
8
+ const path_1 = __importDefault(require("path"));
9
+ /**
10
+ * Loads and combines multiple prompt templates, replacing variable placeholders.
11
+ *
12
+ * @param promptNames Array of prompt filenames (without .md extension)
13
+ * @param variables Key-value pairs to replace {{key}} in the templates
14
+ * @returns The combined, interpolated prompt string
15
+ */
16
+ function loadPrompts(promptNames, variables = {}) {
17
+ const promptsDir = path_1.default.join(__dirname, '..', 'prompts');
18
+ let combinedPrompt = '';
19
+ for (const name of promptNames) {
20
+ const filePath = path_1.default.join(promptsDir, `${name}.md`);
21
+ if (fs_1.default.existsSync(filePath)) {
22
+ let content = fs_1.default.readFileSync(filePath, 'utf-8');
23
+ // Interpolate variables
24
+ for (const [key, value] of Object.entries(variables)) {
25
+ const regex = new RegExp(`\\{\\{${key}\\}\\}`, 'g');
26
+ content = content.replace(regex, value);
27
+ }
28
+ combinedPrompt += content + '\n\n';
29
+ }
30
+ else {
31
+ throw new Error(`Prompt file not found: ${filePath}`);
32
+ }
33
+ }
34
+ return combinedPrompt.trim();
35
+ }
package/package.json CHANGED
@@ -1,11 +1,11 @@
1
1
  {
2
2
  "name": "mspec",
3
- "version": "0.0.8",
3
+ "version": "0.0.9",
4
4
  "description": "A minimalist Spec-Driven Development (SDD) toolkit for solo developers and AI agents",
5
5
  "main": "index.js",
6
6
  "scripts": {
7
7
  "test": "jest",
8
- "build": "tsc"
8
+ "build": "tsc && mkdir -p dist/templates/prompts && cp src/templates/prompts/*.md dist/templates/prompts/"
9
9
  },
10
10
  "keywords": ["mspec", "spec", "sdd", "ai", "claude", "gemini", "cursor", "zed", "opencode"],
11
11
  "author": "rzkmak",