foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,525 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow prompts for foundry-mcp.
|
|
3
|
+
|
|
4
|
+
Provides MCP prompts for common SDD workflows like starting features,
|
|
5
|
+
debugging tests, and completing phases.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from typing import Optional
|
|
10
|
+
|
|
11
|
+
from mcp.server.fastmcp import FastMCP
|
|
12
|
+
|
|
13
|
+
from foundry_mcp.config import ServerConfig
|
|
14
|
+
from foundry_mcp.core.spec import (
|
|
15
|
+
load_spec,
|
|
16
|
+
list_specs,
|
|
17
|
+
find_specs_directory,
|
|
18
|
+
)
|
|
19
|
+
from foundry_mcp.core.progress import get_progress_summary, list_phases
|
|
20
|
+
|
|
21
|
+
logger = logging.getLogger(__name__)
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Schema version for prompt responses
|
|
25
|
+
SCHEMA_VERSION = "1.0.0"
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
def register_workflow_prompts(mcp: FastMCP, config: ServerConfig) -> None:
|
|
29
|
+
"""
|
|
30
|
+
Register workflow prompts with the FastMCP server.
|
|
31
|
+
|
|
32
|
+
Args:
|
|
33
|
+
mcp: FastMCP server instance
|
|
34
|
+
config: Server configuration
|
|
35
|
+
"""
|
|
36
|
+
|
|
37
|
+
def _get_specs_dir() -> Optional[str]:
    """Resolve the specs directory, preferring the configured path.

    Falls back to auto-discovery via ``find_specs_directory()`` when the
    server config does not pin a directory. Returns the path as a string,
    or ``None`` when neither source yields a location.
    """
    resolved = config.specs_dir if config.specs_dir else find_specs_directory()
    if not resolved:
        return None
    return str(resolved)
|
|
41
|
+
|
|
42
|
+
@mcp.prompt(
    description='{"desc": "Start a new feature spec with phases and tasks", "args": {"feature_name": "Name of feature", "description": "Optional description", "template": "basic|feature|bugfix"}, "returns": "Formatted prompt"}'
)
def start_feature(
    feature_name: str, description: Optional[str] = None, template: str = "feature"
) -> str:
    """
    Start a new feature implementation.

    Creates a structured prompt to guide the AI through setting up
    a new feature spec with proper phases and tasks.

    Args:
        feature_name: Name of the feature to implement
        description: Optional description of the feature
        template: Template to use (basic, feature, bugfix)

    Returns:
        Formatted prompt for starting a new feature
    """
    specs_dir = _get_specs_dir()

    # Check for existing specs
    existing_specs = []
    if specs_dir:
        # Deferred import keeps pathlib usage local to this branch.
        from pathlib import Path

        specs = list_specs(specs_dir=Path(specs_dir), status="active")
        # Surface at most five active spec IDs so the prompt stays short.
        # NOTE(review): assumes each entry has a "spec_id" key — confirm
        # against list_specs(); a missing key would raise KeyError here.
        existing_specs = [s["spec_id"] for s in specs[:5]]

    # The prompt is assembled as a list of lines and joined once at the end.
    prompt_parts = [
        f"# Start New Feature: {feature_name}",
        "",
        "## Overview",
        f"Feature Name: {feature_name}",
    ]

    if description:
        prompt_parts.append(f"Description: {description}")

    prompt_parts.extend(
        [
            f"Template: {template}",
            "",
            "## Instructions",
            "",
            "Please help me set up a new SDD specification for this feature.",
            "",
            "### Step 1: Create the Spec",
            f"Use the `{template}` template to create a new spec file.",
            "The spec should be placed in `specs/pending/` initially.",
            "",
            "### Step 2: Define Phases",
            "Based on the feature requirements, define appropriate phases:",
        ]
    )

    # Suggest a phase breakdown matching the chosen template; any
    # unrecognized template falls through to the single-phase default.
    if template == "feature":
        prompt_parts.extend(
            [
                "1. **Design Phase**: Architecture decisions, API design, data models",
                "2. **Implementation Phase**: Core functionality, tests, documentation",
                "3. **Verification Phase**: Integration tests, manual QA, sign-off",
            ]
        )
    elif template == "bugfix":
        prompt_parts.extend(
            [
                "1. **Investigation Phase**: Reproduce bug, identify root cause",
                "2. **Fix & Verify Phase**: Implement fix, verify resolution",
            ]
        )
    else:
        prompt_parts.extend(
            [
                "1. **Implementation Phase**: Core tasks for the feature",
            ]
        )

    prompt_parts.extend(
        [
            "",
            "### Step 3: Break Down Tasks",
            "For each phase, create specific, actionable tasks with:",
            "- Clear acceptance criteria",
            "- Estimated effort (in hours)",
            "- Dependencies between tasks",
            "",
            "### Step 4: Activate When Ready",
            "Once the spec is reviewed and approved, move it to `specs/active/`.",
            "",
        ]
    )

    # Only mention in-flight specs when any were discovered above.
    if existing_specs:
        prompt_parts.extend(
            [
                "## Active Specs",
                "Note: The following specs are currently active:",
                *[f"- {spec}" for spec in existing_specs],
                "",
            ]
        )

    prompt_parts.extend(
        [
            "## Available Tools",
            "Use these MCP tools to manage the spec:",
            '- `spec(action="validate")`: Validate spec structure',
            '- `lifecycle(action="activate")`: Move spec to active',
            '- `task(action="next")`: Find next task to work on',
            "",
            "Ready to begin? Please provide more details about the feature requirements.",
        ]
    )

    return "\n".join(prompt_parts)
|
|
159
|
+
|
|
160
|
+
@mcp.prompt(
|
|
161
|
+
description='{"desc": "Debug a failing test systematically", "args": {"test_name": "Failing test name (optional)", "error_message": "Error output (optional)", "spec_id": "Related spec ID (optional)"}, "returns": "Debugging workflow prompt"}'
|
|
162
|
+
)
|
|
163
|
+
def debug_test(
    test_name: Optional[str] = None,
    error_message: Optional[str] = None,
    spec_id: Optional[str] = None,
) -> str:
    """
    Debug a failing test.

    Creates a structured prompt to guide the AI through debugging
    a test failure systematically.

    Args:
        test_name: Name of the failing test (optional)
        error_message: Error message from the test (optional)
        spec_id: Related spec ID if working on a specific task

    Returns:
        Formatted prompt for debugging a test failure
    """
    # The prompt is assembled as a list of lines and joined once at the end.
    prompt_parts = [
        "# Debug Test Failure",
        "",
        "## Problem Details",
    ]

    if test_name:
        prompt_parts.append(f"**Test Name:** `{test_name}`")
    else:
        prompt_parts.append("**Test Name:** Not specified")

    if error_message:
        prompt_parts.extend(
            [
                "",
                "**Error Message:**",
                "```",
                # Slicing is a no-op for short messages, so the previous
                # explicit len() > 500 check was redundant; cap at 500 chars
                # to keep the prompt bounded.
                error_message[:500],
                # The closing fence is unconditional here: this branch only
                # runs when error_message is truthy (the old inline
                # conditional was dead code).
                "```",
            ]
        )

    if spec_id:
        prompt_parts.append(f"**Related Spec:** {spec_id}")

    prompt_parts.extend(
        [
            "",
            "## Debugging Workflow",
            "",
            "### Step 1: Understand the Failure",
            "- What is the test trying to verify?",
            "- What was the expected behavior?",
            "- What actually happened?",
            "",
            "### Step 2: Reproduce Locally",
            "Run the test in isolation:",
            "```bash",
            f"pytest {test_name or 'path/to/test.py'} -v --tb=long",
            "```",
            "",
            "### Step 3: Identify Root Cause",
            "Common causes to check:",
            "- [ ] Missing or incorrect test fixtures",
            "- [ ] State from previous tests",
            "- [ ] Environment differences",
            "- [ ] Race conditions or timing issues",
            "- [ ] Incorrect assertions",
            "- [ ] Changed API or implementation",
            "",
            "### Step 4: Implement Fix",
            "Based on the root cause:",
            "- If test is wrong: Update the test",
            "- If code is wrong: Fix the implementation",
            "- If both: Fix both and add regression tests",
            "",
            "### Step 5: Verify",
            "- Run the fixed test",
            "- Run related tests",
            "- Run full test suite if changes are significant",
            "",
            "## Available Tools",
            "Use these MCP tools to help debug:",
            '- `test(action="run", preset="quick")`: Run tests',
            '- `test(action="discover")`: Discover available tests',
            "",
        ]
    )

    # Remind the caller to close the loop on the related spec's task state.
    if spec_id:
        prompt_parts.extend(
            [
                "## Spec Context",
                f"This test is related to spec `{spec_id}`.",
                "After fixing, remember to update the task status.",
                "",
            ]
        )

    prompt_parts.append(
        "Please provide the test output or more details about the failure."
    )

    return "\n".join(prompt_parts)
|
|
266
|
+
|
|
267
|
+
@mcp.prompt(
    description='{"desc": "Complete all tasks in a phase and move to next", "args": {"spec_id": "Specification ID", "phase_id": "Phase ID (optional, uses current)"}, "returns": "Phase completion checklist"}'
)
def complete_phase(spec_id: str, phase_id: Optional[str] = None) -> str:
    """
    Complete a phase in a specification.

    Creates a structured prompt to guide the AI through completing
    all tasks in a phase and moving to the next one.

    Args:
        spec_id: Specification ID
        phase_id: Phase ID to complete (optional, uses current if not specified)

    Returns:
        Formatted prompt for completing a phase
    """
    specs_dir = _get_specs_dir()

    # The prompt is assembled as a list of lines and joined once at the end.
    prompt_parts = [
        f"# Complete Phase for Spec: {spec_id}",
        "",
    ]

    # Try to load spec and get phase info
    # Best-effort: when the spec cannot be loaded, the generic checklist
    # below is still rendered without progress/phase details.
    spec_data = None
    phase_info = None
    progress_info = None

    if specs_dir:
        # Deferred import keeps pathlib usage local to this branch.
        from pathlib import Path

        spec_data = load_spec(spec_id, Path(specs_dir))

        if spec_data:
            progress_info = get_progress_summary(spec_data)
            phases = list_phases(spec_data)

            if phase_id:
                # Explicit phase requested: look it up by id.
                phase_info = next((p for p in phases if p["id"] == phase_id), None)
            else:
                # Find current in-progress phase
                phase_info = next(
                    (p for p in phases if p["status"] == "in_progress"), None
                )
                if not phase_info:
                    # Find first pending phase
                    phase_info = next(
                        (p for p in phases if p["status"] == "pending"), None
                    )

    # Overall progress section — only when the spec loaded successfully.
    if spec_data and progress_info:
        prompt_parts.extend(
            [
                "## Current Progress",
                f"- **Overall:** {progress_info['percentage']}% complete",
                f"- **Tasks:** {progress_info['completed_tasks']}/{progress_info['total_tasks']} done",
                "",
            ]
        )

    if phase_info:
        prompt_parts.extend(
            [
                "## Phase Details",
                f"- **Phase:** {phase_info.get('title', phase_info.get('id', 'Unknown'))}",
                f"- **Status:** {phase_info.get('status', 'unknown')}",
                f"- **Progress:** {phase_info.get('completed_tasks', 0)}/{phase_info.get('total_tasks', 0)} tasks",
                "",
            ]
        )
    elif phase_id:
        # Caller named a phase but it was not found (or spec failed to load).
        prompt_parts.extend(
            [
                f"## Phase: {phase_id}",
                "(Phase details not available)",
                "",
            ]
        )

    # Static checklist rendered regardless of whether spec data was available.
    prompt_parts.extend(
        [
            "## Completion Checklist",
            "",
            "### Step 1: Review Remaining Tasks",
            "List all pending/in-progress tasks in this phase:",
            "```bash",
            '# Use task(action="progress") to check status',
            "```",
            "",
            "### Step 2: Complete Each Task",
            "For each remaining task:",
            '1. Start the task (`task(action="start")`)',
            "2. Implement the required changes",
            "3. Verify the implementation",
            '4. Complete the task with journal entry (`task(action="complete")`)',
            "",
            "### Step 3: Run Verification",
            "Before marking the phase complete:",
            "- [ ] All tasks show status: completed",
            "- [ ] All verification tasks pass",
            "- [ ] No blockers remain",
            "- [ ] Tests pass for this phase's changes",
            "",
            "### Step 4: Phase Wrap-up",
            "Once all tasks are done:",
            "1. Review the phase journal entries",
            "2. Update any documentation",
            "3. The phase will auto-complete when all children are done",
            "",
            "### Step 5: Prepare for Next Phase",
            "After this phase completes:",
            "1. Review the next phase requirements",
            '2. Use `task(action="next")` to get the first task',
            "3. Continue the workflow",
            "",
            "## Available Tools",
            '- `task(action="progress")`: Check spec/phase progress',
            '- `task(action="next")`: Find next task to work on',
            '- `task(action="prepare")`: Get task context',
            '- `task(action="complete")`: Mark task done with journal',
            '- `test(action="run", preset="quick")`: Run verification tests',
            "",
            "Ready to proceed? Let's review the remaining tasks.",
        ]
    )

    return "\n".join(prompt_parts)
|
|
395
|
+
|
|
396
|
+
@mcp.prompt(
    description='{"desc": "Review spec status, progress, and journal", "args": {"spec_id": "Specification ID to review"}, "returns": "Spec overview with actions"}'
)
def review_spec(spec_id: str) -> str:
    """
    Review a specification's status and progress.

    Creates a comprehensive overview of a spec for review purposes.

    Args:
        spec_id: Specification ID to review

    Returns:
        Formatted prompt with spec review information
    """
    specs_dir = _get_specs_dir()

    # The prompt is assembled as a list of lines and joined once at the end.
    prompt_parts = [
        f"# Spec Review: {spec_id}",
        "",
    ]

    # Try to load spec
    spec_data = None
    if specs_dir:
        # Deferred import keeps pathlib usage local to this branch.
        from pathlib import Path

        spec_data = load_spec(spec_id, Path(specs_dir))

    # Early return with a short error prompt when the spec is unavailable.
    if not spec_data:
        prompt_parts.extend(
            [
                "**Error:** Spec not found or could not be loaded.",
                "",
                f"Please verify the spec ID `{spec_id}` is correct.",
                'Use `spec(action="list")` to see available specs.',
            ]
        )
        return "\n".join(prompt_parts)

    # Get metadata
    # Title is looked up in metadata first, then at the spec's top level.
    metadata = spec_data.get("metadata", {})
    title = metadata.get("title", spec_data.get("title", "Untitled"))

    prompt_parts.extend(
        [
            f"## {title}",
            "",
        ]
    )

    if metadata.get("description"):
        prompt_parts.extend(
            [
                "### Description",
                metadata["description"],
                "",
            ]
        )

    # Get progress
    progress_info = get_progress_summary(spec_data)
    prompt_parts.extend(
        [
            "### Progress Overview",
            f"- **Completion:** {progress_info['percentage']}%",
            f"- **Tasks:** {progress_info['completed_tasks']}/{progress_info['total_tasks']}",
            f"- **Remaining:** {progress_info.get('remaining_tasks', 0)}",
            "",
        ]
    )

    # Get phases
    phases = list_phases(spec_data)
    if phases:
        prompt_parts.extend(
            [
                "### Phases",
                "",
            ]
        )
        for phase in phases:
            # Map phase status to an emoji; unknown statuses get "❓".
            status_icon = {
                "completed": "✅",
                "in_progress": "🔄",
                "pending": "⏳",
                "blocked": "🚫",
            }.get(phase.get("status", "pending"), "❓")

            pct = phase.get("percentage", 0)
            prompt_parts.append(
                f"- {status_icon} **{phase.get('title', phase['id'])}**: "
                f"{phase.get('completed_tasks', 0)}/{phase.get('total_tasks', 0)} ({pct}%)"
            )
        prompt_parts.append("")

    # Get journal summary
    journal = spec_data.get("journal", [])
    if journal:
        recent = journal[-5:]  # Last 5 entries
        prompt_parts.extend(
            [
                "### Recent Journal Entries",
                "",
            ]
        )
        # Newest entry first.
        for entry in reversed(recent):
            entry_type = entry.get("entry_type", "note")
            # NOTE(review): rebinds `title`, shadowing the spec title above;
            # harmless here because the spec title was already rendered.
            title = entry.get("title", "Untitled")
            prompt_parts.append(f"- [{entry_type}] {title}")
        prompt_parts.append("")

    prompt_parts.extend(
        [
            "### Actions",
            "",
            "What would you like to do?",
            '1. Continue with next task (`task(action="next")`)',
            "2. View specific phase details",
            "3. Check blocked tasks",
            "4. Review journal entries",
            "5. Run verification tests",
        ]
    )

    return "\n".join(prompt_parts)
|
|
522
|
+
|
|
523
|
+
# Emitted once at registration time so startup logs record which workflow
# prompts were attached to the server.
logger.debug(
    "Registered workflow prompts: start_feature, debug_test, complete_phase, review_spec"
)
|