foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic; see the registry's advisory page for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,876 @@
|
|
|
1
|
+
"""Unified plan tooling with action routing."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import re
|
|
7
|
+
import time
|
|
8
|
+
from dataclasses import asdict
|
|
9
|
+
from pathlib import Path
|
|
10
|
+
from typing import Any, Dict, Optional
|
|
11
|
+
|
|
12
|
+
from mcp.server.fastmcp import FastMCP
|
|
13
|
+
|
|
14
|
+
from foundry_mcp.config import ServerConfig
|
|
15
|
+
from foundry_mcp.core.ai_consultation import (
|
|
16
|
+
ConsultationOrchestrator,
|
|
17
|
+
ConsultationRequest,
|
|
18
|
+
ConsultationResult,
|
|
19
|
+
ConsultationWorkflow,
|
|
20
|
+
ConsensusResult,
|
|
21
|
+
)
|
|
22
|
+
from foundry_mcp.core.llm_config import load_consultation_config
|
|
23
|
+
from foundry_mcp.core.naming import canonical_tool
|
|
24
|
+
from foundry_mcp.core.observability import get_metrics, mcp_tool
|
|
25
|
+
from foundry_mcp.core.providers import available_providers
|
|
26
|
+
from foundry_mcp.core.responses import (
|
|
27
|
+
ErrorCode,
|
|
28
|
+
ErrorType,
|
|
29
|
+
ai_no_provider_error,
|
|
30
|
+
error_response,
|
|
31
|
+
success_response,
|
|
32
|
+
)
|
|
33
|
+
from foundry_mcp.core.llm_config import load_consultation_config
|
|
34
|
+
from foundry_mcp.core.security import is_prompt_injection
|
|
35
|
+
from foundry_mcp.core.spec import find_specs_directory
|
|
36
|
+
from foundry_mcp.tools.unified.router import (
|
|
37
|
+
ActionDefinition,
|
|
38
|
+
ActionRouter,
|
|
39
|
+
ActionRouterError,
|
|
40
|
+
)
|
|
41
|
+
|
|
42
|
+
# Module-level logger and shared metrics sink for the plan tooling.
logger = logging.getLogger(__name__)
_metrics = get_metrics()

# Review modes accepted by perform_plan_review.
REVIEW_TYPES = ["quick", "full", "security", "feasibility"]
# Maps each review mode to the prompt-template id passed to the
# consultation orchestrator (used as ConsultationRequest.prompt_id).
REVIEW_TYPE_TO_TEMPLATE = {
    "full": "MARKDOWN_PLAN_REVIEW_FULL_V1",
    "quick": "MARKDOWN_PLAN_REVIEW_QUICK_V1",
    "security": "MARKDOWN_PLAN_REVIEW_SECURITY_V1",
    "feasibility": "MARKDOWN_PLAN_REVIEW_FEASIBILITY_V1",
}
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _extract_plan_name(plan_path: str) -> str:
    """Return the plan's display name: the filename without its suffix."""
    as_path = Path(plan_path)
    return as_path.stem
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _find_config_file(start_path: Path) -> Optional[Path]:
    """Locate the nearest foundry-mcp.toml at or above *start_path*.

    Starts from *start_path* itself when it is a directory, otherwise from
    its parent, and walks upward at most 10 levels (a cap that prevents
    unbounded loops), stopping early at the filesystem root.

    Returns:
        The path to the config file, or None when none is found.
    """
    directory = start_path if start_path.is_dir() else start_path.parent
    for _ in range(10):
        candidate = directory / "foundry-mcp.toml"
        if candidate.exists():
            return candidate
        if directory.parent == directory:  # filesystem root reached
            return None
        directory = directory.parent
    return None
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _parse_review_summary(content: str) -> dict:
    """Count findings per known section of a review markdown document.

    Scans for "## <Section>" headings (case-insensitive) and counts bullet
    items within each section's body. Bullets of the structured form
    "- **[tag]" are preferred; plain "- **" bullets are a fallback. A
    section containing "None identified" with at most one fallback bullet
    counts as zero.

    Returns:
        Dict with keys critical_blockers, major_suggestions,
        minor_suggestions, questions, praise mapped to item counts.
    """
    heading_to_key = {
        "Critical Blockers": "critical_blockers",
        "Major Suggestions": "major_suggestions",
        "Minor Suggestions": "minor_suggestions",
        "Questions": "questions",
        "Praise": "praise",
    }
    counts = {key: 0 for key in heading_to_key.values()}

    for heading, key in heading_to_key.items():
        section = re.search(
            rf"##\s*{heading}\s*\n(.*?)(?=\n##|\Z)",
            content,
            re.DOTALL | re.IGNORECASE,
        )
        if section is None:
            continue
        body = section.group(1)
        bullets = re.findall(r"^\s*-\s+\*\*\[", body, re.MULTILINE)
        if not bullets:
            bullets = re.findall(r"^\s*-\s+\*\*", body, re.MULTILINE)
        if "None identified" in body and len(bullets) <= 1:
            counts[key] = 0
        else:
            counts[key] = len(bullets)

    return counts
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def _format_inline_summary(summary: dict) -> str:
    """Render a findings-count dict as a short human-readable sentence.

    Zero-valued categories are omitted; an all-zero summary renders as
    "No issues identified".
    """
    labels = (
        ("critical_blockers", "critical blocker(s)"),
        ("major_suggestions", "major suggestion(s)"),
        ("minor_suggestions", "minor suggestion(s)"),
        ("questions", "question(s)"),
        ("praise", "praise item(s)"),
    )
    fragments = [f"{summary[key]} {label}" for key, label in labels if summary[key]]
    return ", ".join(fragments) if fragments else "No issues identified"
|
|
126
|
+
|
|
127
|
+
|
|
128
|
+
def _get_llm_status() -> dict:
    """Report whether any AI provider is currently available, and which ones."""
    detected = available_providers()
    return {"available": bool(detected), "providers": detected}
|
|
133
|
+
|
|
134
|
+
|
|
135
|
+
# Markdown skeletons used by perform_plan_create; each template is rendered
# with str.format, filling the "{name}" placeholder with the plan title.
# Keys ("simple", "detailed") are the valid values of the `template` argument.
PLAN_TEMPLATES = {
    "simple": """# {name}

## Objective

[Describe the primary goal of this plan]

## Scope

[What is included/excluded from this plan]

## Tasks

1. [Task 1]
2. [Task 2]
3. [Task 3]

## Success Criteria

- [ ] [Criterion 1]
- [ ] [Criterion 2]
""",
    "detailed": """# {name}

## Objective

[Describe the primary goal of this plan]

## Scope

### In Scope
- [Item 1]
- [Item 2]

### Out of Scope
- [Item 1]

## Phases

### Phase 1: [Phase Name]

**Purpose**: [Why this phase exists]

**Tasks**:
1. [Task 1]
2. [Task 2]

**Verification**: [How to verify phase completion]

### Phase 2: [Phase Name]

**Purpose**: [Why this phase exists]

**Tasks**:
1. [Task 1]
2. [Task 2]

**Verification**: [How to verify phase completion]

## Risks and Mitigations

| Risk | Impact | Mitigation |
|------|--------|------------|
| [Risk 1] | [High/Medium/Low] | [Mitigation strategy] |

## Success Criteria

- [ ] [Criterion 1]
- [ ] [Criterion 2]
- [ ] [Criterion 3]
""",
}
|
|
207
|
+
|
|
208
|
+
|
|
209
|
+
def _slugify(name: str) -> str:
    """Convert a human-readable name to a filesystem-safe slug.

    Lowercases the name, drops characters that are not word characters,
    whitespace, or hyphens, and collapses runs of whitespace/hyphens into
    a single hyphen.

    Fix: edge hyphens are stripped so names with leading/trailing
    punctuation (e.g. "- my plan") no longer yield slugs such as
    "-my-plan", which produce awkward, hidden-looking filenames.
    """
    slug = name.lower().strip()
    slug = re.sub(r"[^\w\s-]", "", slug)
    slug = re.sub(r"[-\s]+", "-", slug)
    return slug.strip("-")
|
|
215
|
+
|
|
216
|
+
|
|
217
|
+
def perform_plan_review(
    *,
    plan_path: str,
    review_type: str = "full",
    ai_provider: Optional[str] = None,
    ai_timeout: float = 360.0,
    consultation_cache: bool = True,
    dry_run: bool = False,
) -> dict:
    """Execute the plan review workflow and return serialized response.

    Validates inputs, reads the markdown plan, consults one or more AI
    providers via ConsultationOrchestrator, optionally synthesizes multiple
    provider reviews into one, writes the review under specs/.plan-reviews/,
    and returns a success/error response dict (via asdict).

    Args:
        plan_path: Path to the markdown plan; relative paths are resolved
            against the current working directory.
        review_type: One of REVIEW_TYPES ("quick", "full", "security",
            "feasibility").
        ai_provider: Optional explicit provider id; when None, provider
            selection is left to the orchestrator (presumably its configured
            priority list — see load_consultation_config).
        ai_timeout: Per-consultation timeout in seconds.
        consultation_cache: Whether the orchestrator may reuse cached
            consultation results.
        dry_run: When True, validate inputs and return without consulting AI
            or writing any files.

    Returns:
        A plain dict produced by asdict() over success_response or
        error_response.
    """

    start_time = time.perf_counter()

    # --- Input validation -------------------------------------------------
    if review_type not in REVIEW_TYPES:
        return asdict(
            error_response(
                f"Invalid review_type: {review_type}. Must be one of: {', '.join(REVIEW_TYPES)}",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation=f"Use one of: {', '.join(REVIEW_TYPES)}",
                details={"review_type": review_type, "allowed": REVIEW_TYPES},
            )
        )

    # Screen string inputs for prompt-injection patterns before they reach
    # any AI prompt; blocked attempts are counted in metrics.
    for field_name, field_value in (
        ("plan_path", plan_path),
        ("ai_provider", ai_provider),
    ):
        if field_value and is_prompt_injection(field_value):
            _metrics.counter(
                "plan_review.security_blocked",
                labels={"tool": "plan-review", "reason": "prompt_injection"},
            )
            return asdict(
                error_response(
                    f"Input validation failed for {field_name}",
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    remediation="Remove special characters or instruction-like patterns from input.",
                )
            )

    # Snapshot provider availability once; echoed in every success payload.
    llm_status = _get_llm_status()

    # --- Load the plan file ------------------------------------------------
    plan_file = Path(plan_path)
    if not plan_file.is_absolute():
        plan_file = Path.cwd() / plan_file

    if not plan_file.exists():
        _metrics.counter(
            "plan_review.errors",
            labels={"tool": "plan-review", "error_type": "not_found"},
        )
        return asdict(
            error_response(
                f"Plan file not found: {plan_path}",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Ensure the markdown plan exists at the specified path",
                details={"plan_path": plan_path},
            )
        )

    try:
        plan_content = plan_file.read_text(encoding="utf-8")
    except Exception as exc:  # pragma: no cover - filesystem errors
        _metrics.counter(
            "plan_review.errors",
            labels={"tool": "plan-review", "error_type": "read_error"},
        )
        return asdict(
            error_response(
                f"Failed to read plan file: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check file permissions and encoding",
                details={"plan_path": str(plan_file)},
            )
        )

    if not plan_content.strip():
        _metrics.counter(
            "plan_review.errors",
            labels={"tool": "plan-review", "error_type": "empty_plan"},
        )
        return asdict(
            error_response(
                "Plan file is empty",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation="Add content to the markdown plan before reviewing",
                details={"plan_path": str(plan_file)},
            )
        )

    plan_name = _extract_plan_name(plan_file.name)

    # Dry run: all validation passed; report without consulting AI.
    if dry_run:
        return asdict(
            success_response(
                data={
                    "plan_path": str(plan_file),
                    "plan_name": plan_name,
                    "review_type": review_type,
                    "dry_run": True,
                    "llm_status": llm_status,
                    "message": "Dry run - review skipped",
                },
                telemetry={
                    "duration_ms": round((time.perf_counter() - start_time) * 1000, 2)
                },
            )
        )

    if not llm_status["available"]:
        return asdict(
            ai_no_provider_error(
                "No AI provider available for plan review",
                required_providers=["gemini", "codex", "cursor-agent"],
            )
        )

    template_id = REVIEW_TYPE_TO_TEMPLATE[review_type]

    # --- AI consultation ----------------------------------------------------
    # consult() may return either a single ConsultationResult or a
    # multi-provider ConsensusResult; both shapes are handled below.
    try:
        # Load consultation config from workspace to get provider priority list
        config_file = _find_config_file(plan_file)
        consultation_config = load_consultation_config(config_file=config_file)
        orchestrator = ConsultationOrchestrator(config=consultation_config)
        request = ConsultationRequest(
            workflow=ConsultationWorkflow.MARKDOWN_PLAN_REVIEW,
            prompt_id=template_id,
            context={
                "plan_content": plan_content,
                "plan_name": plan_name,
                "plan_path": str(plan_file),
            },
            provider_id=ai_provider,
            timeout=ai_timeout,
        )
        result = orchestrator.consult(request, use_cache=consultation_cache)

        consensus_info: Optional[dict] = None
        provider_used: Optional[str] = None
        provider_reviews: list[dict[str, str]] = []

        if isinstance(result, ConsultationResult):
            # Single-provider result: use its content as the review directly.
            if not result.success:
                return asdict(
                    error_response(
                        f"AI consultation failed: {result.error}",
                        error_code=ErrorCode.AI_PROVIDER_ERROR,
                        error_type=ErrorType.AI_PROVIDER,
                        remediation="Check AI provider configuration or try again later",
                    )
                )
            review_content = result.content
            provider_used = result.provider_id
        elif isinstance(result, ConsensusResult):
            if not result.success:
                return asdict(
                    error_response(
                        "AI consultation failed - no successful responses",
                        error_code=ErrorCode.AI_PROVIDER_ERROR,
                        error_type=ErrorType.AI_PROVIDER,
                        remediation="Check AI provider configuration or try again later",
                    )
                )

            providers_consulted = [r.provider_id for r in result.responses]
            provider_used = providers_consulted[0] if providers_consulted else "unknown"

            # Extract failed provider details for visibility
            failed_providers = [
                {"provider_id": r.provider_id, "error": r.error}
                for r in result.responses
                if not r.success
            ]
            # Filter for truly successful responses (success=True AND non-empty content)
            successful_responses = [
                r for r in result.responses if r.success and r.content.strip()
            ]
            successful_providers = [r.provider_id for r in successful_responses]

            consensus_info = {
                "providers_consulted": providers_consulted,
                "successful": result.agreement.successful_providers
                if result.agreement
                else 0,
                "failed": result.agreement.failed_providers if result.agreement else 0,
                "successful_providers": successful_providers,
                "failed_providers": failed_providers,
            }

            # Save individual provider review files and optionally run synthesis
            if len(successful_responses) >= 2:
                # Multi-model mode: save per-provider files, then synthesize
                specs_dir = find_specs_directory()
                if specs_dir is None:
                    return asdict(
                        error_response(
                            "No specs directory found for storing plan review",
                            error_code=ErrorCode.NOT_FOUND,
                            error_type=ErrorType.NOT_FOUND,
                            remediation="Create a specs/ directory with pending/active/completed/archived subdirectories",
                        )
                    )

                plan_reviews_dir = specs_dir / ".plan-reviews"
                plan_reviews_dir.mkdir(parents=True, exist_ok=True)

                # Save each provider's review to a separate file
                model_reviews_text = ""
                for response in successful_responses:
                    provider_file = (
                        plan_reviews_dir
                        / f"{plan_name}-{review_type}-{response.provider_id}.md"
                    )
                    provider_file.write_text(response.content, encoding="utf-8")
                    provider_reviews.append(
                        {"provider_id": response.provider_id, "path": str(provider_file)}
                    )
                    # Accumulate all reviews into one prompt payload for synthesis.
                    model_reviews_text += (
                        f"\n---\n## Review by {response.provider_id}\n\n"
                        f"{response.content}\n"
                    )

                # Run synthesis call using first provider
                logger.info(
                    "Running synthesis for %d provider reviews: %s",
                    len(successful_responses),
                    successful_providers,
                )
                synthesis_request = ConsultationRequest(
                    workflow=ConsultationWorkflow.PLAN_REVIEW,
                    prompt_id="SYNTHESIS_PROMPT_V1",
                    context={
                        "spec_id": plan_name,
                        "title": plan_name,
                        "num_models": len(successful_responses),
                        "model_reviews": model_reviews_text,
                    },
                    provider_id=successful_providers[0],
                    timeout=ai_timeout,
                )
                # Synthesis is best-effort: a crash is logged and treated as a
                # failed synthesis (fallback below), never as a hard error.
                try:
                    synthesis_result = orchestrator.consult(
                        synthesis_request, use_cache=consultation_cache
                    )
                except Exception as e:
                    logger.error("Synthesis call crashed: %s", e, exc_info=True)
                    synthesis_result = None

                # Handle both ConsultationResult and ConsensusResult
                synthesis_success = False
                synthesis_content = None
                if synthesis_result:
                    if isinstance(synthesis_result, ConsultationResult) and synthesis_result.success:
                        synthesis_content = synthesis_result.content
                        consensus_info["synthesis_provider"] = synthesis_result.provider_id
                        synthesis_success = bool(synthesis_content and synthesis_content.strip())
                    elif isinstance(synthesis_result, ConsensusResult) and synthesis_result.success:
                        synthesis_content = synthesis_result.primary_content
                        consensus_info["synthesis_provider"] = synthesis_result.responses[0].provider_id if synthesis_result.responses else "unknown"
                        synthesis_success = bool(synthesis_content and synthesis_content.strip())

                if synthesis_success and synthesis_content:
                    review_content = synthesis_content
                else:
                    # Synthesis failed - fall back to first provider's content
                    error_detail = "unknown"
                    if synthesis_result is None:
                        error_detail = "synthesis crashed (see logs)"
                    elif isinstance(synthesis_result, ConsultationResult):
                        error_detail = synthesis_result.error or "empty response"
                    elif isinstance(synthesis_result, ConsensusResult):
                        error_detail = "empty synthesis content"
                    logger.warning(
                        "Synthesis call failed (%s), falling back to first provider's content",
                        error_detail,
                    )
                    review_content = result.primary_content
                    consensus_info["synthesis_failed"] = True
                    consensus_info["synthesis_error"] = error_detail
            else:
                # Single successful provider - use its content directly (no synthesis needed)
                review_content = result.primary_content
        else:  # pragma: no cover - defensive branch
            logger.error("Unknown consultation result type: %s", type(result))
            return asdict(
                error_response(
                    "Unsupported consultation result",
                    error_code=ErrorCode.AI_PROVIDER_ERROR,
                    error_type=ErrorType.AI_PROVIDER,
                )
            )
    except Exception as exc:  # pragma: no cover - orchestration errors
        _metrics.counter(
            "plan_review.errors",
            labels={"tool": "plan-review", "error_type": "consultation_error"},
        )
        return asdict(
            error_response(
                f"AI consultation failed: {exc}",
                error_code=ErrorCode.AI_PROVIDER_ERROR,
                error_type=ErrorType.AI_PROVIDER,
                remediation="Check AI provider configuration or try again later",
            )
        )

    # --- Summarize and persist the review -----------------------------------
    summary = _parse_review_summary(review_content)
    inline_summary = _format_inline_summary(summary)

    specs_dir = find_specs_directory()
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found for storing plan review",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Create a specs/ directory with pending/active/completed/archived subdirectories",
            )
        )

    plan_reviews_dir = specs_dir / ".plan-reviews"
    try:
        plan_reviews_dir.mkdir(parents=True, exist_ok=True)
        review_file = plan_reviews_dir / f"{plan_name}-{review_type}.md"
        review_file.write_text(review_content, encoding="utf-8")
    except Exception as exc:  # pragma: no cover - filesystem errors
        _metrics.counter(
            "plan_review.errors",
            labels={"tool": "plan-review", "error_type": "write_error"},
        )
        return asdict(
            error_response(
                f"Failed to write review file: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check write permissions for specs/.plan-reviews/ directory",
            )
        )

    duration_ms = (time.perf_counter() - start_time) * 1000
    _metrics.counter(
        "plan_review.completed",
        labels={"tool": "plan-review", "review_type": review_type},
    )

    # --- Build success payload -----------------------------------------------
    response_data = {
        "plan_path": str(plan_file),
        "plan_name": plan_name,
        "review_type": review_type,
        "review_path": str(review_file),
        "summary": summary,
        "inline_summary": inline_summary,
        "llm_status": llm_status,
        "provider_used": provider_used,
    }
    # Only present in multi-provider (consensus) runs.
    if provider_reviews:
        response_data["provider_reviews"] = provider_reviews
    if consensus_info:
        response_data["consensus"] = consensus_info

    return asdict(
        success_response(
            data=response_data,
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
|
|
587
|
+
|
|
588
|
+
|
|
589
|
+
def perform_plan_create(name: str, template: str = "detailed") -> dict:
    """Create a markdown implementation plan using the requested template.

    Renders the chosen PLAN_TEMPLATES skeleton with *name* and writes it to
    specs/.plans/<slug>.md, refusing to overwrite an existing plan.

    Args:
        name: Human-readable plan title; also slugified for the filename.
        template: Template key, either "simple" or "detailed".

    Returns:
        A dict (asdict of success_response/error_response).
    """
    t0 = time.perf_counter()

    # Guard: unknown template key.
    if template not in PLAN_TEMPLATES:
        return asdict(
            error_response(
                f"Invalid template: {template}. Must be one of: simple, detailed",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation="Use 'simple' or 'detailed' template",
                details={
                    "template": template,
                    "allowed": sorted(PLAN_TEMPLATES),
                },
            )
        )

    # Guard: block names that look like prompt-injection attempts.
    if is_prompt_injection(name):
        _metrics.counter(
            "plan_create.security_blocked",
            labels={"tool": "plan-create", "reason": "prompt_injection"},
        )
        return asdict(
            error_response(
                "Input validation failed for name",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation="Remove special characters or instruction-like patterns from input.",
            )
        )

    specs_root = find_specs_directory()
    if specs_root is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Create a specs/ directory with pending/active/completed/archived subdirectories",
            )
        )

    plans_root = specs_root / ".plans"
    try:
        plans_root.mkdir(parents=True, exist_ok=True)
    except Exception as err:
        return asdict(
            error_response(
                f"Failed to create plans directory: {err}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check write permissions for specs/.plans/ directory",
            )
        )

    slug = _slugify(name)
    target = plans_root / f"{slug}.md"

    # Never clobber an existing plan file.
    if target.exists():
        return asdict(
            error_response(
                f"Plan already exists: {target}",
                error_code=ErrorCode.DUPLICATE_ENTRY,
                error_type=ErrorType.CONFLICT,
                remediation="Use a different name or delete the existing plan",
                details={"plan_path": str(target)},
            )
        )

    rendered = PLAN_TEMPLATES[template].format(name=name)
    try:
        target.write_text(rendered, encoding="utf-8")
    except Exception as err:  # pragma: no cover - filesystem errors
        return asdict(
            error_response(
                f"Failed to write plan file: {err}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check write permissions for specs/.plans/ directory",
            )
        )

    _metrics.counter(
        "plan_create.completed",
        labels={"tool": "plan-create", "template": template},
    )

    return asdict(
        success_response(
            data={
                "plan_name": name,
                "plan_slug": slug,
                "plan_path": str(target),
                "template": template,
            },
            telemetry={"duration_ms": round((time.perf_counter() - t0) * 1000, 2)},
        )
    )
|
|
690
|
+
|
|
691
|
+
|
|
692
|
+
def perform_plan_list() -> dict:
    """List plans stored in specs/.plans and any associated reviews.

    Returns:
        A success payload whose ``data`` holds, for each ``*.md`` plan file,
        its name, path, size in bytes, mtime, the stems of any review files
        matching ``<plan>-*.md`` under specs/.plan-reviews, and a
        ``has_review`` flag; or an error payload when no specs directory
        exists.
    """
    start_time = time.perf_counter()

    specs_dir = find_specs_directory()
    if specs_dir is None:
        return asdict(
            error_response(
                "No specs directory found",
                error_code=ErrorCode.NOT_FOUND,
                error_type=ErrorType.NOT_FOUND,
                remediation="Create a specs/ directory with pending/active/completed/archived subdirectories",
            )
        )

    plans_dir = specs_dir / ".plans"
    if not plans_dir.exists():
        # A missing plans directory is not an error: report an empty listing.
        return asdict(
            success_response(
                data={"plans": [], "count": 0, "plans_dir": str(plans_dir)},
                telemetry={
                    "duration_ms": round((time.perf_counter() - start_time) * 1000, 2)
                },
            )
        )

    plans = []
    for plan_file in sorted(plans_dir.glob("*.md")):
        stat = plan_file.stat()
        plans.append(
            {
                "name": plan_file.stem,
                "path": str(plan_file),
                "size_bytes": stat.st_size,
                "modified": stat.st_mtime,
            }
        )

    # Hoisted out of the loop: the directory's existence is loop-invariant,
    # and the original re-checked it once per plan.
    reviews_dir = specs_dir / ".plan-reviews"
    have_reviews_dir = reviews_dir.exists()
    for plan in plans:
        plan_name = plan["name"]
        if have_reviews_dir:
            review_files = list(reviews_dir.glob(f"{plan_name}-*.md"))
        else:
            review_files = []
        plan["reviews"] = [rf.stem for rf in review_files]
        plan["has_review"] = bool(review_files)

    duration_ms = (time.perf_counter() - start_time) * 1000
    _metrics.counter("plan_list.completed", labels={"tool": "plan-list"})

    return asdict(
        success_response(
            data={"plans": plans, "count": len(plans), "plans_dir": str(plans_dir)},
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
    )
|
|
750
|
+
|
|
751
|
+
|
|
752
|
+
# Human-readable one-line summaries for each plan action, surfaced through
# the action router's ActionDefinition entries below.
_ACTION_SUMMARY = {
    "create": "Create markdown plan templates in specs/.plans",
    "list": "Enumerate existing markdown plans and review coverage",
    "review": "Run AI-assisted review workflows for markdown plans",
}
|
|
757
|
+
|
|
758
|
+
|
|
759
|
+
def _handle_plan_create(**payload: Any) -> dict:
    """Validate the create payload and delegate to :func:`perform_plan_create`."""
    plan_name = payload.get("name")
    if not plan_name:
        return asdict(
            error_response(
                "Missing required parameter 'name' for plan.create",
                error_code=ErrorCode.MISSING_REQUIRED,
                error_type=ErrorType.VALIDATION,
                remediation="Provide a plan name when action=create",
            )
        )
    return perform_plan_create(
        name=plan_name,
        template=payload.get("template", "detailed"),
    )
|
|
772
|
+
|
|
773
|
+
|
|
774
|
+
def _handle_plan_list(**_: Any) -> dict:
    """Delegate to :func:`perform_plan_list`; the list action takes no parameters."""
    return perform_plan_list()
|
|
776
|
+
|
|
777
|
+
|
|
778
|
+
def _handle_plan_review(**payload: Any) -> dict:
    """Validate the review payload and delegate to :func:`perform_plan_review`."""
    target = payload.get("plan_path")
    if target:
        return perform_plan_review(
            plan_path=target,
            review_type=payload.get("review_type", "full"),
            ai_provider=payload.get("ai_provider"),
            ai_timeout=payload.get("ai_timeout", 120.0),
            consultation_cache=payload.get("consultation_cache", True),
            dry_run=payload.get("dry_run", False),
        )
    return asdict(
        error_response(
            "Missing required parameter 'plan_path' for plan.review",
            error_code=ErrorCode.MISSING_REQUIRED,
            error_type=ErrorType.VALIDATION,
            remediation="Provide a markdown plan path when action=review",
        )
    )
|
|
797
|
+
|
|
798
|
+
|
|
799
|
+
# Router wiring each plan action (plus the legacy "plan-review" alias) to its
# handler; dispatch happens via _dispatch_plan_action below.
_PLAN_ROUTER = ActionRouter(
    tool_name="plan",
    actions=[
        ActionDefinition(
            name="create",
            handler=_handle_plan_create,
            summary=_ACTION_SUMMARY["create"],
        ),
        ActionDefinition(
            name="list", handler=_handle_plan_list, summary=_ACTION_SUMMARY["list"]
        ),
        ActionDefinition(
            name="review",
            handler=_handle_plan_review,
            summary=_ACTION_SUMMARY["review"],
            # Backwards-compatible alias for the pre-consolidation tool name.
            aliases=("plan-review",),
        ),
    ],
)
|
|
818
|
+
|
|
819
|
+
|
|
820
|
+
def _dispatch_plan_action(action: str, payload: Dict[str, Any]) -> dict:
    """Route *action* through the plan router, mapping unknown actions to errors."""
    try:
        return _PLAN_ROUTER.dispatch(action=action, **payload)
    except ActionRouterError as exc:
        # Surface the router's own list of valid actions in the response.
        valid = ", ".join(exc.allowed_actions)
        return asdict(
            error_response(
                f"Unsupported plan action '{action}'. Allowed actions: {valid}",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation=f"Use one of: {valid}",
            )
        )
|
|
833
|
+
|
|
834
|
+
|
|
835
|
+
def register_unified_plan_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated plan tool."""

    @canonical_tool(
        mcp,
        canonical_name="plan",
    )
    @mcp_tool(tool_name="plan", emit_metrics=True, audit=True)
    def plan(
        action: str,
        name: Optional[str] = None,
        template: str = "detailed",
        plan_path: Optional[str] = None,
        review_type: str = "full",
        ai_provider: Optional[str] = None,
        ai_timeout: float = 120.0,
        consultation_cache: bool = True,
        dry_run: bool = False,
    ) -> dict:
        """Execute plan workflows via the action router."""
        # Per-action handlers pull only the keys they need from this payload.
        return _dispatch_plan_action(
            action=action,
            payload={
                "name": name,
                "template": template,
                "plan_path": plan_path,
                "review_type": review_type,
                "ai_provider": ai_provider,
                "ai_timeout": ai_timeout,
                "consultation_cache": consultation_cache,
                "dry_run": dry_run,
            },
        )

    logger.debug("Registered unified plan tool")
|
|
869
|
+
|
|
870
|
+
|
|
871
|
+
# Public API of this module: the registration entry point plus the three
# action implementations, which are also imported directly by the CLI.
__all__ = [
    "register_unified_plan_tool",
    "perform_plan_review",
    "perform_plan_create",
    "perform_plan_list",
]
|