foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,667 @@
|
|
|
1
|
+
"""Review commands for SDD CLI.
|
|
2
|
+
|
|
3
|
+
Provides commands for spec review including:
|
|
4
|
+
- Quick structural review (no LLM required)
|
|
5
|
+
- AI-powered full/security/feasibility reviews via ConsultationOrchestrator
|
|
6
|
+
- AI-powered fidelity reviews to compare implementation against spec
|
|
7
|
+
|
|
8
|
+
AI-enhanced reviews use:
|
|
9
|
+
- PLAN_REVIEW_FULL_V1: Comprehensive 6-dimension review
|
|
10
|
+
- PLAN_REVIEW_QUICK_V1: Critical blockers and questions focus
|
|
11
|
+
- PLAN_REVIEW_SECURITY_V1: Security-focused review
|
|
12
|
+
- PLAN_REVIEW_FEASIBILITY_V1: Technical complexity assessment
|
|
13
|
+
- SYNTHESIS_PROMPT_V1: Multi-model response synthesis
|
|
14
|
+
- FIDELITY_REVIEW_V1: Implementation vs specification comparison
|
|
15
|
+
"""
|
|
16
|
+
|
|
17
|
+
import json
|
|
18
|
+
import time
|
|
19
|
+
from pathlib import Path
|
|
20
|
+
from typing import Any, Dict, List, Optional
|
|
21
|
+
|
|
22
|
+
import click
|
|
23
|
+
|
|
24
|
+
from foundry_mcp.cli.logging import cli_command, get_cli_logger
|
|
25
|
+
from foundry_mcp.cli.output import emit_error, emit_success
|
|
26
|
+
from foundry_mcp.cli.registry import get_context
|
|
27
|
+
from foundry_mcp.cli.resilience import (
|
|
28
|
+
FAST_TIMEOUT,
|
|
29
|
+
SLOW_TIMEOUT,
|
|
30
|
+
handle_keyboard_interrupt,
|
|
31
|
+
with_sync_timeout,
|
|
32
|
+
)
|
|
33
|
+
from foundry_mcp.tools.unified.documentation_helpers import (
|
|
34
|
+
_build_implementation_artifacts,
|
|
35
|
+
_build_journal_entries,
|
|
36
|
+
_build_spec_requirements,
|
|
37
|
+
_build_test_results,
|
|
38
|
+
)
|
|
39
|
+
from foundry_mcp.tools.unified.review_helpers import (
|
|
40
|
+
DEFAULT_AI_TIMEOUT,
|
|
41
|
+
REVIEW_TYPES,
|
|
42
|
+
_get_llm_status,
|
|
43
|
+
_run_ai_review,
|
|
44
|
+
_run_quick_review,
|
|
45
|
+
)
|
|
46
|
+
from foundry_mcp.core.llm_config import get_consultation_config
|
|
47
|
+
|
|
48
|
+
logger = get_cli_logger()
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
def _emit_review_envelope(envelope: Dict[str, Any], *, duration_ms: float) -> None:
    """Emit a response-v2 envelope returned by shared review helpers."""

    if envelope.get("success") is True:
        emit_success(
            envelope.get("data", {}),
            telemetry={"duration_ms": round(duration_ms, 2)},
        )
        return

    payload = envelope.get("data") or {}

    def _unwrap(value: Any) -> Any:
        # Enum members expose their wire value via `.value`; plain strings
        # (the fallback defaults) pass through unchanged.
        return value.value if hasattr(value, "value") else value

    emit_error(
        envelope.get("error") or "Review failed",
        code=str(_unwrap(payload.get("error_code", "INTERNAL_ERROR"))),
        error_type=str(_unwrap(payload.get("error_type", "internal"))),
        remediation=payload.get("remediation"),
        details=payload.get("details"),
    )
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
# Catalogue of review tools surfaced by the `review tools` command. Only the
# quick structural review runs natively inside foundry-mcp; LLM-backed reviews
# direct callers to the external sdd-toolkit workflow named in "alternative".
REVIEW_TOOL_DEFINITIONS = [
    {
        "name": "quick-review",
        "description": "Structural validation with schema & progress checks (native).",
        "capabilities": ["structure", "progress", "quality"],
        "requires_llm": False,
    },
    {
        "name": "full-review",
        "description": "LLM-powered deep review via sdd-toolkit.",
        "capabilities": ["structure", "quality", "suggestions"],
        "requires_llm": True,
        "alternative": "sdd-toolkit:sdd-plan-review",
    },
    {
        "name": "security-review",
        "description": "Security-focused LLM analysis.",
        "capabilities": ["security", "trust_boundaries"],
        "requires_llm": True,
        "alternative": "sdd-toolkit:sdd-plan-review",
    },
    {
        "name": "feasibility-review",
        "description": "Implementation feasibility assessment (LLM).",
        "capabilities": ["complexity", "risk", "dependencies"],
        "requires_llm": True,
        "alternative": "sdd-toolkit:sdd-plan-review",
    },
]

# Fidelity review timeout (longer for AI consultation)
# Seconds; applied via @with_sync_timeout on the fidelity command.
FIDELITY_TIMEOUT = 600
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
@click.group("review")
def review_group() -> None:
    """Spec review and fidelity checking commands."""
    # Container group only; subcommands are attached via @review_group.command.
    pass
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
@review_group.command("spec")
@click.argument("spec_id")
@click.option(
    "--type",
    "review_type",
    type=click.Choice(REVIEW_TYPES),
    default=None,
    help="Type of review to perform (defaults to config value, typically 'full').",
)
@click.option(
    "--tools",
    help="Comma-separated list of review tools to use (LLM types only).",
)
@click.option(
    "--model",
    help="LLM model to use for review (LLM types only).",
)
@click.option(
    "--ai-provider",
    help="Explicit AI provider selection (e.g., gemini, cursor-agent).",
)
@click.option(
    "--ai-timeout",
    type=float,
    default=DEFAULT_AI_TIMEOUT,
    help=f"AI consultation timeout in seconds (default: {DEFAULT_AI_TIMEOUT}).",
)
@click.option(
    "--no-consultation-cache",
    is_flag=True,
    help="Bypass AI consultation cache (always query providers fresh).",
)
@click.option(
    "--dry-run",
    is_flag=True,
    help="Show what would be reviewed without executing.",
)
@click.pass_context
@cli_command("spec")
@handle_keyboard_interrupt()
@with_sync_timeout(SLOW_TIMEOUT, "Review timed out")
def review_spec_cmd(
    ctx: click.Context,
    spec_id: str,
    review_type: Optional[str],
    tools: Optional[str],
    model: Optional[str],
    ai_provider: Optional[str],
    ai_timeout: float,
    no_consultation_cache: bool,
    dry_run: bool,
) -> None:
    """Run a structural or AI-powered review on a specification."""
    # NOTE(review): the --tools option is accepted but never referenced in this
    # body; only --model is forwarded (to _run_ai_review). Confirm whether
    # `tools` is consumed by a decorator or is vestigial.
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir

    # Get default review_type from config if not provided
    if review_type is None:
        consultation_config = get_consultation_config()
        workflow_config = consultation_config.get_workflow_config("plan_review")
        review_type = workflow_config.default_review_type

    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        # NOTE(review): no explicit return here — presumably emit_error
        # terminates the command (e.g. sys.exit); confirm, otherwise the
        # review helpers below run with specs_dir=None.

    llm_status = _get_llm_status()

    # "quick" runs natively (no LLM); every other review type goes through
    # the AI consultation path.
    if review_type == "quick":
        envelope = _run_quick_review(
            spec_id=spec_id,
            specs_dir=specs_dir,
            dry_run=dry_run,
            llm_status=llm_status,
            start_time=start_time,
        )
    else:
        envelope = _run_ai_review(
            spec_id=spec_id,
            specs_dir=specs_dir,
            review_type=review_type,
            ai_provider=ai_provider,
            model=model,
            ai_timeout=ai_timeout,
            consultation_cache=not no_consultation_cache,
            dry_run=dry_run,
            llm_status=llm_status,
            start_time=start_time,
        )

    duration_ms = (time.perf_counter() - start_time) * 1000
    _emit_review_envelope(envelope, duration_ms=duration_ms)
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
@review_group.command("tools")
@click.pass_context
@cli_command("tools")
@handle_keyboard_interrupt()
@with_sync_timeout(FAST_TIMEOUT, "Review tools lookup timed out")
def review_tools_cmd(ctx: click.Context) -> None:
    """List native and external review toolchains."""
    started = time.perf_counter()

    llm_status = _get_llm_status()

    tools_info = []
    for definition in REVIEW_TOOL_DEFINITIONS:
        needs_llm = definition.get("requires_llm", False)
        # Only non-LLM reviews run natively; LLM reviews are delegated to
        # external workflows.
        is_native = not needs_llm
        entry = {
            "name": definition["name"],
            "description": definition["description"],
            "capabilities": definition.get("capabilities", []),
            "requires_llm": needs_llm,
            "available": is_native,
            "status": "native" if is_native else "external",
        }
        if needs_llm:
            entry["alternative"] = definition.get("alternative")
            entry["message"] = "Use the sdd-toolkit workflow for this review type"
        tools_info.append(entry)

    elapsed_ms = (time.perf_counter() - started) * 1000

    emit_success(
        {
            "tools": tools_info,
            "llm_status": llm_status,
            "review_types": REVIEW_TYPES,
        },
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
|
|
258
|
+
|
|
259
|
+
|
|
260
|
+
@review_group.command("plan-tools")
@click.pass_context
@cli_command("plan-tools")
@handle_keyboard_interrupt()
@with_sync_timeout(FAST_TIMEOUT, "Plan tools lookup timed out")
def review_plan_tools_cmd(ctx: click.Context) -> None:
    """List available plan review toolchains."""
    started = time.perf_counter()

    llm_status = _get_llm_status()

    # Static catalogue of plan review toolchains.
    plan_tools = [
        {
            "name": "quick-review",
            "description": "Fast structural review for basic validation",
            "capabilities": ["structure", "syntax", "basic_quality"],
            "llm_required": False,
            "estimated_time": "< 10 seconds",
        },
        {
            "name": "full-review",
            "description": "Comprehensive review with LLM analysis",
            "capabilities": ["structure", "quality", "feasibility", "suggestions"],
            "llm_required": True,
            "estimated_time": "30-60 seconds",
        },
        {
            "name": "security-review",
            "description": "Security-focused analysis of plan",
            "capabilities": ["security", "trust_boundaries", "data_flow"],
            "llm_required": True,
            "estimated_time": "30-60 seconds",
        },
        {
            "name": "feasibility-review",
            "description": "Implementation feasibility assessment",
            "capabilities": ["complexity", "dependencies", "risk"],
            "llm_required": True,
            "estimated_time": "30-60 seconds",
        },
    ]

    # Annotate each tool with availability (only quick review is native today).
    available_tools = []
    for tool in plan_tools:
        if tool["llm_required"]:
            extra = {
                "status": "external",
                "available": False,
                "reason": "Use the sdd-toolkit:sdd-plan-review workflow",
                "alternative": "sdd-toolkit:sdd-plan-review",
            }
        else:
            extra = {
                "status": "native",
                "available": True,
            }
        available_tools.append({**tool, **extra})

    recommendations = [
        "Use 'quick-review' for structural validation inside foundry-mcp",
        "Invoke sdd-toolkit:sdd-plan-review for AI-assisted plan analysis",
        "Configure LLM credentials when ready to adopt the toolkit workflow",
    ]

    elapsed_ms = (time.perf_counter() - started) * 1000

    emit_success(
        {
            "plan_tools": available_tools,
            "llm_status": llm_status,
            "recommendations": recommendations,
        },
        telemetry={"duration_ms": round(elapsed_ms, 2)},
    )
|
|
333
|
+
|
|
334
|
+
|
|
335
|
+
@review_group.command("fidelity")
@click.argument("spec_id")
@click.option(
    "--task",
    "task_id",
    help="Review specific task implementation.",
)
@click.option(
    "--phase",
    "phase_id",
    help="Review entire phase implementation.",
)
@click.option(
    "--files",
    multiple=True,
    help="Review specific file(s) only.",
)
@click.option(
    "--incremental",
    is_flag=True,
    help="Only review changed files since last run.",
)
@click.option(
    "--base-branch",
    default="main",
    help="Base branch for git diff.",
)
@click.option(
    "--ai-provider",
    help="Explicit AI provider selection (e.g., gemini, cursor-agent).",
)
@click.option(
    "--ai-timeout",
    type=float,
    default=DEFAULT_AI_TIMEOUT,
    help=f"AI consultation timeout in seconds (default: {DEFAULT_AI_TIMEOUT}).",
)
@click.option(
    "--no-consultation-cache",
    is_flag=True,
    help="Bypass AI consultation cache (always query providers fresh).",
)
@click.pass_context
@cli_command("fidelity")
@handle_keyboard_interrupt()
@with_sync_timeout(FIDELITY_TIMEOUT, "Fidelity review timed out")
def review_fidelity_cmd(
    ctx: click.Context,
    spec_id: str,
    task_id: Optional[str],
    phase_id: Optional[str],
    files: tuple,
    incremental: bool,
    base_branch: str,
    ai_provider: Optional[str],
    ai_timeout: float,
    no_consultation_cache: bool,
) -> None:
    """Compare implementation against specification.

    SPEC_ID is the specification identifier.

    Performs a fidelity review to verify that code implementation
    matches the specification requirements using the AI consultation layer.
    """
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)
    specs_dir = cli_ctx.specs_dir
    consultation_cache = not no_consultation_cache

    if specs_dir is None:
        emit_error(
            "No specs directory found",
            code="VALIDATION_ERROR",
            error_type="validation",
            remediation="Use --specs-dir option or set SDD_SPECS_DIR environment variable",
            details={"hint": "Use --specs-dir or set SDD_SPECS_DIR"},
        )
        # NOTE(review): no explicit return — presumably emit_error terminates
        # the command (e.g. sys.exit); confirm, otherwise execution continues
        # with specs_dir=None.

    # Validate mutually exclusive options
    if task_id and phase_id:
        emit_error(
            "Cannot specify both --task and --phase",
            code="INVALID_OPTIONS",
            error_type="validation",
            remediation="Use either --task or --phase, not both",
            details={"hint": "Use either --task or --phase, not both"},
        )

    llm_status = _get_llm_status()

    # Run the fidelity review. Scope (task / phase / files / full spec) is
    # derived inside _run_fidelity_review from the optional selectors.
    # (Fix: removed a dead no-op if/elif "determine scope" chain that
    # previously lived here — its branches were `pass` and a discarded
    # f-string expression, computing nothing.)
    result = _run_fidelity_review(
        spec_id=spec_id,
        task_id=task_id,
        phase_id=phase_id,
        files=list(files) if files else None,
        ai_provider=ai_provider,
        ai_timeout=ai_timeout,
        consultation_cache=consultation_cache,
        incremental=incremental,
        base_branch=base_branch,
        specs_dir=specs_dir,
        llm_status=llm_status,
        start_time=start_time,
    )

    duration_ms = (time.perf_counter() - start_time) * 1000
    emit_success(
        result,
        telemetry={"duration_ms": round(duration_ms, 2)},
    )
|
|
455
|
+
|
|
456
|
+
|
|
457
|
+
def _run_fidelity_review(
    spec_id: str,
    task_id: Optional[str],
    phase_id: Optional[str],
    files: Optional[List[str]],
    ai_provider: Optional[str],
    ai_timeout: float,
    consultation_cache: bool,
    incremental: bool,
    base_branch: str,
    specs_dir: Any,
    llm_status: Dict[str, Any],
    start_time: float,
) -> Dict[str, Any]:
    """
    Run a fidelity review using the AI consultation layer.

    Args:
        spec_id: Specification ID to review against
        task_id: Optional task ID for task-scoped review
        phase_id: Optional phase ID for phase-scoped review
        files: Optional list of files to review
        ai_provider: Explicit AI provider selection
        ai_timeout: Consultation timeout in seconds
        consultation_cache: Whether to use consultation cache
        incremental: Only review changed files
        base_branch: Base branch for git diff
        specs_dir: Path to specs directory
        llm_status: LLM configuration status
        start_time: Start time for duration tracking

    Returns:
        Dict with fidelity review results
    """

    # Import consultation layer components
    # (imported lazily so the CLI works when the AI layer is absent)
    try:
        from foundry_mcp.core.ai_consultation import (
            ConsultationOrchestrator,
            ConsultationRequest,
            ConsultationWorkflow,
        )
    except ImportError:
        emit_error(
            "AI consultation layer not available",
            code="AI_NOT_AVAILABLE",
            error_type="unavailable",
            remediation="Ensure foundry_mcp.core.ai_consultation is properly installed",
        )
        # NOTE(review): this path assumes emit_error terminates execution;
        # if it merely prints, the orchestrator names below would be unbound.

    # Load spec
    try:
        from foundry_mcp.core.spec import load_spec, find_spec_file

        spec_file = find_spec_file(spec_id, specs_dir)
        if not spec_file:
            emit_error(
                f"Specification not found: {spec_id}",
                code="SPEC_NOT_FOUND",
                error_type="not_found",
                remediation="Verify the spec ID exists using 'sdd list'",
                details={"spec_id": spec_id},
            )
        spec_data = load_spec(spec_file)
    except Exception:
        logger.exception(f"Failed to load spec {spec_id}")
        emit_error(
            "Failed to load spec",
            code="SPEC_LOAD_ERROR",
            error_type="error",
            remediation="Check that the spec file is valid JSON",
            details={"spec_id": spec_id},
        )

    # Determine review scope (human-readable label passed to the prompt)
    if task_id:
        review_scope = f"Task {task_id}"
    elif phase_id:
        review_scope = f"Phase {phase_id}"
    elif files:
        review_scope = f"Files: {', '.join(files)}"
    else:
        review_scope = "Full specification"

    # Build context for fidelity review
    spec_title = spec_data.get("title", spec_id)
    spec_description = spec_data.get("description", "")

    # Build spec requirements from task details
    spec_requirements = _build_spec_requirements(spec_data, task_id, phase_id)

    # Build implementation artifacts (file contents, git diff if incremental)
    # Workspace root is assumed to be the parent of the specs directory —
    # TODO(review): confirm this layout holds for all deployments.
    workspace_root = Path(specs_dir).parent if specs_dir else None
    implementation_artifacts = _build_implementation_artifacts(
        spec_data,
        task_id,
        phase_id,
        files,
        incremental,
        base_branch,
        workspace_root=workspace_root,
    )

    # Build test results section
    test_results = _build_test_results(spec_data, task_id, phase_id)

    # Build journal entries section
    journal_entries = _build_journal_entries(spec_data, task_id, phase_id)

    # Initialize orchestrator
    orchestrator = ConsultationOrchestrator(
        default_timeout=ai_timeout,
    )

    # Check if providers are available
    if not orchestrator.is_available(provider_id=ai_provider):
        provider_msg = f" (requested: {ai_provider})" if ai_provider else ""
        emit_error(
            f"Fidelity review requested but no providers available{provider_msg}",
            code="AI_NO_PROVIDER",
            error_type="unavailable",
            remediation="Install and configure an AI provider (gemini, cursor-agent, codex)",
            details={
                "spec_id": spec_id,
                "requested_provider": ai_provider,
                "llm_status": llm_status,
            },
        )

    # Create consultation request
    request = ConsultationRequest(
        workflow=ConsultationWorkflow.FIDELITY_REVIEW,
        prompt_id="FIDELITY_REVIEW_V1",
        context={
            "spec_id": spec_id,
            "spec_title": spec_title,
            "spec_description": f"**Description:** {spec_description}"
            if spec_description
            else "",
            "review_scope": review_scope,
            "spec_requirements": spec_requirements,
            "implementation_artifacts": implementation_artifacts,
            "test_results": test_results,
            "journal_entries": journal_entries,
        },
        provider_id=ai_provider,
        timeout=ai_timeout,
    )

    # Execute consultation
    try:
        result = orchestrator.consult(request, use_cache=consultation_cache)
    except Exception:
        logger.exception(f"AI fidelity consultation failed for {spec_id}")
        emit_error(
            "AI consultation failed",
            code="AI_CONSULTATION_ERROR",
            error_type="error",
            remediation="Check provider configuration and try again",
            details={
                "spec_id": spec_id,
                "review_scope": review_scope,
            },
        )

    # Parse JSON response if possible
    # Providers may wrap JSON in markdown fences; strip the first fenced
    # block (```json preferred, plain ``` as fallback) before parsing.
    parsed_response = None
    if result and result.content:
        try:
            # Try to extract JSON from markdown code blocks if present
            content = result.content
            if "```json" in content:
                start = content.find("```json") + 7
                end = content.find("```", start)
                if end > start:
                    content = content[start:end].strip()
            elif "```" in content:
                start = content.find("```") + 3
                end = content.find("```", start)
                if end > start:
                    content = content[start:end].strip()
            parsed_response = json.loads(content)
        except (json.JSONDecodeError, ValueError):
            # Fall back to raw content
            pass

    # Build response
    # "response" holds the parsed dict when JSON parsing succeeded, else the
    # provider's raw text; "raw_response" is populated only when parsing failed.
    return {
        "spec_id": spec_id,
        "title": spec_title,
        "review_scope": review_scope,
        "task_id": task_id,
        "phase_id": phase_id,
        "files": files,
        "verdict": parsed_response.get("verdict", "unknown")
        if parsed_response
        else "unknown",
        "llm_status": llm_status,
        "ai_provider": result.provider_id if result else ai_provider,
        "consultation_cache": consultation_cache,
        "response": parsed_response
        if parsed_response
        else result.content
        if result
        else None,
        "raw_response": result.content if result and not parsed_response else None,
        "model": result.model_used if result else None,
        "cached": result.cache_hit if result else False,
        "incremental": incremental,
        "base_branch": base_branch,
    }
|