foundry-mcp 0.8.22__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of foundry-mcp might be problematic.

Files changed (153)
  1. foundry_mcp/__init__.py +13 -0
  2. foundry_mcp/cli/__init__.py +67 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +640 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +667 -0
  15. foundry_mcp/cli/commands/session.py +472 -0
  16. foundry_mcp/cli/commands/specs.py +686 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +298 -0
  22. foundry_mcp/cli/logging.py +212 -0
  23. foundry_mcp/cli/main.py +44 -0
  24. foundry_mcp/cli/output.py +122 -0
  25. foundry_mcp/cli/registry.py +110 -0
  26. foundry_mcp/cli/resilience.py +178 -0
  27. foundry_mcp/cli/transcript.py +217 -0
  28. foundry_mcp/config.py +1454 -0
  29. foundry_mcp/core/__init__.py +144 -0
  30. foundry_mcp/core/ai_consultation.py +1773 -0
  31. foundry_mcp/core/batch_operations.py +1202 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/health.py +749 -0
  40. foundry_mcp/core/intake.py +933 -0
  41. foundry_mcp/core/journal.py +700 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1376 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +146 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +387 -0
  57. foundry_mcp/core/prometheus.py +564 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +691 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
  61. foundry_mcp/core/prompts/plan_review.py +627 -0
  62. foundry_mcp/core/providers/__init__.py +237 -0
  63. foundry_mcp/core/providers/base.py +515 -0
  64. foundry_mcp/core/providers/claude.py +472 -0
  65. foundry_mcp/core/providers/codex.py +637 -0
  66. foundry_mcp/core/providers/cursor_agent.py +630 -0
  67. foundry_mcp/core/providers/detectors.py +515 -0
  68. foundry_mcp/core/providers/gemini.py +426 -0
  69. foundry_mcp/core/providers/opencode.py +718 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +308 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +857 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/research/__init__.py +68 -0
  78. foundry_mcp/core/research/memory.py +528 -0
  79. foundry_mcp/core/research/models.py +1234 -0
  80. foundry_mcp/core/research/providers/__init__.py +40 -0
  81. foundry_mcp/core/research/providers/base.py +242 -0
  82. foundry_mcp/core/research/providers/google.py +507 -0
  83. foundry_mcp/core/research/providers/perplexity.py +442 -0
  84. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  85. foundry_mcp/core/research/providers/tavily.py +383 -0
  86. foundry_mcp/core/research/workflows/__init__.py +25 -0
  87. foundry_mcp/core/research/workflows/base.py +298 -0
  88. foundry_mcp/core/research/workflows/chat.py +271 -0
  89. foundry_mcp/core/research/workflows/consensus.py +539 -0
  90. foundry_mcp/core/research/workflows/deep_research.py +4142 -0
  91. foundry_mcp/core/research/workflows/ideate.py +682 -0
  92. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  93. foundry_mcp/core/resilience.py +600 -0
  94. foundry_mcp/core/responses.py +1624 -0
  95. foundry_mcp/core/review.py +366 -0
  96. foundry_mcp/core/security.py +438 -0
  97. foundry_mcp/core/spec.py +4119 -0
  98. foundry_mcp/core/task.py +2463 -0
  99. foundry_mcp/core/testing.py +839 -0
  100. foundry_mcp/core/validation.py +2357 -0
  101. foundry_mcp/dashboard/__init__.py +32 -0
  102. foundry_mcp/dashboard/app.py +119 -0
  103. foundry_mcp/dashboard/components/__init__.py +17 -0
  104. foundry_mcp/dashboard/components/cards.py +88 -0
  105. foundry_mcp/dashboard/components/charts.py +177 -0
  106. foundry_mcp/dashboard/components/filters.py +136 -0
  107. foundry_mcp/dashboard/components/tables.py +195 -0
  108. foundry_mcp/dashboard/data/__init__.py +11 -0
  109. foundry_mcp/dashboard/data/stores.py +433 -0
  110. foundry_mcp/dashboard/launcher.py +300 -0
  111. foundry_mcp/dashboard/views/__init__.py +12 -0
  112. foundry_mcp/dashboard/views/errors.py +217 -0
  113. foundry_mcp/dashboard/views/metrics.py +164 -0
  114. foundry_mcp/dashboard/views/overview.py +96 -0
  115. foundry_mcp/dashboard/views/providers.py +83 -0
  116. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  117. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  118. foundry_mcp/prompts/__init__.py +9 -0
  119. foundry_mcp/prompts/workflows.py +525 -0
  120. foundry_mcp/resources/__init__.py +9 -0
  121. foundry_mcp/resources/specs.py +591 -0
  122. foundry_mcp/schemas/__init__.py +38 -0
  123. foundry_mcp/schemas/intake-schema.json +89 -0
  124. foundry_mcp/schemas/sdd-spec-schema.json +414 -0
  125. foundry_mcp/server.py +150 -0
  126. foundry_mcp/tools/__init__.py +10 -0
  127. foundry_mcp/tools/unified/__init__.py +92 -0
  128. foundry_mcp/tools/unified/authoring.py +3620 -0
  129. foundry_mcp/tools/unified/context_helpers.py +98 -0
  130. foundry_mcp/tools/unified/documentation_helpers.py +268 -0
  131. foundry_mcp/tools/unified/environment.py +1341 -0
  132. foundry_mcp/tools/unified/error.py +479 -0
  133. foundry_mcp/tools/unified/health.py +225 -0
  134. foundry_mcp/tools/unified/journal.py +841 -0
  135. foundry_mcp/tools/unified/lifecycle.py +640 -0
  136. foundry_mcp/tools/unified/metrics.py +777 -0
  137. foundry_mcp/tools/unified/plan.py +876 -0
  138. foundry_mcp/tools/unified/pr.py +294 -0
  139. foundry_mcp/tools/unified/provider.py +589 -0
  140. foundry_mcp/tools/unified/research.py +1283 -0
  141. foundry_mcp/tools/unified/review.py +1042 -0
  142. foundry_mcp/tools/unified/review_helpers.py +314 -0
  143. foundry_mcp/tools/unified/router.py +102 -0
  144. foundry_mcp/tools/unified/server.py +565 -0
  145. foundry_mcp/tools/unified/spec.py +1283 -0
  146. foundry_mcp/tools/unified/task.py +3846 -0
  147. foundry_mcp/tools/unified/test.py +431 -0
  148. foundry_mcp/tools/unified/verification.py +520 -0
  149. foundry_mcp-0.8.22.dist-info/METADATA +344 -0
  150. foundry_mcp-0.8.22.dist-info/RECORD +153 -0
  151. foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
  152. foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
  153. foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/tools/unified/review.py
@@ -0,0 +1,1042 @@
+ """Unified review tooling with action routing.
+
+ Consolidates spec review, review tool discovery, and fidelity review
+ into a single `review(action=...)` entry point.
+ """
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import time
+ from datetime import datetime
+ from dataclasses import asdict
+ from pathlib import Path
+ from typing import Any, Dict, List, Optional
+
+ from mcp.server.fastmcp import FastMCP
+
+ from foundry_mcp.config import ServerConfig
+ from foundry_mcp.core.ai_consultation import (
+     ConsultationOrchestrator,
+     ConsultationRequest,
+     ConsultationResult,
+     ConsultationWorkflow,
+     ConsensusResult,
+ )
+ from foundry_mcp.core.prompts.fidelity_review import (
+     FIDELITY_SYNTHESIZED_RESPONSE_SCHEMA,
+ )
+ from foundry_mcp.core.llm_config import get_consultation_config, load_consultation_config
+ from foundry_mcp.core.naming import canonical_tool
+ from foundry_mcp.core.observability import get_metrics, mcp_tool
+ from foundry_mcp.core.providers import get_provider_statuses
+ from foundry_mcp.core.responses import (
+     ErrorCode,
+     ErrorType,
+     error_response,
+     success_response,
+ )
+ from foundry_mcp.core.security import is_prompt_injection
+ from foundry_mcp.core.spec import find_spec_file, find_specs_directory, load_spec
+ from .documentation_helpers import (
+     _build_implementation_artifacts,
+     _build_journal_entries,
+     _build_spec_requirements,
+     _build_test_results,
+ )
+ from .review_helpers import (
+     DEFAULT_AI_TIMEOUT,
+     REVIEW_TYPES,
+     _get_llm_status,
+     _run_ai_review,
+     _run_quick_review,
+ )
+ from foundry_mcp.tools.unified.router import (
+     ActionDefinition,
+     ActionRouter,
+     ActionRouterError,
+ )
+
+ logger = logging.getLogger(__name__)
+ _metrics = get_metrics()
+
+
+ def _parse_json_content(content: str) -> Optional[dict]:
+     if not content:
+         return None
+
+     candidate = content
+     if "```json" in candidate:
+         start = candidate.find("```json") + 7
+         end = candidate.find("```", start)
+         if end > start:
+             candidate = candidate[start:end].strip()
+     elif "```" in candidate:
+         start = candidate.find("```") + 3
+         end = candidate.find("```", start)
+         if end > start:
+             candidate = candidate[start:end].strip()
+
+     try:
+         parsed = json.loads(candidate)
+     except (json.JSONDecodeError, TypeError, ValueError):
+         return None
+
+     return parsed if isinstance(parsed, dict) else None
+
+
+ def _handle_spec_review(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+     spec_id = payload.get("spec_id")
+     # Get default review_type from consultation config (used when not provided or None)
+     consultation_config = get_consultation_config()
+     workflow_config = consultation_config.get_workflow_config("plan_review")
+     default_review_type = workflow_config.default_review_type
+     review_type = payload.get("review_type") or default_review_type
+
+     if not isinstance(spec_id, str) or not spec_id.strip():
+         return asdict(
+             error_response(
+                 "spec_id is required",
+                 error_code=ErrorCode.MISSING_REQUIRED,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide a valid spec_id",
+             )
+         )
+
+     if review_type not in REVIEW_TYPES:
+         return asdict(
+             error_response(
+                 f"Invalid review_type: {review_type}",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation=f"Use one of: {', '.join(REVIEW_TYPES)}",
+             )
+         )
+
+     start_time = time.perf_counter()
+     llm_status = _get_llm_status()
+
+     path = payload.get("path")
+     ai_provider = payload.get("ai_provider")
+     model = payload.get("model")
+
+     for field_name, field_value in [
+         ("spec_id", spec_id),
+         ("path", path),
+         ("ai_provider", ai_provider),
+         ("model", model),
+     ]:
+         if (
+             field_value
+             and isinstance(field_value, str)
+             and is_prompt_injection(field_value)
+         ):
+             return asdict(
+                 error_response(
+                     f"Input validation failed for {field_name}",
+                     error_code=ErrorCode.VALIDATION_ERROR,
+                     error_type=ErrorType.VALIDATION,
+                     remediation="Remove instruction-like patterns from input.",
+                 )
+             )
+
+     specs_dir = None
+     if isinstance(path, str) and path.strip():
+         candidate = Path(path)
+         if candidate.is_dir():
+             specs_dir = candidate
+         elif candidate.is_file():
+             specs_dir = candidate.parent
+         else:
+             return asdict(
+                 error_response(
+                     f"Invalid path: {path}",
+                     error_code=ErrorCode.VALIDATION_ERROR,
+                     error_type=ErrorType.VALIDATION,
+                     remediation="Provide an existing directory or spec file path.",
+                 )
+             )
+     else:
+         specs_dir = config.specs_dir
+
+     dry_run_value = payload.get("dry_run", False)
+     if dry_run_value is not None and not isinstance(dry_run_value, bool):
+         return asdict(
+             error_response(
+                 "dry_run must be a boolean",
+                 error_code=ErrorCode.INVALID_FORMAT,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide dry_run=true|false",
+                 details={"field": "dry_run"},
+             )
+         )
+     dry_run = dry_run_value if isinstance(dry_run_value, bool) else False
+
+     if review_type == "quick":
+         return _run_quick_review(
+             spec_id=spec_id,
+             specs_dir=specs_dir,
+             dry_run=dry_run,
+             llm_status=llm_status,
+             start_time=start_time,
+         )
+
+     try:
+         ai_timeout = float(payload.get("ai_timeout", DEFAULT_AI_TIMEOUT))
+     except (TypeError, ValueError):
+         return asdict(
+             error_response(
+                 "ai_timeout must be a number",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide ai_timeout as a float (seconds).",
+             )
+         )
+
+     if ai_timeout <= 0:
+         return asdict(
+             error_response(
+                 "ai_timeout must be greater than 0",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide ai_timeout as a positive number of seconds.",
+             )
+         )
+
+     consultation_cache_value = payload.get("consultation_cache", True)
+     if consultation_cache_value is not None and not isinstance(
+         consultation_cache_value, bool
+     ):
+         return asdict(
+             error_response(
+                 "consultation_cache must be a boolean",
+                 error_code=ErrorCode.INVALID_FORMAT,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide consultation_cache=true|false",
+                 details={"field": "consultation_cache"},
+             )
+         )
+     consultation_cache = (
+         consultation_cache_value if isinstance(consultation_cache_value, bool) else True
+     )
+
+     return _run_ai_review(
+         spec_id=spec_id,
+         specs_dir=specs_dir,
+         review_type=review_type,
+         ai_provider=ai_provider,
+         model=model,
+         ai_timeout=ai_timeout,
+         consultation_cache=consultation_cache,
+         dry_run=dry_run,
+         llm_status=llm_status,
+         start_time=start_time,
+     )
+
+
+ def _handle_list_tools(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+     start_time = time.perf_counter()
+
+     try:
+         llm_status = _get_llm_status()
+
+         provider_statuses = get_provider_statuses()
+         tools_info = [
+             {
+                 "name": provider_id,
+                 "available": is_available,
+                 "status": "available" if is_available else "unavailable",
+                 "reason": None,
+                 "checked_at": None,
+             }
+             for provider_id, is_available in provider_statuses.items()
+         ]
+
+         duration_ms = (time.perf_counter() - start_time) * 1000
+         _metrics.timer("review.review_list_tools.duration_ms", duration_ms)
+
+         return asdict(
+             success_response(
+                 tools=tools_info,
+                 llm_status=llm_status,
+                 review_types=REVIEW_TYPES,
+                 available_count=sum(1 for tool in tools_info if tool.get("available")),
+                 total_count=len(tools_info),
+                 telemetry={"duration_ms": round(duration_ms, 2)},
+             )
+         )
+
+     except Exception as exc:
+         logger.exception("Error listing review tools")
+         return asdict(
+             error_response(
+                 f"Error listing review tools: {exc}",
+                 error_code=ErrorCode.INTERNAL_ERROR,
+                 error_type=ErrorType.INTERNAL,
+                 remediation="Check logs for details",
+             )
+         )
+
+
+ def _handle_list_plan_tools(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+     start_time = time.perf_counter()
+
+     try:
+         llm_status = _get_llm_status()
+
+         plan_tools = [
+             {
+                 "name": "quick-review",
+                 "description": "Fast structural review for basic validation",
+                 "capabilities": ["structure", "syntax", "basic_quality"],
+                 "llm_required": False,
+                 "estimated_time": "< 10 seconds",
+             },
+             {
+                 "name": "full-review",
+                 "description": "Comprehensive review with LLM analysis",
+                 "capabilities": ["structure", "quality", "feasibility", "suggestions"],
+                 "llm_required": True,
+                 "estimated_time": "30-60 seconds",
+             },
+             {
+                 "name": "security-review",
+                 "description": "Security-focused analysis of plan",
+                 "capabilities": ["security", "trust_boundaries", "data_flow"],
+                 "llm_required": True,
+                 "estimated_time": "30-60 seconds",
+             },
+             {
+                 "name": "feasibility-review",
+                 "description": "Feasibility and complexity assessment",
+                 "capabilities": ["complexity", "estimation", "risks"],
+                 "llm_required": True,
+                 "estimated_time": "30-60 seconds",
+             },
+         ]
+
+         recommendations = [
+             "Use 'quick-review' for a fast sanity check.",
+             "Use 'full-review' before implementation for comprehensive feedback.",
+             "Use 'security-review' for specs touching auth/data boundaries.",
+             "Use 'feasibility-review' to validate scope/estimates.",
+         ]
+
+         duration_ms = (time.perf_counter() - start_time) * 1000
+         _metrics.timer("review.review_list_plan_tools.duration_ms", duration_ms)
+
+         return asdict(
+             success_response(
+                 plan_tools=plan_tools,
+                 llm_status=llm_status,
+                 recommendations=recommendations,
+                 telemetry={"duration_ms": round(duration_ms, 2)},
+             )
+         )
+
+     except Exception as exc:
+         logger.exception("Error listing plan review tools")
+         return asdict(
+             error_response(
+                 f"Error listing plan review tools: {exc}",
+                 error_code=ErrorCode.INTERNAL_ERROR,
+                 error_type=ErrorType.INTERNAL,
+                 remediation="Check logs for details",
+             )
+         )
+
+
+ def _handle_parse_feedback(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+     spec_id = payload.get("spec_id")
+     review_path = payload.get("review_path")
+     output_path = payload.get("output_path")
+
+     return asdict(
+         error_response(
+             "Review feedback parsing requires complex text/markdown parsing. "
+             "Use the sdd-toolkit:sdd-modify skill to apply review feedback.",
+             error_code=ErrorCode.UNAVAILABLE,
+             error_type=ErrorType.UNAVAILABLE,
+             data={
+                 "spec_id": spec_id,
+                 "review_path": review_path,
+                 "output_path": output_path,
+                 "alternative": "sdd-toolkit:sdd-modify skill",
+                 "feature_status": "requires_complex_parsing",
+             },
+             remediation="Use the sdd-toolkit:sdd-modify skill for parsing support.",
+         )
+     )
+
+
+ def _format_fidelity_markdown(
+     parsed: Dict[str, Any],
+     spec_id: str,
+     spec_title: str,
+     scope: str,
+     task_id: Optional[str] = None,
+     phase_id: Optional[str] = None,
+     provider_id: Optional[str] = None,
+ ) -> str:
+     """Format fidelity review JSON as human-readable markdown."""
+     # Build scope detail
+     scope_detail = scope
+     if task_id:
+         scope_detail += f" (task: {task_id})"
+     elif phase_id:
+         scope_detail += f" (phase: {phase_id})"
+
+     lines = [
+         f"# Fidelity Review: {spec_title}",
+         "",
+         f"**Spec ID:** {spec_id}",
+         f"**Scope:** {scope_detail}",
+         f"**Verdict:** {parsed.get('verdict', 'unknown')}",
+         f"**Date:** {datetime.now().isoformat()}",
+     ]
+     if provider_id:
+         lines.append(f"**Provider:** {provider_id}")
+     lines.append("")
+
+     # Summary section
+     if parsed.get("summary"):
+         lines.extend(["## Summary", "", parsed["summary"], ""])
+
+     # Requirement Alignment
+     req_align = parsed.get("requirement_alignment", {})
+     if req_align:
+         lines.extend([
+             "## Requirement Alignment",
+             f"**Status:** {req_align.get('answer', 'unknown')}",
+             "",
+             req_align.get("details", ""),
+             "",
+         ])
+
+     # Success Criteria
+     success = parsed.get("success_criteria", {})
+     if success:
+         lines.extend([
+             "## Success Criteria",
+             f"**Status:** {success.get('met', 'unknown')}",
+             "",
+             success.get("details", ""),
+             "",
+         ])
+
+     # Deviations
+     deviations = parsed.get("deviations", [])
+     if deviations:
+         lines.extend(["## Deviations", ""])
+         for dev in deviations:
+             severity = dev.get("severity", "unknown")
+             description = dev.get("description", "")
+             justification = dev.get("justification", "")
+             lines.append(f"- **[{severity.upper()}]** {description}")
+             if justification:
+                 lines.append(f"  - Justification: {justification}")
+         lines.append("")
+
+     # Test Coverage
+     test_cov = parsed.get("test_coverage", {})
+     if test_cov:
+         lines.extend([
+             "## Test Coverage",
+             f"**Status:** {test_cov.get('status', 'unknown')}",
+             "",
+             test_cov.get("details", ""),
+             "",
+         ])
+
+     # Code Quality
+     code_quality = parsed.get("code_quality", {})
+     if code_quality:
+         lines.extend(["## Code Quality", ""])
+         if code_quality.get("details"):
+             lines.append(code_quality["details"])
+             lines.append("")
+         for issue in code_quality.get("issues", []):
+             lines.append(f"- {issue}")
+         lines.append("")
+
+     # Documentation
+     doc = parsed.get("documentation", {})
+     if doc:
+         lines.extend([
+             "## Documentation",
+             f"**Status:** {doc.get('status', 'unknown')}",
+             "",
+             doc.get("details", ""),
+             "",
+         ])
+
+     # Issues
+     issues = parsed.get("issues", [])
+     if issues:
+         lines.extend(["## Issues", ""])
+         for issue in issues:
+             lines.append(f"- {issue}")
+         lines.append("")
+
+     # Recommendations
+     recommendations = parsed.get("recommendations", [])
+     if recommendations:
+         lines.extend(["## Recommendations", ""])
+         for rec in recommendations:
+             lines.append(f"- {rec}")
+         lines.append("")
+
+     # Verdict consensus (if synthesized)
+     verdict_consensus = parsed.get("verdict_consensus", {})
+     if verdict_consensus:
+         lines.extend(["## Verdict Consensus", ""])
+         votes = verdict_consensus.get("votes", {})
+         for verdict_type, models in votes.items():
+             if models:
+                 lines.append(f"- **{verdict_type}:** {', '.join(models)}")
+         agreement = verdict_consensus.get("agreement_level", "")
+         if agreement:
+             lines.append(f"\n**Agreement Level:** {agreement}")
+         notes = verdict_consensus.get("notes", "")
+         if notes:
+             lines.extend(["", notes])
+         lines.append("")
+
+     # Synthesis metadata
+     synth_meta = parsed.get("synthesis_metadata", {})
+     if synth_meta:
+         lines.extend(["## Synthesis Metadata", ""])
+         if synth_meta.get("models_consulted"):
+             lines.append(f"- Models consulted: {', '.join(synth_meta['models_consulted'])}")
+         if synth_meta.get("models_succeeded"):
+             lines.append(f"- Models succeeded: {', '.join(synth_meta['models_succeeded'])}")
+         if synth_meta.get("synthesis_provider"):
+             lines.append(f"- Synthesis provider: {synth_meta['synthesis_provider']}")
+         lines.append("")
+
+     lines.extend([
+         "---",
+         "*Generated by Foundry MCP Fidelity Review*",
+     ])
+
+     return "\n".join(lines)
+
+
+ def _handle_fidelity(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+     """Best-effort fidelity review.
+
+     Note: the canonical `spec-review-fidelity` tool remains the source of truth
+     for fidelity review behavior; this action is primarily to support the
+     consolidated manifest.
+     """
+
+     start_time = time.perf_counter()
+     spec_id = payload.get("spec_id")
+     task_id = payload.get("task_id")
+     phase_id = payload.get("phase_id")
+     files = payload.get("files")
+     ai_tools = payload.get("ai_tools")
+     model = payload.get("model")
+     consensus_threshold = payload.get("consensus_threshold", 2)
+     incremental_value = payload.get("incremental", False)
+     if incremental_value is not None and not isinstance(incremental_value, bool):
+         return asdict(
+             error_response(
+                 "incremental must be a boolean",
+                 error_code=ErrorCode.INVALID_FORMAT,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide incremental=true|false",
+                 details={"field": "incremental"},
+             )
+         )
+     incremental = incremental_value if isinstance(incremental_value, bool) else False
+
+     include_tests_value = payload.get("include_tests", True)
+     if include_tests_value is not None and not isinstance(include_tests_value, bool):
+         return asdict(
+             error_response(
+                 "include_tests must be a boolean",
+                 error_code=ErrorCode.INVALID_FORMAT,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide include_tests=true|false",
+                 details={"field": "include_tests"},
+             )
+         )
+     include_tests = (
+         include_tests_value if isinstance(include_tests_value, bool) else True
+     )
+     base_branch = payload.get("base_branch", "main")
+     workspace = payload.get("workspace")
+
+     if not isinstance(spec_id, str) or not spec_id:
+         return asdict(
+             error_response(
+                 "Specification ID is required",
+                 error_code=ErrorCode.MISSING_REQUIRED,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide a valid spec_id to review.",
+             )
+         )
+
+     if task_id and phase_id:
+         return asdict(
+             error_response(
+                 "Cannot specify both task_id and phase_id",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Provide either task_id OR phase_id, not both.",
+             )
+         )
+
+     if (
+         not isinstance(consensus_threshold, int)
+         or consensus_threshold < 1
+         or consensus_threshold > 5
+     ):
+         return asdict(
+             error_response(
+                 f"Invalid consensus_threshold: {consensus_threshold}. Must be between 1 and 5.",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation="Use a consensus_threshold between 1 and 5.",
+             )
+         )
+
+     for field_name, field_value in [
+         ("spec_id", spec_id),
+         ("task_id", task_id),
+         ("phase_id", phase_id),
+         ("model", model),
+         ("base_branch", base_branch),
+         ("workspace", workspace),
+     ]:
+         if (
+             field_value
+             and isinstance(field_value, str)
+             and is_prompt_injection(field_value)
+         ):
+             return asdict(
+                 error_response(
+                     f"Input validation failed for {field_name}",
+                     error_code=ErrorCode.VALIDATION_ERROR,
+                     error_type=ErrorType.VALIDATION,
+                     remediation="Remove instruction-like patterns from input.",
+                 )
+             )
+
+     if files:
+         for idx, file_path in enumerate(files):
+             if isinstance(file_path, str) and is_prompt_injection(file_path):
+                 return asdict(
+                     error_response(
+                         f"Input validation failed for files[{idx}]",
+                         error_code=ErrorCode.VALIDATION_ERROR,
+                         error_type=ErrorType.VALIDATION,
+                         remediation="Remove instruction-like patterns from file paths.",
+                     )
+                 )
+
+     ws_path = (
+         Path(workspace) if isinstance(workspace, str) and workspace else Path.cwd()
+     )
+     specs_dir = find_specs_directory(str(ws_path))
+     if not specs_dir:
+         return asdict(
+             error_response(
+                 "Could not find specs directory",
+                 error_code=ErrorCode.NOT_FOUND,
+                 error_type=ErrorType.NOT_FOUND,
+                 remediation="Ensure you're in a project with a specs/ directory",
+             )
+         )
+
+     spec_file = find_spec_file(spec_id, specs_dir)
+     if not spec_file:
+         return asdict(
+             error_response(
+                 f"Specification not found: {spec_id}",
+                 error_code=ErrorCode.SPEC_NOT_FOUND,
+                 error_type=ErrorType.NOT_FOUND,
+                 remediation='Verify the spec ID exists using spec(action="list").',
+             )
+         )
+
+     spec_data = load_spec(spec_id, specs_dir)
+     if not spec_data:
+         return asdict(
+             error_response(
+                 f"Failed to load specification: {spec_id}",
+                 error_code=ErrorCode.INTERNAL_ERROR,
+                 error_type=ErrorType.INTERNAL,
+                 remediation="Check spec JSON validity and retry.",
+             )
+         )
+
+     scope = "task" if task_id else ("phase" if phase_id else "spec")
+
+     # Setup fidelity reviews directory and file naming
+     fidelity_reviews_dir = Path(specs_dir) / ".fidelity-reviews"
+     base_name = f"{spec_id}-{scope}"
+     if task_id:
+         base_name += f"-{task_id}"
+     elif phase_id:
+         base_name += f"-{phase_id}"
+     provider_review_paths: List[Dict[str, Any]] = []
+     review_path: Optional[str] = None
+
+     spec_requirements = _build_spec_requirements(spec_data, task_id, phase_id)
+     implementation_artifacts = _build_implementation_artifacts(
+         spec_data,
+         task_id,
+         phase_id,
+         files,
+         incremental,
+         base_branch,
+         workspace_root=ws_path,
+     )
+     test_results = (
+         _build_test_results(spec_data, task_id, phase_id) if include_tests else ""
+     )
+     journal_entries = _build_journal_entries(spec_data, task_id, phase_id)
+
+     preferred_providers = ai_tools if isinstance(ai_tools, list) else []
+     first_provider = preferred_providers[0] if preferred_providers else None
+
+     # Load consultation config from workspace path to get provider priority list
+     config_file = ws_path / "foundry-mcp.toml"
+     consultation_config = load_consultation_config(config_file=config_file)
+     orchestrator = ConsultationOrchestrator(config=consultation_config)
+     if not orchestrator.is_available(provider_id=first_provider):
+         return asdict(
+             error_response(
+                 "Fidelity review requested but no providers available",
+                 error_code=ErrorCode.AI_NO_PROVIDER,
+                 error_type=ErrorType.UNAVAILABLE,
+                 data={"spec_id": spec_id, "requested_provider": first_provider},
+                 remediation="Install/configure an AI provider (claude/gemini/codex)",
+             )
+         )
+
+     request = ConsultationRequest(
+         workflow=ConsultationWorkflow.FIDELITY_REVIEW,
+         prompt_id="FIDELITY_REVIEW_V1",
+         context={
+             "spec_id": spec_id,
+             "spec_title": spec_data.get("title", spec_id),
+             "spec_description": spec_data.get("description", ""),
+             "review_scope": scope,
+             "spec_requirements": spec_requirements,
+             "implementation_artifacts": implementation_artifacts,
+             "test_results": test_results,
+             "journal_entries": journal_entries,
+         },
+         provider_id=first_provider,
+         model=model,
+     )
+
+     result = orchestrator.consult(request, use_cache=True)
+     is_consensus = isinstance(result, ConsensusResult)
+     synthesis_performed = False
+     synthesis_error = None
+     successful_providers: List[str] = []
+     failed_providers: List[Dict[str, Any]] = []
+
+     if is_consensus:
+         # Extract provider details for visibility
+         failed_providers = [
+             {"provider_id": r.provider_id, "error": r.error}
+             for r in result.responses
+             if not r.success
+         ]
+         # Filter for truly successful responses (success=True AND non-empty content)
+         successful_responses = [
+             r for r in result.responses if r.success and r.content.strip()
+         ]
+         successful_providers = [r.provider_id for r in successful_responses]
+
+         if len(successful_responses) >= 2:
+             # Multi-model mode: run synthesis to consolidate reviews
+             model_reviews_json = ""
+             for response in successful_responses:
+                 model_reviews_json += (
+                     f"\n---\n## Review by {response.provider_id}\n\n"
+                     f"```json\n{response.content}\n```\n"
+                 )
+
+             # Write individual provider review files
+             try:
+                 fidelity_reviews_dir.mkdir(parents=True, exist_ok=True)
+                 for response in successful_responses:
+                     provider_parsed = _parse_json_content(response.content)
+                     provider_file = fidelity_reviews_dir / f"{base_name}-{response.provider_id}.md"
+                     if provider_parsed:
+                         provider_md = _format_fidelity_markdown(
+                             provider_parsed,
+                             spec_id,
+                             spec_data.get("title", spec_id),
+                             scope,
+                             task_id=task_id,
+                             phase_id=phase_id,
+                             provider_id=response.provider_id,
+                         )
+                         provider_file.write_text(provider_md, encoding="utf-8")
+                         provider_review_paths.append({
+                             "provider_id": response.provider_id,
+                             "path": str(provider_file),
+                         })
+                     else:
+                         # JSON parsing failed - write raw content as fallback
+                         logger.warning(
+                             "Provider %s returned non-JSON content, writing raw response",
+                             response.provider_id,
+                         )
+                         raw_md = (
+                             f"# Fidelity Review (Raw): {spec_id}\n\n"
+                             f"**Provider:** {response.provider_id}\n"
+                             f"**Note:** Response could not be parsed as JSON\n\n"
+                             f"## Raw Response\n\n```\n{response.content}\n```\n"
+                         )
+                         provider_file.write_text(raw_md, encoding="utf-8")
+                         provider_review_paths.append({
+                             "provider_id": response.provider_id,
+                             "path": str(provider_file),
+                             "parse_error": True,
+                         })
+             except Exception as e:
+                 logger.warning("Failed to write provider review files: %s", e)
+
+             logger.info(
+                 "Running fidelity synthesis for %d provider reviews: %s",
+                 len(successful_responses),
+                 successful_providers,
+             )
+
+             synthesis_request = ConsultationRequest(
+                 workflow=ConsultationWorkflow.FIDELITY_REVIEW,
+                 prompt_id="FIDELITY_SYNTHESIS_PROMPT_V1",
+                 context={
+                     "spec_id": spec_id,
+                     "spec_title": spec_data.get("title", spec_id),
+                     "review_scope": scope,
+                     "num_models": len(successful_responses),
+                     "model_reviews": model_reviews_json,
+                     "response_schema": FIDELITY_SYNTHESIZED_RESPONSE_SCHEMA,
+                 },
+                 provider_id=successful_providers[0],
+                 model=model,
+             )
+
+             try:
+                 synthesis_result = orchestrator.consult(synthesis_request, use_cache=True)
+             except Exception as e:
+                 logger.error("Fidelity synthesis call crashed: %s", e, exc_info=True)
+                 synthesis_result = None
+
+             # Handle both ConsultationResult and ConsensusResult from synthesis
+             synthesis_success = False
+             synthesis_content = None
+             if synthesis_result:
+                 if isinstance(synthesis_result, ConsultationResult) and synthesis_result.success:
+                     synthesis_content = synthesis_result.content
+                     synthesis_success = bool(synthesis_content and synthesis_content.strip())
+                 elif isinstance(synthesis_result, ConsensusResult) and synthesis_result.success:
+                     synthesis_content = synthesis_result.primary_content
+                     synthesis_success = bool(synthesis_content and synthesis_content.strip())
+
+             if synthesis_success and synthesis_content:
+                 content = synthesis_content
+                 synthesis_performed = True
+             else:
+                 # Synthesis failed - fall back to first provider's content
+                 error_detail = "unknown"
+                 if synthesis_result is None:
+                     error_detail = "synthesis crashed (see logs)"
+                 elif isinstance(synthesis_result, ConsultationResult):
+                     error_detail = synthesis_result.error or "empty response"
+                 elif isinstance(synthesis_result, ConsensusResult):
+                     error_detail = "empty synthesis content"
+                 logger.warning(
+                     "Fidelity synthesis call failed (%s), falling back to first provider's content",
+                     error_detail,
+                 )
+                 content = result.primary_content
+                 synthesis_error = error_detail
+         else:
+             # Single successful provider - use its content directly (no synthesis needed)
+             content = result.primary_content
+     else:
+         content = result.content
+
+     parsed = _parse_json_content(content)
+     verdict = parsed.get("verdict") if parsed else "unknown"
+
+     # Write main fidelity review file
+     if parsed:
+         try:
+             fidelity_reviews_dir.mkdir(parents=True, exist_ok=True)
+             main_md = _format_fidelity_markdown(
+                 parsed,
+                 spec_id,
+                 spec_data.get("title", spec_id),
+                 scope,
+                 task_id=task_id,
+                 phase_id=phase_id,
+             )
+             review_file = fidelity_reviews_dir / f"{base_name}.md"
+             review_file.write_text(main_md, encoding="utf-8")
+             review_path = str(review_file)
+         except Exception as e:
+             logger.warning("Failed to write main fidelity review file: %s", e)
+
+     duration_ms = (time.perf_counter() - start_time) * 1000
+
+     # Build consensus info with synthesis details
+     consensus_info: Dict[str, Any] = {
+         "mode": "multi_model" if is_consensus else "single_model",
+         "threshold": consensus_threshold,
+         "provider_id": getattr(result, "provider_id", None),
+         "model_used": getattr(result, "model_used", None),
+         "synthesis_performed": synthesis_performed,
+     }
+
+     if is_consensus:
+         consensus_info["successful_providers"] = successful_providers
+         consensus_info["failed_providers"] = failed_providers
+         if synthesis_error:
+             consensus_info["synthesis_error"] = synthesis_error
+
+     # Include additional synthesized fields if available
+     response_data: Dict[str, Any] = {
+         "spec_id": spec_id,
+         "title": spec_data.get("title", spec_id),
+         "scope": scope,
+         "verdict": verdict,
+         "deviations": parsed.get("deviations") if parsed else [],
+         "recommendations": parsed.get("recommendations") if parsed else [],
+         "consensus": consensus_info,
+     }
+
+     # Add file paths if reviews were written
+     if review_path:
+         response_data["review_path"] = review_path
+     if provider_review_paths:
+         response_data["provider_reviews"] = provider_review_paths
+
+     # Add synthesis-specific fields if synthesis was performed
+     if synthesis_performed and parsed:
+         if "verdict_consensus" in parsed:
+             response_data["verdict_consensus"] = parsed["verdict_consensus"]
+         if "synthesis_metadata" in parsed:
+             response_data["synthesis_metadata"] = parsed["synthesis_metadata"]
+
+     return asdict(
+         success_response(
+             **response_data,
+             telemetry={"duration_ms": round(duration_ms, 2)},
+         )
+     )
+
+
+ _ACTIONS = [
+     ActionDefinition(name="spec", handler=_handle_spec_review, summary="Review a spec", aliases=("spec-review",)),
+     ActionDefinition(
+         name="fidelity",
+         handler=_handle_fidelity,
+         summary="Run a fidelity review",
+     ),
+     ActionDefinition(
+         name="parse-feedback",
+         handler=_handle_parse_feedback,
+         summary="Parse reviewer feedback into structured issues",
+     ),
+     ActionDefinition(
+         name="list-tools",
+         handler=_handle_list_tools,
+         summary="List available review tools",
+     ),
+     ActionDefinition(
+         name="list-plan-tools",
+         handler=_handle_list_plan_tools,
+         summary="List available plan review toolchains",
+     ),
+ ]
+
+ _REVIEW_ROUTER = ActionRouter(tool_name="review", actions=_ACTIONS)
+
+
+ def _dispatch_review_action(
+     *, action: str, payload: Dict[str, Any], config: ServerConfig
+ ) -> dict:
+     try:
+         return _REVIEW_ROUTER.dispatch(action=action, payload=payload, config=config)
+     except ActionRouterError as exc:
+         allowed = ", ".join(exc.allowed_actions)
+         return asdict(
+             error_response(
+                 f"Unsupported review action '{action}'. Allowed actions: {allowed}",
+                 error_code=ErrorCode.VALIDATION_ERROR,
+                 error_type=ErrorType.VALIDATION,
+                 remediation=f"Use one of: {allowed}",
+             )
+         )
+
+
+ def register_unified_review_tool(mcp: FastMCP, config: ServerConfig) -> None:
+     """Register the consolidated review tool."""
+
+     @canonical_tool(mcp, canonical_name="review")
+     @mcp_tool(tool_name="review", emit_metrics=True, audit=True)
+     def review(
+         action: str,
+         spec_id: Optional[str] = None,
+         review_type: Optional[str] = None,
+         tools: Optional[str] = None,
+         model: Optional[str] = None,
+         ai_provider: Optional[str] = None,
+         ai_timeout: float = DEFAULT_AI_TIMEOUT,
+         consultation_cache: bool = True,
+         path: Optional[str] = None,
+         dry_run: bool = False,
+         task_id: Optional[str] = None,
+         phase_id: Optional[str] = None,
+         files: Optional[List[str]] = None,
+         ai_tools: Optional[List[str]] = None,
+         consensus_threshold: int = 2,
+         incremental: bool = False,
+         include_tests: bool = True,
+         base_branch: str = "main",
+         workspace: Optional[str] = None,
+         review_path: Optional[str] = None,
+         output_path: Optional[str] = None,
+     ) -> dict:
+         payload = {
+             "spec_id": spec_id,
+             "review_type": review_type,
+             "tools": tools,
+             "model": model,
+             "ai_provider": ai_provider,
+             "ai_timeout": ai_timeout,
+             "consultation_cache": consultation_cache,
+             "path": path,
+             "dry_run": dry_run,
+             "task_id": task_id,
+             "phase_id": phase_id,
+             "files": files,
+             "ai_tools": ai_tools,
+             "consensus_threshold": consensus_threshold,
+             "incremental": incremental,
+             "include_tests": include_tests,
+             "base_branch": base_branch,
+             "workspace": workspace,
+             "review_path": review_path,
+             "output_path": output_path,
+         }
+         return _dispatch_review_action(action=action, payload=payload, config=config)
+
+     logger.debug("Registered unified review tool")
+
+
+ __all__ = [
+     "register_unified_review_tool",
+ ]
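
The tool surface above is easiest to see end to end with a small usage sketch. The following is a minimal, hypothetical example and is not part of the package diff: the `FastMCP("foundry")` server name and the no-argument `ServerConfig()` construction are assumptions for illustration, while `register_unified_review_tool`, the action names, and the parameters shown come from the code above.

    from mcp.server.fastmcp import FastMCP

    from foundry_mcp.config import ServerConfig
    from foundry_mcp.tools.unified.review import register_unified_review_tool

    # Assumptions for illustration: FastMCP accepts a server name, and
    # ServerConfig can be constructed with defaults in this environment.
    mcp = FastMCP("foundry")
    config = ServerConfig()

    # Registers one `review` tool whose `action` argument selects a handler:
    # "spec", "fidelity", "parse-feedback", "list-tools", or "list-plan-tools".
    register_unified_review_tool(mcp, config)

    # A client could then invoke, for example:
    #   review(action="spec", spec_id="my-spec", review_type="quick")
    #   review(action="fidelity", spec_id="my-spec", task_id="task-1")
    #   review(action="list-tools")
    # Unknown actions are rejected by _dispatch_review_action with a structured
    # validation error that lists the allowed actions.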