foundry-mcp 0.3.3-py3-none-any.whl → 0.7.0-py3-none-any.whl
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registries.
- foundry_mcp/__init__.py +7 -1
- foundry_mcp/cli/commands/plan.py +10 -3
- foundry_mcp/cli/commands/review.py +19 -4
- foundry_mcp/cli/commands/specs.py +38 -208
- foundry_mcp/cli/output.py +3 -3
- foundry_mcp/config.py +235 -5
- foundry_mcp/core/ai_consultation.py +146 -9
- foundry_mcp/core/discovery.py +6 -6
- foundry_mcp/core/error_store.py +2 -2
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/llm_config.py +20 -2
- foundry_mcp/core/metrics_store.py +2 -2
- foundry_mcp/core/progress.py +70 -0
- foundry_mcp/core/prompts/fidelity_review.py +149 -4
- foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
- foundry_mcp/core/prompts/plan_review.py +5 -1
- foundry_mcp/core/providers/claude.py +6 -47
- foundry_mcp/core/providers/codex.py +6 -57
- foundry_mcp/core/providers/cursor_agent.py +3 -44
- foundry_mcp/core/providers/gemini.py +6 -57
- foundry_mcp/core/providers/opencode.py +35 -5
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +425 -0
- foundry_mcp/core/research/models.py +437 -0
- foundry_mcp/core/research/workflows/__init__.py +22 -0
- foundry_mcp/core/research/workflows/base.py +204 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +396 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/responses.py +450 -0
- foundry_mcp/core/spec.py +2438 -236
- foundry_mcp/core/task.py +1064 -19
- foundry_mcp/core/testing.py +512 -123
- foundry_mcp/core/validation.py +313 -42
- foundry_mcp/dashboard/components/charts.py +0 -57
- foundry_mcp/dashboard/launcher.py +11 -0
- foundry_mcp/dashboard/views/metrics.py +25 -35
- foundry_mcp/dashboard/views/overview.py +1 -65
- foundry_mcp/resources/specs.py +25 -25
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +33 -5
- foundry_mcp/server.py +38 -0
- foundry_mcp/tools/unified/__init__.py +4 -2
- foundry_mcp/tools/unified/authoring.py +2423 -267
- foundry_mcp/tools/unified/documentation_helpers.py +69 -6
- foundry_mcp/tools/unified/environment.py +235 -6
- foundry_mcp/tools/unified/error.py +18 -1
- foundry_mcp/tools/unified/lifecycle.py +8 -0
- foundry_mcp/tools/unified/plan.py +113 -1
- foundry_mcp/tools/unified/research.py +658 -0
- foundry_mcp/tools/unified/review.py +370 -16
- foundry_mcp/tools/unified/spec.py +367 -0
- foundry_mcp/tools/unified/task.py +1163 -48
- foundry_mcp/tools/unified/test.py +69 -8
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/METADATA +7 -1
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/RECORD +60 -48
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/WHEEL +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/entry_points.txt +0 -0
- {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.7.0.dist-info}/licenses/LICENSE +0 -0
@@ -45,6 +45,49 @@ def _build_spec_requirements(
     return "\n".join(lines) if lines else "*No requirements available*"


+def _split_file_paths(value: Any) -> List[str]:
+    if value is None:
+        return []
+    if isinstance(value, list):
+        parts: List[str] = []
+        for item in value:
+            parts.extend(_split_file_paths(item))
+        return parts
+    if isinstance(value, str):
+        segments = [part.strip() for part in value.split(",")]
+        return [segment for segment in segments if segment]
+    return [str(value)]
+
+
+def _normalize_for_comparison(path_value: str, workspace_root: Optional[Path]) -> str:
+    raw_path = Path(path_value)
+    if raw_path.is_absolute() and workspace_root:
+        try:
+            raw_path = raw_path.relative_to(workspace_root)
+        except ValueError:
+            pass
+    if workspace_root and raw_path.parts and raw_path.parts[0] == workspace_root.name:
+        raw_path = Path(*raw_path.parts[1:])
+    return raw_path.as_posix()
+
+
+def _resolve_path(path_value: str, workspace_root: Optional[Path]) -> Path:
+    raw_path = Path(path_value)
+    candidates: List[Path] = []
+    if raw_path.is_absolute():
+        candidates.append(raw_path)
+    else:
+        candidates.append(raw_path)
+        if workspace_root:
+            candidates.append(workspace_root / raw_path)
+            if raw_path.parts and raw_path.parts[0] == workspace_root.name:
+                candidates.append(workspace_root / Path(*raw_path.parts[1:]))
+    for candidate in candidates:
+        if candidate.exists():
+            return candidate
+    return candidates[0] if candidates else raw_path
+
+
 def _build_implementation_artifacts(
     spec_data: Dict[str, Any],
     task_id: Optional[str],
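The three helpers above normalize the many shapes `file_path` metadata can take. A minimal, self-contained sketch of the first two (bodies copied from the hunk; the workspace root and inputs are hypothetical):

```python
from pathlib import Path
from typing import Any, List, Optional

def _split_file_paths(value: Any) -> List[str]:
    # Flattens nested lists and splits comma-separated strings.
    if value is None:
        return []
    if isinstance(value, list):
        parts: List[str] = []
        for item in value:
            parts.extend(_split_file_paths(item))
        return parts
    if isinstance(value, str):
        segments = [part.strip() for part in value.split(",")]
        return [segment for segment in segments if segment]
    return [str(value)]

def _normalize_for_comparison(path_value: str, workspace_root: Optional[Path]) -> str:
    # Strips the workspace root (or its name as a leading segment)
    # so different spellings of the same file compare equal.
    raw_path = Path(path_value)
    if raw_path.is_absolute() and workspace_root:
        try:
            raw_path = raw_path.relative_to(workspace_root)
        except ValueError:
            pass
    if workspace_root and raw_path.parts and raw_path.parts[0] == workspace_root.name:
        raw_path = Path(*raw_path.parts[1:])
    return raw_path.as_posix()

root = Path("/home/dev/myproj")  # hypothetical workspace root
print(_split_file_paths(["src/a.py, src/b.py", None, ["src/c.py"]]))
# ['src/a.py', 'src/b.py', 'src/c.py']
print(_normalize_for_comparison("/home/dev/myproj/src/a.py", root))  # src/a.py
print(_normalize_for_comparison("myproj/src/a.py", root))            # src/a.py
```

`_resolve_path` applies the same root-name heuristic in reverse, returning the first candidate that exists on disk.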
@@ -52,21 +95,32 @@ def _build_implementation_artifacts(
     files: Optional[List[str]],
     incremental: bool,
     base_branch: str,
+    workspace_root: Optional[Path] = None,
 ) -> str:
     lines: list[str] = []
     file_paths: list[str] = []
+    if workspace_root is not None and not isinstance(workspace_root, Path):
+        workspace_root = Path(str(workspace_root))
     if files:
-        file_paths =
+        file_paths = _split_file_paths(files)
     elif task_id:
         task = _find_task(spec_data, task_id)
         if task and task.get("metadata", {}).get("file_path"):
-            file_paths =
+            file_paths = _split_file_paths(task["metadata"]["file_path"])
     elif phase_id:
         phase = _find_phase(spec_data, phase_id)
         if phase:
             for child in _get_child_nodes(spec_data, phase):
                 if child.get("metadata", {}).get("file_path"):
-                    file_paths.
+                    file_paths.extend(_split_file_paths(child["metadata"]["file_path"]))
+    if file_paths:
+        deduped: List[str] = []
+        seen = set()
+        for file_path in file_paths:
+            if file_path not in seen:
+                seen.add(file_path)
+                deduped.append(file_path)
+        file_paths = deduped
     if incremental:
         try:
             import subprocess
@@ -82,16 +136,25 @@ def _build_implementation_artifacts(
                 result.stdout.strip().split("\n") if result.stdout else []
             )
             if file_paths:
-
+                changed_set = {
+                    _normalize_for_comparison(path, workspace_root)
+                    for path in changed_files
+                    if path
+                }
+                file_paths = [
+                    path
+                    for path in file_paths
+                    if _normalize_for_comparison(path, workspace_root) in changed_set
+                ]
             else:
-                file_paths = changed_files
+                file_paths = [path for path in changed_files if path]
             lines.append(
                 f"*Incremental review: {len(file_paths)} changed files since {base_branch}*\n"
             )
         except Exception:
             lines.append(f"*Warning: Could not get git diff from {base_branch}*\n")
     for file_path in file_paths[:5]:
-        path =
+        path = _resolve_path(file_path, workspace_root)
         if path.exists():
             try:
                 content = path.read_text(encoding="utf-8")
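Together with the helpers above, the dedup and changed-set steps make incremental review resilient to duplicate and differently-spelled paths. A standalone sketch of the flow with made-up data (normalization reduced to identity for brevity):

```python
# Illustrative inputs, not taken from a real run.
file_paths = ["src/a.py", "src/b.py", "src/a.py"]
changed_files = ["src/a.py", "", "docs/notes.md"]  # as from `git diff --name-only`

# Order-preserving dedup, as in the diff.
seen: set = set()
deduped: list = []
for file_path in file_paths:
    if file_path not in seen:
        seen.add(file_path)
        deduped.append(file_path)

# Keep only paths that also appear in the (non-empty) changed set.
changed_set = {path for path in changed_files if path}
kept = [path for path in deduped if path in changed_set]
print(kept)  # ['src/a.py']
```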
@@ -13,7 +13,7 @@ from typing import Any, Dict, List, Optional, cast

 from mcp.server.fastmcp import FastMCP

-from foundry_mcp.config import ServerConfig
+from foundry_mcp.config import ServerConfig, _PACKAGE_VERSION
 from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
 from foundry_mcp.core.feature_flags import FeatureFlag, FlagState, get_flag_service
 from foundry_mcp.core.naming import canonical_tool
@@ -45,15 +45,44 @@ try:
 except ValueError:
     pass

-
+_DEFAULT_TOML_TEMPLATE = """[workspace]
 specs_dir = "./specs"

+[logging]
+level = "INFO"
+structured = true
+
+[server]
+name = "foundry-mcp"
+version = "{version}"
+
 [workflow]
 mode = "single"
 auto_validate = true
-
-
-
+journal_enabled = true
+
+[consultation]
+# priority = [] # Appended by setup based on detected providers
+default_timeout = 300
+max_retries = 2
+retry_delay = 5.0
+fallback_enabled = true
+cache_ttl = 3600
+
+[consultation.workflows.fidelity_review]
+min_models = 2
+timeout_override = 600.0
+default_review_type = "full"
+
+[consultation.workflows.plan_review]
+min_models = 2
+timeout_override = 180.0
+default_review_type = "full"
+
+[consultation.workflows.markdown_plan_review]
+min_models = 2
+timeout_override = 180.0
+default_review_type = "full"
 """

@@ -141,11 +170,16 @@ def _update_permissions(
     return {"changes": changes}


+def _get_default_toml_content() -> str:
+    """Get default TOML content with current package version."""
+    return _DEFAULT_TOML_TEMPLATE.format(version=_PACKAGE_VERSION)
+
+
 def _write_default_toml(toml_path: Path) -> None:
     """Write default foundry-mcp.toml configuration file."""

     with open(toml_path, "w") as handle:
-        handle.write(
+        handle.write(_get_default_toml_content())


 def _init_specs_directory(base_path: Path, dry_run: bool) -> Dict[str, Any]:
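Rendering the template is a single `str.format` call: `{version}` is its only placeholder, and the template contains no literal braces, so no escaping is needed. A trimmed sketch (the version value is assumed for illustration; the real one comes from `foundry_mcp.config`):

```python
_DEFAULT_TOML_TEMPLATE = """[server]
name = "foundry-mcp"
version = "{version}"
"""  # trimmed copy of the template above

_PACKAGE_VERSION = "0.7.0"  # assumed value for this sketch

def _get_default_toml_content() -> str:
    return _DEFAULT_TOML_TEMPLATE.format(version=_PACKAGE_VERSION)

print(_get_default_toml_content())
# [server]
# name = "foundry-mcp"
# version = "0.7.0"
```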
@@ -184,6 +218,7 @@ _ACTION_SUMMARY = {
     "verify-env": "Validate runtimes, packages, and workspace environment",
     "init": "Initialize the standard specs/ workspace structure",
     "detect": "Detect repository topology (project type, specs/docs)",
+    "detect-test-runner": "Detect appropriate test runner for the project",
     "setup": "Complete SDD setup with permissions + config",
 }

@@ -545,6 +580,190 @@ def _handle_detect_topology(
     )


+def _handle_detect_test_runner(
+    *,
+    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
+    path: Optional[str] = None,
+    **_: Any,
+) -> dict:
+    """Detect appropriate test runner based on project type and configuration files.
+
+    Returns a structured response with detected runners, confidence levels, and
+    a recommended default runner.
+
+    Detection rules:
+    - Python: pyproject.toml, setup.py, requirements.txt, Pipfile → pytest
+    - Go: go.mod → go
+    - Jest: jest.config.* or package.json with "jest" key → jest (precedence over npm)
+    - Node: package.json with "test" script → npm
+    - Rust: Cargo.toml + Makefile present → make
+    """
+    request_id = _request_id()
+    blocked = _feature_flag_blocked(request_id)
+    if blocked:
+        return blocked
+
+    if path is not None and not isinstance(path, str):
+        return _validation_error(
+            action="detect-test-runner",
+            field="path",
+            message="Directory path must be a string",
+            request_id=request_id,
+        )
+
+    metric_key = _metric_name("detect-test-runner")
+    try:
+        base_path = Path(path) if path else Path.cwd()
+
+        detected_runners: List[Dict[str, Any]] = []
+
+        # Python detection (highest precedence for Python projects)
+        python_primary = ["pyproject.toml", "setup.py"]
+        python_secondary = ["requirements.txt", "Pipfile"]
+
+        for marker in python_primary:
+            if (base_path / marker).exists():
+                detected_runners.append({
+                    "runner_name": "pytest",
+                    "project_type": "python",
+                    "confidence": "high",
+                    "reason": f"{marker} found",
+                })
+                break
+        else:
+            # Check secondary markers only if no primary found
+            for marker in python_secondary:
+                if (base_path / marker).exists():
+                    detected_runners.append({
+                        "runner_name": "pytest",
+                        "project_type": "python",
+                        "confidence": "medium",
+                        "reason": f"{marker} found",
+                    })
+                    break
+
+        # Go detection
+        if (base_path / "go.mod").exists():
+            detected_runners.append({
+                "runner_name": "go",
+                "project_type": "go",
+                "confidence": "high",
+                "reason": "go.mod found",
+            })
+
+        # Node detection - Jest takes precedence over npm
+        jest_configs = [
+            "jest.config.js",
+            "jest.config.ts",
+            "jest.config.mjs",
+            "jest.config.cjs",
+            "jest.config.json",
+        ]
+
+        jest_detected = False
+        for jest_config in jest_configs:
+            if (base_path / jest_config).exists():
+                detected_runners.append({
+                    "runner_name": "jest",
+                    "project_type": "node",
+                    "confidence": "high",
+                    "reason": f"{jest_config} found",
+                })
+                jest_detected = True
+                break
+
+        # Check package.json for jest config or test script
+        package_json_path = base_path / "package.json"
+        if package_json_path.exists():
+            try:
+                with open(package_json_path, "r") as f:
+                    pkg = json.load(f)
+
+                # Jest config in package.json takes precedence
+                if not jest_detected and "jest" in pkg:
+                    detected_runners.append({
+                        "runner_name": "jest",
+                        "project_type": "node",
+                        "confidence": "high",
+                        "reason": "jest key in package.json",
+                    })
+                    jest_detected = True
+
+                # npm test script (only if jest not already detected)
+                if not jest_detected:
+                    scripts = pkg.get("scripts", {})
+                    if "test" in scripts:
+                        detected_runners.append({
+                            "runner_name": "npm",
+                            "project_type": "node",
+                            "confidence": "high",
+                            "reason": "test script in package.json",
+                        })
+            except (json.JSONDecodeError, OSError):
+                # If package.json is invalid, skip Node detection
+                pass
+
+        # Rust detection - only if BOTH Cargo.toml and Makefile exist
+        cargo_exists = (base_path / "Cargo.toml").exists()
+        makefile_exists = (base_path / "Makefile").exists() or (
+            base_path / "makefile"
+        ).exists()
+
+        if cargo_exists and makefile_exists:
+            detected_runners.append({
+                "runner_name": "make",
+                "project_type": "rust",
+                "confidence": "medium",
+                "reason": "Cargo.toml + Makefile found",
+            })
+
+        # Determine recommended default based on precedence order from plan
+        # Priority: python (1) > go (2) > jest (3) > npm (4) > make (5)
+        precedence_order = ["pytest", "go", "jest", "npm", "make"]
+        recommended_default: Optional[str] = None
+
+        for runner_name in precedence_order:
+            for runner in detected_runners:
+                if runner["runner_name"] == runner_name:
+                    recommended_default = runner_name
+                    break
+            if recommended_default:
+                break
+
+        data: Dict[str, Any] = {
+            "detected_runners": detected_runners,
+            "recommended_default": recommended_default,
+        }
+
+        warnings: List[str] = []
+        if not detected_runners:
+            warnings.append(
+                "No test runners detected. Configure [test] section manually in "
+                "foundry-mcp.toml if tests are needed."
+            )
+
+        _metrics.counter(metric_key, labels={"status": "success"})
+        return asdict(
+            success_response(
+                data=data,
+                warnings=warnings or None,
+                request_id=request_id,
+            )
+        )
+    except Exception as exc:
+        logger.exception("Error detecting test runner")
+        _metrics.counter(metric_key, labels={"status": "error"})
+        return asdict(
+            error_response(
+                f"Failed to detect test runner: {exc}",
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                remediation="Verify the directory exists and retry",
+                request_id=request_id,
+            )
+        )
+
+
 def _handle_verify_environment(
     *,
     config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
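For a repository containing both `pyproject.toml` and `go.mod`, the handler above would return a `data` payload shaped like this (inferred from the code, not captured from a real run):

```python
data = {
    "detected_runners": [
        {
            "runner_name": "pytest",
            "project_type": "python",
            "confidence": "high",
            "reason": "pyproject.toml found",
        },
        {
            "runner_name": "go",
            "project_type": "go",
            "confidence": "high",
            "reason": "go.mod found",
        },
    ],
    # pytest wins: it precedes "go" in the precedence list.
    "recommended_default": "pytest",
}
```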
@@ -868,6 +1087,16 @@ _ENVIRONMENT_ROUTER = ActionRouter(
         summary=_ACTION_SUMMARY["detect"],
         aliases=("sdd-detect-topology", "sdd_detect_topology"),
     ),
+    ActionDefinition(
+        name="detect-test-runner",
+        handler=_handle_detect_test_runner,
+        summary=_ACTION_SUMMARY["detect-test-runner"],
+        aliases=(
+            "detect_test_runner",
+            "sdd-detect-test-runner",
+            "sdd_detect_test_runner",
+        ),
+    ),
     ActionDefinition(
         name="setup",
         handler=_handle_setup,
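Assuming `ActionRouter` resolves aliases to the canonical action (which the definition implies but this hunk does not show), all four spellings reach `_handle_detect_test_runner`. A hypothetical lookup table illustrating that resolution:

```python
# Hypothetical alias map mirroring the ActionDefinition above;
# the real router's internals may differ.
_ALIASES = {
    "detect-test-runner": "detect-test-runner",
    "detect_test_runner": "detect-test-runner",
    "sdd-detect-test-runner": "detect-test-runner",
    "sdd_detect_test_runner": "detect-test-runner",
}

def canonical(action: str) -> str:
    return _ALIASES.get(action, action)

assert canonical("sdd_detect_test_runner") == "detect-test-runner"
```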
@@ -332,7 +332,24 @@ def perform_error_cleanup(


 def _handle_error_list(*, config: ServerConfig, **payload: Any) -> dict:
-
+    # Filter out parameters not accepted by perform_error_list
+    filtered_payload = {
+        k: v
+        for k, v in payload.items()
+        if k
+        in (
+            "tool_name",
+            "error_code",
+            "error_type",
+            "fingerprint",
+            "provider_id",
+            "since",
+            "until",
+            "limit",
+            "cursor",
+        )
+    }
+    return perform_error_list(config=config, **filtered_payload)


 def _handle_error_get(*, config: ServerConfig, **payload: Any) -> dict:
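The allow-list matters because the router forwards its whole payload as keyword arguments, so any key `perform_error_list` does not accept would raise `TypeError`. The same idiom standalone, with an illustrative payload:

```python
_ALLOWED = (
    "tool_name", "error_code", "error_type", "fingerprint",
    "provider_id", "since", "until", "limit", "cursor",
)

payload = {"tool_name": "spec", "limit": 10, "workflow_id": "w-123"}  # hypothetical
filtered = {k: v for k, v in payload.items() if k in _ALLOWED}
print(filtered)  # {'tool_name': 'spec', 'limit': 10} -- workflow_id dropped
```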
@@ -209,6 +209,7 @@ def _handle_move(
     spec_id: Optional[str] = None,
     to_folder: Optional[str] = None,
     path: Optional[str] = None,
+    force: Optional[bool] = None,  # Unused, accepted for router compatibility
 ) -> dict:
     action = "move"
     request_id = _request_id()

@@ -293,7 +294,9 @@ def _handle_activate(
     *,
     config: ServerConfig,
     spec_id: Optional[str] = None,
+    to_folder: Optional[str] = None,  # Unused, accepted for router compatibility
     path: Optional[str] = None,
+    force: Optional[bool] = None,  # Unused, accepted for router compatibility
 ) -> dict:
     action = "activate"
     request_id = _request_id()

@@ -356,6 +359,7 @@ def _handle_complete(
     *,
     config: ServerConfig,
     spec_id: Optional[str] = None,
+    to_folder: Optional[str] = None,  # Unused, accepted for router compatibility
     force: Optional[bool] = False,
     path: Optional[str] = None,
 ) -> dict:

@@ -429,7 +433,9 @@ def _handle_archive(
     *,
     config: ServerConfig,
     spec_id: Optional[str] = None,
+    to_folder: Optional[str] = None,  # Unused, accepted for router compatibility
     path: Optional[str] = None,
+    force: Optional[bool] = None,  # Unused, accepted for router compatibility
 ) -> dict:
     action = "archive"
     request_id = _request_id()

@@ -492,7 +498,9 @@ def _handle_state(
     *,
     config: ServerConfig,
     spec_id: Optional[str] = None,
+    to_folder: Optional[str] = None,  # Unused, accepted for router compatibility
     path: Optional[str] = None,
+    force: Optional[bool] = None,  # Unused, accepted for router compatibility
 ) -> dict:
     action = "state"
     request_id = _request_id()
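All five lifecycle handlers gain the same shape: parameters they never read, accepted so a router that sends one uniform argument set to every action cannot crash them. A minimal sketch of the failure mode being avoided (dispatcher and handlers are hypothetical):

```python
def dispatch(handler, **payload):
    # A uniform router passes every known field to every handler.
    return handler(**payload)

def before(*, spec_id=None, path=None):
    return {"spec_id": spec_id}

def after(*, spec_id=None, path=None, to_folder=None, force=None):
    return {"spec_id": spec_id}

payload = {"spec_id": "s-1", "path": None, "to_folder": None, "force": True}
# dispatch(before, **payload)  # TypeError: unexpected keyword argument 'to_folder'
print(dispatch(after, **payload))  # works: extra kwargs are absorbed and ignored
```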
@@ -339,6 +339,7 @@ def perform_plan_review(

     consensus_info: Optional[dict] = None
     provider_used: Optional[str] = None
+    provider_reviews: list[dict[str, str]] = []

     if isinstance(result, ConsultationResult):
         if not result.success:
@@ -362,16 +363,125 @@ def perform_plan_review(
                     remediation="Check AI provider configuration or try again later",
                 )
             )
-
+
         providers_consulted = [r.provider_id for r in result.responses]
         provider_used = providers_consulted[0] if providers_consulted else "unknown"
+
+        # Extract failed provider details for visibility
+        failed_providers = [
+            {"provider_id": r.provider_id, "error": r.error}
+            for r in result.responses
+            if not r.success
+        ]
+        # Filter for truly successful responses (success=True AND non-empty content)
+        successful_responses = [
+            r for r in result.responses if r.success and r.content.strip()
+        ]
+        successful_providers = [r.provider_id for r in successful_responses]
+
         consensus_info = {
             "providers_consulted": providers_consulted,
             "successful": result.agreement.successful_providers
             if result.agreement
             else 0,
             "failed": result.agreement.failed_providers if result.agreement else 0,
+            "successful_providers": successful_providers,
+            "failed_providers": failed_providers,
         }
+
+        # Save individual provider review files and optionally run synthesis
+        if len(successful_responses) >= 2:
+            # Multi-model mode: save per-provider files, then synthesize
+            specs_dir = find_specs_directory()
+            if specs_dir is None:
+                return asdict(
+                    error_response(
+                        "No specs directory found for storing plan review",
+                        error_code=ErrorCode.NOT_FOUND,
+                        error_type=ErrorType.NOT_FOUND,
+                        remediation="Create a specs/ directory with pending/active/completed/archived subdirectories",
+                    )
+                )
+
+            plan_reviews_dir = specs_dir / ".plan-reviews"
+            plan_reviews_dir.mkdir(parents=True, exist_ok=True)
+
+            # Save each provider's review to a separate file
+            model_reviews_text = ""
+            for response in successful_responses:
+                provider_file = (
+                    plan_reviews_dir
+                    / f"{plan_name}-{review_type}-{response.provider_id}.md"
+                )
+                provider_file.write_text(response.content, encoding="utf-8")
+                provider_reviews.append(
+                    {"provider_id": response.provider_id, "path": str(provider_file)}
+                )
+                model_reviews_text += (
+                    f"\n---\n## Review by {response.provider_id}\n\n"
+                    f"{response.content}\n"
+                )
+
+            # Run synthesis call using first provider
+            logger.info(
+                "Running synthesis for %d provider reviews: %s",
+                len(successful_responses),
+                successful_providers,
+            )
+            synthesis_request = ConsultationRequest(
+                workflow=ConsultationWorkflow.PLAN_REVIEW,
+                prompt_id="SYNTHESIS_PROMPT_V1",
+                context={
+                    "spec_id": plan_name,
+                    "title": plan_name,
+                    "num_models": len(successful_responses),
+                    "model_reviews": model_reviews_text,
+                },
+                provider_id=successful_providers[0],
+                timeout=ai_timeout,
+            )
+            try:
+                synthesis_result = orchestrator.consult(
+                    synthesis_request, use_cache=consultation_cache
+                )
+            except Exception as e:
+                logger.error("Synthesis call crashed: %s", e, exc_info=True)
+                synthesis_result = None
+
+            # Handle both ConsultationResult and ConsensusResult
+            synthesis_success = False
+            synthesis_content = None
+            if synthesis_result:
+                if isinstance(synthesis_result, ConsultationResult) and synthesis_result.success:
+                    synthesis_content = synthesis_result.content
+                    consensus_info["synthesis_provider"] = synthesis_result.provider_id
+                    synthesis_success = bool(synthesis_content and synthesis_content.strip())
+                elif isinstance(synthesis_result, ConsensusResult) and synthesis_result.success:
+                    synthesis_content = synthesis_result.primary_content
+                    consensus_info["synthesis_provider"] = synthesis_result.responses[0].provider_id if synthesis_result.responses else "unknown"
+                    synthesis_success = bool(synthesis_content and synthesis_content.strip())
+
+            if synthesis_success and synthesis_content:
+                review_content = synthesis_content
+            else:
+                # Synthesis failed - fall back to first provider's content
+                error_detail = "unknown"
+                if synthesis_result is None:
+                    error_detail = "synthesis crashed (see logs)"
+                elif isinstance(synthesis_result, ConsultationResult):
+                    error_detail = synthesis_result.error or "empty response"
+                elif isinstance(synthesis_result, ConsensusResult):
+                    error_detail = "empty synthesis content"
+                logger.warning(
+                    "Synthesis call failed (%s), falling back to first provider's content",
+                    error_detail,
+                )
+                review_content = result.primary_content
+                consensus_info["synthesis_failed"] = True
+                consensus_info["synthesis_error"] = error_detail
+        else:
+            # Single successful provider - use its content directly (no synthesis needed)
+            review_content = result.primary_content
     else:  # pragma: no cover - defensive branch
         logger.error("Unknown consultation result type: %s", type(result))
         return asdict(
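After this change, `consensus_info` carries per-provider detail rather than bare counts. A hypothetical value after a two-provider run with a successful synthesis pass (field names come from the hunk; values are illustrative):

```python
consensus_info = {
    "providers_consulted": ["claude", "gemini"],
    "successful": 2,
    "failed": 0,
    "successful_providers": ["claude", "gemini"],
    "failed_providers": [],
    "synthesis_provider": "claude",
}
# On a failed synthesis, the fallback branch instead adds:
#   consensus_info["synthesis_failed"] = True
#   consensus_info["synthesis_error"] = "empty response"  # or similar detail
```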
@@ -444,6 +554,8 @@ def perform_plan_review(
         "llm_status": llm_status,
         "provider_used": provider_used,
     }
+    if provider_reviews:
+        response_data["provider_reviews"] = provider_reviews
     if consensus_info:
         response_data["consensus"] = consensus_info

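When the multi-model branch ran, the response also points at the per-provider review files it wrote. A hypothetical payload fragment (all values illustrative):

```python
response_data = {
    "llm_status": "ok",        # assumed status value for this sketch
    "provider_used": "claude",
    "provider_reviews": [
        {"provider_id": "claude", "path": "specs/.plan-reviews/my-plan-full-claude.md"},
        {"provider_id": "gemini", "path": "specs/.plan-reviews/my-plan-full-gemini.md"},
    ],
}
```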