foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
@@ -45,6 +45,49 @@ def _build_spec_requirements(
     return "\n".join(lines) if lines else "*No requirements available*"
 
 
+def _split_file_paths(value: Any) -> List[str]:
+    if value is None:
+        return []
+    if isinstance(value, list):
+        parts: List[str] = []
+        for item in value:
+            parts.extend(_split_file_paths(item))
+        return parts
+    if isinstance(value, str):
+        segments = [part.strip() for part in value.split(",")]
+        return [segment for segment in segments if segment]
+    return [str(value)]
+
+
+def _normalize_for_comparison(path_value: str, workspace_root: Optional[Path]) -> str:
+    raw_path = Path(path_value)
+    if raw_path.is_absolute() and workspace_root:
+        try:
+            raw_path = raw_path.relative_to(workspace_root)
+        except ValueError:
+            pass
+    if workspace_root and raw_path.parts and raw_path.parts[0] == workspace_root.name:
+        raw_path = Path(*raw_path.parts[1:])
+    return raw_path.as_posix()
+
+
+def _resolve_path(path_value: str, workspace_root: Optional[Path]) -> Path:
+    raw_path = Path(path_value)
+    candidates: List[Path] = []
+    if raw_path.is_absolute():
+        candidates.append(raw_path)
+    else:
+        candidates.append(raw_path)
+        if workspace_root:
+            candidates.append(workspace_root / raw_path)
+            if raw_path.parts and raw_path.parts[0] == workspace_root.name:
+                candidates.append(workspace_root / Path(*raw_path.parts[1:]))
+    for candidate in candidates:
+        if candidate.exists():
+            return candidate
+    return candidates[0] if candidates else raw_path
+
+
 def _build_implementation_artifacts(
     spec_data: Dict[str, Any],
     task_id: Optional[str],
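The three helpers added above determine how review file lists get flattened and matched against the workspace. A minimal sketch of the behavior implied by their definitions (illustrative only; the expected values are inferred from the code in this hunk, and the workspace root is a hypothetical example):

```python
# Illustrative sketch, assuming the helper definitions shown in the hunk above.
from pathlib import Path

# Comma-separated strings and nested lists are flattened into individual paths.
assert _split_file_paths("src/a.py, src/b.py") == ["src/a.py", "src/b.py"]
assert _split_file_paths(["src/a.py", "src/b.py, src/c.py"]) == [
    "src/a.py",
    "src/b.py",
    "src/c.py",
]

# Absolute paths and paths that repeat the workspace directory name both
# collapse to the same workspace-relative form used for git-diff comparison.
root = Path("/repo/myproj")  # hypothetical workspace root
assert _normalize_for_comparison("/repo/myproj/src/a.py", root) == "src/a.py"
assert _normalize_for_comparison("myproj/src/a.py", root) == "src/a.py"
```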
@@ -52,21 +95,32 @@ def _build_implementation_artifacts(
     files: Optional[List[str]],
     incremental: bool,
     base_branch: str,
+    workspace_root: Optional[Path] = None,
 ) -> str:
     lines: list[str] = []
     file_paths: list[str] = []
+    if workspace_root is not None and not isinstance(workspace_root, Path):
+        workspace_root = Path(str(workspace_root))
     if files:
-        file_paths = list(files)
+        file_paths = _split_file_paths(files)
     elif task_id:
         task = _find_task(spec_data, task_id)
         if task and task.get("metadata", {}).get("file_path"):
-            file_paths = [task["metadata"]["file_path"]]
+            file_paths = _split_file_paths(task["metadata"]["file_path"])
     elif phase_id:
         phase = _find_phase(spec_data, phase_id)
         if phase:
             for child in _get_child_nodes(spec_data, phase):
                 if child.get("metadata", {}).get("file_path"):
-                    file_paths.append(child["metadata"]["file_path"])
+                    file_paths.extend(_split_file_paths(child["metadata"]["file_path"]))
+    if file_paths:
+        deduped: List[str] = []
+        seen = set()
+        for file_path in file_paths:
+            if file_path not in seen:
+                seen.add(file_path)
+                deduped.append(file_path)
+        file_paths = deduped
     if incremental:
         try:
             import subprocess
@@ -82,16 +136,25 @@ def _build_implementation_artifacts(
                 result.stdout.strip().split("\n") if result.stdout else []
             )
             if file_paths:
-                file_paths = [path for path in file_paths if path in changed_files]
+                changed_set = {
+                    _normalize_for_comparison(path, workspace_root)
+                    for path in changed_files
+                    if path
+                }
+                file_paths = [
+                    path
+                    for path in file_paths
+                    if _normalize_for_comparison(path, workspace_root) in changed_set
+                ]
             else:
-                file_paths = changed_files
+                file_paths = [path for path in changed_files if path]
             lines.append(
                 f"*Incremental review: {len(file_paths)} changed files since {base_branch}*\n"
             )
         except Exception:
             lines.append(f"*Warning: Could not get git diff from {base_branch}*\n")
     for file_path in file_paths[:5]:
-        path = Path(file_path)
+        path = _resolve_path(file_path, workspace_root)
         if path.exists():
             try:
                 content = path.read_text(encoding="utf-8")
@@ -13,9 +13,8 @@ from typing import Any, Dict, List, Optional, cast
 
 from mcp.server.fastmcp import FastMCP
 
-from foundry_mcp.config import ServerConfig
+from foundry_mcp.config import ServerConfig, _PACKAGE_VERSION
 from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
-from foundry_mcp.core.feature_flags import FeatureFlag, FlagState, get_flag_service
 from foundry_mcp.core.naming import canonical_tool
 from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
 from foundry_mcp.core.responses import (
@@ -32,28 +31,68 @@ from foundry_mcp.tools.unified.router import (
 
 logger = logging.getLogger(__name__)
 _metrics = get_metrics()
-_flag_service = get_flag_service()
-try:
-    _flag_service.register(
-        FeatureFlag(
-            name="environment_tools",
-            description="Environment readiness and workspace tooling",
-            state=FlagState.BETA,
-            default_enabled=True,
-        )
-    )
-except ValueError:
-    pass
 
-_DEFAULT_TOML_CONTENT = """[workspace]
+_DEFAULT_TOML_TEMPLATE = """[workspace]
 specs_dir = "./specs"
 
+[logging]
+level = "INFO"
+structured = true
+
+[tools]
+# Disable tools to reduce context window usage
+# Available: health, plan, pr, error, metrics, journal, authoring, review,
+#            spec, task, provider, environment, lifecycle, verification,
+#            server, test, research
+disabled_tools = ["error", "metrics", "health"]
+
 [workflow]
 mode = "single"
 auto_validate = true
-
-[logging]
-level = "INFO"
+journal_enabled = true
+
+[implement]
+# Default flags for /implement command (can be overridden via CLI flags)
+auto = false      # --auto: skip prompts between tasks
+delegate = false  # --delegate: use subagent(s) for implementation
+parallel = false  # --parallel: run subagents concurrently (implies delegate)
+
+[consultation]
+# priority = []  # Appended by setup based on detected providers
+default_timeout = 300
+
+[research]
+# Research tool configuration (chat, consensus, thinkdeep, ideate, deep)
+# default_provider = "[cli]provider:model"  # Appended by setup
+# consensus_providers = []  # Appended by setup (same as consultation.priority)
+max_retries = 2
+retry_delay = 5.0
+fallback_enabled = true
+cache_ttl = 3600
+
+[research.deep]
+# Deep research workflow settings
+max_iterations = 3
+max_sub_queries = 5
+max_sources_per_query = 5
+follow_links = true
+max_concurrent = 3
+timeout_per_operation = 120
+
+[consultation.workflows.fidelity_review]
+min_models = 2
+timeout_override = 600.0
+default_review_type = "full"
+
+[consultation.workflows.plan_review]
+min_models = 2
+timeout_override = 180.0
+default_review_type = "full"
+
+[consultation.workflows.markdown_plan_review]
+min_models = 2
+timeout_override = 180.0
+default_review_type = "full"
 """
 
 
@@ -141,11 +180,16 @@ def _update_permissions(
     return {"changes": changes}
 
 
+def _get_default_toml_content() -> str:
+    """Get default TOML content with current package version."""
+    return _DEFAULT_TOML_TEMPLATE.format(version=_PACKAGE_VERSION)
+
+
 def _write_default_toml(toml_path: Path) -> None:
     """Write default foundry-mcp.toml configuration file."""
 
     with open(toml_path, "w") as handle:
-        handle.write(_DEFAULT_TOML_CONTENT)
+        handle.write(_get_default_toml_content())
 
 
 def _init_specs_directory(base_path: Path, dry_run: bool) -> Dict[str, Any]:
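For reference, the config that `_write_default_toml` produces can be read back with the standard-library TOML parser. A hedged sketch using values from the template above (the file is assumed to live in the current working directory; not an API of foundry-mcp itself):

```python
# Illustrative only: inspecting a freshly written foundry-mcp.toml.
# tomllib ships with Python 3.11+.
import tomllib

with open("foundry-mcp.toml", "rb") as fh:
    cfg = tomllib.load(fh)

print(cfg["tools"]["disabled_tools"])          # ['error', 'metrics', 'health']
print(cfg["research"]["max_retries"])          # 2
print(cfg["consultation"]["default_timeout"])  # 300
```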
@@ -184,7 +228,9 @@ _ACTION_SUMMARY = {
     "verify-env": "Validate runtimes, packages, and workspace environment",
     "init": "Initialize the standard specs/ workspace structure",
     "detect": "Detect repository topology (project type, specs/docs)",
+    "detect-test-runner": "Detect appropriate test runner for the project",
     "setup": "Complete SDD setup with permissions + config",
+    "get-config": "Read configuration sections from foundry-mcp.toml",
 }
 
 
@@ -197,19 +243,8 @@ def _request_id() -> str:
 
 
 def _feature_flag_blocked(request_id: str) -> Optional[dict]:
-    if _flag_service.is_enabled("environment_tools"):
-        return None
-
-    return asdict(
-        error_response(
-            "Environment tools are disabled by feature flag",
-            error_code=ErrorCode.FEATURE_DISABLED,
-            error_type=ErrorType.FEATURE_FLAG,
-            data={"feature": "environment_tools"},
-            remediation="Enable the 'environment_tools' feature flag to call environment actions.",
-            request_id=request_id,
-        )
-    )
+    # Feature flags disabled - always allow
+    return None
 
 
 
@@ -545,6 +580,190 @@ def _handle_detect_topology(
     )
 
 
+def _handle_detect_test_runner(
+    *,
+    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
+    path: Optional[str] = None,
+    **_: Any,
+) -> dict:
+    """Detect appropriate test runner based on project type and configuration files.
+
+    Returns a structured response with detected runners, confidence levels, and
+    a recommended default runner.
+
+    Detection rules:
+    - Python: pyproject.toml, setup.py, requirements.txt, Pipfile → pytest
+    - Go: go.mod → go
+    - Jest: jest.config.* or package.json with "jest" key → jest (precedence over npm)
+    - Node: package.json with "test" script → npm
+    - Rust: Cargo.toml + Makefile present → make
+    """
+    request_id = _request_id()
+    blocked = _feature_flag_blocked(request_id)
+    if blocked:
+        return blocked
+
+    if path is not None and not isinstance(path, str):
+        return _validation_error(
+            action="detect-test-runner",
+            field="path",
+            message="Directory path must be a string",
+            request_id=request_id,
+        )
+
+    metric_key = _metric_name("detect-test-runner")
+    try:
+        base_path = Path(path) if path else Path.cwd()
+
+        detected_runners: List[Dict[str, Any]] = []
+
+        # Python detection (highest precedence for Python projects)
+        python_primary = ["pyproject.toml", "setup.py"]
+        python_secondary = ["requirements.txt", "Pipfile"]
+
+        for marker in python_primary:
+            if (base_path / marker).exists():
+                detected_runners.append({
+                    "runner_name": "pytest",
+                    "project_type": "python",
+                    "confidence": "high",
+                    "reason": f"{marker} found",
+                })
+                break
+        else:
+            # Check secondary markers only if no primary found
+            for marker in python_secondary:
+                if (base_path / marker).exists():
+                    detected_runners.append({
+                        "runner_name": "pytest",
+                        "project_type": "python",
+                        "confidence": "medium",
+                        "reason": f"{marker} found",
+                    })
+                    break
+
+        # Go detection
+        if (base_path / "go.mod").exists():
+            detected_runners.append({
+                "runner_name": "go",
+                "project_type": "go",
+                "confidence": "high",
+                "reason": "go.mod found",
+            })
+
+        # Node detection - Jest takes precedence over npm
+        jest_configs = [
+            "jest.config.js",
+            "jest.config.ts",
+            "jest.config.mjs",
+            "jest.config.cjs",
+            "jest.config.json",
+        ]
+
+        jest_detected = False
+        for jest_config in jest_configs:
+            if (base_path / jest_config).exists():
+                detected_runners.append({
+                    "runner_name": "jest",
+                    "project_type": "node",
+                    "confidence": "high",
+                    "reason": f"{jest_config} found",
+                })
+                jest_detected = True
+                break
+
+        # Check package.json for jest config or test script
+        package_json_path = base_path / "package.json"
+        if package_json_path.exists():
+            try:
+                with open(package_json_path, "r") as f:
+                    pkg = json.load(f)
+
+                # Jest config in package.json takes precedence
+                if not jest_detected and "jest" in pkg:
+                    detected_runners.append({
+                        "runner_name": "jest",
+                        "project_type": "node",
+                        "confidence": "high",
+                        "reason": "jest key in package.json",
+                    })
+                    jest_detected = True
+
+                # npm test script (only if jest not already detected)
+                if not jest_detected:
+                    scripts = pkg.get("scripts", {})
+                    if "test" in scripts:
+                        detected_runners.append({
+                            "runner_name": "npm",
+                            "project_type": "node",
+                            "confidence": "high",
+                            "reason": "test script in package.json",
+                        })
+            except (json.JSONDecodeError, OSError):
+                # If package.json is invalid, skip Node detection
+                pass
+
+        # Rust detection - only if BOTH Cargo.toml and Makefile exist
+        cargo_exists = (base_path / "Cargo.toml").exists()
+        makefile_exists = (base_path / "Makefile").exists() or (
+            base_path / "makefile"
+        ).exists()
+
+        if cargo_exists and makefile_exists:
+            detected_runners.append({
+                "runner_name": "make",
+                "project_type": "rust",
+                "confidence": "medium",
+                "reason": "Cargo.toml + Makefile found",
+            })
+
+        # Determine recommended default based on precedence order from plan
+        # Priority: python (1) > go (2) > jest (3) > npm (4) > make (5)
+        precedence_order = ["pytest", "go", "jest", "npm", "make"]
+        recommended_default: Optional[str] = None
+
+        for runner_name in precedence_order:
+            for runner in detected_runners:
+                if runner["runner_name"] == runner_name:
+                    recommended_default = runner_name
+                    break
+            if recommended_default:
+                break
+
+        data: Dict[str, Any] = {
+            "detected_runners": detected_runners,
+            "recommended_default": recommended_default,
+        }
+
+        warnings: List[str] = []
+        if not detected_runners:
+            warnings.append(
+                "No test runners detected. Configure [test] section manually in "
+                "foundry-mcp.toml if tests are needed."
+            )
+
+        _metrics.counter(metric_key, labels={"status": "success"})
+        return asdict(
+            success_response(
+                data=data,
+                warnings=warnings or None,
+                request_id=request_id,
+            )
+        )
+    except Exception as exc:
+        logger.exception("Error detecting test runner")
+        _metrics.counter(metric_key, labels={"status": "error"})
+        return asdict(
+            error_response(
+                f"Failed to detect test runner: {exc}",
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                remediation="Verify the directory exists and retry",
+                request_id=request_id,
+            )
+        )
+
+
 def _handle_verify_environment(
     *,
     config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
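To make the new action concrete, this is the data payload one would expect `detect-test-runner` to produce for a repository that contains both pyproject.toml and go.mod. The dictionary shape is taken from the handler above; the surrounding success_response envelope is omitted, and this is a sketch rather than captured output:

```python
# Illustrative expected payload (shape from _handle_detect_test_runner above;
# not real captured output from the package).
expected_data = {
    "detected_runners": [
        {
            "runner_name": "pytest",
            "project_type": "python",
            "confidence": "high",
            "reason": "pyproject.toml found",
        },
        {
            "runner_name": "go",
            "project_type": "go",
            "confidence": "high",
            "reason": "go.mod found",
        },
    ],
    # pytest outranks go in the precedence order ["pytest", "go", "jest", "npm", "make"]
    "recommended_default": "pytest",
}
```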
@@ -837,6 +1056,169 @@ def _handle_setup(
     )
 
 
+def _handle_get_config(
+    *,
+    config: ServerConfig,  # noqa: ARG001 - config object available but we read TOML directly
+    sections: Optional[List[str]] = None,
+    key: Optional[str] = None,
+    **_: Any,
+) -> dict:
+    """Read configuration sections from foundry-mcp.toml.
+
+    Returns the requested sections from the TOML config file.
+    Supported sections: implement, git.
+
+    Args:
+        sections: List of section names to return (default: all supported sections)
+        key: Specific key within section (only valid when requesting single section)
+    """
+    import tomllib
+
+    request_id = _request_id()
+    blocked = _feature_flag_blocked(request_id)
+    if blocked:
+        return blocked
+
+    # Validate sections parameter
+    supported_sections = {"implement", "git"}
+    if sections is not None:
+        if not isinstance(sections, list):
+            return _validation_error(
+                action="get-config",
+                field="sections",
+                message="Expected a list of section names",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        invalid = set(sections) - supported_sections
+        if invalid:
+            return _validation_error(
+                action="get-config",
+                field="sections",
+                message=f"Unsupported sections: {', '.join(sorted(invalid))}. Supported: {', '.join(sorted(supported_sections))}",
+                request_id=request_id,
+            )
+
+    # Validate key parameter
+    if key is not None:
+        if not isinstance(key, str):
+            return _validation_error(
+                action="get-config",
+                field="key",
+                message="Expected a string",
+                request_id=request_id,
+                code=ErrorCode.INVALID_FORMAT,
+            )
+        if sections is None or len(sections) != 1:
+            return _validation_error(
+                action="get-config",
+                field="key",
+                message="The 'key' parameter is only valid when requesting exactly one section",
+                request_id=request_id,
+            )
+
+    metric_key = _metric_name("get-config")
+    try:
+        # Find the TOML config file
+        toml_path = None
+        for candidate in ["foundry-mcp.toml", ".foundry-mcp.toml"]:
+            if Path(candidate).exists():
+                toml_path = Path(candidate)
+                break
+
+        if not toml_path:
+            _metrics.counter(metric_key, labels={"status": "not_found"})
+            return asdict(
+                error_response(
+                    "No foundry-mcp.toml config file found",
+                    error_code=ErrorCode.NOT_FOUND,
+                    error_type=ErrorType.NOT_FOUND,
+                    remediation="Run environment(action=setup) to create the config file",
+                    request_id=request_id,
+                )
+            )
+
+        # Read and parse TOML
+        with open(toml_path, "rb") as f:
+            data = tomllib.load(f)
+
+        # Determine which sections to return
+        requested = set(sections) if sections else supported_sections
+
+        # Build result with only supported sections
+        result: Dict[str, Any] = {}
+
+        if "implement" in requested and "implement" in data:
+            impl_data = data["implement"]
+            result["implement"] = {
+                "auto": impl_data.get("auto", False),
+                "delegate": impl_data.get("delegate", False),
+                "parallel": impl_data.get("parallel", False),
+            }
+
+        if "git" in requested and "git" in data:
+            git_data = data["git"]
+            result["git"] = {
+                "enabled": git_data.get("enabled", True),
+                "auto_commit": git_data.get("auto_commit", False),
+                "auto_push": git_data.get("auto_push", False),
+                "auto_pr": git_data.get("auto_pr", False),
+                "commit_cadence": git_data.get("commit_cadence", "task"),
+            }
+
+        # If sections were requested but not found, include them as empty/defaults
+        for section in requested:
+            if section not in result:
+                if section == "implement":
+                    result["implement"] = {
+                        "auto": False,
+                        "delegate": False,
+                        "parallel": False,
+                    }
+                elif section == "git":
+                    result["git"] = {
+                        "enabled": True,
+                        "auto_commit": False,
+                        "auto_push": False,
+                        "auto_pr": False,
+                        "commit_cadence": "task",
+                    }
+
+        # If a specific key was requested, extract just that value
+        if key is not None:
+            section_name = sections[0]  # Already validated to be exactly one section
+            section_data = result.get(section_name, {})
+            if key not in section_data:
+                return _validation_error(
+                    action="get-config",
+                    field="key",
+                    message=f"Key '{key}' not found in section '{section_name}'",
+                    request_id=request_id,
+                    code=ErrorCode.NOT_FOUND,
+                )
+            result = {section_name: {key: section_data[key]}}
+
+        _metrics.counter(metric_key, labels={"status": "success"})
+        return asdict(
+            success_response(
+                data={"sections": result, "config_file": str(toml_path)},
+                request_id=request_id,
+            )
+        )
+    except Exception as exc:
+        logger.exception("Error reading config")
+        _metrics.counter(metric_key, labels={"status": "error"})
+        return asdict(
+            error_response(
+                f"Failed to read config: {exc}",
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                remediation="Check foundry-mcp.toml syntax and retry",
+                request_id=request_id,
+            )
+        )
+
+
 _ENVIRONMENT_ROUTER = ActionRouter(
     tool_name="environment",
     actions=[
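Similarly, a hedged sketch of what `get-config` returns when a single section and key are requested, assuming a foundry-mcp.toml whose [implement] table sets auto = true (the response envelope is omitted; values are inferred from the handler above, not captured output):

```python
# Illustrative only: data payload for
#   environment(action="get-config", sections=["implement"], key="auto")
# assuming [implement] auto = true in foundry-mcp.toml.
expected_data = {
    "sections": {"implement": {"auto": True}},
    "config_file": "foundry-mcp.toml",
}

# Requesting a key across multiple sections is rejected by the validation above:
#   environment(action="get-config", sections=["implement", "git"], key="auto")
# returns a validation error ("only valid when requesting exactly one section").
```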
@@ -868,12 +1250,28 @@ _ENVIRONMENT_ROUTER = ActionRouter(
             summary=_ACTION_SUMMARY["detect"],
             aliases=("sdd-detect-topology", "sdd_detect_topology"),
         ),
+        ActionDefinition(
+            name="detect-test-runner",
+            handler=_handle_detect_test_runner,
+            summary=_ACTION_SUMMARY["detect-test-runner"],
+            aliases=(
+                "detect_test_runner",
+                "sdd-detect-test-runner",
+                "sdd_detect_test_runner",
+            ),
+        ),
         ActionDefinition(
             name="setup",
            handler=_handle_setup,
             summary=_ACTION_SUMMARY["setup"],
             aliases=("sdd-setup", "sdd_setup"),
         ),
+        ActionDefinition(
+            name="get-config",
+            handler=_handle_get_config,
+            summary=_ACTION_SUMMARY["get-config"],
+            aliases=("config", "read-config", "get_config"),
+        ),
     ],
 )
 
@@ -914,6 +1312,8 @@ def register_unified_environment_tool(mcp: FastMCP, config: ServerConfig) -> Non
         permissions_preset: str = "full",
         create_toml: bool = True,
         dry_run: bool = False,
+        sections: Optional[List[str]] = None,
+        key: Optional[str] = None,
     ) -> dict:
         payload = {
             "path": path,
@@ -926,6 +1326,8 @@ def register_unified_environment_tool(mcp: FastMCP, config: ServerConfig) -> Non
             "permissions_preset": permissions_preset,
             "create_toml": create_toml,
             "dry_run": dry_run,
+            "sections": sections,
+            "key": key,
         }
         return _dispatch_environment_action(
             action=action, payload=payload, config=config
@@ -332,7 +332,24 @@ def perform_error_cleanup(
 
 
 def _handle_error_list(*, config: ServerConfig, **payload: Any) -> dict:
-    return perform_error_list(config=config, **payload)
+    # Filter out parameters not accepted by perform_error_list
+    filtered_payload = {
+        k: v
+        for k, v in payload.items()
+        if k
+        in (
+            "tool_name",
+            "error_code",
+            "error_type",
+            "fingerprint",
+            "provider_id",
+            "since",
+            "until",
+            "limit",
+            "cursor",
+        )
+    }
+    return perform_error_list(config=config, **filtered_payload)
 
 
 def _handle_error_get(*, config: ServerConfig, **payload: Any) -> dict:
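The net effect of the change above is that extra keys in the payload no longer reach perform_error_list. A small sketch of the filtering (the "action" key is a hypothetical example of a dropped parameter, not something confirmed by this diff):

```python
# Illustrative only: keys outside the allow-list are silently dropped before
# delegation ("action" is a hypothetical extra passed along by the dispatcher).
payload = {"action": "list", "limit": 10}
allowed = (
    "tool_name", "error_code", "error_type", "fingerprint",
    "provider_id", "since", "until", "limit", "cursor",
)
filtered = {k: v for k, v in payload.items() if k in allowed}
assert filtered == {"limit": 10}
```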