foundry_mcp-0.8.22-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,1341 @@
"""Unified environment tool with action routing and feature-flag enforcement."""

from __future__ import annotations

import json
import logging
import shutil
import subprocess
import sys
from dataclasses import asdict
from pathlib import Path
from typing import Any, Dict, List, Optional, cast

from mcp.server.fastmcp import FastMCP

from foundry_mcp.config import ServerConfig, _PACKAGE_VERSION
from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
from foundry_mcp.core.naming import canonical_tool
from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
from foundry_mcp.core.responses import (
    ErrorCode,
    ErrorType,
    error_response,
    success_response,
)
from foundry_mcp.tools.unified.router import (
    ActionDefinition,
    ActionRouter,
    ActionRouterError,
)

logger = logging.getLogger(__name__)
_metrics = get_metrics()

_DEFAULT_TOML_TEMPLATE = """[workspace]
specs_dir = "./specs"

[logging]
level = "INFO"
structured = true

[tools]
# Disable tools to reduce context window usage
# Available: health, plan, pr, error, metrics, journal, authoring, review,
#            spec, task, provider, environment, lifecycle, verification,
#            server, test, research
disabled_tools = ["error", "metrics", "health"]

[workflow]
mode = "single"
auto_validate = true
journal_enabled = true

[implement]
# Default flags for /implement command (can be overridden via CLI flags)
auto = false  # --auto: skip prompts between tasks
delegate = false  # --delegate: use subagent(s) for implementation
parallel = false  # --parallel: run subagents concurrently (implies delegate)

[consultation]
# priority = []  # Appended by setup based on detected providers
default_timeout = 360

[research]
# Research tool configuration (chat, consensus, thinkdeep, ideate, deep)
# default_provider = "[cli]provider:model"  # Appended by setup
# consensus_providers = []  # Appended by setup (same as consultation.priority)
max_retries = 2
retry_delay = 5.0
fallback_enabled = true
cache_ttl = 3600

[research.deep]
# Deep research workflow settings
max_iterations = 3
max_sub_queries = 5
max_sources_per_query = 5
follow_links = true
max_concurrent = 3
timeout_per_operation = 360

[consultation.workflows.fidelity_review]
min_models = 2
timeout_override = 600.0
default_review_type = "full"

[consultation.workflows.plan_review]
min_models = 2
timeout_override = 600.0
default_review_type = "full"

[consultation.workflows.markdown_plan_review]
min_models = 2
timeout_override = 600.0
default_review_type = "full"
"""
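# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# The template above is plain TOML, so its shape can be sanity-checked with
# the standard library's parser (Python 3.11+), the same tomllib module the
# get-config handler imports later in this file. A minimal check over an
# abbreviated copy of the template:
import tomllib

_SAMPLE = """
[workflow]
mode = "single"
auto_validate = true

[research.deep]
max_iterations = 3
"""

_parsed = tomllib.loads(_SAMPLE)
assert _parsed["workflow"]["mode"] == "single"
assert _parsed["research"]["deep"]["max_iterations"] == 3
# -----------------------------------------------------------------------------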


# ---------------------------------------------------------------------------
# Helper functions used by unified surface
# ---------------------------------------------------------------------------


def _update_permissions(
    settings_file: Path, preset: str, dry_run: bool
) -> Dict[str, Any]:
    """Update .claude/settings.local.json with additive permission merge."""

    changes: List[str] = []
    preset_perms = {
        "minimal": [
            "mcp__foundry-mcp__server",
            "mcp__foundry-mcp__spec",
            "mcp__foundry-mcp__task",
        ],
        "standard": [
            "mcp__foundry-mcp__authoring",
            "mcp__foundry-mcp__environment",
            "mcp__foundry-mcp__journal",
            "mcp__foundry-mcp__lifecycle",
            "mcp__foundry-mcp__review",
            "mcp__foundry-mcp__server",
            "mcp__foundry-mcp__spec",
            "mcp__foundry-mcp__task",
            "mcp__foundry-mcp__test",
            "Read(//**/specs/**)",
            "Write(//**/specs/active/**)",
            "Write(//**/specs/pending/**)",
            "Edit(//**/specs/active/**)",
            "Edit(//**/specs/pending/**)",
        ],
        "full": [
            "mcp__foundry-mcp__*",
            "Read(//**/specs/**)",
            "Write(//**/specs/**)",
            "Edit(//**/specs/**)",
        ],
    }[preset]

    if settings_file.exists():
        with open(settings_file, "r") as handle:
            settings = cast(Dict[str, Any], json.load(handle))
    else:
        settings = cast(
            Dict[str, Any], {"permissions": {"allow": [], "deny": [], "ask": []}}
        )
        changes.append(f"Created {settings_file}")

    permissions_cfg = settings.get("permissions")
    if not isinstance(permissions_cfg, dict):
        permissions_cfg = {"allow": [], "deny": [], "ask": []}
    settings["permissions"] = permissions_cfg

    allow_list = permissions_cfg.get("allow")
    if not isinstance(allow_list, list):
        allow_list = []
    permissions_cfg["allow"] = allow_list

    existing = set(allow_list)
    new_perms = set(preset_perms) - existing

    if new_perms:
        allow_list.extend(sorted(new_perms))
        changes.append(f"Added {len(new_perms)} permissions to allow list")

    settings["enableAllProjectMcpServers"] = True
    enabled_servers = settings.get("enabledMcpjsonServers")
    if not isinstance(enabled_servers, list):
        enabled_servers = []
    settings["enabledMcpjsonServers"] = enabled_servers
    if "foundry-mcp" not in enabled_servers:
        enabled_servers.append("foundry-mcp")
        changes.append("Enabled foundry-mcp server")

    if not dry_run and changes:
        settings_file.parent.mkdir(parents=True, exist_ok=True)
        with open(settings_file, "w") as handle:
            json.dump(settings, handle, indent=2)

    return {"changes": changes}


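# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# _update_permissions above merges a preset additively: entries the user
# already granted are kept, and only missing permissions are appended in
# sorted order. The same merge idea on plain dicts, with no file I/O:
preset = ["mcp__foundry-mcp__server", "mcp__foundry-mcp__spec"]
settings = {"permissions": {"allow": ["Read(//**/specs/**)"], "deny": [], "ask": []}}

allow = settings["permissions"]["allow"]
allow.extend(sorted(set(preset) - set(allow)))  # additive, never destructive

assert allow == [
    "Read(//**/specs/**)",
    "mcp__foundry-mcp__server",
    "mcp__foundry-mcp__spec",
]
# -----------------------------------------------------------------------------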
def _get_default_toml_content() -> str:
    """Get default TOML content with current package version."""
    return _DEFAULT_TOML_TEMPLATE.format(version=_PACKAGE_VERSION)


def _write_default_toml(toml_path: Path) -> None:
    """Write default foundry-mcp.toml configuration file."""

    with open(toml_path, "w") as handle:
        handle.write(_get_default_toml_content())


def _init_specs_directory(base_path: Path, dry_run: bool) -> Dict[str, Any]:
    """Initialize specs directory structure."""

    specs_dir = base_path / "specs"
    subdirs = ["active", "pending", "completed", "archived"]
    changes: List[str] = []

    if not dry_run:
        if not specs_dir.exists():
            specs_dir.mkdir(parents=True)
            changes.append(f"Created {specs_dir}")
        for subdir in subdirs:
            subdir_path = specs_dir / subdir
            if not subdir_path.exists():
                subdir_path.mkdir(parents=True)
                changes.append(f"Created {subdir_path}")
    else:
        if not specs_dir.exists():
            changes.append(f"Would create {specs_dir}")
        for subdir in subdirs:
            subdir_path = specs_dir / subdir
            if not subdir_path.exists():
                changes.append(f"Would create {subdir_path}")

    return {"changes": changes}


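# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# With dry_run=True, _init_specs_directory only reports what it would do.
# Usage sketch, assuming the wheel is installed so the module imports:
import tempfile
from pathlib import Path
from foundry_mcp.tools.unified.environment import _init_specs_directory

with tempfile.TemporaryDirectory() as tmp:
    preview = _init_specs_directory(Path(tmp), dry_run=True)
    assert all(c.startswith("Would create") for c in preview["changes"])
    assert not (Path(tmp) / "specs").exists()  # nothing was written
# -----------------------------------------------------------------------------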
# ---------------------------------------------------------------------------
# Unified action helpers
# ---------------------------------------------------------------------------

_ACTION_SUMMARY = {
    "verify-toolchain": "Validate CLI/toolchain availability",
    "verify-env": "Validate runtimes, packages, and workspace environment",
    "init": "Initialize the standard specs/ workspace structure",
    "detect": "Detect repository topology (project type, specs/docs)",
    "detect-test-runner": "Detect appropriate test runner for the project",
    "setup": "Complete SDD setup with permissions + config",
    "get-config": "Read configuration sections from foundry-mcp.toml",
}


def _metric_name(action: str) -> str:
    return f"environment.{action.replace('-', '_')}"


def _request_id() -> str:
    return get_correlation_id() or generate_correlation_id(prefix="environment")


def _feature_flag_blocked(request_id: str) -> Optional[dict]:
    # Feature flags disabled - always allow
    return None


def _validation_error(
    *,
    action: str,
    field: str,
    message: str,
    request_id: str,
    remediation: Optional[str] = None,
    code: ErrorCode = ErrorCode.VALIDATION_ERROR,
) -> dict:
    return asdict(
        error_response(
            f"Invalid field '{field}' for environment.{action}: {message}",
            error_code=code,
            error_type=ErrorType.VALIDATION,
            remediation=remediation,
            details={"field": field, "action": f"environment.{action}"},
            request_id=request_id,
        )
    )

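# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# Action names are kebab-case at the tool surface but snake_case in metric
# keys; _metric_name above does the translation. Equivalent standalone check:
def metric_name(action: str) -> str:
    return f"environment.{action.replace('-', '_')}"

assert metric_name("verify-toolchain") == "environment.verify_toolchain"
assert metric_name("get-config") == "environment.get_config"
# -----------------------------------------------------------------------------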

# ---------------------------------------------------------------------------
# Action handlers
# ---------------------------------------------------------------------------


def _handle_verify_toolchain(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    include_optional: Optional[bool] = True,
    **_: Any,
) -> dict:
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if include_optional is not None and not isinstance(include_optional, bool):
        return _validation_error(
            action="verify-toolchain",
            field="include_optional",
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    include = True if include_optional is None else include_optional
    metric_key = _metric_name("verify-toolchain")

    try:
        required_tools = ["python", "git"]
        optional_tools = ["grep", "cat", "find", "node", "npm"]

        def check_tool(tool_name: str) -> bool:
            return shutil.which(tool_name) is not None

        required_status: Dict[str, bool] = {}
        missing_required: List[str] = []
        for tool in required_tools:
            available = check_tool(tool)
            required_status[tool] = available
            if not available:
                missing_required.append(tool)

        optional_status: Dict[str, bool] = {}
        if include:
            for tool in optional_tools:
                optional_status[tool] = check_tool(tool)

        data: Dict[str, Any] = {
            "required": required_status,
            "all_available": not missing_required,
        }
        if include:
            data["optional"] = optional_status
        if missing_required:
            data["missing"] = missing_required

        warnings: List[str] = []
        if include:
            missing_optional = [
                tool for tool, available in optional_status.items() if not available
            ]
            if missing_optional:
                warnings.append(
                    f"Optional tools not found: {', '.join(sorted(missing_optional))}"
                )

        if missing_required:
            _metrics.counter(metric_key, labels={"status": "missing_required"})
            return asdict(
                error_response(
                    f"Required tools missing: {', '.join(missing_required)}",
                    error_code=ErrorCode.MISSING_REQUIRED,
                    error_type=ErrorType.VALIDATION,
                    data=data,
                    remediation="Install missing tools before continuing with SDD workflows.",
                    request_id=request_id,
                )
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except Exception:
        logger.exception("Error verifying toolchain")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                "Failed to verify toolchain",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check PATH configuration and retry",
                request_id=request_id,
            )
        )


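# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# verify-toolchain is a PATH probe via shutil.which, not a version check. The
# core of the handler, reduced to the standard library:
import shutil

required = ["python", "git"]
optional = ["grep", "cat", "find", "node", "npm"]

required_status = {tool: shutil.which(tool) is not None for tool in required}
optional_status = {tool: shutil.which(tool) is not None for tool in optional}
missing = [tool for tool, ok in required_status.items() if not ok]

print({"required": required_status, "all_available": not missing})
# -----------------------------------------------------------------------------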
def _handle_init_workspace(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    create_subdirs: bool = True,
    **_: Any,
) -> dict:
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="init",
            field="path",
            message="Workspace path must be a string",
            request_id=request_id,
        )
    if not isinstance(create_subdirs, bool):
        return _validation_error(
            action="init",
            field="create_subdirs",
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    metric_key = _metric_name("init")
    try:
        base_path = Path(path) if path else Path.cwd()
        specs_dir = base_path / "specs"
        subdirs = ["active", "pending", "completed", "archived"]

        created_dirs: List[str] = []
        existing_dirs: List[str] = []

        if not specs_dir.exists():
            specs_dir.mkdir(parents=True)
            created_dirs.append(str(specs_dir))
        else:
            existing_dirs.append(str(specs_dir))

        if create_subdirs:
            for subdir in subdirs:
                subdir_path = specs_dir / subdir
                if not subdir_path.exists():
                    subdir_path.mkdir(parents=True)
                    created_dirs.append(str(subdir_path))
                else:
                    existing_dirs.append(str(subdir_path))

        warnings: List[str] = []
        if not created_dirs:
            warnings.append("All directories already existed, no changes made")

        audit_log(
            "workspace_init",
            tool="environment.init",
            path=str(base_path),
            created_count=len(created_dirs),
            success=True,
        )
        _metrics.counter(metric_key, labels={"status": "success"})

        data: Dict[str, Any] = {
            "specs_dir": str(specs_dir),
            "active_dir": str(specs_dir / "active"),
            "created_dirs": created_dirs,
            "existing_dirs": existing_dirs,
        }
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except PermissionError as exc:
        logger.exception("Permission denied during workspace initialization")
        _metrics.counter(metric_key, labels={"status": "forbidden"})
        return asdict(
            error_response(
                f"Permission denied: {exc}",
                error_code=ErrorCode.FORBIDDEN,
                error_type=ErrorType.AUTHORIZATION,
                remediation="Check write permissions for the target directory.",
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error initializing workspace")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to initialize workspace: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Verify the path exists and retry",
                request_id=request_id,
            )
        )


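# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# A successful environment(action="init") leaves this layout under the target
# path; created_dirs vs. existing_dirs in the response distinguish new from
# pre-existing directories:
#
#   specs/
#       active/
#       pending/
#       completed/
#       archived/
#
# The directory creation, reduced to the standard library:
from pathlib import Path

def init_specs(base: Path) -> list[str]:
    created: list[str] = []
    specs = base / "specs"
    for target in [specs] + [specs / s for s in ("active", "pending", "completed", "archived")]:
        if not target.exists():
            target.mkdir(parents=True)
            created.append(str(target))
    return created
# -----------------------------------------------------------------------------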
def _handle_detect_topology(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    **_: Any,
) -> dict:
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="detect",
            field="path",
            message="Directory path must be a string",
            request_id=request_id,
        )

    metric_key = _metric_name("detect")
    try:
        base_path = Path(path) if path else Path.cwd()

        project_type = "unknown"
        detected_files: List[str] = []

        python_markers = ["pyproject.toml", "setup.py", "requirements.txt", "Pipfile"]
        for marker in python_markers:
            if (base_path / marker).exists():
                project_type = "python"
                detected_files.append(marker)
                break

        if project_type == "unknown":
            node_markers = ["package.json", "yarn.lock", "pnpm-lock.yaml"]
            for marker in node_markers:
                if (base_path / marker).exists():
                    project_type = "node"
                    detected_files.append(marker)
                    break

        if project_type == "unknown" and (base_path / "Cargo.toml").exists():
            project_type = "rust"
            detected_files.append("Cargo.toml")

        if project_type == "unknown" and (base_path / "go.mod").exists():
            project_type = "go"
            detected_files.append("go.mod")

        specs_dir = None
        for candidate in ["specs", ".specs", "specifications"]:
            candidate_path = base_path / candidate
            if candidate_path.is_dir():
                specs_dir = str(candidate_path)
                break

        docs_dir = None
        for candidate in ["docs", "documentation", "doc"]:
            candidate_path = base_path / candidate
            if candidate_path.is_dir():
                docs_dir = str(candidate_path)
                break

        has_git = (base_path / ".git").is_dir()

        data: Dict[str, Any] = {
            "project_type": project_type,
            "has_git": has_git,
        }
        if specs_dir:
            data["specs_dir"] = specs_dir
        if docs_dir:
            data["docs_dir"] = docs_dir
        if detected_files:
            data["detected_files"] = detected_files

        warnings: List[str] = []
        if project_type == "unknown":
            warnings.append("Could not detect project type from standard marker files")
        if not specs_dir:
            warnings.append(
                "No specs directory found - run environment(action=init) to create one"
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error detecting topology")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to detect topology: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Verify the directory exists and retry",
                request_id=request_id,
            )
        )


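# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# Topology detection is first-marker-wins, in the fixed order the handler
# checks: python, then node, then rust, then go. Standalone equivalent:
from pathlib import Path

_MARKERS = [
    ("python", ("pyproject.toml", "setup.py", "requirements.txt", "Pipfile")),
    ("node", ("package.json", "yarn.lock", "pnpm-lock.yaml")),
    ("rust", ("Cargo.toml",)),
    ("go", ("go.mod",)),
]

def detect_project_type(base: Path) -> str:
    for project_type, markers in _MARKERS:
        if any((base / m).exists() for m in markers):
            return project_type
    return "unknown"

print(detect_project_type(Path.cwd()))
# -----------------------------------------------------------------------------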
def _handle_detect_test_runner(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    **_: Any,
) -> dict:
    """Detect appropriate test runner based on project type and configuration files.

    Returns a structured response with detected runners, confidence levels, and
    a recommended default runner.

    Detection rules:
    - Python: pyproject.toml, setup.py, requirements.txt, Pipfile → pytest
    - Go: go.mod → go
    - Jest: jest.config.* or package.json with "jest" key → jest (precedence over npm)
    - Node: package.json with "test" script → npm
    - Rust: Cargo.toml + Makefile present → make
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="detect-test-runner",
            field="path",
            message="Directory path must be a string",
            request_id=request_id,
        )

    metric_key = _metric_name("detect-test-runner")
    try:
        base_path = Path(path) if path else Path.cwd()

        detected_runners: List[Dict[str, Any]] = []

        # Python detection (highest precedence for Python projects)
        python_primary = ["pyproject.toml", "setup.py"]
        python_secondary = ["requirements.txt", "Pipfile"]

        for marker in python_primary:
            if (base_path / marker).exists():
                detected_runners.append({
                    "runner_name": "pytest",
                    "project_type": "python",
                    "confidence": "high",
                    "reason": f"{marker} found",
                })
                break
        else:
            # Check secondary markers only if no primary found
            for marker in python_secondary:
                if (base_path / marker).exists():
                    detected_runners.append({
                        "runner_name": "pytest",
                        "project_type": "python",
                        "confidence": "medium",
                        "reason": f"{marker} found",
                    })
                    break

        # Go detection
        if (base_path / "go.mod").exists():
            detected_runners.append({
                "runner_name": "go",
                "project_type": "go",
                "confidence": "high",
                "reason": "go.mod found",
            })

        # Node detection - Jest takes precedence over npm
        jest_configs = [
            "jest.config.js",
            "jest.config.ts",
            "jest.config.mjs",
            "jest.config.cjs",
            "jest.config.json",
        ]

        jest_detected = False
        for jest_config in jest_configs:
            if (base_path / jest_config).exists():
                detected_runners.append({
                    "runner_name": "jest",
                    "project_type": "node",
                    "confidence": "high",
                    "reason": f"{jest_config} found",
                })
                jest_detected = True
                break

        # Check package.json for jest config or test script
        package_json_path = base_path / "package.json"
        if package_json_path.exists():
            try:
                with open(package_json_path, "r") as f:
                    pkg = json.load(f)

                # Jest config in package.json takes precedence
                if not jest_detected and "jest" in pkg:
                    detected_runners.append({
                        "runner_name": "jest",
                        "project_type": "node",
                        "confidence": "high",
                        "reason": "jest key in package.json",
                    })
                    jest_detected = True

                # npm test script (only if jest not already detected)
                if not jest_detected:
                    scripts = pkg.get("scripts", {})
                    if "test" in scripts:
                        detected_runners.append({
                            "runner_name": "npm",
                            "project_type": "node",
                            "confidence": "high",
                            "reason": "test script in package.json",
                        })
            except (json.JSONDecodeError, OSError):
                # If package.json is invalid, skip Node detection
                pass

        # Rust detection - only if BOTH Cargo.toml and Makefile exist
        cargo_exists = (base_path / "Cargo.toml").exists()
        makefile_exists = (base_path / "Makefile").exists() or (
            base_path / "makefile"
        ).exists()

        if cargo_exists and makefile_exists:
            detected_runners.append({
                "runner_name": "make",
                "project_type": "rust",
                "confidence": "medium",
                "reason": "Cargo.toml + Makefile found",
            })

        # Determine recommended default based on precedence order from plan
        # Priority: python (1) > go (2) > jest (3) > npm (4) > make (5)
        precedence_order = ["pytest", "go", "jest", "npm", "make"]
        recommended_default: Optional[str] = None

        for runner_name in precedence_order:
            for runner in detected_runners:
                if runner["runner_name"] == runner_name:
                    recommended_default = runner_name
                    break
            if recommended_default:
                break

        data: Dict[str, Any] = {
            "detected_runners": detected_runners,
            "recommended_default": recommended_default,
        }

        warnings: List[str] = []
        if not detected_runners:
            warnings.append(
                "No test runners detected. Configure [test] section manually in "
                "foundry-mcp.toml if tests are needed."
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error detecting test runner")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to detect test runner: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Verify the directory exists and retry",
                request_id=request_id,
            )
        )


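# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# Several runners can be detected at once (say, a repo with both
# pyproject.toml and package.json); recommended_default is then resolved by
# the fixed precedence list, not by detection order. The resolution step:
detected = [
    {"runner_name": "npm", "confidence": "high"},
    {"runner_name": "pytest", "confidence": "high"},
]
precedence = ["pytest", "go", "jest", "npm", "make"]

recommended = next(
    (name for name in precedence if any(r["runner_name"] == name for r in detected)),
    None,
)
assert recommended == "pytest"  # pytest outranks npm despite detection order
# -----------------------------------------------------------------------------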
def _handle_verify_environment(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    check_python: bool = True,
    check_git: bool = True,
    check_node: bool = False,
    required_packages: Optional[str] = None,
    **_: Any,
) -> dict:
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="verify-env",
            field="path",
            message="Directory path must be a string",
            request_id=request_id,
        )
    for field_name, value in (
        ("check_python", check_python),
        ("check_git", check_git),
        ("check_node", check_node),
    ):
        if not isinstance(value, bool):
            return _validation_error(
                action="verify-env",
                field=field_name,
                message="Expected a boolean value",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    if required_packages is not None and not isinstance(required_packages, str):
        return _validation_error(
            action="verify-env",
            field="required_packages",
            message="Provide a comma-separated string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    metric_key = _metric_name("verify-env")
    try:
        Path(path) if path else Path.cwd()

        runtimes: Dict[str, Any] = {}
        issues: List[str] = []
        packages: Dict[str, bool] = {}

        if check_python:
            python_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            runtimes["python"] = {
                "available": True,
                "version": python_version,
                "executable": sys.executable,
            }
            if sys.version_info < (3, 9):
                issues.append(f"Python 3.9+ required, found {python_version}")

        if check_git:
            git_path = shutil.which("git")
            if git_path:
                try:
                    result = subprocess.run(
                        ["git", "--version"],
                        capture_output=True,
                        text=True,
                        timeout=5,
                    )
                    version_str = result.stdout.strip().replace("git version ", "")
                except Exception:
                    version_str = "unknown"
                runtimes["git"] = {
                    "available": True,
                    "version": version_str,
                    "executable": git_path,
                }
            else:
                runtimes["git"] = {"available": False}
                issues.append("Git not found in PATH")

        if check_node:
            node_path = shutil.which("node")
            if node_path:
                try:
                    result = subprocess.run(
                        ["node", "--version"],
                        capture_output=True,
                        text=True,
                        timeout=5,
                    )
                    node_version = result.stdout.strip()
                except Exception:
                    node_version = "unknown"
                runtimes["node"] = {
                    "available": True,
                    "version": node_version,
                    "executable": node_path,
                }
            else:
                runtimes["node"] = {"available": False}
                issues.append("Node.js not found in PATH")

        if required_packages:
            pkg_list = [
                pkg.strip() for pkg in required_packages.split(",") if pkg.strip()
            ]
            for pkg in pkg_list:
                try:
                    __import__(pkg.replace("-", "_"))
                    packages[pkg] = True
                except ImportError:
                    packages[pkg] = False
                    issues.append(f"Required package not found: {pkg}")

        all_valid = not issues
        data: Dict[str, Any] = {"runtimes": runtimes, "all_valid": all_valid}
        if packages:
            data["packages"] = packages
        if issues:
            data["issues"] = issues

        if not all_valid:
            _metrics.counter(metric_key, labels={"status": "invalid"})
            return asdict(
                error_response(
                    f"Environment validation failed: {len(issues)} issue(s) found",
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    data=data,
                    remediation="Resolve the listed issues and retry the validation.",
                    request_id=request_id,
                )
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error verifying environment", extra={"path": path})
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to verify environment: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check system configuration and retry",
                request_id=request_id,
            )
        )


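# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# required_packages is a comma-separated string; each entry is probed with
# __import__ after mapping dashes to underscores (so "foundry-mcp" is tried
# as "foundry_mcp"). The probe, standalone:
def probe_packages(required_packages: str) -> dict[str, bool]:
    status: dict[str, bool] = {}
    for pkg in (p.strip() for p in required_packages.split(",") if p.strip()):
        try:
            __import__(pkg.replace("-", "_"))
            status[pkg] = True
        except ImportError:
            status[pkg] = False
    return status

print(probe_packages("json, pathlib, not-a-real-package"))
# -> {'json': True, 'pathlib': True, 'not-a-real-package': False}
# -----------------------------------------------------------------------------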
def _handle_setup(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    permissions_preset: str = "full",
    create_toml: bool = True,
    dry_run: bool = False,
    **_: Any,
) -> dict:
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="setup",
            field="path",
            message="Project path must be a string",
            request_id=request_id,
        )
    if permissions_preset not in {"minimal", "standard", "full"}:
        return _validation_error(
            action="setup",
            field="permissions_preset",
            message="Invalid preset. Use 'minimal', 'standard', or 'full'",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    for field_name, value in (("create_toml", create_toml), ("dry_run", dry_run)):
        if not isinstance(value, bool):
            return _validation_error(
                action="setup",
                field=field_name,
                message="Expected a boolean value",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    metric_key = _metric_name("setup")
    try:
        base_path = Path(path) if path else Path.cwd()
        if not base_path.exists():
            return asdict(
                error_response(
                    f"Path does not exist: {base_path}",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Provide a valid project directory path",
                    request_id=request_id,
                )
            )

        changes: List[str] = []
        warnings: List[str] = []

        specs_result = _init_specs_directory(base_path, dry_run)
        changes.extend(specs_result["changes"])

        claude_dir = base_path / ".claude"
        settings_file = claude_dir / "settings.local.json"
        settings_result = _update_permissions(
            settings_file, permissions_preset, dry_run
        )
        changes.extend(settings_result["changes"])

        config_file = None
        if create_toml:
            toml_path = base_path / "foundry-mcp.toml"
            if not toml_path.exists():
                config_file = str(toml_path)
                if not dry_run:
                    _write_default_toml(toml_path)
                changes.append(f"Created {toml_path}")
            else:
                warnings.append("foundry-mcp.toml already exists, skipping")

        audit_log(
            "sdd_setup",
            tool="environment.setup",
            path=str(base_path),
            preset=permissions_preset,
            dry_run=dry_run,
        )
        _metrics.counter(
            metric_key,
            labels={
                "status": "success",
                "preset": permissions_preset,
                "dry_run": str(dry_run),
            },
        )

        return asdict(
            success_response(
                data={
                    "specs_dir": str(base_path / "specs"),
                    "permissions_file": str(settings_file),
                    "config_file": config_file,
                    "changes": changes,
                    "dry_run": dry_run,
                },
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except PermissionError as exc:
        logger.exception("Permission denied during environment setup")
        _metrics.counter(metric_key, labels={"status": "forbidden"})
        return asdict(
            error_response(
                f"Permission denied: {exc}",
                error_code=ErrorCode.FORBIDDEN,
                error_type=ErrorType.AUTHORIZATION,
                remediation="Check write permissions for the target directory.",
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error in environment setup")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Setup failed: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Inspect the logged errors and retry",
                request_id=request_id,
            )
        )


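# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# setup composes the helpers above (specs dirs, .claude permissions, default
# TOML) and honors dry_run. A local preview; the handler's config parameter
# is unused in this version (see its ARG001 noqa), so None is passed here
# purely for illustration, and the top-level "data" key is an assumption
# about the response dataclass produced by success_response:
import tempfile
from foundry_mcp.tools.unified.environment import _handle_setup

with tempfile.TemporaryDirectory() as tmp:
    result = _handle_setup(
        config=None,  # assumption: ignored by this handler today
        path=tmp,
        permissions_preset="standard",
        dry_run=True,
    )
    print(result["data"]["changes"])  # planned changes; nothing written
# -----------------------------------------------------------------------------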
def _handle_get_config(
    *,
    config: ServerConfig,  # noqa: ARG001 - config object available but we read TOML directly
    sections: Optional[List[str]] = None,
    key: Optional[str] = None,
    **_: Any,
) -> dict:
    """Read configuration sections from foundry-mcp.toml.

    Returns the requested sections from the TOML config file.
    Supported sections: implement, git.

    Args:
        sections: List of section names to return (default: all supported sections)
        key: Specific key within section (only valid when requesting single section)
    """
    import tomllib

    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    # Validate sections parameter
    supported_sections = {"implement", "git"}
    if sections is not None:
        if not isinstance(sections, list):
            return _validation_error(
                action="get-config",
                field="sections",
                message="Expected a list of section names",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        invalid = set(sections) - supported_sections
        if invalid:
            return _validation_error(
                action="get-config",
                field="sections",
                message=f"Unsupported sections: {', '.join(sorted(invalid))}. Supported: {', '.join(sorted(supported_sections))}",
                request_id=request_id,
            )

    # Validate key parameter
    if key is not None:
        if not isinstance(key, str):
            return _validation_error(
                action="get-config",
                field="key",
                message="Expected a string",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )
        if sections is None or len(sections) != 1:
            return _validation_error(
                action="get-config",
                field="key",
                message="The 'key' parameter is only valid when requesting exactly one section",
                request_id=request_id,
            )

    metric_key = _metric_name("get-config")
    try:
        # Find the TOML config file
        toml_path = None
        for candidate in ["foundry-mcp.toml", ".foundry-mcp.toml"]:
            if Path(candidate).exists():
                toml_path = Path(candidate)
                break

        if not toml_path:
            _metrics.counter(metric_key, labels={"status": "not_found"})
            return asdict(
                error_response(
                    "No foundry-mcp.toml config file found",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Run environment(action=setup) to create the config file",
                    request_id=request_id,
                )
            )

        # Read and parse TOML
        with open(toml_path, "rb") as f:
            data = tomllib.load(f)

        # Determine which sections to return
        requested = set(sections) if sections else supported_sections

        # Build result with only supported sections
        result: Dict[str, Any] = {}

        if "implement" in requested and "implement" in data:
            impl_data = data["implement"]
            result["implement"] = {
                "auto": impl_data.get("auto", False),
                "delegate": impl_data.get("delegate", False),
                "parallel": impl_data.get("parallel", False),
            }

        if "git" in requested and "git" in data:
            git_data = data["git"]
            result["git"] = {
                "enabled": git_data.get("enabled", True),
                "auto_commit": git_data.get("auto_commit", False),
                "auto_push": git_data.get("auto_push", False),
                "auto_pr": git_data.get("auto_pr", False),
                "commit_cadence": git_data.get("commit_cadence", "task"),
            }

        # If sections were requested but not found, include them as empty/defaults
        for section in requested:
            if section not in result:
                if section == "implement":
                    result["implement"] = {
                        "auto": False,
                        "delegate": False,
                        "parallel": False,
                    }
                elif section == "git":
                    result["git"] = {
                        "enabled": True,
                        "auto_commit": False,
                        "auto_push": False,
                        "auto_pr": False,
                        "commit_cadence": "task",
                    }

        # If a specific key was requested, extract just that value
        if key is not None:
            section_name = sections[0]  # Already validated to be exactly one section
            section_data = result.get(section_name, {})
            if key not in section_data:
                return _validation_error(
                    action="get-config",
                    field="key",
                    message=f"Key '{key}' not found in section '{section_name}'",
                    request_id=request_id,
                    code=ErrorCode.NOT_FOUND,
                )
            result = {section_name: {key: section_data[key]}}

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data={"sections": result, "config_file": str(toml_path)},
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error reading config")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to read config: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check foundry-mcp.toml syntax and retry",
                request_id=request_id,
            )
        )


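# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# get-config reads only the whitelisted [implement] and [git] tables and
# fills defaults for anything absent. The equivalent read with tomllib:
import tomllib

_raw = tomllib.loads("""
[implement]
auto = true
""")

_impl = _raw.get("implement", {})
_section = {
    "auto": _impl.get("auto", False),
    "delegate": _impl.get("delegate", False),
    "parallel": _impl.get("parallel", False),
}
assert _section == {"auto": True, "delegate": False, "parallel": False}
# -----------------------------------------------------------------------------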
_ENVIRONMENT_ROUTER = ActionRouter(
    tool_name="environment",
    actions=[
        ActionDefinition(
            name="verify-toolchain",
            handler=_handle_verify_toolchain,
            summary=_ACTION_SUMMARY["verify-toolchain"],
            aliases=(
                "verify_toolchain",
                "sdd-verify-toolchain",
                "sdd_verify_toolchain",
            ),
        ),
        ActionDefinition(
            name="verify-env",
            handler=_handle_verify_environment,
            summary=_ACTION_SUMMARY["verify-env"],
            aliases=("verify_env", "sdd-verify-environment", "sdd_verify_environment"),
        ),
        ActionDefinition(
            name="init",
            handler=_handle_init_workspace,
            summary=_ACTION_SUMMARY["init"],
            aliases=("sdd-init-workspace", "sdd_init_workspace"),
        ),
        ActionDefinition(
            name="detect",
            handler=_handle_detect_topology,
            summary=_ACTION_SUMMARY["detect"],
            aliases=("sdd-detect-topology", "sdd_detect_topology"),
        ),
        ActionDefinition(
            name="detect-test-runner",
            handler=_handle_detect_test_runner,
            summary=_ACTION_SUMMARY["detect-test-runner"],
            aliases=(
                "detect_test_runner",
                "sdd-detect-test-runner",
                "sdd_detect_test_runner",
            ),
        ),
        ActionDefinition(
            name="setup",
            handler=_handle_setup,
            summary=_ACTION_SUMMARY["setup"],
            aliases=("sdd-setup", "sdd_setup"),
        ),
        ActionDefinition(
            name="get-config",
            handler=_handle_get_config,
            summary=_ACTION_SUMMARY["get-config"],
            aliases=("config", "read-config", "get_config"),
        ),
    ],
)


def _dispatch_environment_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    try:
        return _ENVIRONMENT_ROUTER.dispatch(action=action, config=config, **payload)
    except ActionRouterError as exc:
        request_id = _request_id()
        allowed = ", ".join(exc.allowed_actions)
        return asdict(
            error_response(
                f"Unsupported environment action '{action}'. Allowed actions: {allowed}",
                error_code=ErrorCode.VALIDATION_ERROR,
                error_type=ErrorType.VALIDATION,
                remediation=f"Use one of: {allowed}",
                request_id=request_id,
            )
        )


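# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# Every action registered above carries legacy aliases (e.g. "sdd_setup" for
# "setup"), so pre-consolidation callers keep working. How an ActionRouter-
# style table can resolve them (a sketch; the real router lives in
# foundry_mcp/tools/unified/router.py, which is not shown in this excerpt):
_ALIASES = {
    "sdd-setup": "setup",
    "sdd_setup": "setup",
    "verify_toolchain": "verify-toolchain",
}

def resolve(action: str) -> str:
    return _ALIASES.get(action, action)

assert resolve("sdd_setup") == "setup"
assert resolve("setup") == "setup"
# -----------------------------------------------------------------------------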
def register_unified_environment_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated environment tool."""

    @canonical_tool(mcp, canonical_name="environment")
    @mcp_tool(tool_name="environment", emit_metrics=True, audit=True)
    def environment(  # noqa: PLR0913 - composite signature spanning actions
        action: str,
        path: Optional[str] = None,
        include_optional: Optional[bool] = True,
        create_subdirs: bool = True,
        check_python: bool = True,
        check_git: bool = True,
        check_node: bool = False,
        required_packages: Optional[str] = None,
        permissions_preset: str = "full",
        create_toml: bool = True,
        dry_run: bool = False,
        sections: Optional[List[str]] = None,
        key: Optional[str] = None,
    ) -> dict:
        payload = {
            "path": path,
            "include_optional": include_optional,
            "create_subdirs": create_subdirs,
            "check_python": check_python,
            "check_git": check_git,
            "check_node": check_node,
            "required_packages": required_packages,
            "permissions_preset": permissions_preset,
            "create_toml": create_toml,
            "dry_run": dry_run,
            "sections": sections,
            "key": key,
        }
        return _dispatch_environment_action(
            action=action, payload=payload, config=config
        )

    logger.debug("Registered unified environment tool")


__all__ = [
    "register_unified_environment_tool",
]
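# ---- Editor's aside: illustrative sketch, not part of the packaged file ----
# Wiring the tool into a FastMCP server is a single call. The ServerConfig
# construction below is an assumption (its signature lives in
# foundry_mcp/config.py, which is not shown in this excerpt):
from mcp.server.fastmcp import FastMCP
from foundry_mcp.config import ServerConfig
from foundry_mcp.tools.unified.environment import register_unified_environment_tool

mcp = FastMCP("foundry-mcp")
config = ServerConfig()  # assumption: default construction is accepted
register_unified_environment_tool(mcp, config)
# -----------------------------------------------------------------------------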