foundry-mcp 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135)
  1. foundry_mcp/__init__.py +7 -0
  2. foundry_mcp/cli/__init__.py +80 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +633 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +652 -0
  15. foundry_mcp/cli/commands/session.py +479 -0
  16. foundry_mcp/cli/commands/specs.py +856 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +259 -0
  22. foundry_mcp/cli/flags.py +266 -0
  23. foundry_mcp/cli/logging.py +212 -0
  24. foundry_mcp/cli/main.py +44 -0
  25. foundry_mcp/cli/output.py +122 -0
  26. foundry_mcp/cli/registry.py +110 -0
  27. foundry_mcp/cli/resilience.py +178 -0
  28. foundry_mcp/cli/transcript.py +217 -0
  29. foundry_mcp/config.py +850 -0
  30. foundry_mcp/core/__init__.py +144 -0
  31. foundry_mcp/core/ai_consultation.py +1636 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/feature_flags.py +592 -0
  40. foundry_mcp/core/health.py +749 -0
  41. foundry_mcp/core/journal.py +694 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1350 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +123 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +317 -0
  57. foundry_mcp/core/prometheus.py +577 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +546 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
  61. foundry_mcp/core/prompts/plan_review.py +623 -0
  62. foundry_mcp/core/providers/__init__.py +225 -0
  63. foundry_mcp/core/providers/base.py +476 -0
  64. foundry_mcp/core/providers/claude.py +460 -0
  65. foundry_mcp/core/providers/codex.py +619 -0
  66. foundry_mcp/core/providers/cursor_agent.py +642 -0
  67. foundry_mcp/core/providers/detectors.py +488 -0
  68. foundry_mcp/core/providers/gemini.py +405 -0
  69. foundry_mcp/core/providers/opencode.py +616 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +302 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +729 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/resilience.py +600 -0
  78. foundry_mcp/core/responses.py +934 -0
  79. foundry_mcp/core/review.py +366 -0
  80. foundry_mcp/core/security.py +438 -0
  81. foundry_mcp/core/spec.py +1650 -0
  82. foundry_mcp/core/task.py +1289 -0
  83. foundry_mcp/core/testing.py +450 -0
  84. foundry_mcp/core/validation.py +2081 -0
  85. foundry_mcp/dashboard/__init__.py +32 -0
  86. foundry_mcp/dashboard/app.py +119 -0
  87. foundry_mcp/dashboard/components/__init__.py +17 -0
  88. foundry_mcp/dashboard/components/cards.py +88 -0
  89. foundry_mcp/dashboard/components/charts.py +234 -0
  90. foundry_mcp/dashboard/components/filters.py +136 -0
  91. foundry_mcp/dashboard/components/tables.py +195 -0
  92. foundry_mcp/dashboard/data/__init__.py +11 -0
  93. foundry_mcp/dashboard/data/stores.py +433 -0
  94. foundry_mcp/dashboard/launcher.py +289 -0
  95. foundry_mcp/dashboard/views/__init__.py +12 -0
  96. foundry_mcp/dashboard/views/errors.py +217 -0
  97. foundry_mcp/dashboard/views/metrics.py +174 -0
  98. foundry_mcp/dashboard/views/overview.py +160 -0
  99. foundry_mcp/dashboard/views/providers.py +83 -0
  100. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  101. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  102. foundry_mcp/prompts/__init__.py +9 -0
  103. foundry_mcp/prompts/workflows.py +525 -0
  104. foundry_mcp/resources/__init__.py +9 -0
  105. foundry_mcp/resources/specs.py +591 -0
  106. foundry_mcp/schemas/__init__.py +38 -0
  107. foundry_mcp/schemas/sdd-spec-schema.json +386 -0
  108. foundry_mcp/server.py +164 -0
  109. foundry_mcp/tools/__init__.py +10 -0
  110. foundry_mcp/tools/unified/__init__.py +71 -0
  111. foundry_mcp/tools/unified/authoring.py +1487 -0
  112. foundry_mcp/tools/unified/context_helpers.py +98 -0
  113. foundry_mcp/tools/unified/documentation_helpers.py +198 -0
  114. foundry_mcp/tools/unified/environment.py +939 -0
  115. foundry_mcp/tools/unified/error.py +462 -0
  116. foundry_mcp/tools/unified/health.py +225 -0
  117. foundry_mcp/tools/unified/journal.py +841 -0
  118. foundry_mcp/tools/unified/lifecycle.py +632 -0
  119. foundry_mcp/tools/unified/metrics.py +777 -0
  120. foundry_mcp/tools/unified/plan.py +745 -0
  121. foundry_mcp/tools/unified/pr.py +294 -0
  122. foundry_mcp/tools/unified/provider.py +629 -0
  123. foundry_mcp/tools/unified/review.py +685 -0
  124. foundry_mcp/tools/unified/review_helpers.py +299 -0
  125. foundry_mcp/tools/unified/router.py +102 -0
  126. foundry_mcp/tools/unified/server.py +580 -0
  127. foundry_mcp/tools/unified/spec.py +808 -0
  128. foundry_mcp/tools/unified/task.py +2202 -0
  129. foundry_mcp/tools/unified/test.py +370 -0
  130. foundry_mcp/tools/unified/verification.py +520 -0
  131. foundry_mcp-0.3.3.dist-info/METADATA +337 -0
  132. foundry_mcp-0.3.3.dist-info/RECORD +135 -0
  133. foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
  134. foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
  135. foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,939 @@
1
+ """Unified environment tool with action routing and feature-flag enforcement."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import json
6
+ import logging
7
+ import shutil
8
+ import subprocess
9
+ import sys
10
+ from dataclasses import asdict
11
+ from pathlib import Path
12
+ from typing import Any, Dict, List, Optional, cast
13
+
14
+ from mcp.server.fastmcp import FastMCP
15
+
16
+ from foundry_mcp.config import ServerConfig
17
+ from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
18
+ from foundry_mcp.core.feature_flags import FeatureFlag, FlagState, get_flag_service
19
+ from foundry_mcp.core.naming import canonical_tool
20
+ from foundry_mcp.core.observability import audit_log, get_metrics, mcp_tool
21
+ from foundry_mcp.core.responses import (
22
+ ErrorCode,
23
+ ErrorType,
24
+ error_response,
25
+ success_response,
26
+ )
27
+ from foundry_mcp.tools.unified.router import (
28
+ ActionDefinition,
29
+ ActionRouter,
30
+ ActionRouterError,
31
+ )
32
+
33
# Module-level singletons: shared logger, metrics sink, and feature-flag service.
logger = logging.getLogger(__name__)
_metrics = get_metrics()
_flag_service = get_flag_service()
# Register the flag that gates every environment action. A ValueError means a
# prior import already registered it, which is expected and safe to ignore.
try:
    _flag_service.register(
        FeatureFlag(
            name="environment_tools",
            description="Environment readiness and workspace tooling",
            state=FlagState.BETA,
            default_enabled=True,
        )
    )
except ValueError:
    pass

# Default contents written to foundry-mcp.toml by the `setup` action when the
# file does not already exist.
_DEFAULT_TOML_CONTENT = """[workspace]
specs_dir = "./specs"

[workflow]
mode = "single"
auto_validate = true

[logging]
level = "INFO"
"""
58
+
59
+
60
+ # ---------------------------------------------------------------------------
61
+ # Helper functions used by unified surface
62
+ # ---------------------------------------------------------------------------
63
+
64
+
65
+ def _update_permissions(
66
+ settings_file: Path, preset: str, dry_run: bool
67
+ ) -> Dict[str, Any]:
68
+ """Update .claude/settings.local.json with additive permission merge."""
69
+
70
+ changes: List[str] = []
71
+ preset_perms = {
72
+ "minimal": [
73
+ "mcp__foundry-mcp__server",
74
+ "mcp__foundry-mcp__spec",
75
+ "mcp__foundry-mcp__task",
76
+ ],
77
+ "standard": [
78
+ "mcp__foundry-mcp__authoring",
79
+ "mcp__foundry-mcp__environment",
80
+ "mcp__foundry-mcp__journal",
81
+ "mcp__foundry-mcp__lifecycle",
82
+ "mcp__foundry-mcp__review",
83
+ "mcp__foundry-mcp__server",
84
+ "mcp__foundry-mcp__spec",
85
+ "mcp__foundry-mcp__task",
86
+ "mcp__foundry-mcp__test",
87
+ "Read(//**/specs/**)",
88
+ "Write(//**/specs/active/**)",
89
+ "Write(//**/specs/pending/**)",
90
+ "Edit(//**/specs/active/**)",
91
+ "Edit(//**/specs/pending/**)",
92
+ ],
93
+ "full": [
94
+ "mcp__foundry-mcp__*",
95
+ "Read(//**/specs/**)",
96
+ "Write(//**/specs/**)",
97
+ "Edit(//**/specs/**)",
98
+ ],
99
+ }[preset]
100
+
101
+ if settings_file.exists():
102
+ with open(settings_file, "r") as handle:
103
+ settings = cast(Dict[str, Any], json.load(handle))
104
+ else:
105
+ settings = cast(
106
+ Dict[str, Any], {"permissions": {"allow": [], "deny": [], "ask": []}}
107
+ )
108
+ changes.append(f"Created {settings_file}")
109
+
110
+ permissions_cfg = settings.get("permissions")
111
+ if not isinstance(permissions_cfg, dict):
112
+ permissions_cfg = {"allow": [], "deny": [], "ask": []}
113
+ settings["permissions"] = permissions_cfg
114
+
115
+ allow_list = permissions_cfg.get("allow")
116
+ if not isinstance(allow_list, list):
117
+ allow_list = []
118
+ permissions_cfg["allow"] = allow_list
119
+
120
+ existing = set(allow_list)
121
+ new_perms = set(preset_perms) - existing
122
+
123
+ if new_perms:
124
+ allow_list.extend(sorted(new_perms))
125
+ changes.append(f"Added {len(new_perms)} permissions to allow list")
126
+
127
+ settings["enableAllProjectMcpServers"] = True
128
+ enabled_servers = settings.get("enabledMcpjsonServers")
129
+ if not isinstance(enabled_servers, list):
130
+ enabled_servers = []
131
+ settings["enabledMcpjsonServers"] = enabled_servers
132
+ if "foundry-mcp" not in enabled_servers:
133
+ enabled_servers.append("foundry-mcp")
134
+ changes.append("Enabled foundry-mcp server")
135
+
136
+ if not dry_run and changes:
137
+ settings_file.parent.mkdir(parents=True, exist_ok=True)
138
+ with open(settings_file, "w") as handle:
139
+ json.dump(settings, handle, indent=2)
140
+
141
+ return {"changes": changes}
142
+
143
+
144
def _write_default_toml(toml_path: Path) -> None:
    """Write the default foundry-mcp.toml configuration to *toml_path*."""
    # Path.write_text opens, writes, and closes in a single call.
    toml_path.write_text(_DEFAULT_TOML_CONTENT)
149
+
150
+
151
+ def _init_specs_directory(base_path: Path, dry_run: bool) -> Dict[str, Any]:
152
+ """Initialize specs directory structure."""
153
+
154
+ specs_dir = base_path / "specs"
155
+ subdirs = ["active", "pending", "completed", "archived"]
156
+ changes: List[str] = []
157
+
158
+ if not dry_run:
159
+ if not specs_dir.exists():
160
+ specs_dir.mkdir(parents=True)
161
+ changes.append(f"Created {specs_dir}")
162
+ for subdir in subdirs:
163
+ subdir_path = specs_dir / subdir
164
+ if not subdir_path.exists():
165
+ subdir_path.mkdir(parents=True)
166
+ changes.append(f"Created {subdir_path}")
167
+ else:
168
+ if not specs_dir.exists():
169
+ changes.append(f"Would create {specs_dir}")
170
+ for subdir in subdirs:
171
+ subdir_path = specs_dir / subdir
172
+ if not subdir_path.exists():
173
+ changes.append(f"Would create {subdir_path}")
174
+
175
+ return {"changes": changes}
176
+
177
+
178
+ # ---------------------------------------------------------------------------
179
+ # Unified action helpers
180
+ # ---------------------------------------------------------------------------
181
+
182
# Human-readable summaries surfaced through the action router for each of the
# five environment actions; keys must match the ActionDefinition names below.
_ACTION_SUMMARY = {
    "verify-toolchain": "Validate CLI/toolchain availability",
    "verify-env": "Validate runtimes, packages, and workspace environment",
    "init": "Initialize the standard specs/ workspace structure",
    "detect": "Detect repository topology (project type, specs/docs)",
    "setup": "Complete SDD setup with permissions + config",
}
189
+
190
+
191
+ def _metric_name(action: str) -> str:
192
+ return f"environment.{action.replace('-', '_')}"
193
+
194
+
195
def _request_id() -> str:
    """Return the active correlation id, minting a fresh one when absent."""
    existing = get_correlation_id()
    if existing:
        return existing
    return generate_correlation_id(prefix="environment")
197
+
198
+
199
def _feature_flag_blocked(request_id: str) -> Optional[dict]:
    """Return a serialized error response when the gating flag is off, else None."""
    if not _flag_service.is_enabled("environment_tools"):
        response = error_response(
            "Environment tools are disabled by feature flag",
            error_code=ErrorCode.FEATURE_DISABLED,
            error_type=ErrorType.FEATURE_FLAG,
            data={"feature": "environment_tools"},
            remediation="Enable the 'environment_tools' feature flag to call environment actions.",
            request_id=request_id,
        )
        return asdict(response)
    return None
213
+
214
+
215
def _validation_error(
    *,
    action: str,
    field: str,
    message: str,
    request_id: str,
    remediation: Optional[str] = None,
    code: ErrorCode = ErrorCode.VALIDATION_ERROR,
) -> dict:
    """Build a serialized validation-error response for one invalid field."""
    qualified = f"environment.{action}"
    response = error_response(
        f"Invalid field '{field}' for {qualified}: {message}",
        error_code=code,
        error_type=ErrorType.VALIDATION,
        remediation=remediation,
        details={"field": field, "action": qualified},
        request_id=request_id,
    )
    return asdict(response)
234
+
235
+
236
+ # ---------------------------------------------------------------------------
237
+ # Action handlers
238
+ # ---------------------------------------------------------------------------
239
+
240
+
241
def _handle_verify_toolchain(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    include_optional: Optional[bool] = True,
    **_: Any,
) -> dict:
    """Check that required (and optionally nice-to-have) CLI tools are on PATH.

    Returns a serialized response dict: an error response when any required
    tool (python, git) is missing, otherwise a success response with per-tool
    availability. Missing optional tools only produce warnings.
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if include_optional is not None and not isinstance(include_optional, bool):
        return _validation_error(
            action="verify-toolchain",
            field="include_optional",
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    # None is normalized to the default behavior (include optional tools).
    include = True if include_optional is None else include_optional
    metric_key = _metric_name("verify-toolchain")

    try:
        required_tools = ["python", "git"]
        optional_tools = ["grep", "cat", "find", "node", "npm"]

        def check_tool(tool_name: str) -> bool:
            # PATH lookup only; the tool is never executed.
            return shutil.which(tool_name) is not None

        required_status: Dict[str, bool] = {}
        missing_required: List[str] = []
        for tool in required_tools:
            available = check_tool(tool)
            required_status[tool] = available
            if not available:
                missing_required.append(tool)

        optional_status: Dict[str, bool] = {}
        if include:
            for tool in optional_tools:
                optional_status[tool] = check_tool(tool)

        data: Dict[str, Any] = {
            "required": required_status,
            "all_available": not missing_required,
        }
        if include:
            data["optional"] = optional_status
        if missing_required:
            data["missing"] = missing_required

        warnings: List[str] = []
        if include:
            missing_optional = [
                tool for tool, available in optional_status.items() if not available
            ]
            if missing_optional:
                warnings.append(
                    f"Optional tools not found: {', '.join(sorted(missing_optional))}"
                )

        if missing_required:
            # Required tools absent: report as a validation error, with the
            # partial availability data attached for diagnostics.
            _metrics.counter(metric_key, labels={"status": "missing_required"})
            return asdict(
                error_response(
                    f"Required tools missing: {', '.join(missing_required)}",
                    error_code=ErrorCode.MISSING_REQUIRED,
                    error_type=ErrorType.VALIDATION,
                    data=data,
                    remediation="Install missing tools before continuing with SDD workflows.",
                    request_id=request_id,
                )
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except Exception:
        logger.exception("Error verifying toolchain")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                "Failed to verify toolchain",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check PATH configuration and retry",
                request_id=request_id,
            )
        )
336
+
337
+
338
def _handle_init_workspace(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    create_subdirs: bool = True,
    **_: Any,
) -> dict:
    """Create the specs/ workspace layout under *path* (default: CWD).

    Unlike the `setup` action this only creates directories; it never writes
    configuration files. Idempotent: re-running against an already-initialized
    workspace reports a warning and makes no changes.
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="init",
            field="path",
            message="Workspace path must be a string",
            request_id=request_id,
        )
    if not isinstance(create_subdirs, bool):
        return _validation_error(
            action="init",
            field="create_subdirs",
            message="Expected a boolean value",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    metric_key = _metric_name("init")
    try:
        base_path = Path(path) if path else Path.cwd()
        specs_dir = base_path / "specs"
        subdirs = ["active", "pending", "completed", "archived"]

        created_dirs: List[str] = []
        existing_dirs: List[str] = []

        if not specs_dir.exists():
            specs_dir.mkdir(parents=True)
            created_dirs.append(str(specs_dir))
        else:
            existing_dirs.append(str(specs_dir))

        if create_subdirs:
            for subdir in subdirs:
                subdir_path = specs_dir / subdir
                if not subdir_path.exists():
                    subdir_path.mkdir(parents=True)
                    created_dirs.append(str(subdir_path))
                else:
                    existing_dirs.append(str(subdir_path))

        warnings: List[str] = []
        if not created_dirs:
            warnings.append("All directories already existed, no changes made")

        # Audit even the no-op case so workspace initialization is traceable.
        audit_log(
            "workspace_init",
            tool="environment.init",
            path=str(base_path),
            created_count=len(created_dirs),
            success=True,
        )
        _metrics.counter(metric_key, labels={"status": "success"})

        data: Dict[str, Any] = {
            "specs_dir": str(specs_dir),
            "active_dir": str(specs_dir / "active"),
            "created_dirs": created_dirs,
            "existing_dirs": existing_dirs,
        }
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except PermissionError as exc:
        # Filesystem permission failures get a dedicated authorization error.
        logger.exception("Permission denied during workspace initialization")
        _metrics.counter(metric_key, labels={"status": "forbidden"})
        return asdict(
            error_response(
                f"Permission denied: {exc}",
                error_code=ErrorCode.FORBIDDEN,
                error_type=ErrorType.AUTHORIZATION,
                remediation="Check write permissions for the target directory.",
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error initializing workspace")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to initialize workspace: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Verify the path exists and retry",
                request_id=request_id,
            )
        )
440
+
441
+
442
def _handle_detect_topology(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    **_: Any,
) -> dict:
    """Detect project type, specs/docs directories, and git presence.

    Inspects marker files under *path* (default: CWD) in priority order
    python -> node -> rust -> go; the first ecosystem with a matching marker
    wins and only that marker file is recorded.
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="detect",
            field="path",
            message="Directory path must be a string",
            request_id=request_id,
        )

    metric_key = _metric_name("detect")
    try:
        base_path = Path(path) if path else Path.cwd()

        project_type = "unknown"
        detected_files: List[str] = []

        python_markers = ["pyproject.toml", "setup.py", "requirements.txt", "Pipfile"]
        for marker in python_markers:
            if (base_path / marker).exists():
                project_type = "python"
                detected_files.append(marker)
                break

        if project_type == "unknown":
            node_markers = ["package.json", "yarn.lock", "pnpm-lock.yaml"]
            for marker in node_markers:
                if (base_path / marker).exists():
                    project_type = "node"
                    detected_files.append(marker)
                    break

        if project_type == "unknown" and (base_path / "Cargo.toml").exists():
            project_type = "rust"
            detected_files.append("Cargo.toml")

        if project_type == "unknown" and (base_path / "go.mod").exists():
            project_type = "go"
            detected_files.append("go.mod")

        # First existing candidate wins for the specs and docs directories.
        specs_dir = None
        for candidate in ["specs", ".specs", "specifications"]:
            candidate_path = base_path / candidate
            if candidate_path.is_dir():
                specs_dir = str(candidate_path)
                break

        docs_dir = None
        for candidate in ["docs", "documentation", "doc"]:
            candidate_path = base_path / candidate
            if candidate_path.is_dir():
                docs_dir = str(candidate_path)
                break

        has_git = (base_path / ".git").is_dir()

        # Optional keys are omitted entirely rather than set to None.
        data: Dict[str, Any] = {
            "project_type": project_type,
            "has_git": has_git,
        }
        if specs_dir:
            data["specs_dir"] = specs_dir
        if docs_dir:
            data["docs_dir"] = docs_dir
        if detected_files:
            data["detected_files"] = detected_files

        warnings: List[str] = []
        if project_type == "unknown":
            warnings.append("Could not detect project type from standard marker files")
        if not specs_dir:
            warnings.append(
                "No specs directory found - run environment(action=init) to create one"
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error detecting topology")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to detect topology: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Verify the directory exists and retry",
                request_id=request_id,
            )
        )
546
+
547
+
548
def _handle_verify_environment(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    check_python: bool = True,
    check_git: bool = True,
    check_node: bool = False,
    required_packages: Optional[str] = None,
    **_: Any,
) -> dict:
    """Validate runtime availability (python/git/node) and importable packages.

    Args:
        config: Server configuration (unused; reserved for future hooks).
        path: Accepted and type-checked for interface symmetry with sibling
            actions, but no per-directory checks are currently performed.
        check_python: Verify the running interpreter is Python 3.9+.
        check_git: Verify git is on PATH (version captured best-effort).
        check_node: Verify node is on PATH (version captured best-effort).
        required_packages: Comma-separated import names to check.

    Returns:
        Serialized success response when everything validates, otherwise a
        serialized error response listing the issues found.
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="verify-env",
            field="path",
            message="Directory path must be a string",
            request_id=request_id,
        )
    for field_name, value in (
        ("check_python", check_python),
        ("check_git", check_git),
        ("check_node", check_node),
    ):
        if not isinstance(value, bool):
            return _validation_error(
                action="verify-env",
                field=field_name,
                message="Expected a boolean value",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    if required_packages is not None and not isinstance(required_packages, str):
        return _validation_error(
            action="verify-env",
            field="required_packages",
            message="Provide a comma-separated string",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )

    metric_key = _metric_name("verify-env")
    try:
        # Fix: the original built `Path(path) if path else Path.cwd()` and
        # discarded the result — a no-op. Removed; `path` remains reserved.
        runtimes: Dict[str, Any] = {}
        issues: List[str] = []
        packages: Dict[str, bool] = {}

        if check_python:
            python_version = f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            runtimes["python"] = {
                "available": True,
                "version": python_version,
                "executable": sys.executable,
            }
            if sys.version_info < (3, 9):
                issues.append(f"Python 3.9+ required, found {python_version}")

        if check_git:
            git_path = shutil.which("git")
            if git_path:
                # Best-effort version probe; availability alone satisfies the check.
                try:
                    result = subprocess.run(
                        ["git", "--version"],
                        capture_output=True,
                        text=True,
                        timeout=5,
                    )
                    version_str = result.stdout.strip().replace("git version ", "")
                except Exception:
                    version_str = "unknown"
                runtimes["git"] = {
                    "available": True,
                    "version": version_str,
                    "executable": git_path,
                }
            else:
                runtimes["git"] = {"available": False}
                issues.append("Git not found in PATH")

        if check_node:
            node_path = shutil.which("node")
            if node_path:
                try:
                    result = subprocess.run(
                        ["node", "--version"],
                        capture_output=True,
                        text=True,
                        timeout=5,
                    )
                    node_version = result.stdout.strip()
                except Exception:
                    node_version = "unknown"
                runtimes["node"] = {
                    "available": True,
                    "version": node_version,
                    "executable": node_path,
                }
            else:
                runtimes["node"] = {"available": False}
                issues.append("Node.js not found in PATH")

        if required_packages:
            pkg_list = [
                pkg.strip() for pkg in required_packages.split(",") if pkg.strip()
            ]
            for pkg in pkg_list:
                # Importability check; dashes map to underscores for module names.
                try:
                    __import__(pkg.replace("-", "_"))
                    packages[pkg] = True
                except ImportError:
                    packages[pkg] = False
                    issues.append(f"Required package not found: {pkg}")

        all_valid = not issues
        data: Dict[str, Any] = {"runtimes": runtimes, "all_valid": all_valid}
        if packages:
            data["packages"] = packages
        if issues:
            data["issues"] = issues

        if not all_valid:
            _metrics.counter(metric_key, labels={"status": "invalid"})
            return asdict(
                error_response(
                    f"Environment validation failed: {len(issues)} issue(s) found",
                    error_code=ErrorCode.VALIDATION_ERROR,
                    error_type=ErrorType.VALIDATION,
                    data=data,
                    remediation="Resolve the listed issues and retry the validation.",
                    request_id=request_id,
                )
            )

        _metrics.counter(metric_key, labels={"status": "success"})
        return asdict(
            success_response(
                data=data,
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error verifying environment", extra={"path": path})
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Failed to verify environment: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Check system configuration and retry",
                request_id=request_id,
            )
        )
706
+
707
+
708
def _handle_setup(
    *,
    config: ServerConfig,  # noqa: ARG001 - reserved for future hooks
    path: Optional[str] = None,
    permissions_preset: str = "full",
    create_toml: bool = True,
    dry_run: bool = False,
    **_: Any,
) -> dict:
    """Run the complete SDD setup: specs/ layout, Claude permissions, TOML.

    Composes _init_specs_directory and _update_permissions, then optionally
    writes foundry-mcp.toml when it is missing. `dry_run` computes and reports
    changes without writing directories or the TOML file.
    """
    request_id = _request_id()
    blocked = _feature_flag_blocked(request_id)
    if blocked:
        return blocked

    if path is not None and not isinstance(path, str):
        return _validation_error(
            action="setup",
            field="path",
            message="Project path must be a string",
            request_id=request_id,
        )
    # Validated here so _update_permissions can index its preset table safely.
    if permissions_preset not in {"minimal", "standard", "full"}:
        return _validation_error(
            action="setup",
            field="permissions_preset",
            message="Invalid preset. Use 'minimal', 'standard', or 'full'",
            request_id=request_id,
            code=ErrorCode.INVALID_FORMAT,
        )
    for field_name, value in (("create_toml", create_toml), ("dry_run", dry_run)):
        if not isinstance(value, bool):
            return _validation_error(
                action="setup",
                field=field_name,
                message="Expected a boolean value",
                request_id=request_id,
                code=ErrorCode.INVALID_FORMAT,
            )

    metric_key = _metric_name("setup")
    try:
        base_path = Path(path) if path else Path.cwd()
        if not base_path.exists():
            return asdict(
                error_response(
                    f"Path does not exist: {base_path}",
                    error_code=ErrorCode.NOT_FOUND,
                    error_type=ErrorType.NOT_FOUND,
                    remediation="Provide a valid project directory path",
                    request_id=request_id,
                )
            )

        changes: List[str] = []
        warnings: List[str] = []

        specs_result = _init_specs_directory(base_path, dry_run)
        changes.extend(specs_result["changes"])

        claude_dir = base_path / ".claude"
        settings_file = claude_dir / "settings.local.json"
        settings_result = _update_permissions(
            settings_file, permissions_preset, dry_run
        )
        changes.extend(settings_result["changes"])

        config_file = None
        if create_toml:
            toml_path = base_path / "foundry-mcp.toml"
            if not toml_path.exists():
                config_file = str(toml_path)
                if not dry_run:
                    _write_default_toml(toml_path)
                # NOTE(review): this reports "Created" even under dry_run,
                # unlike _init_specs_directory's "Would create" — confirm
                # whether past-tense reporting in dry-run mode is intended.
                changes.append(f"Created {toml_path}")
            else:
                warnings.append("foundry-mcp.toml already exists, skipping")

        audit_log(
            "sdd_setup",
            tool="environment.setup",
            path=str(base_path),
            preset=permissions_preset,
            dry_run=dry_run,
        )
        _metrics.counter(
            metric_key,
            labels={
                "status": "success",
                "preset": permissions_preset,
                "dry_run": str(dry_run),
            },
        )

        return asdict(
            success_response(
                data={
                    "specs_dir": str(base_path / "specs"),
                    "permissions_file": str(settings_file),
                    "config_file": config_file,
                    "changes": changes,
                    "dry_run": dry_run,
                },
                warnings=warnings or None,
                request_id=request_id,
            )
        )
    except PermissionError as exc:
        logger.exception("Permission denied during environment setup")
        _metrics.counter(metric_key, labels={"status": "forbidden"})
        return asdict(
            error_response(
                f"Permission denied: {exc}",
                error_code=ErrorCode.FORBIDDEN,
                error_type=ErrorType.AUTHORIZATION,
                remediation="Check write permissions for the target directory.",
                request_id=request_id,
            )
        )
    except Exception as exc:
        logger.exception("Error in environment setup")
        _metrics.counter(metric_key, labels={"status": "error"})
        return asdict(
            error_response(
                f"Setup failed: {exc}",
                error_code=ErrorCode.INTERNAL_ERROR,
                error_type=ErrorType.INTERNAL,
                remediation="Inspect the logged errors and retry",
                request_id=request_id,
            )
        )
838
+
839
+
840
# Canonical action table: maps each public action name (plus its legacy
# snake_case and sdd-* aliases) onto the corresponding handler and summary.
_ENVIRONMENT_ROUTER = ActionRouter(
    tool_name="environment",
    actions=[
        ActionDefinition(
            name="verify-toolchain",
            handler=_handle_verify_toolchain,
            summary=_ACTION_SUMMARY["verify-toolchain"],
            aliases=(
                "verify_toolchain",
                "sdd-verify-toolchain",
                "sdd_verify_toolchain",
            ),
        ),
        ActionDefinition(
            name="verify-env",
            handler=_handle_verify_environment,
            summary=_ACTION_SUMMARY["verify-env"],
            aliases=("verify_env", "sdd-verify-environment", "sdd_verify_environment"),
        ),
        ActionDefinition(
            name="init",
            handler=_handle_init_workspace,
            summary=_ACTION_SUMMARY["init"],
            aliases=("sdd-init-workspace", "sdd_init_workspace"),
        ),
        ActionDefinition(
            name="detect",
            handler=_handle_detect_topology,
            summary=_ACTION_SUMMARY["detect"],
            aliases=("sdd-detect-topology", "sdd_detect_topology"),
        ),
        ActionDefinition(
            name="setup",
            handler=_handle_setup,
            summary=_ACTION_SUMMARY["setup"],
            aliases=("sdd-setup", "sdd_setup"),
        ),
    ],
)
879
+
880
+
881
def _dispatch_environment_action(
    *, action: str, payload: Dict[str, Any], config: ServerConfig
) -> dict:
    """Route *action* through the environment router.

    Unknown actions are translated into a serialized validation-error
    response listing the allowed action names.
    """
    try:
        return _ENVIRONMENT_ROUTER.dispatch(action=action, config=config, **payload)
    except ActionRouterError as exc:
        allowed = ", ".join(exc.allowed_actions)
        response = error_response(
            f"Unsupported environment action '{action}'. Allowed actions: {allowed}",
            error_code=ErrorCode.VALIDATION_ERROR,
            error_type=ErrorType.VALIDATION,
            remediation=f"Use one of: {allowed}",
            request_id=_request_id(),
        )
        return asdict(response)
898
+
899
+
900
def register_unified_environment_tool(mcp: FastMCP, config: ServerConfig) -> None:
    """Register the consolidated environment tool on the given FastMCP server.

    Exposes a single `environment` tool whose flat parameter list is the
    union of all action parameters; dispatch happens by `action` name.
    """

    @canonical_tool(mcp, canonical_name="environment")
    @mcp_tool(tool_name="environment", emit_metrics=True, audit=True)
    def environment(  # noqa: PLR0913 - composite signature spanning actions
        action: str,
        path: Optional[str] = None,
        include_optional: Optional[bool] = True,
        create_subdirs: bool = True,
        check_python: bool = True,
        check_git: bool = True,
        check_node: bool = False,
        required_packages: Optional[str] = None,
        permissions_preset: str = "full",
        create_toml: bool = True,
        dry_run: bool = False,
    ) -> dict:
        # Forward every parameter; each handler consumes what it needs and
        # swallows the rest through its **_ catch-all.
        payload = {
            "path": path,
            "include_optional": include_optional,
            "create_subdirs": create_subdirs,
            "check_python": check_python,
            "check_git": check_git,
            "check_node": check_node,
            "required_packages": required_packages,
            "permissions_preset": permissions_preset,
            "create_toml": create_toml,
            "dry_run": dry_run,
        }
        return _dispatch_environment_action(
            action=action, payload=payload, config=config
        )

    logger.debug("Registered unified environment tool")
935
+
936
+
937
# Public API: only the registration entry point is exported.
__all__ = [
    "register_unified_environment_tool",
]