foundry-mcp 0.8.22__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
Potentially problematic release: this version of foundry-mcp might be problematic.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
foundry_mcp/tools/unified/test.py
@@ -0,0 +1,431 @@
+"""Unified test tool with action routing.
+
+Provides the unified `test(action=...)` entry point.
+"""
+
+from __future__ import annotations
+
+import logging
+import time
+from dataclasses import asdict
+from pathlib import Path
+from typing import Any, Dict, Optional
+
+from mcp.server.fastmcp import FastMCP
+
+from foundry_mcp.config import ServerConfig
+from foundry_mcp.core.context import generate_correlation_id, get_correlation_id
+from foundry_mcp.core.naming import canonical_tool
+from foundry_mcp.core.observability import get_metrics, mcp_tool
+from foundry_mcp.core.responses import (
+    ErrorCode,
+    ErrorType,
+    error_response,
+    success_response,
+)
+from foundry_mcp.core.testing import (
+    TestRunner,
+    get_presets,
+    get_runner,
+    get_available_runners,
+)
+from foundry_mcp.tools.unified.router import (
+    ActionDefinition,
+    ActionRouter,
+    ActionRouterError,
+)
+
+logger = logging.getLogger(__name__)
+_metrics = get_metrics()
+
+
+def _request_id() -> str:
+    return get_correlation_id() or generate_correlation_id(prefix="test")
+
+
+def _metric(action: str) -> str:
+    return f"unified_tools.test.{action.replace('-', '_')}"
+
+
+def _get_test_runner(
+    config: ServerConfig,
+    workspace: Optional[str],
+    runner_name: Optional[str] = None,
+) -> TestRunner:
+    """Get a TestRunner with the appropriate backend.
+
+    Args:
+        config: Server configuration
+        workspace: Workspace path override
+        runner_name: Name of the test runner backend to use
+
+    Returns:
+        TestRunner configured with the appropriate backend
+    """
+    ws: Optional[Path] = None
+    if workspace:
+        ws = Path(workspace)
+    elif config.specs_dir is not None:
+        ws = config.specs_dir.parent
+
+    # Get the runner backend from config or defaults
+    runner_backend = get_runner(runner_name, config.test)
+
+    return TestRunner(workspace=ws, runner=runner_backend)
+
+
+def _validation_error(
+    *, message: str, request_id: str, remediation: Optional[str] = None
+) -> dict:
+    return asdict(
+        error_response(
+            message,
+            error_code=ErrorCode.VALIDATION_ERROR,
+            error_type=ErrorType.VALIDATION,
+            remediation=remediation,
+            request_id=request_id,
+        )
+    )
+
+
+def _handle_run(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    request_id = _request_id()
+
+    # Validate runner parameter
+    runner_name = payload.get("runner")
+    if runner_name is not None and not isinstance(runner_name, str):
+        return _validation_error(
+            message="runner must be a string",
+            request_id=request_id,
+            remediation="Use runner=pytest|go|npm|jest|make or a custom runner name",
+        )
+
+    if isinstance(runner_name, str):
+        available_runners = get_available_runners(config.test)
+        if runner_name not in available_runners:
+            return _validation_error(
+                message=f"Unknown runner: {runner_name}",
+                request_id=request_id,
+                remediation=f"Use one of: {', '.join(sorted(available_runners))}",
+            )
+
+    preset = payload.get("preset")
+    if preset is not None and not isinstance(preset, str):
+        return _validation_error(
+            message="preset must be a string",
+            request_id=request_id,
+            remediation="Use preset=quick|unit|full",
+        )
+
+    if isinstance(preset, str):
+        presets = get_presets()
+        if preset not in presets:
+            return _validation_error(
+                message=f"Unknown preset: {preset}",
+                request_id=request_id,
+                remediation=f"Use one of: {', '.join(sorted(presets))}",
+            )
+
+    target = payload.get("target")
+    if target is not None and not isinstance(target, str):
+        return _validation_error(
+            message="target must be a string",
+            request_id=request_id,
+            remediation="Provide a test target like tests/unit or tests/test_file.py",
+        )
+
+    timeout = payload.get("timeout", 300)
+    if timeout is not None:
+        try:
+            timeout_int = int(timeout)
+        except (TypeError, ValueError):
+            return _validation_error(
+                message="timeout must be an integer",
+                request_id=request_id,
+                remediation="Provide a timeout in seconds",
+            )
+        if timeout_int <= 0:
+            return _validation_error(
+                message="timeout must be > 0",
+                request_id=request_id,
+                remediation="Provide a timeout in seconds",
+            )
+        timeout = timeout_int
+
+    verbose_value = payload.get("verbose", True)
+    if verbose_value is not None and not isinstance(verbose_value, bool):
+        return _validation_error(
+            message="verbose must be a boolean",
+            request_id=request_id,
+            remediation="Provide verbose=true|false",
+        )
+    verbose = verbose_value if isinstance(verbose_value, bool) else True
+
+    fail_fast_value = payload.get("fail_fast", False)
+    if fail_fast_value is not None and not isinstance(fail_fast_value, bool):
+        return _validation_error(
+            message="fail_fast must be a boolean",
+            request_id=request_id,
+            remediation="Provide fail_fast=true|false",
+        )
+    fail_fast = fail_fast_value if isinstance(fail_fast_value, bool) else False
+    markers = payload.get("markers")
+    if markers is not None and not isinstance(markers, str):
+        return _validation_error(
+            message="markers must be a string",
+            request_id=request_id,
+            remediation="Provide a pytest markers expression like 'not slow'",
+        )
+
+    workspace = payload.get("workspace")
+    if workspace is not None and not isinstance(workspace, str):
+        return _validation_error(
+            message="workspace must be a string",
+            request_id=request_id,
+            remediation="Provide an absolute path to the workspace",
+        )
+
+    include_passed_value = payload.get("include_passed", False)
+    if include_passed_value is not None and not isinstance(include_passed_value, bool):
+        return _validation_error(
+            message="include_passed must be a boolean",
+            request_id=request_id,
+            remediation="Provide include_passed=true|false",
+        )
+    include_passed = (
+        include_passed_value if isinstance(include_passed_value, bool) else False
+    )
+
+    runner = _get_test_runner(config, workspace, runner_name)
+
+    start = time.perf_counter()
+    result = runner.run_tests(
+        target=target,
+        preset=preset,
+        timeout=timeout,
+        verbose=verbose,
+        fail_fast=fail_fast,
+        markers=markers,
+    )
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    _metrics.timer(_metric("run") + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric("run"), labels={"status": "success" if result.success else "failure"}
+    )
+
+    if result.error:
+        return asdict(
+            error_response(
+                result.error,
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                request_id=request_id,
+            )
+        )
+
+    filtered_tests = (
+        result.tests
+        if include_passed
+        else [t for t in result.tests if t.outcome in ("failed", "error")]
+    )
+
+    return asdict(
+        success_response(
+            execution_id=result.execution_id,
+            timestamp=result.timestamp,
+            tests_passed=result.success,
+            summary={
+                "total": result.total,
+                "passed": result.passed,
+                "failed": result.failed,
+                "skipped": result.skipped,
+                "errors": result.errors,
+            },
+            tests=[
+                {
+                    "name": t.name,
+                    "outcome": t.outcome,
+                    "duration": t.duration,
+                    "message": t.message,
+                }
+                for t in filtered_tests
+            ],
+            filtered=not include_passed,
+            command=result.command,
+            duration=result.duration,
+            metadata=dict(result.metadata or {}),
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+            request_id=request_id,
+        )
+    )
+
+
+def _handle_discover(*, config: ServerConfig, payload: Dict[str, Any]) -> dict:
+    request_id = _request_id()
+
+    # Validate runner parameter
+    runner_name = payload.get("runner")
+    if runner_name is not None and not isinstance(runner_name, str):
+        return _validation_error(
+            message="runner must be a string",
+            request_id=request_id,
+            remediation="Use runner=pytest|go|npm|jest|make or a custom runner name",
+        )
+
+    if isinstance(runner_name, str):
+        available_runners = get_available_runners(config.test)
+        if runner_name not in available_runners:
+            return _validation_error(
+                message=f"Unknown runner: {runner_name}",
+                request_id=request_id,
+                remediation=f"Use one of: {', '.join(sorted(available_runners))}",
+            )
+
+    target = payload.get("target")
+    if target is not None and not isinstance(target, str):
+        return _validation_error(
+            message="target must be a string",
+            request_id=request_id,
+            remediation="Provide a test directory or file to search",
+        )
+
+    pattern = payload.get("pattern", "test_*.py")
+    if not isinstance(pattern, str) or not pattern:
+        return _validation_error(
+            message="pattern must be a non-empty string",
+            request_id=request_id,
+            remediation="Provide a file glob pattern like test_*.py",
+        )
+
+    workspace = payload.get("workspace")
+    if workspace is not None and not isinstance(workspace, str):
+        return _validation_error(
+            message="workspace must be a string",
+            request_id=request_id,
+            remediation="Provide an absolute path to the workspace",
+        )
+
+    runner = _get_test_runner(config, workspace, runner_name)
+
+    start = time.perf_counter()
+    result = runner.discover_tests(target=target, pattern=pattern)
+    elapsed_ms = (time.perf_counter() - start) * 1000
+
+    _metrics.timer(_metric("discover") + ".duration_ms", elapsed_ms)
+    _metrics.counter(
+        _metric("discover"),
+        labels={"status": "success" if result.success else "failure"},
+    )
+
+    if result.error:
+        return asdict(
+            error_response(
+                result.error,
+                error_code=ErrorCode.INTERNAL_ERROR,
+                error_type=ErrorType.INTERNAL,
+                request_id=request_id,
+            )
+        )
+
+    return asdict(
+        success_response(
+            timestamp=result.timestamp,
+            total=result.total,
+            test_files=result.test_files,
+            tests=[
+                {
+                    "name": t.name,
+                    "file_path": t.file_path,
+                    "line_number": t.line_number,
+                    "markers": t.markers,
+                }
+                for t in result.tests
+            ],
+            metadata=dict(result.metadata or {}),
+            telemetry={"duration_ms": round(elapsed_ms, 2)},
+            request_id=request_id,
+        )
+    )
+
+
+_ACTION_SUMMARY = {
+    "run": "Execute tests using the specified runner (pytest, go, npm, jest, make).",
+    "discover": "Discover tests without executing using the specified runner.",
+}
+
+
+def _build_router() -> ActionRouter:
+    actions = [
+        ActionDefinition(
+            name="run", handler=_handle_run, summary=_ACTION_SUMMARY["run"]
+        ),
+        ActionDefinition(
+            name="discover",
+            handler=_handle_discover,
+            summary=_ACTION_SUMMARY["discover"],
+        ),
+    ]
+    return ActionRouter(tool_name="test", actions=actions)
+
+
+_TEST_ROUTER = _build_router()
+
+
+def _dispatch_test_action(
+    *, action: str, payload: Dict[str, Any], config: ServerConfig
+) -> dict:
+    try:
+        return _TEST_ROUTER.dispatch(action, config=config, payload=payload)
+    except ActionRouterError as exc:
+        allowed = ", ".join(exc.allowed_actions)
+        request_id = _request_id()
+        return asdict(
+            error_response(
+                f"Unsupported test action '{action}'. Allowed actions: {allowed}",
+                error_code=ErrorCode.VALIDATION_ERROR,
+                error_type=ErrorType.VALIDATION,
+                remediation=f"Use one of: {allowed}",
+                request_id=request_id,
+            )
+        )
+
+
+def register_unified_test_tool(mcp: FastMCP, config: ServerConfig) -> None:
+    """Register the consolidated test tool."""
+
+    @canonical_tool(mcp, canonical_name="test")
+    @mcp_tool(tool_name="test", emit_metrics=True, audit=True)
+    def test(
+        action: str,
+        target: Optional[str] = None,
+        preset: Optional[str] = None,
+        runner: Optional[str] = None,
+        timeout: int = 300,
+        verbose: bool = True,
+        fail_fast: bool = False,
+        markers: Optional[str] = None,
+        pattern: str = "test_*.py",
+        workspace: Optional[str] = None,
+        include_passed: bool = False,
+    ) -> dict:
+        payload: Dict[str, Any] = {
+            "target": target,
+            "preset": preset,
+            "runner": runner,
+            "timeout": timeout,
+            "verbose": verbose,
+            "fail_fast": fail_fast,
+            "markers": markers,
+            "pattern": pattern,
+            "workspace": workspace,
+            "include_passed": include_passed,
+        }
+        return _dispatch_test_action(action=action, payload=payload, config=config)
+
+    logger.debug("Registered unified test tool")
+
+
+__all__ = [
+    "register_unified_test_tool",
+]
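For orientation, the hunk above registers a single `test` tool whose behaviour is selected by `action` and dispatched through `ActionRouter`. The sketch below shows the argument shapes that `_handle_run` and `_handle_discover` validate; the values are illustrative examples rather than package defaults, and how a particular MCP client actually transmits these arguments is outside this diff.

# Illustrative argument shapes for the unified `test` tool (values are examples only).
run_args = {
    "action": "run",            # dispatched to _handle_run
    "runner": "pytest",         # must appear in get_available_runners(config.test)
    "preset": "quick",          # must appear in get_presets()
    "target": "tests/unit",
    "timeout": 120,             # seconds; must coerce to an int > 0
    "fail_fast": True,
    "include_passed": False,    # response echoes only failed/error tests
}

discover_args = {
    "action": "discover",       # dispatched to _handle_discover
    "pattern": "test_*.py",     # non-empty glob required
    "workspace": "/path/to/project",  # optional workspace override (example path)
}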