foundry-mcp 0.8.22__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of foundry-mcp might be problematic. Click here for more details.
- foundry_mcp/__init__.py +13 -0
- foundry_mcp/cli/__init__.py +67 -0
- foundry_mcp/cli/__main__.py +9 -0
- foundry_mcp/cli/agent.py +96 -0
- foundry_mcp/cli/commands/__init__.py +37 -0
- foundry_mcp/cli/commands/cache.py +137 -0
- foundry_mcp/cli/commands/dashboard.py +148 -0
- foundry_mcp/cli/commands/dev.py +446 -0
- foundry_mcp/cli/commands/journal.py +377 -0
- foundry_mcp/cli/commands/lifecycle.py +274 -0
- foundry_mcp/cli/commands/modify.py +824 -0
- foundry_mcp/cli/commands/plan.py +640 -0
- foundry_mcp/cli/commands/pr.py +393 -0
- foundry_mcp/cli/commands/review.py +667 -0
- foundry_mcp/cli/commands/session.py +472 -0
- foundry_mcp/cli/commands/specs.py +686 -0
- foundry_mcp/cli/commands/tasks.py +807 -0
- foundry_mcp/cli/commands/testing.py +676 -0
- foundry_mcp/cli/commands/validate.py +982 -0
- foundry_mcp/cli/config.py +98 -0
- foundry_mcp/cli/context.py +298 -0
- foundry_mcp/cli/logging.py +212 -0
- foundry_mcp/cli/main.py +44 -0
- foundry_mcp/cli/output.py +122 -0
- foundry_mcp/cli/registry.py +110 -0
- foundry_mcp/cli/resilience.py +178 -0
- foundry_mcp/cli/transcript.py +217 -0
- foundry_mcp/config.py +1454 -0
- foundry_mcp/core/__init__.py +144 -0
- foundry_mcp/core/ai_consultation.py +1773 -0
- foundry_mcp/core/batch_operations.py +1202 -0
- foundry_mcp/core/cache.py +195 -0
- foundry_mcp/core/capabilities.py +446 -0
- foundry_mcp/core/concurrency.py +898 -0
- foundry_mcp/core/context.py +540 -0
- foundry_mcp/core/discovery.py +1603 -0
- foundry_mcp/core/error_collection.py +728 -0
- foundry_mcp/core/error_store.py +592 -0
- foundry_mcp/core/health.py +749 -0
- foundry_mcp/core/intake.py +933 -0
- foundry_mcp/core/journal.py +700 -0
- foundry_mcp/core/lifecycle.py +412 -0
- foundry_mcp/core/llm_config.py +1376 -0
- foundry_mcp/core/llm_patterns.py +510 -0
- foundry_mcp/core/llm_provider.py +1569 -0
- foundry_mcp/core/logging_config.py +374 -0
- foundry_mcp/core/metrics_persistence.py +584 -0
- foundry_mcp/core/metrics_registry.py +327 -0
- foundry_mcp/core/metrics_store.py +641 -0
- foundry_mcp/core/modifications.py +224 -0
- foundry_mcp/core/naming.py +146 -0
- foundry_mcp/core/observability.py +1216 -0
- foundry_mcp/core/otel.py +452 -0
- foundry_mcp/core/otel_stubs.py +264 -0
- foundry_mcp/core/pagination.py +255 -0
- foundry_mcp/core/progress.py +387 -0
- foundry_mcp/core/prometheus.py +564 -0
- foundry_mcp/core/prompts/__init__.py +464 -0
- foundry_mcp/core/prompts/fidelity_review.py +691 -0
- foundry_mcp/core/prompts/markdown_plan_review.py +515 -0
- foundry_mcp/core/prompts/plan_review.py +627 -0
- foundry_mcp/core/providers/__init__.py +237 -0
- foundry_mcp/core/providers/base.py +515 -0
- foundry_mcp/core/providers/claude.py +472 -0
- foundry_mcp/core/providers/codex.py +637 -0
- foundry_mcp/core/providers/cursor_agent.py +630 -0
- foundry_mcp/core/providers/detectors.py +515 -0
- foundry_mcp/core/providers/gemini.py +426 -0
- foundry_mcp/core/providers/opencode.py +718 -0
- foundry_mcp/core/providers/opencode_wrapper.js +308 -0
- foundry_mcp/core/providers/package-lock.json +24 -0
- foundry_mcp/core/providers/package.json +25 -0
- foundry_mcp/core/providers/registry.py +607 -0
- foundry_mcp/core/providers/test_provider.py +171 -0
- foundry_mcp/core/providers/validation.py +857 -0
- foundry_mcp/core/rate_limit.py +427 -0
- foundry_mcp/core/research/__init__.py +68 -0
- foundry_mcp/core/research/memory.py +528 -0
- foundry_mcp/core/research/models.py +1234 -0
- foundry_mcp/core/research/providers/__init__.py +40 -0
- foundry_mcp/core/research/providers/base.py +242 -0
- foundry_mcp/core/research/providers/google.py +507 -0
- foundry_mcp/core/research/providers/perplexity.py +442 -0
- foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
- foundry_mcp/core/research/providers/tavily.py +383 -0
- foundry_mcp/core/research/workflows/__init__.py +25 -0
- foundry_mcp/core/research/workflows/base.py +298 -0
- foundry_mcp/core/research/workflows/chat.py +271 -0
- foundry_mcp/core/research/workflows/consensus.py +539 -0
- foundry_mcp/core/research/workflows/deep_research.py +4142 -0
- foundry_mcp/core/research/workflows/ideate.py +682 -0
- foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
- foundry_mcp/core/resilience.py +600 -0
- foundry_mcp/core/responses.py +1624 -0
- foundry_mcp/core/review.py +366 -0
- foundry_mcp/core/security.py +438 -0
- foundry_mcp/core/spec.py +4119 -0
- foundry_mcp/core/task.py +2463 -0
- foundry_mcp/core/testing.py +839 -0
- foundry_mcp/core/validation.py +2357 -0
- foundry_mcp/dashboard/__init__.py +32 -0
- foundry_mcp/dashboard/app.py +119 -0
- foundry_mcp/dashboard/components/__init__.py +17 -0
- foundry_mcp/dashboard/components/cards.py +88 -0
- foundry_mcp/dashboard/components/charts.py +177 -0
- foundry_mcp/dashboard/components/filters.py +136 -0
- foundry_mcp/dashboard/components/tables.py +195 -0
- foundry_mcp/dashboard/data/__init__.py +11 -0
- foundry_mcp/dashboard/data/stores.py +433 -0
- foundry_mcp/dashboard/launcher.py +300 -0
- foundry_mcp/dashboard/views/__init__.py +12 -0
- foundry_mcp/dashboard/views/errors.py +217 -0
- foundry_mcp/dashboard/views/metrics.py +164 -0
- foundry_mcp/dashboard/views/overview.py +96 -0
- foundry_mcp/dashboard/views/providers.py +83 -0
- foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
- foundry_mcp/dashboard/views/tool_usage.py +139 -0
- foundry_mcp/prompts/__init__.py +9 -0
- foundry_mcp/prompts/workflows.py +525 -0
- foundry_mcp/resources/__init__.py +9 -0
- foundry_mcp/resources/specs.py +591 -0
- foundry_mcp/schemas/__init__.py +38 -0
- foundry_mcp/schemas/intake-schema.json +89 -0
- foundry_mcp/schemas/sdd-spec-schema.json +414 -0
- foundry_mcp/server.py +150 -0
- foundry_mcp/tools/__init__.py +10 -0
- foundry_mcp/tools/unified/__init__.py +92 -0
- foundry_mcp/tools/unified/authoring.py +3620 -0
- foundry_mcp/tools/unified/context_helpers.py +98 -0
- foundry_mcp/tools/unified/documentation_helpers.py +268 -0
- foundry_mcp/tools/unified/environment.py +1341 -0
- foundry_mcp/tools/unified/error.py +479 -0
- foundry_mcp/tools/unified/health.py +225 -0
- foundry_mcp/tools/unified/journal.py +841 -0
- foundry_mcp/tools/unified/lifecycle.py +640 -0
- foundry_mcp/tools/unified/metrics.py +777 -0
- foundry_mcp/tools/unified/plan.py +876 -0
- foundry_mcp/tools/unified/pr.py +294 -0
- foundry_mcp/tools/unified/provider.py +589 -0
- foundry_mcp/tools/unified/research.py +1283 -0
- foundry_mcp/tools/unified/review.py +1042 -0
- foundry_mcp/tools/unified/review_helpers.py +314 -0
- foundry_mcp/tools/unified/router.py +102 -0
- foundry_mcp/tools/unified/server.py +565 -0
- foundry_mcp/tools/unified/spec.py +1283 -0
- foundry_mcp/tools/unified/task.py +3846 -0
- foundry_mcp/tools/unified/test.py +431 -0
- foundry_mcp/tools/unified/verification.py +520 -0
- foundry_mcp-0.8.22.dist-info/METADATA +344 -0
- foundry_mcp-0.8.22.dist-info/RECORD +153 -0
- foundry_mcp-0.8.22.dist-info/WHEEL +4 -0
- foundry_mcp-0.8.22.dist-info/entry_points.txt +3 -0
- foundry_mcp-0.8.22.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,839 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Testing operations for foundry-mcp.
|
|
3
|
+
Provides functions for running tests and test discovery.
|
|
4
|
+
|
|
5
|
+
Supports multiple test runners (pytest, go, npm, jest, etc.) via configuration.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import re
import subprocess
import time
import uuid
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional, TYPE_CHECKING

if TYPE_CHECKING:
    from foundry_mcp.config import RunnerConfig, TestConfig
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# Schema version for compatibility tracking
|
|
22
|
+
SCHEMA_VERSION = "1.0.0"
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
# Default runner configurations (used when no TOML config provided)
|
|
26
|
+
DEFAULT_RUNNERS: Dict[str, Dict[str, Any]] = {
|
|
27
|
+
"pytest": {
|
|
28
|
+
"command": ["python", "-m", "pytest"],
|
|
29
|
+
"run_args": ["-v", "--tb=short"],
|
|
30
|
+
"discover_args": ["--collect-only", "-q"],
|
|
31
|
+
"pattern": "test_*.py",
|
|
32
|
+
"timeout": 300,
|
|
33
|
+
},
|
|
34
|
+
"go": {
|
|
35
|
+
"command": ["go", "test"],
|
|
36
|
+
"run_args": ["-v"],
|
|
37
|
+
"discover_args": ["-list", ".*"],
|
|
38
|
+
"pattern": "*_test.go",
|
|
39
|
+
"timeout": 300,
|
|
40
|
+
},
|
|
41
|
+
"npm": {
|
|
42
|
+
"command": ["npm", "test"],
|
|
43
|
+
"run_args": ["--"],
|
|
44
|
+
"discover_args": [],
|
|
45
|
+
"pattern": "*.test.js",
|
|
46
|
+
"timeout": 300,
|
|
47
|
+
},
|
|
48
|
+
"jest": {
|
|
49
|
+
"command": ["npx", "jest"],
|
|
50
|
+
"run_args": ["--verbose"],
|
|
51
|
+
"discover_args": ["--listTests"],
|
|
52
|
+
"pattern": "*.test.{js,ts,jsx,tsx}",
|
|
53
|
+
"timeout": 300,
|
|
54
|
+
},
|
|
55
|
+
"make": {
|
|
56
|
+
"command": ["make", "test"],
|
|
57
|
+
"run_args": [],
|
|
58
|
+
"discover_args": [],
|
|
59
|
+
"pattern": "*",
|
|
60
|
+
"timeout": 300,
|
|
61
|
+
},
|
|
62
|
+
}
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
# Presets for common test configurations
|
|
66
|
+
TEST_PRESETS = {
|
|
67
|
+
"quick": {
|
|
68
|
+
"timeout": 60,
|
|
69
|
+
"verbose": False,
|
|
70
|
+
"fail_fast": True,
|
|
71
|
+
"markers": "not slow",
|
|
72
|
+
},
|
|
73
|
+
"full": {
|
|
74
|
+
"timeout": 300,
|
|
75
|
+
"verbose": True,
|
|
76
|
+
"fail_fast": False,
|
|
77
|
+
"markers": None,
|
|
78
|
+
},
|
|
79
|
+
"unit": {
|
|
80
|
+
"timeout": 120,
|
|
81
|
+
"verbose": True,
|
|
82
|
+
"fail_fast": False,
|
|
83
|
+
"markers": "unit",
|
|
84
|
+
"pattern": "test_*.py",
|
|
85
|
+
},
|
|
86
|
+
"integration": {
|
|
87
|
+
"timeout": 300,
|
|
88
|
+
"verbose": True,
|
|
89
|
+
"fail_fast": False,
|
|
90
|
+
"markers": "integration",
|
|
91
|
+
},
|
|
92
|
+
"smoke": {
|
|
93
|
+
"timeout": 30,
|
|
94
|
+
"verbose": False,
|
|
95
|
+
"fail_fast": True,
|
|
96
|
+
"markers": "smoke",
|
|
97
|
+
},
|
|
98
|
+
}
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
# Data structures
|
|
102
|
+
|
|
103
|
+
@dataclass
|
|
104
|
+
class TestResult:
|
|
105
|
+
"""
|
|
106
|
+
Result of a single test.
|
|
107
|
+
"""
|
|
108
|
+
name: str
|
|
109
|
+
outcome: str # passed, failed, skipped, error
|
|
110
|
+
duration: float = 0.0
|
|
111
|
+
message: Optional[str] = None
|
|
112
|
+
file_path: Optional[str] = None
|
|
113
|
+
line_number: Optional[int] = None
|
|
114
|
+
stdout: Optional[str] = None
|
|
115
|
+
stderr: Optional[str] = None
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
@dataclass
|
|
119
|
+
class TestRunResult:
|
|
120
|
+
"""
|
|
121
|
+
Result of a test run.
|
|
122
|
+
"""
|
|
123
|
+
success: bool
|
|
124
|
+
execution_id: str = ""
|
|
125
|
+
schema_version: str = SCHEMA_VERSION
|
|
126
|
+
timestamp: str = ""
|
|
127
|
+
duration: float = 0.0
|
|
128
|
+
total: int = 0
|
|
129
|
+
passed: int = 0
|
|
130
|
+
failed: int = 0
|
|
131
|
+
skipped: int = 0
|
|
132
|
+
errors: int = 0
|
|
133
|
+
tests: List[TestResult] = field(default_factory=list)
|
|
134
|
+
command: str = ""
|
|
135
|
+
cwd: str = ""
|
|
136
|
+
stdout: str = ""
|
|
137
|
+
stderr: str = ""
|
|
138
|
+
error: Optional[str] = None
|
|
139
|
+
metadata: Dict[str, Any] = field(default_factory=dict)
|
|
140
|
+
|
|
141
|
+
def __post_init__(self):
|
|
142
|
+
if not self.execution_id:
|
|
143
|
+
self.execution_id = str(uuid.uuid4())[:8]
|
|
144
|
+
if not self.timestamp:
|
|
145
|
+
self.timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
|
146
|
+
|
|
147
|
+
|
|
148
|
+
@dataclass
|
|
149
|
+
class DiscoveredTest:
|
|
150
|
+
"""
|
|
151
|
+
A discovered test.
|
|
152
|
+
"""
|
|
153
|
+
name: str
|
|
154
|
+
file_path: str
|
|
155
|
+
line_number: Optional[int] = None
|
|
156
|
+
markers: List[str] = field(default_factory=list)
|
|
157
|
+
docstring: Optional[str] = None
|
|
158
|
+
|
|
159
|
+
|
|
160
|
+
@dataclass
|
|
161
|
+
class TestDiscoveryResult:
|
|
162
|
+
"""
|
|
163
|
+
Result of test discovery.
|
|
164
|
+
"""
|
|
165
|
+
success: bool
|
|
166
|
+
schema_version: str = SCHEMA_VERSION
|
|
167
|
+
timestamp: str = ""
|
|
168
|
+
total: int = 0
|
|
169
|
+
tests: List[DiscoveredTest] = field(default_factory=list)
|
|
170
|
+
test_files: List[str] = field(default_factory=list)
|
|
171
|
+
error: Optional[str] = None
|
|
172
|
+
metadata: Dict[str, Any] = field(default_factory=dict)
|
|
173
|
+
|
|
174
|
+
def __post_init__(self):
|
|
175
|
+
if not self.timestamp:
|
|
176
|
+
self.timestamp = datetime.now(timezone.utc).isoformat().replace("+00:00", "Z")
|
|
177
|
+
self.total = len(self.tests)
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
# Abstract test runner interface
|
|
181
|
+
|
|
182
|
+
|
|
183
|
+
class BaseTestRunner(ABC):
|
|
184
|
+
"""Abstract base class for test runners."""
|
|
185
|
+
|
|
186
|
+
@abstractmethod
|
|
187
|
+
def build_run_command(
|
|
188
|
+
self,
|
|
189
|
+
target: Optional[str] = None,
|
|
190
|
+
verbose: bool = True,
|
|
191
|
+
fail_fast: bool = False,
|
|
192
|
+
extra_args: Optional[List[str]] = None,
|
|
193
|
+
**kwargs: Any,
|
|
194
|
+
) -> List[str]:
|
|
195
|
+
"""Build the command to run tests."""
|
|
196
|
+
pass
|
|
197
|
+
|
|
198
|
+
@abstractmethod
|
|
199
|
+
def build_discover_command(
|
|
200
|
+
self,
|
|
201
|
+
target: Optional[str] = None,
|
|
202
|
+
pattern: str = "*",
|
|
203
|
+
) -> List[str]:
|
|
204
|
+
"""Build the command to discover tests."""
|
|
205
|
+
pass
|
|
206
|
+
|
|
207
|
+
@abstractmethod
|
|
208
|
+
def parse_run_output(
|
|
209
|
+
self,
|
|
210
|
+
stdout: str,
|
|
211
|
+
stderr: str,
|
|
212
|
+
returncode: int,
|
|
213
|
+
) -> tuple:
|
|
214
|
+
"""Parse test run output. Returns (tests, passed, failed, skipped, errors)."""
|
|
215
|
+
pass
|
|
216
|
+
|
|
217
|
+
@abstractmethod
|
|
218
|
+
def parse_discover_output(self, stdout: str) -> tuple:
|
|
219
|
+
"""Parse test discovery output. Returns (tests, test_files)."""
|
|
220
|
+
pass
|
|
221
|
+
|
|
222
|
+
@property
|
|
223
|
+
@abstractmethod
|
|
224
|
+
def default_timeout(self) -> int:
|
|
225
|
+
"""Default timeout in seconds."""
|
|
226
|
+
pass
|
|
227
|
+
|
|
228
|
+
@property
|
|
229
|
+
@abstractmethod
|
|
230
|
+
def not_found_error(self) -> str:
|
|
231
|
+
"""Error message when the runner is not found."""
|
|
232
|
+
pass
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
class PytestRunner(BaseTestRunner):
|
|
236
|
+
"""Test runner for pytest-based projects."""
|
|
237
|
+
|
|
238
|
+
def build_run_command(
|
|
239
|
+
self,
|
|
240
|
+
target: Optional[str] = None,
|
|
241
|
+
verbose: bool = True,
|
|
242
|
+
fail_fast: bool = False,
|
|
243
|
+
extra_args: Optional[List[str]] = None,
|
|
244
|
+
markers: Optional[str] = None,
|
|
245
|
+
**kwargs: Any,
|
|
246
|
+
) -> List[str]:
|
|
247
|
+
cmd = ["python", "-m", "pytest"]
|
|
248
|
+
|
|
249
|
+
if target:
|
|
250
|
+
cmd.append(target)
|
|
251
|
+
|
|
252
|
+
if verbose:
|
|
253
|
+
cmd.append("-v")
|
|
254
|
+
|
|
255
|
+
if fail_fast:
|
|
256
|
+
cmd.append("-x")
|
|
257
|
+
|
|
258
|
+
if markers:
|
|
259
|
+
cmd.extend(["-m", markers])
|
|
260
|
+
|
|
261
|
+
cmd.append("--tb=short")
|
|
262
|
+
|
|
263
|
+
if extra_args:
|
|
264
|
+
cmd.extend(extra_args)
|
|
265
|
+
|
|
266
|
+
return cmd
|
|
267
|
+
|
|
268
|
+
def build_discover_command(
|
|
269
|
+
self,
|
|
270
|
+
target: Optional[str] = None,
|
|
271
|
+
pattern: str = "test_*.py",
|
|
272
|
+
) -> List[str]:
|
|
273
|
+
cmd = ["python", "-m", "pytest", "--collect-only", "-q"]
|
|
274
|
+
if target:
|
|
275
|
+
cmd.append(target)
|
|
276
|
+
return cmd
|
|
277
|
+
|
|
278
|
+
def parse_run_output(
|
|
279
|
+
self,
|
|
280
|
+
stdout: str,
|
|
281
|
+
stderr: str,
|
|
282
|
+
returncode: int,
|
|
283
|
+
) -> tuple:
|
|
284
|
+
"""Parse pytest output to extract test results."""
|
|
285
|
+
tests = []
|
|
286
|
+
passed = 0
|
|
287
|
+
failed = 0
|
|
288
|
+
skipped = 0
|
|
289
|
+
errors = 0
|
|
290
|
+
|
|
291
|
+
lines = stdout.split("\n")
|
|
292
|
+
|
|
293
|
+
for line in lines:
|
|
294
|
+
line = line.strip()
|
|
295
|
+
|
|
296
|
+
# Parse individual test results
|
|
297
|
+
if "::" in line:
|
|
298
|
+
if " PASSED" in line:
|
|
299
|
+
name = line.split(" PASSED")[0].strip()
|
|
300
|
+
tests.append(TestResult(name=name, outcome="passed"))
|
|
301
|
+
passed += 1
|
|
302
|
+
elif " FAILED" in line:
|
|
303
|
+
name = line.split(" FAILED")[0].strip()
|
|
304
|
+
tests.append(TestResult(name=name, outcome="failed"))
|
|
305
|
+
failed += 1
|
|
306
|
+
elif " SKIPPED" in line:
|
|
307
|
+
name = line.split(" SKIPPED")[0].strip()
|
|
308
|
+
tests.append(TestResult(name=name, outcome="skipped"))
|
|
309
|
+
skipped += 1
|
|
310
|
+
elif " ERROR" in line:
|
|
311
|
+
name = line.split(" ERROR")[0].strip()
|
|
312
|
+
tests.append(TestResult(name=name, outcome="error"))
|
|
313
|
+
errors += 1
|
|
314
|
+
|
|
315
|
+
# Parse summary line
|
|
316
|
+
if "passed" in line.lower() and (
|
|
317
|
+
"failed" in line.lower()
|
|
318
|
+
or "error" in line.lower()
|
|
319
|
+
or "skipped" in line.lower()
|
|
320
|
+
):
|
|
321
|
+
passed_match = re.search(r"(\d+) passed", line)
|
|
322
|
+
failed_match = re.search(r"(\d+) failed", line)
|
|
323
|
+
skipped_match = re.search(r"(\d+) skipped", line)
|
|
324
|
+
error_match = re.search(r"(\d+) error", line)
|
|
325
|
+
|
|
326
|
+
if passed_match:
|
|
327
|
+
passed = int(passed_match.group(1))
|
|
328
|
+
if failed_match:
|
|
329
|
+
failed = int(failed_match.group(1))
|
|
330
|
+
if skipped_match:
|
|
331
|
+
skipped = int(skipped_match.group(1))
|
|
332
|
+
if error_match:
|
|
333
|
+
errors = int(error_match.group(1))
|
|
334
|
+
|
|
335
|
+
return tests, passed, failed, skipped, errors
|
|
336
|
+
|
|
337
|
+
def parse_discover_output(self, stdout: str) -> tuple:
|
|
338
|
+
"""Parse pytest --collect-only output."""
|
|
339
|
+
tests = []
|
|
340
|
+
test_files: set[str] = set()
|
|
341
|
+
|
|
342
|
+
for line in stdout.split("\n"):
|
|
343
|
+
line = line.strip()
|
|
344
|
+
if "::" in line and not line.startswith("="):
|
|
345
|
+
parts = line.split("::")
|
|
346
|
+
if parts:
|
|
347
|
+
file_path = parts[0]
|
|
348
|
+
test_files.add(file_path)
|
|
349
|
+
tests.append(DiscoveredTest(name=line, file_path=file_path))
|
|
350
|
+
|
|
351
|
+
return tests, list(test_files)
|
|
352
|
+
|
|
353
|
+
@property
|
|
354
|
+
def default_timeout(self) -> int:
|
|
355
|
+
return 300
|
|
356
|
+
|
|
357
|
+
@property
|
|
358
|
+
def not_found_error(self) -> str:
|
|
359
|
+
return "pytest not found. Install with: pip install pytest"
|
|
360
|
+
|
|
361
|
+
|
|
362
|
+
class GenericRunner(BaseTestRunner):
|
|
363
|
+
"""Generic test runner that uses RunnerConfig from TOML configuration."""
|
|
364
|
+
|
|
365
|
+
def __init__(
|
|
366
|
+
self,
|
|
367
|
+
command: List[str],
|
|
368
|
+
run_args: Optional[List[str]] = None,
|
|
369
|
+
discover_args: Optional[List[str]] = None,
|
|
370
|
+
pattern: str = "*",
|
|
371
|
+
timeout: int = 300,
|
|
372
|
+
runner_name: str = "generic",
|
|
373
|
+
):
|
|
374
|
+
self.command = command
|
|
375
|
+
self.run_args = run_args or []
|
|
376
|
+
self.discover_args = discover_args or []
|
|
377
|
+
self.pattern = pattern
|
|
378
|
+
self.timeout = timeout
|
|
379
|
+
self.runner_name = runner_name
|
|
380
|
+
|
|
381
|
+
@classmethod
|
|
382
|
+
def from_runner_config(
|
|
383
|
+
cls, config: "RunnerConfig", runner_name: str = "generic"
|
|
384
|
+
) -> "GenericRunner":
|
|
385
|
+
"""Create a GenericRunner from a RunnerConfig object."""
|
|
386
|
+
return cls(
|
|
387
|
+
command=list(config.command),
|
|
388
|
+
run_args=list(config.run_args),
|
|
389
|
+
discover_args=list(config.discover_args),
|
|
390
|
+
pattern=config.pattern,
|
|
391
|
+
timeout=config.timeout,
|
|
392
|
+
runner_name=runner_name,
|
|
393
|
+
)
|
|
394
|
+
|
|
395
|
+
@classmethod
|
|
396
|
+
def from_default(cls, runner_name: str) -> "GenericRunner":
|
|
397
|
+
"""Create a GenericRunner from DEFAULT_RUNNERS."""
|
|
398
|
+
if runner_name not in DEFAULT_RUNNERS:
|
|
399
|
+
raise ValueError(f"Unknown default runner: {runner_name}")
|
|
400
|
+
cfg = DEFAULT_RUNNERS[runner_name]
|
|
401
|
+
return cls(
|
|
402
|
+
command=list(cfg["command"]),
|
|
403
|
+
run_args=list(cfg.get("run_args", [])),
|
|
404
|
+
discover_args=list(cfg.get("discover_args", [])),
|
|
405
|
+
pattern=cfg.get("pattern", "*"),
|
|
406
|
+
timeout=cfg.get("timeout", 300),
|
|
407
|
+
runner_name=runner_name,
|
|
408
|
+
)
|
|
409
|
+
|
|
410
|
+
def build_run_command(
|
|
411
|
+
self,
|
|
412
|
+
target: Optional[str] = None,
|
|
413
|
+
verbose: bool = True,
|
|
414
|
+
fail_fast: bool = False,
|
|
415
|
+
extra_args: Optional[List[str]] = None,
|
|
416
|
+
**kwargs: Any,
|
|
417
|
+
) -> List[str]:
|
|
418
|
+
cmd = list(self.command) + list(self.run_args)
|
|
419
|
+
if target:
|
|
420
|
+
cmd.append(target)
|
|
421
|
+
if extra_args:
|
|
422
|
+
cmd.extend(extra_args)
|
|
423
|
+
return cmd
|
|
424
|
+
|
|
425
|
+
def build_discover_command(
|
|
426
|
+
self,
|
|
427
|
+
target: Optional[str] = None,
|
|
428
|
+
pattern: str = "*",
|
|
429
|
+
) -> List[str]:
|
|
430
|
+
cmd = list(self.command) + list(self.discover_args)
|
|
431
|
+
if target:
|
|
432
|
+
cmd.append(target)
|
|
433
|
+
return cmd
|
|
434
|
+
|
|
435
|
+
def parse_run_output(
|
|
436
|
+
self,
|
|
437
|
+
stdout: str,
|
|
438
|
+
stderr: str,
|
|
439
|
+
returncode: int,
|
|
440
|
+
) -> tuple:
|
|
441
|
+
"""Parse generic test output - basic heuristics."""
|
|
442
|
+
tests: List[TestResult] = []
|
|
443
|
+
passed = 0
|
|
444
|
+
failed = 0
|
|
445
|
+
skipped = 0
|
|
446
|
+
errors = 0
|
|
447
|
+
|
|
448
|
+
# Go test output parsing
|
|
449
|
+
if self.runner_name == "go":
|
|
450
|
+
for line in stdout.split("\n"):
|
|
451
|
+
line = line.strip()
|
|
452
|
+
if line.startswith("--- PASS:"):
|
|
453
|
+
name = line.split("--- PASS:")[1].split()[0]
|
|
454
|
+
tests.append(TestResult(name=name, outcome="passed"))
|
|
455
|
+
passed += 1
|
|
456
|
+
elif line.startswith("--- FAIL:"):
|
|
457
|
+
name = line.split("--- FAIL:")[1].split()[0]
|
|
458
|
+
tests.append(TestResult(name=name, outcome="failed"))
|
|
459
|
+
failed += 1
|
|
460
|
+
elif line.startswith("--- SKIP:"):
|
|
461
|
+
name = line.split("--- SKIP:")[1].split()[0]
|
|
462
|
+
tests.append(TestResult(name=name, outcome="skipped"))
|
|
463
|
+
skipped += 1
|
|
464
|
+
# If no individual tests parsed, check return code
|
|
465
|
+
if not tests:
|
|
466
|
+
if returncode == 0:
|
|
467
|
+
passed = 1
|
|
468
|
+
else:
|
|
469
|
+
failed = 1
|
|
470
|
+
|
|
471
|
+
# Jest/npm output parsing
|
|
472
|
+
elif self.runner_name in ("jest", "npm"):
|
|
473
|
+
for line in stdout.split("\n"):
|
|
474
|
+
line = line.strip()
|
|
475
|
+
if "✓" in line or "PASS" in line:
|
|
476
|
+
passed += 1
|
|
477
|
+
elif "✕" in line or "FAIL" in line:
|
|
478
|
+
failed += 1
|
|
479
|
+
elif "○" in line or "skipped" in line.lower():
|
|
480
|
+
skipped += 1
|
|
481
|
+
if passed == 0 and failed == 0:
|
|
482
|
+
if returncode == 0:
|
|
483
|
+
passed = 1
|
|
484
|
+
else:
|
|
485
|
+
failed = 1
|
|
486
|
+
|
|
487
|
+
# Generic fallback - just check return code
|
|
488
|
+
else:
|
|
489
|
+
if returncode == 0:
|
|
490
|
+
passed = 1
|
|
491
|
+
else:
|
|
492
|
+
failed = 1
|
|
493
|
+
|
|
494
|
+
return tests, passed, failed, skipped, errors
|
|
495
|
+
|
|
496
|
+
def parse_discover_output(self, stdout: str) -> tuple:
|
|
497
|
+
"""Parse generic discovery output."""
|
|
498
|
+
tests: List[DiscoveredTest] = []
|
|
499
|
+
test_files: set[str] = set()
|
|
500
|
+
|
|
501
|
+
for line in stdout.split("\n"):
|
|
502
|
+
line = line.strip()
|
|
503
|
+
if line and not line.startswith("#") and not line.startswith("="):
|
|
504
|
+
# Try to extract file path
|
|
505
|
+
if "/" in line or "\\" in line:
|
|
506
|
+
# Looks like a file path
|
|
507
|
+
file_path = line.split()[0] if " " in line else line
|
|
508
|
+
test_files.add(file_path)
|
|
509
|
+
tests.append(DiscoveredTest(name=line, file_path=file_path))
|
|
510
|
+
elif line:
|
|
511
|
+
tests.append(DiscoveredTest(name=line, file_path=""))
|
|
512
|
+
|
|
513
|
+
return tests, list(test_files)
|
|
514
|
+
|
|
515
|
+
@property
|
|
516
|
+
def default_timeout(self) -> int:
|
|
517
|
+
return self.timeout
|
|
518
|
+
|
|
519
|
+
@property
|
|
520
|
+
def not_found_error(self) -> str:
|
|
521
|
+
cmd_name = self.command[0] if self.command else "test runner"
|
|
522
|
+
return f"{cmd_name} not found. Ensure it is installed and in PATH."
|
|
523
|
+
|
|
524
|
+
|
|
525
|
+
def get_runner(
|
|
526
|
+
runner_name: Optional[str] = None,
|
|
527
|
+
test_config: Optional["TestConfig"] = None,
|
|
528
|
+
) -> BaseTestRunner:
|
|
529
|
+
"""Factory function to get the appropriate test runner.
|
|
530
|
+
|
|
531
|
+
Args:
|
|
532
|
+
runner_name: Name of the runner to use. If None, uses default_runner from config.
|
|
533
|
+
test_config: TestConfig from foundry-mcp.toml. If None, uses DEFAULT_RUNNERS.
|
|
534
|
+
|
|
535
|
+
Returns:
|
|
536
|
+
BaseTestRunner instance.
|
|
537
|
+
|
|
538
|
+
Raises:
|
|
539
|
+
ValueError: If the specified runner is not found.
|
|
540
|
+
"""
|
|
541
|
+
# Determine which runner to use
|
|
542
|
+
if runner_name is None:
|
|
543
|
+
if test_config is not None:
|
|
544
|
+
runner_name = test_config.default_runner
|
|
545
|
+
else:
|
|
546
|
+
runner_name = "pytest"
|
|
547
|
+
|
|
548
|
+
# Special case: pytest always uses the optimized PytestRunner
|
|
549
|
+
if runner_name == "pytest":
|
|
550
|
+
return PytestRunner()
|
|
551
|
+
|
|
552
|
+
# Check if runner is defined in test_config
|
|
553
|
+
if test_config is not None:
|
|
554
|
+
runner_cfg = test_config.get_runner(runner_name)
|
|
555
|
+
if runner_cfg is not None:
|
|
556
|
+
return GenericRunner.from_runner_config(runner_cfg, runner_name)
|
|
557
|
+
|
|
558
|
+
# Fall back to DEFAULT_RUNNERS
|
|
559
|
+
if runner_name in DEFAULT_RUNNERS:
|
|
560
|
+
return GenericRunner.from_default(runner_name)
|
|
561
|
+
|
|
562
|
+
# List available runners for error message
|
|
563
|
+
available = list(DEFAULT_RUNNERS.keys())
|
|
564
|
+
if test_config is not None:
|
|
565
|
+
available.extend(test_config.runners.keys())
|
|
566
|
+
available = sorted(set(available))
|
|
567
|
+
|
|
568
|
+
raise ValueError(
|
|
569
|
+
f"Unknown runner: {runner_name}. Available runners: {', '.join(available)}"
|
|
570
|
+
)
|
|
571
|
+
|
|
572
|
+
|
|
573
|
+
def get_available_runners(test_config: Optional["TestConfig"] = None) -> List[str]:
|
|
574
|
+
"""Get list of available runner names.
|
|
575
|
+
|
|
576
|
+
Args:
|
|
577
|
+
test_config: Optional TestConfig for custom runners.
|
|
578
|
+
|
|
579
|
+
Returns:
|
|
580
|
+
List of available runner names.
|
|
581
|
+
"""
|
|
582
|
+
runners = list(DEFAULT_RUNNERS.keys())
|
|
583
|
+
if test_config is not None:
|
|
584
|
+
runners.extend(test_config.runners.keys())
|
|
585
|
+
return sorted(set(runners))
|
|
586
|
+
|
|
587
|
+
|
|
588
|
+
# Main test runner
|
|
589
|
+
|
|
590
|
+
|
|
591
|
+
class TestRunner:
|
|
592
|
+
"""
|
|
593
|
+
Test runner that supports multiple backends (pytest, go, npm, etc.).
|
|
594
|
+
"""
|
|
595
|
+
|
|
596
|
+
def __init__(
|
|
597
|
+
self,
|
|
598
|
+
workspace: Optional[Path] = None,
|
|
599
|
+
runner: Optional[BaseTestRunner] = None,
|
|
600
|
+
):
|
|
601
|
+
"""
|
|
602
|
+
Initialize test runner.
|
|
603
|
+
|
|
604
|
+
Args:
|
|
605
|
+
workspace: Repository root (defaults to current directory)
|
|
606
|
+
runner: Test runner backend (defaults to PytestRunner)
|
|
607
|
+
"""
|
|
608
|
+
self.workspace = workspace or Path.cwd()
|
|
609
|
+
self._runner = runner or PytestRunner()
|
|
610
|
+
|
|
611
|
+
def run_tests(
|
|
612
|
+
self,
|
|
613
|
+
target: Optional[str] = None,
|
|
614
|
+
preset: Optional[str] = None,
|
|
615
|
+
timeout: Optional[int] = None,
|
|
616
|
+
verbose: bool = True,
|
|
617
|
+
fail_fast: bool = False,
|
|
618
|
+
markers: Optional[str] = None,
|
|
619
|
+
extra_args: Optional[List[str]] = None,
|
|
620
|
+
) -> TestRunResult:
|
|
621
|
+
"""
|
|
622
|
+
Run tests using the configured test runner backend.
|
|
623
|
+
|
|
624
|
+
Args:
|
|
625
|
+
target: Test target (file, directory, or test name)
|
|
626
|
+
preset: Use a preset configuration (quick, full, unit, integration, smoke)
|
|
627
|
+
timeout: Timeout in seconds (defaults to runner's default)
|
|
628
|
+
verbose: Enable verbose output
|
|
629
|
+
fail_fast: Stop on first failure
|
|
630
|
+
markers: Pytest markers expression (only applicable for pytest runner)
|
|
631
|
+
extra_args: Additional arguments passed to the runner
|
|
632
|
+
|
|
633
|
+
Returns:
|
|
634
|
+
TestRunResult with test outcomes
|
|
635
|
+
"""
|
|
636
|
+
# Apply preset if specified
|
|
637
|
+
if preset and preset in TEST_PRESETS:
|
|
638
|
+
preset_config = TEST_PRESETS[preset]
|
|
639
|
+
timeout = preset_config.get("timeout", timeout)
|
|
640
|
+
verbose = preset_config.get("verbose", verbose)
|
|
641
|
+
fail_fast = preset_config.get("fail_fast", fail_fast)
|
|
642
|
+
markers = preset_config.get("markers", markers)
|
|
643
|
+
|
|
644
|
+
# Use runner's default timeout if not specified
|
|
645
|
+
if timeout is None:
|
|
646
|
+
timeout = self._runner.default_timeout
|
|
647
|
+
|
|
648
|
+
# Build command using the runner backend
|
|
649
|
+
cmd = self._runner.build_run_command(
|
|
650
|
+
target=target,
|
|
651
|
+
verbose=verbose,
|
|
652
|
+
fail_fast=fail_fast,
|
|
653
|
+
extra_args=extra_args,
|
|
654
|
+
markers=markers,
|
|
655
|
+
)
|
|
656
|
+
|
|
657
|
+
command_str = " ".join(cmd)
|
|
658
|
+
|
|
659
|
+
try:
|
|
660
|
+
result = subprocess.run(
|
|
661
|
+
cmd,
|
|
662
|
+
cwd=str(self.workspace),
|
|
663
|
+
capture_output=True,
|
|
664
|
+
text=True,
|
|
665
|
+
timeout=timeout,
|
|
666
|
+
)
|
|
667
|
+
|
|
668
|
+
# Parse output using the runner backend
|
|
669
|
+
tests, passed, failed, skipped, errors = self._runner.parse_run_output(
|
|
670
|
+
result.stdout, result.stderr, result.returncode
|
|
671
|
+
)
|
|
672
|
+
|
|
673
|
+
return TestRunResult(
|
|
674
|
+
success=result.returncode == 0,
|
|
675
|
+
duration=0.0, # Would need timing wrapper
|
|
676
|
+
total=len(tests) if tests else max(passed + failed + skipped + errors, 1),
|
|
677
|
+
passed=passed,
|
|
678
|
+
failed=failed,
|
|
679
|
+
skipped=skipped,
|
|
680
|
+
errors=errors,
|
|
681
|
+
tests=tests,
|
|
682
|
+
command=command_str,
|
|
683
|
+
cwd=str(self.workspace),
|
|
684
|
+
stdout=result.stdout,
|
|
685
|
+
stderr=result.stderr,
|
|
686
|
+
metadata={
|
|
687
|
+
"return_code": result.returncode,
|
|
688
|
+
"preset": preset,
|
|
689
|
+
"target": target,
|
|
690
|
+
"runner": type(self._runner).__name__,
|
|
691
|
+
},
|
|
692
|
+
)
|
|
693
|
+
|
|
694
|
+
except subprocess.TimeoutExpired:
|
|
695
|
+
return TestRunResult(
|
|
696
|
+
success=False,
|
|
697
|
+
command=command_str,
|
|
698
|
+
cwd=str(self.workspace),
|
|
699
|
+
error=f"Test run timed out after {timeout} seconds",
|
|
700
|
+
metadata={"timeout": timeout},
|
|
701
|
+
)
|
|
702
|
+
|
|
703
|
+
except FileNotFoundError:
|
|
704
|
+
return TestRunResult(
|
|
705
|
+
success=False,
|
|
706
|
+
command=command_str,
|
|
707
|
+
cwd=str(self.workspace),
|
|
708
|
+
error=self._runner.not_found_error,
|
|
709
|
+
)
|
|
710
|
+
|
|
711
|
+
except Exception as e:
|
|
712
|
+
return TestRunResult(
|
|
713
|
+
success=False,
|
|
714
|
+
command=command_str,
|
|
715
|
+
cwd=str(self.workspace),
|
|
716
|
+
error=str(e),
|
|
717
|
+
)
|
|
718
|
+
|
|
719
|
+
def discover_tests(
    self,
    target: Optional[str] = None,
    pattern: str = "test_*.py",
    timeout: float = 60,
) -> TestDiscoveryResult:
    """
    Discover tests without running them.

    Builds a discovery command via the configured runner backend, executes it
    in the workspace, and parses its stdout into structured results.

    Args:
        target: Directory or file to search (runner-specific default if None)
        pattern: File pattern for test files
        timeout: Maximum seconds to wait for discovery (previously hard-coded
            to 60; parameterized with the same default for compatibility)

    Returns:
        TestDiscoveryResult with discovered tests. On timeout, a missing
        runner executable, or any other failure, ``success`` is False and
        ``error`` describes the problem — this method does not raise.
    """
    cmd = self._runner.build_discover_command(target=target, pattern=pattern)

    try:
        result = subprocess.run(
            cmd,
            cwd=str(self.workspace),
            capture_output=True,
            text=True,
            timeout=timeout,
        )

        # Output format varies per tool, so parsing is delegated to the
        # runner backend.
        tests, test_files = self._runner.parse_discover_output(result.stdout)

        return TestDiscoveryResult(
            success=result.returncode == 0,
            tests=tests,
            test_files=test_files,
            metadata={
                "target": target,
                "pattern": pattern,
                "runner": type(self._runner).__name__,
            },
        )

    except subprocess.TimeoutExpired:
        return TestDiscoveryResult(
            success=False,
            error=f"Test discovery timed out after {timeout} seconds",
        )

    except FileNotFoundError:
        # The runner executable is not installed or not on PATH.
        return TestDiscoveryResult(
            success=False,
            error=self._runner.not_found_error,
        )

    except Exception as e:
        # Broad catch is deliberate: discovery is best-effort and callers
        # consume failures through the result object, not exceptions.
        return TestDiscoveryResult(
            success=False,
            error=str(e),
        )
|
|
775
|
+
|
|
776
|
+
|
|
777
|
+
# Convenience functions
|
|
778
|
+
|
|
779
|
+
|
|
780
|
+
def run_tests(
    target: Optional[str] = None,
    preset: Optional[str] = None,
    workspace: Optional[Path] = None,
    runner_name: Optional[str] = None,
    test_config: Optional["TestConfig"] = None,
    **kwargs: Any,
) -> TestRunResult:
    """
    Module-level convenience wrapper around TestRunner.run_tests.

    Resolves a runner backend from the given name/config, binds it to a
    TestRunner for the workspace, and executes the requested tests.

    Args:
        target: Test target
        preset: Preset configuration
        workspace: Repository root
        runner_name: Name of the runner to use (pytest, go, npm, etc.)
        test_config: TestConfig from foundry-mcp.toml
        **kwargs: Extra keyword arguments forwarded to TestRunner.run_tests

    Returns:
        TestRunResult with test outcomes
    """
    backend = get_runner(runner_name, test_config)
    return TestRunner(workspace, runner=backend).run_tests(target, preset, **kwargs)
|
|
805
|
+
|
|
806
|
+
|
|
807
|
+
def discover_tests(
    target: Optional[str] = None,
    workspace: Optional[Path] = None,
    pattern: str = "test_*.py",
    runner_name: Optional[str] = None,
    test_config: Optional["TestConfig"] = None,
) -> TestDiscoveryResult:
    """
    Module-level convenience wrapper around TestRunner.discover_tests.

    Resolves a runner backend from the given name/config, binds it to a
    TestRunner for the workspace, and discovers tests without running them.

    Args:
        target: Directory or file to search
        workspace: Repository root
        pattern: File pattern
        runner_name: Name of the runner to use (pytest, go, npm, etc.)
        test_config: TestConfig from foundry-mcp.toml

    Returns:
        TestDiscoveryResult with discovered tests
    """
    backend = get_runner(runner_name, test_config)
    return TestRunner(workspace, runner=backend).discover_tests(target, pattern)
|
|
830
|
+
|
|
831
|
+
|
|
832
|
+
def get_presets() -> Dict[str, Dict[str, Any]]:
    """
    Return the available test presets.

    Returns:
        A shallow copy of the preset-name -> configuration mapping, so
        callers can mutate the result without touching TEST_PRESETS.
    """
    return dict(TEST_PRESETS)
|