foundry-mcp 0.3.3__py3-none-any.whl → 0.8.10__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Files changed (85)
  1. foundry_mcp/__init__.py +7 -1
  2. foundry_mcp/cli/__init__.py +0 -13
  3. foundry_mcp/cli/commands/plan.py +10 -3
  4. foundry_mcp/cli/commands/review.py +19 -4
  5. foundry_mcp/cli/commands/session.py +1 -8
  6. foundry_mcp/cli/commands/specs.py +38 -208
  7. foundry_mcp/cli/context.py +39 -0
  8. foundry_mcp/cli/output.py +3 -3
  9. foundry_mcp/config.py +615 -11
  10. foundry_mcp/core/ai_consultation.py +146 -9
  11. foundry_mcp/core/batch_operations.py +1196 -0
  12. foundry_mcp/core/discovery.py +7 -7
  13. foundry_mcp/core/error_store.py +2 -2
  14. foundry_mcp/core/intake.py +933 -0
  15. foundry_mcp/core/llm_config.py +28 -2
  16. foundry_mcp/core/metrics_store.py +2 -2
  17. foundry_mcp/core/naming.py +25 -2
  18. foundry_mcp/core/progress.py +70 -0
  19. foundry_mcp/core/prometheus.py +0 -13
  20. foundry_mcp/core/prompts/fidelity_review.py +149 -4
  21. foundry_mcp/core/prompts/markdown_plan_review.py +5 -1
  22. foundry_mcp/core/prompts/plan_review.py +5 -1
  23. foundry_mcp/core/providers/__init__.py +12 -0
  24. foundry_mcp/core/providers/base.py +39 -0
  25. foundry_mcp/core/providers/claude.py +51 -48
  26. foundry_mcp/core/providers/codex.py +70 -60
  27. foundry_mcp/core/providers/cursor_agent.py +25 -47
  28. foundry_mcp/core/providers/detectors.py +34 -7
  29. foundry_mcp/core/providers/gemini.py +69 -58
  30. foundry_mcp/core/providers/opencode.py +101 -47
  31. foundry_mcp/core/providers/package-lock.json +4 -4
  32. foundry_mcp/core/providers/package.json +1 -1
  33. foundry_mcp/core/providers/validation.py +128 -0
  34. foundry_mcp/core/research/__init__.py +68 -0
  35. foundry_mcp/core/research/memory.py +528 -0
  36. foundry_mcp/core/research/models.py +1220 -0
  37. foundry_mcp/core/research/providers/__init__.py +40 -0
  38. foundry_mcp/core/research/providers/base.py +242 -0
  39. foundry_mcp/core/research/providers/google.py +507 -0
  40. foundry_mcp/core/research/providers/perplexity.py +442 -0
  41. foundry_mcp/core/research/providers/semantic_scholar.py +544 -0
  42. foundry_mcp/core/research/providers/tavily.py +383 -0
  43. foundry_mcp/core/research/workflows/__init__.py +25 -0
  44. foundry_mcp/core/research/workflows/base.py +298 -0
  45. foundry_mcp/core/research/workflows/chat.py +271 -0
  46. foundry_mcp/core/research/workflows/consensus.py +539 -0
  47. foundry_mcp/core/research/workflows/deep_research.py +4020 -0
  48. foundry_mcp/core/research/workflows/ideate.py +682 -0
  49. foundry_mcp/core/research/workflows/thinkdeep.py +405 -0
  50. foundry_mcp/core/responses.py +690 -0
  51. foundry_mcp/core/spec.py +2439 -236
  52. foundry_mcp/core/task.py +1205 -31
  53. foundry_mcp/core/testing.py +512 -123
  54. foundry_mcp/core/validation.py +319 -43
  55. foundry_mcp/dashboard/components/charts.py +0 -57
  56. foundry_mcp/dashboard/launcher.py +11 -0
  57. foundry_mcp/dashboard/views/metrics.py +25 -35
  58. foundry_mcp/dashboard/views/overview.py +1 -65
  59. foundry_mcp/resources/specs.py +25 -25
  60. foundry_mcp/schemas/intake-schema.json +89 -0
  61. foundry_mcp/schemas/sdd-spec-schema.json +33 -5
  62. foundry_mcp/server.py +0 -14
  63. foundry_mcp/tools/unified/__init__.py +39 -18
  64. foundry_mcp/tools/unified/authoring.py +2371 -248
  65. foundry_mcp/tools/unified/documentation_helpers.py +69 -6
  66. foundry_mcp/tools/unified/environment.py +434 -32
  67. foundry_mcp/tools/unified/error.py +18 -1
  68. foundry_mcp/tools/unified/lifecycle.py +8 -0
  69. foundry_mcp/tools/unified/plan.py +133 -2
  70. foundry_mcp/tools/unified/provider.py +0 -40
  71. foundry_mcp/tools/unified/research.py +1283 -0
  72. foundry_mcp/tools/unified/review.py +374 -17
  73. foundry_mcp/tools/unified/review_helpers.py +16 -1
  74. foundry_mcp/tools/unified/server.py +9 -24
  75. foundry_mcp/tools/unified/spec.py +367 -0
  76. foundry_mcp/tools/unified/task.py +1664 -30
  77. foundry_mcp/tools/unified/test.py +69 -8
  78. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/METADATA +8 -1
  79. foundry_mcp-0.8.10.dist-info/RECORD +153 -0
  80. foundry_mcp/cli/flags.py +0 -266
  81. foundry_mcp/core/feature_flags.py +0 -592
  82. foundry_mcp-0.3.3.dist-info/RECORD +0 -135
  83. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/WHEEL +0 -0
  84. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/entry_points.txt +0 -0
  85. {foundry_mcp-0.3.3.dist-info → foundry_mcp-0.8.10.dist-info}/licenses/LICENSE +0 -0
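
The hunks below are from foundry_mcp/core/testing.py, which replaces the pytest-only runner with a configuration-driven, multi-runner design. The new code reads RunnerConfig and TestConfig (defined in foundry_mcp/config.py, which is not shown in this excerpt) only through a handful of attributes, so a minimal stand-in for those objects looks roughly like the sketch below. This is an illustrative approximation inferred from the attribute accesses in the diff, not the actual definitions shipped in config.py; field names and defaults may differ.

# Illustrative stand-ins only; the shipped dataclasses live in foundry_mcp/config.py.
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class RunnerConfig:
    command: List[str]                      # e.g. ["go", "test"]
    run_args: List[str] = field(default_factory=list)
    discover_args: List[str] = field(default_factory=list)
    pattern: str = "*"                      # test-file glob
    timeout: int = 300                      # seconds


@dataclass
class TestConfig:
    default_runner: str = "pytest"
    runners: Dict[str, RunnerConfig] = field(default_factory=dict)

    def get_runner(self, name: str) -> Optional[RunnerConfig]:
        # Consumed by GenericRunner.from_runner_config() in the diff below.
        return self.runners.get(name)
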
@@ -1,20 +1,67 @@
 """
 Testing operations for foundry-mcp.
 Provides functions for running tests and test discovery.
+
+Supports multiple test runners (pytest, go, npm, jest, etc.) via configuration.
 """
 
+import re
 import subprocess
 import uuid
+from abc import ABC, abstractmethod
 from dataclasses import dataclass, field
 from datetime import datetime, timezone
 from pathlib import Path
-from typing import Any, Dict, List, Optional
+from typing import Any, Dict, List, Optional, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from foundry_mcp.config import RunnerConfig, TestConfig
 
 
 # Schema version for compatibility tracking
 SCHEMA_VERSION = "1.0.0"
 
 
+# Default runner configurations (used when no TOML config provided)
+DEFAULT_RUNNERS: Dict[str, Dict[str, Any]] = {
+    "pytest": {
+        "command": ["python", "-m", "pytest"],
+        "run_args": ["-v", "--tb=short"],
+        "discover_args": ["--collect-only", "-q"],
+        "pattern": "test_*.py",
+        "timeout": 300,
+    },
+    "go": {
+        "command": ["go", "test"],
+        "run_args": ["-v"],
+        "discover_args": ["-list", ".*"],
+        "pattern": "*_test.go",
+        "timeout": 300,
+    },
+    "npm": {
+        "command": ["npm", "test"],
+        "run_args": ["--"],
+        "discover_args": [],
+        "pattern": "*.test.js",
+        "timeout": 300,
+    },
+    "jest": {
+        "command": ["npx", "jest"],
+        "run_args": ["--verbose"],
+        "discover_args": ["--listTests"],
+        "pattern": "*.test.{js,ts,jsx,tsx}",
+        "timeout": 300,
+    },
+    "make": {
+        "command": ["make", "test"],
+        "run_args": [],
+        "discover_args": [],
+        "pattern": "*",
+        "timeout": 300,
+    },
+}
+
+
 # Presets for common test configurations
 TEST_PRESETS = {
     "quick": {
@@ -130,43 +177,458 @@ class TestDiscoveryResult:
         self.total = len(self.tests)
 
 
+# Abstract test runner interface
+
+
+class BaseTestRunner(ABC):
+    """Abstract base class for test runners."""
+
+    @abstractmethod
+    def build_run_command(
+        self,
+        target: Optional[str] = None,
+        verbose: bool = True,
+        fail_fast: bool = False,
+        extra_args: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> List[str]:
+        """Build the command to run tests."""
+        pass
+
+    @abstractmethod
+    def build_discover_command(
+        self,
+        target: Optional[str] = None,
+        pattern: str = "*",
+    ) -> List[str]:
+        """Build the command to discover tests."""
+        pass
+
+    @abstractmethod
+    def parse_run_output(
+        self,
+        stdout: str,
+        stderr: str,
+        returncode: int,
+    ) -> tuple:
+        """Parse test run output. Returns (tests, passed, failed, skipped, errors)."""
+        pass
+
+    @abstractmethod
+    def parse_discover_output(self, stdout: str) -> tuple:
+        """Parse test discovery output. Returns (tests, test_files)."""
+        pass
+
+    @property
+    @abstractmethod
+    def default_timeout(self) -> int:
+        """Default timeout in seconds."""
+        pass
+
+    @property
+    @abstractmethod
+    def not_found_error(self) -> str:
+        """Error message when the runner is not found."""
+        pass
+
+
+class PytestRunner(BaseTestRunner):
+    """Test runner for pytest-based projects."""
+
+    def build_run_command(
+        self,
+        target: Optional[str] = None,
+        verbose: bool = True,
+        fail_fast: bool = False,
+        extra_args: Optional[List[str]] = None,
+        markers: Optional[str] = None,
+        **kwargs: Any,
+    ) -> List[str]:
+        cmd = ["python", "-m", "pytest"]
+
+        if target:
+            cmd.append(target)
+
+        if verbose:
+            cmd.append("-v")
+
+        if fail_fast:
+            cmd.append("-x")
+
+        if markers:
+            cmd.extend(["-m", markers])
+
+        cmd.append("--tb=short")
+
+        if extra_args:
+            cmd.extend(extra_args)
+
+        return cmd
+
+    def build_discover_command(
+        self,
+        target: Optional[str] = None,
+        pattern: str = "test_*.py",
+    ) -> List[str]:
+        cmd = ["python", "-m", "pytest", "--collect-only", "-q"]
+        if target:
+            cmd.append(target)
+        return cmd
+
+    def parse_run_output(
+        self,
+        stdout: str,
+        stderr: str,
+        returncode: int,
+    ) -> tuple:
+        """Parse pytest output to extract test results."""
+        tests = []
+        passed = 0
+        failed = 0
+        skipped = 0
+        errors = 0
+
+        lines = stdout.split("\n")
+
+        for line in lines:
+            line = line.strip()
+
+            # Parse individual test results
+            if "::" in line:
+                if " PASSED" in line:
+                    name = line.split(" PASSED")[0].strip()
+                    tests.append(TestResult(name=name, outcome="passed"))
+                    passed += 1
+                elif " FAILED" in line:
+                    name = line.split(" FAILED")[0].strip()
+                    tests.append(TestResult(name=name, outcome="failed"))
+                    failed += 1
+                elif " SKIPPED" in line:
+                    name = line.split(" SKIPPED")[0].strip()
+                    tests.append(TestResult(name=name, outcome="skipped"))
+                    skipped += 1
+                elif " ERROR" in line:
+                    name = line.split(" ERROR")[0].strip()
+                    tests.append(TestResult(name=name, outcome="error"))
+                    errors += 1
+
+            # Parse summary line
+            if "passed" in line.lower() and (
+                "failed" in line.lower()
+                or "error" in line.lower()
+                or "skipped" in line.lower()
+            ):
+                passed_match = re.search(r"(\d+) passed", line)
+                failed_match = re.search(r"(\d+) failed", line)
+                skipped_match = re.search(r"(\d+) skipped", line)
+                error_match = re.search(r"(\d+) error", line)
+
+                if passed_match:
+                    passed = int(passed_match.group(1))
+                if failed_match:
+                    failed = int(failed_match.group(1))
+                if skipped_match:
+                    skipped = int(skipped_match.group(1))
+                if error_match:
+                    errors = int(error_match.group(1))
+
+        return tests, passed, failed, skipped, errors
+
+    def parse_discover_output(self, stdout: str) -> tuple:
+        """Parse pytest --collect-only output."""
+        tests = []
+        test_files: set[str] = set()
+
+        for line in stdout.split("\n"):
+            line = line.strip()
+            if "::" in line and not line.startswith("="):
+                parts = line.split("::")
+                if parts:
+                    file_path = parts[0]
+                    test_files.add(file_path)
+                    tests.append(DiscoveredTest(name=line, file_path=file_path))
+
+        return tests, list(test_files)
+
+    @property
+    def default_timeout(self) -> int:
+        return 300
+
+    @property
+    def not_found_error(self) -> str:
+        return "pytest not found. Install with: pip install pytest"
+
+
+class GenericRunner(BaseTestRunner):
+    """Generic test runner that uses RunnerConfig from TOML configuration."""
+
+    def __init__(
+        self,
+        command: List[str],
+        run_args: Optional[List[str]] = None,
+        discover_args: Optional[List[str]] = None,
+        pattern: str = "*",
+        timeout: int = 300,
+        runner_name: str = "generic",
+    ):
+        self.command = command
+        self.run_args = run_args or []
+        self.discover_args = discover_args or []
+        self.pattern = pattern
+        self.timeout = timeout
+        self.runner_name = runner_name
+
+    @classmethod
+    def from_runner_config(
+        cls, config: "RunnerConfig", runner_name: str = "generic"
+    ) -> "GenericRunner":
+        """Create a GenericRunner from a RunnerConfig object."""
+        return cls(
+            command=list(config.command),
+            run_args=list(config.run_args),
+            discover_args=list(config.discover_args),
+            pattern=config.pattern,
+            timeout=config.timeout,
+            runner_name=runner_name,
+        )
+
+    @classmethod
+    def from_default(cls, runner_name: str) -> "GenericRunner":
+        """Create a GenericRunner from DEFAULT_RUNNERS."""
+        if runner_name not in DEFAULT_RUNNERS:
+            raise ValueError(f"Unknown default runner: {runner_name}")
+        cfg = DEFAULT_RUNNERS[runner_name]
+        return cls(
+            command=list(cfg["command"]),
+            run_args=list(cfg.get("run_args", [])),
+            discover_args=list(cfg.get("discover_args", [])),
+            pattern=cfg.get("pattern", "*"),
+            timeout=cfg.get("timeout", 300),
+            runner_name=runner_name,
+        )
+
+    def build_run_command(
+        self,
+        target: Optional[str] = None,
+        verbose: bool = True,
+        fail_fast: bool = False,
+        extra_args: Optional[List[str]] = None,
+        **kwargs: Any,
+    ) -> List[str]:
+        cmd = list(self.command) + list(self.run_args)
+        if target:
+            cmd.append(target)
+        if extra_args:
+            cmd.extend(extra_args)
+        return cmd
+
+    def build_discover_command(
+        self,
+        target: Optional[str] = None,
+        pattern: str = "*",
+    ) -> List[str]:
+        cmd = list(self.command) + list(self.discover_args)
+        if target:
+            cmd.append(target)
+        return cmd
+
+    def parse_run_output(
+        self,
+        stdout: str,
+        stderr: str,
+        returncode: int,
+    ) -> tuple:
+        """Parse generic test output - basic heuristics."""
+        tests: List[TestResult] = []
+        passed = 0
+        failed = 0
+        skipped = 0
+        errors = 0
+
+        # Go test output parsing
+        if self.runner_name == "go":
+            for line in stdout.split("\n"):
+                line = line.strip()
+                if line.startswith("--- PASS:"):
+                    name = line.split("--- PASS:")[1].split()[0]
+                    tests.append(TestResult(name=name, outcome="passed"))
+                    passed += 1
+                elif line.startswith("--- FAIL:"):
+                    name = line.split("--- FAIL:")[1].split()[0]
+                    tests.append(TestResult(name=name, outcome="failed"))
+                    failed += 1
+                elif line.startswith("--- SKIP:"):
+                    name = line.split("--- SKIP:")[1].split()[0]
+                    tests.append(TestResult(name=name, outcome="skipped"))
+                    skipped += 1
+            # If no individual tests parsed, check return code
+            if not tests:
+                if returncode == 0:
+                    passed = 1
+                else:
+                    failed = 1
+
+        # Jest/npm output parsing
+        elif self.runner_name in ("jest", "npm"):
+            for line in stdout.split("\n"):
+                line = line.strip()
+                if "✓" in line or "PASS" in line:
+                    passed += 1
+                elif "✕" in line or "FAIL" in line:
+                    failed += 1
+                elif "○" in line or "skipped" in line.lower():
+                    skipped += 1
+            if passed == 0 and failed == 0:
+                if returncode == 0:
+                    passed = 1
+                else:
+                    failed = 1
+
+        # Generic fallback - just check return code
+        else:
+            if returncode == 0:
+                passed = 1
+            else:
+                failed = 1
+
+        return tests, passed, failed, skipped, errors
+
+    def parse_discover_output(self, stdout: str) -> tuple:
+        """Parse generic discovery output."""
+        tests: List[DiscoveredTest] = []
+        test_files: set[str] = set()
+
+        for line in stdout.split("\n"):
+            line = line.strip()
+            if line and not line.startswith("#") and not line.startswith("="):
+                # Try to extract file path
+                if "/" in line or "\\" in line:
+                    # Looks like a file path
+                    file_path = line.split()[0] if " " in line else line
+                    test_files.add(file_path)
+                    tests.append(DiscoveredTest(name=line, file_path=file_path))
+                elif line:
+                    tests.append(DiscoveredTest(name=line, file_path=""))
+
+        return tests, list(test_files)
+
+    @property
+    def default_timeout(self) -> int:
+        return self.timeout
+
+    @property
+    def not_found_error(self) -> str:
+        cmd_name = self.command[0] if self.command else "test runner"
+        return f"{cmd_name} not found. Ensure it is installed and in PATH."
+
+
+def get_runner(
+    runner_name: Optional[str] = None,
+    test_config: Optional["TestConfig"] = None,
+) -> BaseTestRunner:
+    """Factory function to get the appropriate test runner.
+
+    Args:
+        runner_name: Name of the runner to use. If None, uses default_runner from config.
+        test_config: TestConfig from foundry-mcp.toml. If None, uses DEFAULT_RUNNERS.
+
+    Returns:
+        BaseTestRunner instance.
+
+    Raises:
+        ValueError: If the specified runner is not found.
+    """
+    # Determine which runner to use
+    if runner_name is None:
+        if test_config is not None:
+            runner_name = test_config.default_runner
+        else:
+            runner_name = "pytest"
+
+    # Special case: pytest always uses the optimized PytestRunner
+    if runner_name == "pytest":
+        return PytestRunner()
+
+    # Check if runner is defined in test_config
+    if test_config is not None:
+        runner_cfg = test_config.get_runner(runner_name)
+        if runner_cfg is not None:
+            return GenericRunner.from_runner_config(runner_cfg, runner_name)
+
+    # Fall back to DEFAULT_RUNNERS
+    if runner_name in DEFAULT_RUNNERS:
+        return GenericRunner.from_default(runner_name)
+
+    # List available runners for error message
+    available = list(DEFAULT_RUNNERS.keys())
+    if test_config is not None:
+        available.extend(test_config.runners.keys())
+    available = sorted(set(available))
+
+    raise ValueError(
+        f"Unknown runner: {runner_name}. Available runners: {', '.join(available)}"
+    )
+
+
+def get_available_runners(test_config: Optional["TestConfig"] = None) -> List[str]:
+    """Get list of available runner names.
+
+    Args:
+        test_config: Optional TestConfig for custom runners.
+
+    Returns:
+        List of available runner names.
+    """
+    runners = list(DEFAULT_RUNNERS.keys())
+    if test_config is not None:
+        runners.extend(test_config.runners.keys())
+    return sorted(set(runners))
+
+
 # Main test runner
 
+
 class TestRunner:
     """
-    Test runner for pytest-based projects.
+    Test runner that supports multiple backends (pytest, go, npm, etc.).
     """
 
-    def __init__(self, workspace: Optional[Path] = None):
+    def __init__(
+        self,
+        workspace: Optional[Path] = None,
+        runner: Optional[BaseTestRunner] = None,
+    ):
         """
         Initialize test runner.
 
         Args:
             workspace: Repository root (defaults to current directory)
+            runner: Test runner backend (defaults to PytestRunner)
         """
         self.workspace = workspace or Path.cwd()
+        self._runner = runner or PytestRunner()
 
     def run_tests(
         self,
         target: Optional[str] = None,
         preset: Optional[str] = None,
-        timeout: int = 300,
+        timeout: Optional[int] = None,
         verbose: bool = True,
         fail_fast: bool = False,
         markers: Optional[str] = None,
         extra_args: Optional[List[str]] = None,
     ) -> TestRunResult:
         """
-        Run tests using pytest.
+        Run tests using the configured test runner backend.
 
         Args:
             target: Test target (file, directory, or test name)
             preset: Use a preset configuration (quick, full, unit, integration, smoke)
-            timeout: Timeout in seconds
+            timeout: Timeout in seconds (defaults to runner's default)
             verbose: Enable verbose output
             fail_fast: Stop on first failure
-            markers: Pytest markers expression
-            extra_args: Additional pytest arguments
+            markers: Pytest markers expression (only applicable for pytest runner)
+            extra_args: Additional arguments passed to the runner
 
         Returns:
             TestRunResult with test outcomes
@@ -179,26 +641,18 @@ class TestRunner:
             fail_fast = preset_config.get("fail_fast", fail_fast)
             markers = preset_config.get("markers", markers)
 
-        # Build command
-        cmd = ["python", "-m", "pytest"]
+        # Use runner's default timeout if not specified
+        if timeout is None:
+            timeout = self._runner.default_timeout
 
-        if target:
-            cmd.append(target)
-
-        if verbose:
-            cmd.append("-v")
-
-        if fail_fast:
-            cmd.append("-x")
-
-        if markers:
-            cmd.extend(["-m", markers])
-
-        # Add JSON output for parsing
-        cmd.append("--tb=short")
-
-        if extra_args:
-            cmd.extend(extra_args)
+        # Build command using the runner backend
+        cmd = self._runner.build_run_command(
+            target=target,
+            verbose=verbose,
+            fail_fast=fail_fast,
+            extra_args=extra_args,
+            markers=markers,
+        )
 
         command_str = " ".join(cmd)
 
@@ -211,13 +665,15 @@ class TestRunner:
                 timeout=timeout,
             )
 
-            # Parse output
-            tests, passed, failed, skipped, errors = self._parse_pytest_output(result.stdout)
+            # Parse output using the runner backend
+            tests, passed, failed, skipped, errors = self._runner.parse_run_output(
+                result.stdout, result.stderr, result.returncode
+            )
 
             return TestRunResult(
                 success=result.returncode == 0,
                 duration=0.0,  # Would need timing wrapper
-                total=len(tests),
+                total=len(tests) if tests else max(passed + failed + skipped + errors, 1),
                 passed=passed,
                 failed=failed,
                 skipped=skipped,
@@ -231,6 +687,7 @@ class TestRunner:
                     "return_code": result.returncode,
                     "preset": preset,
                     "target": target,
+                    "runner": type(self._runner).__name__,
                 },
             )
 
@@ -248,7 +705,7 @@ class TestRunner:
                 success=False,
                 command=command_str,
                 cwd=str(self.workspace),
-                error="pytest not found. Install with: pip install pytest",
+                error=self._runner.not_found_error,
             )
 
         except Exception as e:
@@ -259,63 +716,6 @@ class TestRunner:
                 error=str(e),
             )
 
-    def _parse_pytest_output(self, output: str) -> tuple:
-        """
-        Parse pytest output to extract test results.
-
-        Returns:
-            Tuple of (tests, passed, failed, skipped, errors)
-        """
-        tests = []
-        passed = 0
-        failed = 0
-        skipped = 0
-        errors = 0
-
-        lines = output.split("\n")
-
-        for line in lines:
-            line = line.strip()
-
-            # Parse individual test results
-            if "::" in line:
-                if " PASSED" in line:
-                    name = line.split(" PASSED")[0].strip()
-                    tests.append(TestResult(name=name, outcome="passed"))
-                    passed += 1
-                elif " FAILED" in line:
-                    name = line.split(" FAILED")[0].strip()
-                    tests.append(TestResult(name=name, outcome="failed"))
-                    failed += 1
-                elif " SKIPPED" in line:
-                    name = line.split(" SKIPPED")[0].strip()
-                    tests.append(TestResult(name=name, outcome="skipped"))
-                    skipped += 1
-                elif " ERROR" in line:
-                    name = line.split(" ERROR")[0].strip()
-                    tests.append(TestResult(name=name, outcome="error"))
-                    errors += 1
-
-            # Parse summary line
-            if "passed" in line.lower() and ("failed" in line.lower() or "error" in line.lower() or "skipped" in line.lower()):
-                # Try to extract counts from summary like "5 passed, 2 failed"
-                import re
-                passed_match = re.search(r"(\d+) passed", line)
-                failed_match = re.search(r"(\d+) failed", line)
-                skipped_match = re.search(r"(\d+) skipped", line)
-                error_match = re.search(r"(\d+) error", line)
-
-                if passed_match:
-                    passed = int(passed_match.group(1))
-                if failed_match:
-                    failed = int(failed_match.group(1))
-                if skipped_match:
-                    skipped = int(skipped_match.group(1))
-                if error_match:
-                    errors = int(error_match.group(1))
-
-        return tests, passed, failed, skipped, errors
-
     def discover_tests(
         self,
         target: Optional[str] = None,
@@ -331,10 +731,7 @@ class TestRunner:
         Returns:
             TestDiscoveryResult with discovered tests
         """
-        cmd = ["python", "-m", "pytest", "--collect-only", "-q"]
-
-        if target:
-            cmd.append(target)
+        cmd = self._runner.build_discover_command(target=target, pattern=pattern)
 
         try:
             result = subprocess.run(
@@ -345,7 +742,7 @@ class TestRunner:
                 timeout=60,
             )
 
-            tests, test_files = self._parse_collect_output(result.stdout)
+            tests, test_files = self._runner.parse_discover_output(result.stdout)
 
             return TestDiscoveryResult(
                 success=result.returncode == 0,
@@ -354,6 +751,7 @@ class TestRunner:
                 metadata={
                     "target": target,
                     "pattern": pattern,
+                    "runner": type(self._runner).__name__,
                 },
             )
 
@@ -363,60 +761,46 @@ class TestRunner:
                 error="Test discovery timed out",
             )
 
+        except FileNotFoundError:
+            return TestDiscoveryResult(
+                success=False,
+                error=self._runner.not_found_error,
+            )
+
         except Exception as e:
             return TestDiscoveryResult(
                 success=False,
                 error=str(e),
             )
 
-    def _parse_collect_output(self, output: str) -> tuple:
-        """
-        Parse pytest --collect-only output.
-
-        Returns:
-            Tuple of (tests, test_files)
-        """
-        tests = []
-        test_files = set()
-
-        for line in output.split("\n"):
-            line = line.strip()
-            if "::" in line and not line.startswith("="):
-                # Parse test path like "tests/test_foo.py::TestClass::test_method"
-                parts = line.split("::")
-                if parts:
-                    file_path = parts[0]
-                    test_files.add(file_path)
-
-                    tests.append(DiscoveredTest(
-                        name=line,
-                        file_path=file_path,
-                    ))
-
-        return tests, list(test_files)
-
 
 # Convenience functions
 
+
 def run_tests(
     target: Optional[str] = None,
     preset: Optional[str] = None,
     workspace: Optional[Path] = None,
-    **kwargs,
+    runner_name: Optional[str] = None,
+    test_config: Optional["TestConfig"] = None,
+    **kwargs: Any,
 ) -> TestRunResult:
     """
-    Run tests using pytest.
+    Run tests using the specified runner.
 
     Args:
         target: Test target
         preset: Preset configuration
         workspace: Repository root
+        runner_name: Name of the runner to use (pytest, go, npm, etc.)
+        test_config: TestConfig from foundry-mcp.toml
        **kwargs: Additional arguments for TestRunner.run_tests
 
     Returns:
         TestRunResult with test outcomes
     """
-    runner = TestRunner(workspace)
+    runner_backend = get_runner(runner_name, test_config)
+    runner = TestRunner(workspace, runner=runner_backend)
     return runner.run_tests(target, preset, **kwargs)
 
 
@@ -424,6 +808,8 @@ def discover_tests(
     target: Optional[str] = None,
     workspace: Optional[Path] = None,
     pattern: str = "test_*.py",
+    runner_name: Optional[str] = None,
+    test_config: Optional["TestConfig"] = None,
 ) -> TestDiscoveryResult:
     """
     Discover tests without running them.
@@ -432,11 +818,14 @@ def discover_tests(
         target: Directory or file to search
         workspace: Repository root
         pattern: File pattern
+        runner_name: Name of the runner to use (pytest, go, npm, etc.)
+        test_config: TestConfig from foundry-mcp.toml
 
     Returns:
         TestDiscoveryResult with discovered tests
     """
-    runner = TestRunner(workspace)
+    runner_backend = get_runner(runner_name, test_config)
+    runner = TestRunner(workspace, runner=runner_backend)
     return runner.discover_tests(target, pattern)
 
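
Taken together, the new module keeps run_tests() and discover_tests() as the public entry points while delegating command construction and output parsing to a runner backend chosen by get_runner(). A minimal usage sketch, assuming only the names visible in the hunks above; the target paths are placeholders for a real project layout:

from foundry_mcp.core.testing import (
    discover_tests,
    get_available_runners,
    get_runner,
    run_tests,
)

# Built-in defaults: go, jest, make, npm, pytest (plus any runners from foundry-mcp.toml).
print(get_available_runners())

# No runner_name or test_config: falls back to the dedicated PytestRunner.
result = run_tests(target="tests/", preset="quick", fail_fast=True)
print(result.passed, result.failed, result.skipped)

# Select the Go backend; GenericRunner parses "--- PASS:" / "--- FAIL:" lines.
go_result = run_tests(target="./...", runner_name="go")

# Discovery routes through the same backend selection.
discovered = discover_tests(target="tests/", runner_name="pytest")
print(discovered.total)

# get_runner() returns a backend directly; unknown names raise ValueError listing the available runners.
jest_backend = get_runner("jest")
print(jest_backend.build_run_command(target="src/app.test.ts"))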