foundry-mcp 0.3.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (135) hide show
  1. foundry_mcp/__init__.py +7 -0
  2. foundry_mcp/cli/__init__.py +80 -0
  3. foundry_mcp/cli/__main__.py +9 -0
  4. foundry_mcp/cli/agent.py +96 -0
  5. foundry_mcp/cli/commands/__init__.py +37 -0
  6. foundry_mcp/cli/commands/cache.py +137 -0
  7. foundry_mcp/cli/commands/dashboard.py +148 -0
  8. foundry_mcp/cli/commands/dev.py +446 -0
  9. foundry_mcp/cli/commands/journal.py +377 -0
  10. foundry_mcp/cli/commands/lifecycle.py +274 -0
  11. foundry_mcp/cli/commands/modify.py +824 -0
  12. foundry_mcp/cli/commands/plan.py +633 -0
  13. foundry_mcp/cli/commands/pr.py +393 -0
  14. foundry_mcp/cli/commands/review.py +652 -0
  15. foundry_mcp/cli/commands/session.py +479 -0
  16. foundry_mcp/cli/commands/specs.py +856 -0
  17. foundry_mcp/cli/commands/tasks.py +807 -0
  18. foundry_mcp/cli/commands/testing.py +676 -0
  19. foundry_mcp/cli/commands/validate.py +982 -0
  20. foundry_mcp/cli/config.py +98 -0
  21. foundry_mcp/cli/context.py +259 -0
  22. foundry_mcp/cli/flags.py +266 -0
  23. foundry_mcp/cli/logging.py +212 -0
  24. foundry_mcp/cli/main.py +44 -0
  25. foundry_mcp/cli/output.py +122 -0
  26. foundry_mcp/cli/registry.py +110 -0
  27. foundry_mcp/cli/resilience.py +178 -0
  28. foundry_mcp/cli/transcript.py +217 -0
  29. foundry_mcp/config.py +850 -0
  30. foundry_mcp/core/__init__.py +144 -0
  31. foundry_mcp/core/ai_consultation.py +1636 -0
  32. foundry_mcp/core/cache.py +195 -0
  33. foundry_mcp/core/capabilities.py +446 -0
  34. foundry_mcp/core/concurrency.py +898 -0
  35. foundry_mcp/core/context.py +540 -0
  36. foundry_mcp/core/discovery.py +1603 -0
  37. foundry_mcp/core/error_collection.py +728 -0
  38. foundry_mcp/core/error_store.py +592 -0
  39. foundry_mcp/core/feature_flags.py +592 -0
  40. foundry_mcp/core/health.py +749 -0
  41. foundry_mcp/core/journal.py +694 -0
  42. foundry_mcp/core/lifecycle.py +412 -0
  43. foundry_mcp/core/llm_config.py +1350 -0
  44. foundry_mcp/core/llm_patterns.py +510 -0
  45. foundry_mcp/core/llm_provider.py +1569 -0
  46. foundry_mcp/core/logging_config.py +374 -0
  47. foundry_mcp/core/metrics_persistence.py +584 -0
  48. foundry_mcp/core/metrics_registry.py +327 -0
  49. foundry_mcp/core/metrics_store.py +641 -0
  50. foundry_mcp/core/modifications.py +224 -0
  51. foundry_mcp/core/naming.py +123 -0
  52. foundry_mcp/core/observability.py +1216 -0
  53. foundry_mcp/core/otel.py +452 -0
  54. foundry_mcp/core/otel_stubs.py +264 -0
  55. foundry_mcp/core/pagination.py +255 -0
  56. foundry_mcp/core/progress.py +317 -0
  57. foundry_mcp/core/prometheus.py +577 -0
  58. foundry_mcp/core/prompts/__init__.py +464 -0
  59. foundry_mcp/core/prompts/fidelity_review.py +546 -0
  60. foundry_mcp/core/prompts/markdown_plan_review.py +511 -0
  61. foundry_mcp/core/prompts/plan_review.py +623 -0
  62. foundry_mcp/core/providers/__init__.py +225 -0
  63. foundry_mcp/core/providers/base.py +476 -0
  64. foundry_mcp/core/providers/claude.py +460 -0
  65. foundry_mcp/core/providers/codex.py +619 -0
  66. foundry_mcp/core/providers/cursor_agent.py +642 -0
  67. foundry_mcp/core/providers/detectors.py +488 -0
  68. foundry_mcp/core/providers/gemini.py +405 -0
  69. foundry_mcp/core/providers/opencode.py +616 -0
  70. foundry_mcp/core/providers/opencode_wrapper.js +302 -0
  71. foundry_mcp/core/providers/package-lock.json +24 -0
  72. foundry_mcp/core/providers/package.json +25 -0
  73. foundry_mcp/core/providers/registry.py +607 -0
  74. foundry_mcp/core/providers/test_provider.py +171 -0
  75. foundry_mcp/core/providers/validation.py +729 -0
  76. foundry_mcp/core/rate_limit.py +427 -0
  77. foundry_mcp/core/resilience.py +600 -0
  78. foundry_mcp/core/responses.py +934 -0
  79. foundry_mcp/core/review.py +366 -0
  80. foundry_mcp/core/security.py +438 -0
  81. foundry_mcp/core/spec.py +1650 -0
  82. foundry_mcp/core/task.py +1289 -0
  83. foundry_mcp/core/testing.py +450 -0
  84. foundry_mcp/core/validation.py +2081 -0
  85. foundry_mcp/dashboard/__init__.py +32 -0
  86. foundry_mcp/dashboard/app.py +119 -0
  87. foundry_mcp/dashboard/components/__init__.py +17 -0
  88. foundry_mcp/dashboard/components/cards.py +88 -0
  89. foundry_mcp/dashboard/components/charts.py +234 -0
  90. foundry_mcp/dashboard/components/filters.py +136 -0
  91. foundry_mcp/dashboard/components/tables.py +195 -0
  92. foundry_mcp/dashboard/data/__init__.py +11 -0
  93. foundry_mcp/dashboard/data/stores.py +433 -0
  94. foundry_mcp/dashboard/launcher.py +289 -0
  95. foundry_mcp/dashboard/views/__init__.py +12 -0
  96. foundry_mcp/dashboard/views/errors.py +217 -0
  97. foundry_mcp/dashboard/views/metrics.py +174 -0
  98. foundry_mcp/dashboard/views/overview.py +160 -0
  99. foundry_mcp/dashboard/views/providers.py +83 -0
  100. foundry_mcp/dashboard/views/sdd_workflow.py +255 -0
  101. foundry_mcp/dashboard/views/tool_usage.py +139 -0
  102. foundry_mcp/prompts/__init__.py +9 -0
  103. foundry_mcp/prompts/workflows.py +525 -0
  104. foundry_mcp/resources/__init__.py +9 -0
  105. foundry_mcp/resources/specs.py +591 -0
  106. foundry_mcp/schemas/__init__.py +38 -0
  107. foundry_mcp/schemas/sdd-spec-schema.json +386 -0
  108. foundry_mcp/server.py +164 -0
  109. foundry_mcp/tools/__init__.py +10 -0
  110. foundry_mcp/tools/unified/__init__.py +71 -0
  111. foundry_mcp/tools/unified/authoring.py +1487 -0
  112. foundry_mcp/tools/unified/context_helpers.py +98 -0
  113. foundry_mcp/tools/unified/documentation_helpers.py +198 -0
  114. foundry_mcp/tools/unified/environment.py +939 -0
  115. foundry_mcp/tools/unified/error.py +462 -0
  116. foundry_mcp/tools/unified/health.py +225 -0
  117. foundry_mcp/tools/unified/journal.py +841 -0
  118. foundry_mcp/tools/unified/lifecycle.py +632 -0
  119. foundry_mcp/tools/unified/metrics.py +777 -0
  120. foundry_mcp/tools/unified/plan.py +745 -0
  121. foundry_mcp/tools/unified/pr.py +294 -0
  122. foundry_mcp/tools/unified/provider.py +629 -0
  123. foundry_mcp/tools/unified/review.py +685 -0
  124. foundry_mcp/tools/unified/review_helpers.py +299 -0
  125. foundry_mcp/tools/unified/router.py +102 -0
  126. foundry_mcp/tools/unified/server.py +580 -0
  127. foundry_mcp/tools/unified/spec.py +808 -0
  128. foundry_mcp/tools/unified/task.py +2202 -0
  129. foundry_mcp/tools/unified/test.py +370 -0
  130. foundry_mcp/tools/unified/verification.py +520 -0
  131. foundry_mcp-0.3.3.dist-info/METADATA +337 -0
  132. foundry_mcp-0.3.3.dist-info/RECORD +135 -0
  133. foundry_mcp-0.3.3.dist-info/WHEEL +4 -0
  134. foundry_mcp-0.3.3.dist-info/entry_points.txt +3 -0
  135. foundry_mcp-0.3.3.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,676 @@
1
+ """Testing commands for SDD CLI.
2
+
3
+ Provides commands for running and managing tests including:
4
+ - Running pytest with presets
5
+ - Discovering tests
6
+ - Checking test toolchain
7
+ - AI consultation for test failures
8
+ """
9
+
10
+ import json
11
+ import subprocess
12
+ import time
13
+ from typing import Any, Dict, Optional
14
+
15
+ import click
16
+
17
+ from foundry_mcp.cli.logging import cli_command, get_cli_logger
18
+ from foundry_mcp.cli.output import emit_error, emit_success
19
+ from foundry_mcp.cli.registry import get_context
20
+ from foundry_mcp.cli.resilience import (
21
+ FAST_TIMEOUT,
22
+ MEDIUM_TIMEOUT,
23
+ handle_keyboard_interrupt,
24
+ with_sync_timeout,
25
+ )
26
+
27
# Module-level logger shared by every command in this file.
logger = get_cli_logger()

# Default timeout for test operations
TEST_TIMEOUT = 300  # 5 minutes
31
+
32
+
33
@click.group("test")
def test_group() -> None:
    """Test runner commands."""
    # Pure namespace group: subcommands attach via @test_group.command(...).
    pass
37
+
38
+
39
@test_group.command("run")
@click.argument("target", required=False)
@click.option(
    "--preset",
    type=click.Choice(["quick", "full", "unit", "integration", "smoke"]),
    help="Use a preset configuration.",
)
@click.option(
    "--timeout",
    type=int,
    default=TEST_TIMEOUT,
    help="Timeout in seconds.",
)
@click.option(
    "--verbose/--quiet",
    default=True,
    help="Enable verbose output.",
)
@click.option(
    "--fail-fast",
    is_flag=True,
    help="Stop on first failure.",
)
@click.option(
    "--markers",
    help="Pytest markers expression (e.g., 'not slow').",
)
@click.option(
    "--coverage/--no-coverage",
    default=False,
    help="Enable coverage reporting via pytest-cov.",
)
@click.option(
    "--parallel",
    "-n",
    type=int,
    default=None,
    help="Run tests in parallel with N workers (requires pytest-xdist).",
)
@click.pass_context
@cli_command("test")
@handle_keyboard_interrupt()
def test_run_cmd(
    ctx: click.Context,
    target: Optional[str],
    preset: Optional[str],
    timeout: int,
    verbose: bool,
    fail_fast: bool,
    markers: Optional[str],
    coverage: bool,
    parallel: Optional[int],
) -> None:
    """Run tests using pytest.

    TARGET is the test target (file, directory, or test name pattern).
    """
    timeout = max(1, timeout)  # guard against zero/negative --timeout values
    cli_ctx = get_context(ctx)

    # Build the pytest argv from explicit flags first...
    cmd = ["pytest"]

    if target:
        cmd.append(target)

    if verbose:
        cmd.append("-v")

    if fail_fast:
        cmd.append("-x")

    if markers:
        cmd.extend(["-m", markers])

    # ...then layer preset configurations on top ("full" adds nothing).
    if preset == "quick":
        cmd.extend(["-x", "-m", "not slow"])
    elif preset == "unit":
        cmd.extend(["-m", "unit"])
    elif preset == "integration":
        cmd.extend(["-m", "integration"])
    elif preset == "smoke":
        cmd.extend(["-m", "smoke", "-x"])

    # Coverage support (requires pytest-cov)
    if coverage:
        cmd.extend(["--cov", "--cov-report=term-missing"])

    # Parallel execution support (requires pytest-xdist)
    if parallel is not None and parallel > 0:
        cmd.extend(["-n", str(parallel)])

    # Compact traceback + quiet summary so the parser below sees one line.
    cmd.extend(["--tb=short", "-q"])

    def _run_pytest() -> None:
        """Execute pytest, parse its summary line, and emit the result."""
        start_time = time.perf_counter()
        try:
            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=timeout,
                # Run from the project root (parent of specs dir) when known.
                cwd=str(cli_ctx.specs_dir.parent) if cli_ctx.specs_dir else None,
            )
        except subprocess.TimeoutExpired:
            emit_error(
                f"Test run timed out after {timeout}s",
                code="TIMEOUT",
                error_type="internal",
                remediation="Try a smaller test target or increase timeout with --timeout",
                details={"target": target, "timeout_seconds": timeout},
            )
            # BUG FIX: without this return, `result` is unbound below and the
            # command crashes with UnboundLocalError after reporting the error
            # (emit_error does not abort -- see test_discover_cmd, which
            # explicitly returns after every emit_error call).
            return
        except FileNotFoundError:
            emit_error(
                "pytest not found",
                code="PYTEST_NOT_FOUND",
                error_type="internal",
                remediation="Install pytest: pip install pytest",
                details={"hint": "Install pytest: pip install pytest"},
            )
            # BUG FIX: same as above -- bail out once the error is reported.
            return

        duration_ms = (time.perf_counter() - start_time) * 1000

        # Parse pytest's final summary line, e.g. "3 passed, 1 failed in 0.2s".
        summary = {
            "passed": 0,
            "failed": 0,
            "skipped": 0,
            "errors": 0,
        }
        for line in result.stdout.split("\n"):
            if "passed" in line:
                try:
                    parts = line.split()
                    for i, token in enumerate(parts):
                        # `i > 0` guards the keyword-in-first-position case,
                        # where parts[i - 1] would silently wrap to parts[-1].
                        if i > 0 and token in ("passed", "failed", "skipped", "error", "errors"):
                            key = "errors" if token in ("error", "errors") else token
                            summary[key] = int(parts[i - 1])
                except (ValueError, IndexError):
                    continue
        summary["total"] = (
            summary["passed"]
            + summary["failed"]
            + summary["skipped"]
            + summary["errors"]
        )

        payload = {
            "target": target,
            "preset": preset,
            "exit_code": result.returncode,
            "summary": summary,
            "stdout": result.stdout,
            # stderr is only interesting on failure; suppress it on success.
            "stderr": result.stderr if result.returncode != 0 else None,
        }
        telemetry = {"duration_ms": round(duration_ms, 2)}

        if result.returncode != 0:
            emit_error(
                "Tests failed",
                code="TEST_FAILED",
                error_type="internal",
                remediation="Inspect pytest output and fix failing tests",
                details={**payload, "telemetry": telemetry},
            )
            # BUG FIX: previously fell through and ALSO emitted a success
            # payload with "passed": True for a failing run.
            return

        emit_success({**payload, "passed": True, "telemetry": telemetry})

    # Outer watchdog mirrors the subprocess timeout to also cover any
    # non-subprocess stall inside _run_pytest itself.
    run_with_timeout = with_sync_timeout(
        timeout, f"Test run timed out after {timeout}s"
    )(_run_pytest)
    run_with_timeout()
212
+
213
+
214
@test_group.command("discover")
@click.argument("target", required=False)
@click.option(
    "--pattern",
    default=None,
    help="Optional pytest -k expression to filter collected tests.",
)
@click.option(
    "--list/--no-list",
    "list_only",
    default=True,
    help="List tests without running (pass --no-list to execute them).",
)
@click.pass_context
@cli_command("discover")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Test discovery timed out")
def test_discover_cmd(
    ctx: click.Context,
    target: Optional[str],
    pattern: Optional[str],
    list_only: bool,
) -> None:
    """Discover tests without running them.

    TARGET is the directory or file to search.
    """
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)

    def _truncate(text: Optional[str], limit: int = 4000) -> Optional[str]:
        # Keep the *tail* of long output: pytest prints its summary last.
        if text is None:
            return None
        if len(text) <= limit:
            return text
        return text[-limit:]

    # Phase 1: collect test ids without executing anything.
    collect_cmd = ["pytest", "--collect-only", "-q"]
    if pattern:
        collect_cmd.extend(["-k", pattern])
    if target:
        collect_cmd.append(target)

    try:
        collect_result = subprocess.run(
            collect_cmd,
            capture_output=True,
            text=True,
            timeout=60,
            # Run from the project root (parent of the specs dir) when known.
            cwd=str(cli_ctx.specs_dir.parent) if cli_ctx.specs_dir else None,
        )
    except subprocess.TimeoutExpired:
        emit_error(
            "Test discovery timed out",
            code="TIMEOUT",
            error_type="internal",
            remediation="Try a smaller target directory or check for slow fixtures",
            details={"target": target, "pattern": pattern},
        )
        return
    except FileNotFoundError:
        emit_error(
            "pytest not found",
            code="PYTEST_NOT_FOUND",
            error_type="internal",
            remediation="Install pytest: pip install pytest",
            details={"hint": "Install pytest: pip install pytest"},
        )
        return

    if collect_result.returncode != 0:
        emit_error(
            "Test discovery failed",
            code="TEST_DISCOVERY_FAILED",
            error_type="internal",
            remediation="Inspect pytest output for collection errors",
            details={
                "target": target,
                "pattern": pattern,
                "stdout": _truncate(collect_result.stdout),
                "stderr": _truncate(collect_result.stderr),
            },
        )
        return

    duration_ms = (time.perf_counter() - start_time) * 1000

    # Parse collected tests: node ids contain "::" (file::test); structural
    # lines like "<Module ...>" start with "<" and are skipped.
    tests = []
    for line in collect_result.stdout.split("\n"):
        line = line.strip()
        if "::" in line and not line.startswith("<"):
            tests.append(line)

    response: Dict[str, Any] = {
        "target": target,
        "pattern": pattern,
        "tests": tests,
        "total_count": len(tests),
        "list_only": list_only,
        "telemetry": {"duration_ms": round(duration_ms, 2)},
    }

    if list_only:
        emit_success(response)
        return

    # Phase 2 (only with --no-list): actually execute the discovered tests.
    run_cmd = ["pytest", "-q"]
    if pattern:
        run_cmd.extend(["-k", pattern])
    if target:
        run_cmd.append(target)

    try:
        run_result = subprocess.run(
            run_cmd,
            capture_output=True,
            text=True,
            timeout=MEDIUM_TIMEOUT,
            cwd=str(cli_ctx.specs_dir.parent) if cli_ctx.specs_dir else None,
        )
    except subprocess.TimeoutExpired:
        emit_error(
            "Test execution timed out",
            code="TIMEOUT",
            error_type="internal",
            remediation="Rerun with a narrower target or pattern",
            details={"target": target, "pattern": pattern},
        )
        return
    except FileNotFoundError:
        emit_error(
            "pytest not found",
            code="PYTEST_NOT_FOUND",
            error_type="internal",
            remediation="Install pytest: pip install pytest",
            details={"hint": "Install pytest: pip install pytest"},
        )
        return

    # Compact execution record appended to the discovery payload.
    test_run = {
        "return_code": run_result.returncode,
        "passed": run_result.returncode == 0,
        "stdout": _truncate(run_result.stdout),
        "stderr": _truncate(run_result.stderr),
    }

    if run_result.returncode != 0:
        emit_error(
            "Test execution failed",
            code="TEST_FAILED",
            error_type="internal",
            remediation="Fix the failing tests above",
            details={**response, "test_run": test_run},
        )
        return

    response["test_run"] = test_run
    emit_success(response)
375
+
376
+
377
@test_group.command("presets")
@click.pass_context
@cli_command("presets")
@handle_keyboard_interrupt()
@with_sync_timeout(FAST_TIMEOUT, "Presets lookup timed out")
def test_presets_cmd(ctx: click.Context) -> None:
    """Get available test presets."""
    started = time.perf_counter()

    # One row per preset: (description, markers expression, fail_fast, timeout).
    preset_rows = {
        "quick": (
            "Fast test run with fail_fast and slow tests excluded",
            "not slow",
            True,
            60,
        ),
        "full": ("Complete test suite", None, False, 300),
        "unit": ("Unit tests only", "unit", False, 120),
        "integration": ("Integration tests only", "integration", False, 300),
        "smoke": ("Smoke tests for quick validation", "smoke", True, 30),
    }

    # Expand the compact rows into the documented response schema.
    presets = {
        name: {
            "description": description,
            "markers": marker_expr,
            "fail_fast": fail_fast,
            "timeout": limit,
        }
        for name, (description, marker_expr, fail_fast, limit) in preset_rows.items()
    }

    elapsed_ms = (time.perf_counter() - started) * 1000

    emit_success(
        {
            "presets": presets,
            "default_preset": "quick",
            "telemetry": {"duration_ms": round(elapsed_ms, 2)},
        }
    )
428
+
429
+
430
@test_group.command("check-tools")
@click.pass_context
@cli_command("check-tools")
@handle_keyboard_interrupt()
@with_sync_timeout(FAST_TIMEOUT, "Tool check timed out")
def test_check_tools_cmd(ctx: click.Context) -> None:
    """Check test toolchain availability."""
    started = time.perf_counter()

    def _probe(probe_cmd, parse) -> Dict[str, Any]:
        # Run a version probe; a missing binary or a hung process (>5s)
        # both count as "not available".
        try:
            proc = subprocess.run(
                probe_cmd,
                capture_output=True,
                text=True,
                timeout=5,
            )
        except (FileNotFoundError, subprocess.TimeoutExpired):
            return {"available": False, "version": None}
        ok = proc.returncode == 0
        return {"available": ok, "version": parse(proc.stdout) if ok else None}

    def _first_line(out: str) -> str:
        # CLI tools print their version on the first stdout line.
        return out.split("\n")[0].strip()

    tools: Dict[str, Any] = {
        "pytest": _probe(["pytest", "--version"], _first_line),
        "coverage": _probe(["coverage", "--version"], _first_line),
        # pytest-cov has no CLI; import it and print its version instead.
        "pytest-cov": _probe(
            ["python", "-c", "import pytest_cov; print(pytest_cov.__version__)"],
            str.strip,
        ),
    }

    elapsed_ms = (time.perf_counter() - started) * 1000

    all_available = all(entry.get("available", False) for entry in tools.values())

    # One install hint per missing tool (package name == tool name here).
    recommendations = [
        f"Install {name}: pip install {name}"
        for name in ("pytest", "coverage", "pytest-cov")
        if not tools.get(name, {}).get("available")
    ]

    emit_success(
        {
            "tools": tools,
            "all_available": all_available,
            "recommendations": recommendations,
            "telemetry": {"duration_ms": round(elapsed_ms, 2)},
        }
    )
510
+
511
+
512
@test_group.command("quick")
@click.argument("target", required=False)
@click.pass_context
@cli_command("quick")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Quick tests timed out")
def test_quick_cmd(ctx: click.Context, target: Optional[str]) -> None:
    """Run quick tests (preset: quick)."""
    # Thin alias: delegate to `test run` with the quick preset pre-selected.
    ctx.invoke(test_run_cmd, preset="quick", target=target)
521
+
522
+
523
@test_group.command("unit")
@click.argument("target", required=False)
@click.pass_context
@cli_command("unit")
@handle_keyboard_interrupt()
@with_sync_timeout(MEDIUM_TIMEOUT, "Unit tests timed out")
def test_unit_cmd(ctx: click.Context, target: Optional[str]) -> None:
    """Run unit tests (preset: unit)."""
    # Thin alias: delegate to `test run` with the unit preset pre-selected.
    ctx.invoke(test_run_cmd, preset="unit", target=target)
532
+
533
+
534
# Consultation timeout (longer for AI analysis)
CONSULT_TIMEOUT = 300  # seconds; also passed to the spawned `sdd consult`
536
+
537
+
538
@test_group.command("consult")
@click.argument("pattern", required=False)
@click.option(
    "--issue",
    required=True,
    help="Description of the test failure or issue to analyze.",
)
@click.option(
    "--tools",
    help="Comma-separated list of AI tools to use (e.g., 'gemini,cursor-agent').",
)
@click.option(
    "--model",
    help="Specific LLM model to use for analysis.",
)
@click.pass_context
@cli_command("consult")
@handle_keyboard_interrupt()
@with_sync_timeout(CONSULT_TIMEOUT, "Test consultation timed out")
def test_consult_cmd(
    ctx: click.Context,
    pattern: Optional[str],
    issue: str,
    tools: Optional[str],
    model: Optional[str],
) -> None:
    """Consult AI about test failures or issues.

    PATTERN is an optional test pattern to filter tests (e.g., 'test_auth*').

    Example:
        sdd test consult --issue "test_login is flaky and fails intermittently"
        sdd test consult test_api --issue "assertion error on line 42"
    """
    start_time = time.perf_counter()
    cli_ctx = get_context(ctx)

    # Build consultation prompt
    prompt_parts = [f"Test issue: {issue}"]
    if pattern:
        prompt_parts.append(f"Test pattern: {pattern}")

    # Best-effort: collect matching test ids so the AI sees which tests exist.
    # Consultation proceeds without context if collection is slow or pytest
    # is missing.
    test_context = None
    try:
        if pattern:
            discover_result = subprocess.run(
                ["pytest", "--collect-only", "-q", pattern],
                capture_output=True,
                text=True,
                timeout=30,
                cwd=str(cli_ctx.specs_dir.parent) if cli_ctx.specs_dir else None,
            )
            if discover_result.returncode == 0:
                test_context = discover_result.stdout
    except (subprocess.TimeoutExpired, FileNotFoundError):
        pass

    # Delegate the actual AI call to the top-level `sdd consult` command.
    cmd = ["sdd", "consult", "--json"]
    cmd.extend(["--prompt", " | ".join(prompt_parts)])

    if tools:
        cmd.extend(["--tools", tools])
    if model:
        cmd.extend(["--model", model])
    if cli_ctx.specs_dir:
        cmd.extend(["--path", str(cli_ctx.specs_dir.parent)])

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            timeout=CONSULT_TIMEOUT,
        )
    except subprocess.TimeoutExpired:
        emit_error(
            f"Test consultation timed out after {CONSULT_TIMEOUT}s",
            code="TIMEOUT",
            error_type="internal",
            remediation="Try a more specific issue description or check AI service status",
            details={
                "pattern": pattern,
                "issue": issue,
                "timeout_seconds": CONSULT_TIMEOUT,
            },
        )
        return
    except FileNotFoundError:
        emit_error(
            "sdd command not found",
            code="SDD_NOT_FOUND",
            error_type="internal",
            remediation="Ensure sdd is installed and in PATH",
            details={"hint": "Run: pip install foundry-sdd"},
        )
        return

    duration_ms = (time.perf_counter() - start_time) * 1000

    if result.returncode != 0:
        emit_error(
            "Test consultation failed",
            code="CONSULT_FAILED",
            error_type="internal",
            remediation="Check AI tool availability and API configuration",
            details={
                "pattern": pattern,
                "issue": issue,
                "stderr": result.stderr[:500] if result.stderr else None,
            },
        )
        # Explicit return for consistency with the sibling commands' pattern.
        return

    # Prefer structured JSON output; fall back to wrapping raw text so the
    # caller always receives a "response" field. (Deduplicates the former
    # two near-identical emit_success branches.)
    try:
        response_data = json.loads(result.stdout)
    except json.JSONDecodeError:
        response_data = {"raw_output": result.stdout}

    emit_success(
        {
            "pattern": pattern,
            "issue": issue,
            "tools_used": tools.split(",") if tools else ["default"],
            "model": model,
            "response": response_data,
            # Cap included discovery output to keep the payload small.
            "test_context": test_context[:500] if test_context else None,
            "telemetry": {"duration_ms": round(duration_ms, 2)},
        }
    )