crackerjack 0.29.0__py3-none-any.whl → 0.31.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (158)
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +225 -253
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +169 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +652 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +401 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +670 -0
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +561 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +640 -0
  40. crackerjack/dynamic_config.py +577 -0
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +411 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +435 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +144 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +615 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +370 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +141 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +360 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/execution_strategies.py +341 -0
  107. crackerjack/orchestration/test_progress_streamer.py +636 -0
  108. crackerjack/plugins/__init__.py +15 -0
  109. crackerjack/plugins/base.py +200 -0
  110. crackerjack/plugins/hooks.py +246 -0
  111. crackerjack/plugins/loader.py +335 -0
  112. crackerjack/plugins/managers.py +259 -0
  113. crackerjack/py313.py +8 -3
  114. crackerjack/services/__init__.py +22 -0
  115. crackerjack/services/cache.py +314 -0
  116. crackerjack/services/config.py +347 -0
  117. crackerjack/services/config_integrity.py +99 -0
  118. crackerjack/services/contextual_ai_assistant.py +516 -0
  119. crackerjack/services/coverage_ratchet.py +347 -0
  120. crackerjack/services/debug.py +736 -0
  121. crackerjack/services/dependency_monitor.py +617 -0
  122. crackerjack/services/enhanced_filesystem.py +439 -0
  123. crackerjack/services/file_hasher.py +151 -0
  124. crackerjack/services/filesystem.py +395 -0
  125. crackerjack/services/git.py +165 -0
  126. crackerjack/services/health_metrics.py +611 -0
  127. crackerjack/services/initialization.py +847 -0
  128. crackerjack/services/log_manager.py +286 -0
  129. crackerjack/services/logging.py +174 -0
  130. crackerjack/services/metrics.py +578 -0
  131. crackerjack/services/pattern_cache.py +362 -0
  132. crackerjack/services/pattern_detector.py +515 -0
  133. crackerjack/services/performance_benchmarks.py +653 -0
  134. crackerjack/services/security.py +163 -0
  135. crackerjack/services/server_manager.py +234 -0
  136. crackerjack/services/smart_scheduling.py +144 -0
  137. crackerjack/services/tool_version_service.py +61 -0
  138. crackerjack/services/unified_config.py +437 -0
  139. crackerjack/services/version_checker.py +248 -0
  140. crackerjack/slash_commands/__init__.py +14 -0
  141. crackerjack/slash_commands/init.md +122 -0
  142. crackerjack/slash_commands/run.md +163 -0
  143. crackerjack/slash_commands/status.md +127 -0
  144. crackerjack-0.31.4.dist-info/METADATA +742 -0
  145. crackerjack-0.31.4.dist-info/RECORD +148 -0
  146. crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
  147. crackerjack/.gitignore +0 -34
  148. crackerjack/.libcst.codemod.yaml +0 -18
  149. crackerjack/.pdm.toml +0 -1
  150. crackerjack/.pre-commit-config-ai.yaml +0 -149
  151. crackerjack/.pre-commit-config-fast.yaml +0 -69
  152. crackerjack/.pre-commit-config.yaml +0 -114
  153. crackerjack/crackerjack.py +0 -4140
  154. crackerjack/pyproject.toml +0 -285
  155. crackerjack-0.29.0.dist-info/METADATA +0 -1289
  156. crackerjack-0.29.0.dist-info/RECORD +0 -17
  157. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
  158. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
crackerjack/managers/test_manager_backup.py
@@ -0,0 +1,1124 @@
+ import re
+ import subprocess
+ import threading
+ import time
+ import typing as t
+ from pathlib import Path
+
+ from rich.align import Align
+ from rich.console import Console
+ from rich.live import Live
+ from rich.table import Table
+
+ from crackerjack.models.protocols import OptionsProtocol
+ from crackerjack.services.coverage_ratchet import CoverageRatchetService
+
+
+ class TestProgress:
+     def __init__(self) -> None:
+         self.total_tests: int = 0
+         self.passed: int = 0
+         self.failed: int = 0
+         self.skipped: int = 0
+         self.errors: int = 0
+         self.current_test: str = ""
+         self.start_time: float = 0
+         self.is_complete: bool = False
+         self.is_collecting: bool = True
+         self.files_discovered: int = 0
+         self.collection_status: str = "Starting collection..."
+         self._lock = threading.Lock()
+         self._seen_files: set[str] = set()  # Track seen files to prevent duplicates
+
+     @property
+     def completed(self) -> int:
+         return self.passed + self.failed + self.skipped + self.errors
+
+     @property
+     def elapsed_time(self) -> float:
+         return time.time() - self.start_time if self.start_time else 0
+
+     @property
+     def eta_seconds(self) -> float | None:
+         if self.completed <= 0 or self.total_tests <= 0:
+             return None
+         progress_rate = (
+             self.completed / self.elapsed_time if self.elapsed_time > 0 else 0
+         )
+         remaining = self.total_tests - self.completed
+         return remaining / progress_rate if progress_rate > 0 else None
+
+     def update(self, **kwargs: t.Any) -> None:
+         with self._lock:
+             for key, value in kwargs.items():
+                 if hasattr(self, key):
+                     setattr(self, key, value)
+
+     def format_progress(self) -> Align:
+         """Format test progress display with appropriate phase-specific content."""
+         with self._lock:
+             if self.is_collecting:
+                 table = self._format_collection_progress()
+             else:
+                 table = self._format_execution_progress()
+             # Left-align the table as requested
+             return Align.left(table)
+
+     def _format_collection_progress(self) -> Table:
+         """Format progress display for test collection phase."""
+         table = Table(
+             title="🔍 Test Collection",
+             header_style="bold yellow",
+             show_lines=True,
+             border_style="yellow",
+             title_style="bold yellow",
+             expand=True,  # Use full terminal width like rich.live demo
+             min_width=80,  # Ensure minimum width
+         )
+
+         # Add multiple columns for better alignment (like complexipy)
+         table.add_column("Type", style="cyan", ratio=1)
+         table.add_column("Details", style="white", ratio=3)  # Wider middle column
+         table.add_column("Count", style="green", ratio=1)
+
+         # Add status
+         table.add_row("Status", self.collection_status, "")
+
+         # Add collection stats
+         if self.files_discovered > 0:
+             table.add_row("Files", "Test files discovered", str(self.files_discovered))
+
+         if self.total_tests > 0:
+             table.add_row("Tests", "Total tests found", str(self.total_tests))
+
+         # Add progress bar
+         if self.files_discovered > 0:
+             progress_chars = "▓" * min(self.files_discovered, 15) + "░" * max(
+                 0, 15 - self.files_discovered
+             )
+             table.add_row(
+                 "Progress", f"[{progress_chars}]", f"{self.files_discovered}/15"
+             )
+
+         # Add duration
+         table.add_row("Duration", f"{self.elapsed_time:.1f} seconds", "")
+
+         return table
+
+     def _format_execution_progress(self) -> Table:
+         """Format progress display for test execution phase."""
+         table = Table(
+             title="🧪 Test Execution",
+             header_style="bold cyan",
+             show_lines=True,
+             border_style="cyan",
+             title_style="bold cyan",
+             expand=True,  # Use full terminal width like rich.live demo
+             min_width=80,  # Ensure minimum width
+         )
+
+         # Add multiple columns for better alignment (like complexipy)
+         table.add_column("Metric", style="cyan", ratio=1)
+         table.add_column("Details", style="white", ratio=3)  # Wider middle column
+         table.add_column("Count", style="green", ratio=1)
+
+         # Test results summary
+         if self.total_tests > 0:
+             table.add_row("Total", "Total tests", str(self.total_tests))
+             table.add_row("Passed", "Tests passed", f"[green]{self.passed}[/green]")
+
+             if self.failed > 0:
+                 table.add_row("Failed", "Tests failed", f"[red]{self.failed}[/red]")
+             if self.skipped > 0:
+                 table.add_row(
+                     "Skipped", "Tests skipped", f"[yellow]{self.skipped}[/yellow]"
+                 )
+             if self.errors > 0:
+                 table.add_row("Errors", "Test errors", f"[red]{self.errors}[/red]")
+
+         # Progress percentage and bar
+         if self.total_tests > 0:
+             percentage = (self.completed / self.total_tests) * 100
+             filled = int((self.completed / self.total_tests) * 15)
+             bar = "█" * filled + "░" * (15 - filled)
+             table.add_row("Progress", f"[{bar}]", f"{percentage:.1f}%")
+
+         # Current test
+         if self.current_test:
+             test_name = self.current_test
+             if len(test_name) > 40:  # Reasonable truncation
+                 test_name = test_name[:37] + "..."
+             table.add_row("Current", test_name, "")
+
+         # Duration and ETA
+         duration_text = f"{self.elapsed_time:.1f}s"
+         if self.eta_seconds is not None and self.eta_seconds > 0:
+             table.add_row("Duration", duration_text, f"ETA: ~{self.eta_seconds:.0f}s")
+         else:
+             table.add_row("Duration", duration_text, "")
+
+         return table
+
+
+ class TestManagementImpl:
+     def __init__(self, console: Console, pkg_path: Path) -> None:
+         self.console = console
+         self.pkg_path = pkg_path
+         self._last_test_failures: list[str] = []
+         self._progress_callback: t.Callable[[dict[str, t.Any]], None] | None = None
+         self.coverage_ratchet = CoverageRatchetService(pkg_path, console)
+         self.coverage_ratchet_enabled = True
+
+     def set_progress_callback(
+         self,
+         callback: t.Callable[[dict[str, t.Any]], None] | None,
+     ) -> None:
+         """Set callback for AI mode structured progress updates."""
+         self._progress_callback = callback
+
+     def set_coverage_ratchet_enabled(self, enabled: bool) -> None:
+         """Enable or disable the coverage ratchet system."""
+         self.coverage_ratchet_enabled = enabled
+         if enabled:
+             self.console.print(
+                 "[cyan]📊[/cyan] Coverage ratchet enabled - targeting 100% coverage"
+             )
+         else:
+             self.console.print("[yellow]⚠️[/yellow] Coverage ratchet disabled")
+
+     def get_coverage_ratchet_status(self) -> dict[str, t.Any]:
+         """Get comprehensive coverage ratchet status."""
+         return self.coverage_ratchet.get_status_report()
+
+     def _run_test_command(
+         self,
+         cmd: list[str],
+         timeout: int = 600,
+     ) -> subprocess.CompletedProcess[str]:
+         import os
+         from pathlib import Path
+
+         # Set up coverage data file in cache directory
+         cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
+         cache_dir.mkdir(parents=True, exist_ok=True)
+
+         env = os.environ.copy()
+         env["COVERAGE_FILE"] = str(cache_dir / ".coverage")
+
+         return subprocess.run(
+             cmd,
+             check=False,
+             cwd=self.pkg_path,
+             capture_output=True,
+             text=True,
+             timeout=timeout,
+             env=env,
+         )
+
+     def _run_test_command_with_progress(
+         self,
+         cmd: list[str],
+         timeout: int = 600,
+         show_progress: bool = True,
+     ) -> subprocess.CompletedProcess[str]:
+         if not show_progress:
+             return self._run_test_command(cmd, timeout)
+
+         try:
+             return self._execute_with_live_progress(cmd, timeout)
+         except Exception as e:
+             return self._handle_progress_error(e, cmd, timeout)
+
+     def _execute_with_live_progress(
+         self,
+         cmd: list[str],
+         timeout: int,
+     ) -> subprocess.CompletedProcess[str]:
+         progress = self._initialize_progress()
+         stdout_lines: list[str] = []
+         stderr_lines: list[str] = []
+         # Use a mutable container to share last_activity_time between threads
+         activity_tracker = {"last_time": time.time()}
+
+         with (
+             Live(
+                 progress.format_progress(),
+                 refresh_per_second=2,
+                 console=self.console,
+                 auto_refresh=False,
+                 transient=True,
+             ) as live,
+             subprocess.Popen(
+                 cmd,
+                 cwd=self.pkg_path,
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.PIPE,
+                 text=True,
+                 env=self._setup_test_environment(),
+             ) as process,
+         ):
+             threads = self._start_reader_threads(
+                 process,
+                 progress,
+                 stdout_lines,
+                 stderr_lines,
+                 live,
+                 activity_tracker,
+             )
+
+             returncode = self._wait_for_completion(process, progress, live, timeout)
+             self._cleanup_threads(threads, progress, live)
+
+             return subprocess.CompletedProcess(
+                 args=cmd,
+                 returncode=returncode,
+                 stdout="\n".join(stdout_lines),
+                 stderr="\n".join(stderr_lines),
+             )
+
+     def _initialize_progress(self) -> TestProgress:
+         progress = TestProgress()
+         progress.start_time = time.time()
+         progress.collection_status = "Initializing test collection..."
+         return progress
+
+     def _setup_test_environment(self) -> dict[str, str]:
+         import os
+         from pathlib import Path
+
+         cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
+         cache_dir.mkdir(parents=True, exist_ok=True)
+
+         env = os.environ.copy()
+         env["COVERAGE_FILE"] = str(cache_dir / ".coverage")
+         return env
+
+     def _start_reader_threads(
+         self,
+         process: subprocess.Popen[str],
+         progress: TestProgress,
+         stdout_lines: list[str],
+         stderr_lines: list[str],
+         live: Live,
+         activity_tracker: dict[str, float],
+     ) -> dict[str, threading.Thread]:
+         read_output = self._create_stdout_reader(
+             process,
+             progress,
+             stdout_lines,
+             live,
+             activity_tracker,
+         )
+         read_stderr = self._create_stderr_reader(process, stderr_lines)
+         monitor_stuck = self._create_monitor_thread(
+             process,
+             progress,
+             live,
+             activity_tracker,
+         )
+
+         threads = {
+             "stdout": threading.Thread(target=read_output, daemon=True),
+             "stderr": threading.Thread(target=read_stderr, daemon=True),
+             "monitor": threading.Thread(target=monitor_stuck, daemon=True),
+         }
+
+         for thread in threads.values():
+             thread.start()
+
+         return threads
+
+     def _create_stdout_reader(
+         self,
+         process: subprocess.Popen[str],
+         progress: TestProgress,
+         stdout_lines: list[str],
+         live: Live,
+         activity_tracker: dict[str, float],
+     ) -> t.Callable[[], None]:
+         def read_output() -> None:
+             refresh_state = {"last_refresh": 0, "last_content": ""}
+
+             if process.stdout:
+                 for line in iter(process.stdout.readline, ""):
+                     if not line:
+                         break
+
+                     processed_line = line.rstrip()
+                     if processed_line.strip():
+                         self._process_test_output_line(
+                             processed_line, stdout_lines, progress, activity_tracker
+                         )
+                         self._update_display_if_needed(progress, live, refresh_state)
+
+         return read_output
+
+     def _process_test_output_line(
+         self,
+         line: str,
+         stdout_lines: list[str],
+         progress: TestProgress,
+         activity_tracker: dict[str, float],
+     ) -> None:
+         """Process a single line of test output."""
+         stdout_lines.append(line)
+         self._parse_test_line(line, progress)
+         activity_tracker["last_time"] = time.time()
+
+     def _update_display_if_needed(
+         self,
+         progress: TestProgress,
+         live: Live,
+         refresh_state: dict[str, t.Any],
+     ) -> None:
+         """Update display if refresh criteria are met."""
+         current_time = time.time()
+         refresh_interval = self._get_refresh_interval(progress)
+         current_content = self._get_current_content_signature(progress)
+
+         if self._should_refresh_display(
+             current_time, refresh_state, refresh_interval, current_content
+         ):
+             live.update(progress.format_progress())
+             live.refresh()
+             refresh_state["last_refresh"] = current_time
+             refresh_state["last_content"] = current_content
+
+     def _get_refresh_interval(self, progress: TestProgress) -> float:
+         """Get appropriate refresh interval based on test phase."""
+         return 1.0 if progress.is_collecting else 0.25
+
+     def _get_current_content_signature(self, progress: TestProgress) -> str:
+         """Get a signature of current progress content for change detection."""
+         return f"{progress.collection_status}:{progress.files_discovered}:{progress.total_tests}"
+
+     def _should_refresh_display(
+         self,
+         current_time: float,
+         refresh_state: dict[str, t.Any],
+         refresh_interval: float,
+         current_content: str,
+     ) -> bool:
+         """Determine if display should be refreshed."""
+         time_elapsed = current_time - refresh_state["last_refresh"] > refresh_interval
+         content_changed = current_content != refresh_state["last_content"]
+         return time_elapsed or content_changed
+
+     def _create_stderr_reader(
+         self,
+         process: subprocess.Popen[str],
+         stderr_lines: list[str],
+     ) -> t.Callable[[], None]:
+         def read_stderr() -> None:
+             if process.stderr:
+                 for line in iter(process.stderr.readline, ""):
+                     if not line:
+                         break
+                     stderr_lines.append(line.rstrip())
+
+         return read_stderr
+
+     def _create_monitor_thread(
+         self,
+         process: subprocess.Popen[str],
+         progress: TestProgress,
+         live: Live,
+         activity_tracker: dict[str, float],
+     ) -> t.Callable[[], None]:
+         def monitor_stuck_tests() -> None:
+             while process.poll() is None:
+                 time.sleep(5)
+                 current_time = time.time()
+                 if current_time - activity_tracker["last_time"] > 30:
+                     self._mark_test_as_stuck(
+                         progress,
+                         current_time - activity_tracker["last_time"],
+                         live,
+                     )
+
+         return monitor_stuck_tests
+
+     def _mark_test_as_stuck(
+         self,
+         progress: TestProgress,
+         stuck_time: float,
+         live: Live,
+     ) -> None:
+         if progress.current_test and "stuck" not in progress.current_test.lower():
+             progress.update(
+                 current_test=f"{progress.current_test} (possibly stuck - {stuck_time:.0f}s)",
+             )
+             live.update(progress.format_progress())
+             live.refresh()
+
+     def _wait_for_completion(
+         self,
+         process: subprocess.Popen[str],
+         progress: TestProgress,
+         live: Live,
+         timeout: int,
+     ) -> int:
+         try:
+             return process.wait(timeout=timeout)
+         except subprocess.TimeoutExpired:
+             process.kill()
+             progress.update(current_test="TIMEOUT - Process killed")
+             live.update(progress.format_progress())
+             live.refresh()
+             raise
+
+     def _cleanup_threads(
+         self,
+         threads: dict[str, threading.Thread],
+         progress: TestProgress,
+         live: Live,
+     ) -> None:
+         threads["stdout"].join(timeout=1)
+         threads["stderr"].join(timeout=1)
+         progress.is_complete = True
+         live.update(progress.format_progress())
+         live.refresh()
+
+     def _handle_progress_error(
+         self,
+         error: Exception,
+         cmd: list[str],
+         timeout: int,
+     ) -> subprocess.CompletedProcess[str]:
+         from contextlib import suppress
+
+         with suppress(Exception):
+             self.console.print(f"[red]❌ Progress display failed: {error}[/red]")
+             self.console.print("[yellow]⚠️ Falling back to standard mode[/yellow]")
+         return self._run_test_command(cmd, timeout)
+
+     def _parse_test_line(self, line: str, progress: TestProgress) -> None:
+         if self._handle_collection_completion(line, progress):
+             return
+         if self._handle_session_events(line, progress):
+             return
+         if self._handle_collection_progress(line, progress):
+             return
+         if self._handle_test_execution(line, progress):
+             return
+         self._handle_running_test(line, progress)
+
+     def _handle_collection_completion(self, line: str, progress: TestProgress) -> bool:
+         if match := re.search(r"collected (\d+) items?", line):
+             progress.update(
+                 total_tests=int(match.group(1)),
+                 is_collecting=False,
+                 current_test="Starting test execution...",
+             )
+             return True
+         return False
+
+     def _handle_session_events(self, line: str, progress: TestProgress) -> bool:
+         if "test session starts" in line.lower():
+             progress.update(collection_status="Session starting...")
+             return True
+         if line.startswith("collecting") or "collecting" in line.lower():
+             progress.update(collection_status="Collecting tests...")
+             return True
+         return False
+
+     def _handle_collection_progress(self, line: str, progress: TestProgress) -> bool:
+         if not progress.is_collecting:
+             return False
+
+         # Only process meaningful collection lines, not every line containing ".py"
+         if line.strip().startswith("collecting") or "collecting" in line.lower():
+             progress.update(collection_status="Collecting tests...")
+             return True
+
+         # Very restrictive file discovery - only count actual test discoveries
+         if (
+             "::" in line
+             and ".py" in line
+             and ("test_" in line or "tests/" in line)
+             and not any(
+                 status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
+             )
+         ):
+             # Only update if we haven't seen this file before
+             filename = line.split("/")[-1] if "/" in line else line.split("::")[0]
+             if filename.endswith(".py") and filename not in progress._seen_files:
+                 progress._seen_files.add(filename)
+                 new_count = progress.files_discovered + 1
+                 progress.update(
+                     files_discovered=new_count,
+                     collection_status=f"Discovering tests... ({new_count} files)",
+                 )
+             return True
+
+         return False
+
+     def _handle_test_execution(self, line: str, progress: TestProgress) -> bool:
+         if not (
+             "::" in line
+             and any(
+                 status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
+             )
+         ):
+             return False
+
+         if "PASSED" in line:
+             progress.update(passed=progress.passed + 1)
+         elif "FAILED" in line:
+             progress.update(failed=progress.failed + 1)
+         elif "SKIPPED" in line:
+             progress.update(skipped=progress.skipped + 1)
+         elif "ERROR" in line:
+             progress.update(errors=progress.errors + 1)
+
+         self._extract_current_test(line, progress)
+         return True
+
+     def _handle_running_test(self, line: str, progress: TestProgress) -> None:
+         if "::" not in line or any(
+             status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
+         ):
+             return
+
+         parts = line.split()
+         if parts and "::" in parts[0]:
+             test_path = parts[0]
+             if "/" in test_path:
+                 test_path = test_path.split("/")[-1]
+             progress.update(current_test=f"Running: {test_path}")
+
+     def _extract_current_test(self, line: str, progress: TestProgress) -> None:
+         # Extract test name from pytest output line
+         parts = line.split()
+         if parts and "::" in parts[0]:
+             test_path = parts[0]
+             # Simplify the test path for display
+             if "/" in test_path:
+                 test_path = test_path.split("/")[-1]  # Get just the filename part
+             progress.update(current_test=test_path)
+
+     def _run_test_command_with_ai_progress(
+         self,
+         cmd: list[str],
+         timeout: int = 600,
+     ) -> subprocess.CompletedProcess[str]:
+         """Run tests with periodic structured progress updates for AI mode."""
+         try:
+             env = self._setup_coverage_env()
+             progress = TestProgress()
+             progress.start_time = time.time()
+
+             return self._execute_test_process_with_progress(cmd, timeout, env, progress)
+         except Exception:
+             # Fallback to standard mode
+             return self._run_test_command(cmd, timeout)
+
+     def _setup_coverage_env(self) -> dict[str, str]:
+         """Set up environment with coverage configuration."""
+         import os
+         from pathlib import Path
+
+         cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
+         cache_dir.mkdir(parents=True, exist_ok=True)
+
+         env = os.environ.copy()
+         env["COVERAGE_FILE"] = str(cache_dir / ".coverage")
+         return env
+
+     def _execute_test_process_with_progress(
+         self,
+         cmd: list[str],
+         timeout: int,
+         env: dict[str, str],
+         progress: TestProgress,
+     ) -> subprocess.CompletedProcess[str]:
+         """Execute test process with progress tracking."""
+         stdout_lines: list[str] = []
+         stderr_lines: list[str] = []
+         last_update_time = [time.time()]  # Use list for mutable reference
+
+         with subprocess.Popen(
+             cmd,
+             cwd=self.pkg_path,
+             stdout=subprocess.PIPE,
+             stderr=subprocess.PIPE,
+             text=True,
+             env=env,
+         ) as process:
+             # Start reader threads
+             stdout_thread = threading.Thread(
+                 target=self._read_stdout_with_progress,
+                 args=(process, stdout_lines, progress, last_update_time),
+                 daemon=True,
+             )
+             stderr_thread = threading.Thread(
+                 target=self._read_stderr_lines,
+                 args=(process, stderr_lines),
+                 daemon=True,
+             )
+
+             stdout_thread.start()
+             stderr_thread.start()
+
+             # Wait for process completion
+             returncode = self._wait_for_process_completion(process, timeout)
+
+             # Clean up threads
+             stdout_thread.join(timeout=1)
+             stderr_thread.join(timeout=1)
+
+             # Final progress update
+             progress.is_complete = True
+             self._emit_ai_progress(progress)
+
+             return subprocess.CompletedProcess(
+                 args=cmd,
+                 returncode=returncode,
+                 stdout="\n".join(stdout_lines),
+                 stderr="\n".join(stderr_lines),
+             )
+
+     def _read_stdout_with_progress(
+         self,
+         process: subprocess.Popen[str],
+         stdout_lines: list[str],
+         progress: TestProgress,
+         last_update_time: list[float],
+     ) -> None:
+         """Read stdout and update progress."""
+         if not process.stdout:
+             return
+
+         for line in iter(process.stdout.readline, ""):
+             if not line:
+                 break
+             line = line.rstrip()
+             stdout_lines.append(line)
+             self._parse_test_line(line, progress)
+
+             # Emit structured progress every 10 seconds
+             current_time = time.time()
+             if current_time - last_update_time[0] >= 10:
+                 self._emit_ai_progress(progress)
+                 last_update_time[0] = current_time
+
+     def _read_stderr_lines(
+         self,
+         process: subprocess.Popen[str],
+         stderr_lines: list[str],
+     ) -> None:
+         """Read stderr lines."""
+         if not process.stderr:
+             return
+
+         for line in iter(process.stderr.readline, ""):
+             if not line:
+                 break
+             stderr_lines.append(line.rstrip())
+
+     def _wait_for_process_completion(
+         self,
+         process: subprocess.Popen[str],
+         timeout: int,
+     ) -> int:
+         """Wait for process completion with timeout handling."""
+         try:
+             return process.wait(timeout=timeout)
+         except subprocess.TimeoutExpired:
+             process.kill()
+             raise
+
+     def _emit_ai_progress(self, progress: TestProgress) -> None:
+         """Emit structured progress data for AI consumption."""
+         if not self._progress_callback:
+             return
+
+         progress_data = {
+             "timestamp": progress.elapsed_time,
+             "status": "complete" if progress.is_complete else "running",
+             "progress_percentage": (progress.completed / progress.total_tests * 100)
+             if progress.total_tests > 0
+             else 0,
+             "completed": progress.completed,
+             "total": progress.total_tests,
+             "passed": progress.passed,
+             "failed": progress.failed,
+             "skipped": progress.skipped,
+             "errors": progress.errors,
+             "current_test": progress.current_test,
+             "elapsed_seconds": progress.elapsed_time,
+             "eta_seconds": progress.eta_seconds,
+         }
+
+         # Include console-friendly message for periodic updates
+         if not progress.is_complete and progress.total_tests > 0:
+             percentage = progress.completed / progress.total_tests * 100
+             self.console.print(
+                 f"📊 Progress update ({progress.elapsed_time:.0f}s): "
+                 f"{progress.completed}/{progress.total_tests} tests completed ({percentage:.0f}%)",
+             )
+
+         self._progress_callback(progress_data)
+
+     def _get_optimal_workers(self, options: OptionsProtocol) -> int:
+         if options.test_workers > 0:
+             return options.test_workers
+         import os
+
+         cpu_count = os.cpu_count() or 1
+         test_files = list(self.pkg_path.glob("tests/test_*.py"))
+         if len(test_files) < 5:
+             return min(2, cpu_count)
+
+         return min(cpu_count, 8)
+
+     def _get_test_timeout(self, options: OptionsProtocol) -> int:
+         if options.test_timeout > 0:
+             return options.test_timeout
+         test_files = list(self.pkg_path.glob("tests/test_*.py"))
+         base_timeout = 300
+
+         import math
+
+         calculated_timeout = base_timeout + int(math.sqrt(len(test_files)) * 20)
+         return min(calculated_timeout, 600)
+
+     def run_tests(self, options: OptionsProtocol) -> bool:
+         """Main entry point for test execution with proper error handling."""
+         self._last_test_failures = []
+         start_time = time.time()
+
+         try:
+             return self._execute_test_workflow(options, start_time)
+         except subprocess.TimeoutExpired:
+             return self._handle_test_timeout(start_time)
+         except Exception as e:
+             return self._handle_test_error(start_time, e)
+
+     def _execute_test_workflow(
+         self,
+         options: OptionsProtocol,
+         start_time: float,
+     ) -> bool:
+         """Execute the complete test workflow."""
+         cmd = self._build_test_command(options)
+         timeout = self._get_test_timeout(options)
+         result = self._execute_tests_with_appropriate_mode(cmd, timeout, options)
+         duration = time.time() - start_time
+         return self._process_test_results(result, duration)
+
+     def _execute_tests_with_appropriate_mode(
+         self,
+         cmd: list[str],
+         timeout: int,
+         options: OptionsProtocol,
+     ) -> subprocess.CompletedProcess[str]:
+         """Execute tests using the appropriate mode based on options."""
+         execution_mode = self._determine_execution_mode(options)
+         extended_timeout = timeout + 60
+
+         if execution_mode == "ai_progress":
+             self._print_test_start_message(cmd, timeout, options)
+             return self._run_test_command_with_ai_progress(
+                 cmd,
+                 timeout=extended_timeout,
+             )
+         if execution_mode == "console_progress":
+             return self._run_test_command_with_progress(cmd, timeout=extended_timeout)
+         # standard mode
+         self._print_test_start_message(cmd, timeout, options)
+         return self._run_test_command(cmd, timeout=extended_timeout)
+
+     def _determine_execution_mode(self, options: OptionsProtocol) -> str:
+         """Determine which execution mode to use based on options."""
+         is_ai_mode = getattr(options, "ai_agent", False)
+         is_benchmark = options.benchmark
+
+         if is_ai_mode and self._progress_callback:
+             return "ai_progress"
+         if not is_ai_mode and not is_benchmark:
+             return "console_progress"
+         return "standard"
+
+     def _handle_test_timeout(self, start_time: float) -> bool:
+         """Handle test execution timeout."""
+         duration = time.time() - start_time
+         self.console.print(f"[red]⏰[/red] Tests timed out after {duration:.1f}s")
+         return False
+
+     def _handle_test_error(self, start_time: float, error: Exception) -> bool:
+         """Handle test execution errors."""
+         self.console.print(f"[red]💥[/red] Test execution failed: {error}")
+         return False
+
+     def _build_test_command(self, options: OptionsProtocol) -> list[str]:
+         cmd = ["python", "-m", "pytest"]
+         self._add_coverage_options(cmd, options)
+         self._add_worker_options(cmd, options)
+         self._add_benchmark_options(cmd, options)
+         self._add_timeout_options(cmd, options)
+
+         # For progress modes, we need verbose output to parse test names
+         is_ai_mode = getattr(options, "ai_agent", False)
+         needs_verbose = (not is_ai_mode and not options.benchmark) or (
+             is_ai_mode and self._progress_callback
+         )
+         self._add_verbosity_options(cmd, options, force_verbose=bool(needs_verbose))
+         self._add_test_path(cmd)
+
+         return cmd
+
+     def _add_coverage_options(self, cmd: list[str], options: OptionsProtocol) -> None:
+         if not options.benchmark:
+             cmd.extend(["--cov=crackerjack", "--cov-report=term-missing"])
+
+     def _add_worker_options(self, cmd: list[str], options: OptionsProtocol) -> None:
+         if not options.benchmark:
+             workers = self._get_optimal_workers(options)
+             if workers > 1:
+                 cmd.extend(["-n", str(workers)])
+                 self.console.print(f"[cyan]🔧[/cyan] Using {workers} test workers")
+
+     def _add_benchmark_options(self, cmd: list[str], options: OptionsProtocol) -> None:
+         if options.benchmark:
+             self.console.print(
+                 "[cyan]📊[/cyan] Running in benchmark mode (no parallelization)",
+             )
+             cmd.append("--benchmark-only")
+
+     def _add_timeout_options(self, cmd: list[str], options: OptionsProtocol) -> None:
+         timeout = self._get_test_timeout(options)
+         cmd.extend(["--timeout", str(timeout)])
+
+     def _add_verbosity_options(
+         self,
+         cmd: list[str],
+         options: OptionsProtocol,
+         force_verbose: bool = False,
+     ) -> None:
+         if options.verbose or force_verbose:
+             cmd.append("-v")
+
+     def _add_test_path(self, cmd: list[str]) -> None:
+         test_path = self.pkg_path / "tests"
+         if test_path.exists():
+             cmd.append(str(test_path))
+
+     def _print_test_start_message(
+         self,
+         cmd: list[str],
+         timeout: int,
+         options: OptionsProtocol,
+     ) -> None:
+         self.console.print(
+             f"[yellow]🧪[/yellow] Running tests... (timeout: {timeout}s)",
+         )
+         if options.verbose:
+             self.console.print(f"[dim]Command: {' '.join(cmd)}[/dim]")
+
+     def _process_test_results(
+         self,
+         result: subprocess.CompletedProcess[str],
+         duration: float,
+     ) -> bool:
+         output = result.stdout + result.stderr
+         success = result.returncode == 0
+
+         # Process coverage ratchet if enabled and tests passed
+         if self.coverage_ratchet_enabled and success:
+             if not self._process_coverage_ratchet():
+                 return False  # Coverage regression detected
+
+         if success:
+             return self._handle_test_success(output, duration)
+         return self._handle_test_failure(output, duration)
+
+     def _process_coverage_ratchet(self) -> bool:
+         """Process coverage ratchet and return False if regression detected."""
+         coverage_data = self.get_coverage()
+         if not coverage_data:
+             return True
+
+         current_coverage = coverage_data.get("total_coverage", 0)
+         ratchet_result = self.coverage_ratchet.update_coverage(current_coverage)
+
+         return self._handle_ratchet_result(ratchet_result)
+
+     def _handle_ratchet_result(self, ratchet_result: dict[str, t.Any]) -> bool:
+         """Handle coverage ratchet result and return False if regression detected."""
+         status = ratchet_result["status"]
+
+         if status == "improved":
+             self._handle_coverage_improvement(ratchet_result)
+         elif status == "regression":
+             self.console.print(f"[red]📉 {ratchet_result['message']}[/red]")
+             return False  # Fail the test run on coverage regression
+         elif status == "maintained":
+             self.console.print(f"[cyan]📊 {ratchet_result['message']}[/cyan]")
+
+         self._display_progress_visualization()
+         return True
+
+     def _handle_coverage_improvement(self, ratchet_result: dict[str, t.Any]) -> None:
+         """Handle coverage improvement display and milestone celebration."""
+         self.console.print(f"[green]🎉 {ratchet_result['message']}[/green]")
+
+         if "milestones" in ratchet_result and ratchet_result["milestones"]:
+             self.coverage_ratchet.display_milestone_celebration(
+                 ratchet_result["milestones"]
+             )
+
+         if "next_milestone" in ratchet_result and ratchet_result["next_milestone"]:
+             next_milestone = ratchet_result["next_milestone"]
+             points_needed = ratchet_result.get("points_to_next", 0)
+             self.console.print(
+                 f"[cyan]🎯 Next milestone: {next_milestone:.0f}% (+{points_needed:.2f}% needed)[/cyan]"
+             )
+
+     def _display_progress_visualization(self) -> None:
+         """Display coverage progress visualization."""
+         progress_viz = self.coverage_ratchet.get_progress_visualization()
+         for line in progress_viz.split("\n"):
+             if line.strip():
+                 self.console.print(f"[dim]{line}[/dim]")
+
+     def _handle_test_success(self, output: str, duration: float) -> bool:
+         self.console.print(f"[green]✅[/green] Tests passed ({duration:.1f}s)")
+         lines = output.split("\n")
+         for line in lines:
+             if "passed" in line and ("failed" in line or "error" in line):
+                 self.console.print(f"[cyan]📊[/cyan] {line.strip()}")
+                 break
+
+         return True
+
+     def _handle_test_failure(self, output: str, duration: float) -> bool:
+         self.console.print(f"[red]❌[/red] Tests failed ({duration:.1f}s)")
+         failure_lines = self._extract_failure_lines(output)
+         if failure_lines:
+             self.console.print("[red]💥[/red] Failure summary: ")
+             for line in failure_lines[:10]:
+                 if line.strip():
+                     self.console.print(f" [dim]{line}[/dim]")
+
+         self._last_test_failures = failure_lines or ["Test execution failed"]
+
+         return False
+
+     def _extract_failure_lines(self, output: str) -> list[str]:
+         lines = output.split("\n")
+         in_failure_section = False
+         failure_lines: list[str] = []
+         for line in lines:
+             if "FAILURES" in line or "ERRORS" in line:
+                 in_failure_section = True
+             elif in_failure_section and line.startswith(" = "):
+                 break
+             elif in_failure_section:
+                 failure_lines.append(line)
+
+         return failure_lines
+
+     def get_coverage(self) -> dict[str, t.Any]:
+         try:
+             result = self._run_test_command(
+                 ["python", "-m", "coverage", "report", "--format=json"],
+             )
+             if result.returncode == 0:
+                 import json
+
+                 coverage_data = json.loads(result.stdout)
+
+                 return {
+                     "total_coverage": coverage_data.get("totals", {}).get(
+                         "percent_covered",
+                         0,
+                     ),
+                     "files": coverage_data.get("files", {}),
+                     "summary": coverage_data.get("totals", {}),
+                 }
+             self.console.print("[yellow]⚠️[/yellow] Could not get coverage data")
+             return {}
+         except Exception as e:
+             self.console.print(f"[yellow]⚠️[/yellow] Error getting coverage: {e}")
+             return {}
+
+     def run_specific_tests(self, test_pattern: str) -> bool:
+         try:
+             cmd = ["python", "-m", "pytest", "-k", test_pattern, "-v"]
+             self.console.print(
+                 f"[yellow]🎯[/yellow] Running tests matching: {test_pattern}",
+             )
+             result = self._run_test_command(cmd)
+             if result.returncode == 0:
+                 self.console.print("[green]✅[/green] Specific tests passed")
+                 return True
+             self.console.print("[red]❌[/red] Specific tests failed")
+             return False
+         except Exception as e:
+             self.console.print(f"[red]💥[/red] Error running specific tests: {e}")
+             return False
+
+     def validate_test_environment(self) -> bool:
+         issues: list[str] = []
+         try:
+             result = self._run_test_command(["python", "-m", "pytest", "--version"])
+             if result.returncode != 0:
+                 issues.append("pytest not available")
+         except (subprocess.SubprocessError, OSError, FileNotFoundError):
+             issues.append("pytest not accessible")
+         test_dir = self.pkg_path / "tests"
+         if not test_dir.exists():
+             issues.append("tests directory not found")
+         test_files = list(test_dir.glob("test_*.py")) if test_dir.exists() else []
+         if not test_files:
+             issues.append("no test files found")
+         if issues:
+             self.console.print("[red]❌[/red] Test environment issues: ")
+             for issue in issues:
+                 self.console.print(f" - {issue}")
+             return False
+         self.console.print("[green]✅[/green] Test environment validated")
+         return True
+
+     def get_test_stats(self) -> dict[str, t.Any]:
+         test_dir = self.pkg_path / "tests"
+         if not test_dir.exists():
+             return {"test_files": 0, "total_tests": 0, "test_lines": 0}
+         test_files = list(test_dir.glob("test_*.py"))
+         total_lines = 0
+         total_tests = 0
+         for test_file in test_files:
+             try:
+                 content = test_file.read_text()
+                 total_lines += len(content.split("\n"))
+                 total_tests += content.count("def test_")
+             except (OSError, UnicodeDecodeError, PermissionError):
+                 continue
+
+         return {
+             "test_files": len(test_files),
+             "total_tests": total_tests,
+             "test_lines": total_lines,
+             "avg_tests_per_file": total_tests / len(test_files) if test_files else 0,
+         }
+
+     def get_test_failures(self) -> list[str]:
+         return self._last_test_failures
+
+     def get_test_command(self, options: OptionsProtocol) -> list[str]:
+         return self._build_test_command(options)
+
+     def get_coverage_report(self) -> str | None:
+         try:
+             coverage_data = self.get_coverage()
+             if coverage_data:
+                 total = coverage_data.get("total", 0)
+                 return f"Total coverage: {total}%"
+             return None
+         except Exception:
+             return None
+
+     def has_tests(self) -> bool:
+         test_files = list(self.pkg_path.glob("tests/test_*.py"))
+         return len(test_files) > 0
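
For orientation, a minimal sketch of how the new TestProgress class is driven: the reader threads feed parsed pytest output into update() while a Rich Live display re-renders format_progress(). The import path below is an assumption based on the file list above, and the snippet is illustrative, not part of the release:

    import time

    from rich.console import Console

    # Assumed import path, inferred from the "Files changed" list above.
    from crackerjack.managers.test_manager_backup import TestProgress

    progress = TestProgress()
    progress.start_time = time.time()

    # What _handle_collection_completion() does when pytest prints
    # "collected 42 items": record the total and leave the collection phase.
    progress.update(total_tests=42, is_collecting=False)

    # What _handle_test_execution() does as PASSED/FAILED/SKIPPED lines arrive.
    progress.update(passed=40, failed=1, skipped=1)

    print(progress.completed)  # 42 (passed + failed + skipped + errors)
    Console().print(progress.format_progress())  # renders the execution table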