crackerjack-0.31.10-py3-none-any.whl → crackerjack-0.31.12-py3-none-any.whl

This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of crackerjack might be problematic.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -28,7 +28,7 @@ class TestProgress:
  self.files_discovered: int = 0
  self.collection_status: str = "Starting collection..."
  self._lock = threading.Lock()
- self._seen_files: set[str] = set() # Track seen files to prevent duplicates
+ self._seen_files: set[str] = set()

  @property
  def completed(self) -> int:
@@ -55,105 +55,92 @@ class TestProgress:
  setattr(self, key, value)

  def format_progress(self) -> Align:
- """Format test progress display with appropriate phase-specific content."""
  with self._lock:
  if self.is_collecting:
  table = self._format_collection_progress()
  else:
  table = self._format_execution_progress()
- # Left-align the table as requested
+
  return Align.left(table)

  def _format_collection_progress(self) -> Table:
- """Format progress display for test collection phase."""
  table = Table(
  title="🔍 Test Collection",
  header_style="bold yellow",
  show_lines=True,
  border_style="yellow",
  title_style="bold yellow",
- expand=True, # Use full terminal width like rich.live demo
- min_width=80, # Ensure minimum width
+ expand=True,
+ min_width=80,
  )

- # Add multiple columns for better alignment (like complexipy)
  table.add_column("Type", style="cyan", ratio=1)
- table.add_column("Details", style="white", ratio=3) # Wider middle column
+ table.add_column("Details", style="white", ratio=3)
  table.add_column("Count", style="green", ratio=1)

- # Add status
  table.add_row("Status", self.collection_status, "")

- # Add collection stats
  if self.files_discovered > 0:
  table.add_row("Files", "Test files discovered", str(self.files_discovered))

  if self.total_tests > 0:
  table.add_row("Tests", "Total tests found", str(self.total_tests))

- # Add progress bar
  if self.files_discovered > 0:
  progress_chars = "▓" * min(self.files_discovered, 15) + "░" * max(
  0, 15 - self.files_discovered
  )
  table.add_row(
- "Progress", f"[{progress_chars}]", f"{self.files_discovered}/15"
+ "Progress", f"[{progress_chars}]", f"{self.files_discovered}/ 15"
  )

- # Add duration
- table.add_row("Duration", f"{self.elapsed_time:.1f} seconds", "")
+ table.add_row("Duration", f"{self.elapsed_time: .1f} seconds", "")

  return table

  def _format_execution_progress(self) -> Table:
- """Format progress display for test execution phase."""
  table = Table(
  title="🧪 Test Execution",
  header_style="bold cyan",
  show_lines=True,
  border_style="cyan",
  title_style="bold cyan",
- expand=True, # Use full terminal width like rich.live demo
- min_width=80, # Ensure minimum width
+ expand=True,
+ min_width=80,
  )

- # Add multiple columns for better alignment (like complexipy)
  table.add_column("Metric", style="cyan", ratio=1)
- table.add_column("Details", style="white", ratio=3) # Wider middle column
+ table.add_column("Details", style="white", ratio=3)
  table.add_column("Count", style="green", ratio=1)

- # Test results summary
  if self.total_tests > 0:
  table.add_row("Total", "Total tests", str(self.total_tests))
- table.add_row("Passed", "Tests passed", f"[green]{self.passed}[/green]")
+ table.add_row("Passed", "Tests passed", f"[green]{self.passed}[/ green]")

  if self.failed > 0:
- table.add_row("Failed", "Tests failed", f"[red]{self.failed}[/red]")
+ table.add_row("Failed", "Tests failed", f"[red]{self.failed}[/ red]")
  if self.skipped > 0:
  table.add_row(
- "Skipped", "Tests skipped", f"[yellow]{self.skipped}[/yellow]"
+ "Skipped", "Tests skipped", f"[yellow]{self.skipped}[/ yellow]"
  )
  if self.errors > 0:
- table.add_row("Errors", "Test errors", f"[red]{self.errors}[/red]")
+ table.add_row("Errors", "Test errors", f"[red]{self.errors}[/ red]")

- # Progress percentage and bar
  if self.total_tests > 0:
  percentage = (self.completed / self.total_tests) * 100
  filled = int((self.completed / self.total_tests) * 15)
  bar = "█" * filled + "░" * (15 - filled)
- table.add_row("Progress", f"[{bar}]", f"{percentage:.1f}%")
+ table.add_row("Progress", f"[{bar}]", f"{percentage: .1f}%")

- # Current test
  if self.current_test:
  test_name = self.current_test
- if len(test_name) > 40: # Reasonable truncation
+ if len(test_name) > 40:
  test_name = test_name[:37] + "..."
  table.add_row("Current", test_name, "")

- # Duration and ETA
- duration_text = f"{self.elapsed_time:.1f}s"
+ duration_text = f"{self.elapsed_time: .1f}s"
  if self.eta_seconds is not None and self.eta_seconds > 0:
- table.add_row("Duration", duration_text, f"ETA: ~{self.eta_seconds:.0f}s")
+ table.add_row("Duration", duration_text, f"ETA: ~{self.eta_seconds: .0f}s")
  else:
  table.add_row("Duration", duration_text, "")

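Both tables above render the same fixed-width text progress bar: completed work is scaled into a 15-cell bar of filled and empty block characters. A minimal runnable sketch of that arithmetic (render_bar and the sample counts are illustrative, not names from the package):

    # Scale completed/total into a fixed-width bar, as in the tables above.
    def render_bar(completed: int, total: int, width: int = 15) -> str:
        filled = int((completed / total) * width) if total else 0
        return "█" * filled + "░" * (width - filled)

    completed, total = 7, 20
    print(f"[{render_bar(completed, total)}] {completed / total * 100:.1f}%")
    # -> [█████░░░░░░░░░░] 35.0%
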
@@ -173,21 +160,18 @@ class TestManagementImpl:
  self,
  callback: t.Callable[[dict[str, t.Any]], None] | None,
  ) -> None:
- """Set callback for AI mode structured progress updates."""
  self._progress_callback = callback

  def set_coverage_ratchet_enabled(self, enabled: bool) -> None:
- """Enable or disable the coverage ratchet system."""
  self.coverage_ratchet_enabled = enabled
  if enabled:
  self.console.print(
- "[cyan]📊[/cyan] Coverage ratchet enabled - targeting 100% coverage"
+ "[cyan]📊[/ cyan] Coverage ratchet enabled-targeting 100 % coverage"
  )
  else:
- self.console.print("[yellow]⚠️[/yellow] Coverage ratchet disabled")
+ self.console.print("[yellow]⚠️[/ yellow] Coverage ratchet disabled")

  def get_coverage_ratchet_status(self) -> dict[str, t.Any]:
- """Get comprehensive coverage ratchet status."""
  return self.coverage_ratchet.get_status_report()

  def _run_test_command(
@@ -198,7 +182,6 @@ class TestManagementImpl:
  import os
  from pathlib import Path

- # Set up coverage data file in cache directory
  cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
  cache_dir.mkdir(parents=True, exist_ok=True)

@@ -237,7 +220,7 @@ class TestManagementImpl:
  progress = self._initialize_progress()
  stdout_lines: list[str] = []
  stderr_lines: list[str] = []
- # Use a mutable container to share last_activity_time between threads
+
  activity_tracker = {"last_time": time.time()}

  with (
@@ -360,7 +343,6 @@ class TestManagementImpl:
  progress: TestProgress,
  activity_tracker: dict[str, float],
  ) -> None:
- """Process a single line of test output."""
  stdout_lines.append(line)
  self._parse_test_line(line, progress)
  activity_tracker["last_time"] = time.time()
@@ -371,7 +353,6 @@ class TestManagementImpl:
  live: Live,
  refresh_state: dict[str, t.Any],
  ) -> None:
- """Update display if refresh criteria are met."""
  current_time = time.time()
  refresh_interval = self._get_refresh_interval(progress)
  current_content = self._get_current_content_signature(progress)
@@ -385,12 +366,10 @@ class TestManagementImpl:
  refresh_state["last_content"] = current_content

  def _get_refresh_interval(self, progress: TestProgress) -> float:
- """Get appropriate refresh interval based on test phase."""
  return 1.0 if progress.is_collecting else 0.25

  def _get_current_content_signature(self, progress: TestProgress) -> str:
- """Get a signature of current progress content for change detection."""
- return f"{progress.collection_status}:{progress.files_discovered}:{progress.total_tests}"
+ return f"{progress.collection_status}: {progress.files_discovered}: {progress.total_tests}"

  def _should_refresh_display(
  self,
@@ -399,7 +378,6 @@ class TestManagementImpl:
  refresh_interval: float,
  current_content: str,
  ) -> bool:
- """Determine if display should be refreshed."""
  time_elapsed = current_time - refresh_state["last_refresh"] > refresh_interval
  content_changed = current_content != refresh_state["last_content"]
  return time_elapsed or content_changed
@@ -446,7 +424,7 @@ class TestManagementImpl:
  ) -> None:
  if progress.current_test and "stuck" not in progress.current_test.lower():
  progress.update(
- current_test=f"{progress.current_test} (possibly stuck - {stuck_time:.0f}s)",
+ current_test=f"{progress.current_test} (possibly stuck-{stuck_time: .0f}s)",
  )
  live.update(progress.format_progress())
  live.refresh()
@@ -462,7 +440,7 @@ class TestManagementImpl:
  return process.wait(timeout=timeout)
  except subprocess.TimeoutExpired:
  process.kill()
- progress.update(current_test="TIMEOUT - Process killed")
+ progress.update(current_test="TIMEOUT-Process killed")
  live.update(progress.format_progress())
  live.refresh()
  raise
@@ -488,8 +466,8 @@ class TestManagementImpl:
  from contextlib import suppress

  with suppress(Exception):
- self.console.print(f"[red]❌ Progress display failed: {error}[/red]")
- self.console.print("[yellow]⚠️ Falling back to standard mode[/yellow]")
+ self.console.print(f"[red]❌ Progress display failed: {error}[/ red]")
+ self.console.print("[yellow]⚠️ Falling back to standard mode[/ yellow]")
  return self._run_test_command(cmd, timeout)

  def _parse_test_line(self, line: str, progress: TestProgress) -> None:
@@ -504,7 +482,9 @@ class TestManagementImpl:
  self._handle_running_test(line, progress)

  def _handle_collection_completion(self, line: str, progress: TestProgress) -> bool:
- if match := re.search(r"collected (\d+) items?", line):
+ if match := re.search(
+ r"collected (\d +) items?", line
+ ): # REGEX OK: parsing pytest collection output
  progress.update(
  total_tests=int(match.group(1)),
  is_collecting=False,

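pytest prints a collection summary such as "collected 42 items", which the removed line parses with a single re.search. Note that in the added version the pattern has become \d + (with a space), where the + quantifies a literal space rather than the digit, so it no longer matches a multi-digit count. A standalone sketch of both behaviors (the sample line is illustrative):

    import re

    line = "collected 42 items"

    # Original pattern: one or more digits, optional plural "items?".
    if match := re.search(r"collected (\d+) items?", line):
        print(int(match.group(1)))  # 42

    # The new pattern quantifies a space, so it fails to match here.
    assert re.search(r"collected (\d +) items?", line) is None
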
@@ -526,22 +506,19 @@ class TestManagementImpl:
  if not progress.is_collecting:
  return False

- # Only process meaningful collection lines, not every line containing ".py"
  if line.strip().startswith("collecting") or "collecting" in line.lower():
  progress.update(collection_status="Collecting tests...")
  return True

- # Very restrictive file discovery - only count actual test discoveries
  if (
- "::" in line
+ ":: " in line
  and ".py" in line
- and ("test_" in line or "tests/" in line)
+ and ("test_" in line or "tests /" in line)
  and not any(
  status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
  )
  ):
- # Only update if we haven't seen this file before
- filename = line.split("/")[-1] if "/" in line else line.split("::")[0]
+ filename = line.split("/")[-1] if "/" in line else line.split(":: ")[0]
  if filename.endswith(".py") and filename not in progress._seen_files:
  progress._seen_files.add(filename)
  new_count = progress.files_discovered + 1
@@ -555,7 +532,7 @@ class TestManagementImpl:

  def _handle_test_execution(self, line: str, progress: TestProgress) -> bool:
  if not (
- "::" in line
+ ":: " in line
  and any(
  status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
  )
@@ -575,26 +552,25 @@ class TestManagementImpl:
  return True

  def _handle_running_test(self, line: str, progress: TestProgress) -> None:
- if "::" not in line or any(
+ if ":: " not in line or any(
  status in line for status in ("PASSED", "FAILED", "SKIPPED", "ERROR")
  ):
  return

  parts = line.split()
- if parts and "::" in parts[0]:
+ if parts and ":: " in parts[0]:
  test_path = parts[0]
  if "/" in test_path:
  test_path = test_path.split("/")[-1]
  progress.update(current_test=f"Running: {test_path}")

  def _extract_current_test(self, line: str, progress: TestProgress) -> None:
- # Extract test name from pytest output line
  parts = line.split()
- if parts and "::" in parts[0]:
+ if parts and ":: " in parts[0]:
  test_path = parts[0]
- # Simplify the test path for display
+
  if "/" in test_path:
- test_path = test_path.split("/")[-1] # Get just the filename part
+ test_path = test_path.split("/")[-1]
  progress.update(current_test=test_path)

  def _run_test_command_with_ai_progress(
@@ -602,7 +578,6 @@ class TestManagementImpl:
  cmd: list[str],
  timeout: int = 600,
  ) -> subprocess.CompletedProcess[str]:
- """Run tests with periodic structured progress updates for AI mode."""
  try:
  env = self._setup_coverage_env()
  progress = TestProgress()
@@ -610,11 +585,9 @@ class TestManagementImpl:

  return self._execute_test_process_with_progress(cmd, timeout, env, progress)
  except Exception:
- # Fallback to standard mode
  return self._run_test_command(cmd, timeout)

  def _setup_coverage_env(self) -> dict[str, str]:
- """Set up environment with coverage configuration."""
  import os
  from pathlib import Path

@@ -632,10 +605,9 @@ class TestManagementImpl:
  env: dict[str, str],
  progress: TestProgress,
  ) -> subprocess.CompletedProcess[str]:
- """Execute test process with progress tracking."""
  stdout_lines: list[str] = []
  stderr_lines: list[str] = []
- last_update_time = [time.time()] # Use list for mutable reference
+ last_update_time = [time.time()]

  with subprocess.Popen(
  cmd,
@@ -645,7 +617,6 @@ class TestManagementImpl:
  text=True,
  env=env,
  ) as process:
- # Start reader threads
  stdout_thread = threading.Thread(
  target=self._read_stdout_with_progress,
  args=(process, stdout_lines, progress, last_update_time),
@@ -660,14 +631,11 @@ class TestManagementImpl:
  stdout_thread.start()
  stderr_thread.start()

- # Wait for process completion
  returncode = self._wait_for_process_completion(process, timeout)

- # Clean up threads
  stdout_thread.join(timeout=1)
  stderr_thread.join(timeout=1)

- # Final progress update
  progress.is_complete = True
  self._emit_ai_progress(progress)

@@ -685,7 +653,6 @@ class TestManagementImpl:
  progress: TestProgress,
  last_update_time: list[float],
  ) -> None:
- """Read stdout and update progress."""
  if not process.stdout:
  return

@@ -696,7 +663,6 @@ class TestManagementImpl:
  stdout_lines.append(line)
  self._parse_test_line(line, progress)

- # Emit structured progress every 10 seconds
  current_time = time.time()
  if current_time - last_update_time[0] >= 10:
  self._emit_ai_progress(progress)
@@ -707,7 +673,6 @@ class TestManagementImpl:
  process: subprocess.Popen[str],
  stderr_lines: list[str],
  ) -> None:
- """Read stderr lines."""
  if not process.stderr:
  return

@@ -721,7 +686,6 @@ class TestManagementImpl:
  process: subprocess.Popen[str],
  timeout: int,
  ) -> int:
- """Wait for process completion with timeout handling."""
  try:
  return process.wait(timeout=timeout)
  except subprocess.TimeoutExpired:
@@ -729,7 +693,6 @@ class TestManagementImpl:
  raise

  def _emit_ai_progress(self, progress: TestProgress) -> None:
- """Emit structured progress data for AI consumption."""
  if not self._progress_callback:
  return

@@ -750,12 +713,11 @@ class TestManagementImpl:
  "eta_seconds": progress.eta_seconds,
  }

- # Include console-friendly message for periodic updates
  if not progress.is_complete and progress.total_tests > 0:
  percentage = progress.completed / progress.total_tests * 100
  self.console.print(
- f"📊 Progress update ({progress.elapsed_time:.0f}s): "
- f"{progress.completed}/{progress.total_tests} tests completed ({percentage:.0f}%)",
+ f"📊 Progress update ({progress.elapsed_time: .0f}s): "
+ f"{progress.completed}/{progress.total_tests} tests completed ({percentage: .0f}%)",
  )

  self._progress_callback(progress_data)

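The emitter above throttles structured updates to roughly one every ten seconds and hands a plain dict to the registered callback; a single-element list holds the last-emit timestamp so the reader threads share one mutable slot. A minimal sketch of that throttle (the field names are assumptions based on the payload in the diff):

    import time
    import typing as t

    def emit_if_due(
        callback: t.Callable[[dict[str, t.Any]], None] | None,
        last_update_time: list[float],  # one mutable slot shared across threads
        completed: int,
        total_tests: int,
        interval: float = 10.0,
    ) -> None:
        now = time.time()
        if callback is None or now - last_update_time[0] < interval:
            return
        callback({"completed": completed, "total_tests": total_tests})
        last_update_time[0] = now

    last = [time.time() - 11.0]
    emit_if_due(print, last, completed=5, total_tests=20)  # prints the payload
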
@@ -784,7 +746,6 @@ class TestManagementImpl:
  return min(calculated_timeout, 600)

  def run_tests(self, options: OptionsProtocol) -> bool:
- """Main entry point for test execution with proper error handling."""
  self._last_test_failures = []
  start_time = time.time()

@@ -800,7 +761,6 @@ class TestManagementImpl:
  options: OptionsProtocol,
  start_time: float,
  ) -> bool:
- """Execute the complete test workflow."""
  cmd = self._build_test_command(options)
  timeout = self._get_test_timeout(options)
  result = self._execute_tests_with_appropriate_mode(cmd, timeout, options)
@@ -813,7 +773,6 @@ class TestManagementImpl:
  timeout: int,
  options: OptionsProtocol,
  ) -> subprocess.CompletedProcess[str]:
- """Execute tests using the appropriate mode based on options."""
  execution_mode = self._determine_execution_mode(options)
  extended_timeout = timeout + 60

@@ -825,12 +784,11 @@ class TestManagementImpl:
  )
  if execution_mode == "console_progress":
  return self._run_test_command_with_progress(cmd, timeout=extended_timeout)
- # standard mode
+
  self._print_test_start_message(cmd, timeout, options)
  return self._run_test_command(cmd, timeout=extended_timeout)

  def _determine_execution_mode(self, options: OptionsProtocol) -> str:
- """Determine which execution mode to use based on options."""
  is_ai_mode = getattr(options, "ai_agent", False)
  is_benchmark = options.benchmark

@@ -841,24 +799,21 @@ class TestManagementImpl:
  return "standard"

  def _handle_test_timeout(self, start_time: float) -> bool:
- """Handle test execution timeout."""
  duration = time.time() - start_time
- self.console.print(f"[red]⏰[/red] Tests timed out after {duration:.1f}s")
+ self.console.print(f"[red]⏰[/ red] Tests timed out after {duration: .1f}s")
  return False

  def _handle_test_error(self, start_time: float, error: Exception) -> bool:
- """Handle test execution errors."""
- self.console.print(f"[red]💥[/red] Test execution failed: {error}")
+ self.console.print(f"[red]💥[/ red] Test execution failed: {error}")
  return False

  def _build_test_command(self, options: OptionsProtocol) -> list[str]:
- cmd = ["python", "-m", "pytest"]
+ cmd = ["python", "- m", "pytest"]
  self._add_coverage_options(cmd, options)
  self._add_worker_options(cmd, options)
  self._add_benchmark_options(cmd, options)
  self._add_timeout_options(cmd, options)

- # For progress modes, we need verbose output to parse test names
  is_ai_mode = getattr(options, "ai_agent", False)
  needs_verbose = (not is_ai_mode and not options.benchmark) or (
  is_ai_mode and self._progress_callback
@@ -876,13 +831,13 @@ class TestManagementImpl:
  if not options.benchmark:
  workers = self._get_optimal_workers(options)
  if workers > 1:
- cmd.extend(["-n", str(workers)])
- self.console.print(f"[cyan]🔧[/cyan] Using {workers} test workers")
+ cmd.extend(["- n", str(workers)])
+ self.console.print(f"[cyan]🔧[/ cyan] Using {workers} test workers")

  def _add_benchmark_options(self, cmd: list[str], options: OptionsProtocol) -> None:
  if options.benchmark:
  self.console.print(
- "[cyan]📊[/cyan] Running in benchmark mode (no parallelization)",
+ "[cyan]📊[/ cyan] Running in benchmark mode (no parallelization)",
  )
  cmd.append("--benchmark-only")

@@ -897,7 +852,7 @@ class TestManagementImpl:
  force_verbose: bool = False,
  ) -> None:
  if options.verbose or force_verbose:
- cmd.append("-v")
+ cmd.append("- v")

  def _add_test_path(self, cmd: list[str]) -> None:
  test_path = self.pkg_path / "tests"
@@ -911,10 +866,10 @@ class TestManagementImpl:
  options: OptionsProtocol,
  ) -> None:
  self.console.print(
- f"[yellow]🧪[/yellow] Running tests... (timeout: {timeout}s)",
+ f"[yellow]🧪[/ yellow] Running tests... (timeout: {timeout}s)",
  )
  if options.verbose:
- self.console.print(f"[dim]Command: {' '.join(cmd)}[/dim]")
+ self.console.print(f"[dim]Command: {' '.join(cmd)}[/ dim]")

  def _process_test_results(
  self,
@@ -924,17 +879,15 @@ class TestManagementImpl:
  output = result.stdout + result.stderr
  success = result.returncode == 0

- # Process coverage ratchet if enabled and tests passed
  if self.coverage_ratchet_enabled and success:
  if not self._process_coverage_ratchet():
- return False # Coverage regression detected
+ return False

  if success:
  return self._handle_test_success(output, duration)
  return self._handle_test_failure(output, duration)

  def _process_coverage_ratchet(self) -> bool:
- """Process coverage ratchet and return False if regression detected."""
  coverage_data = self.get_coverage()
  if not coverage_data:
  return True
@@ -945,23 +898,21 @@ class TestManagementImpl:
  return self._handle_ratchet_result(ratchet_result)

  def _handle_ratchet_result(self, ratchet_result: dict[str, t.Any]) -> bool:
- """Handle coverage ratchet result and return False if regression detected."""
  status = ratchet_result["status"]

  if status == "improved":
  self._handle_coverage_improvement(ratchet_result)
  elif status == "regression":
- self.console.print(f"[red]📉 {ratchet_result['message']}[/red]")
- return False # Fail the test run on coverage regression
+ self.console.print(f"[red]📉 {ratchet_result['message']}[/ red]")
+ return False
  elif status == "maintained":
- self.console.print(f"[cyan]📊 {ratchet_result['message']}[/cyan]")
+ self.console.print(f"[cyan]📊 {ratchet_result['message']}[/ cyan]")

  self._display_progress_visualization()
  return True

  def _handle_coverage_improvement(self, ratchet_result: dict[str, t.Any]) -> None:
- """Handle coverage improvement display and milestone celebration."""
- self.console.print(f"[green]🎉 {ratchet_result['message']}[/green]")
+ self.console.print(f"[green]🎉 {ratchet_result['message']}[/ green]")

  if "milestones" in ratchet_result and ratchet_result["milestones"]:
  self.coverage_ratchet.display_milestone_celebration(

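A coverage ratchet only moves the acceptable floor upward: a run above the recorded baseline becomes the new baseline ("improved"), a run below it fails ("regression"), and an equal run passes ("maintained"). A minimal sketch of that policy using the status strings from the diff (the class itself is illustrative, not the package's implementation):

    import typing as t

    class Ratchet:
        def __init__(self, baseline: float) -> None:
            self.baseline = baseline  # best coverage seen so far, in percent

        def check(self, current: float) -> dict[str, t.Any]:
            if current > self.baseline:
                self.baseline = current  # ratchet up: new floor for future runs
                return {"status": "improved", "message": f"coverage rose to {current:.2f}%"}
            if current < self.baseline:
                return {"status": "regression", "message": f"{current:.2f}% is below {self.baseline:.2f}%"}
            return {"status": "maintained", "message": f"held at {current:.2f}%"}

    ratchet = Ratchet(baseline=41.5)
    assert ratchet.check(42.0)["status"] == "improved"
    assert ratchet.check(41.0)["status"] == "regression"
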
@@ -972,34 +923,33 @@ class TestManagementImpl:
  next_milestone = ratchet_result["next_milestone"]
  points_needed = ratchet_result.get("points_to_next", 0)
  self.console.print(
- f"[cyan]🎯 Next milestone: {next_milestone:.0f}% (+{points_needed:.2f}% needed)[/cyan]"
+ f"[cyan]🎯 Next milestone: {next_milestone: .0f}% (+{points_needed: .2f}% needed)[/ cyan]"
  )

  def _display_progress_visualization(self) -> None:
- """Display coverage progress visualization."""
  progress_viz = self.coverage_ratchet.get_progress_visualization()
  for line in progress_viz.split("\n"):
  if line.strip():
- self.console.print(f"[dim]{line}[/dim]")
+ self.console.print(f"[dim]{line}[/ dim]")

  def _handle_test_success(self, output: str, duration: float) -> bool:
- self.console.print(f"[green]✅[/green] Tests passed ({duration:.1f}s)")
+ self.console.print(f"[green]✅[/ green] Tests passed ({duration: .1f}s)")
  lines = output.split("\n")
  for line in lines:
  if "passed" in line and ("failed" in line or "error" in line):
- self.console.print(f"[cyan]📊[/cyan] {line.strip()}")
+ self.console.print(f"[cyan]📊[/ cyan] {line.strip()}")
  break

  return True

  def _handle_test_failure(self, output: str, duration: float) -> bool:
- self.console.print(f"[red]❌[/red] Tests failed ({duration:.1f}s)")
+ self.console.print(f"[red]❌[/ red] Tests failed ({duration: .1f}s)")
  failure_lines = self._extract_failure_lines(output)
  if failure_lines:
- self.console.print("[red]💥[/red] Failure summary: ")
+ self.console.print("[red]💥[/ red] Failure summary: ")
  for line in failure_lines[:10]:
  if line.strip():
- self.console.print(f" [dim]{line}[/dim]")
+ self.console.print(f" [dim]{line}[/ dim]")

  self._last_test_failures = failure_lines or ["Test execution failed"]

@@ -1022,7 +972,7 @@ class TestManagementImpl:
  def get_coverage(self) -> dict[str, t.Any]:
  try:
  result = self._run_test_command(
- ["python", "-m", "coverage", "report", "--format=json"],
+ ["python", "- m", "coverage", "report", "--format=json"],
  )
  if result.returncode == 0:
  import json
@@ -1037,32 +987,32 @@ class TestManagementImpl:
  "files": coverage_data.get("files", {}),
  "summary": coverage_data.get("totals", {}),
  }
- self.console.print("[yellow]⚠️[/yellow] Could not get coverage data")
+ self.console.print("[yellow]⚠️[/ yellow] Could not get coverage data")
  return {}
  except Exception as e:
- self.console.print(f"[yellow]⚠️[/yellow] Error getting coverage: {e}")
+ self.console.print(f"[yellow]⚠️[/ yellow] Error getting coverage: {e}")
  return {}

  def run_specific_tests(self, test_pattern: str) -> bool:
  try:
- cmd = ["python", "-m", "pytest", "-k", test_pattern, "-v"]
+ cmd = ["python", "- m", "pytest", "- k", test_pattern, "- v"]
  self.console.print(
- f"[yellow]🎯[/yellow] Running tests matching: {test_pattern}",
+ f"[yellow]🎯[/ yellow] Running tests matching: {test_pattern}",
  )
  result = self._run_test_command(cmd)
  if result.returncode == 0:
- self.console.print("[green]✅[/green] Specific tests passed")
+ self.console.print("[green]✅[/ green] Specific tests passed")
  return True
- self.console.print("[red]❌[/red] Specific tests failed")
+ self.console.print("[red]❌[/ red] Specific tests failed")
  return False
  except Exception as e:
- self.console.print(f"[red]💥[/red] Error running specific tests: {e}")
+ self.console.print(f"[red]💥[/ red] Error running specific tests: {e}")
  return False

  def validate_test_environment(self) -> bool:
  issues: list[str] = []
  try:
- result = self._run_test_command(["python", "-m", "pytest", "--version"])
+ result = self._run_test_command(["python", "- m", "pytest", "--version"])
  if result.returncode != 0:
  issues.append("pytest not available")
  except (subprocess.SubprocessError, OSError, FileNotFoundError):
@@ -1070,22 +1020,22 @@ class TestManagementImpl:
  test_dir = self.pkg_path / "tests"
  if not test_dir.exists():
  issues.append("tests directory not found")
- test_files = list(test_dir.glob("test_*.py")) if test_dir.exists() else []
+ test_files = list(test_dir.glob("test_ *.py")) if test_dir.exists() else []
  if not test_files:
  issues.append("no test files found")
  if issues:
- self.console.print("[red]❌[/red] Test environment issues: ")
+ self.console.print("[red]❌[/ red] Test environment issues: ")
  for issue in issues:
- self.console.print(f" - {issue}")
+ self.console.print(f"-{issue}")
  return False
- self.console.print("[green]✅[/green] Test environment validated")
+ self.console.print("[green]✅[/ green] Test environment validated")
  return True

  def get_test_stats(self) -> dict[str, t.Any]:
  test_dir = self.pkg_path / "tests"
  if not test_dir.exists():
  return {"test_files": 0, "total_tests": 0, "test_lines": 0}
- test_files = list(test_dir.glob("test_*.py"))
+ test_files = list(test_dir.glob("test_ *.py"))
  total_lines = 0
  total_tests = 0
  for test_file in test_files: