crackerjack 0.31.10__py3-none-any.whl → 0.31.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -102,8 +102,6 @@ class CorrelationTracker:
102
102
 
103
103
 
104
104
  class MinimalProgressStreamer:
105
- """Fallback progress streamer that provides minimal functionality."""
106
-
107
105
  def __init__(self) -> None:
108
106
  pass
109
107
 
@@ -208,40 +206,31 @@ class AdvancedWorkflowOrchestrator:
208
206
  self.test_streamer = TestProgressStreamer(console, pkg_path)
209
207
  self.planner = OrchestrationPlanner(console)
210
208
 
211
- # Initialize progress_streamer early (needed by _detect_and_configure_mcp_mode)
212
209
  self.correlation_tracker = CorrelationTracker()
213
210
  try:
214
211
  self.progress_streamer = ProgressStreamer(self.config, session)
215
212
  except Exception as e:
216
- # Fallback to a minimal progress streamer if there's an issue
217
213
  console.print(
218
- f"[yellow]Warning: ProgressStreamer initialization failed: {e}[/yellow]",
214
+ f"[yellow]Warning: ProgressStreamer initialization failed: {e}[/ yellow]",
219
215
  )
220
216
  self.progress_streamer = MinimalProgressStreamer()
221
217
  self.metrics = get_metrics_collector()
222
218
 
223
- # Detect if running in MCP mode and configure accordingly
224
219
  self._detect_and_configure_mcp_mode()
225
220
 
226
221
  self.agent_coordinator: AgentCoordinator | None = None
227
222
 
228
223
  def _detect_and_configure_mcp_mode(self) -> None:
229
- """Detect if running in MCP context and configure for minimal terminal I/O."""
230
- # Check for MCP context indicators
231
224
  is_mcp_mode = (
232
- # Console is using StringIO (stdio mode)
233
225
  hasattr(self.console.file, "getvalue")
234
- # Or console is not attached to a real terminal
235
226
  or not self.console.is_terminal
236
- # Or we have a web job ID (indicates MCP execution)
237
227
  or hasattr(self.session, "job_id")
238
228
  )
239
229
 
240
230
  if is_mcp_mode:
241
- # Configure individual executor for MCP mode to prevent terminal lockup
242
231
  self.individual_executor.set_mcp_mode(True)
243
232
  self.console.print(
244
- "[dim]🔧 MCP mode detected - using minimal output mode[/dim]",
233
+ "[dim]🔧 MCP mode detected-using minimal output mode[/ dim]",
245
234
  )
246
235
  if self.config.ai_coordination_mode in (
247
236
  AICoordinationMode.MULTI_AGENT,
@@ -255,24 +244,20 @@ class AdvancedWorkflowOrchestrator:
255
244
  self.test_streamer.set_progress_callback(self._update_test_suite_progress)
256
245
 
257
246
  def _configure_verbose_mode(self, options: OptionsProtocol) -> None:
258
- """Configure hook output verbosity based on user options."""
259
- # Enable verbose output if explicitly requested, otherwise use quiet mode
260
247
  verbose_mode = getattr(options, "verbose", False)
261
248
 
262
- # Don't override MCP mode detection - only configure if not already in MCP mode
263
249
  if not hasattr(self.console.file, "getvalue"):
264
- # Set quiet mode (suppress realtime output) unless verbose mode is enabled
265
250
  quiet_mode = not verbose_mode
266
251
  self.individual_executor.set_mcp_mode(quiet_mode)
267
252
 
268
253
  if verbose_mode:
269
254
  self.console.print(
270
- "[dim]🔧 Verbose mode enabled - showing detailed hook output[/dim]",
255
+ "[dim]🔧 Verbose mode enabled-showing detailed hook output[/ dim]",
271
256
  )
272
257
 
273
258
  def _initialize_multi_agent_system(self) -> None:
274
259
  self.console.print(
275
- "[bold cyan]🤖 Initializing Multi-Agent AI System[/bold cyan]",
260
+ "[bold cyan]🤖 Initializing Multi-Agent AI System[/ bold cyan]",
276
261
  )
277
262
 
278
263
  agent_context = AgentContext(
@@ -285,14 +270,14 @@ class AdvancedWorkflowOrchestrator:
285
270
 
286
271
  capabilities = self.agent_coordinator.get_agent_capabilities()
287
272
  self.console.print(
288
- f"[green]✅ Initialized {len(capabilities)} specialized agents: [/green]",
273
+ f"[green]✅ Initialized {len(capabilities)} specialized agents: [/ green]",
289
274
  )
290
275
  for agent_name, info in capabilities.items():
291
276
  types_str = ", ".join(info["supported_types"])
292
277
  self.console.print(f" • {agent_name}: {types_str}")
293
278
 
294
279
  self.console.print(
295
- f"[cyan]AI Coordination Mode: {self.config.ai_coordination_mode.value}[/cyan]",
280
+ f"[cyan]AI Coordination Mode: {self.config.ai_coordination_mode.value}[/ cyan]",
296
281
  )
297
282
 
298
283
  def _display_iteration_stats(
@@ -305,40 +290,36 @@ class AdvancedWorkflowOrchestrator:
305
290
  ai_time: float,
306
291
  context: t.Any,
307
292
  ) -> None:
308
- """Display rich iteration statistics panel."""
309
- # Create timing table
310
293
  timing_table = Table(show_header=True, header_style="bold cyan")
311
294
  timing_table.add_column("Phase", style="cyan")
312
295
  timing_table.add_column("This Iteration", justify="right", style="yellow")
313
296
  timing_table.add_column("Cumulative", justify="right", style="green")
314
297
 
315
- # Add timing rows
316
298
  timing_table.add_row(
317
299
  "🔧 Hooks",
318
- f"{iteration_times.get('hooks', 0):.1f}s",
319
- f"{hooks_time:.1f}s",
300
+ f"{iteration_times.get('hooks', 0): .1f}s",
301
+ f"{hooks_time: .1f}s",
320
302
  )
321
303
  timing_table.add_row(
322
304
  "🧪 Tests",
323
- f"{iteration_times.get('tests', 0):.1f}s",
324
- f"{tests_time:.1f}s",
305
+ f"{iteration_times.get('tests', 0): .1f}s",
306
+ f"{tests_time: .1f}s",
325
307
  )
326
308
  timing_table.add_row(
327
309
  "🤖 AI Analysis",
328
- f"{iteration_times.get('ai', 0):.1f}s",
329
- f"{ai_time:.1f}s",
310
+ f"{iteration_times.get('ai', 0): .1f}s",
311
+ f"{ai_time: .1f}s",
330
312
  )
331
313
 
332
314
  total_iteration_time = sum(iteration_times.values())
333
315
  total_cumulative_time = hooks_time + tests_time + ai_time
334
316
  timing_table.add_row(
335
317
  "📊 Total",
336
- f"{total_iteration_time:.1f}s",
337
- f"{total_cumulative_time:.1f}s",
318
+ f"{total_iteration_time: .1f}s",
319
+ f"{total_cumulative_time: .1f}s",
338
320
  style="bold",
339
321
  )
340
322
 
341
- # Create status table
342
323
  status_table = Table(show_header=True, header_style="bold magenta")
343
324
  status_table.add_column("Metric", style="magenta")
344
325
  status_table.add_column("Value", justify="right", style="white")
@@ -346,7 +327,7 @@ class AdvancedWorkflowOrchestrator:
346
327
  status_table.add_row("🔄 Iteration", f"{iteration}/{max_iterations}")
347
328
  status_table.add_row(
348
329
  "📈 Progress",
349
- f"{(iteration / max_iterations) * 100:.1f}%",
330
+ f"{(iteration / max_iterations) * 100: .1f}%",
350
331
  )
351
332
 
352
333
  if hasattr(context, "hook_failures"):
@@ -354,18 +335,17 @@ class AdvancedWorkflowOrchestrator:
354
335
  if hasattr(context, "test_failures"):
355
336
  status_table.add_row("🧪 Test Failures", str(len(context.test_failures)))
356
337
 
357
- # Create the panel with properly rendered tables
358
338
  panel_content = Group(
359
- "[bold white]Timing Breakdown[/bold white]",
339
+ "[bold white]Timing Breakdown[/ bold white]",
360
340
  timing_table,
361
341
  "",
362
- "[bold white]Status Summary[/bold white]",
342
+ "[bold white]Status Summary[/ bold white]",
363
343
  status_table,
364
344
  )
365
345
 
366
346
  iteration_panel = Panel(
367
347
  panel_content,
368
- title=f"[bold bright_blue]📊 Iteration {iteration} Statistics[/bold bright_blue]",
348
+ title=f"[bold bright_blue]📊 Iteration {iteration} Statistics[/ bold bright_blue]",
369
349
  border_style="bright_blue",
370
350
  padding=(1, 2),
371
351
  )
@@ -379,7 +359,6 @@ class AdvancedWorkflowOrchestrator:
379
359
  options: OptionsProtocol,
380
360
  max_iterations: int = 10,
381
361
  ) -> bool:
382
- # Configure verbose mode before starting workflow
383
362
  self._configure_verbose_mode(options)
384
363
 
385
364
  workflow_start_time = time.time()
@@ -388,7 +367,7 @@ class AdvancedWorkflowOrchestrator:
388
367
  )
389
368
 
390
369
  self.console.print(
391
- "\n[bold bright_blue]🚀 STARTING ORCHESTRATED WORKFLOW[/bold bright_blue]",
370
+ "\n[bold bright_blue]🚀 STARTING ORCHESTRATED WORKFLOW[/ bold bright_blue]",
392
371
  )
393
372
 
394
373
  context = ExecutionContext(self.pkg_path, options)
@@ -414,7 +393,7 @@ class AdvancedWorkflowOrchestrator:
414
393
 
415
394
  for iteration in range(1, max_iterations + 1):
416
395
  self.console.print(
417
- f"\n[bold bright_yellow]🔄 ITERATION {iteration} / {max_iterations}[/bold bright_yellow]",
396
+ f"\n[bold bright_yellow]🔄 ITERATION {iteration} / {max_iterations}[/ bold bright_yellow]",
418
397
  )
419
398
 
420
399
  context.iteration_count = iteration
@@ -430,7 +409,6 @@ class AdvancedWorkflowOrchestrator:
430
409
  tests_time += iteration_times.get("tests", 0)
431
410
  ai_time += iteration_times.get("ai", 0)
432
411
 
433
- # Display iteration statistics panel
434
412
  self._display_iteration_stats(
435
413
  iteration,
436
414
  max_iterations,
@@ -443,7 +421,7 @@ class AdvancedWorkflowOrchestrator:
443
421
 
444
422
  if iteration_success:
445
423
  self.console.print(
446
- f"\n[bold green]🎉 WORKFLOW COMPLETED SUCCESSFULLY IN {iteration} ITERATIONS![/bold green]",
424
+ f"\n[bold green]🎉 WORKFLOW COMPLETED SUCCESSFULLY IN {iteration} ITERATIONS ![/ bold green]",
447
425
  )
448
426
  success = True
449
427
  break
@@ -456,7 +434,7 @@ class AdvancedWorkflowOrchestrator:
456
434
 
457
435
  if not success:
458
436
  self.console.print(
459
- f"\n[bold red]❌ WORKFLOW INCOMPLETE AFTER {max_iterations} ITERATIONS[/bold red]",
437
+ f"\n[bold red]❌ WORKFLOW INCOMPLETE AFTER {max_iterations} ITERATIONS[/ bold red]",
460
438
  )
461
439
 
462
440
  self._print_final_analysis()
@@ -549,7 +527,6 @@ class AdvancedWorkflowOrchestrator:
549
527
  strategy = hook_plan["strategy"]
550
528
  execution_mode = hook_plan["execution_mode"]
551
529
 
552
- # Special handling for fast hooks with autofix cycle
553
530
  if strategy.name == "fast":
554
531
  fast_results = await self._execute_fast_hooks_with_autofix(
555
532
  strategy,
@@ -558,7 +535,6 @@ class AdvancedWorkflowOrchestrator:
558
535
  )
559
536
  all_results.extend(fast_results)
560
537
  else:
561
- # Regular execution for non-fast hooks
562
538
  self.progress_streamer.update_stage(
563
539
  "hooks",
564
540
  f"executing_{strategy.name}",
@@ -582,7 +558,6 @@ class AdvancedWorkflowOrchestrator:
582
558
  execution_mode: ExecutionStrategy,
583
559
  context: ExecutionContext,
584
560
  ) -> list[HookResult]:
585
- """Execute fast hooks with autofix cycle if they fail twice."""
586
561
  self.progress_streamer.update_stage("hooks", "fast_hooks_with_autofix")
587
562
 
588
563
  max_autofix_cycles = 2
@@ -590,10 +565,9 @@ class AdvancedWorkflowOrchestrator:
590
565
 
591
566
  while autofix_cycle < max_autofix_cycles:
592
567
  self.console.print(
593
- f"[cyan]🚀 Fast hooks execution (autofix cycle {autofix_cycle + 1}/{max_autofix_cycles})[/cyan]",
568
+ f"[cyan]🚀 Fast hooks execution (autofix cycle {autofix_cycle + 1}/{max_autofix_cycles})[/ cyan]",
594
569
  )
595
570
 
596
- # Run fast hooks twice
597
571
  first_attempt = await self._execute_fast_hooks_attempt(
598
572
  strategy,
599
573
  execution_mode,
@@ -601,13 +575,12 @@ class AdvancedWorkflowOrchestrator:
601
575
 
602
576
  if all(r.status == "passed" for r in first_attempt):
603
577
  self.console.print(
604
- "[green]✅ Fast hooks passed on first attempt[/green]",
578
+ "[green]✅ Fast hooks passed on first attempt[/ green]",
605
579
  )
606
580
  return first_attempt
607
581
 
608
- # First attempt failed, try second attempt
609
582
  self.console.print(
610
- "[yellow]⚠️ Fast hooks failed on first attempt, retrying...[/yellow]",
583
+ "[yellow]⚠️ Fast hooks failed on first attempt, retrying...[/ yellow]",
611
584
  )
612
585
  second_attempt = await self._execute_fast_hooks_attempt(
613
586
  strategy,
@@ -616,24 +589,22 @@ class AdvancedWorkflowOrchestrator:
616
589
 
617
590
  if all(r.status == "passed" for r in second_attempt):
618
591
  self.console.print(
619
- "[green]✅ Fast hooks passed on second attempt[/green]",
592
+ "[green]✅ Fast hooks passed on second attempt[/ green]",
620
593
  )
621
594
  return second_attempt
622
595
 
623
- # Both attempts failed, check if we should run autofix
624
596
  autofix_cycle += 1
625
597
  if autofix_cycle < max_autofix_cycles:
626
598
  self.console.print(
627
- "[red]❌ Fast hooks failed twice, triggering autofix cycle...[/red]",
599
+ "[red]❌ Fast hooks failed twice, triggering autofix cycle...[/ red]",
628
600
  )
629
601
  await self._trigger_autofix_for_fast_hooks(second_attempt)
630
602
  else:
631
603
  self.console.print(
632
- "[red]❌ Fast hooks failed after maximum autofix cycles[/red]",
604
+ "[red]❌ Fast hooks failed after maximum autofix cycles[/ red]",
633
605
  )
634
606
  return second_attempt
635
607
 
636
- # Should never reach here, but return empty results as fallback
637
608
  return []
638
609
 
639
610
  async def _execute_fast_hooks_attempt(
@@ -641,7 +612,6 @@ class AdvancedWorkflowOrchestrator:
641
612
  strategy: HookStrategy,
642
613
  execution_mode: ExecutionStrategy,
643
614
  ) -> list[HookResult]:
644
- """Execute a single attempt of fast hooks."""
645
615
  if execution_mode == ExecutionStrategy.INDIVIDUAL:
646
616
  result = await self.individual_executor.execute_strategy_individual(
647
617
  strategy,
@@ -654,19 +624,16 @@ class AdvancedWorkflowOrchestrator:
654
624
  self,
655
625
  failed_results: list[HookResult],
656
626
  ) -> None:
657
- """Trigger AI autofix cycle for failed fast hooks."""
658
627
  self.console.print(
659
- "[magenta]🤖 Starting AI autofix cycle for fast hooks...[/magenta]",
628
+ "[magenta]🤖 Starting AI autofix cycle for fast hooks...[/ magenta]",
660
629
  )
661
630
 
662
- # Create mock test results for AI analysis (fast hooks don't include tests)
663
631
  mock_test_results = {
664
632
  "success": True,
665
633
  "failed_tests": [],
666
634
  "individual_tests": [],
667
635
  }
668
636
 
669
- # Create a minimal execution plan for AI analysis
670
637
  from .execution_strategies import ExecutionPlan, ExecutionStrategy
671
638
 
672
639
  mock_plan = ExecutionPlan(
@@ -685,7 +652,6 @@ class AdvancedWorkflowOrchestrator:
685
652
  estimated_total_duration=0,
686
653
  )
687
654
 
688
- # Execute AI analysis and fixes for hook failures
689
655
  ai_fixes = await self._execute_ai_phase(
690
656
  mock_plan,
691
657
  failed_results,
@@ -694,10 +660,10 @@ class AdvancedWorkflowOrchestrator:
694
660
 
695
661
  if ai_fixes:
696
662
  self.console.print(
697
- f"[green]✅ Applied {len(ai_fixes)} AI fixes for fast hooks[/green]",
663
+ f"[green]✅ Applied {len(ai_fixes)} AI fixes for fast hooks[/ green]",
698
664
  )
699
665
  else:
700
- self.console.print("[yellow]⚠️ No AI fixes could be applied[/yellow]")
666
+ self.console.print("[yellow]⚠️ No AI fixes could be applied[/ yellow]")
701
667
 
702
668
  async def _execute_tests_phase(
703
669
  self,
@@ -748,7 +714,7 @@ class AdvancedWorkflowOrchestrator:
748
714
  current_test = suite_progress.current_test or "running tests"
749
715
  self.progress_streamer.update_stage(
750
716
  "tests",
751
- f"{suite_progress.completed_tests} / {suite_progress.total_tests} - {current_test}",
717
+ f"{suite_progress.completed_tests} / {suite_progress.total_tests}-{current_test}",
752
718
  )
753
719
 
754
720
  async def _execute_ai_phase(
@@ -767,7 +733,7 @@ class AdvancedWorkflowOrchestrator:
767
733
 
768
734
  correlation_data = self.correlation_tracker.get_correlation_data()
769
735
 
770
- self.console.print("\n[bold magenta]🤖 AI ANALYSIS PHASE[/bold magenta]")
736
+ self.console.print("\n[bold magenta]🤖 AI ANALYSIS PHASE[/ bold magenta]")
771
737
  self.console.print(f"AI Mode: {self.config.ai_coordination_mode.value}")
772
738
  self.console.print(f"Failed hooks: {len(failed_hooks)}")
773
739
  self.console.print(f"Failed tests: {len(failed_tests)}")
@@ -812,7 +778,7 @@ class AdvancedWorkflowOrchestrator:
812
778
  failed_individual_tests: list[t.Any],
813
779
  correlation_data: dict[str, t.Any],
814
780
  ) -> list[str]:
815
- self.console.print("[bold cyan]🤖 Multi-Agent Analysis Started[/bold cyan]")
781
+ self.console.print("[bold cyan]🤖 Multi-Agent Analysis Started[/ bold cyan]")
816
782
 
817
783
  issues = []
818
784
 
@@ -853,11 +819,11 @@ class AdvancedWorkflowOrchestrator:
853
819
  return ["No issues identified for multi-agent analysis"]
854
820
 
855
821
  self.console.print(
856
- f"[cyan]Processing {len(issues)} issues with specialized agents...[/cyan]",
822
+ f"[cyan]Processing {len(issues)} issues with specialized agents...[/ cyan]",
857
823
  )
858
824
 
859
825
  assert self.agent_coordinator is not None
860
- # Use proactive handling by default for better architectural planning
826
+
861
827
  result = await self.agent_coordinator.handle_issues_proactively(issues)
862
828
 
863
829
  ai_fixes = []
@@ -865,7 +831,7 @@ class AdvancedWorkflowOrchestrator:
865
831
  ai_fixes.extend(result.fixes_applied)
866
832
  else:
867
833
  ai_fixes.append(
868
- f"Multi-agent analysis completed with {result.confidence:.2f} confidence",
834
+ f"Multi-agent analysis completed with {result.confidence: .2f} confidence",
869
835
  )
870
836
 
871
837
  if result.remaining_issues:
@@ -877,7 +843,7 @@ class AdvancedWorkflowOrchestrator:
877
843
  )
878
844
 
879
845
  self.console.print(
880
- f"[green]✅ Multi-agent analysis completed: {len(result.fixes_applied)} fixes applied[/green]",
846
+ f"[green]✅ Multi-agent analysis completed: {len(result.fixes_applied)} fixes applied[/ green]",
881
847
  )
882
848
  return ai_fixes
883
849
 
@@ -920,13 +886,13 @@ class AdvancedWorkflowOrchestrator:
920
886
 
921
887
  if problematic_hooks:
922
888
  self.console.print(
923
- f"[yellow]🧠 Adapting strategy due to recurring failures in: {', '.join(problematic_hooks)}[/yellow]",
889
+ f"[yellow]🧠 Adapting strategy due to recurring failures in: {', '.join(problematic_hooks)}[/ yellow]",
924
890
  )
925
891
 
926
892
  if current_plan.execution_strategy == ExecutionStrategy.BATCH:
927
893
  self.config.execution_strategy = ExecutionStrategy.INDIVIDUAL
928
894
  self.console.print(
929
- "[cyan]📋 Switching to individual execution for better debugging[/cyan]",
895
+ "[cyan]📋 Switching to individual execution for better debugging[/ cyan]",
930
896
  )
931
897
 
932
898
  hook_strategies = [
@@ -944,7 +910,7 @@ class AdvancedWorkflowOrchestrator:
944
910
 
945
911
  self.console.print("\n" + "=" * 80)
946
912
  self.console.print(
947
- "[bold bright_magenta]🔍 CORRELATION ANALYSIS[/bold bright_magenta]",
913
+ "[bold bright_magenta]🔍 CORRELATION ANALYSIS[/ bold bright_magenta]",
948
914
  )
949
915
  self.console.print("=" * 80)
950
916
 
@@ -952,14 +918,14 @@ class AdvancedWorkflowOrchestrator:
952
918
 
953
919
  if correlation_data["problematic_hooks"]:
954
920
  self.console.print(
955
- "\n[bold red]Problematic hooks (recurring failures): [/bold red]",
921
+ "\n[bold red]Problematic hooks (recurring failures): [/ bold red]",
956
922
  )
957
923
  for hook in correlation_data["problematic_hooks"]:
958
924
  failures = correlation_data["failure_patterns"][hook]
959
- self.console.print(f" ❌ {hook} - failed in {len(failures)} iterations")
925
+ self.console.print(f" ❌ {hook}-failed in {len(failures)} iterations")
960
926
 
961
927
  if correlation_data["recent_trends"]:
962
- self.console.print("\n[bold yellow]Recent trends: [/bold yellow]")
928
+ self.console.print("\n[bold yellow]Recent trends: [/ bold yellow]")
963
929
  for trend in correlation_data["recent_trends"][-2:]:
964
930
  failed_count = len(trend["failed_hooks"])
965
931
  self.console.print(
@@ -1,10 +1,3 @@
1
- """Coverage improvement orchestration.
2
-
3
- This module provides proactive test coverage improvement by analyzing coverage gaps
4
- and triggering the TestCreationAgent to automatically generate missing tests.
5
- Integrated into the AI agent workflow after successful test execution.
6
- """
7
-
8
1
  import logging
9
2
  import typing as t
10
3
  from pathlib import Path
@@ -15,69 +8,50 @@ from crackerjack.services.coverage_ratchet import CoverageRatchetService
15
8
 
16
9
 
17
10
  class CoverageImprovementOrchestrator:
18
- """Orchestrates automatic test coverage improvement."""
19
-
20
11
  def __init__(self, project_path: Path, console: t.Any = None) -> None:
21
12
  self.project_path = project_path
22
13
  self.logger = logging.getLogger(__name__)
23
14
  self.console = console
24
15
  self.coverage_service = CoverageRatchetService(project_path, console)
25
- self.min_coverage_improvement = 2.0 # Minimum 2% improvement target per run
16
+ self.min_coverage_improvement = 2.0
26
17
 
27
18
  async def should_improve_coverage(
28
19
  self, current_coverage: float | None = None
29
20
  ) -> bool:
30
- """Determine if coverage improvement should be attempted.
31
-
32
- Args:
33
- current_coverage: Current coverage percentage, will be detected if None
34
-
35
- Returns:
36
- True if coverage improvement should be attempted
37
- """
38
21
  try:
39
22
  if current_coverage is None:
40
23
  coverage_status = self.coverage_service.get_status_report()
41
24
  current_coverage = coverage_status.get("current_coverage", 0.0)
42
25
 
43
- # Always try to improve if coverage is below 100%
44
26
  if current_coverage is not None and current_coverage < 100.0:
45
27
  self.logger.info(
46
- f"Coverage at {current_coverage:.1f}% - improvement recommended"
28
+ f"Coverage at {current_coverage: .1f}%-improvement recommended"
47
29
  )
48
30
  return True
49
31
 
50
- self.logger.info("Coverage at 100% - no improvement needed")
32
+ self.logger.info("Coverage at 100 %-no improvement needed")
51
33
  return False
52
34
 
53
35
  except Exception as e:
54
36
  self.logger.warning(f"Could not determine coverage status: {e}")
55
- # Default to trying improvement if we can't determine coverage
37
+
56
38
  return True
57
39
 
58
40
  async def create_coverage_improvement_issue(
59
41
  self, coverage_gap: float | None = None
60
42
  ) -> Issue:
61
- """Create an issue for coverage improvement.
62
-
63
- Args:
64
- coverage_gap: Percentage gap to 100% coverage
65
-
66
- Returns:
67
- Issue configured for coverage improvement
68
- """
69
43
  if coverage_gap is None:
70
44
  try:
71
45
  coverage_status = self.coverage_service.get_status_report()
72
46
  current_coverage = coverage_status.get("current_coverage", 0.0)
73
47
  coverage_gap = 100.0 - current_coverage
74
48
  except Exception:
75
- coverage_gap = 90.0 # Default gap if we can't determine
49
+ coverage_gap = 90.0
76
50
 
77
51
  message = (
78
52
  f"Proactive coverage improvement requested. "
79
- f"Gap to 100%: {coverage_gap:.1f}%. "
80
- f"Target improvement: {min(self.min_coverage_improvement, coverage_gap) if coverage_gap is not None else self.min_coverage_improvement:.1f}%"
53
+ f"Gap to 100 %: {coverage_gap: .1f}%. "
54
+ f"Target improvement: {min(self.min_coverage_improvement, coverage_gap) if coverage_gap is not None else self.min_coverage_improvement: .1f}%"
81
55
  )
82
56
 
83
57
  return Issue(
@@ -85,38 +59,26 @@ class CoverageImprovementOrchestrator:
85
59
  type=IssueType.COVERAGE_IMPROVEMENT,
86
60
  severity=Priority.MEDIUM,
87
61
  message=message,
88
- file_path=None, # Project-wide improvement
62
+ file_path=None,
89
63
  stage="coverage_improvement",
90
64
  )
91
65
 
92
66
  async def execute_coverage_improvement(
93
67
  self, agent_context: t.Any
94
68
  ) -> dict[str, t.Any]:
95
- """Execute proactive coverage improvement.
96
-
97
- Args:
98
- agent_context: AgentContext for the TestCreationAgent
99
-
100
- Returns:
101
- Dictionary with improvement results
102
- """
103
69
  try:
104
70
  self.logger.info("Starting proactive coverage improvement")
105
71
 
106
- # Check if improvement is needed
107
72
  if not await self.should_improve_coverage():
108
73
  return self._create_skipped_result("Coverage improvement not needed")
109
74
 
110
- # Create coverage improvement issue and agent
111
75
  issue = await self.create_coverage_improvement_issue()
112
76
  test_agent = TestCreationAgent(agent_context)
113
77
 
114
- # Validate agent confidence
115
78
  confidence = await test_agent.can_handle(issue)
116
79
  if confidence < 0.5:
117
80
  return self._create_low_confidence_result(confidence)
118
81
 
119
- # Execute the coverage improvement
120
82
  fix_result = await test_agent.analyze_and_fix(issue)
121
83
  result = self._create_completion_result(fix_result)
122
84
 
@@ -127,7 +89,6 @@ class CoverageImprovementOrchestrator:
127
89
  return self._create_error_result(e)
128
90
 
129
91
  def _create_skipped_result(self, reason: str) -> dict[str, t.Any]:
130
- """Create result dict for skipped coverage improvement."""
131
92
  return {
132
93
  "status": "skipped",
133
94
  "reason": reason,
@@ -135,7 +96,6 @@ class CoverageImprovementOrchestrator:
135
96
  }
136
97
 
137
98
  def _create_low_confidence_result(self, confidence: float) -> dict[str, t.Any]:
138
- """Create result dict for low confidence scenario."""
139
99
  self.logger.warning(f"TestCreationAgent confidence too low: {confidence}")
140
100
  return {
141
101
  "status": "skipped",
@@ -144,7 +104,6 @@ class CoverageImprovementOrchestrator:
144
104
  }
145
105
 
146
106
  def _create_completion_result(self, fix_result: t.Any) -> dict[str, t.Any]:
147
- """Create result dict from fix results."""
148
107
  return {
149
108
  "status": "completed" if fix_result.success else "failed",
150
109
  "confidence": fix_result.confidence,
@@ -154,25 +113,23 @@ class CoverageImprovementOrchestrator:
154
113
  }
155
114
 
156
115
  def _log_and_display_results(self, fix_result: t.Any) -> None:
157
- """Log and display the results of coverage improvement."""
158
116
  if fix_result.success:
159
117
  self.logger.info(
160
118
  f"Coverage improvement successful: {len(fix_result.fixes_applied)} fixes applied"
161
119
  )
162
120
  if self.console:
163
121
  self.console.print(
164
- f"[green]📈[/green] Coverage improved: {len(fix_result.fixes_applied)} "
122
+ f"[green]📈[/ green] Coverage improved: {len(fix_result.fixes_applied)} "
165
123
  f"tests created in {len(fix_result.files_modified)} files"
166
124
  )
167
125
  else:
168
126
  self.logger.warning("Coverage improvement failed")
169
127
  if self.console:
170
128
  self.console.print(
171
- "[yellow]⚠️[/yellow] Coverage improvement attempt completed with issues"
129
+ "[yellow]⚠️[/ yellow] Coverage improvement attempt completed with issues"
172
130
  )
173
131
 
174
132
  def _create_error_result(self, error: Exception) -> dict[str, t.Any]:
175
- """Create result dict for error scenarios."""
176
133
  self.logger.error(f"Coverage improvement failed with error: {error}")
177
134
  return {
178
135
  "status": "error",
@@ -182,11 +139,6 @@ class CoverageImprovementOrchestrator:
182
139
  }
183
140
 
184
141
  async def get_coverage_improvement_recommendations(self) -> list[str]:
185
- """Get recommendations for coverage improvement strategies.
186
-
187
- Returns:
188
- List of strategic recommendations for improving coverage
189
- """
190
142
  recommendations = [
191
143
  "Focus on core business logic functions first",
192
144
  "Add tests for error handling and edge cases",
@@ -198,7 +150,6 @@ class CoverageImprovementOrchestrator:
198
150
  from contextlib import suppress
199
151
 
200
152
  with suppress(Exception):
201
- # Add coverage-specific recommendations based on current state
202
153
  coverage_status = self.coverage_service.get_status_report()
203
154
  current_coverage = coverage_status.get("current_coverage", 0.0)
204
155
 
@@ -219,5 +170,4 @@ class CoverageImprovementOrchestrator:
219
170
  async def create_coverage_improvement_orchestrator(
220
171
  project_path: Path, console: t.Any = None
221
172
  ) -> CoverageImprovementOrchestrator:
222
- """Create a coverage improvement orchestrator instance."""
223
173
  return CoverageImprovementOrchestrator(project_path, console)