crackerjack 0.31.10__py3-none-any.whl → 0.31.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +281 -94
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +343 -209
  41. crackerjack/dynamic_config.py +50 -9
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +17 -63
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +44 -73
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +71 -47
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +276 -428
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.13.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.10.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/licenses/LICENSE +0 -0
crackerjack/core/workflow_orchestrator.py

@@ -58,7 +58,7 @@ class WorkflowPipeline:
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
         with LoggingContext(
             "workflow_execution",
-            testing=getattr(options, "testing", False),
+            testing=getattr(options, "test", False),
             skip_hooks=getattr(options, "skip_hooks", False),
         ):
             start_time = time.time()
@@ -78,7 +78,6 @@ class WorkflowPipeline:
             self.session.cleanup_resources()

     def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
-        """Initialize session tracking and debug logging for workflow execution."""
         self.session.initialize_session_tracking(options)
         self.session.track_task("workflow", "Complete crackerjack workflow")

@@ -87,7 +86,6 @@ class WorkflowPipeline:
         self._log_workflow_startup_info(options)

     def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
-        """Log debug information for workflow startup."""
         if not self._should_debug():
             return

@@ -95,22 +93,20 @@ class WorkflowPipeline:
             "workflow_execution",
             "started",
             details={
-                "testing": getattr(options, "testing", False),
+                "testing": getattr(options, "test", False),
                 "skip_hooks": getattr(options, "skip_hooks", False),
                 "ai_agent": getattr(options, "ai_agent", False),
             },
         )

     def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
-        """Configure session cleanup settings if specified."""
         if hasattr(options, "cleanup"):
             self.session.set_cleanup_config(options.cleanup)

     def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
-        """Log informational message about workflow startup."""
         self.logger.info(
             "Starting complete workflow execution",
-            testing=getattr(options, "testing", False),
+            testing=getattr(options, "test", False),
             skip_hooks=getattr(options, "skip_hooks", False),
             package_path=str(self.pkg_path),
         )
@@ -118,7 +114,6 @@ class WorkflowPipeline:
     async def _execute_workflow_with_timing(
         self, options: OptionsProtocol, start_time: float
     ) -> bool:
-        """Execute workflow phases and handle success/completion logging."""
         success = await self._execute_workflow_phases(options)
         self.session.finalize_session(start_time, success)

@@ -129,7 +124,6 @@ class WorkflowPipeline:
         return success

     def _log_workflow_completion(self, success: bool, duration: float) -> None:
-        """Log workflow completion information."""
         self.logger.info(
             "Workflow execution completed",
             success=success,
@@ -137,7 +131,6 @@ class WorkflowPipeline:
         )

     def _log_workflow_completion_debug(self, success: bool, duration: float) -> None:
-        """Log debug information for workflow completion."""
         if not self._should_debug():
             return

@@ -152,14 +145,12 @@ class WorkflowPipeline:
         self.debugger.print_debug_summary()

     def _handle_user_interruption(self) -> bool:
-        """Handle KeyboardInterrupt gracefully."""
         self.console.print("Interrupted by user")
         self.session.fail_task("workflow", "Interrupted by user")
         self.logger.warning("Workflow interrupted by user")
         return False

     def _handle_workflow_exception(self, error: Exception) -> bool:
-        """Handle unexpected workflow exceptions."""
         self.console.print(f"Error: {error}")
         self.session.fail_task("workflow", f"Unexpected error: {error}")
         self.logger.exception(
@@ -173,11 +164,8 @@ class WorkflowPipeline:
         success = True
         self.phases.run_configuration_phase(options)

-        if not self.phases.run_cleaning_phase(options):
-            success = False
-            self.session.fail_task("workflow", "Cleaning phase failed")
-            return False
-
+        # Code cleaning is now integrated into the quality phase
+        # to run after fast hooks but before comprehensive hooks
         if not await self._execute_quality_phase(options):
             success = False
             return False
@@ -195,7 +183,7 @@ class WorkflowPipeline:
             return self._run_fast_hooks_phase(options)
         if hasattr(options, "comp") and options.comp:
             return self._run_comprehensive_hooks_phase(options)
-        if options.test:
+        if getattr(options, "test", False):
             return await self._execute_test_workflow(options)
         return self._execute_standard_hooks_workflow(options)

 
@@ -205,6 +193,15 @@ class WorkflowPipeline:
205
193
  if not self._run_initial_fast_hooks(options, iteration):
206
194
  return False
207
195
 
196
+ # Run code cleaning after fast hooks but before comprehensive hooks
197
+ if getattr(options, "clean", False):
198
+ if not self._run_code_cleaning_phase(options):
199
+ return False
200
+ # Run fast hooks again after cleaning for sanity check
201
+ if not self._run_post_cleaning_fast_hooks(options):
202
+ return False
203
+ self._mark_code_cleaning_complete()
204
+
208
205
  testing_passed, comprehensive_passed = self._run_main_quality_phases(options)
209
206
 
210
207
  if options.ai_agent:
@@ -217,23 +214,20 @@ class WorkflowPipeline:
217
214
  )
218
215
 
219
216
  def _start_iteration_tracking(self, options: OptionsProtocol) -> int:
220
- """Start iteration tracking for AI agent mode."""
221
217
  iteration = 1
222
218
  if options.ai_agent and self._should_debug():
223
219
  self.debugger.log_iteration_start(iteration)
224
220
  return iteration
225
221
 
226
222
  def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
227
- """Run initial fast hooks phase and handle failure."""
228
223
  fast_hooks_passed = self._run_fast_hooks_phase(options)
229
224
  if not fast_hooks_passed:
230
225
  if options.ai_agent and self._should_debug():
231
226
  self.debugger.log_iteration_end(iteration, False)
232
- return False # Fast hooks must pass before proceeding
227
+ return False
233
228
  return True
234
229
 
235
230
  def _run_main_quality_phases(self, options: OptionsProtocol) -> tuple[bool, bool]:
236
- """Run tests and comprehensive hooks to collect ALL issues."""
237
231
  testing_passed = self._run_testing_phase(options)
238
232
  comprehensive_passed = self._run_comprehensive_hooks_phase(options)
239
233
  return testing_passed, comprehensive_passed
@@ -245,7 +239,6 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        """Handle AI agent workflow with failure collection and fixing."""
         if not testing_passed or not comprehensive_passed:
             success = await self._run_ai_agent_fixing_phase(options)
             if self._should_debug():
@@ -254,7 +247,7 @@ class WorkflowPipeline:

         if self._should_debug():
             self.debugger.log_iteration_end(iteration, True)
-        return True  # All phases passed, no fixes needed
+        return True

     def _handle_standard_workflow(
         self,
@@ -263,21 +256,19 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        """Handle standard workflow where all phases must pass."""
         success = testing_passed and comprehensive_passed

-        # Debug information for workflow continuation issues
         if not success and getattr(options, "verbose", False):
             self.console.print(
-                f"[yellow]⚠️ Workflow stopped - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
+                f"[yellow]⚠️ Workflow stopped-testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/ yellow]"
             )
             if not testing_passed:
                 self.console.print(
-                    "[yellow] → Tests reported failure despite appearing successful[/yellow]"
+                    "[yellow] → Tests reported failure despite appearing successful[/ yellow]"
                 )
             if not comprehensive_passed:
                 self.console.print(
-                    "[yellow] → Comprehensive hooks reported failure despite appearing successful[/yellow]"
+                    "[yellow] → Comprehensive hooks reported failure despite appearing successful[/ yellow]"
                 )

         if options.ai_agent and self._should_debug():
@@ -303,8 +294,7 @@ class WorkflowPipeline:
             self.session.fail_task("workflow", "Testing failed")
             self._handle_test_failures()
             self._update_mcp_status("tests", "failed")
-            # In AI agent mode, continue to collect more failures
-            # In non-AI mode, this will be handled by caller
+
         else:
             self._update_mcp_status("tests", "completed")

@@ -317,8 +307,7 @@ class WorkflowPipeline:
         if not success:
             self.session.fail_task("comprehensive_hooks", "Comprehensive hooks failed")
             self._update_mcp_status("comprehensive", "failed")
-            # In AI agent mode, continue to collect more failures
-            # In non-AI mode, this will be handled by caller
+
         else:
             self._update_mcp_status("comprehensive", "completed")

@@ -328,7 +317,41 @@ class WorkflowPipeline:
         if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
             self._mcp_state_manager.update_stage_status(stage, status)

-        self.session.update_stage(stage, status)
+    def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Run code cleaning phase after fast hooks but before comprehensive hooks."""
+        self.console.print("\n[bold blue]🧹 Running Code Cleaning Phase...[/bold blue]")
+
+        success = self.phases.run_cleaning_phase(options)
+        if success:
+            self.console.print("[green]✅ Code cleaning completed successfully[/green]")
+        else:
+            self.console.print("[red]❌ Code cleaning failed[/red]")
+            self.session.fail_task("workflow", "Code cleaning phase failed")
+
+        return success
+
+    def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
+        """Run fast hooks again after code cleaning for sanity check."""
+        self.console.print(
+            "\n[bold cyan]🔍 Running Post-Cleaning Fast Hooks Sanity Check...[/bold cyan]"
+        )
+
+        success = self._run_fast_hooks_phase(options)
+        if success:
+            self.console.print("[green]✅ Post-cleaning sanity check passed[/green]")
+        else:
+            self.console.print("[red]❌ Post-cleaning sanity check failed[/red]")
+            self.session.fail_task("workflow", "Post-cleaning fast hooks failed")
+
+        return success
+
+    def _has_code_cleaning_run(self) -> bool:
+        """Check if code cleaning has already run in this workflow."""
+        return getattr(self, "_code_cleaning_complete", False)
+
+    def _mark_code_cleaning_complete(self) -> None:
+        """Mark code cleaning as complete for this workflow."""
+        self._code_cleaning_complete = True

     def _handle_test_failures(self) -> None:
         if not (hasattr(self, "_mcp_state_manager") and self._mcp_state_manager):
@@ -340,7 +363,6 @@ class WorkflowPipeline:

         failures = test_manager.get_test_failures()

-        # Log test failure count for debugging
         if self._should_debug():
             self.debugger.log_test_failures(len(failures))

@@ -351,7 +373,7 @@ class WorkflowPipeline:
                 id=f"test_failure_{i}",
                 type="test_failure",
                 message=failure.strip(),
-                file_path="tests/",
+                file_path="tests /",
                 priority=Priority.HIGH,
                 stage="tests",
                 auto_fixable=False,
@@ -359,22 +381,39 @@ class WorkflowPipeline:
             self._mcp_state_manager.add_issue(issue)

     def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
-        """Execute standard hooks workflow with proper state management."""
         self._update_hooks_status_running()

-        hooks_success = self.phases.run_hooks_phase(options)
+        # Run fast hooks first
+        fast_hooks_success = self._run_fast_hooks_phase(options)
+        if not fast_hooks_success:
+            self._handle_hooks_completion(False)
+            return False
+
+        # Run code cleaning after fast hooks but before comprehensive hooks
+        if getattr(options, "clean", False):
+            if not self._run_code_cleaning_phase(options):
+                self._handle_hooks_completion(False)
+                return False
+            # Run fast hooks again after cleaning for sanity check
+            if not self._run_post_cleaning_fast_hooks(options):
+                self._handle_hooks_completion(False)
+                return False
+            self._mark_code_cleaning_complete()
+
+        # Run comprehensive hooks
+        comprehensive_success = self._run_comprehensive_hooks_phase(options)
+
+        hooks_success = fast_hooks_success and comprehensive_success
         self._handle_hooks_completion(hooks_success)

         return hooks_success

     def _update_hooks_status_running(self) -> None:
-        """Update MCP state to running for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "running")
             self._mcp_state_manager.update_stage_status("comprehensive", "running")

     def _handle_hooks_completion(self, hooks_success: bool) -> None:
-        """Handle hooks completion with appropriate status updates."""
         if not hooks_success:
             self.session.fail_task("workflow", "Hooks failed")
             self._update_hooks_status_failed()
@@ -382,28 +421,35 @@ class WorkflowPipeline:
             self._update_hooks_status_completed()

     def _has_mcp_state_manager(self) -> bool:
-        """Check if MCP state manager is available."""
         return hasattr(self, "_mcp_state_manager") and self._mcp_state_manager

     def _update_hooks_status_failed(self) -> None:
-        """Update MCP state to failed for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "failed")
             self._mcp_state_manager.update_stage_status("comprehensive", "failed")

     def _update_hooks_status_completed(self) -> None:
-        """Update MCP state to completed for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "completed")
             self._mcp_state_manager.update_stage_status("comprehensive", "completed")

     async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
-        """Run AI agent fixing phase to analyze and fix collected failures."""
         self._update_mcp_status("ai_fixing", "running")
         self.logger.info("Starting AI agent fixing phase")
         self._log_debug_phase_start()

         try:
+            # If code cleaning is enabled and hasn't run yet, run it first
+            # to provide cleaner, more standardized code for the AI agents
+            if getattr(options, "clean", False) and not self._has_code_cleaning_run():
+                self.console.print(
+                    "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
+                )
+                if self._run_code_cleaning_phase(options):
+                    # Run fast hooks sanity check after cleaning
+                    self._run_post_cleaning_fast_hooks(options)
+                    self._mark_code_cleaning_complete()
+
             agent_coordinator = self._setup_agent_coordinator()
             issues = await self._collect_issues_from_failures()

@@ -419,7 +465,6 @@ class WorkflowPipeline:
            return self._handle_fixing_phase_error(e)

     def _log_debug_phase_start(self) -> None:
-        """Log debug information for phase start."""
         if self._should_debug():
             self.debugger.log_workflow_phase(
                 "ai_agent_fixing",
@@ -428,7 +473,6 @@ class WorkflowPipeline:
             )

     def _setup_agent_coordinator(self) -> AgentCoordinator:
-        """Set up agent coordinator with proper context."""
         from crackerjack.agents.coordinator import AgentCoordinator

         agent_context = AgentContext(
@@ -441,7 +485,6 @@ class WorkflowPipeline:
         return agent_coordinator

     def _handle_no_issues_found(self) -> bool:
-        """Handle case when no issues are collected."""
         self.logger.info("No issues collected for AI agent fixing")
         self._update_mcp_status("ai_fixing", "completed")
         return True
@@ -449,7 +492,6 @@ class WorkflowPipeline:
     async def _process_fix_results(
         self, options: OptionsProtocol, fix_result: t.Any
     ) -> bool:
-        """Process fix results and verify success."""
         verification_success = await self._verify_fixes_applied(options, fix_result)
         success = fix_result.success and verification_success

@@ -462,7 +504,6 @@ class WorkflowPipeline:
         return success

     def _handle_successful_fixes(self, fix_result: t.Any) -> None:
-        """Handle successful fix results."""
         self.logger.info(
             "AI agents successfully fixed all issues and verification passed"
         )
@@ -472,10 +513,9 @@ class WorkflowPipeline:
     def _handle_failed_fixes(
         self, fix_result: t.Any, verification_success: bool
     ) -> None:
-        """Handle failed fix results."""
         if not verification_success:
             self.logger.warning(
-                "AI agent fixes did not pass verification - issues still exist"
+                "AI agent fixes did not pass verification-issues still exist"
             )
         else:
             self.logger.warning(
@@ -484,7 +524,6 @@ class WorkflowPipeline:
         self._update_mcp_status("ai_fixing", "failed")

     def _log_fix_counts_if_debugging(self, fix_result: t.Any) -> None:
-        """Log fix counts for debugging if debug mode is enabled."""
         if not self._should_debug():
             return

@@ -497,7 +536,6 @@ class WorkflowPipeline:
         self.debugger.log_hook_fixes(hook_fixes)

     def _log_debug_phase_completion(self, success: bool, fix_result: t.Any) -> None:
-        """Log debug information for phase completion."""
         if self._should_debug():
             self.debugger.log_workflow_phase(
                 "ai_agent_fixing",
@@ -510,7 +548,6 @@ class WorkflowPipeline:
             )

     def _handle_fixing_phase_error(self, error: Exception) -> bool:
-        """Handle errors during the fixing phase."""
         self.logger.exception(f"AI agent fixing phase failed: {error}")
         self.session.fail_task("ai_fixing", f"AI agent fixing failed: {error}")
         self._update_mcp_status("ai_fixing", "failed")
@@ -527,53 +564,67 @@ class WorkflowPipeline:
     async def _verify_fixes_applied(
         self, options: OptionsProtocol, fix_result: t.Any
     ) -> bool:
-        """Verify that AI agent fixes actually resolved the issues by re-running checks."""
         if not fix_result.fixes_applied:
-            return True  # No fixes were applied, nothing to verify
+            return True

         self.logger.info("Verifying AI agent fixes by re-running quality checks")

-        # Re-run the phases that previously failed to verify fixes
         verification_success = True

-        # Check if we need to re-run tests
-        if any("test" in fix.lower() for fix in fix_result.fixes_applied):
-            self.logger.info("Re-running tests to verify test fixes")
-            test_success = self.phases.run_testing_phase(options)
-            if not test_success:
-                self.logger.warning(
-                    "Test verification failed - test fixes did not work"
-                )
+        # Verify test fixes
+        if self._should_verify_test_fixes(fix_result.fixes_applied):
+            if not await self._verify_test_fixes(options):
+                verification_success = False
+
+        # Verify hook fixes
+        if self._should_verify_hook_fixes(fix_result.fixes_applied):
+            if not await self._verify_hook_fixes(options):
                 verification_success = False

-        # Check if we need to re-run comprehensive hooks
+        self._log_verification_result(verification_success)
+        return verification_success
+
+    def _should_verify_test_fixes(self, fixes_applied: list[str]) -> bool:
+        """Check if test fixes need verification."""
+        return any("test" in fix.lower() for fix in fixes_applied)
+
+    async def _verify_test_fixes(self, options: OptionsProtocol) -> bool:
+        """Verify test fixes by re-running tests."""
+        self.logger.info("Re-running tests to verify test fixes")
+        test_success = self.phases.run_testing_phase(options)
+        if not test_success:
+            self.logger.warning("Test verification failed-test fixes did not work")
+        return test_success
+
+    def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
+        """Check if hook fixes need verification."""
         hook_fixes = [
             f
-            for f in fix_result.fixes_applied
+            for f in fixes_applied
             if "hook" not in f.lower()
             or "complexity" in f.lower()
             or "type" in f.lower()
         ]
-        if hook_fixes:
-            self.logger.info("Re-running comprehensive hooks to verify hook fixes")
-            hook_success = self.phases.run_comprehensive_hooks_only(options)
-            if not hook_success:
-                self.logger.warning(
-                    "Hook verification failed - hook fixes did not work"
-                )
-                verification_success = False
-
+        return bool(hook_fixes)
+
+    async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
+        """Verify hook fixes by re-running comprehensive hooks."""
+        self.logger.info("Re-running comprehensive hooks to verify hook fixes")
+        hook_success = self.phases.run_comprehensive_hooks_only(options)
+        if not hook_success:
+            self.logger.warning("Hook verification failed-hook fixes did not work")
+        return hook_success
+
+    def _log_verification_result(self, verification_success: bool) -> None:
+        """Log the final verification result."""
         if verification_success:
             self.logger.info("All AI agent fixes verified successfully")
         else:
             self.logger.error(
-                "Verification failed - some fixes did not resolve the issues"
+                "Verification failed-some fixes did not resolve the issues"
             )

-        return verification_success
-
     async def _collect_issues_from_failures(self) -> list[Issue]:
-        """Collect issues from test and comprehensive hook failures."""
         issues: list[Issue] = []

         test_issues, test_count = self._collect_test_failure_issues()
@@ -587,7 +638,6 @@ class WorkflowPipeline:
         return issues

     def _collect_test_failure_issues(self) -> tuple[list[Issue], int]:
-        """Collect test failure issues and return count."""
         issues: list[Issue] = []
         test_count = 0

@@ -599,7 +649,7 @@ class WorkflowPipeline:
             test_count = len(test_failures)
             for i, failure in enumerate(
                 test_failures[:20],
-            ):  # Limit to prevent overload
+            ):
                 issue = Issue(
                     id=f"test_failure_{i}",
                     type=IssueType.TEST_FAILURE,
@@ -612,7 +662,6 @@ class WorkflowPipeline:
         return issues, test_count

     def _collect_hook_failure_issues(self) -> tuple[list[Issue], int]:
-        """Collect hook failure issues and return count."""
         issues: list[Issue] = []
         hook_count = 0

@@ -625,7 +674,6 @@ class WorkflowPipeline:
         return issues, hook_count

     def _process_hook_results(self, hook_results: t.Any) -> tuple[list[Issue], int]:
-        """Process hook results and extract issues."""
         issues: list[Issue] = []
         hook_count = 0

@@ -640,17 +688,14 @@ class WorkflowPipeline:
         return issues, hook_count

     def _is_hook_result_failed(self, result: t.Any) -> bool:
-        """Check if hook result indicates failure."""
         return result.status in ("failed", "error", "timeout")

     def _extract_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
-        """Extract issues from a single hook result."""
         if result.issues_found:
             return self._create_specific_issues_from_hook_result(result)
         return [self._create_generic_issue_from_hook_result(result)]

     def _create_specific_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
-        """Create specific issues from hook result with detailed information."""
         issues: list[Issue] = []
         hook_context = f"{result.name}: "

@@ -661,7 +706,6 @@ class WorkflowPipeline:
         return issues

     def _create_generic_issue_from_hook_result(self, result: t.Any) -> Issue:
-        """Create a generic issue for hook failure without specific details."""
         issue_type = self._determine_hook_issue_type(result.name)
         return Issue(
             id=f"hook_failure_{result.name}",
@@ -672,13 +716,16 @@ class WorkflowPipeline:
         )

     def _determine_hook_issue_type(self, hook_name: str) -> IssueType:
-        """Determine issue type based on hook name."""
         formatting_hooks = {
             "trailing-whitespace",
             "end-of-file-fixer",
             "ruff-format",
             "ruff-check",
         }
+
+        if hook_name == "validate-regex-patterns":
+            return IssueType.REGEX_VALIDATION
+
         return (
             IssueType.FORMATTING
             if hook_name in formatting_hooks
@@ -686,7 +733,6 @@ class WorkflowPipeline:
         )

     def _fallback_to_session_tracker(self) -> tuple[list[Issue], int]:
-        """Fallback to session tracker if hook manager fails."""
         issues: list[Issue] = []
         hook_count = 0

@@ -702,14 +748,12 @@ class WorkflowPipeline:
         return issues, hook_count

     def _is_failed_hook_task(self, task_data: t.Any, task_id: str) -> bool:
-        """Check if a task is a failed hook task."""
         return task_data.status == "failed" and task_id in (
             "fast_hooks",
             "comprehensive_hooks",
         )

     def _process_hook_failure(self, task_id: str, task_data: t.Any) -> list[Issue]:
-        """Process a single hook failure and return corresponding issues."""
         error_msg = getattr(task_data, "error_message", "Unknown error")
         specific_issues = self._parse_hook_error_details(task_id, error_msg)

@@ -719,7 +763,6 @@ class WorkflowPipeline:
         return [self._create_generic_hook_issue(task_id, error_msg)]

     def _create_generic_hook_issue(self, task_id: str, error_msg: str) -> Issue:
-        """Create a generic issue for unspecific hook failures."""
         issue_type = IssueType.FORMATTING if "fast" in task_id else IssueType.TYPE_ERROR
         return Issue(
             id=f"hook_failure_{task_id}",
@@ -730,128 +773,138 @@ class WorkflowPipeline:
         )

     def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
-        """Parse specific hook failure details to create targeted issues."""
+        """Parse hook error details and create specific issues."""
         issues: list[Issue] = []

-        # For comprehensive hooks, parse specific tool failures
         if task_id == "comprehensive_hooks":
-            # Check for complexipy failures (complexity violations)
-            if "complexipy" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="complexipy_violation",
-                        type=IssueType.COMPLEXITY,
-                        severity=Priority.HIGH,
-                        message="Code complexity violation detected by complexipy",
-                        stage="comprehensive",
-                    )
-                )
+            issues.extend(self._parse_comprehensive_hook_errors(error_msg))
+        elif task_id == "fast_hooks":
+            issues.append(self._create_fast_hook_issue())

-            # Check for pyright failures (type errors)
-            if "pyright" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="pyright_type_error",
-                        type=IssueType.TYPE_ERROR,
-                        severity=Priority.HIGH,
-                        message="Type checking errors detected by pyright",
-                        stage="comprehensive",
-                    )
-                )
+        return issues

-            # Check for bandit failures (security issues)
-            if "bandit" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="bandit_security_issue",
-                        type=IssueType.SECURITY,
-                        severity=Priority.HIGH,
-                        message="Security vulnerabilities detected by bandit",
-                        stage="comprehensive",
-                    )
-                )
+    def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
+        """Parse comprehensive hook error messages and create specific issues."""
+        issues: list[Issue] = []
+        error_lower = error_msg.lower()

-            # Check for refurb failures (code quality issues)
-            if "refurb" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="refurb_quality_issue",
-                        type=IssueType.PERFORMANCE,  # Use PERFORMANCE as closest match for refurb issues
-                        severity=Priority.MEDIUM,
-                        message="Code quality issues detected by refurb",
-                        stage="comprehensive",
-                    )
-                )
+        # Check each error type
+        complexity_issue = self._check_complexity_error(error_lower)
+        if complexity_issue:
+            issues.append(complexity_issue)

-            # Check for vulture failures (dead code)
-            if "vulture" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="vulture_dead_code",
-                        type=IssueType.DEAD_CODE,
-                        severity=Priority.MEDIUM,
-                        message="Dead code detected by vulture",
-                        stage="comprehensive",
-                    )
-                )
+        type_error_issue = self._check_type_error(error_lower)
+        if type_error_issue:
+            issues.append(type_error_issue)

-        elif task_id == "fast_hooks":
-            # Fast hooks are typically formatting issues
-            issues.append(
-                Issue(
-                    id="fast_hooks_formatting",
-                    type=IssueType.FORMATTING,
-                    severity=Priority.LOW,
-                    message="Code formatting issues detected",
-                    stage="fast",
-                )
-            )
+        security_issue = self._check_security_error(error_lower)
+        if security_issue:
+            issues.append(security_issue)
+
+        performance_issue = self._check_performance_error(error_lower)
+        if performance_issue:
+            issues.append(performance_issue)
+
+        dead_code_issue = self._check_dead_code_error(error_lower)
+        if dead_code_issue:
+            issues.append(dead_code_issue)
+
+        regex_issue = self._check_regex_validation_error(error_lower)
+        if regex_issue:
+            issues.append(regex_issue)

         return issues

+    def _check_complexity_error(self, error_lower: str) -> Issue | None:
+        """Check for complexity errors and create issue if found."""
+        if "complexipy" in error_lower or "c901" in error_lower:
+            return Issue(
+                id="complexity_violation",
+                type=IssueType.COMPLEXITY,
+                severity=Priority.HIGH,
+                message="Code complexity violation detected",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_type_error(self, error_lower: str) -> Issue | None:
+        """Check for type errors and create issue if found."""
+        if "pyright" in error_lower:
+            return Issue(
+                id="pyright_type_error",
+                type=IssueType.TYPE_ERROR,
+                severity=Priority.HIGH,
+                message="Type checking errors detected by pyright",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_security_error(self, error_lower: str) -> Issue | None:
+        """Check for security errors and create issue if found."""
+        if "bandit" in error_lower:
+            return Issue(
+                id="bandit_security_issue",
+                type=IssueType.SECURITY,
+                severity=Priority.HIGH,
+                message="Security vulnerabilities detected by bandit",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_performance_error(self, error_lower: str) -> Issue | None:
+        """Check for performance errors and create issue if found."""
+        if "refurb" in error_lower:
+            return Issue(
+                id="refurb_quality_issue",
+                type=IssueType.PERFORMANCE,
+                severity=Priority.MEDIUM,
+                message="Code quality issues detected by refurb",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_dead_code_error(self, error_lower: str) -> Issue | None:
+        """Check for dead code errors and create issue if found."""
+        if "vulture" in error_lower:
+            return Issue(
+                id="vulture_dead_code",
+                type=IssueType.DEAD_CODE,
+                severity=Priority.MEDIUM,
+                message="Dead code detected by vulture",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_regex_validation_error(self, error_lower: str) -> Issue | None:
+        """Check for regex validation errors and create issue if found."""
+        regex_keywords = ("raw regex", "regex pattern", r"\g<", "replacement")
+        if "validate-regex-patterns" in error_lower or any(
+            keyword in error_lower for keyword in regex_keywords
+        ):
+            return Issue(
+                id="regex_validation_failure",
+                type=IssueType.REGEX_VALIDATION,
+                severity=Priority.HIGH,
+                message="Unsafe regex patterns detected by validate-regex-patterns",
+                stage="fast",
+            )
+        return None
+
+    def _create_fast_hook_issue(self) -> Issue:
+        """Create an issue for fast hook errors."""
+        return Issue(
+            id="fast_hooks_formatting",
+            type=IssueType.FORMATTING,
+            severity=Priority.LOW,
+            message="Code formatting issues detected",
+            stage="fast",
+        )
+
     def _parse_issues_for_agents(self, issue_strings: list[str]) -> list[Issue]:
-        """Parse string issues into structured Issue objects for AI agents."""
         issues: list[Issue] = []

         for i, issue_str in enumerate(issue_strings):
-            # Determine issue type from content patterns
-            issue_type = IssueType.FORMATTING
-            priority = Priority.MEDIUM
-
-            if any(
-                keyword in issue_str.lower()
-                for keyword in ("type", "annotation", "pyright")
-            ):
-                issue_type = IssueType.TYPE_ERROR
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("security", "bandit", "hardcoded")
-            ):
-                issue_type = IssueType.SECURITY
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower() for keyword in ("complexity", "complexipy")
-            ):
-                issue_type = IssueType.COMPLEXITY
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("unused", "dead", "vulture")
-            ):
-                issue_type = IssueType.DEAD_CODE
-                priority = Priority.MEDIUM
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("performance", "refurb", "furb")
-            ):
-                issue_type = IssueType.PERFORMANCE
-                priority = Priority.MEDIUM
-            elif any(
-                keyword in issue_str.lower() for keyword in ("import", "creosote")
-            ):
-                issue_type = IssueType.IMPORT_ERROR
-                priority = Priority.MEDIUM
+            issue_type, priority = self._classify_issue(issue_str)

             issue = Issue(
                 id=f"parsed_issue_{i}",
@@ -864,10 +917,80 @@ class WorkflowPipeline:

         return issues

+    def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
+        """Classify an issue string to determine its type and priority."""
+        issue_lower = issue_str.lower()
+
+        # Check high-priority issues first
+        if self._is_type_error(issue_lower):
+            return IssueType.TYPE_ERROR, Priority.HIGH
+        if self._is_security_issue(issue_lower):
+            return IssueType.SECURITY, Priority.HIGH
+        if self._is_complexity_issue(issue_lower):
+            return IssueType.COMPLEXITY, Priority.HIGH
+        if self._is_regex_validation_issue(issue_lower):
+            return IssueType.REGEX_VALIDATION, Priority.HIGH
+
+        # Check medium-priority issues
+        if self._is_dead_code_issue(issue_lower):
+            return IssueType.DEAD_CODE, Priority.MEDIUM
+        if self._is_performance_issue(issue_lower):
+            return IssueType.PERFORMANCE, Priority.MEDIUM
+        if self._is_import_error(issue_lower):
+            return IssueType.IMPORT_ERROR, Priority.MEDIUM
+
+        # Default to formatting issue
+        return IssueType.FORMATTING, Priority.MEDIUM
+
+    def _is_type_error(self, issue_lower: str) -> bool:
+        """Check if issue is related to type errors."""
+        return any(
+            keyword in issue_lower for keyword in ("type", "annotation", "pyright")
+        )
+
+    def _is_security_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to security."""
+        return any(
+            keyword in issue_lower for keyword in ("security", "bandit", "hardcoded")
+        )
+
+    def _is_complexity_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to code complexity."""
+        return any(
+            keyword in issue_lower
+            for keyword in ("complexity", "complexipy", "c901", "too complex")
+        )
+
+    def _is_regex_validation_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to regex validation."""
+        return any(
+            keyword in issue_lower
+            for keyword in (
+                "regex",
+                "pattern",
+                "validate-regex-patterns",
+                r"\g<",
+                "replacement",
+            )
+        )
+
+    def _is_dead_code_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to dead code."""
+        return any(keyword in issue_lower for keyword in ("unused", "dead", "vulture"))
+
+    def _is_performance_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to performance."""
+        return any(
+            keyword in issue_lower for keyword in ("performance", "refurb", "furb")
+        )
+
+    def _is_import_error(self, issue_lower: str) -> bool:
+        """Check if issue is related to import errors."""
+        return any(keyword in issue_lower for keyword in ("import", "creosote"))
+
     def _log_failure_counts_if_debugging(
         self, test_count: int, hook_count: int
     ) -> None:
-        """Log failure counts if debugging is enabled."""
         if self._should_debug():
             self.debugger.log_test_failures(test_count)
             self.debugger.log_hook_failures(hook_count)
@@ -881,14 +1004,17 @@ class WorkflowOrchestrator:
         dry_run: bool = False,
         web_job_id: str | None = None,
         verbose: bool = False,
+        debug: bool = False,
     ) -> None:
         self.console = console or Console(force_terminal=True)
         self.pkg_path = pkg_path or Path.cwd()
         self.dry_run = dry_run
         self.web_job_id = web_job_id
         self.verbose = verbose
+        self.debug = debug

         from crackerjack.models.protocols import (
+            ConfigMergeServiceProtocol,
             FileSystemInterface,
             GitInterface,
             HookManager,
@@ -896,9 +1022,14 @@ class WorkflowOrchestrator:
             TestManagerProtocol,
         )

-        from .container import create_container
+        # Initialize logging first so container creation respects log levels
+        self._initialize_logging()
+
+        self.logger = get_logger("crackerjack.orchestrator")
+
+        from .enhanced_container import create_enhanced_container

-        self.container = create_container(
+        self.container = create_enhanced_container(
             console=self.console,
             pkg_path=self.pkg_path,
             dry_run=self.dry_run,
@@ -915,6 +1046,7 @@ class WorkflowOrchestrator:
             hook_manager=self.container.get(HookManager),
             test_manager=self.container.get(TestManagerProtocol),
             publish_manager=self.container.get(PublishManager),
+            config_merge_service=self.container.get(ConfigMergeServiceProtocol),
         )

         self.pipeline = WorkflowPipeline(
@@ -924,10 +1056,6 @@ class WorkflowOrchestrator:
             phases=self.phases,
         )

-        self.logger = get_logger("crackerjack.orchestrator")
-
-        self._initialize_logging()
-
     def _initialize_logging(self) -> None:
         from crackerjack.services.log_manager import get_log_manager

@@ -935,9 +1063,15 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)

-        setup_structured_logging(log_file=debug_log_file)
+        # Set log level based on verbosity - DEBUG only in verbose or debug mode
+        log_level = "DEBUG" if (self.verbose or self.debug) else "INFO"
+        setup_structured_logging(
+            level=log_level, json_output=False, log_file=debug_log_file
+        )

-        self.logger.info(
+        # Use a temporary logger for the initialization message
+        temp_logger = get_logger("crackerjack.orchestrator.init")
+        temp_logger.debug(
             "Structured logging initialized",
             log_file=str(debug_log_file),
             log_directory=str(log_manager.log_dir),
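
The most significant behavioral change in this diff is the relocation of code cleaning: it no longer runs as a standalone phase at the start of _execute_workflow_phases, but inside the quality workflows, after the fast hooks pass and before the comprehensive hooks, with a second fast-hooks pass as a post-cleaning sanity check. The sketch below illustrates that ordering only; the stub functions are hypothetical stand-ins for the PhaseCoordinator calls shown in the diff above, not crackerjack's actual API.

# Illustrative sketch of the new phase ordering (assumed stubs, not real API).
def run_fast_hooks() -> bool:
    return True  # stand-in for the fast (formatting) hook tier

def run_code_cleaning() -> bool:
    return True  # stand-in for phases.run_cleaning_phase

def run_comprehensive_hooks() -> bool:
    return True  # stand-in for the comprehensive (type/security/complexity) hook tier

def quality_phase(clean: bool) -> bool:
    if not run_fast_hooks():  # 1. fast hooks must pass before anything else
        return False
    if clean:
        if not run_code_cleaning():  # 2. cleaning runs between the two hook tiers
            return False
        if not run_fast_hooks():  # 3. re-run fast hooks, since cleaning rewrites files
            return False
    return run_comprehensive_hooks()  # 4. comprehensive hooks see the cleaned code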