crackerjack 0.31.9__py3-none-any.whl → 0.31.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +282 -95
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +355 -204
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +52 -62
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +51 -76
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +78 -44
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +281 -433
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.9.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
crackerjack/core/workflow_orchestrator.py
@@ -58,7 +58,7 @@ class WorkflowPipeline:
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
         with LoggingContext(
             "workflow_execution",
-            testing=getattr(options, "testing", False),
+            testing=getattr(options, "test", False),
             skip_hooks=getattr(options, "skip_hooks", False),
         ):
             start_time = time.time()
@@ -78,7 +78,6 @@ class WorkflowPipeline:
         self.session.cleanup_resources()

     def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
-        """Initialize session tracking and debug logging for workflow execution."""
         self.session.initialize_session_tracking(options)
         self.session.track_task("workflow", "Complete crackerjack workflow")

@@ -87,7 +86,6 @@ class WorkflowPipeline:
         self._log_workflow_startup_info(options)

     def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
-        """Log debug information for workflow startup."""
         if not self._should_debug():
             return

@@ -95,22 +93,20 @@ class WorkflowPipeline:
             "workflow_execution",
             "started",
             details={
-                "testing": getattr(options, "testing", False),
+                "testing": getattr(options, "test", False),
                 "skip_hooks": getattr(options, "skip_hooks", False),
                 "ai_agent": getattr(options, "ai_agent", False),
             },
         )

     def _configure_session_cleanup(self, options: OptionsProtocol) -> None:
-        """Configure session cleanup settings if specified."""
         if hasattr(options, "cleanup"):
             self.session.set_cleanup_config(options.cleanup)

     def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
-        """Log informational message about workflow startup."""
         self.logger.info(
             "Starting complete workflow execution",
-            testing=getattr(options, "testing", False),
+            testing=getattr(options, "test", False),
             skip_hooks=getattr(options, "skip_hooks", False),
             package_path=str(self.pkg_path),
         )
@@ -118,7 +114,6 @@ class WorkflowPipeline:
     async def _execute_workflow_with_timing(
         self, options: OptionsProtocol, start_time: float
     ) -> bool:
-        """Execute workflow phases and handle success/completion logging."""
         success = await self._execute_workflow_phases(options)
         self.session.finalize_session(start_time, success)

@@ -129,7 +124,6 @@ class WorkflowPipeline:
         return success

     def _log_workflow_completion(self, success: bool, duration: float) -> None:
-        """Log workflow completion information."""
         self.logger.info(
             "Workflow execution completed",
             success=success,
@@ -137,7 +131,6 @@ class WorkflowPipeline:
         )

     def _log_workflow_completion_debug(self, success: bool, duration: float) -> None:
-        """Log debug information for workflow completion."""
         if not self._should_debug():
             return

@@ -152,14 +145,12 @@ class WorkflowPipeline:
         self.debugger.print_debug_summary()

     def _handle_user_interruption(self) -> bool:
-        """Handle KeyboardInterrupt gracefully."""
         self.console.print("Interrupted by user")
         self.session.fail_task("workflow", "Interrupted by user")
         self.logger.warning("Workflow interrupted by user")
         return False

     def _handle_workflow_exception(self, error: Exception) -> bool:
-        """Handle unexpected workflow exceptions."""
         self.console.print(f"Error: {error}")
         self.session.fail_task("workflow", f"Unexpected error: {error}")
         self.logger.exception(
@@ -172,10 +163,9 @@ class WorkflowPipeline:
     async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
         success = True
         self.phases.run_configuration_phase(options)
-        if not self.phases.run_cleaning_phase(options):
-            success = False
-            self.session.fail_task("workflow", "Cleaning phase failed")
-            return False
+
+        # Code cleaning is now integrated into the quality phase
+        # to run after fast hooks but before comprehensive hooks
         if not await self._execute_quality_phase(options):
             success = False
             return False
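The net effect of the hunk above: code cleaning no longer runs as its own up-front phase and is instead interleaved into the quality phase. A minimal sketch of the resulting ordering — the callables here are hypothetical stand-ins for the pipeline's phase methods, not crackerjack's actual API:

    from collections.abc import Callable

    def run_quality_phase(
        fast_hooks: Callable[[], bool],
        cleaning: Callable[[], bool],
        comprehensive_hooks: Callable[[], bool],
        clean: bool,
    ) -> bool:
        # Ordering introduced in this release: fast hooks gate everything,
        # cleaning runs only after they pass, then fast hooks re-run as a
        # sanity check, and comprehensive hooks see the already-cleaned code.
        if not fast_hooks():
            return False
        if clean:
            if not cleaning():
                return False
            if not fast_hooks():  # post-cleaning sanity check
                return False
        return comprehensive_hooks()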
@@ -193,7 +183,7 @@ class WorkflowPipeline:
             return self._run_fast_hooks_phase(options)
         if hasattr(options, "comp") and options.comp:
             return self._run_comprehensive_hooks_phase(options)
-        if options.test:
+        if getattr(options, "test", False):
             return await self._execute_test_workflow(options)
         return self._execute_standard_hooks_workflow(options)

@@ -203,6 +193,15 @@ class WorkflowPipeline:
         if not self._run_initial_fast_hooks(options, iteration):
             return False

+        # Run code cleaning after fast hooks but before comprehensive hooks
+        if getattr(options, "clean", False):
+            if not self._run_code_cleaning_phase(options):
+                return False
+            # Run fast hooks again after cleaning for sanity check
+            if not self._run_post_cleaning_fast_hooks(options):
+                return False
+            self._mark_code_cleaning_complete()
+
         testing_passed, comprehensive_passed = self._run_main_quality_phases(options)

         if options.ai_agent:
@@ -215,23 +214,20 @@ class WorkflowPipeline:
         )

     def _start_iteration_tracking(self, options: OptionsProtocol) -> int:
-        """Start iteration tracking for AI agent mode."""
         iteration = 1
         if options.ai_agent and self._should_debug():
             self.debugger.log_iteration_start(iteration)
         return iteration

     def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
-        """Run initial fast hooks phase and handle failure."""
         fast_hooks_passed = self._run_fast_hooks_phase(options)
         if not fast_hooks_passed:
             if options.ai_agent and self._should_debug():
                 self.debugger.log_iteration_end(iteration, False)
-            return False  # Fast hooks must pass before proceeding
+            return False
         return True

     def _run_main_quality_phases(self, options: OptionsProtocol) -> tuple[bool, bool]:
-        """Run tests and comprehensive hooks to collect ALL issues."""
         testing_passed = self._run_testing_phase(options)
         comprehensive_passed = self._run_comprehensive_hooks_phase(options)
         return testing_passed, comprehensive_passed
@@ -243,7 +239,6 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        """Handle AI agent workflow with failure collection and fixing."""
         if not testing_passed or not comprehensive_passed:
             success = await self._run_ai_agent_fixing_phase(options)
             if self._should_debug():
@@ -252,7 +247,7 @@


             self.debugger.log_iteration_end(iteration, True)
-        return True  # All phases passed, no fixes needed
+        return True

     def _handle_standard_workflow(
         self,
@@ -261,8 +256,21 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        """Handle standard workflow where all phases must pass."""
         success = testing_passed and comprehensive_passed
+
+        if not success and getattr(options, "verbose", False):
+            self.console.print(
+                f"[yellow]⚠️ Workflow stopped-testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
+            )
+            if not testing_passed:
+                self.console.print(
+                    "[yellow] → Tests reported failure despite appearing successful[/yellow]"
+                )
+            if not comprehensive_passed:
+                self.console.print(
+                    "[yellow] → Comprehensive hooks reported failure despite appearing successful[/yellow]"
+                )
+
         if options.ai_agent and self._should_debug():
             self.debugger.log_iteration_end(iteration, success)
         return success
@@ -286,8 +294,7 @@ class WorkflowPipeline:
             self.session.fail_task("workflow", "Testing failed")
             self._handle_test_failures()
             self._update_mcp_status("tests", "failed")
-            # In AI agent mode, continue to collect more failures
-            # In non-AI mode, this will be handled by caller
+
         else:
             self._update_mcp_status("tests", "completed")

@@ -300,8 +307,7 @@ class WorkflowPipeline:
         if not success:
             self.session.fail_task("comprehensive_hooks", "Comprehensive hooks failed")
             self._update_mcp_status("comprehensive", "failed")
-            # In AI agent mode, continue to collect more failures
-            # In non-AI mode, this will be handled by caller
+
         else:
             self._update_mcp_status("comprehensive", "completed")

@@ -311,7 +317,41 @@ class WorkflowPipeline:
         if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
             self._mcp_state_manager.update_stage_status(stage, status)

-        self.session.update_stage(stage, status)
+    def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        """Run code cleaning phase after fast hooks but before comprehensive hooks."""
+        self.console.print("\n[bold blue]🧹 Running Code Cleaning Phase...[/bold blue]")
+
+        success = self.phases.run_cleaning_phase(options)
+        if success:
+            self.console.print("[green]✅ Code cleaning completed successfully[/green]")
+        else:
+            self.console.print("[red]❌ Code cleaning failed[/red]")
+            self.session.fail_task("workflow", "Code cleaning phase failed")
+
+        return success
+
+    def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
+        """Run fast hooks again after code cleaning for sanity check."""
+        self.console.print(
+            "\n[bold cyan]🔍 Running Post-Cleaning Fast Hooks Sanity Check...[/bold cyan]"
+        )
+
+        success = self._run_fast_hooks_phase(options)
+        if success:
+            self.console.print("[green]✅ Post-cleaning sanity check passed[/green]")
+        else:
+            self.console.print("[red]❌ Post-cleaning sanity check failed[/red]")
+            self.session.fail_task("workflow", "Post-cleaning fast hooks failed")
+
+        return success
+
+    def _has_code_cleaning_run(self) -> bool:
+        """Check if code cleaning has already run in this workflow."""
+        return getattr(self, "_code_cleaning_complete", False)
+
+    def _mark_code_cleaning_complete(self) -> None:
+        """Mark code cleaning as complete for this workflow."""
+        self._code_cleaning_complete = True

     def _handle_test_failures(self) -> None:
         if not (hasattr(self, "_mcp_state_manager") and self._mcp_state_manager):
@@ -323,7 +363,6 @@

         failures = test_manager.get_test_failures()

-        # Log test failure count for debugging
         if self._should_debug():
             self.debugger.log_test_failures(len(failures))

@@ -334,7 +373,7 @@ class WorkflowPipeline:
                issue = Issue(
                    id=f"test_failure_{i}",
                    type="test_failure",
                    message=failure.strip(),
-                   file_path="tests/",
+                   file_path="tests /",
                    priority=Priority.HIGH,
                    stage="tests",
                    auto_fixable=False,
@@ -342,22 +381,39 @@ class WorkflowPipeline:
             self._mcp_state_manager.add_issue(issue)

     def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
-        """Execute standard hooks workflow with proper state management."""
         self._update_hooks_status_running()

-        hooks_success = self.phases.run_hooks_phase(options)
+        # Run fast hooks first
+        fast_hooks_success = self._run_fast_hooks_phase(options)
+        if not fast_hooks_success:
+            self._handle_hooks_completion(False)
+            return False
+
+        # Run code cleaning after fast hooks but before comprehensive hooks
+        if getattr(options, "clean", False):
+            if not self._run_code_cleaning_phase(options):
+                self._handle_hooks_completion(False)
+                return False
+            # Run fast hooks again after cleaning for sanity check
+            if not self._run_post_cleaning_fast_hooks(options):
+                self._handle_hooks_completion(False)
+                return False
+            self._mark_code_cleaning_complete()
+
+        # Run comprehensive hooks
+        comprehensive_success = self._run_comprehensive_hooks_phase(options)
+
+        hooks_success = fast_hooks_success and comprehensive_success
         self._handle_hooks_completion(hooks_success)

         return hooks_success

     def _update_hooks_status_running(self) -> None:
-        """Update MCP state to running for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "running")
             self._mcp_state_manager.update_stage_status("comprehensive", "running")

     def _handle_hooks_completion(self, hooks_success: bool) -> None:
-        """Handle hooks completion with appropriate status updates."""
         if not hooks_success:
             self.session.fail_task("workflow", "Hooks failed")
             self._update_hooks_status_failed()
@@ -365,28 +421,35 @@ class WorkflowPipeline:
             self._update_hooks_status_completed()

     def _has_mcp_state_manager(self) -> bool:
-        """Check if MCP state manager is available."""
         return hasattr(self, "_mcp_state_manager") and self._mcp_state_manager

     def _update_hooks_status_failed(self) -> None:
-        """Update MCP state to failed for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "failed")
             self._mcp_state_manager.update_stage_status("comprehensive", "failed")

     def _update_hooks_status_completed(self) -> None:
-        """Update MCP state to completed for hook phases."""
         if self._has_mcp_state_manager():
             self._mcp_state_manager.update_stage_status("fast", "completed")
             self._mcp_state_manager.update_stage_status("comprehensive", "completed")

     async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
-        """Run AI agent fixing phase to analyze and fix collected failures."""
         self._update_mcp_status("ai_fixing", "running")
         self.logger.info("Starting AI agent fixing phase")
         self._log_debug_phase_start()

         try:
+            # If code cleaning is enabled and hasn't run yet, run it first
+            # to provide cleaner, more standardized code for the AI agents
+            if getattr(options, "clean", False) and not self._has_code_cleaning_run():
+                self.console.print(
+                    "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
+                )
+                if self._run_code_cleaning_phase(options):
+                    # Run fast hooks sanity check after cleaning
+                    self._run_post_cleaning_fast_hooks(options)
+                    self._mark_code_cleaning_complete()
+
             agent_coordinator = self._setup_agent_coordinator()
             issues = await self._collect_issues_from_failures()

@@ -402,7 +465,6 @@ class WorkflowPipeline:
             return self._handle_fixing_phase_error(e)

     def _log_debug_phase_start(self) -> None:
-        """Log debug information for phase start."""
         if self._should_debug():
             self.debugger.log_workflow_phase(
                 "ai_agent_fixing",
@@ -411,7 +473,6 @@ class WorkflowPipeline:
             )

     def _setup_agent_coordinator(self) -> AgentCoordinator:
-        """Set up agent coordinator with proper context."""
         from crackerjack.agents.coordinator import AgentCoordinator

         agent_context = AgentContext(
@@ -424,7 +485,6 @@ class WorkflowPipeline:
         return agent_coordinator

     def _handle_no_issues_found(self) -> bool:
-        """Handle case when no issues are collected."""
         self.logger.info("No issues collected for AI agent fixing")
         self._update_mcp_status("ai_fixing", "completed")
         return True
@@ -432,7 +492,6 @@ class WorkflowPipeline:
     async def _process_fix_results(
         self, options: OptionsProtocol, fix_result: t.Any
     ) -> bool:
-        """Process fix results and verify success."""
         verification_success = await self._verify_fixes_applied(options, fix_result)
         success = fix_result.success and verification_success

@@ -445,7 +504,6 @@ class WorkflowPipeline:
         return success

     def _handle_successful_fixes(self, fix_result: t.Any) -> None:
-        """Handle successful fix results."""
         self.logger.info(
             "AI agents successfully fixed all issues and verification passed"
         )
@@ -455,10 +513,9 @@ class WorkflowPipeline:
     def _handle_failed_fixes(
         self, fix_result: t.Any, verification_success: bool
     ) -> None:
-        """Handle failed fix results."""
         if not verification_success:
             self.logger.warning(
-                "AI agent fixes did not pass verification - issues still exist"
+                "AI agent fixes did not pass verification-issues still exist"
             )
         else:
             self.logger.warning(
@@ -467,7 +524,6 @@ class WorkflowPipeline:
         self._update_mcp_status("ai_fixing", "failed")

     def _log_fix_counts_if_debugging(self, fix_result: t.Any) -> None:
-        """Log fix counts for debugging if debug mode is enabled."""
         if not self._should_debug():
             return

@@ -480,7 +536,6 @@ class WorkflowPipeline:
             self.debugger.log_hook_fixes(hook_fixes)

     def _log_debug_phase_completion(self, success: bool, fix_result: t.Any) -> None:
-        """Log debug information for phase completion."""
         if self._should_debug():
             self.debugger.log_workflow_phase(
                 "ai_agent_fixing",
@@ -493,7 +548,6 @@ class WorkflowPipeline:
             )

     def _handle_fixing_phase_error(self, error: Exception) -> bool:
-        """Handle errors during the fixing phase."""
         self.logger.exception(f"AI agent fixing phase failed: {error}")
         self.session.fail_task("ai_fixing", f"AI agent fixing failed: {error}")
         self._update_mcp_status("ai_fixing", "failed")
@@ -510,53 +564,67 @@ class WorkflowPipeline:
     async def _verify_fixes_applied(
         self, options: OptionsProtocol, fix_result: t.Any
     ) -> bool:
-        """Verify that AI agent fixes actually resolved the issues by re-running checks."""
         if not fix_result.fixes_applied:
-            return True  # No fixes were applied, nothing to verify
+            return True

         self.logger.info("Verifying AI agent fixes by re-running quality checks")

-        # Re-run the phases that previously failed to verify fixes
         verification_success = True

-        # Check if we need to re-run tests
-        if any("test" in fix.lower() for fix in fix_result.fixes_applied):
-            self.logger.info("Re-running tests to verify test fixes")
-            test_success = self.phases.run_testing_phase(options)
-            if not test_success:
-                self.logger.warning(
-                    "Test verification failed - test fixes did not work"
-                )
+        # Verify test fixes
+        if self._should_verify_test_fixes(fix_result.fixes_applied):
+            if not await self._verify_test_fixes(options):
+                verification_success = False
+
+        # Verify hook fixes
+        if self._should_verify_hook_fixes(fix_result.fixes_applied):
+            if not await self._verify_hook_fixes(options):
                 verification_success = False

-        # Check if we need to re-run comprehensive hooks
+        self._log_verification_result(verification_success)
+        return verification_success
+
+    def _should_verify_test_fixes(self, fixes_applied: list[str]) -> bool:
+        """Check if test fixes need verification."""
+        return any("test" in fix.lower() for fix in fixes_applied)
+
+    async def _verify_test_fixes(self, options: OptionsProtocol) -> bool:
+        """Verify test fixes by re-running tests."""
+        self.logger.info("Re-running tests to verify test fixes")
+        test_success = self.phases.run_testing_phase(options)
+        if not test_success:
+            self.logger.warning("Test verification failed-test fixes did not work")
+        return test_success
+
+    def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
+        """Check if hook fixes need verification."""
         hook_fixes = [
             f
-            for f in fix_result.fixes_applied
+            for f in fixes_applied
             if "hook" not in f.lower()
             or "complexity" in f.lower()
             or "type" in f.lower()
         ]
-        if hook_fixes:
-            self.logger.info("Re-running comprehensive hooks to verify hook fixes")
-            hook_success = self.phases.run_comprehensive_hooks_only(options)
-            if not hook_success:
-                self.logger.warning(
-                    "Hook verification failed - hook fixes did not work"
-                )
-                verification_success = False
-
+        return bool(hook_fixes)
+
+    async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
+        """Verify hook fixes by re-running comprehensive hooks."""
+        self.logger.info("Re-running comprehensive hooks to verify hook fixes")
+        hook_success = self.phases.run_comprehensive_hooks_only(options)
+        if not hook_success:
+            self.logger.warning("Hook verification failed-hook fixes did not work")
+        return hook_success
+
+    def _log_verification_result(self, verification_success: bool) -> None:
+        """Log the final verification result."""
         if verification_success:
             self.logger.info("All AI agent fixes verified successfully")
         else:
             self.logger.error(
-                "Verification failed - some fixes did not resolve the issues"
+                "Verification failed-some fixes did not resolve the issues"
             )

-        return verification_success
-
     async def _collect_issues_from_failures(self) -> list[Issue]:
-        """Collect issues from test and comprehensive hook failures."""
         issues: list[Issue] = []

         test_issues, test_count = self._collect_test_failure_issues()
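Note the predicate behind `_should_verify_hook_fixes`: a fix description counts toward hook verification when it does not mention "hook" at all, or when it mentions "complexity" or "type" even if it does. A standalone sketch of that filter, using hypothetical fix descriptions rather than actual crackerjack output:

    def should_verify_hook_fixes(fixes_applied: list[str]) -> bool:
        # Same filter as the diff: non-hook fixes always qualify; hook-labelled
        # fixes qualify only when they also touch complexity or types.
        hook_fixes = [
            f
            for f in fixes_applied
            if "hook" not in f.lower()
            or "complexity" in f.lower()
            or "type" in f.lower()
        ]
        return bool(hook_fixes)

    assert should_verify_hook_fixes(["Fixed unused import"])                # no "hook"
    assert should_verify_hook_fixes(["Reduced complexity in hook runner"])  # "complexity"
    assert not should_verify_hook_fixes(["Re-ordered hook execution"])      # hook-only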
@@ -570,7 +638,6 @@ class WorkflowPipeline:
         return issues

     def _collect_test_failure_issues(self) -> tuple[list[Issue], int]:
-        """Collect test failure issues and return count."""
         issues: list[Issue] = []
         test_count = 0

@@ -582,7 +649,7 @@ class WorkflowPipeline:
             test_count = len(test_failures)
             for i, failure in enumerate(
                 test_failures[:20],
-            ):  # Limit to prevent overload
+            ):
                 issue = Issue(
                     id=f"test_failure_{i}",
                     type=IssueType.TEST_FAILURE,
@@ -595,7 +662,6 @@ class WorkflowPipeline:
         return issues, test_count

     def _collect_hook_failure_issues(self) -> tuple[list[Issue], int]:
-        """Collect hook failure issues and return count."""
         issues: list[Issue] = []
         hook_count = 0

@@ -608,7 +674,6 @@ class WorkflowPipeline:
         return issues, hook_count

     def _process_hook_results(self, hook_results: t.Any) -> tuple[list[Issue], int]:
-        """Process hook results and extract issues."""
         issues: list[Issue] = []
         hook_count = 0

@@ -623,17 +688,14 @@ class WorkflowPipeline:
         return issues, hook_count

     def _is_hook_result_failed(self, result: t.Any) -> bool:
-        """Check if hook result indicates failure."""
         return result.status in ("failed", "error", "timeout")

     def _extract_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
-        """Extract issues from a single hook result."""
         if result.issues_found:
             return self._create_specific_issues_from_hook_result(result)
         return [self._create_generic_issue_from_hook_result(result)]

     def _create_specific_issues_from_hook_result(self, result: t.Any) -> list[Issue]:
-        """Create specific issues from hook result with detailed information."""
         issues: list[Issue] = []
         hook_context = f"{result.name}: "

@@ -644,7 +706,6 @@ class WorkflowPipeline:
         return issues

     def _create_generic_issue_from_hook_result(self, result: t.Any) -> Issue:
-        """Create a generic issue for hook failure without specific details."""
         issue_type = self._determine_hook_issue_type(result.name)
         return Issue(
             id=f"hook_failure_{result.name}",
@@ -655,13 +716,16 @@ class WorkflowPipeline:
         )

     def _determine_hook_issue_type(self, hook_name: str) -> IssueType:
-        """Determine issue type based on hook name."""
         formatting_hooks = {
             "trailing-whitespace",
             "end-of-file-fixer",
             "ruff-format",
             "ruff-check",
         }
+
+        if hook_name == "validate-regex-patterns":
+            return IssueType.REGEX_VALIDATION
+
         return (
             IssueType.FORMATTING
             if hook_name in formatting_hooks
@@ -669,7 +733,6 @@ class WorkflowPipeline:
         )

     def _fallback_to_session_tracker(self) -> tuple[list[Issue], int]:
-        """Fallback to session tracker if hook manager fails."""
         issues: list[Issue] = []
         hook_count = 0

@@ -685,14 +748,12 @@ class WorkflowPipeline:
         return issues, hook_count

     def _is_failed_hook_task(self, task_data: t.Any, task_id: str) -> bool:
-        """Check if a task is a failed hook task."""
         return task_data.status == "failed" and task_id in (
             "fast_hooks",
             "comprehensive_hooks",
         )

     def _process_hook_failure(self, task_id: str, task_data: t.Any) -> list[Issue]:
-        """Process a single hook failure and return corresponding issues."""
         error_msg = getattr(task_data, "error_message", "Unknown error")
         specific_issues = self._parse_hook_error_details(task_id, error_msg)

@@ -702,7 +763,6 @@ class WorkflowPipeline:
         return [self._create_generic_hook_issue(task_id, error_msg)]

     def _create_generic_hook_issue(self, task_id: str, error_msg: str) -> Issue:
-        """Create a generic issue for unspecific hook failures."""
         issue_type = IssueType.FORMATTING if "fast" in task_id else IssueType.TYPE_ERROR
         return Issue(
             id=f"hook_failure_{task_id}",
@@ -713,128 +773,138 @@ class WorkflowPipeline:
         )

     def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
-        """Parse specific hook failure details to create targeted issues."""
+        """Parse hook error details and create specific issues."""
         issues: list[Issue] = []

-        # For comprehensive hooks, parse specific tool failures
         if task_id == "comprehensive_hooks":
-            # Check for complexipy failures (complexity violations)
-            if "complexipy" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="complexipy_violation",
-                        type=IssueType.COMPLEXITY,
-                        severity=Priority.HIGH,
-                        message="Code complexity violation detected by complexipy",
-                        stage="comprehensive",
-                    )
-                )
+            issues.extend(self._parse_comprehensive_hook_errors(error_msg))
+        elif task_id == "fast_hooks":
+            issues.append(self._create_fast_hook_issue())

-            # Check for pyright failures (type errors)
-            if "pyright" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="pyright_type_error",
-                        type=IssueType.TYPE_ERROR,
-                        severity=Priority.HIGH,
-                        message="Type checking errors detected by pyright",
-                        stage="comprehensive",
-                    )
-                )
+        return issues

-            # Check for bandit failures (security issues)
-            if "bandit" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="bandit_security_issue",
-                        type=IssueType.SECURITY,
-                        severity=Priority.HIGH,
-                        message="Security vulnerabilities detected by bandit",
-                        stage="comprehensive",
-                    )
-                )
+    def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
+        """Parse comprehensive hook error messages and create specific issues."""
+        issues: list[Issue] = []
+        error_lower = error_msg.lower()

-            # Check for refurb failures (code quality issues)
-            if "refurb" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="refurb_quality_issue",
-                        type=IssueType.PERFORMANCE,  # Use PERFORMANCE as closest match for refurb issues
-                        severity=Priority.MEDIUM,
-                        message="Code quality issues detected by refurb",
-                        stage="comprehensive",
-                    )
-                )
+        # Check each error type
+        complexity_issue = self._check_complexity_error(error_lower)
+        if complexity_issue:
+            issues.append(complexity_issue)

-            # Check for vulture failures (dead code)
-            if "vulture" in error_msg.lower():
-                issues.append(
-                    Issue(
-                        id="vulture_dead_code",
-                        type=IssueType.DEAD_CODE,
-                        severity=Priority.MEDIUM,
-                        message="Dead code detected by vulture",
-                        stage="comprehensive",
-                    )
-                )
+        type_error_issue = self._check_type_error(error_lower)
+        if type_error_issue:
+            issues.append(type_error_issue)

-        elif task_id == "fast_hooks":
-            # Fast hooks are typically formatting issues
-            issues.append(
-                Issue(
-                    id="fast_hooks_formatting",
-                    type=IssueType.FORMATTING,
-                    severity=Priority.LOW,
-                    message="Code formatting issues detected",
-                    stage="fast",
-                )
-            )
+        security_issue = self._check_security_error(error_lower)
+        if security_issue:
+            issues.append(security_issue)
+
+        performance_issue = self._check_performance_error(error_lower)
+        if performance_issue:
+            issues.append(performance_issue)
+
+        dead_code_issue = self._check_dead_code_error(error_lower)
+        if dead_code_issue:
+            issues.append(dead_code_issue)
+
+        regex_issue = self._check_regex_validation_error(error_lower)
+        if regex_issue:
+            issues.append(regex_issue)

         return issues

+    def _check_complexity_error(self, error_lower: str) -> Issue | None:
+        """Check for complexity errors and create issue if found."""
+        if "complexipy" in error_lower or "c901" in error_lower:
+            return Issue(
+                id="complexity_violation",
+                type=IssueType.COMPLEXITY,
+                severity=Priority.HIGH,
+                message="Code complexity violation detected",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_type_error(self, error_lower: str) -> Issue | None:
+        """Check for type errors and create issue if found."""
+        if "pyright" in error_lower:
+            return Issue(
+                id="pyright_type_error",
+                type=IssueType.TYPE_ERROR,
+                severity=Priority.HIGH,
+                message="Type checking errors detected by pyright",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_security_error(self, error_lower: str) -> Issue | None:
+        """Check for security errors and create issue if found."""
+        if "bandit" in error_lower:
+            return Issue(
+                id="bandit_security_issue",
+                type=IssueType.SECURITY,
+                severity=Priority.HIGH,
+                message="Security vulnerabilities detected by bandit",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_performance_error(self, error_lower: str) -> Issue | None:
+        """Check for performance errors and create issue if found."""
+        if "refurb" in error_lower:
+            return Issue(
+                id="refurb_quality_issue",
+                type=IssueType.PERFORMANCE,
+                severity=Priority.MEDIUM,
+                message="Code quality issues detected by refurb",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_dead_code_error(self, error_lower: str) -> Issue | None:
+        """Check for dead code errors and create issue if found."""
+        if "vulture" in error_lower:
+            return Issue(
+                id="vulture_dead_code",
+                type=IssueType.DEAD_CODE,
+                severity=Priority.MEDIUM,
+                message="Dead code detected by vulture",
+                stage="comprehensive",
+            )
+        return None
+
+    def _check_regex_validation_error(self, error_lower: str) -> Issue | None:
+        """Check for regex validation errors and create issue if found."""
+        regex_keywords = ("raw regex", "regex pattern", r"\g<", "replacement")
+        if "validate-regex-patterns" in error_lower or any(
+            keyword in error_lower for keyword in regex_keywords
+        ):
+            return Issue(
+                id="regex_validation_failure",
+                type=IssueType.REGEX_VALIDATION,
+                severity=Priority.HIGH,
+                message="Unsafe regex patterns detected by validate-regex-patterns",
+                stage="fast",
+            )
+        return None
+
+    def _create_fast_hook_issue(self) -> Issue:
+        """Create an issue for fast hook errors."""
+        return Issue(
+            id="fast_hooks_formatting",
+            type=IssueType.FORMATTING,
+            severity=Priority.LOW,
+            message="Code formatting issues detected",
+            stage="fast",
+        )
+
     def _parse_issues_for_agents(self, issue_strings: list[str]) -> list[Issue]:
-        """Parse string issues into structured Issue objects for AI agents."""
         issues: list[Issue] = []

         for i, issue_str in enumerate(issue_strings):
-            # Determine issue type from content patterns
-            issue_type = IssueType.FORMATTING
-            priority = Priority.MEDIUM
-
-            if any(
-                keyword in issue_str.lower()
-                for keyword in ("type", "annotation", "pyright")
-            ):
-                issue_type = IssueType.TYPE_ERROR
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("security", "bandit", "hardcoded")
-            ):
-                issue_type = IssueType.SECURITY
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower() for keyword in ("complexity", "complexipy")
-            ):
-                issue_type = IssueType.COMPLEXITY
-                priority = Priority.HIGH
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("unused", "dead", "vulture")
-            ):
-                issue_type = IssueType.DEAD_CODE
-                priority = Priority.MEDIUM
-            elif any(
-                keyword in issue_str.lower()
-                for keyword in ("performance", "refurb", "furb")
-            ):
-                issue_type = IssueType.PERFORMANCE
-                priority = Priority.MEDIUM
-            elif any(
-                keyword in issue_str.lower() for keyword in ("import", "creosote")
-            ):
-                issue_type = IssueType.IMPORT_ERROR
-                priority = Priority.MEDIUM
+            issue_type, priority = self._classify_issue(issue_str)

             issue = Issue(
                 id=f"parsed_issue_{i}",
@@ -847,10 +917,80 @@

         return issues

+    def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
+        """Classify an issue string to determine its type and priority."""
+        issue_lower = issue_str.lower()
+
+        # Check high-priority issues first
+        if self._is_type_error(issue_lower):
+            return IssueType.TYPE_ERROR, Priority.HIGH
+        if self._is_security_issue(issue_lower):
+            return IssueType.SECURITY, Priority.HIGH
+        if self._is_complexity_issue(issue_lower):
+            return IssueType.COMPLEXITY, Priority.HIGH
+        if self._is_regex_validation_issue(issue_lower):
+            return IssueType.REGEX_VALIDATION, Priority.HIGH
+
+        # Check medium-priority issues
+        if self._is_dead_code_issue(issue_lower):
+            return IssueType.DEAD_CODE, Priority.MEDIUM
+        if self._is_performance_issue(issue_lower):
+            return IssueType.PERFORMANCE, Priority.MEDIUM
+        if self._is_import_error(issue_lower):
+            return IssueType.IMPORT_ERROR, Priority.MEDIUM
+
+        # Default to formatting issue
+        return IssueType.FORMATTING, Priority.MEDIUM
+
+    def _is_type_error(self, issue_lower: str) -> bool:
+        """Check if issue is related to type errors."""
+        return any(
+            keyword in issue_lower for keyword in ("type", "annotation", "pyright")
+        )
+
+    def _is_security_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to security."""
+        return any(
+            keyword in issue_lower for keyword in ("security", "bandit", "hardcoded")
+        )
+
+    def _is_complexity_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to code complexity."""
+        return any(
+            keyword in issue_lower
+            for keyword in ("complexity", "complexipy", "c901", "too complex")
+        )
+
+    def _is_regex_validation_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to regex validation."""
+        return any(
+            keyword in issue_lower
+            for keyword in (
+                "regex",
+                "pattern",
+                "validate-regex-patterns",
+                r"\g<",
+                "replacement",
+            )
+        )
+
+    def _is_dead_code_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to dead code."""
+        return any(keyword in issue_lower for keyword in ("unused", "dead", "vulture"))
+
+    def _is_performance_issue(self, issue_lower: str) -> bool:
+        """Check if issue is related to performance."""
+        return any(
+            keyword in issue_lower for keyword in ("performance", "refurb", "furb")
+        )
+
+    def _is_import_error(self, issue_lower: str) -> bool:
+        """Check if issue is related to import errors."""
+        return any(keyword in issue_lower for keyword in ("import", "creosote"))
+
     def _log_failure_counts_if_debugging(
         self, test_count: int, hook_count: int
     ) -> None:
-        """Log failure counts if debugging is enabled."""
         if self._should_debug():
             self.debugger.log_test_failures(test_count)
             self.debugger.log_hook_failures(hook_count)
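The extracted `_classify_issue` replaces the old inline keyword cascade with one predicate per category, checked in priority order (type, security, complexity, regex first; then dead code, performance, imports; formatting as the fallback). A self-contained sketch of the same table-driven mapping — the enum definitions are stand-ins for crackerjack's `IssueType` and `Priority`, with keyword tuples copied from the diff:

    from enum import Enum, auto

    class IssueType(Enum):
        TYPE_ERROR = auto()
        SECURITY = auto()
        COMPLEXITY = auto()
        REGEX_VALIDATION = auto()
        DEAD_CODE = auto()
        PERFORMANCE = auto()
        IMPORT_ERROR = auto()
        FORMATTING = auto()

    class Priority(Enum):
        HIGH = auto()
        MEDIUM = auto()

    # One (keywords, type, priority) rule per category; list order encodes precedence.
    RULES: list[tuple[tuple[str, ...], IssueType, Priority]] = [
        (("type", "annotation", "pyright"), IssueType.TYPE_ERROR, Priority.HIGH),
        (("security", "bandit", "hardcoded"), IssueType.SECURITY, Priority.HIGH),
        (("complexity", "complexipy", "c901", "too complex"), IssueType.COMPLEXITY, Priority.HIGH),
        (("regex", "pattern", "validate-regex-patterns", r"\g<", "replacement"),
         IssueType.REGEX_VALIDATION, Priority.HIGH),
        (("unused", "dead", "vulture"), IssueType.DEAD_CODE, Priority.MEDIUM),
        (("performance", "refurb", "furb"), IssueType.PERFORMANCE, Priority.MEDIUM),
        (("import", "creosote"), IssueType.IMPORT_ERROR, Priority.MEDIUM),
    ]

    def classify(issue_str: str) -> tuple[IssueType, Priority]:
        issue_lower = issue_str.lower()
        for keywords, issue_type, priority in RULES:
            if any(k in issue_lower for k in keywords):
                return issue_type, priority
        return IssueType.FORMATTING, Priority.MEDIUM  # default bucket

    assert classify("pyright: incompatible return value") == (IssueType.TYPE_ERROR, Priority.HIGH)
    assert classify("vulture: unused function 'foo'") == (IssueType.DEAD_CODE, Priority.MEDIUM)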
@@ -864,14 +1004,17 @@ class WorkflowOrchestrator:
         dry_run: bool = False,
         web_job_id: str | None = None,
         verbose: bool = False,
+        debug: bool = False,
     ) -> None:
         self.console = console or Console(force_terminal=True)
         self.pkg_path = pkg_path or Path.cwd()
         self.dry_run = dry_run
         self.web_job_id = web_job_id
         self.verbose = verbose
+        self.debug = debug

         from crackerjack.models.protocols import (
+            ConfigMergeServiceProtocol,
             FileSystemInterface,
             GitInterface,
             HookManager,
@@ -879,9 +1022,14 @@ class WorkflowOrchestrator:
             TestManagerProtocol,
         )

-        from .container import create_container
+        # Initialize logging first so container creation respects log levels
+        self._initialize_logging()
+
+        self.logger = get_logger("crackerjack.orchestrator")
+
+        from .enhanced_container import create_enhanced_container

-        self.container = create_container(
+        self.container = create_enhanced_container(
             console=self.console,
             pkg_path=self.pkg_path,
             dry_run=self.dry_run,
@@ -898,6 +1046,7 @@ class WorkflowOrchestrator:
             hook_manager=self.container.get(HookManager),
             test_manager=self.container.get(TestManagerProtocol),
             publish_manager=self.container.get(PublishManager),
+            config_merge_service=self.container.get(ConfigMergeServiceProtocol),
         )

         self.pipeline = WorkflowPipeline(
@@ -907,10 +1056,6 @@ class WorkflowOrchestrator:
             phases=self.phases,
         )

-        self.logger = get_logger("crackerjack.orchestrator")
-
-        self._initialize_logging()
-
     def _initialize_logging(self) -> None:
         from crackerjack.services.log_manager import get_log_manager

@@ -918,9 +1063,15 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)

-        setup_structured_logging(log_file=debug_log_file)
+        # Set log level based on verbosity - DEBUG only in verbose or debug mode
+        log_level = "DEBUG" if (self.verbose or self.debug) else "INFO"
+        setup_structured_logging(
+            level=log_level, json_output=False, log_file=debug_log_file
+        )

-        self.logger.info(
+        # Use a temporary logger for the initialization message
+        temp_logger = get_logger("crackerjack.orchestrator.init")
+        temp_logger.debug(
             "Structured logging initialized",
             log_file=str(debug_log_file),
             log_directory=str(log_manager.log_dir),