crackerjack 0.31.9__py3-none-any.whl → 0.31.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic; consult the registry's advisory page for more details.

Files changed (155)
  1. crackerjack/CLAUDE.md +288 -705
  2. crackerjack/__main__.py +22 -8
  3. crackerjack/agents/__init__.py +0 -3
  4. crackerjack/agents/architect_agent.py +0 -43
  5. crackerjack/agents/base.py +1 -9
  6. crackerjack/agents/coordinator.py +2 -148
  7. crackerjack/agents/documentation_agent.py +109 -81
  8. crackerjack/agents/dry_agent.py +122 -97
  9. crackerjack/agents/formatting_agent.py +3 -16
  10. crackerjack/agents/import_optimization_agent.py +1174 -130
  11. crackerjack/agents/performance_agent.py +956 -188
  12. crackerjack/agents/performance_helpers.py +229 -0
  13. crackerjack/agents/proactive_agent.py +1 -48
  14. crackerjack/agents/refactoring_agent.py +516 -246
  15. crackerjack/agents/refactoring_helpers.py +282 -0
  16. crackerjack/agents/security_agent.py +393 -90
  17. crackerjack/agents/test_creation_agent.py +1776 -120
  18. crackerjack/agents/test_specialist_agent.py +59 -15
  19. crackerjack/agents/tracker.py +0 -102
  20. crackerjack/api.py +145 -37
  21. crackerjack/cli/handlers.py +48 -30
  22. crackerjack/cli/interactive.py +11 -11
  23. crackerjack/cli/options.py +66 -4
  24. crackerjack/code_cleaner.py +808 -148
  25. crackerjack/config/global_lock_config.py +110 -0
  26. crackerjack/config/hooks.py +43 -64
  27. crackerjack/core/async_workflow_orchestrator.py +247 -97
  28. crackerjack/core/autofix_coordinator.py +192 -109
  29. crackerjack/core/enhanced_container.py +46 -63
  30. crackerjack/core/file_lifecycle.py +549 -0
  31. crackerjack/core/performance.py +9 -8
  32. crackerjack/core/performance_monitor.py +395 -0
  33. crackerjack/core/phase_coordinator.py +282 -95
  34. crackerjack/core/proactive_workflow.py +9 -58
  35. crackerjack/core/resource_manager.py +501 -0
  36. crackerjack/core/service_watchdog.py +490 -0
  37. crackerjack/core/session_coordinator.py +4 -8
  38. crackerjack/core/timeout_manager.py +504 -0
  39. crackerjack/core/websocket_lifecycle.py +475 -0
  40. crackerjack/core/workflow_orchestrator.py +355 -204
  41. crackerjack/dynamic_config.py +47 -6
  42. crackerjack/errors.py +3 -4
  43. crackerjack/executors/async_hook_executor.py +63 -13
  44. crackerjack/executors/cached_hook_executor.py +14 -14
  45. crackerjack/executors/hook_executor.py +100 -37
  46. crackerjack/executors/hook_lock_manager.py +856 -0
  47. crackerjack/executors/individual_hook_executor.py +120 -86
  48. crackerjack/intelligence/__init__.py +0 -7
  49. crackerjack/intelligence/adaptive_learning.py +13 -86
  50. crackerjack/intelligence/agent_orchestrator.py +15 -78
  51. crackerjack/intelligence/agent_registry.py +12 -59
  52. crackerjack/intelligence/agent_selector.py +31 -92
  53. crackerjack/intelligence/integration.py +1 -41
  54. crackerjack/interactive.py +9 -9
  55. crackerjack/managers/async_hook_manager.py +25 -8
  56. crackerjack/managers/hook_manager.py +9 -9
  57. crackerjack/managers/publish_manager.py +57 -59
  58. crackerjack/managers/test_command_builder.py +6 -36
  59. crackerjack/managers/test_executor.py +9 -61
  60. crackerjack/managers/test_manager.py +52 -62
  61. crackerjack/managers/test_manager_backup.py +77 -127
  62. crackerjack/managers/test_progress.py +4 -23
  63. crackerjack/mcp/cache.py +5 -12
  64. crackerjack/mcp/client_runner.py +10 -10
  65. crackerjack/mcp/context.py +64 -6
  66. crackerjack/mcp/dashboard.py +14 -11
  67. crackerjack/mcp/enhanced_progress_monitor.py +55 -55
  68. crackerjack/mcp/file_monitor.py +72 -42
  69. crackerjack/mcp/progress_components.py +103 -84
  70. crackerjack/mcp/progress_monitor.py +122 -49
  71. crackerjack/mcp/rate_limiter.py +12 -12
  72. crackerjack/mcp/server_core.py +16 -22
  73. crackerjack/mcp/service_watchdog.py +26 -26
  74. crackerjack/mcp/state.py +15 -0
  75. crackerjack/mcp/tools/core_tools.py +95 -39
  76. crackerjack/mcp/tools/error_analyzer.py +6 -32
  77. crackerjack/mcp/tools/execution_tools.py +1 -56
  78. crackerjack/mcp/tools/execution_tools_backup.py +35 -131
  79. crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
  80. crackerjack/mcp/tools/intelligence_tools.py +2 -55
  81. crackerjack/mcp/tools/monitoring_tools.py +308 -145
  82. crackerjack/mcp/tools/proactive_tools.py +12 -42
  83. crackerjack/mcp/tools/progress_tools.py +23 -15
  84. crackerjack/mcp/tools/utility_tools.py +3 -40
  85. crackerjack/mcp/tools/workflow_executor.py +40 -60
  86. crackerjack/mcp/websocket/app.py +0 -3
  87. crackerjack/mcp/websocket/endpoints.py +206 -268
  88. crackerjack/mcp/websocket/jobs.py +213 -66
  89. crackerjack/mcp/websocket/server.py +84 -6
  90. crackerjack/mcp/websocket/websocket_handler.py +137 -29
  91. crackerjack/models/config_adapter.py +3 -16
  92. crackerjack/models/protocols.py +162 -3
  93. crackerjack/models/resource_protocols.py +454 -0
  94. crackerjack/models/task.py +3 -3
  95. crackerjack/monitoring/__init__.py +0 -0
  96. crackerjack/monitoring/ai_agent_watchdog.py +25 -71
  97. crackerjack/monitoring/regression_prevention.py +28 -87
  98. crackerjack/orchestration/advanced_orchestrator.py +44 -78
  99. crackerjack/orchestration/coverage_improvement.py +10 -60
  100. crackerjack/orchestration/execution_strategies.py +16 -16
  101. crackerjack/orchestration/test_progress_streamer.py +61 -53
  102. crackerjack/plugins/base.py +1 -1
  103. crackerjack/plugins/managers.py +22 -20
  104. crackerjack/py313.py +65 -21
  105. crackerjack/services/backup_service.py +467 -0
  106. crackerjack/services/bounded_status_operations.py +627 -0
  107. crackerjack/services/cache.py +7 -9
  108. crackerjack/services/config.py +35 -52
  109. crackerjack/services/config_integrity.py +5 -16
  110. crackerjack/services/config_merge.py +542 -0
  111. crackerjack/services/contextual_ai_assistant.py +17 -19
  112. crackerjack/services/coverage_ratchet.py +51 -76
  113. crackerjack/services/debug.py +25 -39
  114. crackerjack/services/dependency_monitor.py +52 -50
  115. crackerjack/services/enhanced_filesystem.py +14 -11
  116. crackerjack/services/file_hasher.py +1 -1
  117. crackerjack/services/filesystem.py +1 -12
  118. crackerjack/services/git.py +78 -44
  119. crackerjack/services/health_metrics.py +31 -27
  120. crackerjack/services/initialization.py +281 -433
  121. crackerjack/services/input_validator.py +760 -0
  122. crackerjack/services/log_manager.py +16 -16
  123. crackerjack/services/logging.py +7 -6
  124. crackerjack/services/metrics.py +43 -43
  125. crackerjack/services/pattern_cache.py +2 -31
  126. crackerjack/services/pattern_detector.py +26 -63
  127. crackerjack/services/performance_benchmarks.py +20 -45
  128. crackerjack/services/regex_patterns.py +2887 -0
  129. crackerjack/services/regex_utils.py +537 -0
  130. crackerjack/services/secure_path_utils.py +683 -0
  131. crackerjack/services/secure_status_formatter.py +534 -0
  132. crackerjack/services/secure_subprocess.py +605 -0
  133. crackerjack/services/security.py +47 -10
  134. crackerjack/services/security_logger.py +492 -0
  135. crackerjack/services/server_manager.py +109 -50
  136. crackerjack/services/smart_scheduling.py +8 -25
  137. crackerjack/services/status_authentication.py +603 -0
  138. crackerjack/services/status_security_manager.py +442 -0
  139. crackerjack/services/thread_safe_status_collector.py +546 -0
  140. crackerjack/services/tool_version_service.py +1 -23
  141. crackerjack/services/unified_config.py +36 -58
  142. crackerjack/services/validation_rate_limiter.py +269 -0
  143. crackerjack/services/version_checker.py +9 -40
  144. crackerjack/services/websocket_resource_limiter.py +572 -0
  145. crackerjack/slash_commands/__init__.py +52 -2
  146. crackerjack/tools/__init__.py +0 -0
  147. crackerjack/tools/validate_input_validator_patterns.py +262 -0
  148. crackerjack/tools/validate_regex_patterns.py +198 -0
  149. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
  150. crackerjack-0.31.12.dist-info/RECORD +178 -0
  151. crackerjack/cli/facade.py +0 -104
  152. crackerjack-0.31.9.dist-info/RECORD +0 -149
  153. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
  154. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
  155. {crackerjack-0.31.9.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
@@ -11,6 +11,7 @@ from crackerjack.models.protocols import OptionsProtocol
11
11
 
12
12
  from .phase_coordinator import PhaseCoordinator
13
13
  from .session_coordinator import SessionCoordinator
14
+ from .timeout_manager import TimeoutStrategy, get_timeout_manager
14
15
 
15
16
 
16
17
  class AsyncWorkflowPipeline:
@@ -26,6 +27,9 @@ class AsyncWorkflowPipeline:
26
27
  self.session = session
27
28
  self.phases = phases
28
29
  self.logger = logging.getLogger("crackerjack.async_pipeline")
30
+ self.timeout_manager = get_timeout_manager()
31
+ self._active_tasks: list[asyncio.Task[t.Any]] = []
32
+ self.resource_context: t.Any | None = None
29
33
 
30
34
  async def run_complete_workflow_async(self, options: OptionsProtocol) -> bool:
31
35
  start_time = time.time()
@@ -33,12 +37,15 @@ class AsyncWorkflowPipeline:
33
37
  self.session.track_task("workflow", "Complete async crackerjack workflow")
34
38
 
35
39
  try:
36
- if hasattr(options, "ai_agent") and options.ai_agent:
37
- success = await self._execute_ai_agent_workflow_async(options)
38
- else:
39
- success = await self._execute_workflow_phases_async(options)
40
- self.session.finalize_session(start_time, success)
41
- return success
40
+ async with self.timeout_manager.timeout_context(
41
+ "complete_workflow", strategy=TimeoutStrategy.GRACEFUL_DEGRADATION
42
+ ):
43
+ if hasattr(options, "ai_agent") and options.ai_agent:
44
+ success = await self._execute_ai_agent_workflow_async(options)
45
+ else:
46
+ success = await self._execute_workflow_phases_async(options)
47
+ self.session.finalize_session(start_time, success)
48
+ return success
42
49
  except KeyboardInterrupt:
43
50
  self.console.print("Interrupted by user")
44
51
  self.session.fail_task("workflow", "Interrupted by user")
@@ -76,11 +83,39 @@ class AsyncWorkflowPipeline:
76
83
 
77
84
  return success
78
85
 
86
+ async def _cleanup_active_tasks(self) -> None:
87
+ """Clean up all active tasks."""
88
+ if not self._active_tasks:
89
+ return
90
+
91
+ self.logger.info(f"Cleaning up {len(self._active_tasks)} active tasks")
92
+
93
+ # Cancel all active tasks
94
+ for task in self._active_tasks:
95
+ if not task.done():
96
+ task.cancel()
97
+
98
+ # Wait for tasks to complete with timeout
99
+ if self._active_tasks:
100
+ try:
101
+ await asyncio.wait_for(
102
+ asyncio.gather(*self._active_tasks, return_exceptions=True),
103
+ timeout=30.0,
104
+ )
105
+ except TimeoutError:
106
+ self.logger.warning("Timeout waiting for task cleanup")
107
+
108
+ self._active_tasks.clear()
109
+
79
110
  async def _execute_cleaning_phase_async(self, options: OptionsProtocol) -> bool:
80
111
  if not options.clean:
81
112
  return True
82
113
 
83
- return await asyncio.to_thread(self.phases.run_cleaning_phase, options)
114
+ return await self.timeout_manager.with_timeout(
115
+ "file_operations",
116
+ asyncio.to_thread(self.phases.run_cleaning_phase, options),
117
+ strategy=TimeoutStrategy.RETRY_WITH_BACKOFF,
118
+ )
84
119
 
85
120
  async def _execute_quality_phase_async(self, options: OptionsProtocol) -> bool:
86
121
  if hasattr(options, "fast") and options.fast:
@@ -94,38 +129,117 @@ class AsyncWorkflowPipeline:
94
129
  async def _execute_test_workflow_async(self, options: OptionsProtocol) -> bool:
95
130
  overall_success = True
96
131
 
132
+ # Fast hooks with timeout
97
133
  if not await self._run_fast_hooks_async(options):
98
134
  overall_success = False
99
-
100
135
  self.session.fail_task("workflow", "Fast hooks failed")
101
136
  return False
102
137
 
103
- test_task = asyncio.create_task(self._run_testing_phase_async(options))
104
- hooks_task = asyncio.create_task(self._run_comprehensive_hooks_async(options))
138
+ # Run tests and comprehensive hooks in parallel
139
+ try:
140
+ test_task, hooks_task = self._create_parallel_tasks(options)
141
+ done, pending = await self._execute_parallel_tasks(test_task, hooks_task)
142
+
143
+ # Cancel any pending tasks
144
+ await self._cleanup_pending_tasks(pending)
145
+
146
+ # Process and validate results
147
+ test_success, hooks_success = await self._process_task_results(
148
+ done, test_task, hooks_task
149
+ )
150
+
151
+ return self._validate_workflow_results(
152
+ test_success, hooks_success, overall_success
153
+ )
154
+
155
+ except Exception as e:
156
+ self.logger.error(f"Test workflow execution error: {e}")
157
+ self.session.fail_task("workflow", f"Test workflow error: {e}")
158
+ return False
159
+
160
+ def _create_parallel_tasks(
161
+ self, options: OptionsProtocol
162
+ ) -> tuple[asyncio.Task[bool], asyncio.Task[bool]]:
163
+ """Create test and hooks tasks with timeout handling."""
164
+ test_task = asyncio.create_task(
165
+ self.timeout_manager.with_timeout(
166
+ "test_execution",
167
+ self._run_testing_phase_async(options),
168
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
169
+ )
170
+ )
171
+ hooks_task = asyncio.create_task(
172
+ self.timeout_manager.with_timeout(
173
+ "comprehensive_hooks",
174
+ self._run_comprehensive_hooks_async(options),
175
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
176
+ )
177
+ )
178
+ return test_task, hooks_task
179
+
180
+ async def _execute_parallel_tasks(
181
+ self, test_task: asyncio.Task[bool], hooks_task: asyncio.Task[bool]
182
+ ) -> tuple[set[asyncio.Task[bool]], set[asyncio.Task[bool]]]:
183
+ """Execute tasks in parallel with combined timeout."""
184
+ combined_timeout = (
185
+ self.timeout_manager.get_timeout("test_execution")
186
+ + self.timeout_manager.get_timeout("comprehensive_hooks")
187
+ + 60 # Extra buffer
188
+ )
105
189
 
106
- test_success, hooks_success = await asyncio.gather(
107
- test_task,
108
- hooks_task,
109
- return_exceptions=True,
190
+ done, pending = await asyncio.wait(
191
+ [test_task, hooks_task],
192
+ timeout=combined_timeout,
193
+ return_when=asyncio.ALL_COMPLETED,
110
194
  )
111
195
 
112
- if isinstance(test_success, Exception):
113
- self.logger.error(f"Test execution error: {test_success}")
114
- test_success = False
196
+ return done, pending
115
197
 
116
- if isinstance(hooks_success, Exception):
117
- self.logger.error(f"Hooks execution error: {hooks_success}")
118
- hooks_success = False
198
+ async def _cleanup_pending_tasks(self, pending: set[asyncio.Task[t.Any]]) -> None:
199
+ """Clean up any pending tasks."""
200
+ for task in pending:
201
+ task.cancel()
202
+ try:
203
+ await task
204
+ except asyncio.CancelledError:
205
+ pass
119
206
 
207
+ async def _process_task_results(
208
+ self,
209
+ done: set[asyncio.Task[bool]],
210
+ test_task: asyncio.Task[bool],
211
+ hooks_task: asyncio.Task[bool],
212
+ ) -> tuple[bool, bool]:
213
+ """Process results from completed tasks."""
214
+ test_success = hooks_success = False
215
+
216
+ for task in done:
217
+ try:
218
+ result = await task
219
+ if task == test_task:
220
+ test_success = result
221
+ elif task == hooks_task:
222
+ hooks_success = result
223
+ except Exception as e:
224
+ self.logger.error(f"Task execution error: {e}")
225
+ if task == test_task:
226
+ test_success = False
227
+ elif task == hooks_task:
228
+ hooks_success = False
229
+
230
+ return test_success, hooks_success
231
+
232
+ def _validate_workflow_results(
233
+ self, test_success: bool, hooks_success: bool, overall_success: bool
234
+ ) -> bool:
235
+ """Validate workflow results and handle failures."""
120
236
  if not test_success:
121
237
  overall_success = False
122
-
123
238
  self.session.fail_task("workflow", "Testing failed")
124
239
  return False
125
240
 
126
241
  if not hooks_success:
127
242
  overall_success = False
128
-
129
243
  self.session.fail_task("workflow", "Comprehensive hooks failed")
130
244
  return False
131
245
 
@@ -141,64 +255,97 @@ class AsyncWorkflowPipeline:
141
255
  return False
142
256
  return True
143
257
 
258
+ async def _create_managed_task(
259
+ self,
260
+ coro: t.Coroutine[t.Any, t.Any, t.Any],
261
+ timeout: float = 300.0,
262
+ task_name: str = "workflow_task",
263
+ ) -> asyncio.Task[t.Any]:
264
+ """Create a managed task with automatic cleanup."""
265
+ task = asyncio.create_task(coro, name=task_name)
266
+
267
+ if self.resource_context:
268
+ self.resource_context.managed_task(task, timeout)
269
+
270
+ self._active_tasks.append(task)
271
+ return task
272
+
144
273
  async def _run_fast_hooks_async(self, options: OptionsProtocol) -> bool:
145
- return await asyncio.to_thread(self.phases.run_fast_hooks_only, options)
274
+ return await self.timeout_manager.with_timeout(
275
+ "fast_hooks",
276
+ asyncio.to_thread(self.phases.run_fast_hooks_only, options),
277
+ strategy=TimeoutStrategy.RETRY_WITH_BACKOFF,
278
+ )
146
279
 
147
280
  async def _run_comprehensive_hooks_async(self, options: OptionsProtocol) -> bool:
148
- return await asyncio.to_thread(
149
- self.phases.run_comprehensive_hooks_only,
150
- options,
281
+ return await self.timeout_manager.with_timeout(
282
+ "comprehensive_hooks",
283
+ asyncio.to_thread(self.phases.run_comprehensive_hooks_only, options),
284
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
151
285
  )
152
286
 
153
287
  async def _run_hooks_phase_async(self, options: OptionsProtocol) -> bool:
154
- return await asyncio.to_thread(self.phases.run_hooks_phase, options)
288
+ return await self.timeout_manager.with_timeout(
289
+ "comprehensive_hooks",
290
+ asyncio.to_thread(self.phases.run_hooks_phase, options),
291
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
292
+ )
155
293
 
156
294
  async def _run_testing_phase_async(self, options: OptionsProtocol) -> bool:
157
- return await asyncio.to_thread(self.phases.run_testing_phase, options)
295
+ return await self.timeout_manager.with_timeout(
296
+ "test_execution",
297
+ asyncio.to_thread(self.phases.run_testing_phase, options),
298
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
299
+ )
158
300
 
159
301
  async def _execute_ai_agent_workflow_async(
160
302
  self, options: OptionsProtocol, max_iterations: int = 10
161
303
  ) -> bool:
162
- """Execute AI agent workflow with iterative fixing between iterations."""
163
304
  self.console.print(
164
305
  f"🤖 Starting AI Agent workflow (max {max_iterations} iterations)"
165
306
  )
166
307
 
167
- # Always run configuration phase first
168
308
  self.phases.run_configuration_phase(options)
169
309
 
170
- # Run cleaning phase if requested
171
310
  if not await self._execute_cleaning_phase_async(options):
172
311
  self.session.fail_task("workflow", "Cleaning phase failed")
173
312
  return False
174
313
 
175
- # Iterative quality improvement with AI fixing
176
314
  iteration_success = await self._run_iterative_quality_improvement(
177
315
  options, max_iterations
178
316
  )
179
317
  if not iteration_success:
180
318
  return False
181
319
 
182
- # Run remaining phases
183
320
  return await self._run_final_workflow_phases(options)
184
321
 
185
322
  async def _run_iterative_quality_improvement(
186
323
  self, options: OptionsProtocol, max_iterations: int
187
324
  ) -> bool:
188
- """Run iterative quality improvement until all checks pass."""
189
325
  for iteration in range(1, max_iterations + 1):
190
326
  self.console.print(f"\n🔄 Iteration {iteration}/{max_iterations}")
191
327
 
192
- iteration_result = await self._execute_single_iteration(options, iteration)
328
+ try:
329
+ # Each iteration has its own timeout
330
+ iteration_result = await self.timeout_manager.with_timeout(
331
+ "workflow_iteration",
332
+ self._execute_single_iteration(options, iteration),
333
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
334
+ )
193
335
 
194
- if iteration_result == "success":
195
- self.console.print("✅ All quality checks passed!")
196
- return True
197
- elif iteration_result == "failed":
198
- return False
199
- # Continue to next iteration if result == "continue"
336
+ if iteration_result == "success":
337
+ self.console.print("✅ All quality checks passed !")
338
+ return True
339
+ elif iteration_result == "failed":
340
+ return False
341
+
342
+ except Exception as e:
343
+ self.logger.error(f"Iteration {iteration} failed with error: {e}")
344
+ self.console.print(f"⚠️ Iteration {iteration} failed: {e}")
345
+ # Continue to next iteration unless it's a critical error
346
+ if iteration == max_iterations:
347
+ return False
200
348
 
201
- # If we exhausted all iterations without success
202
349
  self.console.print(
203
350
  f"❌ Failed to achieve code quality after {max_iterations} iterations"
204
351
  )
@@ -208,19 +355,14 @@ class AsyncWorkflowPipeline:
208
355
  async def _execute_single_iteration(
209
356
  self, options: OptionsProtocol, iteration: int
210
357
  ) -> str:
211
- """Execute a single AI agent iteration. Returns 'success', 'failed', or 'continue'."""
212
- # Step 1: Fast hooks with retry logic
213
358
  fast_hooks_success = await self._run_fast_hooks_with_retry_async(options)
214
359
 
215
- # Step 2 & 3: Collect ALL issues
216
360
  test_issues = await self._collect_test_issues_async(options)
217
361
  hook_issues = await self._collect_comprehensive_hook_issues_async(options)
218
362
 
219
- # If everything passes, we're done
220
363
  if fast_hooks_success and not test_issues and not hook_issues:
221
364
  return "success"
222
365
 
223
- # Step 4: Apply AI fixes for ALL collected issues
224
366
  if test_issues or hook_issues:
225
367
  fix_success = await self._apply_ai_fixes_async(
226
368
  options, test_issues, hook_issues, iteration
@@ -237,15 +379,12 @@ class AsyncWorkflowPipeline:
237
379
  def _parse_issues_for_agents(
238
380
  self, test_issues: list[str], hook_issues: list[str]
239
381
  ) -> list[Issue]:
240
- """Parse string issues into structured Issue objects for AI agent processing."""
241
382
  structured_issues = []
242
383
 
243
- # Parse hook issues using dedicated parsers
244
384
  for issue in hook_issues:
245
385
  parsed_issue = self._parse_single_hook_issue(issue)
246
386
  structured_issues.append(parsed_issue)
247
387
 
248
- # Parse test issues using dedicated parser
249
388
  for issue in test_issues:
250
389
  parsed_issue = self._parse_single_test_issue(issue)
251
390
  structured_issues.append(parsed_issue)
@@ -253,39 +392,36 @@ class AsyncWorkflowPipeline:
253
392
  return structured_issues
254
393
 
255
394
  def _parse_single_hook_issue(self, issue: str) -> Issue:
256
- """Parse a single hook issue into structured format."""
257
395
  from crackerjack.agents.base import IssueType, Priority
258
396
 
259
- # Try refurb-specific parsing first
260
- if "refurb:" in issue and "[FURB" in issue:
397
+ if "refurb: " in issue and "[FURB" in issue:
261
398
  return self._parse_refurb_issue(issue)
262
399
 
263
- # Use generic hook issue parsers
264
400
  hook_type_mapping = {
265
- "pyright:": (IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
401
+ "pyright: ": (IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
266
402
  "Type error": (IssueType.TYPE_ERROR, Priority.HIGH, "pyright"),
267
- "bandit:": (IssueType.SECURITY, Priority.HIGH, "bandit"),
268
- "vulture:": (IssueType.DEAD_CODE, Priority.MEDIUM, "vulture"),
269
- "complexipy:": (IssueType.COMPLEXITY, Priority.MEDIUM, "complexipy"),
403
+ "bandit: ": (IssueType.SECURITY, Priority.HIGH, "bandit"),
404
+ "vulture: ": (IssueType.DEAD_CODE, Priority.MEDIUM, "vulture"),
405
+ "complexipy: ": (IssueType.COMPLEXITY, Priority.MEDIUM, "complexipy"),
270
406
  }
271
407
 
272
408
  for keyword, (issue_type, priority, stage) in hook_type_mapping.items():
273
409
  if keyword in issue:
274
410
  return self._create_generic_issue(issue, issue_type, priority, stage)
275
411
 
276
- # Default to generic hook issue
277
412
  return self._create_generic_issue(
278
413
  issue, IssueType.FORMATTING, Priority.MEDIUM, "hook"
279
414
  )
280
415
 
281
416
  def _parse_refurb_issue(self, issue: str) -> Issue:
282
- """Parse refurb-specific issue format."""
283
417
  import re
284
418
  import uuid
285
419
 
286
420
  from crackerjack.agents.base import Issue, IssueType, Priority
287
421
 
288
- match = re.search(r"refurb:\s*(.+?):(\d+):(\d+)\s+\[(\w+)\]:\s*(.+)", issue)
422
+ match = re.search( # REGEX OK: parsing structured refurb tool output
423
+ r"refurb: \s *(.+?): (\d +): (\d +)\s +\[(\w +)\]: \s *(.+)", issue
424
+ )
289
425
  if match:
290
426
  file_path, line_num, _, error_code, message = match.groups()
291
427
  return Issue(
@@ -299,13 +435,11 @@ class AsyncWorkflowPipeline:
299
435
  stage="refurb",
300
436
  )
301
437
 
302
- # Fallback to generic parsing if regex fails
303
438
  return self._create_generic_issue(
304
439
  issue, IssueType.FORMATTING, Priority.MEDIUM, "refurb"
305
440
  )
306
441
 
307
442
  def _parse_single_test_issue(self, issue: str) -> Issue:
308
- """Parse a single test issue into structured format."""
309
443
  import uuid
310
444
 
311
445
  from crackerjack.agents.base import Issue, IssueType, Priority
@@ -327,7 +461,6 @@ class AsyncWorkflowPipeline:
327
461
  def _create_generic_issue(
328
462
  self, issue: str, issue_type: IssueType, priority: Priority, stage: str
329
463
  ) -> Issue:
330
- """Create a generic Issue object with standard fields."""
331
464
  import uuid
332
465
 
333
466
  from crackerjack.agents.base import Issue
@@ -342,7 +475,6 @@ class AsyncWorkflowPipeline:
342
475
  )
343
476
 
344
477
  async def _run_final_workflow_phases(self, options: OptionsProtocol) -> bool:
345
- """Run the final publishing and commit phases."""
346
478
  if not self.phases.run_publishing_phase(options):
347
479
  self.session.fail_task("workflow", "Publishing failed")
348
480
  return False
@@ -354,51 +486,45 @@ class AsyncWorkflowPipeline:
354
486
  return True
355
487
 
356
488
  async def _run_fast_hooks_with_retry_async(self, options: OptionsProtocol) -> bool:
357
- """Run fast hooks with one retry if they fail."""
358
- success = await self._run_fast_hooks_async(options)
359
- if not success:
360
- self.console.print("⚠️ Fast hooks failed, retrying once...")
361
- success = await self._run_fast_hooks_async(options)
362
- return success
489
+ return await asyncio.to_thread(self.phases.run_fast_hooks_only, options)
363
490
 
364
491
  async def _collect_test_issues_async(self, options: OptionsProtocol) -> list[str]:
365
- """Collect all test failures without stopping on first failure."""
366
492
  if not options.test:
367
493
  return []
368
494
 
369
495
  try:
370
- success = await self._run_testing_phase_async(options)
496
+ success = await self.timeout_manager.with_timeout(
497
+ "test_execution",
498
+ self._run_testing_phase_async(options),
499
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
500
+ )
371
501
  if success:
372
502
  return []
373
503
  else:
374
- # Get specific test failure details from test manager
375
504
  test_failures = self.phases.test_manager.get_test_failures()
376
505
  if test_failures:
377
506
  return [f"Test failure: {failure}" for failure in test_failures]
378
507
  else:
379
- # Fallback if no specific failures captured
380
- return ["Test failures detected - see logs for details"]
508
+ return ["Test failures detected-see logs for details"]
381
509
  except Exception as e:
382
510
  return [f"Test execution error: {e}"]
383
511
 
384
512
  async def _collect_comprehensive_hook_issues_async(
385
513
  self, options: OptionsProtocol
386
514
  ) -> list[str]:
387
- """Collect all comprehensive hook issues without stopping on first failure."""
388
515
  try:
389
- # Run hooks and capture detailed results
390
- hook_results = await asyncio.to_thread(
391
- self.phases.hook_manager.run_comprehensive_hooks,
516
+ hook_results = await self.timeout_manager.with_timeout(
517
+ "comprehensive_hooks",
518
+ asyncio.to_thread(self.phases.hook_manager.run_comprehensive_hooks),
519
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
392
520
  )
393
521
 
394
- # Extract specific issues from failed hooks
395
522
  all_issues = []
396
523
  for result in hook_results:
397
524
  if (
398
525
  result.status in ("failed", "error", "timeout")
399
526
  and result.issues_found
400
527
  ):
401
- # Add hook context to each issue for better AI agent understanding
402
528
  hook_context = f"{result.name}: "
403
529
  for issue in result.issues_found:
404
530
  all_issues.append(hook_context + issue)
@@ -415,7 +541,6 @@ class AsyncWorkflowPipeline:
415
541
  hook_issues: list[str],
416
542
  iteration: int,
417
543
  ) -> bool:
418
- """Apply AI fixes for all collected issues in batch using AgentCoordinator."""
419
544
  all_issues = test_issues + hook_issues
420
545
  if not all_issues:
421
546
  return True
@@ -425,8 +550,10 @@ class AsyncWorkflowPipeline:
425
550
  )
426
551
 
427
552
  try:
428
- return await self._execute_ai_fix_workflow(
429
- test_issues, hook_issues, iteration
553
+ return await self.timeout_manager.with_timeout(
554
+ "ai_agent_processing",
555
+ self._execute_ai_fix_workflow(test_issues, hook_issues, iteration),
556
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
430
557
  )
431
558
  except Exception as e:
432
559
  return self._handle_ai_fix_error(e)
@@ -434,7 +561,6 @@ class AsyncWorkflowPipeline:
434
561
  async def _execute_ai_fix_workflow(
435
562
  self, test_issues: list[str], hook_issues: list[str], iteration: int
436
563
  ) -> bool:
437
- """Execute the AI fix workflow and return success status."""
438
564
  structured_issues = self._parse_issues_for_agents(test_issues, hook_issues)
439
565
 
440
566
  if not structured_issues:
@@ -442,13 +568,18 @@ class AsyncWorkflowPipeline:
442
568
  return True
443
569
 
444
570
  coordinator = self._create_agent_coordinator()
445
- fix_result = await coordinator.handle_issues(structured_issues)
571
+
572
+ # Apply timeout to AI coordinator processing
573
+ fix_result = await self.timeout_manager.with_timeout(
574
+ "ai_agent_processing",
575
+ coordinator.handle_issues(structured_issues),
576
+ strategy=TimeoutStrategy.GRACEFUL_DEGRADATION,
577
+ )
446
578
 
447
579
  self._report_fix_results(fix_result, iteration)
448
580
  return fix_result.success
449
581
 
450
582
  def _create_agent_coordinator(self):
451
- """Create and configure the AI agent coordinator."""
452
583
  from crackerjack.agents.base import AgentContext
453
584
  from crackerjack.agents.coordinator import AgentCoordinator
454
585
 
@@ -456,27 +587,23 @@ class AsyncWorkflowPipeline:
456
587
  return AgentCoordinator(context)
457
588
 
458
589
  def _report_fix_results(self, fix_result: FixResult, iteration: int) -> None:
459
- """Report the results of AI fix attempts to the console."""
460
590
  if fix_result.success:
461
591
  self._report_successful_fixes(fix_result, iteration)
462
592
  else:
463
593
  self._report_failed_fixes(fix_result, iteration)
464
594
 
465
595
  def _report_successful_fixes(self, fix_result: FixResult, iteration: int) -> None:
466
- """Report successful AI fixes to the console."""
467
596
  self.console.print(f"✅ AI fixes applied successfully in iteration {iteration}")
468
597
  if fix_result.fixes_applied:
469
- self.console.print(f" Applied {len(fix_result.fixes_applied)} fixes")
598
+ self.console.print(f" Applied {len(fix_result.fixes_applied)} fixes")
470
599
 
471
600
  def _report_failed_fixes(self, fix_result: FixResult, iteration: int) -> None:
472
- """Report failed AI fixes to the console."""
473
601
  self.console.print(f"⚠️ Some AI fixes failed in iteration {iteration}")
474
602
  if fix_result.remaining_issues:
475
- for error in fix_result.remaining_issues[:3]: # Show first 3 errors
476
- self.console.print(f" Error: {error}")
603
+ for error in fix_result.remaining_issues[:3]:
604
+ self.console.print(f" Error: {error}")
477
605
 
478
606
  def _handle_ai_fix_error(self, error: Exception) -> bool:
479
- """Handle errors during AI fix execution."""
480
607
  self.logger.error(f"AI fixing failed: {error}")
481
608
  self.console.print(f"❌ AI agent system error: {error}")
482
609
  return False
@@ -489,13 +616,21 @@ class AsyncWorkflowOrchestrator:
489
616
  pkg_path: Path | None = None,
490
617
  dry_run: bool = False,
491
618
  web_job_id: str | None = None,
619
+ verbose: bool = False,
620
+ debug: bool = False,
492
621
  ) -> None:
493
622
  self.console = console or Console(force_terminal=True)
494
623
  self.pkg_path = pkg_path or Path.cwd()
495
624
  self.dry_run = dry_run
496
625
  self.web_job_id = web_job_id
626
+ self.verbose = verbose
627
+ self.debug = debug
628
+
629
+ # Initialize logging first so container creation respects log levels
630
+ self._initialize_logging()
497
631
 
498
632
  from crackerjack.models.protocols import (
633
+ ConfigMergeServiceProtocol,
499
634
  FileSystemInterface,
500
635
  GitInterface,
501
636
  HookManager,
@@ -503,9 +638,9 @@ class AsyncWorkflowOrchestrator:
503
638
  TestManagerProtocol,
504
639
  )
505
640
 
506
- from .container import create_container
641
+ from .enhanced_container import create_enhanced_container
507
642
 
508
- self.container = create_container(
643
+ self.container = create_enhanced_container(
509
644
  console=self.console,
510
645
  pkg_path=self.pkg_path,
511
646
  dry_run=self.dry_run,
@@ -521,6 +656,7 @@ class AsyncWorkflowOrchestrator:
521
656
  hook_manager=self.container.get(HookManager),
522
657
  test_manager=self.container.get(TestManagerProtocol),
523
658
  publish_manager=self.container.get(PublishManager),
659
+ config_merge_service=self.container.get(ConfigMergeServiceProtocol),
524
660
  )
525
661
 
526
662
  self.async_pipeline = AsyncWorkflowPipeline(
@@ -532,6 +668,20 @@ class AsyncWorkflowOrchestrator:
532
668
 
533
669
  self.logger = logging.getLogger("crackerjack.async_orchestrator")
534
670
 
671
+ def _initialize_logging(self) -> None:
672
+ from crackerjack.services.log_manager import get_log_manager
673
+ from crackerjack.services.logging import setup_structured_logging
674
+
675
+ log_manager = get_log_manager()
676
+ session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
677
+ debug_log_file = log_manager.create_debug_log_file(session_id)
678
+
679
+ # Set log level based on verbosity - DEBUG only in verbose or debug mode
680
+ log_level = "DEBUG" if (self.verbose or self.debug) else "INFO"
681
+ setup_structured_logging(
682
+ level=log_level, json_output=False, log_file=debug_log_file
683
+ )
684
+
535
685
  async def run_complete_workflow_async(self, options: OptionsProtocol) -> bool:
536
686
  return await self.async_pipeline.run_complete_workflow_async(options)
537
687