crackerjack 0.32.0__py3-none-any.whl → 0.33.1__py3-none-any.whl

This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (200)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +64 -6
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +257 -218
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +558 -240
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +66 -13
  74. crackerjack/managers/test_command_builder.py +5 -17
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +109 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +161 -32
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +174 -33
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +15 -12
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +3 -0
  109. crackerjack/mixins/error_handling.py +145 -0
  110. crackerjack/models/config.py +21 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +176 -107
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/models/task.py +3 -0
  115. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  116. crackerjack/monitoring/metrics_collector.py +426 -0
  117. crackerjack/monitoring/regression_prevention.py +8 -8
  118. crackerjack/monitoring/websocket_server.py +643 -0
  119. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  120. crackerjack/orchestration/coverage_improvement.py +3 -3
  121. crackerjack/orchestration/execution_strategies.py +26 -6
  122. crackerjack/orchestration/test_progress_streamer.py +8 -5
  123. crackerjack/plugins/base.py +2 -2
  124. crackerjack/plugins/hooks.py +7 -0
  125. crackerjack/plugins/managers.py +11 -8
  126. crackerjack/security/__init__.py +0 -1
  127. crackerjack/security/audit.py +90 -105
  128. crackerjack/services/anomaly_detector.py +392 -0
  129. crackerjack/services/api_extractor.py +615 -0
  130. crackerjack/services/backup_service.py +2 -2
  131. crackerjack/services/bounded_status_operations.py +15 -152
  132. crackerjack/services/cache.py +127 -1
  133. crackerjack/services/changelog_automation.py +395 -0
  134. crackerjack/services/config.py +18 -11
  135. crackerjack/services/config_merge.py +30 -85
  136. crackerjack/services/config_template.py +506 -0
  137. crackerjack/services/contextual_ai_assistant.py +48 -22
  138. crackerjack/services/coverage_badge_service.py +171 -0
  139. crackerjack/services/coverage_ratchet.py +41 -17
  140. crackerjack/services/debug.py +3 -3
  141. crackerjack/services/dependency_analyzer.py +460 -0
  142. crackerjack/services/dependency_monitor.py +14 -11
  143. crackerjack/services/documentation_generator.py +491 -0
  144. crackerjack/services/documentation_service.py +675 -0
  145. crackerjack/services/enhanced_filesystem.py +6 -5
  146. crackerjack/services/enterprise_optimizer.py +865 -0
  147. crackerjack/services/error_pattern_analyzer.py +676 -0
  148. crackerjack/services/file_hasher.py +1 -1
  149. crackerjack/services/git.py +41 -45
  150. crackerjack/services/health_metrics.py +10 -8
  151. crackerjack/services/heatmap_generator.py +735 -0
  152. crackerjack/services/initialization.py +30 -33
  153. crackerjack/services/input_validator.py +5 -97
  154. crackerjack/services/intelligent_commit.py +327 -0
  155. crackerjack/services/log_manager.py +15 -12
  156. crackerjack/services/logging.py +4 -3
  157. crackerjack/services/lsp_client.py +628 -0
  158. crackerjack/services/memory_optimizer.py +409 -0
  159. crackerjack/services/metrics.py +42 -33
  160. crackerjack/services/parallel_executor.py +416 -0
  161. crackerjack/services/pattern_cache.py +1 -1
  162. crackerjack/services/pattern_detector.py +6 -6
  163. crackerjack/services/performance_benchmarks.py +250 -576
  164. crackerjack/services/performance_cache.py +382 -0
  165. crackerjack/services/performance_monitor.py +565 -0
  166. crackerjack/services/predictive_analytics.py +510 -0
  167. crackerjack/services/quality_baseline.py +234 -0
  168. crackerjack/services/quality_baseline_enhanced.py +646 -0
  169. crackerjack/services/quality_intelligence.py +785 -0
  170. crackerjack/services/regex_patterns.py +605 -524
  171. crackerjack/services/regex_utils.py +43 -123
  172. crackerjack/services/secure_path_utils.py +5 -164
  173. crackerjack/services/secure_status_formatter.py +30 -141
  174. crackerjack/services/secure_subprocess.py +11 -92
  175. crackerjack/services/security.py +61 -30
  176. crackerjack/services/security_logger.py +18 -22
  177. crackerjack/services/server_manager.py +124 -16
  178. crackerjack/services/status_authentication.py +16 -159
  179. crackerjack/services/status_security_manager.py +4 -131
  180. crackerjack/services/terminal_utils.py +0 -0
  181. crackerjack/services/thread_safe_status_collector.py +19 -125
  182. crackerjack/services/unified_config.py +21 -13
  183. crackerjack/services/validation_rate_limiter.py +5 -54
  184. crackerjack/services/version_analyzer.py +459 -0
  185. crackerjack/services/version_checker.py +1 -1
  186. crackerjack/services/websocket_resource_limiter.py +10 -144
  187. crackerjack/services/zuban_lsp_service.py +390 -0
  188. crackerjack/slash_commands/__init__.py +2 -7
  189. crackerjack/slash_commands/run.md +2 -2
  190. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  191. crackerjack/tools/validate_regex_patterns.py +19 -48
  192. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/METADATA +197 -26
  193. crackerjack-0.33.1.dist-info/RECORD +229 -0
  194. crackerjack/CLAUDE.md +0 -207
  195. crackerjack/RULES.md +0 -380
  196. crackerjack/py313.py +0 -234
  197. crackerjack-0.32.0.dist-info/RECORD +0 -180
  198. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/WHEEL +0 -0
  199. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/entry_points.txt +0 -0
  200. {crackerjack-0.32.0.dist-info → crackerjack-0.33.1.dist-info}/licenses/LICENSE +0 -0
@@ -1,3 +1,4 @@
+ import asyncio
  import time
  import typing as t
  from pathlib import Path
@@ -7,12 +8,22 @@ from rich.console import Console
  from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
  from crackerjack.agents.coordinator import AgentCoordinator
  from crackerjack.models.protocols import OptionsProtocol
- from crackerjack.services.debug import get_ai_agent_debugger
+ from crackerjack.services.debug import (
+     AIAgentDebugger,
+     NoOpDebugger,
+     get_ai_agent_debugger,
+ )
  from crackerjack.services.logging import (
      LoggingContext,
      get_logger,
      setup_structured_logging,
  )
+ from crackerjack.services.memory_optimizer import get_memory_optimizer, memory_optimized
+ from crackerjack.services.performance_cache import get_performance_cache
+ from crackerjack.services.performance_monitor import (
+     get_performance_monitor,
+     phase_monitor,
+ )

  from .phase_coordinator import PhaseCoordinator
  from .session_coordinator import SessionCoordinator
@@ -40,13 +51,17 @@ class WorkflowPipeline:
          self.session = session
          self.phases = phases
          self._mcp_state_manager: t.Any = None
-         self._last_security_audit: t.Any = None  # Store security audit report
+         self._last_security_audit: t.Any = None

          self.logger = get_logger("crackerjack.pipeline")
-         self._debugger = None
+         self._debugger: AIAgentDebugger | NoOpDebugger | None = None
+
+         self._performance_monitor = get_performance_monitor()
+         self._memory_optimizer = get_memory_optimizer()
+         self._cache = get_performance_cache()

      @property
-     def debugger(self):
+     def debugger(self) -> AIAgentDebugger | NoOpDebugger:
          if self._debugger is None:
              self._debugger = get_ai_agent_debugger()
          return self._debugger
@@ -56,7 +71,14 @@ class WorkflowPipeline:

          return os.environ.get("AI_AGENT_DEBUG", "0") == "1"

+     @memory_optimized
      async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
+         workflow_id = f"workflow_{int(time.time())}"
+
+         self._performance_monitor.start_workflow(workflow_id)
+
+         await self._cache.start()
+
          with LoggingContext(
              "workflow_execution",
              testing=getattr(options, "test", False),
@@ -66,24 +88,43 @@ class WorkflowPipeline:
              self._initialize_workflow_session(options)

              try:
-                 success = await self._execute_workflow_with_timing(options, start_time)
+                 success = await self._execute_workflow_with_timing(
+                     options, start_time, workflow_id
+                 )
+
+                 workflow_perf = self._performance_monitor.end_workflow(
+                     workflow_id, success
+                 )
+                 self.logger.info(
+                     f"Workflow performance: {workflow_perf.performance_score: .1f} score, "
+                     f"{workflow_perf.total_duration_seconds: .2f}s duration"
+                 )
+
                  return success

              except KeyboardInterrupt:
+                 self._performance_monitor.end_workflow(workflow_id, False)
                  return self._handle_user_interruption()

              except Exception as e:
+                 self._performance_monitor.end_workflow(workflow_id, False)
                  return self._handle_workflow_exception(e)

              finally:
                  self.session.cleanup_resources()

+                 self._memory_optimizer.optimize_memory()
+                 await self._cache.stop()
+
      def _initialize_workflow_session(self, options: OptionsProtocol) -> None:
          self.session.initialize_session_tracking(options)
          self.session.track_task("workflow", "Complete crackerjack workflow")

          self._log_workflow_startup_debug(options)
          self._configure_session_cleanup(options)
+         self._initialize_zuban_lsp(options)
+         self._configure_hook_manager_lsp(options)
+         self._register_lsp_cleanup_handler(options)
          self._log_workflow_startup_info(options)

      def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
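
Note the bracketing in the hunk above: `start_workflow` fires before the `try`, and `end_workflow` runs on the success, interrupt, and exception paths alike, so every run leaves a timing record. The monitor itself ships in the new crackerjack/services/performance_monitor.py (+565 lines), which this diff does not display; a minimal sketch of the interface the pipeline depends on (the scoring formula below is invented for illustration) might be:

    # Hypothetical stand-in for crackerjack's PerformanceMonitor.
    import time
    from dataclasses import dataclass


    @dataclass
    class WorkflowPerformance:
        total_duration_seconds: float
        performance_score: float  # real scoring formula unknown; assumed here


    class PerformanceMonitor:
        def __init__(self) -> None:
            self._started: dict[str, float] = {}

        def start_workflow(self, workflow_id: str) -> None:
            self._started[workflow_id] = time.monotonic()

        def end_workflow(self, workflow_id: str, success: bool) -> WorkflowPerformance:
            start = self._started.pop(workflow_id, time.monotonic())
            duration = time.monotonic() - start
            # Invented score: reward success, penalize long runs.
            score = (100.0 if success else 50.0) / (1.0 + duration / 60.0)
            return WorkflowPerformance(duration, score)
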
@@ -104,6 +145,138 @@ class WorkflowPipeline:
          if hasattr(options, "cleanup"):
              self.session.set_cleanup_config(options.cleanup)

+     def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
+         """Initialize Zuban LSP server if not disabled."""
+         # Check if LSP is disabled via CLI flag or configuration
+         if getattr(options, "no_zuban_lsp", False):
+             self.logger.debug("Zuban LSP server disabled by --no-zuban-lsp flag")
+             return
+
+         # Get configuration from options (will use config system if available)
+         config = getattr(options, "zuban_lsp", None)
+         if config and not config.enabled:
+             self.logger.debug("Zuban LSP server disabled in configuration")
+             return
+
+         if config and not config.auto_start:
+             self.logger.debug("Zuban LSP server auto-start disabled in configuration")
+             return
+
+         # Check if LSP server is already running to avoid duplicates
+         from crackerjack.services.server_manager import find_zuban_lsp_processes
+
+         existing_processes = find_zuban_lsp_processes()
+         if existing_processes:
+             self.logger.debug(
+                 f"Zuban LSP server already running (PID: {existing_processes[0]['pid']})"
+             )
+             return
+
+         # Auto-start LSP server in background
+         try:
+             import subprocess
+             import sys
+
+             # Use configuration values if available, otherwise fallback to CLI options
+             if config:
+                 zuban_lsp_port = config.port
+                 zuban_lsp_mode = config.mode
+             else:
+                 zuban_lsp_port = getattr(options, "zuban_lsp_port", 8677)
+                 zuban_lsp_mode = getattr(options, "zuban_lsp_mode", "stdio")
+
+             cmd = [
+                 sys.executable,
+                 "-m",
+                 "crackerjack",
+                 "--start-zuban-lsp",
+                 "--zuban-lsp-port",
+                 str(zuban_lsp_port),
+                 "--zuban-lsp-mode",
+                 zuban_lsp_mode,
+             ]
+
+             subprocess.Popen(
+                 cmd,
+                 stdout=subprocess.DEVNULL,
+                 stderr=subprocess.DEVNULL,
+                 start_new_session=True,
+             )
+
+             self.logger.info(
+                 f"Auto-started Zuban LSP server on port {zuban_lsp_port} ({zuban_lsp_mode} mode)"
+             )
+
+         except Exception as e:
+             self.logger.warning(f"Failed to auto-start Zuban LSP server: {e}")
+
+     def _log_zuban_lsp_status(self) -> None:
+         """Display current Zuban LSP server status during workflow startup."""
+         from crackerjack.services.server_manager import find_zuban_lsp_processes
+
+         try:
+             lsp_processes = find_zuban_lsp_processes()
+
+             if lsp_processes:
+                 proc = lsp_processes[0]  # Show first running process
+                 self.logger.info(
+                     f"🔍 Zuban LSP server running (PID: {proc['pid']}, "
+                     f"CPU: {proc['cpu']}%, Memory: {proc['mem']}%)"
+                 )
+             else:
+                 self.logger.info("🔍 Zuban LSP server not running")
+
+         except Exception as e:
+             self.logger.debug(f"Failed to check Zuban LSP status: {e}")
+
+     def _configure_hook_manager_lsp(self, options: OptionsProtocol) -> None:
+         """Configure hook manager with LSP optimization settings."""
+         # Check if LSP hooks are enabled
+         enable_lsp_hooks = getattr(options, "enable_lsp_hooks", False)
+
+         # Configure the hook manager
+         hook_manager = self.phases.hook_manager
+         if hasattr(hook_manager, "configure_lsp_optimization"):
+             hook_manager.configure_lsp_optimization(enable_lsp_hooks)
+
+         if enable_lsp_hooks and not getattr(options, "no_zuban_lsp", False):
+             self.console.print(
+                 "🔍 LSP-optimized hook execution enabled for faster type checking",
+                 style="blue",
+             )
+
+     def _register_lsp_cleanup_handler(self, options: OptionsProtocol) -> None:
+         """Register cleanup handler to stop LSP server when workflow completes."""
+         # Get configuration to check if we should handle LSP cleanup
+         config = getattr(options, "zuban_lsp", None)
+         if config and not config.enabled:
+             return
+
+         if getattr(options, "no_zuban_lsp", False):
+             return
+
+         def cleanup_lsp_server() -> None:
+             """Cleanup function to gracefully stop LSP server if it was auto-started."""
+             try:
+                 from crackerjack.services.server_manager import (
+                     find_zuban_lsp_processes,
+                     stop_process,
+                 )
+
+                 lsp_processes = find_zuban_lsp_processes()
+                 if lsp_processes:
+                     for proc in lsp_processes:
+                         self.logger.debug(
+                             f"Stopping auto-started Zuban LSP server (PID: {proc['pid']})"
+                         )
+                         stop_process(proc["pid"])
+
+             except Exception as e:
+                 self.logger.debug(f"Error during LSP cleanup: {e}")
+
+         # Register the cleanup handler with the session
+         self.session.register_cleanup(cleanup_lsp_server)
+
      def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
          self.logger.info(
              "Starting complete workflow execution",
@@ -112,10 +285,13 @@ class WorkflowPipeline:
              package_path=str(self.pkg_path),
          )

+         # Display Zuban LSP server status
+         self._log_zuban_lsp_status()
+
      async def _execute_workflow_with_timing(
-         self, options: OptionsProtocol, start_time: float
+         self, options: OptionsProtocol, start_time: float, workflow_id: str
      ) -> bool:
-         success = await self._execute_workflow_phases(options)
+         success = await self._execute_workflow_phases(options, workflow_id)
          self.session.finalize_session(start_time, success)

          duration = time.time() - start_time
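
From the next hunk onward, each workflow phase runs inside `phase_monitor(workflow_id, name)`, imported earlier from the new performance_monitor service. Its implementation is not part of this diff; a minimal context manager consistent with how it is used (it yields a monitor exposing at least `record_sequential_op()`) could be sketched as:

    # Hypothetical sketch; the shipped version lives in
    # crackerjack/services/performance_monitor.py and likely records more.
    import time
    import typing as t
    from contextlib import contextmanager


    class PhaseMonitor:
        def __init__(self, workflow_id: str, phase: str) -> None:
            self.workflow_id = workflow_id
            self.phase = phase
            self.sequential_ops = 0

        def record_sequential_op(self) -> None:
            # Phases that run work serially call this to flag the bottleneck.
            self.sequential_ops += 1


    @contextmanager
    def phase_monitor(workflow_id: str, phase: str) -> t.Iterator[PhaseMonitor]:
        monitor = PhaseMonitor(workflow_id, phase)
        start = time.monotonic()
        try:
            yield monitor
        finally:
            elapsed = time.monotonic() - start
            # Stand-in for whatever aggregation the real service performs.
            print(f"[{workflow_id}] {phase}: {elapsed:.2f}s")
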
@@ -161,62 +337,127 @@ class WorkflowPipeline:
          )
          return False

-     async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
+     async def _execute_workflow_phases(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> bool:
          success = True
-         self.phases.run_configuration_phase(options)

-         # Code cleaning is now integrated into the quality phase
-         # to run after fast hooks but before comprehensive hooks
-         quality_success = await self._execute_quality_phase(options)
+         with phase_monitor(workflow_id, "configuration"):
+             config_success = self.phases.run_configuration_phase(options)
+             success = success and config_success
+
+         quality_success = await self._execute_quality_phase(options, workflow_id)
          if not quality_success:
              success = False
-             # Don't return early - continue to publishing/commit phases if requested
-             # This allows -p and -c flags to work even when quality checks fail
-             if not (options.publish or options.all or options.commit):
-                 # Only exit early if no publishing/commit operations are requested
+
+             if self._is_publishing_workflow(options):
                  return False

-         if not self.phases.run_publishing_phase(options):
+         if not await self._execute_publishing_workflow(options, workflow_id):
              success = False
-             self.session.fail_task("workflow", "Publishing failed")
              return False
-         if not self.phases.run_commit_phase(options):
+
+         if not await self._execute_commit_workflow(options, workflow_id):
              success = False

          return success

-     async def _execute_quality_phase(self, options: OptionsProtocol) -> bool:
+     def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
+         return bool(options.publish or options.all or options.commit)
+
+     async def _execute_publishing_workflow(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> bool:
+         if not options.publish and not options.all:
+             return True
+
+         with phase_monitor(workflow_id, "publishing"):
+             if not self.phases.run_publishing_phase(options):
+                 self.session.fail_task("workflow", "Publishing failed")
+                 return False
+             return True
+
+     async def _execute_commit_workflow(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> bool:
+         if not options.commit:
+             return True
+
+         with phase_monitor(workflow_id, "commit"):
+             if not self.phases.run_commit_phase(options):
+                 return False
+             return True
+
+     async def _execute_quality_phase(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> bool:
          if hasattr(options, "fast") and options.fast:
-             return self._run_fast_hooks_phase(options)
+             return await self._run_fast_hooks_phase_monitored(options, workflow_id)
          if hasattr(options, "comp") and options.comp:
-             return self._run_comprehensive_hooks_phase(options)
+             return await self._run_comprehensive_hooks_phase_monitored(
+                 options, workflow_id
+             )
          if getattr(options, "test", False):
-             return await self._execute_test_workflow(options)
-         return self._execute_standard_hooks_workflow(options)
+             return await self._execute_test_workflow(options, workflow_id)
+         return await self._execute_standard_hooks_workflow_monitored(
+             options, workflow_id
+         )

-     async def _execute_test_workflow(self, options: OptionsProtocol) -> bool:
+     async def _execute_test_workflow(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> bool:
          iteration = self._start_iteration_tracking(options)

-         if not self._run_initial_fast_hooks(options, iteration):
+         if not await self._execute_initial_phases(options, workflow_id, iteration):
              return False

-         # Run code cleaning after fast hooks but before comprehensive hooks
-         if getattr(options, "clean", False):
-             if not self._run_code_cleaning_phase(options):
-                 return False
-             # Run fast hooks again after cleaning for sanity check
-             if not self._run_post_cleaning_fast_hooks(options):
+         (
+             testing_passed,
+             comprehensive_passed,
+         ) = await self._run_main_quality_phases_async(options, workflow_id)
+
+         return await self._handle_workflow_completion(
+             options, iteration, testing_passed, comprehensive_passed, workflow_id
+         )
+
+     async def _execute_initial_phases(
+         self, options: OptionsProtocol, workflow_id: str, iteration: int
+     ) -> bool:
+         with phase_monitor(workflow_id, "fast_hooks") as monitor:
+             if not await self._run_initial_fast_hooks_async(
+                 options, iteration, monitor
+             ):
                  return False
-             self._mark_code_cleaning_complete()

-         testing_passed, comprehensive_passed = self._run_main_quality_phases(options)
+         return self._execute_optional_cleaning_phase(options)
+
+     def _execute_optional_cleaning_phase(self, options: OptionsProtocol) -> bool:
+         if not getattr(options, "clean", False):
+             return True
+
+         if not self._run_code_cleaning_phase(options):
+             return False
+
+         if not self._run_post_cleaning_fast_hooks(options):
+             return False
+
+         self._mark_code_cleaning_complete()
+         return True

+     async def _handle_workflow_completion(
+         self,
+         options: OptionsProtocol,
+         iteration: int,
+         testing_passed: bool,
+         comprehensive_passed: bool,
+         workflow_id: str = "unknown",
+     ) -> bool:
          if options.ai_agent:
              return await self._handle_ai_agent_workflow(
-                 options, iteration, testing_passed, comprehensive_passed
+                 options, iteration, testing_passed, comprehensive_passed, workflow_id
              )

-         return self._handle_standard_workflow(
+         return await self._handle_standard_workflow(
              options, iteration, testing_passed, comprehensive_passed
          )
@@ -234,9 +475,36 @@ class WorkflowPipeline:
              return False
          return True

-     def _run_main_quality_phases(self, options: OptionsProtocol) -> tuple[bool, bool]:
-         testing_passed = self._run_testing_phase(options)
-         comprehensive_passed = self._run_comprehensive_hooks_phase(options)
+     async def _run_main_quality_phases_async(
+         self, options: OptionsProtocol, workflow_id: str
+     ) -> tuple[bool, bool]:
+         testing_task = asyncio.create_task(
+             self._run_testing_phase_async(options, workflow_id)
+         )
+         comprehensive_task = asyncio.create_task(
+             self._run_comprehensive_hooks_phase_monitored(options, workflow_id)
+         )
+
+         results = await asyncio.gather(
+             testing_task, comprehensive_task, return_exceptions=True
+         )
+
+         testing_result, comprehensive_result = results
+
+         if isinstance(testing_result, Exception):
+             self.logger.error(f"Testing phase failed with exception: {testing_result}")
+             testing_passed = False
+         else:
+             testing_passed = bool(testing_result)
+
+         if isinstance(comprehensive_result, Exception):
+             self.logger.error(
+                 f"Comprehensive hooks failed with exception: {comprehensive_result}"
+             )
+             comprehensive_passed = False
+         else:
+             comprehensive_passed = bool(comprehensive_result)
+
          return testing_passed, comprehensive_passed

      async def _handle_ai_agent_workflow(
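
This is the release's main concurrency change: testing and comprehensive hooks, previously run back to back, now run as parallel tasks. `return_exceptions=True` keeps one task's crash from cancelling its sibling, and each raw result is then normalized to a bool. The same pattern in isolation, with stand-in coroutines rather than crackerjack's phase methods:

    import asyncio


    async def run_tests() -> bool:
        await asyncio.sleep(0.1)
        return True


    async def run_hooks() -> bool:
        await asyncio.sleep(0.1)
        raise RuntimeError("hook runner crashed")


    async def main() -> tuple[bool, ...]:
        # A raising task yields its exception as a result instead of
        # propagating it and cancelling the other task.
        results = await asyncio.gather(run_tests(), run_hooks(), return_exceptions=True)
        return tuple(not isinstance(r, BaseException) and bool(r) for r in results)


    print(asyncio.run(main()))  # (True, False)
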
@@ -245,77 +513,101 @@ class WorkflowPipeline:
          iteration: int,
          testing_passed: bool,
          comprehensive_passed: bool,
+         workflow_id: str = "unknown",
      ) -> bool:
-         # Check security gates for publishing operations
+         if not await self._process_security_gates(options):
+             return False
+
+         needs_ai_fixing = self._determine_ai_fixing_needed(
+             testing_passed, comprehensive_passed, bool(options.publish or options.all)
+         )
+
+         if needs_ai_fixing:
+             return await self._execute_ai_fixing_workflow(options, iteration)
+
+         return self._finalize_ai_workflow_success(
+             options, iteration, testing_passed, comprehensive_passed
+         )
+
+     async def _process_security_gates(self, options: OptionsProtocol) -> bool:
          publishing_requested, security_blocks = (
              self._check_security_gates_for_publishing(options)
          )

-         if publishing_requested and security_blocks:
-             # Try AI fixing for security issues, then re-check
-             security_fix_result = await self._handle_security_gate_failure(
-                 options, allow_ai_fixing=True
-             )
-             if not security_fix_result:
-                 return False
-             # If AI fixing resolved security issues, continue with normal flow
+         if not (publishing_requested and security_blocks):
+             return True

-         # Determine if we need AI fixing based on publishing requirements
-         needs_ai_fixing = self._determine_ai_fixing_needed(
-             testing_passed, comprehensive_passed, publishing_requested
+         security_fix_result = await self._handle_security_gate_failure(
+             options, allow_ai_fixing=True
          )
+         return security_fix_result

-         if needs_ai_fixing:
-             success = await self._run_ai_agent_fixing_phase(options)
-             if self._should_debug():
-                 self.debugger.log_iteration_end(iteration, success)
-             return success
+     async def _execute_ai_fixing_workflow(
+         self, options: OptionsProtocol, iteration: int
+     ) -> bool:
+         success = await self._run_ai_agent_fixing_phase(options)
+         if self._should_debug():
+             self.debugger.log_iteration_end(iteration, success)
+         return success
+
+     def _finalize_ai_workflow_success(
+         self,
+         options: OptionsProtocol,
+         iteration: int,
+         testing_passed: bool,
+         comprehensive_passed: bool,
+     ) -> bool:
+         publishing_requested = bool(options.publish or options.all)

-         # Determine final success based on publishing requirements
          final_success = self._determine_workflow_success(
-             testing_passed,
-             comprehensive_passed,
-             publishing_requested,
-             workflow_type="ai",
+             testing_passed, comprehensive_passed, publishing_requested
          )

-         # Show security audit warning for partial success in publishing workflows
-         if (
-             publishing_requested
-             and final_success
-             and not (testing_passed and comprehensive_passed)
-         ):
-             self._show_security_audit_warning()
+         self._show_partial_success_warning_if_needed(
+             publishing_requested, final_success, testing_passed, comprehensive_passed
+         )

          if self._should_debug():
              self.debugger.log_iteration_end(iteration, final_success)
+
          return final_success

-     def _handle_standard_workflow(
+     def _show_partial_success_warning_if_needed(
+         self,
+         publishing_requested: bool,
+         final_success: bool,
+         testing_passed: bool,
+         comprehensive_passed: bool,
+     ) -> None:
+         should_show_warning = (
+             publishing_requested
+             and final_success
+             and not (testing_passed and comprehensive_passed)
+         )
+
+         if should_show_warning:
+             self._show_security_audit_warning()
+
+     async def _handle_standard_workflow(
          self,
          options: OptionsProtocol,
          iteration: int,
         testing_passed: bool,
          comprehensive_passed: bool,
      ) -> bool:
-         # Check security gates for publishing operations
          publishing_requested, security_blocks = (
              self._check_security_gates_for_publishing(options)
          )

          if publishing_requested and security_blocks:
-             # Standard workflow cannot bypass security gates
-             return self._handle_security_gate_failure(options, allow_ai_fixing=False)
+             return await self._handle_security_gate_failure(options)

-         # Determine success based on publishing requirements
          success = self._determine_workflow_success(
              testing_passed,
              comprehensive_passed,
              publishing_requested,
-             workflow_type="standard",
          )

-         # Show security audit warning for partial success in publishing workflows
          if (
              publishing_requested
              and success
@@ -324,10 +616,9 @@ class WorkflowPipeline:
              self._show_security_audit_warning()
          elif publishing_requested and not success:
              self.console.print(
-                 "[red]❌ Both tests and comprehensive hooks failed - cannot proceed to publishing[/red]"
+                 "[red]❌ Quality checks failed - cannot proceed to publishing[/red]"
              )

-         # Show verbose failure details if requested
          if not success and getattr(options, "verbose", False):
              self._show_verbose_failure_details(testing_passed, comprehensive_passed)

@@ -378,7 +669,6 @@ class WorkflowPipeline:
          self._mcp_state_manager.update_stage_status(stage, status)

      def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
-         """Run code cleaning phase after fast hooks but before comprehensive hooks."""
          self.console.print("\n[bold blue]🧹 Running Code Cleaning Phase...[/bold blue]")

          success = self.phases.run_cleaning_phase(options)
@@ -391,7 +681,6 @@ class WorkflowPipeline:
          return success

      def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
-         """Run fast hooks again after code cleaning for sanity check."""
          self.console.print(
              "\n[bold cyan]🔍 Running Post-Cleaning Fast Hooks Sanity Check...[/bold cyan]"
          )
@@ -406,11 +695,9 @@ class WorkflowPipeline:
          return success

      def _has_code_cleaning_run(self) -> bool:
-         """Check if code cleaning has already run in this workflow."""
          return getattr(self, "_code_cleaning_complete", False)

      def _mark_code_cleaning_complete(self) -> None:
-         """Mark code cleaning as complete for this workflow."""
          self._code_cleaning_complete = True

      def _handle_test_failures(self) -> None:
@@ -443,24 +730,21 @@ class WorkflowPipeline:
      def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
          self._update_hooks_status_running()

-         # Run fast hooks first
          fast_hooks_success = self._run_fast_hooks_phase(options)
          if not fast_hooks_success:
              self._handle_hooks_completion(False)
              return False

-         # Run code cleaning after fast hooks but before comprehensive hooks
          if getattr(options, "clean", False):
              if not self._run_code_cleaning_phase(options):
                  self._handle_hooks_completion(False)
                  return False
-             # Run fast hooks again after cleaning for sanity check
+
              if not self._run_post_cleaning_fast_hooks(options):
                  self._handle_hooks_completion(False)
                  return False
              self._mark_code_cleaning_complete()

-         # Run comprehensive hooks
          comprehensive_success = self._run_comprehensive_hooks_phase(options)

          hooks_success = fast_hooks_success and comprehensive_success
@@ -494,36 +778,59 @@ class WorkflowPipeline:
              self._mcp_state_manager.update_stage_status("comprehensive", "completed")

      async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
-         self._update_mcp_status("ai_fixing", "running")
-         self.logger.info("Starting AI agent fixing phase")
-         self._log_debug_phase_start()
+         self._initialize_ai_fixing_phase(options)

          try:
-             # If code cleaning is enabled and hasn't run yet, run it first
-             # to provide cleaner, more standardized code for the AI agents
-             if getattr(options, "clean", False) and not self._has_code_cleaning_run():
-                 self.console.print(
-                     "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
-                 )
-                 if self._run_code_cleaning_phase(options):
-                     # Run fast hooks sanity check after cleaning
-                     self._run_post_cleaning_fast_hooks(options)
-                     self._mark_code_cleaning_complete()
+             self._prepare_ai_fixing_environment(options)

-             agent_coordinator = self._setup_agent_coordinator()
-             issues = await self._collect_issues_from_failures()
+             agent_coordinator, issues = await self._setup_ai_fixing_workflow()

              if not issues:
                  return self._handle_no_issues_found()

-             self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
-             fix_result = await agent_coordinator.handle_issues(issues)
-
-             return await self._process_fix_results(options, fix_result)
+             return await self._execute_ai_fixes(options, agent_coordinator, issues)

          except Exception as e:
              return self._handle_fixing_phase_error(e)

+     def _initialize_ai_fixing_phase(self, options: OptionsProtocol) -> None:
+         self._update_mcp_status("ai_fixing", "running")
+         self.logger.info("Starting AI agent fixing phase")
+         self._log_debug_phase_start()
+
+     def _prepare_ai_fixing_environment(self, options: OptionsProtocol) -> None:
+         should_run_cleaning = (
+             getattr(options, "clean", False) and not self._has_code_cleaning_run()
+         )
+
+         if not should_run_cleaning:
+             return
+
+         self.console.print(
+             "\n[bold yellow]🤖 AI agents recommend running code cleaning first for better results...[/bold yellow]"
+         )
+
+         if self._run_code_cleaning_phase(options):
+             self._run_post_cleaning_fast_hooks(options)
+             self._mark_code_cleaning_complete()
+
+     async def _setup_ai_fixing_workflow(
+         self,
+     ) -> tuple[AgentCoordinator, list[t.Any]]:
+         agent_coordinator = self._setup_agent_coordinator()
+         issues = await self._collect_issues_from_failures()
+         return agent_coordinator, issues
+
+     async def _execute_ai_fixes(
+         self,
+         options: OptionsProtocol,
+         agent_coordinator: AgentCoordinator,
+         issues: list[t.Any],
+     ) -> bool:
+         self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
+         fix_result = await agent_coordinator.handle_issues(issues)
+         return await self._process_fix_results(options, fix_result)
+
      def _log_debug_phase_start(self) -> None:
          if self._should_debug():
              self.debugger.log_workflow_phase(
@@ -631,12 +938,10 @@ class WorkflowPipeline:

          verification_success = True

-         # Verify test fixes
          if self._should_verify_test_fixes(fix_result.fixes_applied):
              if not await self._verify_test_fixes(options):
                  verification_success = False

-         # Verify hook fixes
          if self._should_verify_hook_fixes(fix_result.fixes_applied):
              if not await self._verify_hook_fixes(options):
                  verification_success = False
@@ -645,11 +950,9 @@ class WorkflowPipeline:
          return verification_success

      def _should_verify_test_fixes(self, fixes_applied: list[str]) -> bool:
-         """Check if test fixes need verification."""
          return any("test" in fix.lower() for fix in fixes_applied)

      async def _verify_test_fixes(self, options: OptionsProtocol) -> bool:
-         """Verify test fixes by re-running tests."""
          self.logger.info("Re-running tests to verify test fixes")
          test_success = self.phases.run_testing_phase(options)
          if not test_success:
@@ -657,7 +960,6 @@ class WorkflowPipeline:
          return test_success

      def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
-         """Check if hook fixes need verification."""
          hook_fixes = [
              f
              for f in fixes_applied
@@ -668,7 +970,6 @@ class WorkflowPipeline:
          return bool(hook_fixes)

      async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
-         """Verify hook fixes by re-running comprehensive hooks."""
          self.logger.info("Re-running comprehensive hooks to verify hook fixes")
          hook_success = self.phases.run_comprehensive_hooks_only(options)
          if not hook_success:
@@ -676,7 +977,6 @@ class WorkflowPipeline:
          return hook_success

      def _log_verification_result(self, verification_success: bool) -> None:
-         """Log the final verification result."""
          if verification_success:
              self.logger.info("All AI agent fixes verified successfully")
          else:
@@ -833,7 +1133,6 @@ class WorkflowPipeline:
          )

      def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
-         """Parse hook error details and create specific issues."""
          issues: list[Issue] = []

          if task_id == "comprehensive_hooks":
@@ -844,11 +1143,9 @@ class WorkflowPipeline:
          return issues

      def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
-         """Parse comprehensive hook error messages and create specific issues."""
          issues: list[Issue] = []
          error_lower = error_msg.lower()

-         # Check each error type
          complexity_issue = self._check_complexity_error(error_lower)
          if complexity_issue:
              issues.append(complexity_issue)
@@ -876,7 +1173,6 @@ class WorkflowPipeline:
          return issues

      def _check_complexity_error(self, error_lower: str) -> Issue | None:
-         """Check for complexity errors and create issue if found."""
          if "complexipy" in error_lower or "c901" in error_lower:
              return Issue(
                  id="complexity_violation",
@@ -888,7 +1184,6 @@ class WorkflowPipeline:
          return None

      def _check_type_error(self, error_lower: str) -> Issue | None:
-         """Check for type errors and create issue if found."""
          if "pyright" in error_lower:
              return Issue(
                  id="pyright_type_error",
@@ -900,7 +1195,6 @@ class WorkflowPipeline:
          return None

      def _check_security_error(self, error_lower: str) -> Issue | None:
-         """Check for security errors and create issue if found."""
          if "bandit" in error_lower:
              return Issue(
                  id="bandit_security_issue",
@@ -912,7 +1206,6 @@ class WorkflowPipeline:
          return None

      def _check_performance_error(self, error_lower: str) -> Issue | None:
-         """Check for performance errors and create issue if found."""
          if "refurb" in error_lower:
              return Issue(
                  id="refurb_quality_issue",
@@ -924,7 +1217,6 @@ class WorkflowPipeline:
          return None

      def _check_dead_code_error(self, error_lower: str) -> Issue | None:
-         """Check for dead code errors and create issue if found."""
          if "vulture" in error_lower:
              return Issue(
                  id="vulture_dead_code",
@@ -936,7 +1228,6 @@ class WorkflowPipeline:
          return None

      def _check_regex_validation_error(self, error_lower: str) -> Issue | None:
-         """Check for regex validation errors and create issue if found."""
          regex_keywords = ("raw regex", "regex pattern", r"\g<", "replacement")
          if "validate-regex-patterns" in error_lower or any(
              keyword in error_lower for keyword in regex_keywords
@@ -951,7 +1242,6 @@ class WorkflowPipeline:
          return None

      def _create_fast_hook_issue(self) -> Issue:
-         """Create an issue for fast hook errors."""
          return Issue(
              id="fast_hooks_formatting",
              type=IssueType.FORMATTING,
@@ -978,10 +1268,8 @@ class WorkflowPipeline:
          return issues

      def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
-         """Classify an issue string to determine its type and priority."""
          issue_lower = issue_str.lower()

-         # Check high-priority issues first
          if self._is_type_error(issue_lower):
              return IssueType.TYPE_ERROR, Priority.HIGH
          if self._is_security_issue(issue_lower):
@@ -991,7 +1279,6 @@ class WorkflowPipeline:
          if self._is_regex_validation_issue(issue_lower):
              return IssueType.REGEX_VALIDATION, Priority.HIGH

-         # Check medium-priority issues
          if self._is_dead_code_issue(issue_lower):
              return IssueType.DEAD_CODE, Priority.MEDIUM
          if self._is_performance_issue(issue_lower):
@@ -999,30 +1286,25 @@ class WorkflowPipeline:
          if self._is_import_error(issue_lower):
              return IssueType.IMPORT_ERROR, Priority.MEDIUM

-         # Default to formatting issue
          return IssueType.FORMATTING, Priority.MEDIUM

      def _is_type_error(self, issue_lower: str) -> bool:
-         """Check if issue is related to type errors."""
          return any(
              keyword in issue_lower for keyword in ("type", "annotation", "pyright")
          )

      def _is_security_issue(self, issue_lower: str) -> bool:
-         """Check if issue is related to security."""
          return any(
              keyword in issue_lower for keyword in ("security", "bandit", "hardcoded")
          )

      def _is_complexity_issue(self, issue_lower: str) -> bool:
-         """Check if issue is related to code complexity."""
          return any(
              keyword in issue_lower
              for keyword in ("complexity", "complexipy", "c901", "too complex")
          )

      def _is_regex_validation_issue(self, issue_lower: str) -> bool:
-         """Check if issue is related to regex validation."""
          return any(
              keyword in issue_lower
              for keyword in (
@@ -1035,17 +1317,14 @@ class WorkflowPipeline:
          )

      def _is_dead_code_issue(self, issue_lower: str) -> bool:
-         """Check if issue is related to dead code."""
          return any(keyword in issue_lower for keyword in ("unused", "dead", "vulture"))

      def _is_performance_issue(self, issue_lower: str) -> bool:
-         """Check if issue is related to performance."""
          return any(
              keyword in issue_lower for keyword in ("performance", "refurb", "furb")
          )

      def _is_import_error(self, issue_lower: str) -> bool:
-         """Check if issue is related to import errors."""
          return any(keyword in issue_lower for keyword in ("import", "creosote"))

      def _log_failure_counts_if_debugging(
  def _log_failure_counts_if_debugging(
@@ -1058,41 +1337,25 @@ class WorkflowPipeline:
1058
1337
  def _check_security_gates_for_publishing(
1059
1338
  self, options: OptionsProtocol
1060
1339
  ) -> tuple[bool, bool]:
1061
- """Check if publishing is requested and if security gates block it.
1062
-
1063
- Returns:
1064
- tuple[bool, bool]: (publishing_requested, security_blocks_publishing)
1065
- """
1066
1340
  publishing_requested = bool(options.publish or options.all or options.commit)
1067
1341
 
1068
1342
  if not publishing_requested:
1069
1343
  return False, False
1070
1344
 
1071
- # Check security gates for publishing operations
1072
1345
  try:
1073
1346
  security_blocks_publishing = self._check_security_critical_failures()
1074
1347
  return publishing_requested, security_blocks_publishing
1075
1348
  except Exception as e:
1076
- # Fail securely if security check fails
1077
1349
  self.logger.warning(f"Security check failed: {e} - blocking publishing")
1078
1350
  self.console.print(
1079
1351
  "[red]🔒 SECURITY CHECK FAILED: Unable to verify security status - publishing BLOCKED[/red]"
1080
1352
  )
1081
- # Return True for security_blocks to fail securely
1353
+
1082
1354
  return publishing_requested, True
1083
1355
 
1084
1356
  async def _handle_security_gate_failure(
1085
1357
  self, options: OptionsProtocol, allow_ai_fixing: bool = False
1086
1358
  ) -> bool:
1087
- """Handle security gate failures with optional AI fixing.
1088
-
1089
- Args:
1090
- options: Workflow options
1091
- allow_ai_fixing: Whether AI fixing is allowed for security issues
1092
-
1093
- Returns:
1094
- bool: True if security issues resolved, False if still blocked
1095
- """
1096
1359
  self.console.print(
1097
1360
  "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
1098
1361
  )
@@ -1105,10 +1368,8 @@ class WorkflowPipeline:
                  "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
              )

-             # Try AI fixing for security issues
              ai_fix_success = await self._run_ai_agent_fixing_phase(options)
              if ai_fix_success:
-                 # Re-check security after AI fixing
                  try:
                      security_still_blocks = self._check_security_critical_failures()
                      if not security_still_blocks:
@@ -1128,7 +1389,6 @@ class WorkflowPipeline:
                      return False
              return False
          else:
-             # Standard workflow cannot bypass security gates
              self.console.print(
                  "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
              )
@@ -1140,33 +1400,25 @@ class WorkflowPipeline:
          comprehensive_passed: bool,
          publishing_requested: bool,
      ) -> bool:
-         """Determine if AI fixing is needed based on test results and publishing requirements."""
          if publishing_requested:
-             # For publish/commit workflows, only trigger AI fixing if both fail
-             return not testing_passed and not comprehensive_passed
-         else:
-             # For regular workflows, trigger AI fixing if either fails
              return not testing_passed or not comprehensive_passed

+         return not testing_passed or not comprehensive_passed
+
      def _determine_workflow_success(
          self,
          testing_passed: bool,
          comprehensive_passed: bool,
          publishing_requested: bool,
-         workflow_type: str,
      ) -> bool:
-         """Determine workflow success based on test results and workflow type."""
          if publishing_requested:
-             # For publishing workflows, either test or comprehensive passing is sufficient
-             return testing_passed or comprehensive_passed
-         else:
-             # For regular workflows, both must pass
              return testing_passed and comprehensive_passed

+         return testing_passed and comprehensive_passed
+
      def _show_verbose_failure_details(
          self, testing_passed: bool, comprehensive_passed: bool
      ) -> None:
-         """Show detailed failure information in verbose mode."""
          self.console.print(
              f"[yellow]⚠️ Quality phase results - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
          )
@@ -1178,105 +1430,89 @@ class WorkflowPipeline:
1178
1430
  )
1179
1431
 
1180
1432
  def _check_security_critical_failures(self) -> bool:
1181
- """Check if any security-critical hooks have failed.
1182
-
1183
- Returns:
1184
- True if security-critical hooks failed and block publishing
1185
- """
1186
1433
  try:
1187
1434
  from crackerjack.security.audit import SecurityAuditor
1188
1435
 
1189
1436
  auditor = SecurityAuditor()
1190
1437
 
1191
- # Get hook results - we need to be careful not to re-run hooks
1192
- # Instead, check the session tracker for recent failures
1193
1438
  fast_results = self._get_recent_fast_hook_results()
1194
1439
  comprehensive_results = self._get_recent_comprehensive_hook_results()
1195
1440
 
1196
- # Generate security audit report
1197
1441
  audit_report = auditor.audit_hook_results(
1198
1442
  fast_results, comprehensive_results
1199
1443
  )
1200
1444
 
1201
- # Store audit report for later use
1202
1445
  self._last_security_audit = audit_report
1203
1446
 
1204
- # Block publishing if critical failures exist
1205
1447
  return audit_report.has_critical_failures
1206
1448
 
1207
1449
  except Exception as e:
1208
- # Fail securely - if we can't determine security status, block publishing
1209
1450
  self.logger.warning(f"Security audit failed: {e} - failing securely")
1210
- # Re-raise the exception so it can be caught by the calling method
1451
+
1211
1452
  raise
1212
1453
 
1213
1454
  def _get_recent_fast_hook_results(self) -> list[t.Any]:
1214
- """Get recent fast hook results from session tracker."""
1215
- results = []
1455
+ results = self._extract_hook_results_from_session("fast_hooks")
1456
+
1457
+ if not results:
1458
+ results = self._create_mock_hook_results(["gitleaks"])
1216
1459
 
1217
- # Try to get results from session tracker
1218
- if hasattr(self.session, "session_tracker") and self.session.session_tracker:
1219
- for task_id, task_data in self.session.session_tracker.tasks.items():
1220
- if task_id == "fast_hooks" and hasattr(task_data, "hook_results"):
1460
+ return results
1461
+
1462
+ def _extract_hook_results_from_session(self, hook_type: str) -> list[t.Any]:
1463
+ results: list[t.Any] = []
1464
+
1465
+ session_tracker = self._get_session_tracker()
1466
+ if not session_tracker:
1467
+ return results
1468
+
1469
+ for task_id, task_data in session_tracker.tasks.items():
1470
+ if task_id == hook_type and hasattr(task_data, "hook_results"):
1471
+ if task_data.hook_results:
1221
1472
  results.extend(task_data.hook_results)
1222
1473
 
1223
- # If no results from session, create mock failed results for critical hooks
1224
- # This ensures we fail securely when we can't determine actual status
1225
- if not results:
1226
- critical_fast_hooks = ["gitleaks"]
1227
- for hook_name in critical_fast_hooks:
1228
- # Create a mock result that appears to have failed
1229
- # This will trigger security blocking if we can't determine actual status
1230
- mock_result = type(
1231
- "MockResult",
1232
- (),
1233
- {
1234
- "name": hook_name,
1235
- "status": "unknown", # Unknown status = fail securely
1236
- "output": "Unable to determine hook status",
1237
- },
1238
- )()
1239
- results.append(mock_result)
1474
+ return results
1475
+
1476
+ def _get_session_tracker(self) -> t.Any | None:
1477
+ return (
1478
+ getattr(self.session, "session_tracker", None)
1479
+ if hasattr(self.session, "session_tracker")
1480
+ else None
1481
+ )
1482
+
1483
+ def _create_mock_hook_results(self, critical_hooks: list[str]) -> list[t.Any]:
1484
+ results: list[t.Any] = []
1485
+
1486
+ for hook_name in critical_hooks:
1487
+ mock_result = self._create_mock_hook_result(hook_name)
1488
+ results.append(mock_result)
1240
1489
 
1241
1490
  return results
1242
1491
 
1492
+ def _create_mock_hook_result(self, hook_name: str) -> t.Any:
1493
+ return type(
1494
+ "MockResult",
1495
+ (),
1496
+ {
1497
+ "name": hook_name,
1498
+ "status": "unknown",
1499
+ "output": "Unable to determine hook status",
1500
+ },
1501
+ )()
1502
+
     def _get_recent_comprehensive_hook_results(self) -> list[t.Any]:
-        """Get recent comprehensive hook results from session tracker."""
-        results = []
-
-        # Try to get results from session tracker
-        if hasattr(self.session, "session_tracker") and self.session.session_tracker:
-            for task_id, task_data in self.session.session_tracker.tasks.items():
-                if task_id == "comprehensive_hooks" and hasattr(
-                    task_data, "hook_results"
-                ):
-                    results.extend(task_data.hook_results)
+        results = self._extract_hook_results_from_session("comprehensive_hooks")
 
-        # If no results from session, create mock failed results for critical hooks
         if not results:
-            critical_comprehensive_hooks = ["bandit", "pyright"]
-            for hook_name in critical_comprehensive_hooks:
-                mock_result = type(
-                    "MockResult",
-                    (),
-                    {
-                        "name": hook_name,
-                        "status": "unknown",  # Unknown status = fail securely
-                        "output": "Unable to determine hook status",
-                    },
-                )()
-                results.append(mock_result)
+            results = self._create_mock_hook_results(["bandit", "pyright"])
 
         return results
 
     def _is_security_critical_failure(self, result: t.Any) -> bool:
-        """Check if a hook result represents a security-critical failure."""
-
-        # List of security-critical hook names (fail-safe approach)
         security_critical_hooks = {
-            "bandit",  # Security vulnerability scanning
-            "pyright",  # Type safety prevents security holes
-            "gitleaks",  # Secret detection
+            "bandit",
+            "pyright",
+            "gitleaks",
         }
 
         hook_name = getattr(result, "name", "").lower()
@@ -1289,8 +1525,6 @@ class WorkflowPipeline:
         return hook_name in security_critical_hooks and is_failed
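This hunk elides the lines that compute `is_failed` (old lines 1283-1288), so the exact pass/fail test is not visible here. A hedged sketch of the predicate's shape, assuming a failure is anything that did not clearly pass:

```python
# Hedged sketch of the security gate; the real is_failed computation is
# elided from this hunk, so treating any non-"passed" status as a failure
# is an assumption.
SECURITY_CRITICAL_HOOKS = {"bandit", "pyright", "gitleaks"}

def is_security_critical_failure(result: object) -> bool:
    hook_name = getattr(result, "name", "").lower()
    status = str(getattr(result, "status", "unknown")).lower()
    is_failed = status != "passed"
    return hook_name in SECURITY_CRITICAL_HOOKS and is_failed
```

Under this reading, the `status="unknown"` mock results from the previous hunks are caught by the same gate, which is what makes the fallback fail-secure.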
 
     def _show_security_audit_warning(self) -> None:
-        """Show security audit warning when proceeding with partial success."""
-        # Use stored audit report if available
         audit_report = getattr(self, "_last_security_audit", None)
 
         if audit_report:
@@ -1298,23 +1532,19 @@ class WorkflowPipeline:
                 "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
             )
 
-            # Show security status
             for warning in audit_report.security_warnings:
                 if "CRITICAL" in warning:
-                    # This shouldn't happen if we're showing warnings, but fail-safe
                     self.console.print(f"[red]{warning}[/red]")
                 elif "HIGH" in warning:
                     self.console.print(f"[yellow]{warning}[/yellow]")
                 else:
                     self.console.print(f"[blue]{warning}[/blue]")
 
-            # Show recommendations
             if audit_report.recommendations:
-                self.console.print("[bold]Security Recommendations:[/bold]")
-                for rec in audit_report.recommendations[:3]:  # Show top 3
+                self.console.print("[bold]Security Recommendations: [/bold]")
+                for rec in audit_report.recommendations[:3]:
                     self.console.print(f"[dim]{rec}[/dim]")
         else:
-            # Fallback if no audit report available
             self.console.print(
                 "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
             )
@@ -1325,6 +1555,92 @@ class WorkflowPipeline:
                 "[yellow]⚠️ Some non-critical quality checks failed - consider reviewing before production deployment[/yellow]"
             )
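The severity routing above is plain substring matching over Rich markup strings. A self-contained sketch of the same idea, using the real `rich` console API (the warning texts are invented examples):

```python
# Minimal sketch of keyword-based severity styling with Rich; the warning
# strings below are made-up examples, not crackerjack output.
from rich.console import Console

console = Console()

def show_warning(warning: str) -> None:
    # First matching severity keyword wins; everything else renders blue.
    if "CRITICAL" in warning:
        console.print(f"[red]{warning}[/red]")
    elif "HIGH" in warning:
        console.print(f"[yellow]{warning}[/yellow]")
    else:
        console.print(f"[blue]{warning}[/blue]")

show_warning("HIGH: bandit flagged 2 medium-severity findings")
show_warning("INFO: gitleaks found no secrets")
```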
 
+    async def _run_initial_fast_hooks_async(
+        self, options: OptionsProtocol, iteration: int, monitor: t.Any
+    ) -> bool:
+        monitor.record_sequential_op()
+        fast_hooks_passed = self._run_fast_hooks_phase(options)
+        if not fast_hooks_passed:
+            if options.ai_agent and self._should_debug():
+                self.debugger.log_iteration_end(iteration, False)
+            return False
+        return True
+
+    async def _run_fast_hooks_phase_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        with phase_monitor(workflow_id, "fast_hooks") as monitor:
+            monitor.record_sequential_op()
+            return self._run_fast_hooks_phase(options)
+
+    async def _run_comprehensive_hooks_phase_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        with phase_monitor(workflow_id, "comprehensive_hooks") as monitor:
+            monitor.record_sequential_op()
+            return self._run_comprehensive_hooks_phase(options)
+
+    async def _run_testing_phase_async(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        with phase_monitor(workflow_id, "testing") as monitor:
+            monitor.record_sequential_op()
+            return self._run_testing_phase(options)
+
+    async def _execute_standard_hooks_workflow_monitored(
+        self, options: OptionsProtocol, workflow_id: str
+    ) -> bool:
+        with phase_monitor(workflow_id, "hooks") as monitor:
+            self._update_hooks_status_running()
+
+            fast_hooks_success = self._execute_monitored_fast_hooks_phase(
+                options, monitor
+            )
+            if not fast_hooks_success:
+                self._handle_hooks_completion(False)
+                return False
+
+            if not self._execute_monitored_cleaning_phase(options):
+                self._handle_hooks_completion(False)
+                return False
+
+            comprehensive_success = self._execute_monitored_comprehensive_phase(
+                options, monitor
+            )
+
+            hooks_success = fast_hooks_success and comprehensive_success
+            self._handle_hooks_completion(hooks_success)
+            return hooks_success
+
+    def _execute_monitored_fast_hooks_phase(
+        self, options: OptionsProtocol, monitor: t.Any
+    ) -> bool:
+        fast_hooks_success = self._run_fast_hooks_phase(options)
+        if fast_hooks_success:
+            monitor.record_sequential_op()
+        return fast_hooks_success
+
+    def _execute_monitored_cleaning_phase(self, options: OptionsProtocol) -> bool:
+        if not getattr(options, "clean", False):
+            return True
+
+        if not self._run_code_cleaning_phase(options):
+            return False
+
+        if not self._run_post_cleaning_fast_hooks(options):
+            return False
+
+        self._mark_code_cleaning_complete()
+        return True
+
+    def _execute_monitored_comprehensive_phase(
+        self, options: OptionsProtocol, monitor: t.Any
+    ) -> bool:
+        comprehensive_success = self._run_comprehensive_hooks_phase(options)
+        if comprehensive_success:
+            monitor.record_sequential_op()
+        return comprehensive_success
+
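All of the new monitored wrappers follow one pattern: enter `phase_monitor(workflow_id, phase)`, call `record_sequential_op()`, and delegate to the existing synchronous phase method. The monitor itself is not shown in this diff; a minimal sketch of a compatible context manager (illustrative only, not the actual implementation in crackerjack's performance monitoring):

```python
# Illustrative stand-in for phase_monitor; the real one lives elsewhere in
# the package and records richer metrics.
import time
import typing as t
from contextlib import contextmanager

class _PhaseMonitor:
    def __init__(self, workflow_id: str, phase: str) -> None:
        self.workflow_id = workflow_id
        self.phase = phase
        self.sequential_ops = 0

    def record_sequential_op(self) -> None:
        self.sequential_ops += 1

@contextmanager
def phase_monitor(workflow_id: str, phase: str) -> t.Iterator[_PhaseMonitor]:
    monitor = _PhaseMonitor(workflow_id, phase)
    start = time.perf_counter()
    try:
        yield monitor
    finally:
        elapsed = time.perf_counter() - start
        print(f"{workflow_id}/{phase}: {monitor.sequential_ops} ops in {elapsed:.3f}s")
```

Note that the wrappers are declared `async` yet delegate to synchronous phase methods, so a long-running phase still blocks the event loop; the `async` signatures mainly let them compose with the async workflow orchestration.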
 
 
 class WorkflowOrchestrator:
     def __init__(
@@ -1352,7 +1668,6 @@ class WorkflowOrchestrator:
             TestManagerProtocol,
         )
 
-        # Initialize logging first so container creation respects log levels
         self._initialize_logging()
 
         self.logger = get_logger("crackerjack.orchestrator")
@@ -1393,13 +1708,11 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)
 
-        # Set log level based on debug flag only - verbose should not enable DEBUG logs
         log_level = "DEBUG" if self.debug else "INFO"
         setup_structured_logging(
             level=log_level, json_output=False, log_file=debug_log_file
         )
 
-        # Use a temporary logger for the initialization message
         temp_logger = get_logger("crackerjack.orchestrator.init")
         temp_logger.debug(
             "Structured logging initialized",
@@ -1446,7 +1759,12 @@ class WorkflowOrchestrator:
         return self.phases.run_configuration_phase(options)
 
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
-        return await self.pipeline.run_complete_workflow(options)
+        result: bool = await self.pipeline.run_complete_workflow(options)
+        return result
+
+    def run_complete_workflow_sync(self, options: OptionsProtocol) -> bool:
+        """Sync wrapper for run_complete_workflow."""
+        return asyncio.run(self.run_complete_workflow(options))
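One caveat worth noting on the new sync wrapper: `asyncio.run()` spins up a fresh event loop and raises `RuntimeError` if a loop is already running in the calling thread, so the wrapper is only safe from plain synchronous code. A small usage sketch (the coroutine body is a stand-in for the real pipeline call):

```python
# Usage sketch for a sync wrapper over an async workflow; the coroutine body
# here is a placeholder, not crackerjack's pipeline.
import asyncio

async def run_complete_workflow(options: object) -> bool:
    await asyncio.sleep(0)  # placeholder for real async work
    return True

def run_complete_workflow_sync(options: object) -> bool:
    # Raises RuntimeError if called from inside an already-running event loop.
    return asyncio.run(run_complete_workflow(options))

if __name__ == "__main__":
    assert run_complete_workflow_sync(object()) is True
```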
 
     def _cleanup_resources(self) -> None:
         self.session.cleanup_resources()