crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (198)
  1. crackerjack/__main__.py +1350 -34
  2. crackerjack/adapters/__init__.py +17 -0
  3. crackerjack/adapters/lsp_client.py +358 -0
  4. crackerjack/adapters/rust_tool_adapter.py +194 -0
  5. crackerjack/adapters/rust_tool_manager.py +193 -0
  6. crackerjack/adapters/skylos_adapter.py +231 -0
  7. crackerjack/adapters/zuban_adapter.py +560 -0
  8. crackerjack/agents/base.py +7 -3
  9. crackerjack/agents/coordinator.py +271 -33
  10. crackerjack/agents/documentation_agent.py +9 -15
  11. crackerjack/agents/dry_agent.py +3 -15
  12. crackerjack/agents/formatting_agent.py +1 -1
  13. crackerjack/agents/import_optimization_agent.py +36 -180
  14. crackerjack/agents/performance_agent.py +17 -98
  15. crackerjack/agents/performance_helpers.py +7 -31
  16. crackerjack/agents/proactive_agent.py +1 -3
  17. crackerjack/agents/refactoring_agent.py +16 -85
  18. crackerjack/agents/refactoring_helpers.py +7 -42
  19. crackerjack/agents/security_agent.py +9 -48
  20. crackerjack/agents/test_creation_agent.py +356 -513
  21. crackerjack/agents/test_specialist_agent.py +0 -4
  22. crackerjack/api.py +6 -25
  23. crackerjack/cli/cache_handlers.py +204 -0
  24. crackerjack/cli/cache_handlers_enhanced.py +683 -0
  25. crackerjack/cli/facade.py +100 -0
  26. crackerjack/cli/handlers.py +224 -9
  27. crackerjack/cli/interactive.py +6 -4
  28. crackerjack/cli/options.py +642 -55
  29. crackerjack/cli/utils.py +2 -1
  30. crackerjack/code_cleaner.py +58 -117
  31. crackerjack/config/global_lock_config.py +8 -48
  32. crackerjack/config/hooks.py +53 -62
  33. crackerjack/core/async_workflow_orchestrator.py +24 -34
  34. crackerjack/core/autofix_coordinator.py +3 -17
  35. crackerjack/core/enhanced_container.py +4 -13
  36. crackerjack/core/file_lifecycle.py +12 -89
  37. crackerjack/core/performance.py +2 -2
  38. crackerjack/core/performance_monitor.py +15 -55
  39. crackerjack/core/phase_coordinator.py +104 -204
  40. crackerjack/core/resource_manager.py +14 -90
  41. crackerjack/core/service_watchdog.py +62 -95
  42. crackerjack/core/session_coordinator.py +149 -0
  43. crackerjack/core/timeout_manager.py +14 -72
  44. crackerjack/core/websocket_lifecycle.py +13 -78
  45. crackerjack/core/workflow_orchestrator.py +171 -174
  46. crackerjack/docs/INDEX.md +11 -0
  47. crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
  48. crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
  49. crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
  50. crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
  51. crackerjack/docs/generated/api/SERVICES.md +1252 -0
  52. crackerjack/documentation/__init__.py +31 -0
  53. crackerjack/documentation/ai_templates.py +756 -0
  54. crackerjack/documentation/dual_output_generator.py +765 -0
  55. crackerjack/documentation/mkdocs_integration.py +518 -0
  56. crackerjack/documentation/reference_generator.py +977 -0
  57. crackerjack/dynamic_config.py +55 -50
  58. crackerjack/executors/async_hook_executor.py +10 -15
  59. crackerjack/executors/cached_hook_executor.py +117 -43
  60. crackerjack/executors/hook_executor.py +8 -34
  61. crackerjack/executors/hook_lock_manager.py +26 -183
  62. crackerjack/executors/individual_hook_executor.py +13 -11
  63. crackerjack/executors/lsp_aware_hook_executor.py +270 -0
  64. crackerjack/executors/tool_proxy.py +417 -0
  65. crackerjack/hooks/lsp_hook.py +79 -0
  66. crackerjack/intelligence/adaptive_learning.py +25 -10
  67. crackerjack/intelligence/agent_orchestrator.py +2 -5
  68. crackerjack/intelligence/agent_registry.py +34 -24
  69. crackerjack/intelligence/agent_selector.py +5 -7
  70. crackerjack/interactive.py +17 -6
  71. crackerjack/managers/async_hook_manager.py +0 -1
  72. crackerjack/managers/hook_manager.py +79 -1
  73. crackerjack/managers/publish_manager.py +44 -8
  74. crackerjack/managers/test_command_builder.py +1 -15
  75. crackerjack/managers/test_executor.py +1 -3
  76. crackerjack/managers/test_manager.py +98 -7
  77. crackerjack/managers/test_manager_backup.py +10 -9
  78. crackerjack/mcp/cache.py +2 -2
  79. crackerjack/mcp/client_runner.py +1 -1
  80. crackerjack/mcp/context.py +191 -68
  81. crackerjack/mcp/dashboard.py +7 -5
  82. crackerjack/mcp/enhanced_progress_monitor.py +31 -28
  83. crackerjack/mcp/file_monitor.py +30 -23
  84. crackerjack/mcp/progress_components.py +31 -21
  85. crackerjack/mcp/progress_monitor.py +50 -53
  86. crackerjack/mcp/rate_limiter.py +6 -6
  87. crackerjack/mcp/server_core.py +17 -16
  88. crackerjack/mcp/service_watchdog.py +2 -1
  89. crackerjack/mcp/state.py +4 -7
  90. crackerjack/mcp/task_manager.py +11 -9
  91. crackerjack/mcp/tools/core_tools.py +173 -32
  92. crackerjack/mcp/tools/error_analyzer.py +3 -2
  93. crackerjack/mcp/tools/execution_tools.py +8 -10
  94. crackerjack/mcp/tools/execution_tools_backup.py +42 -30
  95. crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
  96. crackerjack/mcp/tools/intelligence_tools.py +5 -2
  97. crackerjack/mcp/tools/monitoring_tools.py +33 -70
  98. crackerjack/mcp/tools/proactive_tools.py +24 -11
  99. crackerjack/mcp/tools/progress_tools.py +5 -8
  100. crackerjack/mcp/tools/utility_tools.py +20 -14
  101. crackerjack/mcp/tools/workflow_executor.py +62 -40
  102. crackerjack/mcp/websocket/app.py +8 -0
  103. crackerjack/mcp/websocket/endpoints.py +352 -357
  104. crackerjack/mcp/websocket/jobs.py +40 -57
  105. crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
  106. crackerjack/mcp/websocket/server.py +7 -25
  107. crackerjack/mcp/websocket/websocket_handler.py +6 -17
  108. crackerjack/mixins/__init__.py +0 -2
  109. crackerjack/mixins/error_handling.py +1 -70
  110. crackerjack/models/config.py +12 -1
  111. crackerjack/models/config_adapter.py +49 -1
  112. crackerjack/models/protocols.py +122 -122
  113. crackerjack/models/resource_protocols.py +55 -210
  114. crackerjack/monitoring/ai_agent_watchdog.py +13 -13
  115. crackerjack/monitoring/metrics_collector.py +426 -0
  116. crackerjack/monitoring/regression_prevention.py +8 -8
  117. crackerjack/monitoring/websocket_server.py +643 -0
  118. crackerjack/orchestration/advanced_orchestrator.py +11 -6
  119. crackerjack/orchestration/coverage_improvement.py +3 -3
  120. crackerjack/orchestration/execution_strategies.py +26 -6
  121. crackerjack/orchestration/test_progress_streamer.py +8 -5
  122. crackerjack/plugins/base.py +2 -2
  123. crackerjack/plugins/hooks.py +7 -0
  124. crackerjack/plugins/managers.py +11 -8
  125. crackerjack/security/__init__.py +0 -1
  126. crackerjack/security/audit.py +6 -35
  127. crackerjack/services/anomaly_detector.py +392 -0
  128. crackerjack/services/api_extractor.py +615 -0
  129. crackerjack/services/backup_service.py +2 -2
  130. crackerjack/services/bounded_status_operations.py +15 -152
  131. crackerjack/services/cache.py +127 -1
  132. crackerjack/services/changelog_automation.py +395 -0
  133. crackerjack/services/config.py +15 -9
  134. crackerjack/services/config_merge.py +19 -80
  135. crackerjack/services/config_template.py +506 -0
  136. crackerjack/services/contextual_ai_assistant.py +48 -22
  137. crackerjack/services/coverage_badge_service.py +171 -0
  138. crackerjack/services/coverage_ratchet.py +27 -25
  139. crackerjack/services/debug.py +3 -3
  140. crackerjack/services/dependency_analyzer.py +460 -0
  141. crackerjack/services/dependency_monitor.py +14 -11
  142. crackerjack/services/documentation_generator.py +491 -0
  143. crackerjack/services/documentation_service.py +675 -0
  144. crackerjack/services/enhanced_filesystem.py +6 -5
  145. crackerjack/services/enterprise_optimizer.py +865 -0
  146. crackerjack/services/error_pattern_analyzer.py +676 -0
  147. crackerjack/services/file_hasher.py +1 -1
  148. crackerjack/services/git.py +8 -25
  149. crackerjack/services/health_metrics.py +10 -8
  150. crackerjack/services/heatmap_generator.py +735 -0
  151. crackerjack/services/initialization.py +11 -30
  152. crackerjack/services/input_validator.py +5 -97
  153. crackerjack/services/intelligent_commit.py +327 -0
  154. crackerjack/services/log_manager.py +15 -12
  155. crackerjack/services/logging.py +4 -3
  156. crackerjack/services/lsp_client.py +628 -0
  157. crackerjack/services/memory_optimizer.py +19 -87
  158. crackerjack/services/metrics.py +42 -33
  159. crackerjack/services/parallel_executor.py +9 -67
  160. crackerjack/services/pattern_cache.py +1 -1
  161. crackerjack/services/pattern_detector.py +6 -6
  162. crackerjack/services/performance_benchmarks.py +18 -59
  163. crackerjack/services/performance_cache.py +20 -81
  164. crackerjack/services/performance_monitor.py +27 -95
  165. crackerjack/services/predictive_analytics.py +510 -0
  166. crackerjack/services/quality_baseline.py +234 -0
  167. crackerjack/services/quality_baseline_enhanced.py +646 -0
  168. crackerjack/services/quality_intelligence.py +785 -0
  169. crackerjack/services/regex_patterns.py +618 -524
  170. crackerjack/services/regex_utils.py +43 -123
  171. crackerjack/services/secure_path_utils.py +5 -164
  172. crackerjack/services/secure_status_formatter.py +30 -141
  173. crackerjack/services/secure_subprocess.py +11 -92
  174. crackerjack/services/security.py +9 -41
  175. crackerjack/services/security_logger.py +12 -24
  176. crackerjack/services/server_manager.py +124 -16
  177. crackerjack/services/status_authentication.py +16 -159
  178. crackerjack/services/status_security_manager.py +4 -131
  179. crackerjack/services/thread_safe_status_collector.py +19 -125
  180. crackerjack/services/unified_config.py +21 -13
  181. crackerjack/services/validation_rate_limiter.py +5 -54
  182. crackerjack/services/version_analyzer.py +459 -0
  183. crackerjack/services/version_checker.py +1 -1
  184. crackerjack/services/websocket_resource_limiter.py +10 -144
  185. crackerjack/services/zuban_lsp_service.py +390 -0
  186. crackerjack/slash_commands/__init__.py +2 -7
  187. crackerjack/slash_commands/run.md +2 -2
  188. crackerjack/tools/validate_input_validator_patterns.py +14 -40
  189. crackerjack/tools/validate_regex_patterns.py +19 -48
  190. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
  191. crackerjack-0.33.2.dist-info/RECORD +229 -0
  192. crackerjack/CLAUDE.md +0 -207
  193. crackerjack/RULES.md +0 -380
  194. crackerjack/py313.py +0 -234
  195. crackerjack-0.33.0.dist-info/RECORD +0 -187
  196. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
  197. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
  198. {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
@@ -8,7 +8,11 @@ from rich.console import Console
 from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
 from crackerjack.agents.coordinator import AgentCoordinator
 from crackerjack.models.protocols import OptionsProtocol
-from crackerjack.services.debug import get_ai_agent_debugger
+from crackerjack.services.debug import (
+    AIAgentDebugger,
+    NoOpDebugger,
+    get_ai_agent_debugger,
+)
 from crackerjack.services.logging import (
     LoggingContext,
     get_logger,
@@ -47,18 +51,17 @@ class WorkflowPipeline:
         self.session = session
         self.phases = phases
         self._mcp_state_manager: t.Any = None
-        self._last_security_audit: t.Any = None  # Store security audit report
+        self._last_security_audit: t.Any = None

         self.logger = get_logger("crackerjack.pipeline")
-        self._debugger = None
+        self._debugger: AIAgentDebugger | NoOpDebugger | None = None

-        # Performance optimization services
         self._performance_monitor = get_performance_monitor()
         self._memory_optimizer = get_memory_optimizer()
         self._cache = get_performance_cache()

     @property
-    def debugger(self):
+    def debugger(self) -> AIAgentDebugger | NoOpDebugger:
        if self._debugger is None:
            self._debugger = get_ai_agent_debugger()
        return self._debugger
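Note: the `debugger` property is now a lazily initialized, typed attribute: `_debugger` starts as `None` and the first access calls `get_ai_agent_debugger()`. A minimal standalone sketch of the same pattern follows; the class bodies and the factory's `enabled` parameter are illustrative stand-ins, not crackerjack's actual implementations.

```python
class AIAgentDebugger:
    def log_iteration_end(self, iteration: int, success: bool) -> None:
        print(f"iteration {iteration}: success={success}")


class NoOpDebugger:
    def log_iteration_end(self, iteration: int, success: bool) -> None:
        pass  # Debugging disabled: calls are swallowed.


def get_ai_agent_debugger(enabled: bool = False) -> AIAgentDebugger | NoOpDebugger:
    # Assumed factory behavior: real debugger when enabled, no-op otherwise.
    return AIAgentDebugger() if enabled else NoOpDebugger()


class Pipeline:
    def __init__(self) -> None:
        # None is the "not yet built" sentinel, matching the diff.
        self._debugger: AIAgentDebugger | NoOpDebugger | None = None

    @property
    def debugger(self) -> AIAgentDebugger | NoOpDebugger:
        if self._debugger is None:  # build on first access only
            self._debugger = get_ai_agent_debugger()
        return self._debugger


Pipeline().debugger.log_iteration_end(1, True)  # no output: NoOpDebugger
```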
@@ -72,10 +75,8 @@ class WorkflowPipeline:
     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
         workflow_id = f"workflow_{int(time.time())}"

-        # Start performance monitoring
         self._performance_monitor.start_workflow(workflow_id)

-        # Start cache service if not already running
         await self._cache.start()

         with LoggingContext(
@@ -91,13 +92,12 @@ class WorkflowPipeline:
                 options, start_time, workflow_id
             )

-            # Finalize performance monitoring
             workflow_perf = self._performance_monitor.end_workflow(
                 workflow_id, success
             )
             self.logger.info(
-                f"Workflow performance: {workflow_perf.performance_score:.1f} score, "
-                f"{workflow_perf.total_duration_seconds:.2f}s duration"
+                f"Workflow performance: {workflow_perf.performance_score: .1f} score, "
+                f"{workflow_perf.total_duration_seconds: .2f}s duration"
             )

             return success
@@ -112,7 +112,7 @@ class WorkflowPipeline:

         finally:
             self.session.cleanup_resources()
-            # Optimize memory after workflow completion
+
             self._memory_optimizer.optimize_memory()
             await self._cache.stop()

@@ -122,6 +122,9 @@ class WorkflowPipeline:

         self._log_workflow_startup_debug(options)
         self._configure_session_cleanup(options)
+        self._initialize_zuban_lsp(options)
+        self._configure_hook_manager_lsp(options)
+        self._register_lsp_cleanup_handler(options)
         self._log_workflow_startup_info(options)

     def _log_workflow_startup_debug(self, options: OptionsProtocol) -> None:
@@ -142,6 +145,138 @@ class WorkflowPipeline:
         if hasattr(options, "cleanup"):
             self.session.set_cleanup_config(options.cleanup)

+    def _initialize_zuban_lsp(self, options: OptionsProtocol) -> None:
+        """Initialize Zuban LSP server if not disabled."""
+        # Check if LSP is disabled via CLI flag or configuration
+        if getattr(options, "no_zuban_lsp", False):
+            self.logger.debug("Zuban LSP server disabled by --no-zuban-lsp flag")
+            return
+
+        # Get configuration from options (will use config system if available)
+        config = getattr(options, "zuban_lsp", None)
+        if config and not config.enabled:
+            self.logger.debug("Zuban LSP server disabled in configuration")
+            return
+
+        if config and not config.auto_start:
+            self.logger.debug("Zuban LSP server auto-start disabled in configuration")
+            return
+
+        # Check if LSP server is already running to avoid duplicates
+        from crackerjack.services.server_manager import find_zuban_lsp_processes
+
+        existing_processes = find_zuban_lsp_processes()
+        if existing_processes:
+            self.logger.debug(
+                f"Zuban LSP server already running (PID: {existing_processes[0]['pid']})"
+            )
+            return
+
+        # Auto-start LSP server in background
+        try:
+            import subprocess
+            import sys
+
+            # Use configuration values if available, otherwise fallback to CLI options
+            if config:
+                zuban_lsp_port = config.port
+                zuban_lsp_mode = config.mode
+            else:
+                zuban_lsp_port = getattr(options, "zuban_lsp_port", 8677)
+                zuban_lsp_mode = getattr(options, "zuban_lsp_mode", "stdio")
+
+            cmd = [
+                sys.executable,
+                "-m",
+                "crackerjack",
+                "--start-zuban-lsp",
+                "--zuban-lsp-port",
+                str(zuban_lsp_port),
+                "--zuban-lsp-mode",
+                zuban_lsp_mode,
+            ]
+
+            subprocess.Popen(
+                cmd,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.DEVNULL,
+                start_new_session=True,
+            )
+
+            self.logger.info(
+                f"Auto-started Zuban LSP server on port {zuban_lsp_port} ({zuban_lsp_mode} mode)"
+            )
+
+        except Exception as e:
+            self.logger.warning(f"Failed to auto-start Zuban LSP server: {e}")
+
+    def _log_zuban_lsp_status(self) -> None:
+        """Display current Zuban LSP server status during workflow startup."""
+        from crackerjack.services.server_manager import find_zuban_lsp_processes
+
+        try:
+            lsp_processes = find_zuban_lsp_processes()
+
+            if lsp_processes:
+                proc = lsp_processes[0]  # Show first running process
+                self.logger.info(
+                    f"🔍 Zuban LSP server running (PID: {proc['pid']}, "
+                    f"CPU: {proc['cpu']}%, Memory: {proc['mem']}%)"
+                )
+            else:
+                self.logger.info("🔍 Zuban LSP server not running")
+
+        except Exception as e:
+            self.logger.debug(f"Failed to check Zuban LSP status: {e}")
+
+    def _configure_hook_manager_lsp(self, options: OptionsProtocol) -> None:
+        """Configure hook manager with LSP optimization settings."""
+        # Check if LSP hooks are enabled
+        enable_lsp_hooks = getattr(options, "enable_lsp_hooks", False)
+
+        # Configure the hook manager
+        hook_manager = self.phases.hook_manager
+        if hasattr(hook_manager, "configure_lsp_optimization"):
+            hook_manager.configure_lsp_optimization(enable_lsp_hooks)
+
+        if enable_lsp_hooks and not getattr(options, "no_zuban_lsp", False):
+            self.console.print(
+                "🔍 LSP-optimized hook execution enabled for faster type checking",
+                style="blue",
+            )
+
+    def _register_lsp_cleanup_handler(self, options: OptionsProtocol) -> None:
+        """Register cleanup handler to stop LSP server when workflow completes."""
+        # Get configuration to check if we should handle LSP cleanup
+        config = getattr(options, "zuban_lsp", None)
+        if config and not config.enabled:
+            return
+
+        if getattr(options, "no_zuban_lsp", False):
+            return
+
+        def cleanup_lsp_server() -> None:
+            """Cleanup function to gracefully stop LSP server if it was auto-started."""
+            try:
+                from crackerjack.services.server_manager import (
+                    find_zuban_lsp_processes,
+                    stop_process,
+                )
+
+                lsp_processes = find_zuban_lsp_processes()
+                if lsp_processes:
+                    for proc in lsp_processes:
+                        self.logger.debug(
+                            f"Stopping auto-started Zuban LSP server (PID: {proc['pid']})"
+                        )
+                        stop_process(proc["pid"])
+
+            except Exception as e:
+                self.logger.debug(f"Error during LSP cleanup: {e}")
+
+        # Register the cleanup handler with the session
+        self.session.register_cleanup(cleanup_lsp_server)
+
     def _log_workflow_startup_info(self, options: OptionsProtocol) -> None:
         self.logger.info(
             "Starting complete workflow execution",
@@ -150,6 +285,9 @@ class WorkflowPipeline:
             package_path=str(self.pkg_path),
         )

+        # Display Zuban LSP server status
+        self._log_zuban_lsp_status()
+
     async def _execute_workflow_with_timing(
         self, options: OptionsProtocol, start_time: float, workflow_id: str
     ) -> bool:
@@ -202,41 +340,34 @@ class WorkflowPipeline:
     async def _execute_workflow_phases(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Execute all workflow phases with proper security gates and performance monitoring."""
         success = True

-        # Configuration phase with monitoring
         with phase_monitor(workflow_id, "configuration"):
             config_success = self.phases.run_configuration_phase(options)
             success = success and config_success

-        # Execute quality phase (includes testing and comprehensive checks)
         quality_success = await self._execute_quality_phase(options, workflow_id)
         if not quality_success:
             success = False
-            # For publishing workflows, enforce security gates
+
             if self._is_publishing_workflow(options):
-                return False  # Exit early - publishing requires ALL quality checks
+                return False

-        # Execute publishing workflow if requested
         if not await self._execute_publishing_workflow(options, workflow_id):
             success = False
             return False

-        # Execute commit workflow if requested
         if not await self._execute_commit_workflow(options, workflow_id):
             success = False

         return success

     def _is_publishing_workflow(self, options: OptionsProtocol) -> bool:
-        """Check if this is a publishing workflow that requires strict security gates."""
         return bool(options.publish or options.all or options.commit)

     async def _execute_publishing_workflow(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Execute publishing workflow with proper error handling and monitoring."""
         if not options.publish and not options.all:
             return True

@@ -249,7 +380,6 @@ class WorkflowPipeline:
     async def _execute_commit_workflow(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Execute commit workflow with proper error handling and monitoring."""
         if not options.commit:
             return True

@@ -278,17 +408,14 @@ class WorkflowPipeline:
     ) -> bool:
         iteration = self._start_iteration_tracking(options)

-        # Execute initial phases (fast hooks + optional cleaning)
         if not await self._execute_initial_phases(options, workflow_id, iteration):
             return False

-        # Run main quality phases
         (
             testing_passed,
             comprehensive_passed,
         ) = await self._run_main_quality_phases_async(options, workflow_id)

-        # Handle workflow completion based on agent mode
         return await self._handle_workflow_completion(
             options, iteration, testing_passed, comprehensive_passed, workflow_id
         )
@@ -296,26 +423,21 @@ class WorkflowPipeline:
     async def _execute_initial_phases(
         self, options: OptionsProtocol, workflow_id: str, iteration: int
     ) -> bool:
-        """Execute fast hooks and optional code cleaning phases."""
-        # Fast hooks with performance monitoring
         with phase_monitor(workflow_id, "fast_hooks") as monitor:
             if not await self._run_initial_fast_hooks_async(
                 options, iteration, monitor
             ):
                 return False

-        # Run code cleaning if enabled
         return self._execute_optional_cleaning_phase(options)

     def _execute_optional_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        """Execute code cleaning phase if enabled."""
         if not getattr(options, "clean", False):
             return True

         if not self._run_code_cleaning_phase(options):
             return False

-        # Run fast hooks again after cleaning for sanity check
         if not self._run_post_cleaning_fast_hooks(options):
             return False

@@ -330,7 +452,6 @@ class WorkflowPipeline:
         comprehensive_passed: bool,
         workflow_id: str = "unknown",
     ) -> bool:
-        """Handle workflow completion based on agent mode."""
         if options.ai_agent:
             return await self._handle_ai_agent_workflow(
                 options, iteration, testing_passed, comprehensive_passed, workflow_id
@@ -357,7 +478,6 @@ class WorkflowPipeline:
     async def _run_main_quality_phases_async(
         self, options: OptionsProtocol, workflow_id: str
     ) -> tuple[bool, bool]:
-        # Run testing and comprehensive phases in parallel where possible
         testing_task = asyncio.create_task(
             self._run_testing_phase_async(options, workflow_id)
         )
@@ -369,7 +489,6 @@ class WorkflowPipeline:
             testing_task, comprehensive_task, return_exceptions=True
         )

-        # Handle exceptions and ensure boolean types
         testing_result, comprehensive_result = results

         if isinstance(testing_result, Exception):
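Note: the testing and comprehensive phases are awaited concurrently via `asyncio.gather(..., return_exceptions=True)`, so a crash in one phase cannot cancel its sibling; exceptions come back as values and are coerced to `False` afterwards. A minimal self-contained sketch of that pattern (phase bodies are illustrative):

```python
import asyncio


async def testing_phase() -> bool:
    await asyncio.sleep(0.01)
    return True


async def comprehensive_phase() -> bool:
    raise RuntimeError("hook crashed")


async def run_quality_phases() -> tuple[bool, bool]:
    # return_exceptions=True: a crash in one phase is returned as a
    # value instead of cancelling the sibling task.
    testing_result, comprehensive_result = await asyncio.gather(
        testing_phase(), comprehensive_phase(), return_exceptions=True
    )
    testing_passed = testing_result is True
    comprehensive_passed = comprehensive_result is True
    return testing_passed, comprehensive_passed


print(asyncio.run(run_quality_phases()))  # (True, False)
```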
@@ -396,11 +515,9 @@ class WorkflowPipeline:
         comprehensive_passed: bool,
         workflow_id: str = "unknown",
     ) -> bool:
-        # Handle security gates first
         if not await self._process_security_gates(options):
             return False

-        # Determine if AI fixing is needed
         needs_ai_fixing = self._determine_ai_fixing_needed(
             testing_passed, comprehensive_passed, bool(options.publish or options.all)
         )
@@ -408,13 +525,11 @@ class WorkflowPipeline:
         if needs_ai_fixing:
             return await self._execute_ai_fixing_workflow(options, iteration)

-        # Handle success case without AI fixing
         return self._finalize_ai_workflow_success(
             options, iteration, testing_passed, comprehensive_passed
         )

     async def _process_security_gates(self, options: OptionsProtocol) -> bool:
-        """Process security gates for publishing operations."""
         publishing_requested, security_blocks = (
             self._check_security_gates_for_publishing(options)
         )
@@ -422,7 +537,6 @@ class WorkflowPipeline:
         if not (publishing_requested and security_blocks):
             return True

-        # Try AI fixing for security issues, then re-check
         security_fix_result = await self._handle_security_gate_failure(
             options, allow_ai_fixing=True
         )
@@ -431,7 +545,6 @@ class WorkflowPipeline:
     async def _execute_ai_fixing_workflow(
         self, options: OptionsProtocol, iteration: int
     ) -> bool:
-        """Execute AI fixing workflow and handle debugging."""
         success = await self._run_ai_agent_fixing_phase(options)
         if self._should_debug():
             self.debugger.log_iteration_end(iteration, success)
@@ -444,7 +557,6 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        """Finalize AI workflow when no fixing is needed."""
         publishing_requested = bool(options.publish or options.all)

         final_success = self._determine_workflow_success(
@@ -467,7 +579,6 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> None:
-        """Show security audit warning for partial success in publishing workflows."""
         should_show_warning = (
             publishing_requested
             and final_success
@@ -484,23 +595,19 @@ class WorkflowPipeline:
         testing_passed: bool,
         comprehensive_passed: bool,
     ) -> bool:
-        # Check security gates for publishing operations
         publishing_requested, security_blocks = (
             self._check_security_gates_for_publishing(options)
         )

         if publishing_requested and security_blocks:
-            # Standard workflow cannot bypass security gates
             return await self._handle_security_gate_failure(options)

-        # Determine success based on publishing requirements
         success = self._determine_workflow_success(
             testing_passed,
             comprehensive_passed,
             publishing_requested,
         )

-        # Show security audit warning for partial success in publishing workflows
         if (
             publishing_requested
             and success
@@ -512,7 +619,6 @@ class WorkflowPipeline:
                 "[red]❌ Quality checks failed - cannot proceed to publishing[/red]"
             )

-        # Show verbose failure details if requested
         if not success and getattr(options, "verbose", False):
             self._show_verbose_failure_details(testing_passed, comprehensive_passed)

@@ -563,7 +669,6 @@ class WorkflowPipeline:
             self._mcp_state_manager.update_stage_status(stage, status)

     def _run_code_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        """Run code cleaning phase after fast hooks but before comprehensive hooks."""
         self.console.print("\n[bold blue]🧹 Running Code Cleaning Phase...[/bold blue]")

         success = self.phases.run_cleaning_phase(options)
@@ -576,7 +681,6 @@ class WorkflowPipeline:
         return success

     def _run_post_cleaning_fast_hooks(self, options: OptionsProtocol) -> bool:
-        """Run fast hooks again after code cleaning for sanity check."""
         self.console.print(
             "\n[bold cyan]🔍 Running Post-Cleaning Fast Hooks Sanity Check...[/bold cyan]"
         )
@@ -591,11 +695,9 @@ class WorkflowPipeline:
         return success

     def _has_code_cleaning_run(self) -> bool:
-        """Check if code cleaning has already run in this workflow."""
         return getattr(self, "_code_cleaning_complete", False)

     def _mark_code_cleaning_complete(self) -> None:
-        """Mark code cleaning as complete for this workflow."""
         self._code_cleaning_complete = True

     def _handle_test_failures(self) -> None:
@@ -628,24 +730,21 @@ class WorkflowPipeline:
     def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
         self._update_hooks_status_running()

-        # Run fast hooks first
         fast_hooks_success = self._run_fast_hooks_phase(options)
         if not fast_hooks_success:
             self._handle_hooks_completion(False)
             return False

-        # Run code cleaning after fast hooks but before comprehensive hooks
         if getattr(options, "clean", False):
             if not self._run_code_cleaning_phase(options):
                 self._handle_hooks_completion(False)
                 return False
-            # Run fast hooks again after cleaning for sanity check
+
             if not self._run_post_cleaning_fast_hooks(options):
                 self._handle_hooks_completion(False)
                 return False
             self._mark_code_cleaning_complete()

-        # Run comprehensive hooks
         comprehensive_success = self._run_comprehensive_hooks_phase(options)

         hooks_success = fast_hooks_success and comprehensive_success
@@ -682,29 +781,24 @@ class WorkflowPipeline:
         self._initialize_ai_fixing_phase(options)

         try:
-            # Prepare environment for AI agents
             self._prepare_ai_fixing_environment(options)

-            # Setup coordinator and collect issues
             agent_coordinator, issues = await self._setup_ai_fixing_workflow()

             if not issues:
                 return self._handle_no_issues_found()

-            # Execute AI fixing
             return await self._execute_ai_fixes(options, agent_coordinator, issues)

         except Exception as e:
             return self._handle_fixing_phase_error(e)

     def _initialize_ai_fixing_phase(self, options: OptionsProtocol) -> None:
-        """Initialize the AI fixing phase with status updates and logging."""
         self._update_mcp_status("ai_fixing", "running")
         self.logger.info("Starting AI agent fixing phase")
         self._log_debug_phase_start()

     def _prepare_ai_fixing_environment(self, options: OptionsProtocol) -> None:
-        """Prepare the environment for AI agents by running optional code cleaning."""
         should_run_cleaning = (
             getattr(options, "clean", False) and not self._has_code_cleaning_run()
         )
@@ -717,14 +811,12 @@ class WorkflowPipeline:
             )

         if self._run_code_cleaning_phase(options):
-            # Run fast hooks sanity check after cleaning
             self._run_post_cleaning_fast_hooks(options)
             self._mark_code_cleaning_complete()

     async def _setup_ai_fixing_workflow(
         self,
     ) -> tuple[AgentCoordinator, list[t.Any]]:
-        """Setup agent coordinator and collect issues to fix."""
         agent_coordinator = self._setup_agent_coordinator()
         issues = await self._collect_issues_from_failures()
         return agent_coordinator, issues
@@ -735,7 +827,6 @@ class WorkflowPipeline:
         agent_coordinator: AgentCoordinator,
         issues: list[t.Any],
     ) -> bool:
-        """Execute AI fixes and process results."""
         self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
         fix_result = await agent_coordinator.handle_issues(issues)
         return await self._process_fix_results(options, fix_result)
@@ -847,12 +938,10 @@ class WorkflowPipeline:

         verification_success = True

-        # Verify test fixes
         if self._should_verify_test_fixes(fix_result.fixes_applied):
             if not await self._verify_test_fixes(options):
                 verification_success = False

-        # Verify hook fixes
         if self._should_verify_hook_fixes(fix_result.fixes_applied):
             if not await self._verify_hook_fixes(options):
                 verification_success = False
@@ -861,11 +950,9 @@ class WorkflowPipeline:
         return verification_success

     def _should_verify_test_fixes(self, fixes_applied: list[str]) -> bool:
-        """Check if test fixes need verification."""
         return any("test" in fix.lower() for fix in fixes_applied)

     async def _verify_test_fixes(self, options: OptionsProtocol) -> bool:
-        """Verify test fixes by re-running tests."""
         self.logger.info("Re-running tests to verify test fixes")
         test_success = self.phases.run_testing_phase(options)
         if not test_success:
@@ -873,7 +960,6 @@ class WorkflowPipeline:
         return test_success

     def _should_verify_hook_fixes(self, fixes_applied: list[str]) -> bool:
-        """Check if hook fixes need verification."""
         hook_fixes = [
             f
             for f in fixes_applied
@@ -884,7 +970,6 @@ class WorkflowPipeline:
         return bool(hook_fixes)

     async def _verify_hook_fixes(self, options: OptionsProtocol) -> bool:
-        """Verify hook fixes by re-running comprehensive hooks."""
         self.logger.info("Re-running comprehensive hooks to verify hook fixes")
         hook_success = self.phases.run_comprehensive_hooks_only(options)
         if not hook_success:
@@ -892,7 +977,6 @@ class WorkflowPipeline:
         return hook_success

     def _log_verification_result(self, verification_success: bool) -> None:
-        """Log the final verification result."""
         if verification_success:
             self.logger.info("All AI agent fixes verified successfully")
         else:
@@ -1049,7 +1133,6 @@ class WorkflowPipeline:
         )

     def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
-        """Parse hook error details and create specific issues."""
         issues: list[Issue] = []

         if task_id == "comprehensive_hooks":
@@ -1060,11 +1143,9 @@ class WorkflowPipeline:
         return issues

     def _parse_comprehensive_hook_errors(self, error_msg: str) -> list[Issue]:
-        """Parse comprehensive hook error messages and create specific issues."""
         issues: list[Issue] = []
         error_lower = error_msg.lower()

-        # Check each error type
         complexity_issue = self._check_complexity_error(error_lower)
         if complexity_issue:
             issues.append(complexity_issue)
@@ -1092,7 +1173,6 @@ class WorkflowPipeline:
         return issues

     def _check_complexity_error(self, error_lower: str) -> Issue | None:
-        """Check for complexity errors and create issue if found."""
         if "complexipy" in error_lower or "c901" in error_lower:
             return Issue(
                 id="complexity_violation",
@@ -1104,7 +1184,6 @@ class WorkflowPipeline:
         return None

     def _check_type_error(self, error_lower: str) -> Issue | None:
-        """Check for type errors and create issue if found."""
         if "pyright" in error_lower:
             return Issue(
                 id="pyright_type_error",
@@ -1116,7 +1195,6 @@ class WorkflowPipeline:
         return None

     def _check_security_error(self, error_lower: str) -> Issue | None:
-        """Check for security errors and create issue if found."""
         if "bandit" in error_lower:
             return Issue(
                 id="bandit_security_issue",
@@ -1128,7 +1206,6 @@ class WorkflowPipeline:
         return None

     def _check_performance_error(self, error_lower: str) -> Issue | None:
-        """Check for performance errors and create issue if found."""
         if "refurb" in error_lower:
             return Issue(
                 id="refurb_quality_issue",
@@ -1140,7 +1217,6 @@ class WorkflowPipeline:
         return None

     def _check_dead_code_error(self, error_lower: str) -> Issue | None:
-        """Check for dead code errors and create issue if found."""
         if "vulture" in error_lower:
             return Issue(
                 id="vulture_dead_code",
@@ -1152,7 +1228,6 @@ class WorkflowPipeline:
         return None

     def _check_regex_validation_error(self, error_lower: str) -> Issue | None:
-        """Check for regex validation errors and create issue if found."""
         regex_keywords = ("raw regex", "regex pattern", r"\g<", "replacement")
         if "validate-regex-patterns" in error_lower or any(
             keyword in error_lower for keyword in regex_keywords
@@ -1167,7 +1242,6 @@ class WorkflowPipeline:
         return None

     def _create_fast_hook_issue(self) -> Issue:
-        """Create an issue for fast hook errors."""
         return Issue(
             id="fast_hooks_formatting",
             type=IssueType.FORMATTING,
@@ -1194,10 +1268,8 @@ class WorkflowPipeline:
         return issues

     def _classify_issue(self, issue_str: str) -> tuple[IssueType, Priority]:
-        """Classify an issue string to determine its type and priority."""
         issue_lower = issue_str.lower()

-        # Check high-priority issues first
         if self._is_type_error(issue_lower):
             return IssueType.TYPE_ERROR, Priority.HIGH
         if self._is_security_issue(issue_lower):
@@ -1207,7 +1279,6 @@ class WorkflowPipeline:
         if self._is_regex_validation_issue(issue_lower):
             return IssueType.REGEX_VALIDATION, Priority.HIGH

-        # Check medium-priority issues
         if self._is_dead_code_issue(issue_lower):
             return IssueType.DEAD_CODE, Priority.MEDIUM
         if self._is_performance_issue(issue_lower):
@@ -1215,30 +1286,25 @@ class WorkflowPipeline:
         if self._is_import_error(issue_lower):
             return IssueType.IMPORT_ERROR, Priority.MEDIUM

-        # Default to formatting issue
         return IssueType.FORMATTING, Priority.MEDIUM

     def _is_type_error(self, issue_lower: str) -> bool:
-        """Check if issue is related to type errors."""
         return any(
             keyword in issue_lower for keyword in ("type", "annotation", "pyright")
         )

     def _is_security_issue(self, issue_lower: str) -> bool:
-        """Check if issue is related to security."""
         return any(
             keyword in issue_lower for keyword in ("security", "bandit", "hardcoded")
         )

     def _is_complexity_issue(self, issue_lower: str) -> bool:
-        """Check if issue is related to code complexity."""
         return any(
             keyword in issue_lower
             for keyword in ("complexity", "complexipy", "c901", "too complex")
         )

     def _is_regex_validation_issue(self, issue_lower: str) -> bool:
-        """Check if issue is related to regex validation."""
         return any(
             keyword in issue_lower
             for keyword in (
@@ -1251,17 +1317,14 @@ class WorkflowPipeline:
         )

     def _is_dead_code_issue(self, issue_lower: str) -> bool:
-        """Check if issue is related to dead code."""
         return any(keyword in issue_lower for keyword in ("unused", "dead", "vulture"))

     def _is_performance_issue(self, issue_lower: str) -> bool:
-        """Check if issue is related to performance."""
         return any(
             keyword in issue_lower for keyword in ("performance", "refurb", "furb")
         )

     def _is_import_error(self, issue_lower: str) -> bool:
-        """Check if issue is related to import errors."""
         return any(keyword in issue_lower for keyword in ("import", "creosote"))

     def _log_failure_counts_if_debugging(
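Note: `_classify_issue` and its `_is_*` helpers route an issue string to a `(IssueType, Priority)` pair by substring matching, checking high-priority buckets (types, security, complexity, regex) before medium-priority ones and falling back to formatting. A standalone sketch with stand-in enums (crackerjack's real `IssueType`/`Priority` live in `crackerjack.agents.base` and have more members):

```python
from enum import Enum, auto


class IssueType(Enum):  # stand-in for crackerjack.agents.base.IssueType
    TYPE_ERROR = auto()
    SECURITY = auto()
    DEAD_CODE = auto()
    FORMATTING = auto()


class Priority(Enum):  # stand-in for crackerjack.agents.base.Priority
    HIGH = auto()
    MEDIUM = auto()


def classify_issue(issue_str: str) -> tuple[IssueType, Priority]:
    issue = issue_str.lower()
    # High-priority buckets are checked first, mirroring the diff.
    if any(k in issue for k in ("type", "annotation", "pyright")):
        return IssueType.TYPE_ERROR, Priority.HIGH
    if any(k in issue for k in ("security", "bandit", "hardcoded")):
        return IssueType.SECURITY, Priority.HIGH
    if any(k in issue for k in ("unused", "dead", "vulture")):
        return IssueType.DEAD_CODE, Priority.MEDIUM
    return IssueType.FORMATTING, Priority.MEDIUM  # fallback bucket


print(classify_issue("pyright: missing annotation"))
# (<IssueType.TYPE_ERROR: 1>, <Priority.HIGH: 1>)
```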
@@ -1274,41 +1337,25 @@ class WorkflowPipeline:
     def _check_security_gates_for_publishing(
         self, options: OptionsProtocol
     ) -> tuple[bool, bool]:
-        """Check if publishing is requested and if security gates block it.
-
-        Returns:
-            tuple[bool, bool]: (publishing_requested, security_blocks_publishing)
-        """
         publishing_requested = bool(options.publish or options.all or options.commit)

         if not publishing_requested:
             return False, False

-        # Check security gates for publishing operations
         try:
             security_blocks_publishing = self._check_security_critical_failures()
             return publishing_requested, security_blocks_publishing
         except Exception as e:
-            # Fail securely if security check fails
             self.logger.warning(f"Security check failed: {e} - blocking publishing")
             self.console.print(
                 "[red]🔒 SECURITY CHECK FAILED: Unable to verify security status - publishing BLOCKED[/red]"
             )
-            # Return True for security_blocks to fail securely
+
             return publishing_requested, True

     async def _handle_security_gate_failure(
         self, options: OptionsProtocol, allow_ai_fixing: bool = False
     ) -> bool:
-        """Handle security gate failures with optional AI fixing.
-
-        Args:
-            options: Workflow options
-            allow_ai_fixing: Whether AI fixing is allowed for security issues
-
-        Returns:
-            bool: True if security issues resolved, False if still blocked
-        """
         self.console.print(
             "[red]🔒 SECURITY GATE: Critical security checks failed[/red]"
         )
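Note: `_check_security_gates_for_publishing` is written fail-secure: if the audit itself raises, the method logs, prints the blocked banner, and returns `security_blocks = True`, so an unverifiable security status blocks publishing rather than waving it through. A minimal sketch of that decision shape (names are illustrative):

```python
from collections.abc import Callable


def check_security_gates(
    publishing_requested: bool, run_audit: Callable[[], bool]
) -> tuple[bool, bool]:
    # Returns (publishing_requested, security_blocks_publishing).
    if not publishing_requested:
        return False, False
    try:
        return publishing_requested, bool(run_audit())
    except Exception:
        # Fail secure: an unverifiable security status blocks publishing.
        return publishing_requested, True


def broken_audit() -> bool:
    raise RuntimeError("audit unavailable")


print(check_security_gates(True, broken_audit))  # (True, True)
```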
@@ -1321,10 +1368,8 @@ class WorkflowPipeline:
                 "[yellow]🤖 Attempting AI-assisted security issue resolution...[/yellow]"
             )

-            # Try AI fixing for security issues
             ai_fix_success = await self._run_ai_agent_fixing_phase(options)
             if ai_fix_success:
-                # Re-check security after AI fixing
                 try:
                     security_still_blocks = self._check_security_critical_failures()
                     if not security_still_blocks:
@@ -1344,7 +1389,6 @@ class WorkflowPipeline:
                     return False
             return False
         else:
-            # Standard workflow cannot bypass security gates
             self.console.print(
                 "[red]Security-critical hooks (bandit, pyright, gitleaks) must pass before publishing[/red]"
             )
@@ -1356,11 +1400,9 @@ class WorkflowPipeline:
         comprehensive_passed: bool,
         publishing_requested: bool,
     ) -> bool:
-        """Determine if AI fixing is needed based on test results and publishing requirements."""
         if publishing_requested:
-            # For publish/commit workflows, trigger AI fixing if either fails (since both must pass)
             return not testing_passed or not comprehensive_passed
-        # For regular workflows, trigger AI fixing if either fails
+
         return not testing_passed or not comprehensive_passed

     def _determine_workflow_success(
@@ -1369,17 +1411,14 @@ class WorkflowPipeline:
         comprehensive_passed: bool,
         publishing_requested: bool,
     ) -> bool:
-        """Determine workflow success based on test results and workflow type."""
         if publishing_requested:
-            # For publishing workflows, ALL quality checks (tests AND comprehensive hooks) must pass
             return testing_passed and comprehensive_passed
-        # For regular workflows, both must pass as well
+
         return testing_passed and comprehensive_passed

     def _show_verbose_failure_details(
         self, testing_passed: bool, comprehensive_passed: bool
     ) -> None:
-        """Show detailed failure information in verbose mode."""
         self.console.print(
             f"[yellow]⚠️ Quality phase results - testing_passed: {testing_passed}, comprehensive_passed: {comprehensive_passed}[/yellow]"
         )
@@ -1391,52 +1430,37 @@ class WorkflowPipeline:
         )

     def _check_security_critical_failures(self) -> bool:
-        """Check if any security-critical hooks have failed.
-
-        Returns:
-            True if security-critical hooks failed and block publishing
-        """
         try:
             from crackerjack.security.audit import SecurityAuditor

             auditor = SecurityAuditor()

-            # Get hook results - we need to be careful not to re-run hooks
-            # Instead, check the session tracker for recent failures
             fast_results = self._get_recent_fast_hook_results()
             comprehensive_results = self._get_recent_comprehensive_hook_results()

-            # Generate security audit report
             audit_report = auditor.audit_hook_results(
                 fast_results, comprehensive_results
             )

-            # Store audit report for later use
             self._last_security_audit = audit_report

-            # Block publishing if critical failures exist
             return audit_report.has_critical_failures

         except Exception as e:
-            # Fail securely - if we can't determine security status, block publishing
             self.logger.warning(f"Security audit failed: {e} - failing securely")
-            # Re-raise the exception so it can be caught by the calling method
+
             raise

     def _get_recent_fast_hook_results(self) -> list[t.Any]:
-        """Get recent fast hook results from session tracker."""
-        # Try to get results from session tracker
         results = self._extract_hook_results_from_session("fast_hooks")

-        # If no results from session, create mock failed results for critical hooks
         if not results:
             results = self._create_mock_hook_results(["gitleaks"])

         return results

     def _extract_hook_results_from_session(self, hook_type: str) -> list[t.Any]:
-        """Extract hook results from session tracker for given hook type."""
-        results = []
+        results: list[t.Any] = []

         session_tracker = self._get_session_tracker()
         if not session_tracker:
@@ -1450,7 +1474,6 @@ class WorkflowPipeline:
         return results

     def _get_session_tracker(self) -> t.Any | None:
-        """Get session tracker if available."""
         return (
             getattr(self.session, "session_tracker", None)
             if hasattr(self.session, "session_tracker")
@@ -1458,8 +1481,7 @@ class WorkflowPipeline:
         )

     def _create_mock_hook_results(self, critical_hooks: list[str]) -> list[t.Any]:
-        """Create mock failed results for critical hooks to fail securely."""
-        results = []
+        results: list[t.Any] = []

         for hook_name in critical_hooks:
             mock_result = self._create_mock_hook_result(hook_name)
@@ -1468,36 +1490,29 @@ class WorkflowPipeline:
         return results

     def _create_mock_hook_result(self, hook_name: str) -> t.Any:
-        """Create a mock result that appears to have failed for security purposes."""
         return type(
             "MockResult",
             (),
             {
                 "name": hook_name,
-                "status": "unknown",  # Unknown status = fail securely
+                "status": "unknown",
                 "output": "Unable to determine hook status",
             },
         )()

     def _get_recent_comprehensive_hook_results(self) -> list[t.Any]:
-        """Get recent comprehensive hook results from session tracker."""
-        # Try to get results from session tracker
         results = self._extract_hook_results_from_session("comprehensive_hooks")

-        # If no results from session, create mock failed results for critical hooks
         if not results:
             results = self._create_mock_hook_results(["bandit", "pyright"])

         return results

     def _is_security_critical_failure(self, result: t.Any) -> bool:
-        """Check if a hook result represents a security-critical failure."""
-
-        # List of security-critical hook names (fail-safe approach)
         security_critical_hooks = {
-            "bandit",  # Security vulnerability scanning
-            "pyright",  # Type safety prevents security holes
-            "gitleaks",  # Secret detection
+            "bandit",
+            "pyright",
+            "gitleaks",
         }

         hook_name = getattr(result, "name", "").lower()
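Note: when no real hook results are available, the pipeline synthesizes placeholders with `type("MockResult", (), {...})()`, a dynamically built class whose instances expose `name`/`status`/`output` attributes; an "unknown" status is then treated as a failure, again failing secure. A quick demonstration of that construction (the `is_failed` predicate below is an assumption, since the exact failure check is not shown in these hunks):

```python
# type(name, bases, namespace) builds a class on the fly; the trailing
# () instantiates it, yielding an object with plain attributes.
mock = type(
    "MockResult",
    (),
    {
        "name": "gitleaks",
        "status": "unknown",
        "output": "Unable to determine hook status",
    },
)()

# Hypothetical failure predicate: anything that is not an explicit
# "passed" counts as failed, so "unknown" blocks publishing.
is_failed = getattr(mock, "status", "unknown") != "passed"
print(mock.name, mock.status, is_failed)  # gitleaks unknown True
```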
@@ -1510,8 +1525,6 @@ class WorkflowPipeline:
         return hook_name in security_critical_hooks and is_failed

     def _show_security_audit_warning(self) -> None:
-        """Show security audit warning when proceeding with partial success."""
-        # Use stored audit report if available
         audit_report = getattr(self, "_last_security_audit", None)

         if audit_report:
@@ -1519,23 +1532,19 @@ class WorkflowPipeline:
                 "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
             )

-            # Show security status
             for warning in audit_report.security_warnings:
                 if "CRITICAL" in warning:
-                    # This shouldn't happen if we're showing warnings, but fail-safe
                     self.console.print(f"[red]{warning}[/red]")
                 elif "HIGH" in warning:
                     self.console.print(f"[yellow]{warning}[/yellow]")
                 else:
                     self.console.print(f"[blue]{warning}[/blue]")

-            # Show recommendations
             if audit_report.recommendations:
-                self.console.print("[bold]Security Recommendations:[/bold]")
-                for rec in audit_report.recommendations[:3]:  # Show top 3
+                self.console.print("[bold]Security Recommendations: [/bold]")
+                for rec in audit_report.recommendations[:3]:
                     self.console.print(f"[dim]{rec}[/dim]")
         else:
-            # Fallback if no audit report available
             self.console.print(
                 "[yellow]⚠️ SECURITY AUDIT: Proceeding with partial quality success[/yellow]"
             )
@@ -1546,12 +1555,10 @@ class WorkflowPipeline:
             "[yellow]⚠️ Some non-critical quality checks failed - consider reviewing before production deployment[/yellow]"
         )

-    # Performance-optimized async methods
     async def _run_initial_fast_hooks_async(
         self, options: OptionsProtocol, iteration: int, monitor: t.Any
     ) -> bool:
-        """Run initial fast hooks asynchronously with monitoring."""
-        monitor.record_sequential_op()  # Fast hooks run sequentially for safety
+        monitor.record_sequential_op()
         fast_hooks_passed = self._run_fast_hooks_phase(options)
         if not fast_hooks_passed:
             if options.ai_agent and self._should_debug():
@@ -1562,7 +1569,6 @@ class WorkflowPipeline:
     async def _run_fast_hooks_phase_monitored(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Run fast hooks phase with performance monitoring."""
         with phase_monitor(workflow_id, "fast_hooks") as monitor:
             monitor.record_sequential_op()
             return self._run_fast_hooks_phase(options)
@@ -1570,7 +1576,6 @@ class WorkflowPipeline:
     async def _run_comprehensive_hooks_phase_monitored(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Run comprehensive hooks phase with performance monitoring."""
         with phase_monitor(workflow_id, "comprehensive_hooks") as monitor:
             monitor.record_sequential_op()
             return self._run_comprehensive_hooks_phase(options)
@@ -1578,7 +1583,6 @@ class WorkflowPipeline:
     async def _run_testing_phase_async(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Run testing phase asynchronously with monitoring."""
         with phase_monitor(workflow_id, "testing") as monitor:
             monitor.record_sequential_op()
             return self._run_testing_phase(options)
@@ -1586,11 +1590,9 @@ class WorkflowPipeline:
     async def _execute_standard_hooks_workflow_monitored(
         self, options: OptionsProtocol, workflow_id: str
     ) -> bool:
-        """Execute standard hooks workflow with performance monitoring."""
         with phase_monitor(workflow_id, "hooks") as monitor:
             self._update_hooks_status_running()

-            # Execute fast hooks phase
             fast_hooks_success = self._execute_monitored_fast_hooks_phase(
                 options, monitor
             )
@@ -1598,17 +1600,14 @@ class WorkflowPipeline:
                 self._handle_hooks_completion(False)
                 return False

-            # Execute optional cleaning phase
             if not self._execute_monitored_cleaning_phase(options):
                 self._handle_hooks_completion(False)
                 return False

-            # Execute comprehensive hooks phase
             comprehensive_success = self._execute_monitored_comprehensive_phase(
                 options, monitor
             )

-            # Complete workflow
             hooks_success = fast_hooks_success and comprehensive_success
             self._handle_hooks_completion(hooks_success)
             return hooks_success
@@ -1616,21 +1615,18 @@ class WorkflowPipeline:
     def _execute_monitored_fast_hooks_phase(
         self, options: OptionsProtocol, monitor: t.Any
     ) -> bool:
-        """Execute fast hooks phase with monitoring."""
         fast_hooks_success = self._run_fast_hooks_phase(options)
         if fast_hooks_success:
             monitor.record_sequential_op()
         return fast_hooks_success

     def _execute_monitored_cleaning_phase(self, options: OptionsProtocol) -> bool:
-        """Execute optional code cleaning phase."""
         if not getattr(options, "clean", False):
             return True

         if not self._run_code_cleaning_phase(options):
             return False

-        # Run fast hooks again after cleaning for sanity check
         if not self._run_post_cleaning_fast_hooks(options):
             return False

@@ -1640,7 +1636,6 @@ class WorkflowPipeline:
     def _execute_monitored_comprehensive_phase(
         self, options: OptionsProtocol, monitor: t.Any
     ) -> bool:
-        """Execute comprehensive hooks phase with monitoring."""
         comprehensive_success = self._run_comprehensive_hooks_phase(options)
         if comprehensive_success:
             monitor.record_sequential_op()
@@ -1673,7 +1668,6 @@ class WorkflowOrchestrator:
             TestManagerProtocol,
         )

-        # Initialize logging first so container creation respects log levels
         self._initialize_logging()

         self.logger = get_logger("crackerjack.orchestrator")
@@ -1714,13 +1708,11 @@ class WorkflowOrchestrator:
         session_id = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
         debug_log_file = log_manager.create_debug_log_file(session_id)

-        # Set log level based on debug flag only - verbose should not enable DEBUG logs
         log_level = "DEBUG" if self.debug else "INFO"
         setup_structured_logging(
             level=log_level, json_output=False, log_file=debug_log_file
         )

-        # Use a temporary logger for the initialization message
         temp_logger = get_logger("crackerjack.orchestrator.init")
         temp_logger.debug(
             "Structured logging initialized",
@@ -1767,7 +1759,12 @@ class WorkflowOrchestrator:
         return self.phases.run_configuration_phase(options)

     async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
-        return await self.pipeline.run_complete_workflow(options)
+        result: bool = await self.pipeline.run_complete_workflow(options)
+        return result
+
+    def run_complete_workflow_sync(self, options: OptionsProtocol) -> bool:
+        """Sync wrapper for run_complete_workflow."""
+        return asyncio.run(self.run_complete_workflow(options))

     def _cleanup_resources(self) -> None:
         self.session.cleanup_resources()
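Note: the new `run_complete_workflow_sync` wraps the async pipeline in `asyncio.run()`, giving synchronous callers a one-call entry point without managing an event loop; `asyncio.run` raises `RuntimeError` if invoked while another loop is already running, so it is safe only from synchronous code. A hedged usage sketch (the class body is a stand-in, and `options` stands in for a real `OptionsProtocol` implementation):

```python
import asyncio


class Orchestrator:
    async def run_complete_workflow(self, options: object) -> bool:
        await asyncio.sleep(0)  # stand-in for the real async pipeline
        return True

    def run_complete_workflow_sync(self, options: object) -> bool:
        # asyncio.run() creates and tears down an event loop per call;
        # it raises RuntimeError inside an already-running loop.
        return asyncio.run(self.run_complete_workflow(options))


print(Orchestrator().run_complete_workflow_sync(options=None))  # True
```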