crackerjack-0.29.0-py3-none-any.whl → crackerjack-0.31.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of crackerjack might be problematic.

Files changed (158)
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +225 -253
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +169 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +652 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +401 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +670 -0
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +561 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +640 -0
  40. crackerjack/dynamic_config.py +577 -0
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +411 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +435 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +144 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +615 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +370 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +141 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +360 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/execution_strategies.py +341 -0
  107. crackerjack/orchestration/test_progress_streamer.py +636 -0
  108. crackerjack/plugins/__init__.py +15 -0
  109. crackerjack/plugins/base.py +200 -0
  110. crackerjack/plugins/hooks.py +246 -0
  111. crackerjack/plugins/loader.py +335 -0
  112. crackerjack/plugins/managers.py +259 -0
  113. crackerjack/py313.py +8 -3
  114. crackerjack/services/__init__.py +22 -0
  115. crackerjack/services/cache.py +314 -0
  116. crackerjack/services/config.py +347 -0
  117. crackerjack/services/config_integrity.py +99 -0
  118. crackerjack/services/contextual_ai_assistant.py +516 -0
  119. crackerjack/services/coverage_ratchet.py +347 -0
  120. crackerjack/services/debug.py +736 -0
  121. crackerjack/services/dependency_monitor.py +617 -0
  122. crackerjack/services/enhanced_filesystem.py +439 -0
  123. crackerjack/services/file_hasher.py +151 -0
  124. crackerjack/services/filesystem.py +395 -0
  125. crackerjack/services/git.py +165 -0
  126. crackerjack/services/health_metrics.py +611 -0
  127. crackerjack/services/initialization.py +847 -0
  128. crackerjack/services/log_manager.py +286 -0
  129. crackerjack/services/logging.py +174 -0
  130. crackerjack/services/metrics.py +578 -0
  131. crackerjack/services/pattern_cache.py +362 -0
  132. crackerjack/services/pattern_detector.py +515 -0
  133. crackerjack/services/performance_benchmarks.py +653 -0
  134. crackerjack/services/security.py +163 -0
  135. crackerjack/services/server_manager.py +234 -0
  136. crackerjack/services/smart_scheduling.py +144 -0
  137. crackerjack/services/tool_version_service.py +61 -0
  138. crackerjack/services/unified_config.py +437 -0
  139. crackerjack/services/version_checker.py +248 -0
  140. crackerjack/slash_commands/__init__.py +14 -0
  141. crackerjack/slash_commands/init.md +122 -0
  142. crackerjack/slash_commands/run.md +163 -0
  143. crackerjack/slash_commands/status.md +127 -0
  144. crackerjack-0.31.4.dist-info/METADATA +742 -0
  145. crackerjack-0.31.4.dist-info/RECORD +148 -0
  146. crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
  147. crackerjack/.gitignore +0 -34
  148. crackerjack/.libcst.codemod.yaml +0 -18
  149. crackerjack/.pdm.toml +0 -1
  150. crackerjack/.pre-commit-config-ai.yaml +0 -149
  151. crackerjack/.pre-commit-config-fast.yaml +0 -69
  152. crackerjack/.pre-commit-config.yaml +0 -114
  153. crackerjack/crackerjack.py +0 -4140
  154. crackerjack/pyproject.toml +0 -285
  155. crackerjack-0.29.0.dist-info/METADATA +0 -1289
  156. crackerjack-0.29.0.dist-info/RECORD +0 -17
  157. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
  158. {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
crackerjack/orchestration/advanced_orchestrator.py
@@ -0,0 +1,970 @@
+ import time
+ import typing as t
+ from contextlib import suppress
+ from pathlib import Path
+
+ from rich.console import Console, Group
+ from rich.panel import Panel
+ from rich.table import Table
+
+ from crackerjack.agents import (
+     AgentContext,
+     AgentCoordinator,
+     Issue,
+     IssueType,
+     Priority,
+ )
+ from crackerjack.config.hooks import HookConfigLoader, HookStrategy
+ from crackerjack.core.session_coordinator import SessionCoordinator
+ from crackerjack.executors.hook_executor import HookExecutor
+ from crackerjack.executors.individual_hook_executor import (
+     HookProgress,
+     IndividualHookExecutor,
+ )
+ from crackerjack.managers.test_manager import TestManagementImpl
+ from crackerjack.models.protocols import OptionsProtocol
+ from crackerjack.models.task import HookResult
+ from crackerjack.services.metrics import get_metrics_collector
+
+ from .execution_strategies import (
+     AICoordinationMode,
+     ExecutionContext,
+     ExecutionPlan,
+     ExecutionStrategy,
+     OrchestrationConfig,
+     OrchestrationPlanner,
+ )
+ from .test_progress_streamer import TestProgressStreamer, TestSuiteProgress
+
+
+ class CorrelationTracker:
+     def __init__(self) -> None:
+         self.iteration_data: list[dict[str, t.Any]] = []
+         self.failure_patterns: dict[str, list[str]] = {}
+         self.fix_success_rates: dict[str, float] = {}
+
+     def record_iteration(
+         self,
+         iteration: int,
+         hook_results: list[HookResult],
+         test_results: dict[str, t.Any],
+         ai_fixes: list[str],
+     ) -> None:
+         failed_hooks = [r.name for r in hook_results if r.status == "failed"]
+
+         iteration_data = {
+             "iteration": iteration,
+             "timestamp": time.time(),
+             "failed_hooks": failed_hooks,
+             "test_failures": test_results.get("failed_tests", []),
+             "ai_fixes_applied": ai_fixes,
+             "total_errors": sum(
+                 len(getattr(r, "error_details", [])) for r in hook_results
+             ),
+         }
+
+         self.iteration_data.append(iteration_data)
+         self._analyze_failure_patterns()
+
+     def _analyze_failure_patterns(self) -> None:
+         if len(self.iteration_data) < 2:
+             return
+
+         for i in range(1, len(self.iteration_data)):
+             current = self.iteration_data[i]
+             previous = self.iteration_data[i - 1]
+
+             recurring_failures = set(current["failed_hooks"]) & set(
+                 previous["failed_hooks"],
+             )
+
+             for hook in recurring_failures:
+                 if hook not in self.failure_patterns:
+                     self.failure_patterns[hook] = []
+                 self.failure_patterns[hook].append(f"iteration_{current['iteration']}")
+
+     def get_problematic_hooks(self) -> list[str]:
+         return [
+             hook
+             for hook, failures in self.failure_patterns.items()
+             if len(failures) >= 2
+         ]
+
+     def get_correlation_data(self) -> dict[str, t.Any]:
+         return {
+             "iteration_count": len(self.iteration_data),
+             "failure_patterns": self.failure_patterns,
+             "problematic_hooks": self.get_problematic_hooks(),
+             "recent_trends": self.iteration_data[-3:]
+             if len(self.iteration_data) >= 3
+             else self.iteration_data,
+         }
+
+
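
For orientation: the tracker treats a hook as "problematic" once it recurs as a failure across consecutive iterations at least twice. A minimal sketch of that behavior — FakeResult is a hypothetical stand-in for crackerjack.models.task.HookResult, used only to keep the snippet self-contained:

    from dataclasses import dataclass, field

    @dataclass
    class FakeResult:  # hypothetical stand-in for HookResult
        name: str
        status: str
        error_details: list[str] = field(default_factory=list)

    tracker = CorrelationTracker()
    for i in (1, 2, 3):  # the same hook fails three iterations in a row
        tracker.record_iteration(
            iteration=i,
            hook_results=[FakeResult("pyright", "failed")],
            test_results={"failed_tests": []},
            ai_fixes=[],
        )
    print(tracker.get_problematic_hooks())  # ["pyright"]

Note that _analyze_failure_patterns rescans the whole history on every call, so failure_patterns accumulates duplicate entries; the >= 2 threshold in get_problematic_hooks still fires as intended.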
+ class MinimalProgressStreamer:
+     """Fallback progress streamer that provides minimal functionality."""
+
+     def __init__(self) -> None:
+         pass
+
+     def update_stage(self, stage: str, substage: str = "") -> None:
+         pass
+
+     def update_hook_progress(self, progress: HookProgress) -> None:
+         pass
+
+     def _stream_update(self, data: dict[str, t.Any]) -> None:
+         pass
+
+
+ class ProgressStreamer:
+     def __init__(
+         self,
+         config: OrchestrationConfig,
+         session: SessionCoordinator,
+     ) -> None:
+         self.config = config
+         self.session = session
+         self.current_stage = "initialization"
+         self.current_substage = ""
+         self.hook_progress: dict[str, HookProgress] = {}
+
+     def update_stage(self, stage: str, substage: str = "") -> None:
+         self.current_stage = stage
+         self.current_substage = substage
+         self._stream_update(
+             {
+                 "type": "stage_update",
+                 "stage": stage,
+                 "substage": substage,
+                 "timestamp": time.time(),
+             },
+         )
+
+     def update_hook_progress(self, progress: HookProgress) -> None:
+         self.hook_progress[progress.hook_name] = progress
+         self._stream_update(
+             {
+                 "type": "hook_progress",
+                 "hook_name": progress.hook_name,
+                 "progress": progress.to_dict(),
+                 "timestamp": time.time(),
+             },
+         )
+
+     def _stream_update(self, update_data: dict[str, t.Any]) -> None:
+         self.session.update_stage(
+             self.current_stage,
+             f"{self.current_substage}: {update_data.get('hook_name', 'processing')}",
+         )
+
+         if hasattr(self.session, "web_job_id") and self.session.web_job_id:
+             self._update_websocket_progress(update_data)
+
+     def _update_websocket_progress(self, update_data: dict[str, t.Any]) -> None:
+         with suppress(Exception):
+             if hasattr(self.session, "progress_file") and self.session.progress_file:
+                 import json
+
+                 progress_data: dict[str, t.Any] = {}
+                 if self.session.progress_file.exists():
+                     with self.session.progress_file.open() as f:
+                         progress_data = json.load(f)
+
+                 progress_data.update(
+                     {
+                         "current_stage": self.current_stage,
+                         "current_substage": self.current_substage,
+                         "hook_progress": {
+                             name: prog.to_dict()
+                             for name, prog in self.hook_progress.items()
+                         },
+                         "last_update": update_data,
+                         "updated_at": time.time(),
+                     },
+                 )
+
+                 with self.session.progress_file.open("w") as f:
+                     json.dump(progress_data, f, indent=2)
+
+
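
The progress file written by _update_websocket_progress is plain JSON, presumably what the endpoints under crackerjack/mcp/websocket/ relay to monitors. A sketch of a reader on the other side, with an illustrative path (the real location comes from session.progress_file):

    import json
    from pathlib import Path

    progress_file = Path("/tmp/crackerjack-progress.json")  # illustrative path
    data = json.loads(progress_file.read_text())
    print(data["current_stage"], data["current_substage"])
    for hook, prog in data.get("hook_progress", {}).items():
        print(hook, prog)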
+ class AdvancedWorkflowOrchestrator:
+     def __init__(
+         self,
+         console: Console,
+         pkg_path: Path,
+         session: SessionCoordinator,
+         config: OrchestrationConfig | None = None,
+     ) -> None:
+         self.console = console
+         self.pkg_path = pkg_path
+         self.session = session
+         self.config = config or OrchestrationConfig()
+
+         self.hook_config_loader = HookConfigLoader()
+         self.batch_executor = HookExecutor(console, pkg_path, quiet=True)
+         self.individual_executor = IndividualHookExecutor(console, pkg_path)
+         self.test_manager = TestManagementImpl(console, pkg_path)
+         self.test_streamer = TestProgressStreamer(console, pkg_path)
+         self.planner = OrchestrationPlanner(console)
+
+         # Initialize progress_streamer early (needed by _detect_and_configure_mcp_mode)
+         self.correlation_tracker = CorrelationTracker()
+         try:
+             self.progress_streamer = ProgressStreamer(self.config, session)
+         except Exception as e:
+             # Fallback to a minimal progress streamer if there's an issue
+             console.print(
+                 f"[yellow]Warning: ProgressStreamer initialization failed: {e}[/yellow]",
+             )
+             self.progress_streamer = MinimalProgressStreamer()
+         self.metrics = get_metrics_collector()
+
+         # Detect if running in MCP mode and configure accordingly
+         self._detect_and_configure_mcp_mode()
+
+         self.agent_coordinator: AgentCoordinator | None = None
+
+     def _detect_and_configure_mcp_mode(self) -> None:
+         """Detect if running in MCP context and configure for minimal terminal I/O."""
+         # Check for MCP context indicators
+         is_mcp_mode = (
+             # Console is using StringIO (stdio mode)
+             hasattr(self.console.file, "getvalue")
+             # Or console is not attached to a real terminal
+             or not self.console.is_terminal
+             # Or we have a web job ID (indicates MCP execution)
+             or hasattr(self.session, "job_id")
+         )
+
+         if is_mcp_mode:
+             # Configure individual executor for MCP mode to prevent terminal lockup
+             self.individual_executor.set_mcp_mode(True)
+             self.console.print(
+                 "[dim]🔧 MCP mode detected - using minimal output mode[/dim]",
+             )
+         if self.config.ai_coordination_mode in (
+             AICoordinationMode.MULTI_AGENT,
+             AICoordinationMode.COORDINATOR,
+         ):
+             self._initialize_multi_agent_system()
+
+         self.individual_executor.set_progress_callback(
+             self.progress_streamer.update_hook_progress,
+         )
+         self.test_streamer.set_progress_callback(self._update_test_suite_progress)
+
+     def _configure_verbose_mode(self, options: OptionsProtocol) -> None:
+         """Configure hook output verbosity based on user options."""
+         # Enable verbose output if explicitly requested, otherwise use quiet mode
+         verbose_mode = getattr(options, "verbose", False)
+
+         # Don't override MCP mode detection - only configure if not already in MCP mode
+         if not hasattr(self.console.file, "getvalue"):
+             # Set quiet mode (suppress realtime output) unless verbose mode is enabled
+             quiet_mode = not verbose_mode
+             self.individual_executor.set_mcp_mode(quiet_mode)
+
+             if verbose_mode:
+                 self.console.print(
+                     "[dim]🔧 Verbose mode enabled - showing detailed hook output[/dim]",
+                 )
+
+     def _initialize_multi_agent_system(self) -> None:
+         self.console.print(
+             "[bold cyan]🤖 Initializing Multi-Agent AI System[/bold cyan]",
+         )
+
+         agent_context = AgentContext(
+             project_path=self.pkg_path,
+             session_id=getattr(self.session, "job_id", None),
+         )
+
+         self.agent_coordinator = AgentCoordinator(agent_context)
+         self.agent_coordinator.initialize_agents()
+
+         capabilities = self.agent_coordinator.get_agent_capabilities()
+         self.console.print(
+             f"[green]✅ Initialized {len(capabilities)} specialized agents: [/green]",
+         )
+         for agent_name, info in capabilities.items():
+             types_str = ", ".join(info["supported_types"])
+             self.console.print(f" • {agent_name}: {types_str}")
+
+         self.console.print(
+             f"[cyan]AI Coordination Mode: {self.config.ai_coordination_mode.value}[/cyan]",
+         )
+
+     def _display_iteration_stats(
+         self,
+         iteration: int,
+         max_iterations: int,
+         iteration_times: dict[str, float],
+         hooks_time: float,
+         tests_time: float,
+         ai_time: float,
+         context: t.Any,
+     ) -> None:
+         """Display rich iteration statistics panel."""
+         # Create timing table
+         timing_table = Table(show_header=True, header_style="bold cyan")
+         timing_table.add_column("Phase", style="cyan")
+         timing_table.add_column("This Iteration", justify="right", style="yellow")
+         timing_table.add_column("Cumulative", justify="right", style="green")
+
+         # Add timing rows
+         timing_table.add_row(
+             "🔧 Hooks",
+             f"{iteration_times.get('hooks', 0):.1f}s",
+             f"{hooks_time:.1f}s",
+         )
+         timing_table.add_row(
+             "🧪 Tests",
+             f"{iteration_times.get('tests', 0):.1f}s",
+             f"{tests_time:.1f}s",
+         )
+         timing_table.add_row(
+             "🤖 AI Analysis",
+             f"{iteration_times.get('ai', 0):.1f}s",
+             f"{ai_time:.1f}s",
+         )
+
+         total_iteration_time = sum(iteration_times.values())
+         total_cumulative_time = hooks_time + tests_time + ai_time
+         timing_table.add_row(
+             "📊 Total",
+             f"{total_iteration_time:.1f}s",
+             f"{total_cumulative_time:.1f}s",
+             style="bold",
+         )
+
+         # Create status table
+         status_table = Table(show_header=True, header_style="bold magenta")
+         status_table.add_column("Metric", style="magenta")
+         status_table.add_column("Value", justify="right", style="white")
+
+         status_table.add_row("🔄 Iteration", f"{iteration}/{max_iterations}")
+         status_table.add_row(
+             "📈 Progress",
+             f"{(iteration / max_iterations) * 100:.1f}%",
+         )
+
+         if hasattr(context, "hook_failures"):
+             status_table.add_row("❌ Hook Failures", str(len(context.hook_failures)))
+         if hasattr(context, "test_failures"):
+             status_table.add_row("🧪 Test Failures", str(len(context.test_failures)))
+
+         # Create the panel with properly rendered tables
+         panel_content = Group(
+             "[bold white]Timing Breakdown[/bold white]",
+             timing_table,
+             "",
+             "[bold white]Status Summary[/bold white]",
+             status_table,
+         )
+
+         iteration_panel = Panel(
+             panel_content,
+             title=f"[bold bright_blue]📊 Iteration {iteration} Statistics[/bold bright_blue]",
+             border_style="bright_blue",
+             padding=(1, 2),
+         )
+
+         self.console.print()
+         self.console.print(iteration_panel)
+         self.console.print()
+
+     async def execute_orchestrated_workflow(
+         self,
+         options: OptionsProtocol,
+         max_iterations: int = 10,
+     ) -> bool:
+         # Configure verbose mode before starting workflow
+         self._configure_verbose_mode(options)
+
+         workflow_start_time = time.time()
+         job_id = (
+             getattr(self.session, "job_id", None) or f"orchestration_{int(time.time())}"
+         )
+
+         self.console.print(
+             "\n[bold bright_blue]🚀 STARTING ORCHESTRATED WORKFLOW[/bold bright_blue]",
+         )
+
+         context = ExecutionContext(self.pkg_path, options)
+
+         hook_strategies = [
+             self.hook_config_loader.load_strategy("fast"),
+             self.hook_config_loader.load_strategy("comprehensive"),
+         ]
+
+         execution_plan = self.planner.create_execution_plan(
+             self.config,
+             context,
+             hook_strategies,
+         )
+
+         execution_plan.print_plan_summary(self.console)
+
+         success = False
+         strategy_switches = 0
+         hooks_time = 0
+         tests_time = 0
+         ai_time = 0
+
+         for iteration in range(1, max_iterations + 1):
+             self.console.print(
+                 f"\n[bold bright_yellow]🔄 ITERATION {iteration} / {max_iterations}[/bold bright_yellow]",
+             )
+
+             context.iteration_count = iteration
+
+             time.time()
+             iteration_success, iteration_times = await self._execute_single_iteration(
+                 execution_plan,
+                 context,
+                 iteration,
+             )
+
+             hooks_time += iteration_times.get("hooks", 0)
+             tests_time += iteration_times.get("tests", 0)
+             ai_time += iteration_times.get("ai", 0)
+
+             # Display iteration statistics panel
+             self._display_iteration_stats(
+                 iteration,
+                 max_iterations,
+                 iteration_times,
+                 hooks_time,
+                 tests_time,
+                 ai_time,
+                 context,
+             )
+
+             if iteration_success:
+                 self.console.print(
+                     f"\n[bold green]🎉 WORKFLOW COMPLETED SUCCESSFULLY IN {iteration} ITERATIONS![/bold green]",
+                 )
+                 success = True
+                 break
+
+             if iteration < max_iterations:
+                 old_strategy = execution_plan.execution_strategy
+                 execution_plan = self._adapt_execution_plan(execution_plan, context)
+                 if execution_plan.execution_strategy != old_strategy:
+                     strategy_switches += 1
+
+         if not success:
+             self.console.print(
+                 f"\n[bold red]❌ WORKFLOW INCOMPLETE AFTER {max_iterations} ITERATIONS[/bold red]",
+             )
+
+         self._print_final_analysis()
+
+         total_time = int((time.time() - workflow_start_time) * 1000)
+         correlation_data = self.correlation_tracker.get_correlation_data()
+
+         self.metrics.record_orchestration_execution(
+             job_id=job_id,
+             execution_strategy=execution_plan.execution_strategy.value,
+             progress_level=self.config.progress_level.value,
+             ai_mode=self.config.ai_coordination_mode.value,
+             iteration_count=context.iteration_count,
+             strategy_switches=strategy_switches,
+             correlation_insights=correlation_data,
+             total_execution_time_ms=total_time,
+             hooks_execution_time_ms=round(hooks_time * 1000),
+             tests_execution_time_ms=round(tests_time * 1000),
+             ai_analysis_time_ms=round(ai_time * 1000),
+         )
+
+         return success
+
+     async def _execute_single_iteration(
+         self,
+         plan: ExecutionPlan,
+         context: ExecutionContext,
+         iteration: int,
+     ) -> tuple[bool, dict[str, float]]:
+         self.progress_streamer.update_stage("iteration_start", f"iteration_{iteration}")
+
+         phase_times: dict[str, float] = {"hooks": 0.0, "tests": 0.0, "ai": 0.0}
+
+         hooks_start = time.time()
+         hook_results = await self._execute_hooks_phase(plan, context)
+         phase_times["hooks"] = time.time() - hooks_start
+
+         tests_start = time.time()
+         test_results = await self._execute_tests_phase(plan, context)
+         phase_times["tests"] = time.time() - tests_start
+
+         ai_fixes = []
+         if not (
+             all(r.status == "passed" for r in hook_results)
+             and test_results.get("success", False)
+         ):
+             ai_start = time.time()
+             ai_fixes = await self._execute_ai_phase(plan, hook_results, test_results)
+             phase_times["ai"] = time.time() - ai_start
+
+         job_id = (
+             getattr(self.session, "job_id", None) or f"orchestration_{int(time.time())}"
+         )
+         self.metrics.record_strategy_decision(
+             job_id=job_id,
+             iteration=iteration,
+             previous_strategy=getattr(context, "previous_strategy", None),
+             selected_strategy=plan.execution_strategy.value,
+             decision_reason=f"Iteration {iteration} execution strategy",
+             context_data={
+                 "failed_hooks": len([r for r in hook_results if r.status == "failed"]),
+                 "failed_tests": len(test_results.get("failed_tests", [])),
+                 "ai_fixes_applied": len(ai_fixes),
+             },
+             effectiveness_score=None,
+         )
+
+         self.correlation_tracker.record_iteration(
+             iteration,
+             hook_results,
+             test_results,
+             ai_fixes,
+         )
+
+         all_hooks_passed = all(r.status == "passed" for r in hook_results)
+         all_tests_passed = test_results.get("success", False)
+
+         return all_hooks_passed and all_tests_passed, phase_times
+
+     async def _execute_hooks_phase(
+         self,
+         plan: ExecutionPlan,
+         context: ExecutionContext,
+     ) -> list[HookResult]:
+         self.progress_streamer.update_stage("hooks", "starting")
+
+         all_results = []
+
+         for hook_plan in plan.hook_plans:
+             strategy = hook_plan["strategy"]
+             execution_mode = hook_plan["execution_mode"]
+
+             # Special handling for fast hooks with autofix cycle
+             if strategy.name == "fast":
+                 fast_results = await self._execute_fast_hooks_with_autofix(
+                     strategy,
+                     execution_mode,
+                     context,
+                 )
+                 all_results.extend(fast_results)
+             else:
+                 # Regular execution for non-fast hooks
+                 self.progress_streamer.update_stage(
+                     "hooks",
+                     f"executing_{strategy.name}",
+                 )
+
+                 if execution_mode == ExecutionStrategy.INDIVIDUAL:
+                     result = await self.individual_executor.execute_strategy_individual(
+                         strategy,
+                     )
+                     all_results.extend(result.hook_results)
+                 else:
+                     results = self.batch_executor.execute_strategy(strategy)
+                     all_results.extend(results.results)
+
+         self.progress_streamer.update_stage("hooks", "completed")
+         return all_results
+
+     async def _execute_fast_hooks_with_autofix(
+         self,
+         strategy: HookStrategy,
+         execution_mode: ExecutionStrategy,
+         context: ExecutionContext,
+     ) -> list[HookResult]:
+         """Execute fast hooks with autofix cycle if they fail twice."""
+         self.progress_streamer.update_stage("hooks", "fast_hooks_with_autofix")
+
+         max_autofix_cycles = 2
+         autofix_cycle = 0
+
+         while autofix_cycle < max_autofix_cycles:
+             self.console.print(
+                 f"[cyan]🚀 Fast hooks execution (autofix cycle {autofix_cycle + 1}/{max_autofix_cycles})[/cyan]",
+             )
+
+             # Run fast hooks twice
+             first_attempt = await self._execute_fast_hooks_attempt(
+                 strategy,
+                 execution_mode,
+             )
+
+             if all(r.status == "passed" for r in first_attempt):
+                 self.console.print(
+                     "[green]✅ Fast hooks passed on first attempt[/green]",
+                 )
+                 return first_attempt
+
+             # First attempt failed, try second attempt
+             self.console.print(
+                 "[yellow]⚠️ Fast hooks failed on first attempt, retrying...[/yellow]",
+             )
+             second_attempt = await self._execute_fast_hooks_attempt(
+                 strategy,
+                 execution_mode,
+             )
+
+             if all(r.status == "passed" for r in second_attempt):
+                 self.console.print(
+                     "[green]✅ Fast hooks passed on second attempt[/green]",
+                 )
+                 return second_attempt
+
+             # Both attempts failed, check if we should run autofix
+             autofix_cycle += 1
+             if autofix_cycle < max_autofix_cycles:
+                 self.console.print(
+                     "[red]❌ Fast hooks failed twice, triggering autofix cycle...[/red]",
+                 )
+                 await self._trigger_autofix_for_fast_hooks(second_attempt)
+             else:
+                 self.console.print(
+                     "[red]❌ Fast hooks failed after maximum autofix cycles[/red]",
+                 )
+                 return second_attempt
+
+         # Should never reach here, but return empty results as fallback
+         return []
+
+     async def _execute_fast_hooks_attempt(
+         self,
+         strategy: HookStrategy,
+         execution_mode: ExecutionStrategy,
+     ) -> list[HookResult]:
+         """Execute a single attempt of fast hooks."""
+         if execution_mode == ExecutionStrategy.INDIVIDUAL:
+             result = await self.individual_executor.execute_strategy_individual(
+                 strategy,
+             )
+             return result.hook_results
+         results = self.batch_executor.execute_strategy(strategy)
+         return results.results
+
+     async def _trigger_autofix_for_fast_hooks(
+         self,
+         failed_results: list[HookResult],
+     ) -> None:
+         """Trigger AI autofix cycle for failed fast hooks."""
+         self.console.print(
+             "[magenta]🤖 Starting AI autofix cycle for fast hooks...[/magenta]",
+         )
+
+         # Create mock test results for AI analysis (fast hooks don't include tests)
+         mock_test_results = {
+             "success": True,
+             "failed_tests": [],
+             "individual_tests": [],
+         }
+
+         # Create a minimal execution plan for AI analysis
+         from .execution_strategies import ExecutionPlan, ExecutionStrategy
+
+         mock_plan = ExecutionPlan(
+             config=self.config,
+             execution_strategy=ExecutionStrategy.BATCH,
+             hook_plans=[],
+             test_plan={"mode": "full_suite", "estimated_duration": 0},
+             ai_plan={
+                 "mode": self.config.ai_coordination_mode,
+                 "intelligence_level": self.config.ai_intelligence,
+                 "batch_processing": True,
+                 "correlation_tracking": self.config.correlation_tracking,
+                 "failure_analysis": self.config.failure_analysis,
+                 "adaptive_retry": self.config.intelligent_retry,
+             },
+             estimated_total_duration=0,
+         )
+
+         # Execute AI analysis and fixes for hook failures
+         ai_fixes = await self._execute_ai_phase(
+             mock_plan,
+             failed_results,
+             mock_test_results,
+         )
+
+         if ai_fixes:
+             self.console.print(
+                 f"[green]✅ Applied {len(ai_fixes)} AI fixes for fast hooks[/green]",
+             )
+         else:
+             self.console.print("[yellow]⚠️ No AI fixes could be applied[/yellow]")
+
+     async def _execute_tests_phase(
+         self,
+         plan: ExecutionPlan,
+         context: ExecutionContext,
+     ) -> dict[str, t.Any]:
+         self.progress_streamer.update_stage("tests", "starting")
+
+         test_mode = plan.test_plan.get("mode", "full_suite")
+
+         if test_mode in ("individual_with_progress", "selective"):
+             test_results = await self.test_streamer.run_tests_with_streaming(
+                 context.options,
+                 test_mode,
+             )
+
+             job_id = (
+                 getattr(self.session, "job_id", None)
+                 or f"orchestration_{int(time.time())}"
+             )
+             individual_tests = test_results.get("individual_tests", [])
+
+             for test in individual_tests:
+                 self.metrics.record_individual_test(
+                     job_id=job_id,
+                     test_id=test.test_id,
+                     test_file=test.test_file,
+                     test_class=test.test_class,
+                     test_method=test.test_method,
+                     status=test.status,
+                     execution_time_ms=int((test.duration or 0) * 1000),
+                     error_message=test.error_message,
+                     error_traceback=test.failure_traceback,
+                 )
+         else:
+             test_success = self.test_manager.run_tests(context.options)
+             test_results = {
+                 "success": test_success,
+                 "failed_tests": [],
+                 "suite_progress": None,
+                 "individual_tests": [],
+             }
+
+         self.progress_streamer.update_stage("tests", "completed")
+         return test_results
+
+     def _update_test_suite_progress(self, suite_progress: TestSuiteProgress) -> None:
+         current_test = suite_progress.current_test or "running tests"
+         self.progress_streamer.update_stage(
+             "tests",
+             f"{suite_progress.completed_tests} / {suite_progress.total_tests} - {current_test}",
+         )
+
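
Both branches of _execute_tests_phase normalize to the same dict shape, which _execute_single_iteration and _execute_ai_phase rely on; roughly:

    test_results = {
        "success": False,
        "failed_tests": ["tests/test_x.py::test_y"],  # illustrative entry
        "suite_progress": None,  # only the streaming path fills this in
        "individual_tests": [],  # objects with test_id, status, duration, ...
    }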
+     async def _execute_ai_phase(
+         self,
+         plan: ExecutionPlan,
+         hook_results: list[HookResult],
+         test_results: dict[str, t.Any],
+     ) -> list[str]:
+         self.progress_streamer.update_stage("ai_analysis", "analyzing_failures")
+
+         failed_hooks = [r for r in hook_results if r.status == "failed"]
+         failed_tests = test_results.get("failed_tests", [])
+
+         individual_tests = test_results.get("individual_tests", [])
+         failed_individual_tests = [t for t in individual_tests if t.status == "failed"]
+
+         correlation_data = self.correlation_tracker.get_correlation_data()
+
+         self.console.print("\n[bold magenta]🤖 AI ANALYSIS PHASE[/bold magenta]")
+         self.console.print(f"AI Mode: {self.config.ai_coordination_mode.value}")
+         self.console.print(f"Failed hooks: {len(failed_hooks)}")
+         self.console.print(f"Failed tests: {len(failed_tests)}")
+
+         if failed_individual_tests:
+             self.console.print(
+                 f"Individual test failures: {len(failed_individual_tests)}",
+             )
+             for test in failed_individual_tests[:3]:
+                 self.console.print(f" ❌ {test.test_id}")
+
+         if correlation_data["problematic_hooks"]:
+             self.console.print(
+                 f"Problematic hooks (recurring): {', '.join(correlation_data['problematic_hooks'])}",
+             )
+
+         if self.agent_coordinator and self.config.ai_coordination_mode in (
+             AICoordinationMode.MULTI_AGENT,
+             AICoordinationMode.COORDINATOR,
+         ):
+             ai_fixes = await self._execute_multi_agent_analysis(
+                 failed_hooks,
+                 failed_tests,
+                 failed_individual_tests,
+                 correlation_data,
+             )
+         else:
+             ai_fixes = await self._execute_single_agent_analysis(
+                 failed_hooks,
+                 failed_tests,
+                 failed_individual_tests,
+                 correlation_data,
+             )
+
+         self.progress_streamer.update_stage("ai_analysis", "completed")
+         return ai_fixes
+
+     async def _execute_multi_agent_analysis(
+         self,
+         failed_hooks: list[HookResult],
+         failed_tests: list[str],
+         failed_individual_tests: list[t.Any],
+         correlation_data: dict[str, t.Any],
+     ) -> list[str]:
+         self.console.print("[bold cyan]🤖 Multi-Agent Analysis Started[/bold cyan]")
+
+         issues = []
+
+         for hook_result in failed_hooks:
+             issue_type = self._map_hook_to_issue_type(hook_result.name)
+             issue = Issue(
+                 id=f"hook_{hook_result.name}_{hash(str(hook_result.issues_found))}",
+                 type=issue_type,
+                 severity=Priority.HIGH
+                 if hook_result.name in correlation_data.get("problematic_hooks", [])
+                 else Priority.MEDIUM,
+                 message=(
+                     hook_result.issues_found[0]
+                     if hook_result.issues_found
+                     else f"{hook_result.name} failed"
+                 ),
+                 stage="hooks",
+                 details=getattr(hook_result, "error_details", []),
+             )
+             issues.append(issue)
+
+         for test_failure in failed_individual_tests:
+             issue = Issue(
+                 id=f"test_{test_failure.test_id}",
+                 type=IssueType.TEST_FAILURE,
+                 severity=Priority.HIGH,
+                 message=test_failure.error_message
+                 or f"Test failed: {test_failure.test_id}",
+                 file_path=getattr(test_failure, "test_file", None),
+                 stage="tests",
+                 details=[test_failure.failure_traceback]
+                 if hasattr(test_failure, "failure_traceback")
+                 else [],
+             )
+             issues.append(issue)
+
+         if not issues:
+             return ["No issues identified for multi-agent analysis"]
+
+         self.console.print(
+             f"[cyan]Processing {len(issues)} issues with specialized agents...[/cyan]",
+         )
+
+         assert self.agent_coordinator is not None
+         # Use proactive handling by default for better architectural planning
+         result = await self.agent_coordinator.handle_issues_proactively(issues)
+
+         ai_fixes = []
+         if result.fixes_applied:
+             ai_fixes.extend(result.fixes_applied)
+         else:
+             ai_fixes.append(
+                 f"Multi-agent analysis completed with {result.confidence:.2f} confidence",
+             )
+
+         if result.remaining_issues:
+             ai_fixes.append(f"Remaining issues: {len(result.remaining_issues)}")
+
+         if result.recommendations:
+             ai_fixes.extend(
+                 [f"Recommendation: {rec}" for rec in result.recommendations[:3]],
+             )
+
+         self.console.print(
+             f"[green]✅ Multi-agent analysis completed: {len(result.fixes_applied)} fixes applied[/green]",
+         )
+         return ai_fixes
+
+     async def _execute_single_agent_analysis(
+         self,
+         failed_hooks: list[HookResult],
+         failed_tests: list[str],
+         failed_individual_tests: list[t.Any],
+         correlation_data: dict[str, t.Any],
+     ) -> list[str]:
+         return [
+             f"[Single Agent] Analyzed {len(failed_hooks)} hook failures",
+             f"[Single Agent] Analyzed {len(failed_tests)} test failures",
+             f"[Single Agent] Analyzed {len(failed_individual_tests)} individual test failures",
+             "[Single Agent] Applied batch fixes based on correlation analysis",
+         ]
+
+     def _map_hook_to_issue_type(self, hook_name: str) -> IssueType:
+         hook_type_mapping = {
+             "ruff-format": IssueType.FORMATTING,
+             "ruff-check": IssueType.FORMATTING,
+             "pyright": IssueType.TYPE_ERROR,
+             "bandit": IssueType.SECURITY,
+             "vulture": IssueType.DEAD_CODE,
+             "refurb": IssueType.COMPLEXITY,
+             "creosote": IssueType.DEPENDENCY,
+             "gitleaks": IssueType.SECURITY,
+             "trailing-whitespace": IssueType.FORMATTING,
+             "end-of-file-fixer": IssueType.FORMATTING,
+         }
+
+         return hook_type_mapping.get(hook_name) or IssueType.FORMATTING
+
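
Unmapped hook names fall through to IssueType.FORMATTING, so every failed hook still yields an issue the agents can route. For illustration, with orchestrator an AdvancedWorkflowOrchestrator instance:

    orchestrator._map_hook_to_issue_type("bandit")  # IssueType.SECURITY
    orchestrator._map_hook_to_issue_type("unknown-hook")  # IssueType.FORMATTING (fallback)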
+     def _adapt_execution_plan(
+         self,
+         current_plan: ExecutionPlan,
+         context: ExecutionContext,
+     ) -> ExecutionPlan:
+         problematic_hooks = self.correlation_tracker.get_problematic_hooks()
+
+         if problematic_hooks:
+             self.console.print(
+                 f"[yellow]🧠 Adapting strategy due to recurring failures in: {', '.join(problematic_hooks)}[/yellow]",
+             )
+
+             if current_plan.execution_strategy == ExecutionStrategy.BATCH:
+                 self.config.execution_strategy = ExecutionStrategy.INDIVIDUAL
+                 self.console.print(
+                     "[cyan]📋 Switching to individual execution for better debugging[/cyan]",
+                 )
+
+         hook_strategies = [
+             self.hook_config_loader.load_strategy("fast"),
+             self.hook_config_loader.load_strategy("comprehensive"),
+         ]
+
+         return self.planner.create_execution_plan(self.config, context, hook_strategies)
+
+     def _print_final_analysis(self) -> None:
+         correlation_data = self.correlation_tracker.get_correlation_data()
+
+         if correlation_data["iteration_count"] == 0:
+             return
+
+         self.console.print("\n" + "=" * 80)
+         self.console.print(
+             "[bold bright_magenta]🔍 CORRELATION ANALYSIS[/bold bright_magenta]",
+         )
+         self.console.print("=" * 80)
+
+         self.console.print(f"Total iterations: {correlation_data['iteration_count']}")
+
+         if correlation_data["problematic_hooks"]:
+             self.console.print(
+                 "\n[bold red]Problematic hooks (recurring failures): [/bold red]",
+             )
+             for hook in correlation_data["problematic_hooks"]:
+                 failures = correlation_data["failure_patterns"][hook]
+                 self.console.print(f" ❌ {hook} - failed in {len(failures)} iterations")
+
+         if correlation_data["recent_trends"]:
+             self.console.print("\n[bold yellow]Recent trends: [/bold yellow]")
+             for trend in correlation_data["recent_trends"][-2:]:
+                 failed_count = len(trend["failed_hooks"])
+                 self.console.print(
+                     f" Iteration {trend['iteration']}: {failed_count} failed hooks, "
+                     f"{trend['total_errors']} total errors",
+                 )
+
+         self.console.print("=" * 80)
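
Taken together, a caller drives the whole loop through execute_orchestrated_workflow. A minimal sketch, assuming an options object that satisfies OptionsProtocol; the SimpleNamespace stand-in and the SessionCoordinator constructor signature are assumptions, not part of the diff:

    import asyncio
    from pathlib import Path
    from types import SimpleNamespace

    from rich.console import Console

    async def main() -> None:
        console = Console()
        session = SessionCoordinator(console, Path.cwd())  # signature assumed
        orchestrator = AdvancedWorkflowOrchestrator(
            console=console,
            pkg_path=Path.cwd(),
            session=session,
            config=OrchestrationConfig(),
        )
        options = SimpleNamespace(verbose=False)  # stand-in for OptionsProtocol
        ok = await orchestrator.execute_orchestrated_workflow(options, max_iterations=5)
        raise SystemExit(0 if ok else 1)

    asyncio.run(main())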