crackerjack 0.29.0__py3-none-any.whl → 0.31.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack may contain issues; click here for more details.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +225 -253
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +169 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +652 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +401 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +670 -0
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +561 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +640 -0
- crackerjack/dynamic_config.py +577 -0
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +411 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +435 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +144 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +615 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +370 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +141 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +360 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +347 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +347 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +395 -0
- crackerjack/services/git.py +165 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +847 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.4.dist-info/METADATA +742 -0
- crackerjack-0.31.4.dist-info/RECORD +148 -0
- crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config-ai.yaml +0 -149
- crackerjack/.pre-commit-config-fast.yaml +0 -69
- crackerjack/.pre-commit-config.yaml +0 -114
- crackerjack/crackerjack.py +0 -4140
- crackerjack/pyproject.toml +0 -285
- crackerjack-0.29.0.dist-info/METADATA +0 -1289
- crackerjack-0.29.0.dist-info/RECORD +0 -17
- {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
- {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,640 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import typing as t
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from rich.console import Console
|
|
6
|
+
|
|
7
|
+
from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
|
|
8
|
+
from crackerjack.agents.coordinator import AgentCoordinator
|
|
9
|
+
from crackerjack.models.protocols import OptionsProtocol
|
|
10
|
+
from crackerjack.services.debug import get_ai_agent_debugger
|
|
11
|
+
from crackerjack.services.logging import (
|
|
12
|
+
LoggingContext,
|
|
13
|
+
get_logger,
|
|
14
|
+
setup_structured_logging,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
from .phase_coordinator import PhaseCoordinator
|
|
18
|
+
from .session_coordinator import SessionCoordinator
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def version() -> str:
    """Best-effort lookup of the installed ``crackerjack`` distribution version.

    Returns:
        The version string from package metadata, or ``"unknown"`` when the
        metadata cannot be read (e.g. the package is not installed).
    """
    try:
        from importlib import metadata

        result = metadata.version("crackerjack")
    except Exception:
        # Any metadata failure degrades to the sentinel rather than raising.
        result = "unknown"
    return result
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class WorkflowPipeline:
    """Drive one complete crackerjack run end to end.

    Phases (delegated to ``PhaseCoordinator``): configuration, cleaning,
    hooks (fast and comprehensive), tests, optional AI-agent fixing,
    publishing, and commit.  Progress and failures are recorded through
    ``SessionCoordinator``; when an external MCP state manager has been
    attached (``_mcp_state_manager``), stage transitions are mirrored into
    it as well.
    """

    def __init__(
        self,
        console: Console,
        pkg_path: Path,
        session: SessionCoordinator,
        phases: PhaseCoordinator,
    ) -> None:
        self.console = console
        self.pkg_path = pkg_path
        self.session = session
        self.phases = phases
        # Optional external state manager; stays None unless a caller attaches one.
        self._mcp_state_manager: t.Any = None

        self.logger = get_logger("crackerjack.pipeline")
        # Created lazily via the ``debugger`` property.
        self._debugger = None

    @property
    def debugger(self) -> t.Any:
        """Lazily create and cache the AI-agent debugger."""
        if self._debugger is None:
            self._debugger = get_ai_agent_debugger()
        return self._debugger

    def _should_debug(self) -> bool:
        """Return True when AI-agent debug logging is enabled via AI_AGENT_DEBUG=1."""
        import os

        return os.environ.get("AI_AGENT_DEBUG", "0") == "1"

    async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
        """Run the full workflow and return overall success.

        Wraps execution in a logging context, tracks the session, and
        translates KeyboardInterrupt / unexpected exceptions into a False
        result instead of propagating them.  Session resources are always
        cleaned up in the ``finally`` block.
        """
        with LoggingContext(
            "workflow_execution",
            testing=getattr(options, "testing", False),
            skip_hooks=getattr(options, "skip_hooks", False),
        ):
            start_time = time.time()
            self.session.initialize_session_tracking(options)
            self.session.track_task("workflow", "Complete crackerjack workflow")

            if self._should_debug():
                self.debugger.log_workflow_phase(
                    "workflow_execution",
                    "started",
                    details={
                        "testing": getattr(options, "testing", False),
                        "skip_hooks": getattr(options, "skip_hooks", False),
                        "ai_agent": getattr(options, "ai_agent", False),
                    },
                )

            # Propagate optional cleanup configuration to the session.
            if hasattr(options, "cleanup"):
                self.session.set_cleanup_config(options.cleanup)

            self.logger.info(
                "Starting complete workflow execution",
                testing=getattr(options, "testing", False),
                skip_hooks=getattr(options, "skip_hooks", False),
                package_path=str(self.pkg_path),
            )

            try:
                success = await self._execute_workflow_phases(options)
                self.session.finalize_session(start_time, success)

                duration = time.time() - start_time
                self.logger.info(
                    "Workflow execution completed",
                    success=success,
                    duration_seconds=round(duration, 2),
                )

                if self._should_debug():
                    # Set final workflow success status
                    self.debugger.set_workflow_success(success)

                    self.debugger.log_workflow_phase(
                        "workflow_execution",
                        "completed" if success else "failed",
                        duration=duration,
                    )
                    if self.debugger.enabled:
                        self.debugger.print_debug_summary()

                return success

            except KeyboardInterrupt:
                self.console.print("Interrupted by user")
                self.session.fail_task("workflow", "Interrupted by user")
                self.logger.warning("Workflow interrupted by user")
                return False

            except Exception as e:
                # Top-level boundary: swallow, record, and report failure.
                self.console.print(f"Error: {e}")
                self.session.fail_task("workflow", f"Unexpected error: {e}")
                self.logger.exception(
                    "Workflow execution failed",
                    error=str(e),
                    error_type=type(e).__name__,
                )
                return False

            finally:
                self.session.cleanup_resources()

    async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
        """Run configuration, cleaning, quality, publishing, and commit in order.

        Returns False as soon as a required phase fails; only a commit-phase
        failure is recorded without aborting early.
        """
        success = True
        self.phases.run_configuration_phase(options)
        if not self.phases.run_cleaning_phase(options):
            success = False
            self.session.fail_task("workflow", "Cleaning phase failed")
            return False
        if not await self._execute_quality_phase(options):
            success = False
            return False
        if not self.phases.run_publishing_phase(options):
            success = False
            self.session.fail_task("workflow", "Publishing failed")
            return False
        # Commit failure marks the run unsuccessful but does not short-circuit.
        if not self.phases.run_commit_phase(options):
            success = False

        return success

    async def _execute_quality_phase(self, options: OptionsProtocol) -> bool:
        """Dispatch to the quality sub-workflow selected by the options flags.

        Precedence: fast-hooks-only, then comprehensive-only, then the test
        workflow, falling back to the standard hooks workflow.
        """
        if hasattr(options, "fast") and options.fast:
            return self._run_fast_hooks_phase(options)
        if hasattr(options, "comp") and options.comp:
            return self._run_comprehensive_hooks_phase(options)
        if options.test:
            return await self._execute_test_workflow(options)
        return self._execute_standard_hooks_workflow(options)

    async def _execute_test_workflow(self, options: OptionsProtocol) -> bool:
        """Run fast hooks, then tests + comprehensive hooks, then handle results.

        In AI-agent mode, collected failures are handed to the agent fixing
        workflow; otherwise all phases must simply pass.
        """
        iteration = self._start_iteration_tracking(options)

        if not self._run_initial_fast_hooks(options, iteration):
            return False

        testing_passed, comprehensive_passed = self._run_main_quality_phases(options)

        if options.ai_agent:
            return await self._handle_ai_agent_workflow(
                options, iteration, testing_passed, comprehensive_passed
            )

        return self._handle_standard_workflow(
            options, iteration, testing_passed, comprehensive_passed
        )

    def _start_iteration_tracking(self, options: OptionsProtocol) -> int:
        """Start iteration tracking for AI agent mode."""
        iteration = 1
        if options.ai_agent and self._should_debug():
            self.debugger.log_iteration_start(iteration)
        return iteration

    def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
        """Run initial fast hooks phase and handle failure."""
        fast_hooks_passed = self._run_fast_hooks_phase(options)
        if not fast_hooks_passed:
            if options.ai_agent and self._should_debug():
                self.debugger.log_iteration_end(iteration, False)
            return False  # Fast hooks must pass before proceeding
        return True

    def _run_main_quality_phases(self, options: OptionsProtocol) -> tuple[bool, bool]:
        """Run tests and comprehensive hooks to collect ALL issues."""
        testing_passed = self._run_testing_phase(options)
        comprehensive_passed = self._run_comprehensive_hooks_phase(options)
        return testing_passed, comprehensive_passed

    async def _handle_ai_agent_workflow(
        self,
        options: OptionsProtocol,
        iteration: int,
        testing_passed: bool,
        comprehensive_passed: bool,
    ) -> bool:
        """Handle AI agent workflow with failure collection and fixing."""
        if not testing_passed or not comprehensive_passed:
            success = await self._run_ai_agent_fixing_phase(options)
            if self._should_debug():
                self.debugger.log_iteration_end(iteration, success)
            return success

        if self._should_debug():
            self.debugger.log_iteration_end(iteration, True)
        return True  # All phases passed, no fixes needed

    def _handle_standard_workflow(
        self,
        options: OptionsProtocol,
        iteration: int,
        testing_passed: bool,
        comprehensive_passed: bool,
    ) -> bool:
        """Handle standard workflow where all phases must pass."""
        success = testing_passed and comprehensive_passed
        if options.ai_agent and self._should_debug():
            self.debugger.log_iteration_end(iteration, success)
        return success

    def _run_fast_hooks_phase(self, options: OptionsProtocol) -> bool:
        """Run only the fast hooks, mirroring progress into MCP state."""
        self._update_mcp_status("fast", "running")

        if not self.phases.run_fast_hooks_only(options):
            self.session.fail_task("workflow", "Fast hooks failed")
            self._update_mcp_status("fast", "failed")
            return False

        self._update_mcp_status("fast", "completed")
        return True

    def _run_testing_phase(self, options: OptionsProtocol) -> bool:
        """Run the test phase; on failure, record failures for later fixing."""
        self._update_mcp_status("tests", "running")

        success = self.phases.run_testing_phase(options)
        if not success:
            self.session.fail_task("workflow", "Testing failed")
            self._handle_test_failures()
            self._update_mcp_status("tests", "failed")
            # In AI agent mode, continue to collect more failures
            # In non-AI mode, this will be handled by caller
        else:
            self._update_mcp_status("tests", "completed")

        return success

    def _run_comprehensive_hooks_phase(self, options: OptionsProtocol) -> bool:
        """Run the comprehensive hooks, mirroring progress into MCP state."""
        self._update_mcp_status("comprehensive", "running")

        success = self.phases.run_comprehensive_hooks_only(options)
        if not success:
            self.session.fail_task("workflow", "Comprehensive hooks failed")
            self._update_mcp_status("comprehensive", "failed")
            # In AI agent mode, continue to collect more failures
            # In non-AI mode, this will be handled by caller
        else:
            self._update_mcp_status("comprehensive", "completed")

        return success

    def _update_mcp_status(self, stage: str, status: str) -> None:
        """Record a stage/status transition in MCP state (if attached) and the session."""
        # NOTE(review): the hasattr check is redundant since __init__ always sets
        # the attribute; kept as a defensive guard.
        if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
            self._mcp_state_manager.update_stage_status(stage, status)

        self.session.update_stage(stage, status)

    def _handle_test_failures(self) -> None:
        """Push up to 10 test failures into the MCP state manager as issues.

        No-op when no state manager is attached or the test manager cannot
        report failures.
        """
        if not (hasattr(self, "_mcp_state_manager") and self._mcp_state_manager):
            return

        test_manager = self.phases.test_manager
        if not hasattr(test_manager, "get_test_failures"):
            return

        failures = test_manager.get_test_failures()

        # Log test failure count for debugging
        if self._should_debug():
            self.debugger.log_test_failures(len(failures))

        # NOTE(review): these Issue/Priority types intentionally shadow the
        # module-level agents.base imports — the MCP state store uses its own
        # Issue schema (string type, file_path, priority, auto_fixable).
        from crackerjack.mcp.state import Issue, Priority

        for i, failure in enumerate(failures[:10]):
            issue = Issue(
                id=f"test_failure_{i}",
                type="test_failure",
                message=failure.strip(),
                file_path="tests/",
                priority=Priority.HIGH,
                stage="tests",
                auto_fixable=False,
            )
            self._mcp_state_manager.add_issue(issue)

    def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
        """Execute standard hooks workflow with proper state management."""
        self._update_hooks_status_running()

        hooks_success = self.phases.run_hooks_phase(options)
        self._handle_hooks_completion(hooks_success)

        return hooks_success

    def _update_hooks_status_running(self) -> None:
        """Update MCP state to running for hook phases."""
        if self._has_mcp_state_manager():
            self._mcp_state_manager.update_stage_status("fast", "running")
            self._mcp_state_manager.update_stage_status("comprehensive", "running")

    def _handle_hooks_completion(self, hooks_success: bool) -> None:
        """Handle hooks completion with appropriate status updates."""
        if not hooks_success:
            self.session.fail_task("workflow", "Hooks failed")
            self._update_hooks_status_failed()
        else:
            self._update_hooks_status_completed()

    def _has_mcp_state_manager(self) -> bool:
        """Check if MCP state manager is available."""
        # NOTE(review): returns the manager object itself (truthy) or False/None,
        # not a strict bool, despite the annotation — callers only use it in
        # boolean context.
        return hasattr(self, "_mcp_state_manager") and self._mcp_state_manager

    def _update_hooks_status_failed(self) -> None:
        """Update MCP state to failed for hook phases."""
        if self._has_mcp_state_manager():
            self._mcp_state_manager.update_stage_status("fast", "failed")
            self._mcp_state_manager.update_stage_status("comprehensive", "failed")

    def _update_hooks_status_completed(self) -> None:
        """Update MCP state to completed for hook phases."""
        if self._has_mcp_state_manager():
            self._mcp_state_manager.update_stage_status("fast", "completed")
            self._mcp_state_manager.update_stage_status("comprehensive", "completed")

    async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
        """Run AI agent fixing phase to analyze and fix collected failures.

        Builds an AgentContext, collects issues from the test and hook
        failures, and asks the AgentCoordinator to fix them.  Returns True
        when no issues were found or all were fixed; any exception marks the
        phase failed and returns False.
        """
        self._update_mcp_status("ai_fixing", "running")
        self.logger.info("Starting AI agent fixing phase")

        if self._should_debug():
            self.debugger.log_workflow_phase(
                "ai_agent_fixing",
                "started",
                details={"ai_agent": True},
            )

        try:
            # Create AI agent context
            agent_context = AgentContext(
                project_path=self.pkg_path,
                session_id=getattr(self.session, "session_id", None),
            )

            # Initialize agent coordinator
            agent_coordinator = AgentCoordinator(agent_context)
            agent_coordinator.initialize_agents()

            # Collect issues from failures
            issues = await self._collect_issues_from_failures()

            if not issues:
                self.logger.info("No issues collected for AI agent fixing")
                self._update_mcp_status("ai_fixing", "completed")
                return True

            self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")

            # Let agents handle the issues
            fix_result = await agent_coordinator.handle_issues(issues)

            success = fix_result.success
            if success:
                self.logger.info("AI agents successfully fixed all issues")
                self._update_mcp_status("ai_fixing", "completed")

                # Log fix counts for debugging
                if self._should_debug():
                    total_fixes = len(fix_result.fixes_applied)
                    # Estimate test vs hook fixes based on original issue types
                    test_fixes = len(
                        [f for f in fix_result.fixes_applied if "test" in f.lower()],
                    )
                    hook_fixes = total_fixes - test_fixes
                    self.debugger.log_test_fixes(test_fixes)
                    self.debugger.log_hook_fixes(hook_fixes)
            else:
                self.logger.warning(
                    f"AI agents could not fix all issues: {fix_result.remaining_issues}",
                )
                self._update_mcp_status("ai_fixing", "failed")

            if self._should_debug():
                self.debugger.log_workflow_phase(
                    "ai_agent_fixing",
                    "completed" if success else "failed",
                    details={
                        "confidence": fix_result.confidence,
                        "fixes_applied": len(fix_result.fixes_applied),
                        "remaining_issues": len(fix_result.remaining_issues),
                    },
                )

            return success

        except Exception as e:
            self.logger.exception(f"AI agent fixing phase failed: {e}")
            self.session.fail_task("ai_fixing", f"AI agent fixing failed: {e}")
            self._update_mcp_status("ai_fixing", "failed")

            if self._should_debug():
                self.debugger.log_workflow_phase(
                    "ai_agent_fixing",
                    "failed",
                    details={"error": str(e)},
                )

            return False

    async def _collect_issues_from_failures(self) -> list[Issue]:
        """Collect issues from test and comprehensive hook failures."""
        issues: list[Issue] = []

        test_issues, test_count = self._collect_test_failure_issues()
        hook_issues, hook_count = self._collect_hook_failure_issues()

        issues.extend(test_issues)
        issues.extend(hook_issues)

        self._log_failure_counts_if_debugging(test_count, hook_count)

        return issues

    def _collect_test_failure_issues(self) -> tuple[list[Issue], int]:
        """Collect test failure issues and return count.

        Uses the agents.base Issue schema (IssueType/severity), unlike
        ``_handle_test_failures`` which feeds the MCP state store.
        """
        issues: list[Issue] = []
        test_count = 0

        if hasattr(self.phases, "test_manager") and hasattr(
            self.phases.test_manager,
            "get_test_failures",
        ):
            test_failures = self.phases.test_manager.get_test_failures()
            test_count = len(test_failures)
            for i, failure in enumerate(
                test_failures[:20],
            ):  # Limit to prevent overload
                issue = Issue(
                    id=f"test_failure_{i}",
                    type=IssueType.TEST_FAILURE,
                    severity=Priority.HIGH,
                    message=failure.strip(),
                    stage="tests",
                )
                issues.append(issue)

        return issues, test_count

    def _collect_hook_failure_issues(self) -> tuple[list[Issue], int]:
        """Collect hook failure issues and return count.

        Scans the session tracker for failed fast/comprehensive hook tasks
        and maps them to FORMATTING (fast) or TYPE_ERROR (comprehensive)
        issues.
        """
        issues: list[Issue] = []
        hook_count = 0

        if self.session.session_tracker:
            for task_id, task_data in self.session.session_tracker.tasks.items():
                if task_data.status == "failed" and task_id in (
                    "fast_hooks",
                    "comprehensive_hooks",
                ):
                    hook_count += 1
                    issue_type = (
                        IssueType.FORMATTING
                        if "fast" in task_id
                        else IssueType.TYPE_ERROR
                    )
                    error_msg = getattr(task_data, "error_message", "Unknown error")
                    issue = Issue(
                        id=f"hook_failure_{task_id}",
                        type=issue_type,
                        severity=Priority.MEDIUM,
                        message=error_msg,
                        stage=task_id.replace("_hooks", ""),
                    )
                    issues.append(issue)

        return issues, hook_count

    def _log_failure_counts_if_debugging(
        self, test_count: int, hook_count: int
    ) -> None:
        """Log failure counts if debugging is enabled."""
        if self._should_debug():
            self.debugger.log_test_failures(test_count)
            self.debugger.log_hook_failures(hook_count)
|
|
503
|
+
|
|
504
|
+
|
|
505
|
+
class WorkflowOrchestrator:
    """Public facade over the crackerjack workflow machinery.

    Wires up the dependency container, the session and phase coordinators,
    and a ``WorkflowPipeline``, then exposes thin delegating wrappers for
    each individual phase plus the ``process``/``run_complete_workflow``
    entry points.
    """

    def __init__(
        self,
        console: Console | None = None,
        pkg_path: Path | None = None,
        dry_run: bool = False,
        web_job_id: str | None = None,
        verbose: bool = False,
    ) -> None:
        self.console = console or Console(force_terminal=True)
        self.pkg_path = pkg_path or Path.cwd()
        self.dry_run = dry_run
        self.web_job_id = web_job_id
        self.verbose = verbose

        # Imported locally, presumably to avoid import cycles at module load
        # time — TODO confirm.
        from crackerjack.models.protocols import (
            FileSystemInterface,
            GitInterface,
            HookManager,
            PublishManager,
            TestManagerProtocol,
        )

        from .container import create_container

        self.container = create_container(
            console=self.console,
            pkg_path=self.pkg_path,
            dry_run=self.dry_run,
            verbose=self.verbose,
        )

        self.session = SessionCoordinator(self.console, self.pkg_path, self.web_job_id)
        self.phases = PhaseCoordinator(
            console=self.console,
            pkg_path=self.pkg_path,
            session=self.session,
            filesystem=self.container.get(FileSystemInterface),
            git_service=self.container.get(GitInterface),
            hook_manager=self.container.get(HookManager),
            test_manager=self.container.get(TestManagerProtocol),
            publish_manager=self.container.get(PublishManager),
        )

        self.pipeline = WorkflowPipeline(
            console=self.console,
            pkg_path=self.pkg_path,
            session=self.session,
            phases=self.phases,
        )

        self.logger = get_logger("crackerjack.orchestrator")

        self._initialize_logging()

    def _initialize_logging(self) -> None:
        """Route structured logging into a per-session debug log file."""
        from crackerjack.services.log_manager import get_log_manager

        manager = get_log_manager()
        # Prefer the web job id; otherwise derive a short id from the clock.
        sid = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
        log_path = manager.create_debug_log_file(sid)

        setup_structured_logging(log_file=log_path)

        self.logger.info(
            "Structured logging initialized",
            log_file=str(log_path),
            log_directory=str(manager.log_dir),
            package_path=str(self.pkg_path),
            dry_run=self.dry_run,
        )

    def _initialize_session_tracking(self, options: OptionsProtocol) -> None:
        """Delegate session-tracking setup to the session coordinator."""
        self.session.initialize_session_tracking(options)

    def _track_task(self, task_id: str, task_name: str) -> None:
        """Delegate task registration to the session coordinator."""
        self.session.track_task(task_id, task_name)

    def _complete_task(self, task_id: str, details: str | None = None) -> None:
        """Delegate task completion to the session coordinator."""
        self.session.complete_task(task_id, details)

    def _fail_task(self, task_id: str, error: str) -> None:
        """Delegate task failure to the session coordinator."""
        self.session.fail_task(task_id, error)

    def run_cleaning_phase(self, options: OptionsProtocol) -> bool:
        """Run the cleaning phase via the phase coordinator."""
        return self.phases.run_cleaning_phase(options)

    def run_fast_hooks_only(self, options: OptionsProtocol) -> bool:
        """Run only the fast hooks via the phase coordinator."""
        return self.phases.run_fast_hooks_only(options)

    def run_comprehensive_hooks_only(self, options: OptionsProtocol) -> bool:
        """Run only the comprehensive hooks via the phase coordinator."""
        return self.phases.run_comprehensive_hooks_only(options)

    def run_hooks_phase(self, options: OptionsProtocol) -> bool:
        """Run the combined hooks phase via the phase coordinator."""
        return self.phases.run_hooks_phase(options)

    def run_testing_phase(self, options: OptionsProtocol) -> bool:
        """Run the testing phase via the phase coordinator."""
        return self.phases.run_testing_phase(options)

    def run_publishing_phase(self, options: OptionsProtocol) -> bool:
        """Run the publishing phase via the phase coordinator."""
        return self.phases.run_publishing_phase(options)

    def run_commit_phase(self, options: OptionsProtocol) -> bool:
        """Run the commit phase via the phase coordinator."""
        return self.phases.run_commit_phase(options)

    def run_configuration_phase(self, options: OptionsProtocol) -> bool:
        """Run the configuration phase via the phase coordinator."""
        return self.phases.run_configuration_phase(options)

    async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
        """Run the full workflow through the pipeline and return success."""
        return await self.pipeline.run_complete_workflow(options)

    def _cleanup_resources(self) -> None:
        """Release session-held resources."""
        self.session.cleanup_resources()

    def _register_cleanup(self, cleanup_handler: t.Callable[[], None]) -> None:
        """Register a cleanup callback with the session coordinator."""
        self.session.register_cleanup(cleanup_handler)

    def _track_lock_file(self, lock_file_path: Path) -> None:
        """Record a lock file for later cleanup."""
        self.session.track_lock_file(lock_file_path)

    def _get_version(self) -> str:
        """Return the crackerjack version string, or "unknown" on failure."""
        try:
            result = version()
        except Exception:
            return "unknown"
        return result

    async def process(self, options: OptionsProtocol) -> bool:
        """Run the complete workflow inside a tracked session.

        Any exception ends the session as failed and yields False instead
        of propagating.
        """
        self.session.start_session("process_workflow")

        try:
            outcome = await self.run_complete_workflow(options)
            self.session.end_session(success=outcome)
            return outcome
        except Exception:
            self.session.end_session(success=False)
            return False
|