crackerjack 0.30.3__py3-none-any.whl → 0.31.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +227 -299
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +170 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +657 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +409 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +618 -928
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +585 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +826 -0
- crackerjack/dynamic_config.py +94 -103
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +433 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +443 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +114 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +621 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +372 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +217 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +565 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/coverage_improvement.py +223 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +358 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +356 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +421 -0
- crackerjack/services/git.py +176 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +873 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.7.dist-info/METADATA +742 -0
- crackerjack-0.31.7.dist-info/RECORD +149 -0
- crackerjack-0.31.7.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/crackerjack.py +0 -3805
- crackerjack/pyproject.toml +0 -286
- crackerjack-0.30.3.dist-info/METADATA +0 -1290
- crackerjack-0.30.3.dist-info/RECORD +0 -16
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,826 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import typing as t
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from rich.console import Console
|
|
6
|
+
|
|
7
|
+
from crackerjack.agents.base import AgentContext, Issue, IssueType, Priority
|
|
8
|
+
from crackerjack.agents.coordinator import AgentCoordinator
|
|
9
|
+
from crackerjack.models.protocols import OptionsProtocol
|
|
10
|
+
from crackerjack.services.debug import get_ai_agent_debugger
|
|
11
|
+
from crackerjack.services.logging import (
|
|
12
|
+
LoggingContext,
|
|
13
|
+
get_logger,
|
|
14
|
+
setup_structured_logging,
|
|
15
|
+
)
|
|
16
|
+
|
|
17
|
+
from .phase_coordinator import PhaseCoordinator
|
|
18
|
+
from .session_coordinator import SessionCoordinator
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
def version() -> str:
    """Return the installed crackerjack version, or "unknown" if unavailable.

    Reads the version from package metadata so it always matches the
    installed distribution rather than a hard-coded constant.
    """
    # importlib.metadata is stdlib (3.8+), so import it unconditionally;
    # only the metadata lookup itself can legitimately fail.
    import importlib.metadata

    try:
        return importlib.metadata.version("crackerjack")
    except importlib.metadata.PackageNotFoundError:
        # Not installed as a distribution (e.g. running from a source tree).
        return "unknown"
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class WorkflowPipeline:
|
|
31
|
+
def __init__(
|
|
32
|
+
self,
|
|
33
|
+
console: Console,
|
|
34
|
+
pkg_path: Path,
|
|
35
|
+
session: SessionCoordinator,
|
|
36
|
+
phases: PhaseCoordinator,
|
|
37
|
+
) -> None:
|
|
38
|
+
self.console = console
|
|
39
|
+
self.pkg_path = pkg_path
|
|
40
|
+
self.session = session
|
|
41
|
+
self.phases = phases
|
|
42
|
+
self._mcp_state_manager: t.Any = None
|
|
43
|
+
|
|
44
|
+
self.logger = get_logger("crackerjack.pipeline")
|
|
45
|
+
self._debugger = None
|
|
46
|
+
|
|
47
|
+
@property
|
|
48
|
+
def debugger(self):
|
|
49
|
+
if self._debugger is None:
|
|
50
|
+
self._debugger = get_ai_agent_debugger()
|
|
51
|
+
return self._debugger
|
|
52
|
+
|
|
53
|
+
def _should_debug(self) -> bool:
|
|
54
|
+
import os
|
|
55
|
+
|
|
56
|
+
return os.environ.get("AI_AGENT_DEBUG", "0") == "1"
|
|
57
|
+
|
|
58
|
+
async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
|
|
59
|
+
with LoggingContext(
|
|
60
|
+
"workflow_execution",
|
|
61
|
+
testing=getattr(options, "testing", False),
|
|
62
|
+
skip_hooks=getattr(options, "skip_hooks", False),
|
|
63
|
+
):
|
|
64
|
+
start_time = time.time()
|
|
65
|
+
self.session.initialize_session_tracking(options)
|
|
66
|
+
self.session.track_task("workflow", "Complete crackerjack workflow")
|
|
67
|
+
|
|
68
|
+
if self._should_debug():
|
|
69
|
+
self.debugger.log_workflow_phase(
|
|
70
|
+
"workflow_execution",
|
|
71
|
+
"started",
|
|
72
|
+
details={
|
|
73
|
+
"testing": getattr(options, "testing", False),
|
|
74
|
+
"skip_hooks": getattr(options, "skip_hooks", False),
|
|
75
|
+
"ai_agent": getattr(options, "ai_agent", False),
|
|
76
|
+
},
|
|
77
|
+
)
|
|
78
|
+
|
|
79
|
+
if hasattr(options, "cleanup"):
|
|
80
|
+
self.session.set_cleanup_config(options.cleanup)
|
|
81
|
+
|
|
82
|
+
self.logger.info(
|
|
83
|
+
"Starting complete workflow execution",
|
|
84
|
+
testing=getattr(options, "testing", False),
|
|
85
|
+
skip_hooks=getattr(options, "skip_hooks", False),
|
|
86
|
+
package_path=str(self.pkg_path),
|
|
87
|
+
)
|
|
88
|
+
|
|
89
|
+
try:
|
|
90
|
+
success = await self._execute_workflow_phases(options)
|
|
91
|
+
self.session.finalize_session(start_time, success)
|
|
92
|
+
|
|
93
|
+
duration = time.time() - start_time
|
|
94
|
+
self.logger.info(
|
|
95
|
+
"Workflow execution completed",
|
|
96
|
+
success=success,
|
|
97
|
+
duration_seconds=round(duration, 2),
|
|
98
|
+
)
|
|
99
|
+
|
|
100
|
+
if self._should_debug():
|
|
101
|
+
# Set final workflow success status
|
|
102
|
+
self.debugger.set_workflow_success(success)
|
|
103
|
+
|
|
104
|
+
self.debugger.log_workflow_phase(
|
|
105
|
+
"workflow_execution",
|
|
106
|
+
"completed" if success else "failed",
|
|
107
|
+
duration=duration,
|
|
108
|
+
)
|
|
109
|
+
if self.debugger.enabled:
|
|
110
|
+
self.debugger.print_debug_summary()
|
|
111
|
+
|
|
112
|
+
return success
|
|
113
|
+
|
|
114
|
+
except KeyboardInterrupt:
|
|
115
|
+
self.console.print("Interrupted by user")
|
|
116
|
+
self.session.fail_task("workflow", "Interrupted by user")
|
|
117
|
+
self.logger.warning("Workflow interrupted by user")
|
|
118
|
+
return False
|
|
119
|
+
|
|
120
|
+
except Exception as e:
|
|
121
|
+
self.console.print(f"Error: {e}")
|
|
122
|
+
self.session.fail_task("workflow", f"Unexpected error: {e}")
|
|
123
|
+
self.logger.exception(
|
|
124
|
+
"Workflow execution failed",
|
|
125
|
+
error=str(e),
|
|
126
|
+
error_type=type(e).__name__,
|
|
127
|
+
)
|
|
128
|
+
return False
|
|
129
|
+
|
|
130
|
+
finally:
|
|
131
|
+
self.session.cleanup_resources()
|
|
132
|
+
|
|
133
|
+
async def _execute_workflow_phases(self, options: OptionsProtocol) -> bool:
|
|
134
|
+
success = True
|
|
135
|
+
self.phases.run_configuration_phase(options)
|
|
136
|
+
if not self.phases.run_cleaning_phase(options):
|
|
137
|
+
success = False
|
|
138
|
+
self.session.fail_task("workflow", "Cleaning phase failed")
|
|
139
|
+
return False
|
|
140
|
+
if not await self._execute_quality_phase(options):
|
|
141
|
+
success = False
|
|
142
|
+
return False
|
|
143
|
+
if not self.phases.run_publishing_phase(options):
|
|
144
|
+
success = False
|
|
145
|
+
self.session.fail_task("workflow", "Publishing failed")
|
|
146
|
+
return False
|
|
147
|
+
if not self.phases.run_commit_phase(options):
|
|
148
|
+
success = False
|
|
149
|
+
|
|
150
|
+
return success
|
|
151
|
+
|
|
152
|
+
async def _execute_quality_phase(self, options: OptionsProtocol) -> bool:
|
|
153
|
+
if hasattr(options, "fast") and options.fast:
|
|
154
|
+
return self._run_fast_hooks_phase(options)
|
|
155
|
+
if hasattr(options, "comp") and options.comp:
|
|
156
|
+
return self._run_comprehensive_hooks_phase(options)
|
|
157
|
+
if options.test:
|
|
158
|
+
return await self._execute_test_workflow(options)
|
|
159
|
+
return self._execute_standard_hooks_workflow(options)
|
|
160
|
+
|
|
161
|
+
async def _execute_test_workflow(self, options: OptionsProtocol) -> bool:
|
|
162
|
+
iteration = self._start_iteration_tracking(options)
|
|
163
|
+
|
|
164
|
+
if not self._run_initial_fast_hooks(options, iteration):
|
|
165
|
+
return False
|
|
166
|
+
|
|
167
|
+
testing_passed, comprehensive_passed = self._run_main_quality_phases(options)
|
|
168
|
+
|
|
169
|
+
if options.ai_agent:
|
|
170
|
+
return await self._handle_ai_agent_workflow(
|
|
171
|
+
options, iteration, testing_passed, comprehensive_passed
|
|
172
|
+
)
|
|
173
|
+
|
|
174
|
+
return self._handle_standard_workflow(
|
|
175
|
+
options, iteration, testing_passed, comprehensive_passed
|
|
176
|
+
)
|
|
177
|
+
|
|
178
|
+
def _start_iteration_tracking(self, options: OptionsProtocol) -> int:
|
|
179
|
+
"""Start iteration tracking for AI agent mode."""
|
|
180
|
+
iteration = 1
|
|
181
|
+
if options.ai_agent and self._should_debug():
|
|
182
|
+
self.debugger.log_iteration_start(iteration)
|
|
183
|
+
return iteration
|
|
184
|
+
|
|
185
|
+
def _run_initial_fast_hooks(self, options: OptionsProtocol, iteration: int) -> bool:
|
|
186
|
+
"""Run initial fast hooks phase and handle failure."""
|
|
187
|
+
fast_hooks_passed = self._run_fast_hooks_phase(options)
|
|
188
|
+
if not fast_hooks_passed:
|
|
189
|
+
if options.ai_agent and self._should_debug():
|
|
190
|
+
self.debugger.log_iteration_end(iteration, False)
|
|
191
|
+
return False # Fast hooks must pass before proceeding
|
|
192
|
+
return True
|
|
193
|
+
|
|
194
|
+
def _run_main_quality_phases(self, options: OptionsProtocol) -> tuple[bool, bool]:
|
|
195
|
+
"""Run tests and comprehensive hooks to collect ALL issues."""
|
|
196
|
+
testing_passed = self._run_testing_phase(options)
|
|
197
|
+
comprehensive_passed = self._run_comprehensive_hooks_phase(options)
|
|
198
|
+
return testing_passed, comprehensive_passed
|
|
199
|
+
|
|
200
|
+
async def _handle_ai_agent_workflow(
|
|
201
|
+
self,
|
|
202
|
+
options: OptionsProtocol,
|
|
203
|
+
iteration: int,
|
|
204
|
+
testing_passed: bool,
|
|
205
|
+
comprehensive_passed: bool,
|
|
206
|
+
) -> bool:
|
|
207
|
+
"""Handle AI agent workflow with failure collection and fixing."""
|
|
208
|
+
if not testing_passed or not comprehensive_passed:
|
|
209
|
+
success = await self._run_ai_agent_fixing_phase(options)
|
|
210
|
+
if self._should_debug():
|
|
211
|
+
self.debugger.log_iteration_end(iteration, success)
|
|
212
|
+
return success
|
|
213
|
+
|
|
214
|
+
if self._should_debug():
|
|
215
|
+
self.debugger.log_iteration_end(iteration, True)
|
|
216
|
+
return True # All phases passed, no fixes needed
|
|
217
|
+
|
|
218
|
+
def _handle_standard_workflow(
|
|
219
|
+
self,
|
|
220
|
+
options: OptionsProtocol,
|
|
221
|
+
iteration: int,
|
|
222
|
+
testing_passed: bool,
|
|
223
|
+
comprehensive_passed: bool,
|
|
224
|
+
) -> bool:
|
|
225
|
+
"""Handle standard workflow where all phases must pass."""
|
|
226
|
+
success = testing_passed and comprehensive_passed
|
|
227
|
+
if options.ai_agent and self._should_debug():
|
|
228
|
+
self.debugger.log_iteration_end(iteration, success)
|
|
229
|
+
return success
|
|
230
|
+
|
|
231
|
+
def _run_fast_hooks_phase(self, options: OptionsProtocol) -> bool:
|
|
232
|
+
self._update_mcp_status("fast", "running")
|
|
233
|
+
|
|
234
|
+
if not self.phases.run_fast_hooks_only(options):
|
|
235
|
+
self.session.fail_task("workflow", "Fast hooks failed")
|
|
236
|
+
self._update_mcp_status("fast", "failed")
|
|
237
|
+
return False
|
|
238
|
+
|
|
239
|
+
self._update_mcp_status("fast", "completed")
|
|
240
|
+
return True
|
|
241
|
+
|
|
242
|
+
def _run_testing_phase(self, options: OptionsProtocol) -> bool:
|
|
243
|
+
self._update_mcp_status("tests", "running")
|
|
244
|
+
|
|
245
|
+
success = self.phases.run_testing_phase(options)
|
|
246
|
+
if not success:
|
|
247
|
+
self.session.fail_task("workflow", "Testing failed")
|
|
248
|
+
self._handle_test_failures()
|
|
249
|
+
self._update_mcp_status("tests", "failed")
|
|
250
|
+
# In AI agent mode, continue to collect more failures
|
|
251
|
+
# In non-AI mode, this will be handled by caller
|
|
252
|
+
else:
|
|
253
|
+
self._update_mcp_status("tests", "completed")
|
|
254
|
+
|
|
255
|
+
return success
|
|
256
|
+
|
|
257
|
+
def _run_comprehensive_hooks_phase(self, options: OptionsProtocol) -> bool:
|
|
258
|
+
self._update_mcp_status("comprehensive", "running")
|
|
259
|
+
|
|
260
|
+
success = self.phases.run_comprehensive_hooks_only(options)
|
|
261
|
+
if not success:
|
|
262
|
+
self.session.fail_task("comprehensive_hooks", "Comprehensive hooks failed")
|
|
263
|
+
self._update_mcp_status("comprehensive", "failed")
|
|
264
|
+
# In AI agent mode, continue to collect more failures
|
|
265
|
+
# In non-AI mode, this will be handled by caller
|
|
266
|
+
else:
|
|
267
|
+
self._update_mcp_status("comprehensive", "completed")
|
|
268
|
+
|
|
269
|
+
return success
|
|
270
|
+
|
|
271
|
+
def _update_mcp_status(self, stage: str, status: str) -> None:
|
|
272
|
+
if hasattr(self, "_mcp_state_manager") and self._mcp_state_manager:
|
|
273
|
+
self._mcp_state_manager.update_stage_status(stage, status)
|
|
274
|
+
|
|
275
|
+
self.session.update_stage(stage, status)
|
|
276
|
+
|
|
277
|
+
def _handle_test_failures(self) -> None:
|
|
278
|
+
if not (hasattr(self, "_mcp_state_manager") and self._mcp_state_manager):
|
|
279
|
+
return
|
|
280
|
+
|
|
281
|
+
test_manager = self.phases.test_manager
|
|
282
|
+
if not hasattr(test_manager, "get_test_failures"):
|
|
283
|
+
return
|
|
284
|
+
|
|
285
|
+
failures = test_manager.get_test_failures()
|
|
286
|
+
|
|
287
|
+
# Log test failure count for debugging
|
|
288
|
+
if self._should_debug():
|
|
289
|
+
self.debugger.log_test_failures(len(failures))
|
|
290
|
+
|
|
291
|
+
from crackerjack.mcp.state import Issue, Priority
|
|
292
|
+
|
|
293
|
+
for i, failure in enumerate(failures[:10]):
|
|
294
|
+
issue = Issue(
|
|
295
|
+
id=f"test_failure_{i}",
|
|
296
|
+
type="test_failure",
|
|
297
|
+
message=failure.strip(),
|
|
298
|
+
file_path="tests/",
|
|
299
|
+
priority=Priority.HIGH,
|
|
300
|
+
stage="tests",
|
|
301
|
+
auto_fixable=False,
|
|
302
|
+
)
|
|
303
|
+
self._mcp_state_manager.add_issue(issue)
|
|
304
|
+
|
|
305
|
+
def _execute_standard_hooks_workflow(self, options: OptionsProtocol) -> bool:
|
|
306
|
+
"""Execute standard hooks workflow with proper state management."""
|
|
307
|
+
self._update_hooks_status_running()
|
|
308
|
+
|
|
309
|
+
hooks_success = self.phases.run_hooks_phase(options)
|
|
310
|
+
self._handle_hooks_completion(hooks_success)
|
|
311
|
+
|
|
312
|
+
return hooks_success
|
|
313
|
+
|
|
314
|
+
def _update_hooks_status_running(self) -> None:
|
|
315
|
+
"""Update MCP state to running for hook phases."""
|
|
316
|
+
if self._has_mcp_state_manager():
|
|
317
|
+
self._mcp_state_manager.update_stage_status("fast", "running")
|
|
318
|
+
self._mcp_state_manager.update_stage_status("comprehensive", "running")
|
|
319
|
+
|
|
320
|
+
def _handle_hooks_completion(self, hooks_success: bool) -> None:
|
|
321
|
+
"""Handle hooks completion with appropriate status updates."""
|
|
322
|
+
if not hooks_success:
|
|
323
|
+
self.session.fail_task("workflow", "Hooks failed")
|
|
324
|
+
self._update_hooks_status_failed()
|
|
325
|
+
else:
|
|
326
|
+
self._update_hooks_status_completed()
|
|
327
|
+
|
|
328
|
+
def _has_mcp_state_manager(self) -> bool:
|
|
329
|
+
"""Check if MCP state manager is available."""
|
|
330
|
+
return hasattr(self, "_mcp_state_manager") and self._mcp_state_manager
|
|
331
|
+
|
|
332
|
+
def _update_hooks_status_failed(self) -> None:
|
|
333
|
+
"""Update MCP state to failed for hook phases."""
|
|
334
|
+
if self._has_mcp_state_manager():
|
|
335
|
+
self._mcp_state_manager.update_stage_status("fast", "failed")
|
|
336
|
+
self._mcp_state_manager.update_stage_status("comprehensive", "failed")
|
|
337
|
+
|
|
338
|
+
def _update_hooks_status_completed(self) -> None:
|
|
339
|
+
"""Update MCP state to completed for hook phases."""
|
|
340
|
+
if self._has_mcp_state_manager():
|
|
341
|
+
self._mcp_state_manager.update_stage_status("fast", "completed")
|
|
342
|
+
self._mcp_state_manager.update_stage_status("comprehensive", "completed")
|
|
343
|
+
|
|
344
|
+
async def _run_ai_agent_fixing_phase(self, options: OptionsProtocol) -> bool:
|
|
345
|
+
"""Run AI agent fixing phase to analyze and fix collected failures."""
|
|
346
|
+
self._update_mcp_status("ai_fixing", "running")
|
|
347
|
+
self.logger.info("Starting AI agent fixing phase")
|
|
348
|
+
self._log_debug_phase_start()
|
|
349
|
+
|
|
350
|
+
try:
|
|
351
|
+
agent_coordinator = self._setup_agent_coordinator()
|
|
352
|
+
issues = await self._collect_issues_from_failures()
|
|
353
|
+
|
|
354
|
+
if not issues:
|
|
355
|
+
return self._handle_no_issues_found()
|
|
356
|
+
|
|
357
|
+
self.logger.info(f"AI agents will attempt to fix {len(issues)} issues")
|
|
358
|
+
fix_result = await agent_coordinator.handle_issues(issues)
|
|
359
|
+
|
|
360
|
+
return await self._process_fix_results(options, fix_result)
|
|
361
|
+
|
|
362
|
+
except Exception as e:
|
|
363
|
+
return self._handle_fixing_phase_error(e)
|
|
364
|
+
|
|
365
|
+
def _log_debug_phase_start(self) -> None:
|
|
366
|
+
"""Log debug information for phase start."""
|
|
367
|
+
if self._should_debug():
|
|
368
|
+
self.debugger.log_workflow_phase(
|
|
369
|
+
"ai_agent_fixing",
|
|
370
|
+
"started",
|
|
371
|
+
details={"ai_agent": True},
|
|
372
|
+
)
|
|
373
|
+
|
|
374
|
+
def _setup_agent_coordinator(self) -> AgentCoordinator:
|
|
375
|
+
"""Set up agent coordinator with proper context."""
|
|
376
|
+
from crackerjack.agents.coordinator import AgentCoordinator
|
|
377
|
+
|
|
378
|
+
agent_context = AgentContext(
|
|
379
|
+
project_path=self.pkg_path,
|
|
380
|
+
session_id=getattr(self.session, "session_id", None),
|
|
381
|
+
)
|
|
382
|
+
|
|
383
|
+
agent_coordinator = AgentCoordinator(agent_context)
|
|
384
|
+
agent_coordinator.initialize_agents()
|
|
385
|
+
return agent_coordinator
|
|
386
|
+
|
|
387
|
+
def _handle_no_issues_found(self) -> bool:
|
|
388
|
+
"""Handle case when no issues are collected."""
|
|
389
|
+
self.logger.info("No issues collected for AI agent fixing")
|
|
390
|
+
self._update_mcp_status("ai_fixing", "completed")
|
|
391
|
+
return True
|
|
392
|
+
|
|
393
|
+
async def _process_fix_results(
|
|
394
|
+
self, options: OptionsProtocol, fix_result: t.Any
|
|
395
|
+
) -> bool:
|
|
396
|
+
"""Process fix results and verify success."""
|
|
397
|
+
verification_success = await self._verify_fixes_applied(options, fix_result)
|
|
398
|
+
success = fix_result.success and verification_success
|
|
399
|
+
|
|
400
|
+
if success:
|
|
401
|
+
self._handle_successful_fixes(fix_result)
|
|
402
|
+
else:
|
|
403
|
+
self._handle_failed_fixes(fix_result, verification_success)
|
|
404
|
+
|
|
405
|
+
self._log_debug_phase_completion(success, fix_result)
|
|
406
|
+
return success
|
|
407
|
+
|
|
408
|
+
def _handle_successful_fixes(self, fix_result: t.Any) -> None:
|
|
409
|
+
"""Handle successful fix results."""
|
|
410
|
+
self.logger.info(
|
|
411
|
+
"AI agents successfully fixed all issues and verification passed"
|
|
412
|
+
)
|
|
413
|
+
self._update_mcp_status("ai_fixing", "completed")
|
|
414
|
+
self._log_fix_counts_if_debugging(fix_result)
|
|
415
|
+
|
|
416
|
+
def _handle_failed_fixes(
|
|
417
|
+
self, fix_result: t.Any, verification_success: bool
|
|
418
|
+
) -> None:
|
|
419
|
+
"""Handle failed fix results."""
|
|
420
|
+
if not verification_success:
|
|
421
|
+
self.logger.warning(
|
|
422
|
+
"AI agent fixes did not pass verification - issues still exist"
|
|
423
|
+
)
|
|
424
|
+
else:
|
|
425
|
+
self.logger.warning(
|
|
426
|
+
f"AI agents could not fix all issues: {fix_result.remaining_issues}",
|
|
427
|
+
)
|
|
428
|
+
self._update_mcp_status("ai_fixing", "failed")
|
|
429
|
+
|
|
430
|
+
def _log_fix_counts_if_debugging(self, fix_result: t.Any) -> None:
|
|
431
|
+
"""Log fix counts for debugging if debug mode is enabled."""
|
|
432
|
+
if not self._should_debug():
|
|
433
|
+
return
|
|
434
|
+
|
|
435
|
+
total_fixes = len(fix_result.fixes_applied)
|
|
436
|
+
test_fixes = len(
|
|
437
|
+
[f for f in fix_result.fixes_applied if "test" in f.lower()],
|
|
438
|
+
)
|
|
439
|
+
hook_fixes = total_fixes - test_fixes
|
|
440
|
+
self.debugger.log_test_fixes(test_fixes)
|
|
441
|
+
self.debugger.log_hook_fixes(hook_fixes)
|
|
442
|
+
|
|
443
|
+
def _log_debug_phase_completion(self, success: bool, fix_result: t.Any) -> None:
|
|
444
|
+
"""Log debug information for phase completion."""
|
|
445
|
+
if self._should_debug():
|
|
446
|
+
self.debugger.log_workflow_phase(
|
|
447
|
+
"ai_agent_fixing",
|
|
448
|
+
"completed" if success else "failed",
|
|
449
|
+
details={
|
|
450
|
+
"confidence": fix_result.confidence,
|
|
451
|
+
"fixes_applied": len(fix_result.fixes_applied),
|
|
452
|
+
"remaining_issues": len(fix_result.remaining_issues),
|
|
453
|
+
},
|
|
454
|
+
)
|
|
455
|
+
|
|
456
|
+
def _handle_fixing_phase_error(self, error: Exception) -> bool:
|
|
457
|
+
"""Handle errors during the fixing phase."""
|
|
458
|
+
self.logger.exception(f"AI agent fixing phase failed: {error}")
|
|
459
|
+
self.session.fail_task("ai_fixing", f"AI agent fixing failed: {error}")
|
|
460
|
+
self._update_mcp_status("ai_fixing", "failed")
|
|
461
|
+
|
|
462
|
+
if self._should_debug():
|
|
463
|
+
self.debugger.log_workflow_phase(
|
|
464
|
+
"ai_agent_fixing",
|
|
465
|
+
"failed",
|
|
466
|
+
details={"error": str(error)},
|
|
467
|
+
)
|
|
468
|
+
|
|
469
|
+
return False
|
|
470
|
+
|
|
471
|
+
async def _verify_fixes_applied(
|
|
472
|
+
self, options: OptionsProtocol, fix_result: t.Any
|
|
473
|
+
) -> bool:
|
|
474
|
+
"""Verify that AI agent fixes actually resolved the issues by re-running checks."""
|
|
475
|
+
if not fix_result.fixes_applied:
|
|
476
|
+
return True # No fixes were applied, nothing to verify
|
|
477
|
+
|
|
478
|
+
self.logger.info("Verifying AI agent fixes by re-running quality checks")
|
|
479
|
+
|
|
480
|
+
# Re-run the phases that previously failed to verify fixes
|
|
481
|
+
verification_success = True
|
|
482
|
+
|
|
483
|
+
# Check if we need to re-run tests
|
|
484
|
+
if any("test" in fix.lower() for fix in fix_result.fixes_applied):
|
|
485
|
+
self.logger.info("Re-running tests to verify test fixes")
|
|
486
|
+
test_success = self.phases.run_testing_phase(options)
|
|
487
|
+
if not test_success:
|
|
488
|
+
self.logger.warning(
|
|
489
|
+
"Test verification failed - test fixes did not work"
|
|
490
|
+
)
|
|
491
|
+
verification_success = False
|
|
492
|
+
|
|
493
|
+
# Check if we need to re-run comprehensive hooks
|
|
494
|
+
hook_fixes = [
|
|
495
|
+
f
|
|
496
|
+
for f in fix_result.fixes_applied
|
|
497
|
+
if "hook" not in f.lower()
|
|
498
|
+
or "complexity" in f.lower()
|
|
499
|
+
or "type" in f.lower()
|
|
500
|
+
]
|
|
501
|
+
if hook_fixes:
|
|
502
|
+
self.logger.info("Re-running comprehensive hooks to verify hook fixes")
|
|
503
|
+
hook_success = self.phases.run_comprehensive_hooks_only(options)
|
|
504
|
+
if not hook_success:
|
|
505
|
+
self.logger.warning(
|
|
506
|
+
"Hook verification failed - hook fixes did not work"
|
|
507
|
+
)
|
|
508
|
+
verification_success = False
|
|
509
|
+
|
|
510
|
+
if verification_success:
|
|
511
|
+
self.logger.info("All AI agent fixes verified successfully")
|
|
512
|
+
else:
|
|
513
|
+
self.logger.error(
|
|
514
|
+
"Verification failed - some fixes did not resolve the issues"
|
|
515
|
+
)
|
|
516
|
+
|
|
517
|
+
return verification_success
|
|
518
|
+
|
|
519
|
+
async def _collect_issues_from_failures(self) -> list[Issue]:
|
|
520
|
+
"""Collect issues from test and comprehensive hook failures."""
|
|
521
|
+
issues: list[Issue] = []
|
|
522
|
+
|
|
523
|
+
test_issues, test_count = self._collect_test_failure_issues()
|
|
524
|
+
hook_issues, hook_count = self._collect_hook_failure_issues()
|
|
525
|
+
|
|
526
|
+
issues.extend(test_issues)
|
|
527
|
+
issues.extend(hook_issues)
|
|
528
|
+
|
|
529
|
+
self._log_failure_counts_if_debugging(test_count, hook_count)
|
|
530
|
+
|
|
531
|
+
return issues
|
|
532
|
+
|
|
533
|
+
def _collect_test_failure_issues(self) -> tuple[list[Issue], int]:
|
|
534
|
+
"""Collect test failure issues and return count."""
|
|
535
|
+
issues: list[Issue] = []
|
|
536
|
+
test_count = 0
|
|
537
|
+
|
|
538
|
+
if hasattr(self.phases, "test_manager") and hasattr(
|
|
539
|
+
self.phases.test_manager,
|
|
540
|
+
"get_test_failures",
|
|
541
|
+
):
|
|
542
|
+
test_failures = self.phases.test_manager.get_test_failures()
|
|
543
|
+
test_count = len(test_failures)
|
|
544
|
+
for i, failure in enumerate(
|
|
545
|
+
test_failures[:20],
|
|
546
|
+
): # Limit to prevent overload
|
|
547
|
+
issue = Issue(
|
|
548
|
+
id=f"test_failure_{i}",
|
|
549
|
+
type=IssueType.TEST_FAILURE,
|
|
550
|
+
severity=Priority.HIGH,
|
|
551
|
+
message=failure.strip(),
|
|
552
|
+
stage="tests",
|
|
553
|
+
)
|
|
554
|
+
issues.append(issue)
|
|
555
|
+
|
|
556
|
+
return issues, test_count
|
|
557
|
+
|
|
558
|
+
def _collect_hook_failure_issues(self) -> tuple[list[Issue], int]:
|
|
559
|
+
"""Collect hook failure issues and return count."""
|
|
560
|
+
issues: list[Issue] = []
|
|
561
|
+
hook_count = 0
|
|
562
|
+
|
|
563
|
+
if not self.session.session_tracker:
|
|
564
|
+
return issues, hook_count
|
|
565
|
+
|
|
566
|
+
for task_id, task_data in self.session.session_tracker.tasks.items():
|
|
567
|
+
if self._is_failed_hook_task(task_data, task_id):
|
|
568
|
+
hook_count += 1
|
|
569
|
+
hook_issues = self._process_hook_failure(task_id, task_data)
|
|
570
|
+
issues.extend(hook_issues)
|
|
571
|
+
|
|
572
|
+
return issues, hook_count
|
|
573
|
+
|
|
574
|
+
def _is_failed_hook_task(self, task_data: t.Any, task_id: str) -> bool:
|
|
575
|
+
"""Check if a task is a failed hook task."""
|
|
576
|
+
return task_data.status == "failed" and task_id in (
|
|
577
|
+
"fast_hooks",
|
|
578
|
+
"comprehensive_hooks",
|
|
579
|
+
)
|
|
580
|
+
|
|
581
|
+
def _process_hook_failure(self, task_id: str, task_data: t.Any) -> list[Issue]:
    """Turn one failed hook task into a list of issues.

    Tool-specific issues parsed out of the error message take priority;
    when nothing specific can be extracted, a single generic issue is
    produced instead.
    """
    message = getattr(task_data, "error_message", "Unknown error")
    parsed = self._parse_hook_error_details(task_id, message)
    return parsed or [self._create_generic_hook_issue(task_id, message)]
|
|
590
|
+
|
|
591
|
+
def _create_generic_hook_issue(self, task_id: str, error_msg: str) -> Issue:
    """Build a catch-all Issue for a hook failure with no parsed details.

    Fast hooks are assumed to fail on formatting; any other hook stage is
    treated as a type-checking problem.
    """
    if "fast" in task_id:
        kind = IssueType.FORMATTING
    else:
        kind = IssueType.TYPE_ERROR
    return Issue(
        id=f"hook_failure_{task_id}",
        type=kind,
        severity=Priority.MEDIUM,
        message=error_msg,
        stage=task_id.replace("_hooks", ""),
    )
|
|
601
|
+
|
|
602
|
+
def _parse_hook_error_details(self, task_id: str, error_msg: str) -> list[Issue]:
    """Parse hook failure details into targeted, tool-specific issues.

    For comprehensive hooks the error message is scanned for known tool
    names, producing one Issue per tool found (in a fixed, deterministic
    order). Fast hook failures always map to a single formatting issue.

    Args:
        task_id: Hook stage identifier ("fast_hooks" / "comprehensive_hooks").
        error_msg: Raw error text captured from the failed hook run.

    Returns:
        Matching issues; empty when nothing specific was recognized (the
        caller then falls back to a generic issue).
    """
    if task_id == "fast_hooks":
        # Fast hooks are typically formatting issues.
        return [
            Issue(
                id="fast_hooks_formatting",
                type=IssueType.FORMATTING,
                severity=Priority.LOW,
                message="Code formatting issues detected",
                stage="fast",
            )
        ]

    if task_id != "comprehensive_hooks":
        return []

    # (tool substring, issue id, issue type, severity, message) — one row
    # per tool replaces the previous five copy-pasted if-blocks; row order
    # preserves the original issue ordering.
    tool_signatures = (
        (
            "complexipy",
            "complexipy_violation",
            IssueType.COMPLEXITY,
            Priority.HIGH,
            "Code complexity violation detected by complexipy",
        ),
        (
            "pyright",
            "pyright_type_error",
            IssueType.TYPE_ERROR,
            Priority.HIGH,
            "Type checking errors detected by pyright",
        ),
        (
            "bandit",
            "bandit_security_issue",
            IssueType.SECURITY,
            Priority.HIGH,
            "Security vulnerabilities detected by bandit",
        ),
        (
            # PERFORMANCE is the closest available category for refurb findings.
            "refurb",
            "refurb_quality_issue",
            IssueType.PERFORMANCE,
            Priority.MEDIUM,
            "Code quality issues detected by refurb",
        ),
        (
            "vulture",
            "vulture_dead_code",
            IssueType.DEAD_CODE,
            Priority.MEDIUM,
            "Dead code detected by vulture",
        ),
    )

    # Lowercase once instead of per-tool, as the original did repeatedly.
    lowered = error_msg.lower()
    return [
        Issue(
            id=issue_id,
            type=issue_type,
            severity=severity,
            message=message,
            stage="comprehensive",
        )
        for tool, issue_id, issue_type, severity, message in tool_signatures
        if tool in lowered
    ]
|
|
681
|
+
|
|
682
|
+
def _log_failure_counts_if_debugging(
|
|
683
|
+
self, test_count: int, hook_count: int
|
|
684
|
+
) -> None:
|
|
685
|
+
"""Log failure counts if debugging is enabled."""
|
|
686
|
+
if self._should_debug():
|
|
687
|
+
self.debugger.log_test_failures(test_count)
|
|
688
|
+
self.debugger.log_hook_failures(hook_count)
|
|
689
|
+
|
|
690
|
+
|
|
691
|
+
class WorkflowOrchestrator:
|
|
692
|
+
def __init__(
    self,
    console: Console | None = None,
    pkg_path: Path | None = None,
    dry_run: bool = False,
    web_job_id: str | None = None,
    verbose: bool = False,
) -> None:
    """Wire up the orchestrator: DI container, session, phases, pipeline.

    Args:
        console: Rich console for output; a terminal-forcing one is created
            when omitted.
        pkg_path: Package root; defaults to the current working directory.
        dry_run: Forwarded to the DI container.
        web_job_id: Optional job identifier used for session tracking.
        verbose: Forwarded to the DI container.
    """
    self.console = console or Console(force_terminal=True)
    self.pkg_path = pkg_path or Path.cwd()
    self.dry_run = dry_run
    self.web_job_id = web_job_id
    self.verbose = verbose

    # Imported inside __init__ — presumably to avoid import cycles at
    # module load time; TODO confirm against the package layout.
    from crackerjack.models.protocols import (
        FileSystemInterface,
        GitInterface,
        HookManager,
        PublishManager,
        TestManagerProtocol,
    )

    from .container import create_container

    # The container must exist before the phase coordinator below, which
    # resolves its services from it.
    self.container = create_container(
        console=self.console,
        pkg_path=self.pkg_path,
        dry_run=self.dry_run,
        verbose=self.verbose,
    )

    self.session = SessionCoordinator(self.console, self.pkg_path, self.web_job_id)
    self.phases = PhaseCoordinator(
        console=self.console,
        pkg_path=self.pkg_path,
        session=self.session,
        filesystem=self.container.get(FileSystemInterface),
        git_service=self.container.get(GitInterface),
        hook_manager=self.container.get(HookManager),
        test_manager=self.container.get(TestManagerProtocol),
        publish_manager=self.container.get(PublishManager),
    )

    # Pipeline composes the session and phase coordinators built above.
    self.pipeline = WorkflowPipeline(
        console=self.console,
        pkg_path=self.pkg_path,
        session=self.session,
        phases=self.phases,
    )

    self.logger = get_logger("crackerjack.orchestrator")

    self._initialize_logging()
|
|
745
|
+
|
|
746
|
+
def _initialize_logging(self) -> None:
    """Create a per-session debug log file and enable structured logging."""
    # Lazy import, mirroring the rest of this class's local-import style.
    from crackerjack.services.log_manager import get_log_manager

    manager = get_log_manager()
    # Prefer the web job id; otherwise derive a short id from the clock.
    sid = getattr(self, "web_job_id", None) or str(int(time.time()))[:8]
    log_path = manager.create_debug_log_file(sid)

    setup_structured_logging(log_file=log_path)

    self.logger.info(
        "Structured logging initialized",
        log_file=str(log_path),
        log_directory=str(manager.log_dir),
        package_path=str(self.pkg_path),
        dry_run=self.dry_run,
    )
|
|
762
|
+
|
|
763
|
+
def _initialize_session_tracking(self, options: OptionsProtocol) -> None:
    """Delegate session-tracking setup to the session coordinator."""
    self.session.initialize_session_tracking(options)

def _track_task(self, task_id: str, task_name: str) -> None:
    """Register a task with the session tracker."""
    self.session.track_task(task_id, task_name)

def _complete_task(self, task_id: str, details: str | None = None) -> None:
    """Mark a tracked task as completed, with optional detail text."""
    self.session.complete_task(task_id, details)

def _fail_task(self, task_id: str, error: str) -> None:
    """Mark a tracked task as failed with the given error text."""
    self.session.fail_task(task_id, error)
|
|
774
|
+
|
|
775
|
+
def run_cleaning_phase(self, options: OptionsProtocol) -> bool:
    """Run the code-cleaning phase via the phase coordinator."""
    return self.phases.run_cleaning_phase(options)

def run_fast_hooks_only(self, options: OptionsProtocol) -> bool:
    """Run only the fast pre-commit hooks via the phase coordinator."""
    return self.phases.run_fast_hooks_only(options)

def run_comprehensive_hooks_only(self, options: OptionsProtocol) -> bool:
    """Run only the comprehensive hooks via the phase coordinator."""
    return self.phases.run_comprehensive_hooks_only(options)

def run_hooks_phase(self, options: OptionsProtocol) -> bool:
    """Run the full hooks phase via the phase coordinator."""
    return self.phases.run_hooks_phase(options)

def run_testing_phase(self, options: OptionsProtocol) -> bool:
    """Run the testing phase via the phase coordinator."""
    return self.phases.run_testing_phase(options)

def run_publishing_phase(self, options: OptionsProtocol) -> bool:
    """Run the publishing phase via the phase coordinator."""
    return self.phases.run_publishing_phase(options)

def run_commit_phase(self, options: OptionsProtocol) -> bool:
    """Run the commit phase via the phase coordinator."""
    return self.phases.run_commit_phase(options)

def run_configuration_phase(self, options: OptionsProtocol) -> bool:
    """Run the configuration phase via the phase coordinator."""
    return self.phases.run_configuration_phase(options)
|
|
798
|
+
|
|
799
|
+
async def run_complete_workflow(self, options: OptionsProtocol) -> bool:
    """Execute the full workflow by delegating to the pipeline."""
    return await self.pipeline.run_complete_workflow(options)

def _cleanup_resources(self) -> None:
    """Release session-held resources via the session coordinator."""
    self.session.cleanup_resources()

def _register_cleanup(self, cleanup_handler: t.Callable[[], None]) -> None:
    """Register a cleanup callback with the session coordinator."""
    self.session.register_cleanup(cleanup_handler)

def _track_lock_file(self, lock_file_path: Path) -> None:
    """Record a lock file so the session can clean it up later."""
    self.session.track_lock_file(lock_file_path)
|
|
810
|
+
|
|
811
|
+
def _get_version(self) -> str:
    """Return the installed package version, or "unknown" when lookup fails."""
    try:
        return version()
    except Exception:
        # Broad catch is deliberate: a version-lookup failure must never
        # break the workflow; fall back to a placeholder instead.
        return "unknown"
|
|
816
|
+
|
|
817
|
+
async def process(self, options: OptionsProtocol) -> bool:
    """Run the complete workflow inside a tracked session.

    Starts a session, executes the full pipeline, and always closes the
    session with the final success state. Exceptions are logged and
    converted into a ``False`` return instead of propagating.

    Args:
        options: Workflow options forwarded to the pipeline.

    Returns:
        True when the workflow succeeded, False otherwise.
    """
    self.session.start_session("process_workflow")

    try:
        result = await self.run_complete_workflow(options)
        self.session.end_session(success=result)
        return result
    except Exception as exc:
        # Previously the exception was swallowed silently, leaving no
        # trace of why the run failed; record it in the structured log
        # (keyword-argument style matches this class's other log calls).
        self.logger.error("Workflow process failed", error=str(exc))
        self.session.end_session(success=False)
        return False
|