crackerjack 0.30.3__py3-none-any.whl → 0.31.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +225 -299
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +169 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +652 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +401 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +618 -928
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +561 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +640 -0
- crackerjack/dynamic_config.py +94 -103
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +411 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +435 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +144 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +615 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +370 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +141 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +360 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +347 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +347 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +395 -0
- crackerjack/services/git.py +165 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +847 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.4.dist-info/METADATA +742 -0
- crackerjack-0.31.4.dist-info/RECORD +148 -0
- crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/crackerjack.py +0 -3805
- crackerjack/pyproject.toml +0 -286
- crackerjack-0.30.3.dist-info/METADATA +0 -1290
- crackerjack-0.30.3.dist-info/RECORD +0 -16
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,316 @@
|
|
|
1
|
+
import logging
|
|
2
|
+
import time
|
|
3
|
+
import typing as t
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
from ..agents.base import AgentContext, Issue, IssueType, Priority
|
|
7
|
+
from ..agents.coordinator import AgentCoordinator
|
|
8
|
+
from ..models.protocols import OptionsProtocol
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class ProactiveWorkflowPipeline:
|
|
12
|
+
"""Enhanced workflow pipeline with proactive architectural planning.
|
|
13
|
+
|
|
14
|
+
This pipeline adds a planning phase before each iteration to prevent
|
|
15
|
+
issues through intelligent architecture rather than reactive fixing.
|
|
16
|
+
"""
|
|
17
|
+
|
|
18
|
+
def __init__(self, project_path: Path) -> None:
|
|
19
|
+
self.project_path = project_path
|
|
20
|
+
self.logger = logging.getLogger(__name__)
|
|
21
|
+
self._architect_agent_coordinator: AgentCoordinator | None = None
|
|
22
|
+
|
|
23
|
+
async def run_complete_workflow_with_planning(
|
|
24
|
+
self, options: OptionsProtocol
|
|
25
|
+
) -> bool:
|
|
26
|
+
"""Execute workflow with proactive planning phases."""
|
|
27
|
+
self.logger.info("Starting proactive workflow with architectural planning")
|
|
28
|
+
|
|
29
|
+
start_time = time.time()
|
|
30
|
+
|
|
31
|
+
try:
|
|
32
|
+
# Phase 1: Initial architectural assessment
|
|
33
|
+
assessment = await self._assess_codebase_architecture()
|
|
34
|
+
|
|
35
|
+
if assessment.needs_planning:
|
|
36
|
+
self.logger.info("Codebase requires architectural planning")
|
|
37
|
+
|
|
38
|
+
# Phase 2: Create comprehensive architectural plan
|
|
39
|
+
architectural_plan = await self._create_comprehensive_plan(assessment)
|
|
40
|
+
|
|
41
|
+
# Phase 3: Execute workflow following the plan
|
|
42
|
+
result = await self._execute_planned_workflow(
|
|
43
|
+
options, architectural_plan
|
|
44
|
+
)
|
|
45
|
+
else:
|
|
46
|
+
self.logger.info(
|
|
47
|
+
"Codebase is architecturally sound, using standard workflow"
|
|
48
|
+
)
|
|
49
|
+
# Fallback to standard workflow for simple fixes
|
|
50
|
+
result = await self._execute_standard_workflow(options)
|
|
51
|
+
|
|
52
|
+
execution_time = time.time() - start_time
|
|
53
|
+
self.logger.info(f"Proactive workflow completed in {execution_time:.2f}s")
|
|
54
|
+
|
|
55
|
+
return result
|
|
56
|
+
|
|
57
|
+
except Exception as e:
|
|
58
|
+
self.logger.exception(f"Proactive workflow failed: {e}")
|
|
59
|
+
# Fallback to standard workflow on planning failure
|
|
60
|
+
return await self._execute_standard_workflow(options)
|
|
61
|
+
|
|
62
|
+
async def _assess_codebase_architecture(self) -> "ArchitecturalAssessment":
|
|
63
|
+
"""Assess the codebase to determine if proactive planning is needed."""
|
|
64
|
+
self.logger.info("Assessing codebase architecture...")
|
|
65
|
+
|
|
66
|
+
# Initialize architect coordinator if needed
|
|
67
|
+
if not self._architect_agent_coordinator:
|
|
68
|
+
agent_context = AgentContext(project_path=self.project_path)
|
|
69
|
+
self._architect_agent_coordinator = AgentCoordinator(agent_context)
|
|
70
|
+
self._architect_agent_coordinator.initialize_agents()
|
|
71
|
+
|
|
72
|
+
# Create test issues to assess complexity
|
|
73
|
+
test_issues = await self._identify_potential_issues()
|
|
74
|
+
|
|
75
|
+
# Determine if planning is beneficial
|
|
76
|
+
needs_planning = self._evaluate_planning_need(test_issues)
|
|
77
|
+
|
|
78
|
+
return ArchitecturalAssessment(
|
|
79
|
+
needs_planning=needs_planning,
|
|
80
|
+
complexity_score=len(test_issues),
|
|
81
|
+
potential_issues=test_issues,
|
|
82
|
+
recommended_strategy="proactive" if needs_planning else "standard",
|
|
83
|
+
)
|
|
84
|
+
|
|
85
|
+
async def _identify_potential_issues(self) -> list[Issue]:
|
|
86
|
+
"""Identify potential architectural issues in the codebase."""
|
|
87
|
+
# This would integrate with static analysis tools
|
|
88
|
+
# For now, create representative issues based on common patterns
|
|
89
|
+
|
|
90
|
+
potential_issues = []
|
|
91
|
+
|
|
92
|
+
# Check for complexity hotspots (would use real analysis)
|
|
93
|
+
potential_issues.extend(
|
|
94
|
+
(
|
|
95
|
+
Issue(
|
|
96
|
+
id="arch_assessment_complexity",
|
|
97
|
+
type=IssueType.COMPLEXITY,
|
|
98
|
+
severity=Priority.HIGH,
|
|
99
|
+
message="Potential complexity hotspots detected",
|
|
100
|
+
file_path=str(self.project_path),
|
|
101
|
+
details=["Multiple functions may exceed complexity threshold"],
|
|
102
|
+
),
|
|
103
|
+
Issue(
|
|
104
|
+
id="arch_assessment_dry",
|
|
105
|
+
type=IssueType.DRY_VIOLATION,
|
|
106
|
+
severity=Priority.MEDIUM,
|
|
107
|
+
message="Potential code duplication patterns",
|
|
108
|
+
file_path=str(self.project_path),
|
|
109
|
+
details=["Similar patterns may exist across modules"],
|
|
110
|
+
),
|
|
111
|
+
)
|
|
112
|
+
)
|
|
113
|
+
|
|
114
|
+
return potential_issues
|
|
115
|
+
|
|
116
|
+
def _evaluate_planning_need(self, issues: list[Issue]) -> bool:
|
|
117
|
+
"""Evaluate if proactive planning would be beneficial."""
|
|
118
|
+
# Planning is beneficial for:
|
|
119
|
+
# 1. Multiple complex issues
|
|
120
|
+
# 2. Architectural issues (complexity, DRY, performance)
|
|
121
|
+
# 3. Projects with many interdependencies
|
|
122
|
+
|
|
123
|
+
complex_issues = [
|
|
124
|
+
issue
|
|
125
|
+
for issue in issues
|
|
126
|
+
if issue.type
|
|
127
|
+
in {IssueType.COMPLEXITY, IssueType.DRY_VIOLATION, IssueType.PERFORMANCE}
|
|
128
|
+
]
|
|
129
|
+
|
|
130
|
+
# Need planning if we have 2+ complex issues
|
|
131
|
+
return len(complex_issues) >= 2
|
|
132
|
+
|
|
133
|
+
async def _create_comprehensive_plan(
|
|
134
|
+
self, assessment: "ArchitecturalAssessment"
|
|
135
|
+
) -> dict[str, t.Any]:
|
|
136
|
+
"""Create comprehensive architectural plan based on assessment."""
|
|
137
|
+
self.logger.info("Creating comprehensive architectural plan...")
|
|
138
|
+
|
|
139
|
+
assert self._architect_agent_coordinator is not None
|
|
140
|
+
|
|
141
|
+
# Use ArchitectAgent to create the plan
|
|
142
|
+
architect = self._architect_agent_coordinator._get_architect_agent()
|
|
143
|
+
|
|
144
|
+
if not architect:
|
|
145
|
+
self.logger.warning("No ArchitectAgent available, creating basic plan")
|
|
146
|
+
return {
|
|
147
|
+
"strategy": "basic_reactive",
|
|
148
|
+
"phases": ["standard_workflow"],
|
|
149
|
+
"patterns": ["default"],
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
# Create plan for the most complex issue as representative
|
|
153
|
+
complex_issues = [
|
|
154
|
+
issue
|
|
155
|
+
for issue in assessment.potential_issues
|
|
156
|
+
if issue.type in {IssueType.COMPLEXITY, IssueType.DRY_VIOLATION}
|
|
157
|
+
]
|
|
158
|
+
|
|
159
|
+
if complex_issues:
|
|
160
|
+
primary_issue = complex_issues[0]
|
|
161
|
+
base_plan = await architect.plan_before_action(primary_issue)
|
|
162
|
+
|
|
163
|
+
# Extend to comprehensive workflow plan
|
|
164
|
+
comprehensive_plan = base_plan | {
|
|
165
|
+
"phases": [
|
|
166
|
+
"configuration_setup",
|
|
167
|
+
"fast_hooks_with_architecture",
|
|
168
|
+
"architectural_refactoring",
|
|
169
|
+
"comprehensive_validation",
|
|
170
|
+
"pattern_learning",
|
|
171
|
+
],
|
|
172
|
+
"integration_points": [
|
|
173
|
+
"architect_guided_fixing",
|
|
174
|
+
"pattern_caching",
|
|
175
|
+
"validation_against_plan",
|
|
176
|
+
],
|
|
177
|
+
}
|
|
178
|
+
else:
|
|
179
|
+
comprehensive_plan = {
|
|
180
|
+
"strategy": "lightweight_proactive",
|
|
181
|
+
"phases": ["standard_workflow_enhanced"],
|
|
182
|
+
"patterns": ["cached_patterns"],
|
|
183
|
+
}
|
|
184
|
+
|
|
185
|
+
self.logger.info(
|
|
186
|
+
f"Created plan with strategy: {comprehensive_plan.get('strategy')}"
|
|
187
|
+
)
|
|
188
|
+
return comprehensive_plan
|
|
189
|
+
|
|
190
|
+
async def _execute_planned_workflow(
|
|
191
|
+
self, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
192
|
+
) -> bool:
|
|
193
|
+
"""Execute workflow following the architectural plan."""
|
|
194
|
+
strategy = plan.get("strategy", "basic_reactive")
|
|
195
|
+
phases = plan.get("phases", ["standard_workflow"])
|
|
196
|
+
|
|
197
|
+
self.logger.info(f"Executing {strategy} workflow with {len(phases)} phases")
|
|
198
|
+
|
|
199
|
+
# Execute each phase according to the plan
|
|
200
|
+
for phase in phases:
|
|
201
|
+
success = await self._execute_workflow_phase(phase, options, plan)
|
|
202
|
+
if not success and phase in (
|
|
203
|
+
"configuration_setup",
|
|
204
|
+
"architectural_refactoring",
|
|
205
|
+
):
|
|
206
|
+
# Critical phases - fail fast
|
|
207
|
+
self.logger.error(f"Critical phase {phase} failed")
|
|
208
|
+
return False
|
|
209
|
+
elif not success:
|
|
210
|
+
# Non-critical phases - log and continue
|
|
211
|
+
self.logger.warning(f"Phase {phase} had issues but continuing")
|
|
212
|
+
|
|
213
|
+
return True
|
|
214
|
+
|
|
215
|
+
async def _execute_workflow_phase(
|
|
216
|
+
self, phase: str, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
217
|
+
) -> bool:
|
|
218
|
+
"""Execute a specific workflow phase."""
|
|
219
|
+
self.logger.info(f"Executing phase: {phase}")
|
|
220
|
+
|
|
221
|
+
# Different phase implementations
|
|
222
|
+
if phase == "configuration_setup":
|
|
223
|
+
return await self._setup_with_architecture(options, plan)
|
|
224
|
+
elif phase == "fast_hooks_with_architecture":
|
|
225
|
+
return await self._run_fast_hooks_with_planning(options, plan)
|
|
226
|
+
elif phase == "architectural_refactoring":
|
|
227
|
+
return await self._perform_architectural_refactoring(options, plan)
|
|
228
|
+
elif phase == "comprehensive_validation":
|
|
229
|
+
return await self._comprehensive_validation(options, plan)
|
|
230
|
+
elif phase == "pattern_learning":
|
|
231
|
+
return await self._learn_and_cache_patterns(plan)
|
|
232
|
+
# Fallback to standard workflow for unknown phases
|
|
233
|
+
return await self._execute_standard_workflow(options)
|
|
234
|
+
|
|
235
|
+
async def _setup_with_architecture(
|
|
236
|
+
self, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
237
|
+
) -> bool:
|
|
238
|
+
"""Setup phase with architectural considerations."""
|
|
239
|
+
self.logger.info("Setting up project with architectural planning")
|
|
240
|
+
# This would integrate with existing setup logic
|
|
241
|
+
# For now, return success as architecture is already integrated
|
|
242
|
+
return True
|
|
243
|
+
|
|
244
|
+
async def _run_fast_hooks_with_planning(
|
|
245
|
+
self, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
246
|
+
) -> bool:
|
|
247
|
+
"""Run fast hooks with architectural awareness."""
|
|
248
|
+
self.logger.info("Running fast hooks with architectural planning")
|
|
249
|
+
# This would integrate with existing hook manager
|
|
250
|
+
# Enhanced to use architectural patterns from the plan
|
|
251
|
+
return True
|
|
252
|
+
|
|
253
|
+
async def _perform_architectural_refactoring(
|
|
254
|
+
self, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
255
|
+
) -> bool:
|
|
256
|
+
"""Perform refactoring following architectural plan."""
|
|
257
|
+
self.logger.info("Performing architectural refactoring")
|
|
258
|
+
|
|
259
|
+
# Use ArchitectAgent to guide refactoring
|
|
260
|
+
if self._architect_agent_coordinator:
|
|
261
|
+
architect = self._architect_agent_coordinator._get_architect_agent()
|
|
262
|
+
if architect:
|
|
263
|
+
# This would apply the architectural patterns
|
|
264
|
+
patterns = plan.get("patterns", [])
|
|
265
|
+
self.logger.info(f"Applying architectural patterns: {patterns}")
|
|
266
|
+
return True
|
|
267
|
+
|
|
268
|
+
return True
|
|
269
|
+
|
|
270
|
+
async def _comprehensive_validation(
|
|
271
|
+
self, options: OptionsProtocol, plan: dict[str, t.Any]
|
|
272
|
+
) -> bool:
|
|
273
|
+
"""Validate results against architectural plan."""
|
|
274
|
+
self.logger.info("Performing comprehensive validation")
|
|
275
|
+
validation_steps = plan.get("validation", [])
|
|
276
|
+
|
|
277
|
+
for step in validation_steps:
|
|
278
|
+
self.logger.info(f"Validating: {step}")
|
|
279
|
+
# Implement specific validation logic
|
|
280
|
+
|
|
281
|
+
return True
|
|
282
|
+
|
|
283
|
+
async def _learn_and_cache_patterns(self, plan: dict[str, t.Any]) -> bool:
|
|
284
|
+
"""Learn from successful patterns and cache them."""
|
|
285
|
+
self.logger.info("Learning and caching successful patterns")
|
|
286
|
+
|
|
287
|
+
# Cache successful patterns from the plan
|
|
288
|
+
if self._architect_agent_coordinator:
|
|
289
|
+
architect = self._architect_agent_coordinator._get_architect_agent()
|
|
290
|
+
if architect and hasattr(architect, "get_cached_patterns"):
|
|
291
|
+
cached_patterns = architect.get_cached_patterns()
|
|
292
|
+
self.logger.info(f"Cached {len(cached_patterns)} patterns")
|
|
293
|
+
|
|
294
|
+
return True
|
|
295
|
+
|
|
296
|
+
async def _execute_standard_workflow(self, options: OptionsProtocol) -> bool:
|
|
297
|
+
"""Fallback to standard workflow execution."""
|
|
298
|
+
self.logger.info("Executing standard workflow (fallback)")
|
|
299
|
+
# This would delegate to the existing workflow pipeline
|
|
300
|
+
return True
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
class ArchitecturalAssessment:
|
|
304
|
+
"""Assessment of codebase architecture for planning decisions."""
|
|
305
|
+
|
|
306
|
+
def __init__(
|
|
307
|
+
self,
|
|
308
|
+
needs_planning: bool,
|
|
309
|
+
complexity_score: int,
|
|
310
|
+
potential_issues: list[Issue],
|
|
311
|
+
recommended_strategy: str,
|
|
312
|
+
) -> None:
|
|
313
|
+
self.needs_planning = needs_planning
|
|
314
|
+
self.complexity_score = complexity_score
|
|
315
|
+
self.potential_issues = potential_issues
|
|
316
|
+
self.recommended_strategy = recommended_strategy
|
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
import json
|
|
2
|
+
import logging
|
|
3
|
+
import time
|
|
4
|
+
import typing as t
|
|
5
|
+
from contextlib import suppress
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
|
|
8
|
+
from rich.console import Console
|
|
9
|
+
|
|
10
|
+
from crackerjack.models.protocols import OptionsProtocol
|
|
11
|
+
from crackerjack.models.task import SessionTracker
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class SessionCoordinator:
|
|
15
|
+
def __init__(
|
|
16
|
+
self,
|
|
17
|
+
console: Console,
|
|
18
|
+
pkg_path: Path,
|
|
19
|
+
web_job_id: str | None = None,
|
|
20
|
+
) -> None:
|
|
21
|
+
self.console = console
|
|
22
|
+
self.pkg_path = pkg_path
|
|
23
|
+
self.session_tracker: SessionTracker | None = None
|
|
24
|
+
self._cleanup_handlers: list[t.Callable[[], None]] = []
|
|
25
|
+
self._thread_pool = None
|
|
26
|
+
self._lock_files: set[Path] = set()
|
|
27
|
+
|
|
28
|
+
import uuid
|
|
29
|
+
|
|
30
|
+
self.session_id = web_job_id or str(uuid.uuid4())
|
|
31
|
+
self.web_job_id = web_job_id
|
|
32
|
+
self.start_time = time.time()
|
|
33
|
+
self.tasks: dict[str, t.Any] = {}
|
|
34
|
+
self.current_task: str | None = None
|
|
35
|
+
self.success: bool = False
|
|
36
|
+
|
|
37
|
+
self._setup_logging()
|
|
38
|
+
|
|
39
|
+
if self.web_job_id:
|
|
40
|
+
self._setup_websocket_progress_file()
|
|
41
|
+
|
|
42
|
+
def start_session(self, task_name: str) -> None:
|
|
43
|
+
self.current_task = task_name
|
|
44
|
+
|
|
45
|
+
def end_session(self, success: bool = True) -> None:
|
|
46
|
+
self.success = success
|
|
47
|
+
self.end_time = time.time()
|
|
48
|
+
if success:
|
|
49
|
+
self.complete_task("session", "Session completed successfully")
|
|
50
|
+
else:
|
|
51
|
+
self.fail_task("session", "Session completed with errors")
|
|
52
|
+
|
|
53
|
+
def initialize_session_tracking(self, options: OptionsProtocol) -> None:
|
|
54
|
+
if hasattr(options, "track_progress") and options.track_progress:
|
|
55
|
+
import uuid
|
|
56
|
+
|
|
57
|
+
self.session_tracker = SessionTracker(
|
|
58
|
+
console=self.console,
|
|
59
|
+
session_id=str(uuid.uuid4()),
|
|
60
|
+
start_time=time.time(),
|
|
61
|
+
)
|
|
62
|
+
|
|
63
|
+
def track_task(self, task_id: str, task_name: str) -> str:
|
|
64
|
+
import time
|
|
65
|
+
|
|
66
|
+
task_obj = type(
|
|
67
|
+
"Task",
|
|
68
|
+
(),
|
|
69
|
+
{
|
|
70
|
+
"task_id": task_id,
|
|
71
|
+
"description": task_name,
|
|
72
|
+
"start_time": time.time(),
|
|
73
|
+
"status": "in_progress",
|
|
74
|
+
"details": None,
|
|
75
|
+
"end_time": None,
|
|
76
|
+
"progress": 0,
|
|
77
|
+
},
|
|
78
|
+
)()
|
|
79
|
+
|
|
80
|
+
self.tasks[task_id] = task_obj
|
|
81
|
+
|
|
82
|
+
if self.session_tracker:
|
|
83
|
+
self.session_tracker.start_task(task_id, task_name)
|
|
84
|
+
|
|
85
|
+
return task_id
|
|
86
|
+
|
|
87
|
+
def update_task(
|
|
88
|
+
self,
|
|
89
|
+
task_id: str,
|
|
90
|
+
status: str,
|
|
91
|
+
details: str | None = None,
|
|
92
|
+
progress: int | None = None,
|
|
93
|
+
) -> None:
|
|
94
|
+
if task_id in self.tasks:
|
|
95
|
+
task = self.tasks[task_id]
|
|
96
|
+
task.status = status
|
|
97
|
+
if details:
|
|
98
|
+
task.details = details
|
|
99
|
+
if progress is not None:
|
|
100
|
+
task.progress = progress
|
|
101
|
+
|
|
102
|
+
if status in ("completed", "failed"):
|
|
103
|
+
task.end_time = time.time()
|
|
104
|
+
|
|
105
|
+
def complete_task(self, task_id: str, details: str | None = None) -> None:
|
|
106
|
+
if self.session_tracker:
|
|
107
|
+
self.session_tracker.complete_task(task_id, details=details)
|
|
108
|
+
|
|
109
|
+
def fail_task(self, task_id: str, error: str) -> None:
|
|
110
|
+
if self.session_tracker:
|
|
111
|
+
self.session_tracker.fail_task(task_id, error)
|
|
112
|
+
|
|
113
|
+
def get_session_summary(self) -> dict[str, int] | None:
|
|
114
|
+
if self.session_tracker:
|
|
115
|
+
return self.session_tracker.get_summary()
|
|
116
|
+
return None
|
|
117
|
+
|
|
118
|
+
def get_summary(self) -> dict[str, t.Any]:
|
|
119
|
+
duration = getattr(self, "end_time", time.time()) - self.start_time
|
|
120
|
+
tasks_count = len(self.tasks)
|
|
121
|
+
|
|
122
|
+
if self.session_tracker:
|
|
123
|
+
return self.session_tracker.get_summary()
|
|
124
|
+
|
|
125
|
+
return {
|
|
126
|
+
"session_id": self.session_id,
|
|
127
|
+
"duration": duration,
|
|
128
|
+
"tasks_count": tasks_count,
|
|
129
|
+
"success": self.success,
|
|
130
|
+
"tasks": [
|
|
131
|
+
{
|
|
132
|
+
"task_id": task.task_id,
|
|
133
|
+
"description": task.description,
|
|
134
|
+
"status": task.status,
|
|
135
|
+
"details": task.details,
|
|
136
|
+
"start_time": task.start_time,
|
|
137
|
+
"end_time": task.end_time,
|
|
138
|
+
"progress": task.progress,
|
|
139
|
+
}
|
|
140
|
+
for task in self.tasks.values()
|
|
141
|
+
],
|
|
142
|
+
}
|
|
143
|
+
|
|
144
|
+
def finalize_session(self, start_time: float, success: bool) -> None:
|
|
145
|
+
total_time = time.time() - start_time
|
|
146
|
+
if success:
|
|
147
|
+
self.complete_task(
|
|
148
|
+
"workflow",
|
|
149
|
+
f"Completed successfully in {total_time:.1f}s",
|
|
150
|
+
)
|
|
151
|
+
else:
|
|
152
|
+
self.complete_task(
|
|
153
|
+
"workflow",
|
|
154
|
+
f"Completed with issues in {total_time:.1f}s",
|
|
155
|
+
)
|
|
156
|
+
|
|
157
|
+
def register_cleanup(self, cleanup_handler: t.Callable[[], None]) -> None:
|
|
158
|
+
self._cleanup_handlers.append(cleanup_handler)
|
|
159
|
+
|
|
160
|
+
def track_lock_file(self, lock_file_path: Path) -> None:
|
|
161
|
+
self._lock_files.add(lock_file_path)
|
|
162
|
+
|
|
163
|
+
def cleanup_resources(self) -> None:
|
|
164
|
+
for cleanup_handler in self._cleanup_handlers:
|
|
165
|
+
with suppress(Exception):
|
|
166
|
+
cleanup_handler()
|
|
167
|
+
|
|
168
|
+
self._cleanup_temporary_files()
|
|
169
|
+
|
|
170
|
+
def _cleanup_temporary_files(self) -> None:
|
|
171
|
+
if not hasattr(self, "_cleanup_config") or self._cleanup_config is None:
|
|
172
|
+
self._cleanup_debug_logs()
|
|
173
|
+
self._cleanup_coverage_files()
|
|
174
|
+
self._cleanup_pycache_directories()
|
|
175
|
+
elif self._cleanup_config.auto_cleanup:
|
|
176
|
+
self._cleanup_debug_logs(keep_recent=self._cleanup_config.keep_debug_logs)
|
|
177
|
+
self._cleanup_coverage_files(
|
|
178
|
+
keep_recent=self._cleanup_config.keep_coverage_files,
|
|
179
|
+
)
|
|
180
|
+
self._cleanup_pycache_directories()
|
|
181
|
+
|
|
182
|
+
def set_cleanup_config(self, cleanup_config: t.Any) -> None:
|
|
183
|
+
self._cleanup_config = cleanup_config
|
|
184
|
+
|
|
185
|
+
def _cleanup_debug_logs(self, keep_recent: int = 5) -> None:
|
|
186
|
+
with suppress(Exception):
|
|
187
|
+
from crackerjack.services.log_manager import get_log_manager
|
|
188
|
+
|
|
189
|
+
log_manager = get_log_manager()
|
|
190
|
+
|
|
191
|
+
log_manager.rotate_logs(
|
|
192
|
+
log_manager.debug_dir,
|
|
193
|
+
"debug-*.log",
|
|
194
|
+
max_files=keep_recent,
|
|
195
|
+
max_age_days=7,
|
|
196
|
+
)
|
|
197
|
+
|
|
198
|
+
legacy_pattern = "crackerjack-debug-*.log"
|
|
199
|
+
legacy_files = sorted(
|
|
200
|
+
self.pkg_path.glob(legacy_pattern),
|
|
201
|
+
key=lambda p: p.stat().st_mtime,
|
|
202
|
+
)
|
|
203
|
+
|
|
204
|
+
for old_file in legacy_files[:-keep_recent]:
|
|
205
|
+
with suppress(FileNotFoundError, PermissionError):
|
|
206
|
+
old_file.unlink()
|
|
207
|
+
|
|
208
|
+
def _cleanup_coverage_files(self, keep_recent: int = 10) -> None:
|
|
209
|
+
with suppress(Exception):
|
|
210
|
+
# Clean up coverage files from cache directory
|
|
211
|
+
cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
|
|
212
|
+
if cache_dir.exists():
|
|
213
|
+
pattern = ".coverage*"
|
|
214
|
+
coverage_files = sorted(
|
|
215
|
+
cache_dir.glob(pattern),
|
|
216
|
+
key=lambda p: p.stat().st_mtime,
|
|
217
|
+
)
|
|
218
|
+
|
|
219
|
+
for old_file in coverage_files[:-keep_recent]:
|
|
220
|
+
with suppress(FileNotFoundError, PermissionError):
|
|
221
|
+
old_file.unlink()
|
|
222
|
+
|
|
223
|
+
# Also clean up any legacy coverage files from project root
|
|
224
|
+
pattern = ".coverage.*"
|
|
225
|
+
coverage_files = sorted(
|
|
226
|
+
self.pkg_path.glob(pattern),
|
|
227
|
+
key=lambda p: p.stat().st_mtime,
|
|
228
|
+
)
|
|
229
|
+
|
|
230
|
+
for old_file in coverage_files:
|
|
231
|
+
with suppress(FileNotFoundError, PermissionError):
|
|
232
|
+
old_file.unlink()
|
|
233
|
+
|
|
234
|
+
def _cleanup_pycache_directories(self) -> None:
|
|
235
|
+
"""Remove __pycache__ directories from the package to keep repo clean."""
|
|
236
|
+
with suppress(Exception):
|
|
237
|
+
import shutil
|
|
238
|
+
|
|
239
|
+
# Clean __pycache__ directories in package
|
|
240
|
+
for pycache_dir in self.pkg_path.rglob("__pycache__"):
|
|
241
|
+
if pycache_dir.is_dir():
|
|
242
|
+
with suppress(FileNotFoundError, PermissionError):
|
|
243
|
+
shutil.rmtree(pycache_dir)
|
|
244
|
+
|
|
245
|
+
def _setup_logging(self) -> None:
|
|
246
|
+
logger = logging.getLogger("crackerjack")
|
|
247
|
+
if not logger.handlers:
|
|
248
|
+
handler = logging.StreamHandler()
|
|
249
|
+
handler.setLevel(logging.WARNING)
|
|
250
|
+
logger.addHandler(handler)
|
|
251
|
+
logger.setLevel(logging.WARNING)
|
|
252
|
+
|
|
253
|
+
def _setup_websocket_progress_file(self) -> None:
|
|
254
|
+
import tempfile
|
|
255
|
+
|
|
256
|
+
self.progress_dir = Path(tempfile.gettempdir()) / "crackerjack-mcp-progress"
|
|
257
|
+
self.progress_file = self.progress_dir / f"job-{self.web_job_id}.json"
|
|
258
|
+
|
|
259
|
+
if self.progress_file.exists():
|
|
260
|
+
self._update_websocket_progress("running", "Crackerjack process started")
|
|
261
|
+
|
|
262
|
+
def _update_websocket_progress(self, status: str, message: str) -> None:
|
|
263
|
+
if not hasattr(self, "progress_file") or not self.progress_file:
|
|
264
|
+
return
|
|
265
|
+
|
|
266
|
+
try:
|
|
267
|
+
progress_data = {}
|
|
268
|
+
if self.progress_file.exists():
|
|
269
|
+
progress_data = json.loads(self.progress_file.read_text())
|
|
270
|
+
|
|
271
|
+
progress_data.update(
|
|
272
|
+
{
|
|
273
|
+
"status": status,
|
|
274
|
+
"message": message,
|
|
275
|
+
"updated_at": time.time(),
|
|
276
|
+
"current_stage": message,
|
|
277
|
+
},
|
|
278
|
+
)
|
|
279
|
+
|
|
280
|
+
self.progress_file.write_text(json.dumps(progress_data, indent=2))
|
|
281
|
+
|
|
282
|
+
except Exception as e:
|
|
283
|
+
self.console.print(
|
|
284
|
+
f"[dim yellow]Warning: Could not update progress file: {e}[/dim yellow]",
|
|
285
|
+
)
|
|
286
|
+
|
|
287
|
+
def update_stage(self, stage: str, status: str) -> None:
|
|
288
|
+
if self.web_job_id:
|
|
289
|
+
self._update_websocket_progress(status, f"{stage}: {status}")
|