crackerjack 0.30.3__py3-none-any.whl → 0.31.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of crackerjack might be problematic. Click here for more details.

Files changed (156) hide show
  1. crackerjack/CLAUDE.md +1005 -0
  2. crackerjack/RULES.md +380 -0
  3. crackerjack/__init__.py +42 -13
  4. crackerjack/__main__.py +227 -299
  5. crackerjack/agents/__init__.py +41 -0
  6. crackerjack/agents/architect_agent.py +281 -0
  7. crackerjack/agents/base.py +170 -0
  8. crackerjack/agents/coordinator.py +512 -0
  9. crackerjack/agents/documentation_agent.py +498 -0
  10. crackerjack/agents/dry_agent.py +388 -0
  11. crackerjack/agents/formatting_agent.py +245 -0
  12. crackerjack/agents/import_optimization_agent.py +281 -0
  13. crackerjack/agents/performance_agent.py +669 -0
  14. crackerjack/agents/proactive_agent.py +104 -0
  15. crackerjack/agents/refactoring_agent.py +788 -0
  16. crackerjack/agents/security_agent.py +529 -0
  17. crackerjack/agents/test_creation_agent.py +657 -0
  18. crackerjack/agents/test_specialist_agent.py +486 -0
  19. crackerjack/agents/tracker.py +212 -0
  20. crackerjack/api.py +560 -0
  21. crackerjack/cli/__init__.py +24 -0
  22. crackerjack/cli/facade.py +104 -0
  23. crackerjack/cli/handlers.py +267 -0
  24. crackerjack/cli/interactive.py +471 -0
  25. crackerjack/cli/options.py +409 -0
  26. crackerjack/cli/utils.py +18 -0
  27. crackerjack/code_cleaner.py +618 -928
  28. crackerjack/config/__init__.py +19 -0
  29. crackerjack/config/hooks.py +218 -0
  30. crackerjack/core/__init__.py +0 -0
  31. crackerjack/core/async_workflow_orchestrator.py +406 -0
  32. crackerjack/core/autofix_coordinator.py +200 -0
  33. crackerjack/core/container.py +104 -0
  34. crackerjack/core/enhanced_container.py +542 -0
  35. crackerjack/core/performance.py +243 -0
  36. crackerjack/core/phase_coordinator.py +585 -0
  37. crackerjack/core/proactive_workflow.py +316 -0
  38. crackerjack/core/session_coordinator.py +289 -0
  39. crackerjack/core/workflow_orchestrator.py +826 -0
  40. crackerjack/dynamic_config.py +94 -103
  41. crackerjack/errors.py +263 -41
  42. crackerjack/executors/__init__.py +11 -0
  43. crackerjack/executors/async_hook_executor.py +431 -0
  44. crackerjack/executors/cached_hook_executor.py +242 -0
  45. crackerjack/executors/hook_executor.py +345 -0
  46. crackerjack/executors/individual_hook_executor.py +669 -0
  47. crackerjack/intelligence/__init__.py +44 -0
  48. crackerjack/intelligence/adaptive_learning.py +751 -0
  49. crackerjack/intelligence/agent_orchestrator.py +551 -0
  50. crackerjack/intelligence/agent_registry.py +414 -0
  51. crackerjack/intelligence/agent_selector.py +502 -0
  52. crackerjack/intelligence/integration.py +290 -0
  53. crackerjack/interactive.py +576 -315
  54. crackerjack/managers/__init__.py +11 -0
  55. crackerjack/managers/async_hook_manager.py +135 -0
  56. crackerjack/managers/hook_manager.py +137 -0
  57. crackerjack/managers/publish_manager.py +433 -0
  58. crackerjack/managers/test_command_builder.py +151 -0
  59. crackerjack/managers/test_executor.py +443 -0
  60. crackerjack/managers/test_manager.py +258 -0
  61. crackerjack/managers/test_manager_backup.py +1124 -0
  62. crackerjack/managers/test_progress.py +114 -0
  63. crackerjack/mcp/__init__.py +0 -0
  64. crackerjack/mcp/cache.py +336 -0
  65. crackerjack/mcp/client_runner.py +104 -0
  66. crackerjack/mcp/context.py +621 -0
  67. crackerjack/mcp/dashboard.py +636 -0
  68. crackerjack/mcp/enhanced_progress_monitor.py +479 -0
  69. crackerjack/mcp/file_monitor.py +336 -0
  70. crackerjack/mcp/progress_components.py +569 -0
  71. crackerjack/mcp/progress_monitor.py +949 -0
  72. crackerjack/mcp/rate_limiter.py +332 -0
  73. crackerjack/mcp/server.py +22 -0
  74. crackerjack/mcp/server_core.py +244 -0
  75. crackerjack/mcp/service_watchdog.py +501 -0
  76. crackerjack/mcp/state.py +395 -0
  77. crackerjack/mcp/task_manager.py +257 -0
  78. crackerjack/mcp/tools/__init__.py +17 -0
  79. crackerjack/mcp/tools/core_tools.py +249 -0
  80. crackerjack/mcp/tools/error_analyzer.py +308 -0
  81. crackerjack/mcp/tools/execution_tools.py +372 -0
  82. crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
  83. crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
  84. crackerjack/mcp/tools/intelligence_tools.py +314 -0
  85. crackerjack/mcp/tools/monitoring_tools.py +502 -0
  86. crackerjack/mcp/tools/proactive_tools.py +384 -0
  87. crackerjack/mcp/tools/progress_tools.py +217 -0
  88. crackerjack/mcp/tools/utility_tools.py +341 -0
  89. crackerjack/mcp/tools/workflow_executor.py +565 -0
  90. crackerjack/mcp/websocket/__init__.py +14 -0
  91. crackerjack/mcp/websocket/app.py +39 -0
  92. crackerjack/mcp/websocket/endpoints.py +559 -0
  93. crackerjack/mcp/websocket/jobs.py +253 -0
  94. crackerjack/mcp/websocket/server.py +116 -0
  95. crackerjack/mcp/websocket/websocket_handler.py +78 -0
  96. crackerjack/mcp/websocket_server.py +10 -0
  97. crackerjack/models/__init__.py +31 -0
  98. crackerjack/models/config.py +93 -0
  99. crackerjack/models/config_adapter.py +230 -0
  100. crackerjack/models/protocols.py +118 -0
  101. crackerjack/models/task.py +154 -0
  102. crackerjack/monitoring/ai_agent_watchdog.py +450 -0
  103. crackerjack/monitoring/regression_prevention.py +638 -0
  104. crackerjack/orchestration/__init__.py +0 -0
  105. crackerjack/orchestration/advanced_orchestrator.py +970 -0
  106. crackerjack/orchestration/coverage_improvement.py +223 -0
  107. crackerjack/orchestration/execution_strategies.py +341 -0
  108. crackerjack/orchestration/test_progress_streamer.py +636 -0
  109. crackerjack/plugins/__init__.py +15 -0
  110. crackerjack/plugins/base.py +200 -0
  111. crackerjack/plugins/hooks.py +246 -0
  112. crackerjack/plugins/loader.py +335 -0
  113. crackerjack/plugins/managers.py +259 -0
  114. crackerjack/py313.py +8 -3
  115. crackerjack/services/__init__.py +22 -0
  116. crackerjack/services/cache.py +314 -0
  117. crackerjack/services/config.py +358 -0
  118. crackerjack/services/config_integrity.py +99 -0
  119. crackerjack/services/contextual_ai_assistant.py +516 -0
  120. crackerjack/services/coverage_ratchet.py +356 -0
  121. crackerjack/services/debug.py +736 -0
  122. crackerjack/services/dependency_monitor.py +617 -0
  123. crackerjack/services/enhanced_filesystem.py +439 -0
  124. crackerjack/services/file_hasher.py +151 -0
  125. crackerjack/services/filesystem.py +421 -0
  126. crackerjack/services/git.py +176 -0
  127. crackerjack/services/health_metrics.py +611 -0
  128. crackerjack/services/initialization.py +873 -0
  129. crackerjack/services/log_manager.py +286 -0
  130. crackerjack/services/logging.py +174 -0
  131. crackerjack/services/metrics.py +578 -0
  132. crackerjack/services/pattern_cache.py +362 -0
  133. crackerjack/services/pattern_detector.py +515 -0
  134. crackerjack/services/performance_benchmarks.py +653 -0
  135. crackerjack/services/security.py +163 -0
  136. crackerjack/services/server_manager.py +234 -0
  137. crackerjack/services/smart_scheduling.py +144 -0
  138. crackerjack/services/tool_version_service.py +61 -0
  139. crackerjack/services/unified_config.py +437 -0
  140. crackerjack/services/version_checker.py +248 -0
  141. crackerjack/slash_commands/__init__.py +14 -0
  142. crackerjack/slash_commands/init.md +122 -0
  143. crackerjack/slash_commands/run.md +163 -0
  144. crackerjack/slash_commands/status.md +127 -0
  145. crackerjack-0.31.7.dist-info/METADATA +742 -0
  146. crackerjack-0.31.7.dist-info/RECORD +149 -0
  147. crackerjack-0.31.7.dist-info/entry_points.txt +2 -0
  148. crackerjack/.gitignore +0 -34
  149. crackerjack/.libcst.codemod.yaml +0 -18
  150. crackerjack/.pdm.toml +0 -1
  151. crackerjack/crackerjack.py +0 -3805
  152. crackerjack/pyproject.toml +0 -286
  153. crackerjack-0.30.3.dist-info/METADATA +0 -1290
  154. crackerjack-0.30.3.dist-info/RECORD +0 -16
  155. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
  156. {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,316 @@
1
+ import logging
2
+ import time
3
+ import typing as t
4
+ from pathlib import Path
5
+
6
+ from ..agents.base import AgentContext, Issue, IssueType, Priority
7
+ from ..agents.coordinator import AgentCoordinator
8
+ from ..models.protocols import OptionsProtocol
9
+
10
+
11
class ProactiveWorkflowPipeline:
    """Enhanced workflow pipeline with proactive architectural planning.

    Adds an assessment/planning phase before execution so that issues are
    prevented through intelligent architecture rather than reactive fixing.
    Falls back to the standard workflow when planning is unnecessary or when
    the planning path raises.
    """

    # Phases that must succeed; failure of any of these aborts the planned
    # workflow instead of merely logging a warning.
    _CRITICAL_PHASES: t.ClassVar[frozenset[str]] = frozenset(
        {"configuration_setup", "architectural_refactoring"}
    )

    def __init__(self, project_path: Path) -> None:
        self.project_path = project_path
        self.logger = logging.getLogger(__name__)
        # Created lazily on first assessment and reused across phases.
        self._architect_agent_coordinator: AgentCoordinator | None = None

    async def run_complete_workflow_with_planning(
        self, options: "OptionsProtocol"
    ) -> bool:
        """Execute the workflow, planning the architecture first when useful.

        Returns True on success. Planning failures degrade gracefully to the
        standard workflow rather than aborting the run.
        """
        self.logger.info("Starting proactive workflow with architectural planning")

        start_time = time.time()

        try:
            # Phase 1: initial architectural assessment.
            assessment = await self._assess_codebase_architecture()

            if assessment.needs_planning:
                self.logger.info("Codebase requires architectural planning")

                # Phase 2: create a comprehensive architectural plan.
                architectural_plan = await self._create_comprehensive_plan(assessment)

                # Phase 3: execute the workflow following the plan.
                result = await self._execute_planned_workflow(
                    options, architectural_plan
                )
            else:
                self.logger.info(
                    "Codebase is architecturally sound, using standard workflow"
                )
                # Fallback to standard workflow for simple fixes.
                result = await self._execute_standard_workflow(options)

            execution_time = time.time() - start_time
            self.logger.info("Proactive workflow completed in %.2fs", execution_time)

            return result

        except Exception:
            # logger.exception already records the exception and traceback;
            # no need to interpolate the error into the message.
            self.logger.exception("Proactive workflow failed")
            # Fallback to standard workflow on planning failure.
            return await self._execute_standard_workflow(options)

    async def _assess_codebase_architecture(self) -> "ArchitecturalAssessment":
        """Assess the codebase to determine whether proactive planning is needed."""
        self.logger.info("Assessing codebase architecture...")

        # Initialize the architect coordinator on first use.
        if not self._architect_agent_coordinator:
            agent_context = AgentContext(project_path=self.project_path)
            self._architect_agent_coordinator = AgentCoordinator(agent_context)
            self._architect_agent_coordinator.initialize_agents()

        # Collect representative issues to estimate complexity.
        test_issues = await self._identify_potential_issues()

        needs_planning = self._evaluate_planning_need(test_issues)

        return ArchitecturalAssessment(
            needs_planning=needs_planning,
            complexity_score=len(test_issues),
            potential_issues=test_issues,
            recommended_strategy="proactive" if needs_planning else "standard",
        )

    async def _identify_potential_issues(self) -> "list[Issue]":
        """Return representative architectural issues for the project.

        NOTE(review): this is currently a stub — it fabricates two fixed
        issues rather than running real static analysis; confirm before
        relying on the assessment.
        """
        return [
            Issue(
                id="arch_assessment_complexity",
                type=IssueType.COMPLEXITY,
                severity=Priority.HIGH,
                message="Potential complexity hotspots detected",
                file_path=str(self.project_path),
                details=["Multiple functions may exceed complexity threshold"],
            ),
            Issue(
                id="arch_assessment_dry",
                type=IssueType.DRY_VIOLATION,
                severity=Priority.MEDIUM,
                message="Potential code duplication patterns",
                file_path=str(self.project_path),
                details=["Similar patterns may exist across modules"],
            ),
        ]

    def _evaluate_planning_need(self, issues: "list[Issue]") -> bool:
        """Return True when proactive planning is likely beneficial.

        Planning helps with multiple architectural issues (complexity, DRY,
        performance); two or more such issues trigger the planned path.
        """
        complex_issues = [
            issue
            for issue in issues
            if issue.type
            in {IssueType.COMPLEXITY, IssueType.DRY_VIOLATION, IssueType.PERFORMANCE}
        ]
        return len(complex_issues) >= 2

    async def _create_comprehensive_plan(
        self, assessment: "ArchitecturalAssessment"
    ) -> dict[str, t.Any]:
        """Create a comprehensive architectural plan based on the assessment."""
        self.logger.info("Creating comprehensive architectural plan...")

        # _assess_codebase_architecture always runs first and sets this.
        assert self._architect_agent_coordinator is not None

        architect = self._architect_agent_coordinator._get_architect_agent()

        if not architect:
            self.logger.warning("No ArchitectAgent available, creating basic plan")
            return {
                "strategy": "basic_reactive",
                "phases": ["standard_workflow"],
                "patterns": ["default"],
            }

        # Plan around the most complex issue as a representative case.
        complex_issues = [
            issue
            for issue in assessment.potential_issues
            if issue.type in {IssueType.COMPLEXITY, IssueType.DRY_VIOLATION}
        ]

        if complex_issues:
            primary_issue = complex_issues[0]
            base_plan = await architect.plan_before_action(primary_issue)

            # Extend the architect's plan into a full workflow plan.
            comprehensive_plan = base_plan | {
                "phases": [
                    "configuration_setup",
                    "fast_hooks_with_architecture",
                    "architectural_refactoring",
                    "comprehensive_validation",
                    "pattern_learning",
                ],
                "integration_points": [
                    "architect_guided_fixing",
                    "pattern_caching",
                    "validation_against_plan",
                ],
            }
        else:
            comprehensive_plan = {
                "strategy": "lightweight_proactive",
                "phases": ["standard_workflow_enhanced"],
                "patterns": ["cached_patterns"],
            }

        self.logger.info(
            "Created plan with strategy: %s", comprehensive_plan.get("strategy")
        )
        return comprehensive_plan

    async def _execute_planned_workflow(
        self, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Execute workflow phases in plan order; fail fast on critical phases."""
        strategy = plan.get("strategy", "basic_reactive")
        phases = plan.get("phases", ["standard_workflow"])

        self.logger.info(
            "Executing %s workflow with %d phases", strategy, len(phases)
        )

        for phase in phases:
            success = await self._execute_workflow_phase(phase, options, plan)
            if success:
                continue
            if phase in self._CRITICAL_PHASES:
                self.logger.error("Critical phase %s failed", phase)
                return False
            # Non-critical phases: log and continue.
            self.logger.warning("Phase %s had issues but continuing", phase)

        return True

    async def _execute_workflow_phase(
        self, phase: str, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Execute a single named workflow phase; unknown names fall back."""
        self.logger.info("Executing phase: %s", phase)

        if phase == "configuration_setup":
            return await self._setup_with_architecture(options, plan)
        if phase == "fast_hooks_with_architecture":
            return await self._run_fast_hooks_with_planning(options, plan)
        if phase == "architectural_refactoring":
            return await self._perform_architectural_refactoring(options, plan)
        if phase == "comprehensive_validation":
            return await self._comprehensive_validation(options, plan)
        if phase == "pattern_learning":
            return await self._learn_and_cache_patterns(plan)
        # Unknown phase names degrade to the standard workflow.
        return await self._execute_standard_workflow(options)

    async def _setup_with_architecture(
        self, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Setup phase with architectural considerations (currently a no-op stub)."""
        self.logger.info("Setting up project with architectural planning")
        return True

    async def _run_fast_hooks_with_planning(
        self, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Run fast hooks with architectural awareness (currently a no-op stub)."""
        self.logger.info("Running fast hooks with architectural planning")
        return True

    async def _perform_architectural_refactoring(
        self, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Apply the plan's architectural patterns via the ArchitectAgent."""
        self.logger.info("Performing architectural refactoring")

        if self._architect_agent_coordinator:
            architect = self._architect_agent_coordinator._get_architect_agent()
            if architect:
                # Pattern application itself is not implemented yet; only logged.
                patterns = plan.get("patterns", [])
                self.logger.info("Applying architectural patterns: %s", patterns)
                return True

        return True

    async def _comprehensive_validation(
        self, options: "OptionsProtocol", plan: dict[str, t.Any]
    ) -> bool:
        """Log each validation step listed in the plan (no real checks yet)."""
        self.logger.info("Performing comprehensive validation")
        for step in plan.get("validation", []):
            self.logger.info("Validating: %s", step)
        return True

    async def _learn_and_cache_patterns(self, plan: dict[str, t.Any]) -> bool:
        """Report the architect's cached patterns after a successful run."""
        self.logger.info("Learning and caching successful patterns")

        if self._architect_agent_coordinator:
            architect = self._architect_agent_coordinator._get_architect_agent()
            if architect and hasattr(architect, "get_cached_patterns"):
                cached_patterns = architect.get_cached_patterns()
                self.logger.info("Cached %d patterns", len(cached_patterns))

        return True

    async def _execute_standard_workflow(self, options: "OptionsProtocol") -> bool:
        """Fallback standard workflow (stub: would delegate to the main pipeline)."""
        self.logger.info("Executing standard workflow (fallback)")
        return True
301
+
302
+
303
class ArchitecturalAssessment:
    """Assessment of codebase architecture for planning decisions.

    Attributes:
        needs_planning: True when the proactive planning path should be taken.
        complexity_score: Number of potential issues found during assessment.
        potential_issues: The issues the assessment is based on.
        recommended_strategy: Either "proactive" or "standard".
    """

    def __init__(
        self,
        needs_planning: bool,
        complexity_score: int,
        potential_issues: "list[Issue]",
        recommended_strategy: str,
    ) -> None:
        self.needs_planning = needs_planning
        self.complexity_score = complexity_score
        self.potential_issues = potential_issues
        self.recommended_strategy = recommended_strategy

    def __repr__(self) -> str:
        # Debug-friendly representation; issue list is summarized by count.
        return (
            f"{type(self).__name__}("
            f"needs_planning={self.needs_planning!r}, "
            f"complexity_score={self.complexity_score!r}, "
            f"issues={len(self.potential_issues)}, "
            f"recommended_strategy={self.recommended_strategy!r})"
        )
@@ -0,0 +1,289 @@
1
+ import json
2
+ import logging
3
+ import time
4
+ import typing as t
5
+ from contextlib import suppress
6
+ from pathlib import Path
7
+
8
+ from rich.console import Console
9
+
10
+ from crackerjack.models.protocols import OptionsProtocol
11
+ from crackerjack.models.task import SessionTracker
12
+
13
+
14
+ class SessionCoordinator:
15
+ def __init__(
16
+ self,
17
+ console: Console,
18
+ pkg_path: Path,
19
+ web_job_id: str | None = None,
20
+ ) -> None:
21
+ self.console = console
22
+ self.pkg_path = pkg_path
23
+ self.session_tracker: SessionTracker | None = None
24
+ self._cleanup_handlers: list[t.Callable[[], None]] = []
25
+ self._thread_pool = None
26
+ self._lock_files: set[Path] = set()
27
+
28
+ import uuid
29
+
30
+ self.session_id = web_job_id or str(uuid.uuid4())
31
+ self.web_job_id = web_job_id
32
+ self.start_time = time.time()
33
+ self.tasks: dict[str, t.Any] = {}
34
+ self.current_task: str | None = None
35
+ self.success: bool = False
36
+
37
+ self._setup_logging()
38
+
39
+ if self.web_job_id:
40
+ self._setup_websocket_progress_file()
41
+
42
+ def start_session(self, task_name: str) -> None:
43
+ self.current_task = task_name
44
+
45
+ def end_session(self, success: bool = True) -> None:
46
+ self.success = success
47
+ self.end_time = time.time()
48
+ if success:
49
+ self.complete_task("session", "Session completed successfully")
50
+ else:
51
+ self.fail_task("session", "Session completed with errors")
52
+
53
+ def initialize_session_tracking(self, options: OptionsProtocol) -> None:
54
+ if hasattr(options, "track_progress") and options.track_progress:
55
+ import uuid
56
+
57
+ self.session_tracker = SessionTracker(
58
+ console=self.console,
59
+ session_id=str(uuid.uuid4()),
60
+ start_time=time.time(),
61
+ )
62
+
63
+ def track_task(self, task_id: str, task_name: str) -> str:
64
+ import time
65
+
66
+ task_obj = type(
67
+ "Task",
68
+ (),
69
+ {
70
+ "task_id": task_id,
71
+ "description": task_name,
72
+ "start_time": time.time(),
73
+ "status": "in_progress",
74
+ "details": None,
75
+ "end_time": None,
76
+ "progress": 0,
77
+ },
78
+ )()
79
+
80
+ self.tasks[task_id] = task_obj
81
+
82
+ if self.session_tracker:
83
+ self.session_tracker.start_task(task_id, task_name)
84
+
85
+ return task_id
86
+
87
+ def update_task(
88
+ self,
89
+ task_id: str,
90
+ status: str,
91
+ details: str | None = None,
92
+ progress: int | None = None,
93
+ ) -> None:
94
+ if task_id in self.tasks:
95
+ task = self.tasks[task_id]
96
+ task.status = status
97
+ if details:
98
+ task.details = details
99
+ if progress is not None:
100
+ task.progress = progress
101
+
102
+ if status in ("completed", "failed"):
103
+ task.end_time = time.time()
104
+
105
+ def complete_task(self, task_id: str, details: str | None = None) -> None:
106
+ if self.session_tracker:
107
+ self.session_tracker.complete_task(task_id, details=details)
108
+
109
+ def fail_task(self, task_id: str, error: str) -> None:
110
+ if self.session_tracker:
111
+ self.session_tracker.fail_task(task_id, error)
112
+
113
+ def get_session_summary(self) -> dict[str, int] | None:
114
+ if self.session_tracker:
115
+ return self.session_tracker.get_summary()
116
+ return None
117
+
118
+ def get_summary(self) -> dict[str, t.Any]:
119
+ duration = getattr(self, "end_time", time.time()) - self.start_time
120
+ tasks_count = len(self.tasks)
121
+
122
+ if self.session_tracker:
123
+ return self.session_tracker.get_summary()
124
+
125
+ return {
126
+ "session_id": self.session_id,
127
+ "duration": duration,
128
+ "tasks_count": tasks_count,
129
+ "success": self.success,
130
+ "tasks": [
131
+ {
132
+ "task_id": task.task_id,
133
+ "description": task.description,
134
+ "status": task.status,
135
+ "details": task.details,
136
+ "start_time": task.start_time,
137
+ "end_time": task.end_time,
138
+ "progress": task.progress,
139
+ }
140
+ for task in self.tasks.values()
141
+ ],
142
+ }
143
+
144
+ def finalize_session(self, start_time: float, success: bool) -> None:
145
+ total_time = time.time() - start_time
146
+ if success:
147
+ self.complete_task(
148
+ "workflow",
149
+ f"Completed successfully in {total_time:.1f}s",
150
+ )
151
+ else:
152
+ self.complete_task(
153
+ "workflow",
154
+ f"Completed with issues in {total_time:.1f}s",
155
+ )
156
+
157
+ def register_cleanup(self, cleanup_handler: t.Callable[[], None]) -> None:
158
+ self._cleanup_handlers.append(cleanup_handler)
159
+
160
+ def track_lock_file(self, lock_file_path: Path) -> None:
161
+ self._lock_files.add(lock_file_path)
162
+
163
+ def cleanup_resources(self) -> None:
164
+ for cleanup_handler in self._cleanup_handlers:
165
+ with suppress(Exception):
166
+ cleanup_handler()
167
+
168
+ self._cleanup_temporary_files()
169
+
170
+ def _cleanup_temporary_files(self) -> None:
171
+ if not hasattr(self, "_cleanup_config") or self._cleanup_config is None:
172
+ self._cleanup_debug_logs()
173
+ self._cleanup_coverage_files()
174
+ self._cleanup_pycache_directories()
175
+ elif self._cleanup_config.auto_cleanup:
176
+ self._cleanup_debug_logs(keep_recent=self._cleanup_config.keep_debug_logs)
177
+ self._cleanup_coverage_files(
178
+ keep_recent=self._cleanup_config.keep_coverage_files,
179
+ )
180
+ self._cleanup_pycache_directories()
181
+
182
+ def set_cleanup_config(self, cleanup_config: t.Any) -> None:
183
+ self._cleanup_config = cleanup_config
184
+
185
+ def _cleanup_debug_logs(self, keep_recent: int = 5) -> None:
186
+ with suppress(Exception):
187
+ from crackerjack.services.log_manager import get_log_manager
188
+
189
+ log_manager = get_log_manager()
190
+
191
+ log_manager.rotate_logs(
192
+ log_manager.debug_dir,
193
+ "debug-*.log",
194
+ max_files=keep_recent,
195
+ max_age_days=7,
196
+ )
197
+
198
+ legacy_pattern = "crackerjack-debug-*.log"
199
+ legacy_files = sorted(
200
+ self.pkg_path.glob(legacy_pattern),
201
+ key=lambda p: p.stat().st_mtime,
202
+ )
203
+
204
+ for old_file in legacy_files[:-keep_recent]:
205
+ with suppress(FileNotFoundError, PermissionError):
206
+ old_file.unlink()
207
+
208
+ def _cleanup_coverage_files(self, keep_recent: int = 10) -> None:
209
+ with suppress(Exception):
210
+ # Clean up coverage files from cache directory
211
+ cache_dir = Path.home() / ".cache" / "crackerjack" / "coverage"
212
+ if cache_dir.exists():
213
+ pattern = ".coverage*"
214
+ coverage_files = sorted(
215
+ cache_dir.glob(pattern),
216
+ key=lambda p: p.stat().st_mtime,
217
+ )
218
+
219
+ for old_file in coverage_files[:-keep_recent]:
220
+ with suppress(FileNotFoundError, PermissionError):
221
+ old_file.unlink()
222
+
223
+ # Also clean up any legacy coverage files from project root
224
+ pattern = ".coverage.*"
225
+ coverage_files = sorted(
226
+ self.pkg_path.glob(pattern),
227
+ key=lambda p: p.stat().st_mtime,
228
+ )
229
+
230
+ for old_file in coverage_files:
231
+ with suppress(FileNotFoundError, PermissionError):
232
+ old_file.unlink()
233
+
234
+ def _cleanup_pycache_directories(self) -> None:
235
+ """Remove __pycache__ directories from the package to keep repo clean."""
236
+ with suppress(Exception):
237
+ import shutil
238
+
239
+ # Clean __pycache__ directories in package
240
+ for pycache_dir in self.pkg_path.rglob("__pycache__"):
241
+ if pycache_dir.is_dir():
242
+ with suppress(FileNotFoundError, PermissionError):
243
+ shutil.rmtree(pycache_dir)
244
+
245
+ def _setup_logging(self) -> None:
246
+ logger = logging.getLogger("crackerjack")
247
+ if not logger.handlers:
248
+ handler = logging.StreamHandler()
249
+ handler.setLevel(logging.WARNING)
250
+ logger.addHandler(handler)
251
+ logger.setLevel(logging.WARNING)
252
+
253
+ def _setup_websocket_progress_file(self) -> None:
254
+ import tempfile
255
+
256
+ self.progress_dir = Path(tempfile.gettempdir()) / "crackerjack-mcp-progress"
257
+ self.progress_file = self.progress_dir / f"job-{self.web_job_id}.json"
258
+
259
+ if self.progress_file.exists():
260
+ self._update_websocket_progress("running", "Crackerjack process started")
261
+
262
+ def _update_websocket_progress(self, status: str, message: str) -> None:
263
+ if not hasattr(self, "progress_file") or not self.progress_file:
264
+ return
265
+
266
+ try:
267
+ progress_data = {}
268
+ if self.progress_file.exists():
269
+ progress_data = json.loads(self.progress_file.read_text())
270
+
271
+ progress_data.update(
272
+ {
273
+ "status": status,
274
+ "message": message,
275
+ "updated_at": time.time(),
276
+ "current_stage": message,
277
+ },
278
+ )
279
+
280
+ self.progress_file.write_text(json.dumps(progress_data, indent=2))
281
+
282
+ except Exception as e:
283
+ self.console.print(
284
+ f"[dim yellow]Warning: Could not update progress file: {e}[/dim yellow]",
285
+ )
286
+
287
+ def update_stage(self, stage: str, status: str) -> None:
288
+ if self.web_job_id:
289
+ self._update_websocket_progress(status, f"{stage}: {status}")