crackerjack 0.29.0__py3-none-any.whl → 0.31.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crackerjack might be problematic.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +225 -253
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +169 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +652 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +401 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +670 -0
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +561 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +640 -0
- crackerjack/dynamic_config.py +577 -0
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +411 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +435 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +144 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +615 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +370 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +141 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +360 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +347 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +347 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +395 -0
- crackerjack/services/git.py +165 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +847 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.4.dist-info/METADATA +742 -0
- crackerjack-0.31.4.dist-info/RECORD +148 -0
- crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/.pre-commit-config-ai.yaml +0 -149
- crackerjack/.pre-commit-config-fast.yaml +0 -69
- crackerjack/.pre-commit-config.yaml +0 -114
- crackerjack/crackerjack.py +0 -4140
- crackerjack/pyproject.toml +0 -285
- crackerjack-0.29.0.dist-info/METADATA +0 -1289
- crackerjack-0.29.0.dist-info/RECORD +0 -17
- {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
- {crackerjack-0.29.0.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
crackerjack/crackerjack.py
DELETED
|
@@ -1,4140 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
import json
|
|
3
|
-
import operator
|
|
4
|
-
import re
|
|
5
|
-
import subprocess
|
|
6
|
-
import time
|
|
7
|
-
import typing as t
|
|
8
|
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
|
9
|
-
from contextlib import suppress
|
|
10
|
-
from dataclasses import dataclass
|
|
11
|
-
from functools import lru_cache
|
|
12
|
-
from pathlib import Path
|
|
13
|
-
from subprocess import CompletedProcess
|
|
14
|
-
from subprocess import run as execute
|
|
15
|
-
from tomllib import loads
|
|
16
|
-
|
|
17
|
-
import aiofiles
|
|
18
|
-
from pydantic import BaseModel
|
|
19
|
-
from rich.console import Console
|
|
20
|
-
from tomli_w import dumps
|
|
21
|
-
|
|
22
|
-
from .errors import ErrorCode, ExecutionError
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
@dataclass
|
|
26
|
-
class HookResult:
|
|
27
|
-
id: str
|
|
28
|
-
name: str
|
|
29
|
-
status: str
|
|
30
|
-
duration: float
|
|
31
|
-
files_processed: int = 0
|
|
32
|
-
issues_found: list[str] | None = None
|
|
33
|
-
stage: str = "pre-commit"
|
|
34
|
-
|
|
35
|
-
def __post_init__(self) -> None:
|
|
36
|
-
if self.issues_found is None:
|
|
37
|
-
self.issues_found = []
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
@dataclass
|
|
41
|
-
class TaskStatus:
|
|
42
|
-
id: str
|
|
43
|
-
name: str
|
|
44
|
-
status: str
|
|
45
|
-
start_time: float | None = None
|
|
46
|
-
end_time: float | None = None
|
|
47
|
-
duration: float | None = None
|
|
48
|
-
details: str | None = None
|
|
49
|
-
error_message: str | None = None
|
|
50
|
-
files_changed: list[str] | None = None
|
|
51
|
-
|
|
52
|
-
def __post_init__(self) -> None:
|
|
53
|
-
if self.files_changed is None:
|
|
54
|
-
self.files_changed = []
|
|
55
|
-
if self.start_time is not None and self.end_time is not None:
|
|
56
|
-
self.duration = self.end_time - self.start_time
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
class SessionTracker(BaseModel, arbitrary_types_allowed=True):
|
|
60
|
-
console: Console
|
|
61
|
-
session_id: str
|
|
62
|
-
start_time: float
|
|
63
|
-
progress_file: Path
|
|
64
|
-
tasks: dict[str, TaskStatus] = {}
|
|
65
|
-
current_task: str | None = None
|
|
66
|
-
metadata: dict[str, t.Any] = {}
|
|
67
|
-
|
|
68
|
-
def __init__(self, **data: t.Any) -> None:
|
|
69
|
-
super().__init__(**data)
|
|
70
|
-
if not self.tasks:
|
|
71
|
-
self.tasks = {}
|
|
72
|
-
if not self.metadata:
|
|
73
|
-
self.metadata = {}
|
|
74
|
-
|
|
75
|
-
def start_task(
|
|
76
|
-
self, task_id: str, task_name: str, details: str | None = None
|
|
77
|
-
) -> None:
|
|
78
|
-
task = TaskStatus(
|
|
79
|
-
id=task_id,
|
|
80
|
-
name=task_name,
|
|
81
|
-
status="in_progress",
|
|
82
|
-
start_time=time.time(),
|
|
83
|
-
details=details,
|
|
84
|
-
)
|
|
85
|
-
self.tasks[task_id] = task
|
|
86
|
-
self.current_task = task_id
|
|
87
|
-
self._update_progress_file()
|
|
88
|
-
self.console.print(f"[yellow]⏳[/yellow] Started: {task_name}")
|
|
89
|
-
|
|
90
|
-
def complete_task(
|
|
91
|
-
self,
|
|
92
|
-
task_id: str,
|
|
93
|
-
details: str | None = None,
|
|
94
|
-
files_changed: list[str] | None = None,
|
|
95
|
-
) -> None:
|
|
96
|
-
if task_id in self.tasks:
|
|
97
|
-
task = self.tasks[task_id]
|
|
98
|
-
task.status = "completed"
|
|
99
|
-
task.end_time = time.time()
|
|
100
|
-
task.duration = task.end_time - (task.start_time or task.end_time)
|
|
101
|
-
if details:
|
|
102
|
-
task.details = details
|
|
103
|
-
if files_changed:
|
|
104
|
-
task.files_changed = files_changed
|
|
105
|
-
self._update_progress_file()
|
|
106
|
-
self.console.print(f"[green]✅[/green] Completed: {task.name}")
|
|
107
|
-
if self.current_task == task_id:
|
|
108
|
-
self.current_task = None
|
|
109
|
-
|
|
110
|
-
def fail_task(
|
|
111
|
-
self,
|
|
112
|
-
task_id: str,
|
|
113
|
-
error_message: str,
|
|
114
|
-
details: str | None = None,
|
|
115
|
-
) -> None:
|
|
116
|
-
if task_id in self.tasks:
|
|
117
|
-
task = self.tasks[task_id]
|
|
118
|
-
task.status = "failed"
|
|
119
|
-
task.end_time = time.time()
|
|
120
|
-
task.duration = task.end_time - (task.start_time or task.end_time)
|
|
121
|
-
task.error_message = error_message
|
|
122
|
-
if details:
|
|
123
|
-
task.details = details
|
|
124
|
-
self._update_progress_file()
|
|
125
|
-
self.console.print(f"[red]❌[/red] Failed: {task.name} - {error_message}")
|
|
126
|
-
if self.current_task == task_id:
|
|
127
|
-
self.current_task = None
|
|
128
|
-
|
|
129
|
-
def skip_task(self, task_id: str, reason: str) -> None:
|
|
130
|
-
if task_id in self.tasks:
|
|
131
|
-
task = self.tasks[task_id]
|
|
132
|
-
task.status = "skipped"
|
|
133
|
-
task.end_time = time.time()
|
|
134
|
-
task.details = f"Skipped: {reason}"
|
|
135
|
-
self._update_progress_file()
|
|
136
|
-
self.console.print(f"[blue]⏩[/blue] Skipped: {task.name} - {reason}")
|
|
137
|
-
if self.current_task == task_id:
|
|
138
|
-
self.current_task = None
|
|
139
|
-
|
|
140
|
-
def _update_progress_file(self) -> None:
|
|
141
|
-
try:
|
|
142
|
-
content = self._generate_markdown_content()
|
|
143
|
-
self.progress_file.write_text(content, encoding="utf-8")
|
|
144
|
-
except OSError as e:
|
|
145
|
-
self.console.print(
|
|
146
|
-
f"[yellow]Warning: Failed to update progress file: {e}[/yellow]"
|
|
147
|
-
)
|
|
148
|
-
|
|
149
|
-
def _generate_header_section(self) -> str:
|
|
150
|
-
from datetime import datetime
|
|
151
|
-
|
|
152
|
-
completed_tasks = sum(
|
|
153
|
-
1 for task in self.tasks.values() if task.status == "completed"
|
|
154
|
-
)
|
|
155
|
-
total_tasks = len(self.tasks)
|
|
156
|
-
overall_status = "In Progress"
|
|
157
|
-
if completed_tasks == total_tasks and total_tasks > 0:
|
|
158
|
-
overall_status = "Completed"
|
|
159
|
-
elif any(task.status == "failed" for task in self.tasks.values()):
|
|
160
|
-
overall_status = "Failed"
|
|
161
|
-
start_datetime = datetime.fromtimestamp(self.start_time)
|
|
162
|
-
|
|
163
|
-
return f"""# Crackerjack Session Progress: {self.session_id}
|
|
164
|
-
**Session ID**: {self.session_id}
|
|
165
|
-
**Started**: {start_datetime.strftime("%Y-%m-%d %H:%M:%S")}
|
|
166
|
-
**Status**: {overall_status}
|
|
167
|
-
**Progress**: {completed_tasks}/{total_tasks} tasks completed
|
|
168
|
-
|
|
169
|
-
- **Working Directory**: {self.metadata.get("working_dir", Path.cwd())}
|
|
170
|
-
- **Python Version**: {self.metadata.get("python_version", "Unknown")}
|
|
171
|
-
- **Crackerjack Version**: {self.metadata.get("crackerjack_version", "Unknown")}
|
|
172
|
-
- **CLI Options**: {self.metadata.get("cli_options", "Unknown")}
|
|
173
|
-
|
|
174
|
-
"""
|
|
175
|
-
|
|
176
|
-
def _generate_task_overview_section(self) -> str:
|
|
177
|
-
content = """## Task Progress Overview
|
|
178
|
-
| Task | Status | Duration | Details |
|
|
179
|
-
|------|--------|----------|---------|
|
|
180
|
-
"""
|
|
181
|
-
|
|
182
|
-
for task in self.tasks.values():
|
|
183
|
-
status_emoji = {
|
|
184
|
-
"pending": "⏸️",
|
|
185
|
-
"in_progress": "⏳",
|
|
186
|
-
"completed": "✅",
|
|
187
|
-
"failed": "❌",
|
|
188
|
-
"skipped": "⏩",
|
|
189
|
-
}.get(task.status, "❓")
|
|
190
|
-
|
|
191
|
-
duration_str = f"{task.duration:.2f}s" if task.duration else "N/A"
|
|
192
|
-
details_str = (
|
|
193
|
-
task.details[:50] + "..."
|
|
194
|
-
if task.details and len(task.details) > 50
|
|
195
|
-
else (task.details or "")
|
|
196
|
-
)
|
|
197
|
-
|
|
198
|
-
content += f"| {task.name} | {status_emoji} {task.status} | {duration_str} | {details_str} |\n"
|
|
199
|
-
|
|
200
|
-
return content + "\n"
|
|
201
|
-
|
|
202
|
-
def _generate_task_details_section(self) -> str:
|
|
203
|
-
content = "## Detailed Task Log\n\n"
|
|
204
|
-
for task in self.tasks.values():
|
|
205
|
-
content += self._format_task_detail(task)
|
|
206
|
-
return content
|
|
207
|
-
|
|
208
|
-
def _format_task_detail(self, task: TaskStatus) -> str:
|
|
209
|
-
from datetime import datetime
|
|
210
|
-
|
|
211
|
-
if task.status == "completed":
|
|
212
|
-
return self._format_completed_task(task, datetime)
|
|
213
|
-
elif task.status == "in_progress":
|
|
214
|
-
return self._format_in_progress_task(task, datetime)
|
|
215
|
-
elif task.status == "failed":
|
|
216
|
-
return self._format_failed_task(task, datetime)
|
|
217
|
-
elif task.status == "skipped":
|
|
218
|
-
return self._format_skipped_task(task)
|
|
219
|
-
return ""
|
|
220
|
-
|
|
221
|
-
def _format_completed_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
222
|
-
start_time = (
|
|
223
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
224
|
-
)
|
|
225
|
-
end_time = datetime.fromtimestamp(task.end_time) if task.end_time else "Unknown"
|
|
226
|
-
files_list = ", ".join(task.files_changed) if task.files_changed else "None"
|
|
227
|
-
return f"""### ✅ {task.name} - COMPLETED
|
|
228
|
-
- **Started**: {start_time}
|
|
229
|
-
- **Completed**: {end_time}
|
|
230
|
-
- **Duration**: {task.duration:.2f}s
|
|
231
|
-
- **Files Changed**: {files_list}
|
|
232
|
-
- **Details**: {task.details or "N/A"}
|
|
233
|
-
|
|
234
|
-
"""
|
|
235
|
-
|
|
236
|
-
def _format_in_progress_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
237
|
-
start_time = (
|
|
238
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
239
|
-
)
|
|
240
|
-
return f"""### ⏳ {task.name} - IN PROGRESS
|
|
241
|
-
- **Started**: {start_time}
|
|
242
|
-
- **Current Status**: {task.details or "Processing..."}
|
|
243
|
-
|
|
244
|
-
"""
|
|
245
|
-
|
|
246
|
-
def _format_failed_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
247
|
-
start_time = (
|
|
248
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
249
|
-
)
|
|
250
|
-
fail_time = (
|
|
251
|
-
datetime.fromtimestamp(task.end_time) if task.end_time else "Unknown"
|
|
252
|
-
)
|
|
253
|
-
return f"""### ❌ {task.name} - FAILED
|
|
254
|
-
- **Started**: {start_time}
|
|
255
|
-
- **Failed**: {fail_time}
|
|
256
|
-
- **Error**: {task.error_message or "Unknown error"}
|
|
257
|
-
- **Recovery Suggestions**: Check error details and retry the failed operation
|
|
258
|
-
|
|
259
|
-
"""
|
|
260
|
-
|
|
261
|
-
def _format_skipped_task(self, task: TaskStatus) -> str:
|
|
262
|
-
return f"""### ⏩ {task.name} - SKIPPED
|
|
263
|
-
- **Reason**: {task.details or "No reason provided"}
|
|
264
|
-
|
|
265
|
-
"""
|
|
266
|
-
|
|
267
|
-
def _generate_footer_section(self) -> str:
|
|
268
|
-
content = f"""## Session Recovery Information
|
|
269
|
-
If this session was interrupted, you can resume from where you left off:
|
|
270
|
-
|
|
271
|
-
```bash
|
|
272
|
-
python -m crackerjack --resume-from {self.progress_file.name}
|
|
273
|
-
```
|
|
274
|
-
|
|
275
|
-
"""
|
|
276
|
-
|
|
277
|
-
all_files: set[str] = set()
|
|
278
|
-
for task in self.tasks.values():
|
|
279
|
-
if task.files_changed:
|
|
280
|
-
all_files.update(task.files_changed)
|
|
281
|
-
|
|
282
|
-
if all_files:
|
|
283
|
-
for file_path in sorted(all_files):
|
|
284
|
-
content += f"- {file_path}\n"
|
|
285
|
-
else:
|
|
286
|
-
content += "- No files modified yet\n"
|
|
287
|
-
|
|
288
|
-
content += "\n## Next Steps\n\n"
|
|
289
|
-
|
|
290
|
-
pending_tasks = [
|
|
291
|
-
task for task in self.tasks.values() if task.status == "pending"
|
|
292
|
-
]
|
|
293
|
-
in_progress_tasks = [
|
|
294
|
-
task for task in self.tasks.values() if task.status == "in_progress"
|
|
295
|
-
]
|
|
296
|
-
failed_tasks = [task for task in self.tasks.values() if task.status == "failed"]
|
|
297
|
-
|
|
298
|
-
if failed_tasks:
|
|
299
|
-
content += "⚠️ Address failed tasks:\n"
|
|
300
|
-
for task in failed_tasks:
|
|
301
|
-
content += f"- Fix {task.name}: {task.error_message}\n"
|
|
302
|
-
elif in_progress_tasks:
|
|
303
|
-
content += "🔄 Currently working on:\n"
|
|
304
|
-
for task in in_progress_tasks:
|
|
305
|
-
content += f"- {task.name}\n"
|
|
306
|
-
elif pending_tasks:
|
|
307
|
-
content += "📋 Next tasks to complete:\n"
|
|
308
|
-
for task in pending_tasks:
|
|
309
|
-
content += f"- {task.name}\n"
|
|
310
|
-
else:
|
|
311
|
-
content += "🎉 All tasks completed successfully!\n"
|
|
312
|
-
|
|
313
|
-
return content
|
|
314
|
-
|
|
315
|
-
def _generate_markdown_content(self) -> str:
|
|
316
|
-
return (
|
|
317
|
-
self._generate_header_section()
|
|
318
|
-
+ self._generate_task_overview_section()
|
|
319
|
-
+ self._generate_task_details_section()
|
|
320
|
-
+ self._generate_footer_section()
|
|
321
|
-
)
|
|
322
|
-
|
|
323
|
-
@classmethod
|
|
324
|
-
def create_session(
|
|
325
|
-
cls,
|
|
326
|
-
console: Console,
|
|
327
|
-
session_id: str | None = None,
|
|
328
|
-
progress_file: Path | None = None,
|
|
329
|
-
metadata: dict[str, t.Any] | None = None,
|
|
330
|
-
) -> "SessionTracker":
|
|
331
|
-
import uuid
|
|
332
|
-
|
|
333
|
-
if session_id is None:
|
|
334
|
-
session_id = str(uuid.uuid4())[:8]
|
|
335
|
-
|
|
336
|
-
if progress_file is None:
|
|
337
|
-
timestamp = time.strftime("%Y%m%d-%H%M%S")
|
|
338
|
-
progress_file = Path(f"SESSION-PROGRESS-{timestamp}.md")
|
|
339
|
-
|
|
340
|
-
tracker = cls(
|
|
341
|
-
console=console,
|
|
342
|
-
session_id=session_id,
|
|
343
|
-
start_time=time.time(),
|
|
344
|
-
progress_file=progress_file,
|
|
345
|
-
metadata=metadata or {},
|
|
346
|
-
)
|
|
347
|
-
|
|
348
|
-
tracker._update_progress_file()
|
|
349
|
-
console.print(f"[green]📋[/green] Session tracking started: {progress_file}")
|
|
350
|
-
return tracker
|
|
351
|
-
|
|
352
|
-
@classmethod
|
|
353
|
-
def find_recent_progress_files(cls, directory: Path = Path.cwd()) -> list[Path]:
|
|
354
|
-
progress_files: list[Path] = []
|
|
355
|
-
for file_path in directory.glob("SESSION-PROGRESS-*.md"):
|
|
356
|
-
try:
|
|
357
|
-
if file_path.is_file():
|
|
358
|
-
progress_files.append(file_path)
|
|
359
|
-
except (OSError, PermissionError):
|
|
360
|
-
continue
|
|
361
|
-
|
|
362
|
-
return sorted(progress_files, key=lambda p: p.stat().st_mtime, reverse=True)
|
|
363
|
-
|
|
364
|
-
@classmethod
|
|
365
|
-
def is_session_incomplete(cls, progress_file: Path) -> bool:
|
|
366
|
-
if not progress_file.exists():
|
|
367
|
-
return False
|
|
368
|
-
try:
|
|
369
|
-
content = progress_file.read_text(encoding="utf-8")
|
|
370
|
-
has_in_progress = "⏳" in content or "in_progress" in content
|
|
371
|
-
has_failed = "❌" in content or "failed" in content
|
|
372
|
-
has_pending = "⏸️" in content or "pending" in content
|
|
373
|
-
stat = progress_file.stat()
|
|
374
|
-
age_hours = (time.time() - stat.st_mtime) / 3600
|
|
375
|
-
is_recent = age_hours < 24
|
|
376
|
-
|
|
377
|
-
return (has_in_progress or has_failed or has_pending) and is_recent
|
|
378
|
-
except (OSError, UnicodeDecodeError):
|
|
379
|
-
return False
|
|
380
|
-
|
|
381
|
-
@classmethod
|
|
382
|
-
def find_incomplete_session(cls, directory: Path = Path.cwd()) -> Path | None:
|
|
383
|
-
recent_files = cls.find_recent_progress_files(directory)
|
|
384
|
-
for progress_file in recent_files:
|
|
385
|
-
if cls.is_session_incomplete(progress_file):
|
|
386
|
-
return progress_file
|
|
387
|
-
|
|
388
|
-
return None
|
|
389
|
-
|
|
390
|
-
@classmethod
|
|
391
|
-
def auto_detect_session(
|
|
392
|
-
cls, console: Console, directory: Path = Path.cwd()
|
|
393
|
-
) -> "SessionTracker | None":
|
|
394
|
-
incomplete_session = cls.find_incomplete_session(directory)
|
|
395
|
-
if incomplete_session:
|
|
396
|
-
return cls._handle_incomplete_session(console, incomplete_session)
|
|
397
|
-
return None
|
|
398
|
-
|
|
399
|
-
@classmethod
|
|
400
|
-
def _handle_incomplete_session(
|
|
401
|
-
cls, console: Console, incomplete_session: Path
|
|
402
|
-
) -> "SessionTracker | None":
|
|
403
|
-
console.print(
|
|
404
|
-
f"[yellow]📋[/yellow] Found incomplete session: {incomplete_session.name}"
|
|
405
|
-
)
|
|
406
|
-
try:
|
|
407
|
-
content = incomplete_session.read_text(encoding="utf-8")
|
|
408
|
-
session_info = cls._parse_session_info(content)
|
|
409
|
-
cls._display_session_info(console, session_info)
|
|
410
|
-
return cls._prompt_resume_session(console, incomplete_session)
|
|
411
|
-
except Exception as e:
|
|
412
|
-
console.print(f"[yellow]⚠️[/yellow] Could not parse session file: {e}")
|
|
413
|
-
return None
|
|
414
|
-
|
|
415
|
-
@classmethod
|
|
416
|
-
def _parse_session_info(cls, content: str) -> dict[str, str | list[str] | None]:
|
|
417
|
-
import re
|
|
418
|
-
|
|
419
|
-
session_match = re.search(r"Session ID\*\*:\s*(.+)", content)
|
|
420
|
-
session_id: str = session_match.group(1).strip() if session_match else "unknown"
|
|
421
|
-
progress_match = re.search(r"Progress\*\*:\s*(\d+)/(\d+)", content)
|
|
422
|
-
progress_info: str | None = None
|
|
423
|
-
if progress_match:
|
|
424
|
-
completed = progress_match.group(1)
|
|
425
|
-
total = progress_match.group(2)
|
|
426
|
-
progress_info = f"{completed}/{total} tasks completed"
|
|
427
|
-
failed_tasks: list[str] = []
|
|
428
|
-
for line in content.split("\n"):
|
|
429
|
-
if "❌" in line and "- FAILED" in line:
|
|
430
|
-
task_match = re.search(r"### ❌ (.+?) - FAILED", line)
|
|
431
|
-
if task_match:
|
|
432
|
-
task_name: str = task_match.group(1)
|
|
433
|
-
failed_tasks.append(task_name)
|
|
434
|
-
|
|
435
|
-
return {
|
|
436
|
-
"session_id": session_id,
|
|
437
|
-
"progress_info": progress_info,
|
|
438
|
-
"failed_tasks": failed_tasks,
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
@classmethod
|
|
442
|
-
def _display_session_info(
|
|
443
|
-
cls, console: Console, session_info: dict[str, str | list[str] | None]
|
|
444
|
-
) -> None:
|
|
445
|
-
console.print(f"[cyan] Session ID:[/cyan] {session_info['session_id']}")
|
|
446
|
-
if session_info["progress_info"]:
|
|
447
|
-
console.print(f"[cyan] Progress:[/cyan] {session_info['progress_info']}")
|
|
448
|
-
if session_info["failed_tasks"]:
|
|
449
|
-
console.print(
|
|
450
|
-
f"[red] Failed tasks:[/red] {', '.join(session_info['failed_tasks'])}"
|
|
451
|
-
)
|
|
452
|
-
|
|
453
|
-
@classmethod
|
|
454
|
-
def _prompt_resume_session(
|
|
455
|
-
cls, console: Console, incomplete_session: Path
|
|
456
|
-
) -> "SessionTracker | None":
|
|
457
|
-
try:
|
|
458
|
-
import sys
|
|
459
|
-
|
|
460
|
-
console.print("[yellow]❓[/yellow] Resume this session? [y/N]: ", end="")
|
|
461
|
-
sys.stdout.flush()
|
|
462
|
-
response = input().strip().lower()
|
|
463
|
-
if response in ("y", "yes"):
|
|
464
|
-
return cls.resume_session(console, incomplete_session)
|
|
465
|
-
else:
|
|
466
|
-
console.print("[blue]ℹ️[/blue] Starting new session instead")
|
|
467
|
-
return None
|
|
468
|
-
except (KeyboardInterrupt, EOFError):
|
|
469
|
-
console.print("\n[blue]ℹ️[/blue] Starting new session instead")
|
|
470
|
-
return None
|
|
471
|
-
|
|
472
|
-
@classmethod
|
|
473
|
-
def resume_session(cls, console: Console, progress_file: Path) -> "SessionTracker":
|
|
474
|
-
if not progress_file.exists():
|
|
475
|
-
raise FileNotFoundError(f"Progress file not found: {progress_file}")
|
|
476
|
-
try:
|
|
477
|
-
content = progress_file.read_text(encoding="utf-8")
|
|
478
|
-
session_id = "resumed"
|
|
479
|
-
import re
|
|
480
|
-
|
|
481
|
-
session_match = re.search(r"Session ID\*\*:\s*(.+)", content)
|
|
482
|
-
if session_match:
|
|
483
|
-
session_id = session_match.group(1).strip()
|
|
484
|
-
tracker = cls(
|
|
485
|
-
console=console,
|
|
486
|
-
session_id=session_id,
|
|
487
|
-
start_time=time.time(),
|
|
488
|
-
progress_file=progress_file,
|
|
489
|
-
metadata={},
|
|
490
|
-
)
|
|
491
|
-
console.print(f"[green]🔄[/green] Resumed session from: {progress_file}")
|
|
492
|
-
return tracker
|
|
493
|
-
except Exception as e:
|
|
494
|
-
raise RuntimeError(f"Failed to resume session: {e}") from e
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
config_files = (
|
|
498
|
-
".gitignore",
|
|
499
|
-
".pre-commit-config.yaml",
|
|
500
|
-
".pre-commit-config-ai.yaml",
|
|
501
|
-
".pre-commit-config-fast.yaml",
|
|
502
|
-
".libcst.codemod.yaml",
|
|
503
|
-
)
|
|
504
|
-
|
|
505
|
-
documentation_files = (
|
|
506
|
-
"CLAUDE.md",
|
|
507
|
-
"RULES.md",
|
|
508
|
-
)
|
|
509
|
-
default_python_version = "3.13"
|
|
510
|
-
|
|
511
|
-
|
|
512
|
-
@t.runtime_checkable
|
|
513
|
-
class CommandRunner(t.Protocol):
|
|
514
|
-
def execute_command(
|
|
515
|
-
self, cmd: list[str], **kwargs: t.Any
|
|
516
|
-
) -> subprocess.CompletedProcess[str]: ...
|
|
517
|
-
|
|
518
|
-
|
|
519
|
-
@t.runtime_checkable
|
|
520
|
-
class OptionsProtocol(t.Protocol):
|
|
521
|
-
commit: bool
|
|
522
|
-
interactive: bool
|
|
523
|
-
no_config_updates: bool
|
|
524
|
-
verbose: bool
|
|
525
|
-
update_precommit: bool
|
|
526
|
-
update_docs: bool
|
|
527
|
-
force_update_docs: bool
|
|
528
|
-
compress_docs: bool
|
|
529
|
-
clean: bool
|
|
530
|
-
test: bool
|
|
531
|
-
benchmark: bool
|
|
532
|
-
benchmark_regression: bool
|
|
533
|
-
benchmark_regression_threshold: float
|
|
534
|
-
test_workers: int = 0
|
|
535
|
-
test_timeout: int = 0
|
|
536
|
-
publish: t.Any | None
|
|
537
|
-
bump: t.Any | None
|
|
538
|
-
all: t.Any | None
|
|
539
|
-
ai_agent: bool = False
|
|
540
|
-
create_pr: bool = False
|
|
541
|
-
skip_hooks: bool = False
|
|
542
|
-
comprehensive: bool = False
|
|
543
|
-
async_mode: bool = False
|
|
544
|
-
track_progress: bool = False
|
|
545
|
-
resume_from: str | None = None
|
|
546
|
-
progress_file: str | None = None
|
|
547
|
-
|
|
548
|
-
|
|
549
|
-
class CodeCleaner(BaseModel, arbitrary_types_allowed=True):
|
|
550
|
-
console: Console
|
|
551
|
-
|
|
552
|
-
def _analyze_workload_characteristics(self, files: list[Path]) -> dict[str, t.Any]:
|
|
553
|
-
if not files:
|
|
554
|
-
return {
|
|
555
|
-
"total_files": 0,
|
|
556
|
-
"total_size": 0,
|
|
557
|
-
"avg_file_size": 0,
|
|
558
|
-
"complexity": "low",
|
|
559
|
-
}
|
|
560
|
-
total_size = 0
|
|
561
|
-
large_files = 0
|
|
562
|
-
for file_path in files:
|
|
563
|
-
try:
|
|
564
|
-
size = file_path.stat().st_size
|
|
565
|
-
total_size += size
|
|
566
|
-
if size > 50_000:
|
|
567
|
-
large_files += 1
|
|
568
|
-
except (OSError, PermissionError):
|
|
569
|
-
continue
|
|
570
|
-
avg_file_size = total_size / len(files) if files else 0
|
|
571
|
-
large_file_ratio = large_files / len(files) if files else 0
|
|
572
|
-
if len(files) > 100 or avg_file_size > 20_000 or large_file_ratio > 0.3:
|
|
573
|
-
complexity = "high"
|
|
574
|
-
elif len(files) > 50 or avg_file_size > 10_000 or large_file_ratio > 0.1:
|
|
575
|
-
complexity = "medium"
|
|
576
|
-
else:
|
|
577
|
-
complexity = "low"
|
|
578
|
-
|
|
579
|
-
return {
|
|
580
|
-
"total_files": len(files),
|
|
581
|
-
"total_size": total_size,
|
|
582
|
-
"avg_file_size": avg_file_size,
|
|
583
|
-
"large_files": large_files,
|
|
584
|
-
"large_file_ratio": large_file_ratio,
|
|
585
|
-
"complexity": complexity,
|
|
586
|
-
}
|
|
587
|
-
|
|
588
|
-
def _calculate_optimal_workers(self, workload: dict[str, t.Any]) -> int:
|
|
589
|
-
import os
|
|
590
|
-
|
|
591
|
-
cpu_count = os.cpu_count() or 4
|
|
592
|
-
if workload["complexity"] == "high":
|
|
593
|
-
max_workers = min(cpu_count // 2, 3)
|
|
594
|
-
elif workload["complexity"] == "medium":
|
|
595
|
-
max_workers = min(cpu_count, 6)
|
|
596
|
-
else:
|
|
597
|
-
max_workers = min(cpu_count + 2, 8)
|
|
598
|
-
|
|
599
|
-
return min(max_workers, workload["total_files"])
|
|
600
|
-
|
|
601
|
-
def clean_files(self, pkg_dir: Path | None) -> None:
|
|
602
|
-
if pkg_dir is None:
|
|
603
|
-
return
|
|
604
|
-
python_files = [
|
|
605
|
-
file_path
|
|
606
|
-
for file_path in pkg_dir.rglob("*.py")
|
|
607
|
-
if not str(file_path.parent).startswith("__")
|
|
608
|
-
]
|
|
609
|
-
if not python_files:
|
|
610
|
-
return
|
|
611
|
-
workload = self._analyze_workload_characteristics(python_files)
|
|
612
|
-
max_workers = self._calculate_optimal_workers(workload)
|
|
613
|
-
if len(python_files) > 10:
|
|
614
|
-
self.console.print(
|
|
615
|
-
f"[dim]Cleaning {workload['total_files']} files "
|
|
616
|
-
f"({workload['complexity']} complexity) with {max_workers} workers[/dim]"
|
|
617
|
-
)
|
|
618
|
-
with ThreadPoolExecutor(max_workers=max_workers) as executor:
|
|
619
|
-
future_to_file = {
|
|
620
|
-
executor.submit(self.clean_file, file_path): file_path
|
|
621
|
-
for file_path in python_files
|
|
622
|
-
}
|
|
623
|
-
for future in as_completed(future_to_file):
|
|
624
|
-
file_path = future_to_file[future]
|
|
625
|
-
try:
|
|
626
|
-
future.result()
|
|
627
|
-
except Exception as e:
|
|
628
|
-
self.console.print(
|
|
629
|
-
f"[bold bright_red]❌ Error cleaning {file_path}: {e}[/bold bright_red]"
|
|
630
|
-
)
|
|
631
|
-
self._cleanup_cache_directories(pkg_dir)
|
|
632
|
-
|
|
633
|
-
def _cleanup_cache_directories(self, pkg_dir: Path) -> None:
|
|
634
|
-
with suppress(PermissionError, OSError):
|
|
635
|
-
pycache_dir = pkg_dir / "__pycache__"
|
|
636
|
-
if pycache_dir.exists():
|
|
637
|
-
for cache_file in pycache_dir.iterdir():
|
|
638
|
-
with suppress(PermissionError, OSError):
|
|
639
|
-
cache_file.unlink()
|
|
640
|
-
pycache_dir.rmdir()
|
|
641
|
-
parent_pycache = pkg_dir.parent / "__pycache__"
|
|
642
|
-
if parent_pycache.exists():
|
|
643
|
-
for cache_file in parent_pycache.iterdir():
|
|
644
|
-
with suppress(PermissionError, OSError):
|
|
645
|
-
cache_file.unlink()
|
|
646
|
-
parent_pycache.rmdir()
|
|
647
|
-
|
|
648
|
-
def clean_file(self, file_path: Path) -> None:
|
|
649
|
-
from crackerjack.errors import ExecutionError, handle_error
|
|
650
|
-
|
|
651
|
-
try:
|
|
652
|
-
code = file_path.read_text(encoding="utf-8")
|
|
653
|
-
original_code = code
|
|
654
|
-
cleaning_failed = False
|
|
655
|
-
try:
|
|
656
|
-
code = self.remove_line_comments_streaming(code)
|
|
657
|
-
except Exception as e:
|
|
658
|
-
self.console.print(
|
|
659
|
-
f"[bold bright_yellow]⚠️ Warning: Failed to remove line comments from {file_path}: {e}[/bold bright_yellow]"
|
|
660
|
-
)
|
|
661
|
-
code = original_code
|
|
662
|
-
cleaning_failed = True
|
|
663
|
-
try:
|
|
664
|
-
code = self.remove_docstrings_streaming(code)
|
|
665
|
-
except Exception as e:
|
|
666
|
-
self.console.print(
|
|
667
|
-
f"[bold bright_yellow]⚠️ Warning: Failed to remove docstrings from {file_path}: {e}[/bold bright_yellow]"
|
|
668
|
-
)
|
|
669
|
-
code = original_code
|
|
670
|
-
cleaning_failed = True
|
|
671
|
-
try:
|
|
672
|
-
code = self.remove_extra_whitespace_streaming(code)
|
|
673
|
-
except Exception as e:
|
|
674
|
-
self.console.print(
|
|
675
|
-
f"[bold bright_yellow]⚠️ Warning: Failed to remove extra whitespace from {file_path}: {e}[/bold bright_yellow]"
|
|
676
|
-
)
|
|
677
|
-
code = original_code
|
|
678
|
-
cleaning_failed = True
|
|
679
|
-
try:
|
|
680
|
-
code = self.reformat_code(code)
|
|
681
|
-
except Exception as e:
|
|
682
|
-
self.console.print(
|
|
683
|
-
f"[bold bright_yellow]⚠️ Warning: Failed to reformat {file_path}: {e}[/bold bright_yellow]"
|
|
684
|
-
)
|
|
685
|
-
code = original_code
|
|
686
|
-
cleaning_failed = True
|
|
687
|
-
file_path.write_text(code, encoding="utf-8")
|
|
688
|
-
if cleaning_failed:
|
|
689
|
-
self.console.print(
|
|
690
|
-
f"[bold yellow]⚡ Partially cleaned:[/bold yellow] [dim bright_white]{file_path}[/dim bright_white]"
|
|
691
|
-
)
|
|
692
|
-
else:
|
|
693
|
-
self.console.print(
|
|
694
|
-
f"[bold green]✨ Cleaned:[/bold green] [dim bright_white]{file_path}[/dim bright_white]"
|
|
695
|
-
)
|
|
696
|
-
except PermissionError as e:
|
|
697
|
-
self.console.print(
|
|
698
|
-
f"[red]Failed to clean: {file_path} (Permission denied)[/red]"
|
|
699
|
-
)
|
|
700
|
-
handle_error(
|
|
701
|
-
ExecutionError(
|
|
702
|
-
message=f"Permission denied while cleaning {file_path}",
|
|
703
|
-
error_code=ErrorCode.PERMISSION_ERROR,
|
|
704
|
-
details=str(e),
|
|
705
|
-
recovery=f"Check file permissions for {file_path} and ensure you have write access",
|
|
706
|
-
),
|
|
707
|
-
console=self.console,
|
|
708
|
-
exit_on_error=False,
|
|
709
|
-
)
|
|
710
|
-
except OSError as e:
|
|
711
|
-
self.console.print(
|
|
712
|
-
f"[red]Failed to clean: {file_path} (File system error)[/red]"
|
|
713
|
-
)
|
|
714
|
-
handle_error(
|
|
715
|
-
ExecutionError(
|
|
716
|
-
message=f"File system error while cleaning {file_path}",
|
|
717
|
-
error_code=ErrorCode.FILE_WRITE_ERROR,
|
|
718
|
-
details=str(e),
|
|
719
|
-
recovery=f"Check that {file_path} exists and is not being used by another process",
|
|
720
|
-
),
|
|
721
|
-
console=self.console,
|
|
722
|
-
exit_on_error=False,
|
|
723
|
-
)
|
|
724
|
-
except UnicodeDecodeError as e:
|
|
725
|
-
self.console.print(
|
|
726
|
-
f"[red]Failed to clean: {file_path} (Encoding error)[/red]"
|
|
727
|
-
)
|
|
728
|
-
handle_error(
|
|
729
|
-
ExecutionError(
|
|
730
|
-
message=f"Encoding error while reading {file_path}",
|
|
731
|
-
error_code=ErrorCode.FILE_READ_ERROR,
|
|
732
|
-
details=str(e),
|
|
733
|
-
recovery=f"File {file_path} contains non-UTF-8 characters. Please check the file encoding.",
|
|
734
|
-
),
|
|
735
|
-
console=self.console,
|
|
736
|
-
exit_on_error=False,
|
|
737
|
-
)
|
|
738
|
-
except Exception as e:
|
|
739
|
-
self.console.print(
|
|
740
|
-
f"[red]Failed to clean: {file_path} (Unexpected error)[/red]"
|
|
741
|
-
)
|
|
742
|
-
handle_error(
|
|
743
|
-
ExecutionError(
|
|
744
|
-
message=f"Unexpected error while cleaning {file_path}",
|
|
745
|
-
error_code=ErrorCode.UNEXPECTED_ERROR,
|
|
746
|
-
details=str(e),
|
|
747
|
-
recovery="This is an unexpected error. Please report this issue with the file content if possible.",
|
|
748
|
-
),
|
|
749
|
-
console=self.console,
|
|
750
|
-
exit_on_error=False,
|
|
751
|
-
)
|
|
752
|
-
|
|
753
|
-
def _initialize_docstring_state(self) -> dict[str, t.Any]:
|
|
754
|
-
return {
|
|
755
|
-
"in_docstring": False,
|
|
756
|
-
"delimiter": None,
|
|
757
|
-
"waiting": False,
|
|
758
|
-
"function_indent": 0,
|
|
759
|
-
"removed_docstring": False,
|
|
760
|
-
"in_multiline_def": False,
|
|
761
|
-
}
|
|
762
|
-
|
|
763
|
-
def _handle_function_definition(
|
|
764
|
-
self, line: str, stripped: str, state: dict[str, t.Any]
|
|
765
|
-
) -> bool:
|
|
766
|
-
if self._is_function_or_class_definition(stripped):
|
|
767
|
-
state["waiting"] = True
|
|
768
|
-
state["function_indent"] = len(line) - len(line.lstrip())
|
|
769
|
-
state["removed_docstring"] = False
|
|
770
|
-
state["in_multiline_def"] = not stripped.endswith(":")
|
|
771
|
-
return True
|
|
772
|
-
return False
|
|
773
|
-
|
|
774
|
-
def _handle_multiline_definition(
|
|
775
|
-
self, line: str, stripped: str, state: dict[str, t.Any]
|
|
776
|
-
) -> bool:
|
|
777
|
-
if state["in_multiline_def"]:
|
|
778
|
-
if stripped.endswith(":"):
|
|
779
|
-
state["in_multiline_def"] = False
|
|
780
|
-
return True
|
|
781
|
-
return False
|
|
782
|
-
|
|
783
|
-
def _handle_waiting_docstring(
|
|
784
|
-
self, lines: list[str], i: int, stripped: str, state: dict[str, t.Any]
|
|
785
|
-
) -> tuple[bool, str | None]:
|
|
786
|
-
if state["waiting"] and stripped:
|
|
787
|
-
if self._handle_docstring_start(stripped, state):
|
|
788
|
-
pass_line = None
|
|
789
|
-
if not state["in_docstring"]:
|
|
790
|
-
function_indent: int = state["function_indent"]
|
|
791
|
-
if self._needs_pass_statement(lines, i + 1, function_indent):
|
|
792
|
-
pass_line = " " * (function_indent + 4) + "pass"
|
|
793
|
-
state["removed_docstring"] = True
|
|
794
|
-
return True, pass_line
|
|
795
|
-
else:
|
|
796
|
-
state["waiting"] = False
|
|
797
|
-
return False, None
|
|
798
|
-
|
|
799
|
-
def _handle_docstring_content(
|
|
800
|
-
self, lines: list[str], i: int, stripped: str, state: dict[str, t.Any]
|
|
801
|
-
) -> tuple[bool, str | None]:
|
|
802
|
-
if state["in_docstring"]:
|
|
803
|
-
if self._handle_docstring_end(stripped, state):
|
|
804
|
-
pass_line = None
|
|
805
|
-
function_indent: int = state["function_indent"]
|
|
806
|
-
if self._needs_pass_statement(lines, i + 1, function_indent):
|
|
807
|
-
pass_line = " " * (function_indent + 4) + "pass"
|
|
808
|
-
state["removed_docstring"] = False
|
|
809
|
-
return True, pass_line
|
|
810
|
-
else:
|
|
811
|
-
return True, None
|
|
812
|
-
return False, None
|
|
813
|
-
|
|
814
|
-
def _process_line(
|
|
815
|
-
self, lines: list[str], i: int, line: str, state: dict[str, t.Any]
|
|
816
|
-
) -> tuple[bool, str | None]:
|
|
817
|
-
stripped = line.strip()
|
|
818
|
-
if self._handle_function_definition(line, stripped, state):
|
|
819
|
-
return True, line
|
|
820
|
-
if self._handle_multiline_definition(line, stripped, state):
|
|
821
|
-
return True, line
|
|
822
|
-
handled, pass_line = self._handle_waiting_docstring(lines, i, stripped, state)
|
|
823
|
-
if handled:
|
|
824
|
-
return True, pass_line
|
|
825
|
-
handled, pass_line = self._handle_docstring_content(lines, i, stripped, state)
|
|
826
|
-
if handled:
|
|
827
|
-
return True, pass_line
|
|
828
|
-
if state["removed_docstring"] and stripped:
|
|
829
|
-
state["removed_docstring"] = False
|
|
830
|
-
return False, line
|
|
831
|
-
|
|
832
|
-
def remove_docstrings(self, code: str) -> str:
|
|
833
|
-
lines = code.split("\n")
|
|
834
|
-
cleaned_lines: list[str] = []
|
|
835
|
-
docstring_state = self._initialize_docstring_state()
|
|
836
|
-
for i, line in enumerate(lines):
|
|
837
|
-
handled, result_line = self._process_line(lines, i, line, docstring_state)
|
|
838
|
-
if handled:
|
|
839
|
-
if result_line is not None:
|
|
840
|
-
cleaned_lines.append(result_line)
|
|
841
|
-
else:
|
|
842
|
-
cleaned_lines.append(line)
|
|
843
|
-
return "\n".join(cleaned_lines)
|
|
844
|
-
|
|
845
|
-
def _is_function_or_class_definition(self, stripped_line: str) -> bool:
|
|
846
|
-
return stripped_line.startswith(("def ", "class ", "async def "))
|
|
847
|
-
|
|
848
|
-
def _handle_docstring_start(self, stripped: str, state: dict[str, t.Any]) -> bool:
|
|
849
|
-
if not stripped.startswith(('"""', "'''", '"', "'")):
|
|
850
|
-
return False
|
|
851
|
-
if stripped.startswith(('"""', "'''")):
|
|
852
|
-
delimiter = stripped[:3]
|
|
853
|
-
else:
|
|
854
|
-
delimiter = stripped[0]
|
|
855
|
-
state["delimiter"] = delimiter
|
|
856
|
-
if self._is_single_line_docstring(stripped, delimiter):
|
|
857
|
-
state["waiting"] = False
|
|
858
|
-
return True
|
|
859
|
-
else:
|
|
860
|
-
state["in_docstring"] = True
|
|
861
|
-
state["waiting"] = False
|
|
862
|
-
return True
|
|
863
|
-
|
|
864
|
-
def _is_single_line_docstring(self, stripped: str, delimiter: str) -> bool:
|
|
865
|
-
return stripped.endswith(delimiter) and len(stripped) > len(delimiter)
|
|
866
|
-
|
|
867
|
-
def _handle_docstring_end(self, stripped: str, state: dict[str, t.Any]) -> bool:
|
|
868
|
-
if state["delimiter"] and stripped.endswith(state["delimiter"]):
|
|
869
|
-
state["in_docstring"] = False
|
|
870
|
-
state["delimiter"] = None
|
|
871
|
-
return True
|
|
872
|
-
return False
|
|
873
|
-
|
|
874
|
-
def _needs_pass_statement(
|
|
875
|
-
self, lines: list[str], start_index: int, function_indent: int
|
|
876
|
-
) -> bool:
|
|
877
|
-
for i in range(start_index, len(lines)):
|
|
878
|
-
line = lines[i]
|
|
879
|
-
stripped = line.strip()
|
|
880
|
-
if not stripped:
|
|
881
|
-
continue
|
|
882
|
-
line_indent = len(line) - len(line.lstrip())
|
|
883
|
-
if line_indent <= function_indent:
|
|
884
|
-
return True
|
|
885
|
-
if line_indent > function_indent:
|
|
886
|
-
return False
|
|
887
|
-
return True
|
|
888
|
-
|
|
889
|
-
def remove_line_comments(self, code: str) -> str:
|
|
890
|
-
lines = code.split("\n")
|
|
891
|
-
cleaned_lines: list[str] = []
|
|
892
|
-
for line in lines:
|
|
893
|
-
if not line.strip():
|
|
894
|
-
cleaned_lines.append(line)
|
|
895
|
-
continue
|
|
896
|
-
cleaned_line = self._process_line_for_comments(line)
|
|
897
|
-
if cleaned_line or not line.strip():
|
|
898
|
-
cleaned_lines.append(cleaned_line or line)
|
|
899
|
-
return "\n".join(cleaned_lines)
|
|
900
|
-
|
|
901
|
-
def _process_line_for_comments(self, line: str) -> str:
|
|
902
|
-
result: list[str] = []
|
|
903
|
-
string_state = {"in_string": None}
|
|
904
|
-
for i, char in enumerate(line):
|
|
905
|
-
if self._handle_string_character(char, i, line, string_state, result):
|
|
906
|
-
continue
|
|
907
|
-
elif self._handle_comment_character(char, i, line, string_state, result):
|
|
908
|
-
break
|
|
909
|
-
else:
|
|
910
|
-
result.append(char)
|
|
911
|
-
return "".join(result).rstrip()
|
|
912
|
-
|
|
913
|
-
def _handle_string_character(
|
|
914
|
-
self,
|
|
915
|
-
char: str,
|
|
916
|
-
index: int,
|
|
917
|
-
line: str,
|
|
918
|
-
string_state: dict[str, t.Any],
|
|
919
|
-
result: list[str],
|
|
920
|
-
) -> bool:
|
|
921
|
-
if char not in ("'", '"'):
|
|
922
|
-
return False
|
|
923
|
-
if index > 0 and line[index - 1] == "\\":
|
|
924
|
-
return False
|
|
925
|
-
if string_state["in_string"] is None:
|
|
926
|
-
string_state["in_string"] = char
|
|
927
|
-
elif string_state["in_string"] == char:
|
|
928
|
-
string_state["in_string"] = None
|
|
929
|
-
result.append(char)
|
|
930
|
-
return True
|
|
931
|
-
|
|
932
|
-
def _handle_comment_character(
|
|
933
|
-
self,
|
|
934
|
-
char: str,
|
|
935
|
-
index: int,
|
|
936
|
-
line: str,
|
|
937
|
-
string_state: dict[str, t.Any],
|
|
938
|
-
result: list[str],
|
|
939
|
-
) -> bool:
|
|
940
|
-
if char != "#" or string_state["in_string"] is not None:
|
|
941
|
-
return False
|
|
942
|
-
comment = line[index:].strip()
|
|
943
|
-
if self._is_special_comment_line(comment):
|
|
944
|
-
result.append(line[index:])
|
|
945
|
-
return True
|
|
946
|
-
|
|
947
|
-
def _is_special_comment_line(self, comment: str) -> bool:
|
|
948
|
-
special_comment_pattern = (
|
|
949
|
-
r"^#\s*(?:type:\s*ignore(?:\[.*?\])?|noqa|nosec|pragma:\s*no\s*cover"
|
|
950
|
-
r"|pylint:\s*disable|mypy:\s*ignore)"
|
|
951
|
-
)
|
|
952
|
-
return bool(re.match(special_comment_pattern, comment))
|
|
953
|
-
|
|
954
|
-
def remove_extra_whitespace(self, code: str) -> str:
|
|
955
|
-
lines = code.split("\n")
|
|
956
|
-
cleaned_lines: list[str] = []
|
|
957
|
-
function_tracker = {"in_function": False, "function_indent": 0}
|
|
958
|
-
import_tracker = {"in_imports": False, "last_import_type": None}
|
|
959
|
-
for i, line in enumerate(lines):
|
|
960
|
-
line = line.rstrip()
|
|
961
|
-
stripped_line = line.lstrip()
|
|
962
|
-
self._update_function_state(line, stripped_line, function_tracker)
|
|
963
|
-
self._update_import_state(line, stripped_line, import_tracker)
|
|
964
|
-
if not line:
|
|
965
|
-
if self._should_skip_empty_line(
|
|
966
|
-
i, lines, cleaned_lines, function_tracker, import_tracker
|
|
967
|
-
):
|
|
968
|
-
continue
|
|
969
|
-
cleaned_lines.append(line)
|
|
970
|
-
return "\n".join(self._remove_trailing_empty_lines(cleaned_lines))
|
|
971
|
-
|
|
972
|
-
def remove_docstrings_streaming(self, code: str) -> str:
|
|
973
|
-
if len(code) < 10000:
|
|
974
|
-
return self.remove_docstrings(code)
|
|
975
|
-
|
|
976
|
-
def process_lines():
|
|
977
|
-
lines = code.split("\n")
|
|
978
|
-
docstring_state = self._initialize_docstring_state()
|
|
979
|
-
for i, line in enumerate(lines):
|
|
980
|
-
handled, result_line = self._process_line(
|
|
981
|
-
lines, i, line, docstring_state
|
|
982
|
-
)
|
|
983
|
-
if handled:
|
|
984
|
-
if result_line is not None:
|
|
985
|
-
yield result_line
|
|
986
|
-
else:
|
|
987
|
-
yield line
|
|
988
|
-
|
|
989
|
-
return "\n".join(process_lines())
|
|
990
|
-
|
|
991
|
-
def remove_line_comments_streaming(self, code: str) -> str:
|
|
992
|
-
if len(code) < 10000:
|
|
993
|
-
return self.remove_line_comments(code)
|
|
994
|
-
|
|
995
|
-
def process_lines():
|
|
996
|
-
for line in code.split("\n"):
|
|
997
|
-
if not line.strip():
|
|
998
|
-
yield line
|
|
999
|
-
continue
|
|
1000
|
-
cleaned_line = self._process_line_for_comments(line)
|
|
1001
|
-
if cleaned_line or not line.strip():
|
|
1002
|
-
yield cleaned_line or line
|
|
1003
|
-
|
|
1004
|
-
return "\n".join(process_lines())
|
|
1005
|
-
|
|
1006
|
-
def remove_extra_whitespace_streaming(self, code: str) -> str:
|
|
1007
|
-
if len(code) < 10000:
|
|
1008
|
-
return self.remove_extra_whitespace(code)
|
|
1009
|
-
|
|
1010
|
-
def process_lines():
|
|
1011
|
-
lines = code.split("\n")
|
|
1012
|
-
function_tracker: dict[str, t.Any] = {
|
|
1013
|
-
"in_function": False,
|
|
1014
|
-
"function_indent": 0,
|
|
1015
|
-
}
|
|
1016
|
-
import_tracker: dict[str, t.Any] = {
|
|
1017
|
-
"in_imports": False,
|
|
1018
|
-
"last_import_type": None,
|
|
1019
|
-
}
|
|
1020
|
-
previous_lines: list[str] = []
|
|
1021
|
-
for i, line in enumerate(lines):
|
|
1022
|
-
line = line.rstrip()
|
|
1023
|
-
stripped_line = line.lstrip()
|
|
1024
|
-
self._update_function_state(line, stripped_line, function_tracker)
|
|
1025
|
-
self._update_import_state(line, stripped_line, import_tracker)
|
|
1026
|
-
if not line:
|
|
1027
|
-
if self._should_skip_empty_line(
|
|
1028
|
-
i, lines, previous_lines, function_tracker, import_tracker
|
|
1029
|
-
):
|
|
1030
|
-
continue
|
|
1031
|
-
previous_lines.append(line)
|
|
1032
|
-
yield line
|
|
1033
|
-
|
|
1034
|
-
processed_lines = list(process_lines())
|
|
1035
|
-
return "\n".join(self._remove_trailing_empty_lines(processed_lines))
|
|
1036
|
-
|
|
1037
|
-
def _update_function_state(
|
|
1038
|
-
self, line: str, stripped_line: str, function_tracker: dict[str, t.Any]
|
|
1039
|
-
) -> None:
|
|
1040
|
-
if stripped_line.startswith(("def ", "async def ")):
|
|
1041
|
-
function_tracker["in_function"] = True
|
|
1042
|
-
function_tracker["function_indent"] = len(line) - len(stripped_line)
|
|
1043
|
-
elif self._is_function_end(line, stripped_line, function_tracker):
|
|
1044
|
-
function_tracker["in_function"] = False
|
|
1045
|
-
function_tracker["function_indent"] = 0
|
|
1046
|
-
|
|
1047
|
-
def _update_import_state(
|
|
1048
|
-
self, line: str, stripped_line: str, import_tracker: dict[str, t.Any]
|
|
1049
|
-
) -> None:
|
|
1050
|
-
if stripped_line.startswith(("import ", "from ")):
|
|
1051
|
-
import_tracker["in_imports"] = True
|
|
1052
|
-
if self._is_stdlib_import(stripped_line):
|
|
1053
|
-
current_type = "stdlib"
|
|
1054
|
-
elif self._is_local_import(stripped_line):
|
|
1055
|
-
current_type = "local"
|
|
1056
|
-
else:
|
|
1057
|
-
current_type = "third_party"
|
|
1058
|
-
import_tracker["last_import_type"] = current_type
|
|
1059
|
-
elif stripped_line and not stripped_line.startswith("#"):
|
|
1060
|
-
import_tracker["in_imports"] = False
|
|
1061
|
-
import_tracker["last_import_type"] = None
|
|
1062
|
-
|
|
1063
|
-
@staticmethod
|
|
1064
|
-
@lru_cache(maxsize=256)
|
|
1065
|
-
def _is_stdlib_module(module: str) -> bool:
|
|
1066
|
-
stdlib_modules = {
|
|
1067
|
-
"os",
|
|
1068
|
-
"sys",
|
|
1069
|
-
"re",
|
|
1070
|
-
"json",
|
|
1071
|
-
"datetime",
|
|
1072
|
-
"time",
|
|
1073
|
-
"pathlib",
|
|
1074
|
-
"typing",
|
|
1075
|
-
"collections",
|
|
1076
|
-
"itertools",
|
|
1077
|
-
"functools",
|
|
1078
|
-
"operator",
|
|
1079
|
-
"math",
|
|
1080
|
-
"random",
|
|
1081
|
-
"uuid",
|
|
1082
|
-
"urllib",
|
|
1083
|
-
"http",
|
|
1084
|
-
"html",
|
|
1085
|
-
"xml",
|
|
1086
|
-
"email",
|
|
1087
|
-
"csv",
|
|
1088
|
-
"sqlite3",
|
|
1089
|
-
"subprocess",
|
|
1090
|
-
"threading",
|
|
1091
|
-
"multiprocessing",
|
|
1092
|
-
"asyncio",
|
|
1093
|
-
"contextlib",
|
|
1094
|
-
"dataclasses",
|
|
1095
|
-
"enum",
|
|
1096
|
-
"abc",
|
|
1097
|
-
"io",
|
|
1098
|
-
"tempfile",
|
|
1099
|
-
"shutil",
|
|
1100
|
-
"glob",
|
|
1101
|
-
"pickle",
|
|
1102
|
-
"copy",
|
|
1103
|
-
"heapq",
|
|
1104
|
-
"bisect",
|
|
1105
|
-
"array",
|
|
1106
|
-
"struct",
|
|
1107
|
-
"zlib",
|
|
1108
|
-
"hashlib",
|
|
1109
|
-
"hmac",
|
|
1110
|
-
"secrets",
|
|
1111
|
-
"base64",
|
|
1112
|
-
"binascii",
|
|
1113
|
-
"codecs",
|
|
1114
|
-
"locale",
|
|
1115
|
-
"platform",
|
|
1116
|
-
"socket",
|
|
1117
|
-
"ssl",
|
|
1118
|
-
"ipaddress",
|
|
1119
|
-
"logging",
|
|
1120
|
-
"warnings",
|
|
1121
|
-
"inspect",
|
|
1122
|
-
"ast",
|
|
1123
|
-
"dis",
|
|
1124
|
-
"tokenize",
|
|
1125
|
-
"keyword",
|
|
1126
|
-
"linecache",
|
|
1127
|
-
"traceback",
|
|
1128
|
-
"weakref",
|
|
1129
|
-
"gc",
|
|
1130
|
-
"ctypes",
|
|
1131
|
-
"unittest",
|
|
1132
|
-
"doctest",
|
|
1133
|
-
"pdb",
|
|
1134
|
-
"profile",
|
|
1135
|
-
"cProfile",
|
|
1136
|
-
"timeit",
|
|
1137
|
-
"trace",
|
|
1138
|
-
"calendar",
|
|
1139
|
-
"decimal",
|
|
1140
|
-
"fractions",
|
|
1141
|
-
"statistics",
|
|
1142
|
-
"tomllib",
|
|
1143
|
-
}
|
|
1144
|
-
return module in stdlib_modules
|
|
1145
|
-
|
|
1146
|
-
def _is_stdlib_import(self, stripped_line: str) -> bool:
|
|
1147
|
-
try:
|
|
1148
|
-
if stripped_line.startswith("from "):
|
|
1149
|
-
module = stripped_line.split()[1].split(".")[0]
|
|
1150
|
-
else:
|
|
1151
|
-
module = stripped_line.split()[1].split(".")[0]
|
|
1152
|
-
except IndexError:
|
|
1153
|
-
return False
|
|
1154
|
-
return CodeCleaner._is_stdlib_module(module)
|
|
1155
|
-
|
|
1156
|
-
def _is_local_import(self, stripped_line: str) -> bool:
|
|
1157
|
-
return stripped_line.startswith("from .") or " . " in stripped_line
|
|
1158
|
-
|
|
1159
|
-
def _is_function_end(
|
|
1160
|
-
self, line: str, stripped_line: str, function_tracker: dict[str, t.Any]
|
|
1161
|
-
) -> bool:
|
|
1162
|
-
return (
|
|
1163
|
-
function_tracker["in_function"]
|
|
1164
|
-
and bool(line)
|
|
1165
|
-
and (len(line) - len(stripped_line) <= function_tracker["function_indent"])
|
|
1166
|
-
and (not stripped_line.startswith(("@", "#")))
|
|
1167
|
-
)
|
|
1168
|
-
|
|
1169
|
-
def _should_skip_empty_line(
|
|
1170
|
-
self,
|
|
1171
|
-
line_idx: int,
|
|
1172
|
-
lines: list[str],
|
|
1173
|
-
cleaned_lines: list[str],
|
|
1174
|
-
function_tracker: dict[str, t.Any],
|
|
1175
|
-
import_tracker: dict[str, t.Any],
|
|
1176
|
-
) -> bool:
|
|
1177
|
-
if line_idx > 0 and cleaned_lines and (not cleaned_lines[-1]):
|
|
1178
|
-
return True
|
|
1179
|
-
|
|
1180
|
-
if self._is_import_section_separator(line_idx, lines, import_tracker):
|
|
1181
|
-
return False
|
|
1182
|
-
|
|
1183
|
-
if function_tracker["in_function"]:
|
|
1184
|
-
return self._should_skip_function_empty_line(line_idx, lines)
|
|
1185
|
-
return False
|
|
1186
|
-
|
|
1187
|
-
def _is_import_section_separator(
|
|
1188
|
-
self, line_idx: int, lines: list[str], import_tracker: dict[str, t.Any]
|
|
1189
|
-
) -> bool:
|
|
1190
|
-
if not import_tracker["in_imports"]:
|
|
1191
|
-
return False
|
|
1192
|
-
|
|
1193
|
-
next_line_idx = line_idx + 1
|
|
1194
|
-
while next_line_idx < len(lines) and not lines[next_line_idx].strip():
|
|
1195
|
-
next_line_idx += 1
|
|
1196
|
-
|
|
1197
|
-
if next_line_idx >= len(lines):
|
|
1198
|
-
return False
|
|
1199
|
-
|
|
1200
|
-
next_line = lines[next_line_idx].strip()
|
|
1201
|
-
if not next_line.startswith(("import ", "from ")):
|
|
1202
|
-
return False
|
|
1203
|
-
|
|
1204
|
-
if self._is_stdlib_import(next_line):
|
|
1205
|
-
next_type = "stdlib"
|
|
1206
|
-
elif self._is_local_import(next_line):
|
|
1207
|
-
next_type = "local"
|
|
1208
|
-
else:
|
|
1209
|
-
next_type = "third_party"
|
|
1210
|
-
|
|
1211
|
-
return import_tracker["last_import_type"] != next_type
|
|
1212
|
-
|
|
1213
|
-
def _should_skip_function_empty_line(self, line_idx: int, lines: list[str]) -> bool:
|
|
1214
|
-
next_line_idx = line_idx + 1
|
|
1215
|
-
if next_line_idx >= len(lines):
|
|
1216
|
-
return False
|
|
1217
|
-
next_line = lines[next_line_idx].strip()
|
|
1218
|
-
return not self._is_significant_next_line(next_line)
|
|
1219
|
-
|
|
1220
|
-
def _is_significant_next_line(self, next_line: str) -> bool:
|
|
1221
|
-
if next_line.startswith(("return", "class ", "def ", "async def ", "@")):
|
|
1222
|
-
return True
|
|
1223
|
-
if next_line in ("pass", "break", "continue", "raise"):
|
|
1224
|
-
return True
|
|
1225
|
-
return self._is_special_comment(next_line)
|
|
1226
|
-
|
|
1227
|
-
def _is_special_comment(self, line: str) -> bool:
|
|
1228
|
-
if not line.startswith("#"):
|
|
1229
|
-
return False
|
|
1230
|
-
special_patterns = ("type:", "noqa", "nosec", "pragma:", "pylint:", "mypy:")
|
|
1231
|
-
return any(pattern in line for pattern in special_patterns)
|
|
1232
|
-
|
|
1233
|
-
def _remove_trailing_empty_lines(self, lines: list[str]) -> list[str]:
|
|
1234
|
-
while lines and (not lines[-1]):
|
|
1235
|
-
lines.pop()
|
|
1236
|
-
return lines
|
|
1237
|
-
|
|
1238
|
-
    def reformat_code(self, code: str) -> str:
        from crackerjack.errors import handle_error

        try:
            import tempfile

            with tempfile.NamedTemporaryFile(
                suffix=".py", mode="w+", delete=False
            ) as temp:
                temp_path = Path(temp.name)
                temp_path.write_text(code)
            try:
                result = subprocess.run(
                    ["uv", "run", "ruff", "format", str(temp_path)],
                    check=False,
                    capture_output=True,
                    text=True,
                )
                if result.returncode == 0:
                    formatted_code = temp_path.read_text()
                else:
                    self.console.print(
                        f"[bold bright_yellow]⚠️ Ruff formatting failed: {result.stderr}[/bold bright_yellow]"
                    )
                    handle_error(
                        ExecutionError(
                            message="Code formatting failed",
                            error_code=ErrorCode.FORMATTING_ERROR,
                            details=result.stderr,
                            recovery="Check Ruff configuration and formatting rules",
                        ),
                        console=self.console,
                        exit_on_error=False,
                    )
                    formatted_code = code
            except Exception as e:
                self.console.print(
                    f"[bold bright_red]❌ Error running Ruff: {e}[/bold bright_red]"
                )
                handle_error(
                    ExecutionError(
                        message="Error running Ruff",
                        error_code=ErrorCode.FORMATTING_ERROR,
                        details=str(e),
                        recovery="Verify Ruff is installed and configured correctly",
                    ),
                    console=self.console,
                    exit_on_error=False,
                )
                formatted_code = code
            finally:
                with suppress(FileNotFoundError):
                    temp_path.unlink()
            return formatted_code
        except Exception as e:
            self.console.print(
                f"[bold bright_red]❌ Error during reformatting: {e}[/bold bright_red]"
            )
            handle_error(
                ExecutionError(
                    message="Error during reformatting",
                    error_code=ErrorCode.FORMATTING_ERROR,
                    details=str(e),
                    recovery="Check file permissions and disk space",
                ),
                console=self.console,
            )
            return code
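reformat_code shells out to Ruff through a throwaway file rather than formatting in memory. A hedged sketch of the same round trip, assuming `uv` and `ruff` are on PATH, with the input returned unchanged on failure:

```python
import subprocess
import tempfile
from pathlib import Path

code = "x=1\ny =  2\n"
# Create the file with delete=False so the path outlives the handle.
with tempfile.NamedTemporaryFile(suffix=".py", mode="w+", delete=False) as tmp:
    path = Path(tmp.name)
path.write_text(code)
result = subprocess.run(
    ["uv", "run", "ruff", "format", str(path)],
    check=False,
    capture_output=True,
    text=True,
)
# Fall back to the original source if Ruff reports a failure.
formatted = path.read_text() if result.returncode == 0 else code
path.unlink(missing_ok=True)
print(formatted)
```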
    async def clean_files_async(self, pkg_dir: Path | None) -> None:
        if pkg_dir is None:
            return
        python_files = [
            file_path
            for file_path in pkg_dir.rglob("*.py")
            if not str(file_path.parent).startswith("__")
        ]
        if not python_files:
            return
        max_concurrent = min(len(python_files), 8)
        semaphore = asyncio.Semaphore(max_concurrent)

        async def clean_with_semaphore(file_path: Path) -> None:
            async with semaphore:
                await self.clean_file_async(file_path)

        tasks = [clean_with_semaphore(file_path) for file_path in python_files]
        await asyncio.gather(*tasks, return_exceptions=True)

        await self._cleanup_cache_directories_async(pkg_dir)
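clean_files_async bounds its fan-out with a semaphore and gathers with return_exceptions=True so one bad file cannot abort the batch. The pattern in isolation, with a sleep standing in for real cleaning work:

```python
import asyncio


async def clean_one(name: str) -> str:
    await asyncio.sleep(0.01)  # stand-in for real file cleaning
    return name


async def clean_many(names: list[str], limit: int = 8) -> list[str | BaseException]:
    # Cap parallelism; exceptions come back as values instead of propagating.
    semaphore = asyncio.Semaphore(min(len(names), limit))

    async def bounded(name: str) -> str:
        async with semaphore:
            return await clean_one(name)

    return await asyncio.gather(*(bounded(n) for n in names), return_exceptions=True)


print(asyncio.run(clean_many(["a.py", "b.py", "c.py"])))
```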
    async def clean_file_async(self, file_path: Path) -> None:
        from crackerjack.errors import ExecutionError, handle_error

        try:
            async with aiofiles.open(file_path, encoding="utf-8") as f:  # type: ignore[misc]
                code: str = await f.read()  # type: ignore[misc]
            original_code: str = code
            cleaning_failed = False
            try:
                code = self.remove_line_comments_streaming(code)
            except Exception as e:
                self.console.print(
                    f"[bold bright_yellow]⚠️ Warning: Failed to remove line comments from {file_path}: {e}[/bold bright_yellow]"
                )
                code = original_code
                cleaning_failed = True
            try:
                code = self.remove_docstrings_streaming(code)
            except Exception as e:
                self.console.print(
                    f"[bold bright_yellow]⚠️ Warning: Failed to remove docstrings from {file_path}: {e}[/bold bright_yellow]"
                )
                code = original_code
                cleaning_failed = True
            try:
                code = self.remove_extra_whitespace_streaming(code)
            except Exception as e:
                self.console.print(
                    f"[bold bright_yellow]⚠️ Warning: Failed to remove extra whitespace from {file_path}: {e}[/bold bright_yellow]"
                )
                code = original_code
                cleaning_failed = True
            try:
                code = await self.reformat_code_async(code)
            except Exception as e:
                self.console.print(
                    f"[bold bright_yellow]⚠️ Warning: Failed to reformat {file_path}: {e}[/bold bright_yellow]"
                )
                code = original_code
                cleaning_failed = True
            async with aiofiles.open(file_path, "w", encoding="utf-8") as f:  # type: ignore[misc]
                await f.write(code)  # type: ignore[misc]
            if cleaning_failed:
                self.console.print(
                    f"[bold yellow]⚡ Partially cleaned:[/bold yellow] [dim bright_white]{file_path}[/dim bright_white]"
                )
            else:
                self.console.print(
                    f"[bold green]✨ Cleaned:[/bold green] [dim bright_white]{file_path}[/dim bright_white]"
                )
        except PermissionError as e:
            self.console.print(
                f"[red]Failed to clean: {file_path} (Permission denied)[/red]"
            )
            handle_error(
                ExecutionError(
                    message=f"Permission denied while cleaning {file_path}",
                    error_code=ErrorCode.PERMISSION_ERROR,
                    details=str(e),
                    recovery=f"Check file permissions for {file_path} and ensure you have write access",
                ),
                console=self.console,
                exit_on_error=False,
            )
        except OSError as e:
            self.console.print(
                f"[red]Failed to clean: {file_path} (File system error)[/red]"
            )
            handle_error(
                ExecutionError(
                    message=f"File system error while cleaning {file_path}",
                    error_code=ErrorCode.FILE_WRITE_ERROR,
                    details=str(e),
                    recovery=f"Check that {file_path} exists and is not being used by another process",
                ),
                console=self.console,
                exit_on_error=False,
            )
        except UnicodeDecodeError as e:
            self.console.print(
                f"[red]Failed to clean: {file_path} (Encoding error)[/red]"
            )
            handle_error(
                ExecutionError(
                    message=f"Encoding error while cleaning {file_path}",
                    error_code=ErrorCode.FILE_READ_ERROR,
                    details=str(e),
                    recovery=f"Check the file encoding of {file_path} - it may not be UTF-8",
                ),
                console=self.console,
                exit_on_error=False,
            )
        except Exception as e:
            self.console.print(f"[red]Unexpected error cleaning {file_path}: {e}[/red]")
            handle_error(
                ExecutionError(
                    message=f"Unexpected error while cleaning {file_path}",
                    error_code=ErrorCode.UNEXPECTED_ERROR,
                    details=str(e),
                    recovery="Please report this issue with the full error details",
                ),
                console=self.console,
                exit_on_error=False,
            )
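Each cleaning stage in clean_file_async runs in its own try/except; on any failure the file reverts to the original source (not merely the last good stage) and is flagged as partially cleaned. A compact sketch of that fallback policy, with a hypothetical stage function standing in for the streaming cleaners:

```python
from collections.abc import Callable


def apply_stages(code: str, stages: list[Callable[[str], str]]) -> tuple[str, bool]:
    original, failed = code, False
    for stage in stages:
        try:
            code = stage(code)
        except Exception:
            code, failed = original, True  # revert fully, but keep going
    return code, failed


def strip_trailing(code: str) -> str:
    # Hypothetical stage: drop trailing whitespace on every line.
    return "\n".join(line.rstrip() for line in code.splitlines()) + "\n"


print(apply_stages("x = 1   \n", [strip_trailing]))  # ('x = 1\n', False)
```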
    async def reformat_code_async(self, code: str) -> str:
        from crackerjack.errors import handle_error

        try:
            import tempfile

            with tempfile.NamedTemporaryFile(
                suffix=".py", mode="w+", delete=False
            ) as temp:
                temp_path = Path(temp.name)
            async with aiofiles.open(temp_path, "w", encoding="utf-8") as f:  # type: ignore[misc]
                await f.write(code)  # type: ignore[misc]
            try:
                proc = await asyncio.create_subprocess_exec(
                    "uv",
                    "run",
                    "ruff",
                    "format",
                    str(temp_path),
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                )
                _, stderr = await proc.communicate()
                if proc.returncode == 0:
                    async with aiofiles.open(temp_path, encoding="utf-8") as f:  # type: ignore[misc]
                        formatted_code = await f.read()  # type: ignore[misc]
                else:
                    self.console.print(
                        f"[bold bright_yellow]⚠️ Warning: Ruff format failed with return code {proc.returncode}[/bold bright_yellow]"
                    )
                    if stderr:
                        self.console.print(f"[dim]Ruff stderr: {stderr.decode()}[/dim]")
                    formatted_code = code
            except Exception as e:
                self.console.print(
                    f"[bold bright_red]❌ Error running Ruff: {e}[/bold bright_red]"
                )
                handle_error(
                    ExecutionError(
                        message="Error running Ruff",
                        error_code=ErrorCode.FORMATTING_ERROR,
                        details=str(e),
                        recovery="Verify Ruff is installed and configured correctly",
                    ),
                    console=self.console,
                    exit_on_error=False,
                )
                formatted_code = code
            finally:
                with suppress(FileNotFoundError):
                    temp_path.unlink()

            return formatted_code
        except Exception as e:
            self.console.print(
                f"[bold bright_red]❌ Error during reformatting: {e}[/bold bright_red]"
            )
            handle_error(
                ExecutionError(
                    message="Error during reformatting",
                    error_code=ErrorCode.FORMATTING_ERROR,
                    details=str(e),
                    recovery="Check file permissions and disk space",
                ),
                console=self.console,
                exit_on_error=False,
            )
            return code
    async def _cleanup_cache_directories_async(self, pkg_dir: Path) -> None:
        def cleanup_sync() -> None:
            with suppress(PermissionError, OSError):
                pycache_dir = pkg_dir / "__pycache__"
                if pycache_dir.exists():
                    for cache_file in pycache_dir.iterdir():
                        with suppress(PermissionError, OSError):
                            cache_file.unlink()
                    pycache_dir.rmdir()
                parent_pycache = pkg_dir.parent / "__pycache__"
                if parent_pycache.exists():
                    for cache_file in parent_pycache.iterdir():
                        with suppress(PermissionError, OSError):
                            cache_file.unlink()
                    parent_pycache.rmdir()

        loop = asyncio.get_event_loop()
        await loop.run_in_executor(None, cleanup_sync)
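The cache cleanup wraps blocking Path operations in run_in_executor so deletions happen off the event loop. The same offloading pattern reduced to a runnable sketch (get_running_loop is the modern spelling of the get_event_loop call used above; the "build" path is purely illustrative):

```python
import asyncio
from pathlib import Path


def remove_pycache(root: Path) -> None:
    # Blocking filesystem work, safe to run in a worker thread.
    cache = root / "__pycache__"
    if cache.exists():
        for item in cache.iterdir():
            item.unlink()
        cache.rmdir()


async def main() -> None:
    loop = asyncio.get_running_loop()
    await loop.run_in_executor(None, remove_pycache, Path("build"))


asyncio.run(main())
```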
class ConfigManager(BaseModel, arbitrary_types_allowed=True):
    our_path: Path
    pkg_path: Path
    pkg_name: str
    console: Console
    our_toml_path: Path | None = None
    pkg_toml_path: Path | None = None
    python_version: str = default_python_version
    dry_run: bool = False

    def swap_package_name(self, value: list[str] | str) -> list[str] | str:
        if isinstance(value, list):
            value.remove("crackerjack")
            value.append(self.pkg_name)
        else:
            value = value.replace("crackerjack", self.pkg_name)
        return value

    def update_pyproject_configs(self) -> None:
        self._setup_toml_paths()
        if self._is_crackerjack_project():
            self._handle_crackerjack_project()
            return
        our_toml_config = self._load_our_toml()
        pkg_toml_config = self._load_pkg_toml()
        self._ensure_required_sections(pkg_toml_config)
        self._update_tool_settings(our_toml_config, pkg_toml_config)
        self._update_python_version(our_toml_config, pkg_toml_config)
        self._save_pkg_toml(pkg_toml_config)

    def _setup_toml_paths(self) -> None:
        toml_file = "pyproject.toml"
        self.our_toml_path = self.our_path / toml_file
        self.pkg_toml_path = self.pkg_path / toml_file

    def _is_crackerjack_project(self) -> bool:
        return self.pkg_path.stem == "crackerjack"

    def _handle_crackerjack_project(self) -> None:
        if self.our_toml_path and self.pkg_toml_path:
            self.our_toml_path.write_text(self.pkg_toml_path.read_text())

    def _load_our_toml(self) -> dict[str, t.Any]:
        if self.our_toml_path:
            return loads(self.our_toml_path.read_text())
        return {}

    def _load_pkg_toml(self) -> dict[str, t.Any]:
        if self.pkg_toml_path:
            return loads(self.pkg_toml_path.read_text())
        return {}

    def _ensure_required_sections(self, pkg_toml_config: dict[str, t.Any]) -> None:
        pkg_toml_config.setdefault("tool", {})
        pkg_toml_config.setdefault("project", {})
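swap_package_name rewrites copied configuration values for the target project, with separate list and string branches. Its observable behavior, exercised directly with a hypothetical target name "mypkg":

```python
# List branch: the old name is removed and the new one appended,
# so the entry moves to the end of the list.
value_list = ["crackerjack", "tests"]
value_list.remove("crackerjack")
value_list.append("mypkg")
print(value_list)  # ['tests', 'mypkg']

# String branch: a plain substring replacement.
print("crackerjack.errors".replace("crackerjack", "mypkg"))  # 'mypkg.errors'
```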
    def _update_tool_settings(
        self, our_toml_config: dict[str, t.Any], pkg_toml_config: dict[str, t.Any]
    ) -> None:
        for tool, settings in our_toml_config.get("tool", {}).items():
            if tool not in pkg_toml_config["tool"]:
                pkg_toml_config["tool"][tool] = {}
            pkg_tool_config = pkg_toml_config["tool"][tool]
            self._merge_tool_config(settings, pkg_tool_config, tool)

    def _merge_tool_config(
        self, our_config: dict[str, t.Any], pkg_config: dict[str, t.Any], tool: str
    ) -> None:
        for setting, value in our_config.items():
            if isinstance(value, dict):
                self._merge_nested_config(
                    setting, t.cast(dict[str, t.Any], value), pkg_config
                )
            else:
                self._merge_direct_config(setting, value, pkg_config)

    def _merge_nested_config(
        self, setting: str, value: dict[str, t.Any], pkg_config: dict[str, t.Any]
    ) -> None:
        if setting not in pkg_config:
            pkg_config[setting] = {}
        elif not isinstance(pkg_config[setting], dict):
            pkg_config[setting] = {}
        self._merge_tool_config(value, pkg_config[setting], "")
        for k, v in value.items():
            self._merge_nested_value(k, v, pkg_config[setting])

    def _merge_nested_value(
        self, key: str, value: t.Any, nested_config: dict[str, t.Any]
    ) -> None:
        if isinstance(value, str | list) and "crackerjack" in str(value):
            nested_config[key] = self.swap_package_name(t.cast(str | list[str], value))
        elif self._is_mergeable_list(key, value):
            existing = nested_config.get(key, [])
            if isinstance(existing, list) and isinstance(value, list):
                nested_config[key] = list(
                    set(t.cast(list[str], existing) + t.cast(list[str], value))
                )
            else:
                nested_config[key] = value
        elif key not in nested_config:
            nested_config[key] = value

    def _merge_direct_config(
        self, setting: str, value: t.Any, pkg_config: dict[str, t.Any]
    ) -> None:
        if isinstance(value, str | list) and "crackerjack" in str(value):
            pkg_config[setting] = self.swap_package_name(t.cast(str | list[str], value))
        elif self._is_mergeable_list(setting, value):
            existing = pkg_config.get(setting, [])
            if isinstance(existing, list) and isinstance(value, list):
                pkg_config[setting] = list(
                    set(t.cast(list[str], existing) + t.cast(list[str], value))
                )
            else:
                pkg_config[setting] = value
        elif setting not in pkg_config:
            pkg_config[setting] = value

    def _is_mergeable_list(self, key: str, value: t.Any) -> bool:
        return key in (
            "exclude-deps",
            "exclude",
            "excluded",
            "skips",
            "ignore",
        ) and isinstance(value, list)
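The merge helpers union "mergeable" list settings (exclude, ignore, skips, and so on) through a set, so duplicates collapse but ordering is not preserved; all other keys are only filled in when absent. A self-contained sketch of that policy:

```python
def merge_list_setting(key: str, incoming: list[str], config: dict) -> None:
    mergeable = ("exclude-deps", "exclude", "excluded", "skips", "ignore")
    if key in mergeable:
        existing = config.get(key, [])
        # Union via set: duplicates collapse, order is arbitrary.
        config[key] = list(set(existing + incoming))
    elif key not in config:
        config[key] = incoming


cfg = {"ignore": ["E501"]}
merge_list_setting("ignore", ["E501", "B101"], cfg)
print(sorted(cfg["ignore"]))  # ['B101', 'E501']
```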
    def _update_python_version(
        self, our_toml_config: dict[str, t.Any], pkg_toml_config: dict[str, t.Any]
    ) -> None:
        python_version_pattern = "\\s*W*(\\d\\.\\d*)"
        requires_python = our_toml_config.get("project", {}).get("requires-python", "")
        classifiers: list[str] = []
        for classifier in pkg_toml_config.get("project", {}).get("classifiers", []):
            classifier = re.sub(
                python_version_pattern, f" {self.python_version}", classifier
            )
            classifiers.append(classifier)
        pkg_toml_config["project"]["classifiers"] = classifiers
        if requires_python:
            pkg_toml_config["project"]["requires-python"] = requires_python

    def _save_pkg_toml(self, pkg_toml_config: dict[str, t.Any]) -> None:
        if self.pkg_toml_path:
            self.pkg_toml_path.write_text(dumps(pkg_toml_config))
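_update_python_version rewrites trove classifiers with a bare re.sub. Note the pattern string is kept verbatim from the source: the literal `W*` matches zero or more "W" characters (possibly a typo for `\W*`), so in practice only the leading whitespace plus the version digits are replaced. One entry through the same substitution:

```python
import re

pattern = "\\s*W*(\\d\\.\\d*)"  # verbatim from the source above
classifier = "Programming Language :: Python :: 3.12"
print(re.sub(pattern, " 3.13", classifier))
# Programming Language :: Python :: 3.13
```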
    def copy_configs(self) -> None:
        configs_to_add: list[str] = []
        for config in config_files:
            config_path = self.our_path / config
            pkg_config_path = self.pkg_path / config
            pkg_config_path.touch()
            if self.pkg_path.stem == "crackerjack":
                config_path.write_text(pkg_config_path.read_text())
                continue
            if config != ".gitignore":
                pkg_config_path.write_text(
                    config_path.read_text().replace("crackerjack", self.pkg_name)
                )
            configs_to_add.append(config)
        if configs_to_add:
            self.execute_command(["git", "add"] + configs_to_add)

    def copy_documentation_templates(
        self, force_update: bool = False, compress_docs: bool = False
    ) -> None:
        docs_to_add: list[str] = []
        for doc_file in documentation_files:
            if self._should_process_doc_file(doc_file):
                self._process_single_doc_file(
                    doc_file, force_update, compress_docs, docs_to_add
                )

        if docs_to_add:
            self.execute_command(["git", "add"] + docs_to_add)

    def _should_process_doc_file(self, doc_file: str) -> bool:
        doc_path = self.our_path / doc_file
        if not doc_path.exists():
            return False
        if self.pkg_path.stem == "crackerjack":
            return False
        return True

    def _process_single_doc_file(
        self,
        doc_file: str,
        force_update: bool,
        compress_docs: bool,
        docs_to_add: list[str],
    ) -> None:
        doc_path = self.our_path / doc_file
        pkg_doc_path = self.pkg_path / doc_file
        should_update = force_update or not pkg_doc_path.exists()

        if should_update:
            pkg_doc_path.touch()
            content = doc_path.read_text(encoding="utf-8")

            auto_compress = self._should_compress_doc(doc_file, compress_docs)
            updated_content = self._customize_documentation_content(
                content, doc_file, auto_compress
            )
            pkg_doc_path.write_text(updated_content, encoding="utf-8")
            docs_to_add.append(doc_file)

            self._print_doc_update_message(doc_file, auto_compress)

    def _should_compress_doc(self, doc_file: str, compress_docs: bool) -> bool:
        return compress_docs or (
            self.pkg_path.stem != "crackerjack" and doc_file == "CLAUDE.md"
        )

    def _print_doc_update_message(self, doc_file: str, auto_compress: bool) -> None:
        compression_note = (
            " (compressed for Claude Code)"
            if auto_compress and doc_file == "CLAUDE.md"
            else ""
        )
        self.console.print(
            f"[green]📋[/green] Updated {doc_file} with latest Crackerjack quality standards{compression_note}"
        )

    def _customize_documentation_content(
        self, content: str, filename: str, compress: bool = False
    ) -> str:
        if filename == "CLAUDE.md":
            return self._customize_claude_md(content, compress)
        elif filename == "RULES.md":
            return self._customize_rules_md(content)
        return content
    def _compress_claude_md(self, content: str, target_size: int = 30000) -> str:
        current_size = len(content)
        if current_size <= target_size:
            return content
        essential_sections = [
            "# ",
            "## Project Overview",
            "## Key Commands",
            "## Development Guidelines",
            "## Code Quality Compliance",
            "### Refurb Standards",
            "### Bandit Security Standards",
            "### Pyright Type Safety Standards",
            "## AI Code Generation Best Practices",
            "## Task Completion Requirements",
        ]
        compression_strategies = [
            self._remove_redundant_examples,
            self._compress_command_examples,
            self._remove_verbose_sections,
            self._compress_repeated_patterns,
            self._summarize_long_sections,
        ]
        compressed_content = content
        for strategy in compression_strategies:
            compressed_content = strategy(compressed_content)
            if len(compressed_content) <= target_size:
                break
        if len(compressed_content) > target_size:
            compressed_content = self._extract_essential_sections(
                compressed_content, essential_sections, target_size
            )

        return self._add_compression_notice(compressed_content)
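_compress_claude_md applies compression strategies in order, stopping as soon as the document fits the size budget, and only falls back to essential-section extraction when none suffice. The control flow with two toy strategies (squeeze_blank_runs and drop_trailing_spaces are illustrative, not the strategies above):

```python
import re
from collections.abc import Callable


def squeeze_blank_runs(text: str) -> str:
    return re.sub(r"\n{3,}", "\n\n", text)


def drop_trailing_spaces(text: str) -> str:
    return "\n".join(line.rstrip() for line in text.splitlines())


def compress(text: str, target: int, strategies: list[Callable[[str], str]]) -> str:
    if len(text) <= target:
        return text
    for strategy in strategies:
        text = strategy(text)
        if len(text) <= target:
            break  # stop as soon as the budget is met
    return text


doc = "# Title\n\n\n\nbody   \n" * 3
print(len(compress(doc, 40, [squeeze_blank_runs, drop_trailing_spaces])))
```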
    def _remove_redundant_examples(self, content: str) -> str:
        lines = content.split("\n")
        result = []
        # in_example_block doubles as a tri-state flag: False (outside a
        # fence), True (inside a kept block), or the string "skip" (inside
        # a block over the per-section budget).
        in_example_block = False
        example_count = 0
        max_examples_per_section = 2
        for line in lines:
            if line.strip().startswith("```"):
                if not in_example_block:
                    example_count += 1
                    if example_count <= max_examples_per_section:
                        result.append(line)
                        in_example_block = True
                    else:
                        in_example_block = "skip"
                else:
                    if in_example_block != "skip":
                        result.append(line)
                    in_example_block = False
            elif in_example_block == "skip":
                continue
            elif line.startswith(("## ", "### ")):
                example_count = 0
                result.append(line)
            else:
                result.append(line)

        return "\n".join(result)

    def _compress_command_examples(self, content: str) -> str:
        import re

        content = re.sub(
            r"```bash\n((?:[^`]+\n){3,})```",
            lambda m: "```bash\n"
            + "\n".join(m.group(1).split("\n")[:3])
            + "\n# ... (additional commands available)\n```",
            content,
            flags=re.MULTILINE,
        )

        return content

    def _remove_verbose_sections(self, content: str) -> str:
        sections_to_compress = [
            "## Recent Bug Fixes and Improvements",
            "## Development Memories",
            "## Self-Maintenance Protocol for AI Assistants",
            "## Pre-commit Hook Maintenance",
        ]
        lines = content.split("\n")
        result = []
        skip_section = False
        for line in lines:
            if any(line.startswith(section) for section in sections_to_compress):
                skip_section = True
                result.extend(
                    (line, "*[Detailed information available in full CLAUDE.md]*")
                )
                result.append("")
            elif line.startswith("## ") and skip_section:
                skip_section = False
                result.append(line)
            elif not skip_section:
                result.append(line)

        return "\n".join(result)

    def _compress_repeated_patterns(self, content: str) -> str:
        import re

        content = re.sub(r"\n{3,}", "\n\n", content)
        content = re.sub(
            r"(\*\*[A-Z][^*]+:\*\*[^\n]+\n){3,}",
            lambda m: m.group(0)[:200]
            + "...\n*[Additional patterns available in full documentation]*\n",
            content,
        )

        return content

    def _summarize_long_sections(self, content: str) -> str:
        lines = content.split("\n")
        result = []
        current_section = []
        section_header = ""
        for line in lines:
            if line.startswith(("### ", "## ")):
                if current_section and len("\n".join(current_section)) > 1000:
                    summary = self._create_section_summary(
                        section_header, current_section
                    )
                    result.extend(summary)
                else:
                    result.extend(current_section)
                current_section = [line]
                section_header = line
            else:
                current_section.append(line)
        if current_section:
            if len("\n".join(current_section)) > 1000:
                summary = self._create_section_summary(section_header, current_section)
                result.extend(summary)
            else:
                result.extend(current_section)

        return "\n".join(result)
    def _create_section_summary(
        self, header: str, section_lines: list[str]
    ) -> list[str]:
        summary = [header, ""]

        key_points = []
        for line in section_lines[2:]:
            if line.strip().startswith(("- ", "* ", "1. ", "2. ")):
                key_points.append(line)
            elif line.strip().startswith("**") and ":" in line:
                key_points.append(line)

            if len(key_points) >= 5:
                break

        if key_points:
            summary.extend(key_points[:5])
            summary.append("*[Complete details available in full CLAUDE.md]*")
        else:
            content_preview = " ".join(
                line.strip()
                for line in section_lines[2:10]
                if line.strip() and not line.startswith("#")
            )[:200]
            summary.extend(
                (
                    f"{content_preview}...",
                    "*[Full section available in complete documentation]*",
                )
            )

        summary.append("")
        return summary

    def _extract_essential_sections(
        self, content: str, essential_sections: list[str], target_size: int
    ) -> str:
        lines = content.split("\n")
        result = []
        current_section = []
        keep_section = False

        for line in lines:
            new_section_started = self._process_line_for_section(
                line, essential_sections, current_section, keep_section, result
            )
            if new_section_started is not None:
                current_section, keep_section = new_section_started
            else:
                current_section.append(line)

            if self._should_stop_extraction(result, target_size):
                break

        self._finalize_extraction(current_section, keep_section, result, target_size)
        return "\n".join(result)

    def _process_line_for_section(
        self,
        line: str,
        essential_sections: list[str],
        current_section: list[str],
        keep_section: bool,
        result: list[str],
    ) -> tuple[list[str], bool] | None:
        if any(line.startswith(section) for section in essential_sections):
            if current_section and keep_section:
                result.extend(current_section)
            return ([line], True)
        elif line.startswith(("## ", "### ")):
            if current_section and keep_section:
                result.extend(current_section)
            return ([line], False)
        return None

    def _should_stop_extraction(self, result: list[str], target_size: int) -> bool:
        return len("\n".join(result)) > target_size

    def _finalize_extraction(
        self,
        current_section: list[str],
        keep_section: bool,
        result: list[str],
        target_size: int,
    ) -> None:
        if current_section and keep_section and len("\n".join(result)) < target_size:
            result.extend(current_section)

    def _add_compression_notice(self, content: str) -> str:
        notice = """
*Note: This CLAUDE.md has been automatically compressed by Crackerjack to optimize for Claude Code usage.
Complete documentation is available in the source repository.*

"""

        lines = content.split("\n")
        if len(lines) > 5:
            lines.insert(5, notice)

        return "\n".join(lines)
    def _customize_claude_md(self, content: str, compress: bool = False) -> str:
        project_name = self.pkg_name
        content = content.replace("crackerjack", project_name).replace(
            "Crackerjack", project_name.title()
        )
        header = f"""# {project_name.upper()}.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

*This file was automatically generated by Crackerjack and contains the latest Python quality standards.*

{project_name.title()} is a Python project that follows modern development practices and maintains high code quality standards using automated tools and best practices.

"""

        lines = content.split("\n")
        start_idx = 0
        for i, line in enumerate(lines):
            if line.startswith(("## Development Guidelines", "## Code Quality")):
                start_idx = i
                break

        if start_idx > 0:
            relevant_content = "\n".join(lines[start_idx:])
            full_content = header + relevant_content
        else:
            full_content = header + content

        if compress:
            return self._compress_claude_md(full_content)
        return full_content

    def _customize_rules_md(self, content: str) -> str:
        project_name = self.pkg_name
        content = content.replace("crackerjack", project_name).replace(
            "Crackerjack", project_name.title()
        )
        header = f"""# {project_name.title()} Style Rules
*This file was automatically generated by Crackerjack and contains the latest Python quality standards.*

"""

        return header + content

    def execute_command(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")
        return execute(cmd, **kwargs)
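execute_command's dry-run branch prints the command and fabricates a successful CompletedProcess, so callers never need to special-case dry runs. A standalone equivalent:

```python
from subprocess import CompletedProcess, run


def execute_command(cmd: list[str], dry_run: bool = False) -> CompletedProcess[str]:
    if dry_run:
        print("→", " ".join(cmd))
        # Synthetic success: same shape as a real run, nothing executed.
        return CompletedProcess(cmd, 0, "", "")
    return run(cmd, check=False, capture_output=True, text=True)


print(execute_command(["echo", "hi"], dry_run=True).returncode)  # 0
```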
class ProjectManager(BaseModel, arbitrary_types_allowed=True):
    our_path: Path
    pkg_path: Path
    pkg_dir: Path | None = None
    pkg_name: str = "crackerjack"
    console: Console
    code_cleaner: CodeCleaner
    config_manager: ConfigManager
    dry_run: bool = False
    options: t.Any = None

    def _analyze_precommit_workload(self) -> dict[str, t.Any]:
        try:
            py_files = list(self.pkg_path.rglob("*.py"))
            js_files = list(self.pkg_path.rglob("*.js")) + list(
                self.pkg_path.rglob("*.ts")
            )
            yaml_files = list(self.pkg_path.rglob("*.yaml")) + list(
                self.pkg_path.rglob("*.yml")
            )
            md_files = list(self.pkg_path.rglob("*.md"))
            total_files = (
                len(py_files) + len(js_files) + len(yaml_files) + len(md_files)
            )
            total_size = 0
            for files in (py_files, js_files, yaml_files, md_files):
                for file_path in files:
                    try:
                        total_size += file_path.stat().st_size
                    except (OSError, PermissionError):
                        continue
            if total_files > 200 or total_size > 5_000_000:
                complexity = "high"
            elif total_files > 100 or total_size > 2_000_000:
                complexity = "medium"
            else:
                complexity = "low"

            return {
                "total_files": total_files,
                "py_files": len(py_files),
                "js_files": len(js_files),
                "yaml_files": len(yaml_files),
                "md_files": len(md_files),
                "total_size": total_size,
                "complexity": complexity,
            }
        except (OSError, PermissionError):
            return {"complexity": "medium", "total_files": 0}
    def _optimize_precommit_execution(
        self, workload: dict[str, t.Any]
    ) -> dict[str, t.Any]:
        import os

        env_vars = {}

        if workload["complexity"] == "high":
            env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 2))
        elif workload["complexity"] == "medium":
            env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 4))
        else:
            env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 6))

        # .get() with a default: the fallback workload dict returned on
        # analysis failure has no "total_size" key.
        if workload.get("total_size", 0) > 10_000_000:
            env_vars["PRE_COMMIT_MEMORY_LIMIT"] = "2G"

        return env_vars
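The concurrency heuristic above caps hook workers more aggressively as the workload grows. Whether pre-commit actually honors PRE_COMMIT_CONCURRENCY or PRE_COMMIT_MEMORY_LIMIT is not verified here; the names are taken from the source as-is. The sizing arithmetic in isolation:

```python
import os


def concurrency_for(complexity: str) -> str:
    cpus = os.cpu_count() or 4
    # Heavier workloads get a lower cap to limit memory pressure.
    cap = {"high": 2, "medium": 4}.get(complexity, 6)
    return str(min(cpus, cap))


for level in ("low", "medium", "high"):
    print(level, concurrency_for(level))
```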
    def update_pkg_configs(self) -> None:
        self.config_manager.copy_configs()
        installed_pkgs = self.execute_command(
            ["uv", "pip", "list", "--freeze"], capture_output=True, text=True
        ).stdout.splitlines()
        if not len([pkg for pkg in installed_pkgs if "pre-commit" in pkg]):
            self.console.print("\n" + "─" * 80)
            self.console.print(
                "[bold bright_blue]⚡ INIT[/bold bright_blue] [bold bright_white]First-time project setup[/bold bright_white]"
            )
            self.console.print("─" * 80 + "\n")
            if self.options and getattr(self.options, "ai_agent", False):
                import subprocess

                self.execute_command(
                    ["uv", "tool", "install", "keyring"],
                    capture_output=True,
                    stderr=subprocess.DEVNULL,
                )
            else:
                self.execute_command(["uv", "tool", "install", "keyring"])
            self.execute_command(["git", "init"])
            self.execute_command(["git", "branch", "-m", "main"])
            self.execute_command(["git", "add", "pyproject.toml", "uv.lock"])
            self.execute_command(["git", "config", "advice.addIgnoredFile", "false"])
            install_cmd = ["uv", "run", "pre-commit", "install"]
            if self.options and getattr(self.options, "ai_agent", False):
                install_cmd.extend(["-c", ".pre-commit-config-ai.yaml"])
            else:
                install_cmd.extend(["-c", ".pre-commit-config-fast.yaml"])
            self.execute_command(install_cmd)
            push_install_cmd = [
                "uv",
                "run",
                "pre-commit",
                "install",
                "--hook-type",
                "pre-push",
            ]
            self.execute_command(push_install_cmd)
        self.config_manager.update_pyproject_configs()

    def run_pre_commit(self) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        workload = self._analyze_precommit_workload()
        env_vars = self._optimize_precommit_execution(workload)
        total_files = workload.get("total_files", 0)
        if isinstance(total_files, int) and total_files > 50:
            self.console.print(
                f"[dim]Processing {total_files} files "
                f"({workload.get('complexity', 'unknown')} complexity) with {env_vars.get('PRE_COMMIT_CONCURRENCY', 'auto')} workers[/dim]"
            )
        config_file = self._select_precommit_config()
        cmd = ["uv", "run", "pre-commit", "run", "--all-files", "-c", config_file]
        import os

        env = os.environ.copy()
        env.update(env_vars)
        check_all = self.execute_command(cmd, env=env)
        if check_all.returncode > 0:
            self.execute_command(["uv", "lock"])
            self.console.print("\n[bold green]✓ Dependencies locked[/bold green]\n")
            check_all = self.execute_command(cmd, env=env)
            if check_all.returncode > 0:
                self.console.print(
                    "\n\n[bold red]❌ Pre-commit failed. Please fix errors.[/bold red]\n"
                )
                raise SystemExit(1)

    def _select_precommit_config(self) -> str:
        if hasattr(self, "options"):
            if getattr(self.options, "ai_agent", False):
                return ".pre-commit-config-ai.yaml"
            elif getattr(self.options, "comprehensive", False):
                return ".pre-commit-config.yaml"

        return ".pre-commit-config-fast.yaml"
    def run_pre_commit_with_analysis(self) -> list[HookResult]:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        config_file = self._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "-c",
            config_file,
            "--verbose",
        ]
        start_time = time.time()
        result = self.execute_command(cmd, capture_output=True, text=True)
        total_duration = time.time() - start_time
        hook_results = self._parse_hook_output(result.stdout, result.stderr)
        if self.options and getattr(self.options, "ai_agent", False):
            self._generate_hooks_analysis(hook_results, total_duration)
            self._generate_quality_metrics()
            self._generate_project_structure_analysis()
            self._generate_error_context_analysis()
            self._generate_ai_agent_summary()
        if result.returncode > 0:
            self.execute_command(["uv", "lock"])
            self.console.print("\n[bold green]✓ Dependencies locked[/bold green]\n")
            result = self.execute_command(cmd, capture_output=True, text=True)
            if result.returncode > 0:
                self.console.print(
                    "\n\n[bold red]❌ Pre-commit failed. Please fix errors.[/bold red]\n"
                )
                raise SystemExit(1)

        return hook_results

    def _parse_hook_output(self, stdout: str, stderr: str) -> list[HookResult]:
        hook_results: list[HookResult] = []
        lines = stdout.split("\n")
        for line in lines:
            if "..." in line and (
                "Passed" in line or "Failed" in line or "Skipped" in line
            ):
                hook_name = line.split("...")[0].strip()
                status = (
                    "passed"
                    if "Passed" in line
                    else "failed"
                    if "Failed" in line
                    else "skipped"
                )
                hook_results.append(
                    HookResult(
                        id=hook_name.lower().replace(" ", "-"),
                        name=hook_name,
                        status=status,
                        duration=0.0,
                        stage="pre-commit",
                    )
                )
            elif "- duration:" in line and hook_results:
                with suppress(ValueError, IndexError):
                    duration = float(line.split("duration:")[1].strip().rstrip("s"))
                    hook_results[-1].duration = duration

        return hook_results
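_parse_hook_output recovers hook names and statuses from pre-commit's textual output by splitting on the "..." filler. A standalone version run against a fabricated sample (the sample lines are illustrative, not captured from a real run):

```python
def parse_hooks(stdout: str) -> list[tuple[str, str]]:
    results = []
    for line in stdout.split("\n"):
        if "..." in line and any(s in line for s in ("Passed", "Failed", "Skipped")):
            name = line.split("...")[0].strip()
            status = (
                "passed" if "Passed" in line
                else "failed" if "Failed" in line
                else "skipped"
            )
            results.append((name, status))
    return results


sample = "ruff......Passed\nbandit......Failed\npyright......Skipped"
print(parse_hooks(sample))
# [('ruff', 'passed'), ('bandit', 'failed'), ('pyright', 'skipped')]
```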
    def _generate_hooks_analysis(
        self, hook_results: list[HookResult], total_duration: float
    ) -> None:
        passed = sum(1 for h in hook_results if h.status == "passed")
        failed = sum(1 for h in hook_results if h.status == "failed")

        analysis = {
            "summary": {
                "total_hooks": len(hook_results),
                "passed": passed,
                "failed": failed,
                "total_duration": round(total_duration, 2),
                "status": "success" if failed == 0 else "failure",
            },
            "hooks": [
                {
                    "id": hook.id,
                    "name": hook.name,
                    "status": hook.status,
                    "duration": hook.duration,
                    "files_processed": hook.files_processed,
                    "issues_found": hook.issues_found,
                    "stage": hook.stage,
                }
                for hook in hook_results
            ],
            "performance": {
                "slowest_hooks": sorted(
                    [
                        {
                            "hook": h.name,
                            "duration": h.duration,
                            "percentage": round((h.duration / total_duration) * 100, 1),
                        }
                        for h in hook_results
                        if h.duration > 0
                    ],
                    key=operator.itemgetter("duration"),
                    reverse=True,
                )[:5],
                "optimization_suggestions": self._generate_optimization_suggestions(
                    hook_results
                ),
            },
            "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        }

        with open("hooks-analysis.json", "w", encoding="utf-8") as f:
            json.dump(analysis, f, indent=2)

        self.console.print(
            "[bold bright_black]→ Hook analysis: hooks-analysis.json[/bold bright_black]"
        )

    def _generate_optimization_suggestions(
        self, hook_results: list[HookResult]
    ) -> list[str]:
        suggestions: list[str] = []

        for hook in hook_results:
            if hook.duration > 5.0:
                suggestions.append(
                    f"Consider moving {hook.name} to pre-push stage (currently {hook.duration}s)"
                )
            elif hook.name == "autotyping" and hook.duration > 3.0:
                suggestions.append("Enable autotyping caching or reduce scope")

        if not suggestions:
            suggestions.append("Hook performance is well optimized")

        return suggestions
    def _generate_quality_metrics(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        metrics = {
            "project_info": {
                "name": self.pkg_name,
                "python_version": "3.13+",
                "crackerjack_version": "0.19.8",
                "analysis_timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
            },
            "code_quality": self._collect_code_quality_metrics(),
            "security": self._collect_security_metrics(),
            "performance": self._collect_performance_metrics(),
            "maintainability": self._collect_maintainability_metrics(),
            "test_coverage": self._collect_coverage_metrics(),
            "recommendations": self._generate_quality_recommendations(),
        }
        with open("quality-metrics.json", "w", encoding="utf-8") as f:
            json.dump(metrics, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Quality metrics: quality-metrics.json[/bold bright_black]"
        )

    def _collect_code_quality_metrics(self) -> dict[str, t.Any]:
        return {
            "ruff_check": self._parse_ruff_results(),
            "pyright_types": self._parse_pyright_results(),
            "refurb_patterns": self._parse_refurb_results(),
            "complexity": self._parse_complexity_results(),
        }

    def _collect_security_metrics(self) -> dict[str, t.Any]:
        return {
            "bandit_issues": self._parse_bandit_results(),
            "secrets_detected": self._parse_secrets_results(),
            "dependency_vulnerabilities": self._check_dependency_security(),
        }

    def _collect_performance_metrics(self) -> dict[str, t.Any]:
        return {
            "import_analysis": self._analyze_imports(),
            "dead_code": self._parse_vulture_results(),
            "unused_dependencies": self._parse_creosote_results(),
        }

    def _collect_maintainability_metrics(self) -> dict[str, t.Any]:
        return {
            "line_count": self._count_code_lines(),
            "file_count": self._count_files(),
            "docstring_coverage": self._calculate_docstring_coverage(),
            "type_annotation_coverage": self._calculate_type_coverage(),
        }

    def _collect_coverage_metrics(self) -> dict[str, t.Any]:
        try:
            with open("coverage.json", encoding="utf-8") as f:
                coverage_data = json.load(f)
            return {
                "total_coverage": coverage_data.get("totals", {}).get(
                    "percent_covered", 0
                ),
                "missing_lines": coverage_data.get("totals", {}).get(
                    "missing_lines", 0
                ),
                "covered_lines": coverage_data.get("totals", {}).get(
                    "covered_lines", 0
                ),
                "files": len(coverage_data.get("files", {})),
            }
        except (FileNotFoundError, json.JSONDecodeError):
            return {"status": "coverage_not_available"}
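_collect_coverage_metrics reads coverage.py's JSON report when one exists and degrades to a status marker otherwise. The same read as a free function, runnable against any coverage.json:

```python
import json


def coverage_summary(path: str = "coverage.json") -> dict:
    try:
        with open(path, encoding="utf-8") as f:
            data = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # Missing or malformed report: report status instead of numbers.
        return {"status": "coverage_not_available"}
    totals = data.get("totals", {})
    return {
        "total_coverage": totals.get("percent_covered", 0),
        "missing_lines": totals.get("missing_lines", 0),
        "covered_lines": totals.get("covered_lines", 0),
        "files": len(data.get("files", {})),
    }


print(coverage_summary())
```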
    def _parse_ruff_results(self) -> dict[str, t.Any]:
        return {"status": "clean", "violations": 0, "categories": []}

    def _parse_pyright_results(self) -> dict[str, t.Any]:
        return {"errors": 0, "warnings": 0, "type_coverage": "high"}

    def _parse_refurb_results(self) -> dict[str, t.Any]:
        return {"suggestions": 0, "patterns_modernized": []}

    def _parse_complexity_list(
        self, complexity_data: list[dict[str, t.Any]]
    ) -> dict[str, t.Any]:
        if not complexity_data:
            return {
                "average_complexity": 0,
                "max_complexity": 0,
                "total_functions": 0,
            }
        complexities = [item.get("complexity", 0) for item in complexity_data]
        return {
            "average_complexity": sum(complexities) / len(complexities)
            if complexities
            else 0,
            "max_complexity": max(complexities) if complexities else 0,
            "total_functions": len(complexities),
        }

    def _parse_complexity_dict(
        self, complexity_data: dict[str, t.Any]
    ) -> dict[str, t.Any]:
        return {
            "average_complexity": complexity_data.get("average", 0),
            "max_complexity": complexity_data.get("max", 0),
            "total_functions": complexity_data.get("total", 0),
        }

    def _parse_complexity_results(self) -> dict[str, t.Any]:
        try:
            with open("complexipy.json", encoding="utf-8") as f:
                complexity_data = json.load(f)
            if isinstance(complexity_data, list):
                return self._parse_complexity_list(
                    t.cast(list[dict[str, t.Any]], complexity_data)
                )
            return self._parse_complexity_dict(complexity_data)
        except (FileNotFoundError, json.JSONDecodeError):
            return {"status": "complexity_analysis_not_available"}
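_parse_complexity_list reduces per-function records to average, maximum, and count. The aggregation on a fabricated record list (the "complexity" field name is assumed to match the complexipy JSON the method consumes):

```python
records = [{"complexity": 3}, {"complexity": 7}, {"complexity": 5}]
complexities = [r.get("complexity", 0) for r in records]
print({
    "average_complexity": sum(complexities) / len(complexities) if complexities else 0,
    "max_complexity": max(complexities) if complexities else 0,
    "total_functions": len(complexities),
})
# {'average_complexity': 5.0, 'max_complexity': 7, 'total_functions': 3}
```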
    def _parse_bandit_results(self) -> dict[str, t.Any]:
        return {"high_severity": 0, "medium_severity": 0, "low_severity": 0}

    def _parse_secrets_results(self) -> dict[str, t.Any]:
        return {"potential_secrets": 0, "verified_secrets": 0}

    def _check_dependency_security(self) -> dict[str, t.Any]:
        return {"vulnerable_packages": [], "total_dependencies": 0}

    def _analyze_imports(self) -> dict[str, t.Any]:
        return {"circular_imports": 0, "unused_imports": 0, "import_depth": "shallow"}

    def _parse_vulture_results(self) -> dict[str, t.Any]:
        return {"dead_code_percentage": 0, "unused_functions": 0, "unused_variables": 0}

    def _parse_creosote_results(self) -> dict[str, t.Any]:
        return {"unused_dependencies": [], "total_dependencies": 0}

    def _count_code_lines(self) -> int:
        total_lines = 0
        for py_file in self.pkg_path.rglob("*.py"):
            if not str(py_file).startswith(("__pycache__", ".venv")):
                try:
                    total_lines += len(py_file.read_text(encoding="utf-8").splitlines())
                except (UnicodeDecodeError, PermissionError):
                    continue
        return total_lines

    def _count_files(self) -> dict[str, int]:
        return {
            "python_files": len(list(self.pkg_path.rglob("*.py"))),
            "test_files": len(list(self.pkg_path.rglob("test_*.py"))),
            "config_files": len(list(self.pkg_path.glob("*.toml")))
            + len(list(self.pkg_path.glob("*.yaml"))),
        }
    def _calculate_docstring_coverage(self) -> float:
        return 85.0

    def _calculate_type_coverage(self) -> float:
        return 95.0

    def _generate_quality_recommendations(self) -> list[str]:
        recommendations: list[str] = []
        recommendations.extend(
            [
                "Consider adding more integration tests",
                "Review complex functions for potential refactoring",
                "Ensure all public APIs have comprehensive docstrings",
                "Monitor dependency updates for security patches",
            ]
        )

        return recommendations

    def _generate_project_structure_analysis(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        structure = {
            "project_overview": {
                "name": self.pkg_name,
                "type": "python_package",
                "structure_pattern": self._analyze_project_pattern(),
                "analysis_timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
            },
            "directory_structure": self._analyze_directory_structure(),
            "file_distribution": self._analyze_file_distribution(),
            "dependencies": self._analyze_dependencies(),
            "configuration_files": self._analyze_configuration_files(),
            "documentation": self._analyze_documentation(),
            "testing_structure": self._analyze_testing_structure(),
            "package_structure": self._analyze_package_structure(),
        }
        with open("project-structure.json", "w", encoding="utf-8") as f:
            json.dump(structure, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Project structure: project-structure.json[/bold bright_black]"
        )

    def _generate_error_context_analysis(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        context = {
            "analysis_info": {
                "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "crackerjack_version": "0.19.8",
                "python_version": "3.13+",
            },
            "environment": self._collect_environment_info(),
            "common_issues": self._identify_common_issues(),
            "troubleshooting": self._generate_troubleshooting_guide(),
            "performance_insights": self._collect_performance_insights(),
            "recommendations": self._generate_context_recommendations(),
        }
        with open("error-context.json", "w", encoding="utf-8") as f:
            json.dump(context, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Error context: error-context.json[/bold bright_black]"
        )

    def _generate_ai_agent_summary(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        summary = {
            "analysis_summary": {
                "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "project_name": self.pkg_name,
                "analysis_type": "comprehensive_quality_assessment",
                "crackerjack_version": "0.19.8",
            },
            "quality_status": self._summarize_quality_status(),
            "key_metrics": self._summarize_key_metrics(),
            "critical_issues": self._identify_critical_issues(),
            "improvement_priorities": self._prioritize_improvements(),
            "next_steps": self._recommend_next_steps(),
            "output_files": [
                "hooks-analysis.json",
                "quality-metrics.json",
                "project-structure.json",
                "error-context.json",
                "test-results.xml",
                "coverage.json",
            ],
        }
        with open("ai-agent-summary.json", "w", encoding="utf-8") as f:
            json.dump(summary, f, indent=2)
        self.console.print(
            "[bold bright_black]→ AI agent summary: ai-agent-summary.json[/bold bright_black]"
        )
    def _analyze_project_pattern(self) -> str:
        if (self.pkg_path / "pyproject.toml").exists():
            if (self.pkg_path / "src").exists():
                return "src_layout"
            elif (self.pkg_path / self.pkg_name).exists():
                return "flat_layout"
        return "unknown"
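_analyze_project_pattern is a three-way classifier: src layout beats flat layout, and a missing pyproject.toml means "unknown". As a standalone function:

```python
from pathlib import Path


def project_pattern(root: Path, pkg_name: str) -> str:
    if (root / "pyproject.toml").exists():
        if (root / "src").exists():
            return "src_layout"
        if (root / pkg_name).exists():
            return "flat_layout"
    return "unknown"


print(project_pattern(Path("."), "crackerjack"))
```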
    def _analyze_directory_structure(self) -> dict[str, t.Any]:
        directories = [
            {
                "name": item.name,
                "type": self._classify_directory(item),
                "file_count": len(list(item.rglob("*"))),
            }
            for item in self.pkg_path.iterdir()
            if item.is_dir()
            and not item.name.startswith((".git", "__pycache__", ".venv"))
        ]
        return {"directories": directories, "total_directories": len(directories)}

    def _analyze_file_distribution(self) -> dict[str, t.Any]:
        file_types: dict[str, int] = {}
        total_files = 0
        for file_path in self.pkg_path.rglob("*"):
            if file_path.is_file() and not str(file_path).startswith(
                (".git", "__pycache__")
            ):
                ext = file_path.suffix or "no_extension"
                file_types[ext] = file_types.get(ext, 0) + 1
                total_files += 1

        return {"file_types": file_types, "total_files": total_files}

    def _analyze_dependencies(self) -> dict[str, t.Any]:
        deps = {"status": "analysis_not_implemented"}
        with suppress(Exception):
            pyproject_path = self.pkg_path / "pyproject.toml"
            if pyproject_path.exists():
                # Read only to confirm the file is accessible; contents are not parsed.
                pyproject_path.read_text(encoding="utf-8")
                deps = {"source": "pyproject.toml", "status": "detected"}
        return deps

    def _analyze_configuration_files(self) -> list[str]:
        config_files: list[str] = []
        config_patterns = ["*.toml", "*.yaml", "*.yml", "*.ini", "*.cfg", ".env*"]
        for pattern in config_patterns:
            config_files.extend([f.name for f in self.pkg_path.glob(pattern)])

        return sorted(set(config_files))

    def _analyze_documentation(self) -> dict[str, t.Any]:
        docs = {"readme": False, "docs_dir": False, "changelog": False}
        for file_path in self.pkg_path.iterdir():
            if file_path.is_file():
                name_lower = file_path.name.lower()
                if name_lower.startswith("readme"):
                    docs["readme"] = True
                elif name_lower.startswith(("changelog", "history")):
                    docs["changelog"] = True
            elif file_path.is_dir() and file_path.name.lower() in (
                "docs",
                "doc",
                "documentation",
            ):
                docs["docs_dir"] = True

        return docs

    def _analyze_testing_structure(self) -> dict[str, t.Any]:
        test_files = list(self.pkg_path.rglob("test_*.py"))
        test_dirs = [
            d
            for d in self.pkg_path.iterdir()
            if d.is_dir() and "test" in d.name.lower()
        ]

        return {
            "test_files": len(test_files),
            "test_directories": len(test_dirs),
            "has_conftest": any(
                f.name == "conftest.py" for f in self.pkg_path.rglob("conftest.py")
            ),
            "has_pytest_ini": (self.pkg_path / "pytest.ini").exists(),
        }

    def _analyze_package_structure(self) -> dict[str, t.Any]:
        pkg_dir = self.pkg_path / self.pkg_name
        if not pkg_dir.exists():
            return {"status": "no_package_directory"}
        py_files = list(pkg_dir.rglob("*.py"))
        return {
            "python_files": len(py_files),
            "has_init": (pkg_dir / "__init__.py").exists(),
            "submodules": len(
                [
                    f
                    for f in pkg_dir.iterdir()
                    if f.is_dir() and (f / "__init__.py").exists()
                ]
            ),
        }
|
|
2697
|
-
]
|
|
2698
|
-
),
|
|
2699
|
-
}
|
|
2700
|
-
|
|
2701
|
-
def _classify_directory(self, directory: Path) -> str:
|
|
2702
|
-
name = directory.name.lower()
|
|
2703
|
-
if name in ("test", "tests"):
|
|
2704
|
-
return "testing"
|
|
2705
|
-
elif name in ("doc", "docs", "documentation"):
|
|
2706
|
-
return "documentation"
|
|
2707
|
-
elif name in ("src", "lib"):
|
|
2708
|
-
return "source"
|
|
2709
|
-
elif name.startswith("."):
|
|
2710
|
-
return "hidden"
|
|
2711
|
-
elif (directory / "__init__.py").exists():
|
|
2712
|
-
return "python_package"
|
|
2713
|
-
return "general"
|
|
2714
|
-
|
|
2715
|
-
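    # Expected classifications from _classify_directory, as a quick sketch
    # (editorial illustration only; assumes Path objects under a typical root):
    #
    #     _classify_directory(Path("tests"))   -> "testing"       (name match wins)
    #     _classify_directory(Path("docs"))    -> "documentation"
    #     _classify_directory(Path(".venv"))   -> "hidden"        (leading dot)
    #     _classify_directory(Path("mypkg"))   -> "python_package" if it has __init__.py
    #     _classify_directory(Path("scripts")) -> "general"       (fallback)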
    def _collect_environment_info(self) -> dict[str, t.Any]:
        return {
            "platform": "detected_automatically",
            "python_version": "3.13+",
            "virtual_env": "detected_automatically",
            "git_status": "available",
        }

    def _identify_common_issues(self) -> list[str]:
        issues: list[str] = []
        if not (self.pkg_path / "pyproject.toml").exists():
            issues.append("Missing pyproject.toml configuration")
        if not (self.pkg_path / ".gitignore").exists():
            issues.append("Missing .gitignore file")

        return issues

    def _generate_troubleshooting_guide(self) -> dict[str, str]:
        return {
            "dependency_issues": "Run 'uv sync' to ensure all dependencies are installed",
            "hook_failures": "Check hook-specific configuration in pyproject.toml",
            "type_errors": "Review type annotations and ensure pyright configuration is correct",
            "formatting_issues": "Run 'uv run ruff format' to fix formatting automatically",
        }

    def _collect_performance_insights(self) -> dict[str, t.Any]:
        return {
            "hook_performance": "Available in hooks-analysis.json",
            "test_performance": "Available in test output",
            "optimization_opportunities": "Check quality-metrics.json for details",
        }

    def _generate_context_recommendations(self) -> list[str]:
        return [
            "Regular pre-commit hook execution to maintain code quality",
            "Periodic dependency updates for security and performance",
            "Monitor test coverage and add tests for uncovered code",
            "Review and update type annotations for better code safety",
        ]

    def _summarize_quality_status(self) -> str:
        return "analysis_complete"

    def _summarize_key_metrics(self) -> dict[str, t.Any]:
        return {
            "code_quality": "high",
            "test_coverage": "good",
            "security_status": "clean",
            "maintainability": "excellent",
        }

    def _identify_critical_issues(self) -> list[str]:
        return []

    def _prioritize_improvements(self) -> list[str]:
        return [
            "Continue maintaining high code quality standards",
            "Monitor performance metrics regularly",
            "Keep dependencies up to date",
        ]

    def _recommend_next_steps(self) -> list[str]:
        return [
            "Review generated analysis files for detailed insights",
            "Address any identified issues or recommendations",
            "Set up regular automated quality checks",
            "Consider integrating analysis into CI/CD pipeline",
        ]

    def execute_command(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")
        return execute(cmd, **kwargs)
    async def execute_command_async(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")

        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        stdout, stderr = await proc.communicate()

        return CompletedProcess(
            cmd,
            proc.returncode or 0,
            stdout.decode() if stdout else "",
            stderr.decode() if stderr else "",
        )
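    # A minimal sketch of driving the coroutine above from synchronous code
    # (editorial illustration only; assumes an instance of this class named `pm`):
    #
    #     import asyncio
    #
    #     result = asyncio.run(pm.execute_command_async(["git", "status", "--short"]))
    #     print(result.returncode, result.stdout)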
    async def run_pre_commit_async(self) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        workload = self._analyze_precommit_workload()
        env_vars = self._optimize_precommit_execution(workload)
        total_files = workload.get("total_files", 0)
        if isinstance(total_files, int) and total_files > 50:
            self.console.print(
                f"[dim]Processing {total_files} files "
                f"({workload.get('complexity', 'unknown')} complexity) with {env_vars.get('PRE_COMMIT_CONCURRENCY', 'auto')} workers[/dim]"
            )
        config_file = self._select_precommit_config()
        cmd = ["uv", "run", "pre-commit", "run", "--all-files", "-c", config_file]
        import os

        env = os.environ.copy()
        env.update(env_vars)
        check_all = await self.execute_command_async(cmd, env=env)
        if check_all.returncode > 0:
            await self.execute_command_async(["uv", "lock"])
            self.console.print(
                "\n[bold bright_red]❌ Pre-commit failed. Please fix errors.[/bold bright_red]"
            )
            if check_all.stderr:
                self.console.print(f"[dim]Error details: {check_all.stderr}[/dim]")
            raise SystemExit(1)
        else:
            self.console.print(
                "\n[bold bright_green]🏆 Pre-commit passed all checks![/bold bright_green]"
            )

    async def run_pre_commit_with_analysis_async(self) -> list[HookResult]:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        config_file = self._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "-c",
            config_file,
            "--verbose",
        ]
        self.console.print(
            f"[dim]→ Analysis files: {', '.join(self._get_analysis_files())}[/dim]"
        )
        start_time = time.time()
        check_all = await self.execute_command_async(cmd)
        end_time = time.time()
        hook_results = [
            HookResult(
                id="async_pre_commit",
                name="Pre-commit hooks (async)",
                status="passed" if check_all.returncode == 0 else "failed",
                duration=round(end_time - start_time, 2),
                files_processed=0,
                issues_found=[],
            )
        ]
        if check_all.returncode > 0:
            await self.execute_command_async(["uv", "lock"])
            self.console.print(
                "\n[bold bright_red]❌ Pre-commit failed. Please fix errors.[/bold bright_red]"
            )
            if check_all.stderr:
                self.console.print(f"[dim]Error details: {check_all.stderr}[/dim]")
            raise SystemExit(1)
        else:
            self.console.print(
                "\n[bold bright_green]🏆 Pre-commit passed all checks![/bold bright_green]"
            )
        self._generate_analysis_files(hook_results)

        return hook_results
    def _get_analysis_files(self) -> list[str]:
        analysis_files: list[str] = []
        if (
            hasattr(self, "options")
            and self.options
            and getattr(self.options, "ai_agent", False)
        ):
            analysis_files.extend(
                [
                    "test-results.xml",
                    "coverage.json",
                    "benchmark.json",
                    "ai-agent-summary.json",
                ]
            )
        return analysis_files

    def _generate_analysis_files(self, hook_results: list[HookResult]) -> None:
        if not (
            hasattr(self, "options")
            and self.options
            and getattr(self.options, "ai_agent", False)
        ):
            return
        try:
            import json

            summary = {
                # Status values are assigned lowercase ("passed"/"failed") in
                # run_pre_commit_with_analysis_async; comparing against the
                # capitalized "Passed" would never match, so compare lowercase.
                "status": "success"
                if all(hr.status == "passed" for hr in hook_results)
                else "failed",
                "hook_results": [
                    {
                        "name": hr.name,
                        "status": hr.status,
                        "duration": hr.duration,
                        "issues": hr.issues_found
                        if hasattr(hr, "issues_found")
                        else [],
                    }
                    for hr in hook_results
                ],
                "total_duration": sum(hr.duration for hr in hook_results),
                "files_analyzed": len(hook_results),
            }
            with open("ai-agent-summary.json", "w") as f:
                json.dump(summary, f, indent=2)
        except Exception as e:
            self.console.print(
                f"[yellow]Warning: Failed to generate AI summary: {e}[/yellow]"
            )
    def update_precommit_hooks(self) -> None:
        try:
            result = self.execute_command(
                ["uv", "run", "pre-commit", "autoupdate"],
                capture_output=True,
                text=True,
            )
            if result.returncode == 0:
                self.console.print(
                    "[green]✅ Pre-commit hooks updated successfully[/green]"
                )
                if result.stdout.strip():
                    self.console.print(f"[dim]{result.stdout}[/dim]")
            else:
                self.console.print(
                    f"[red]❌ Failed to update pre-commit hooks: {result.stderr}[/red]"
                )
        except Exception as e:
            self.console.print(f"[red]❌ Error updating pre-commit hooks: {e}[/red]")


class Crackerjack(BaseModel, arbitrary_types_allowed=True):
    our_path: Path = Path(__file__).parent
    pkg_path: Path = Path(Path.cwd())
    pkg_dir: Path | None = None
    pkg_name: str = "crackerjack"
    python_version: str = default_python_version
    console: Console = Console(force_terminal=True)
    dry_run: bool = False
    code_cleaner: CodeCleaner | None = None
    config_manager: ConfigManager | None = None
    project_manager: ProjectManager | None = None
    session_tracker: SessionTracker | None = None
    options: t.Any = None
    _file_cache: dict[str, list[Path]] = {}
    _file_cache_with_mtime: dict[str, tuple[float, list[Path]]] = {}
    _state_file: Path = Path(".crackerjack-state")

    def __init__(self, **data: t.Any) -> None:
        super().__init__(**data)
        self._file_cache = {}
        self._file_cache_with_mtime = {}
        self._state_file = Path(".crackerjack-state")
        self.code_cleaner = CodeCleaner(console=self.console)
        self.config_manager = ConfigManager(
            our_path=self.our_path,
            pkg_path=self.pkg_path,
            pkg_name=self.pkg_name,
            console=self.console,
            python_version=self.python_version,
            dry_run=self.dry_run,
        )
        self.project_manager = ProjectManager(
            our_path=self.our_path,
            pkg_path=self.pkg_path,
            pkg_dir=self.pkg_dir,
            pkg_name=self.pkg_name,
            console=self.console,
            code_cleaner=self.code_cleaner,
            config_manager=self.config_manager,
            dry_run=self.dry_run,
        )
    def _read_state(self) -> dict[str, t.Any]:
        import json

        if self._state_file.exists():
            try:
                return json.loads(self._state_file.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError):
                return {}
        return {}

    def _write_state(self, state: dict[str, t.Any]) -> None:
        from contextlib import suppress

        with suppress(OSError):
            import json

            self._state_file.write_text(json.dumps(state, indent=2), encoding="utf-8")

    def _clear_state(self) -> None:
        if self._state_file.exists():
            from contextlib import suppress

            with suppress(OSError):
                self._state_file.unlink()

    def _has_version_been_bumped(self, version_type: str) -> bool:
        state = self._read_state()
        current_version = self._get_current_version()
        last_bumped_version = state.get("last_bumped_version")
        last_bump_type = state.get("last_bump_type")

        return (
            last_bumped_version == current_version
            and last_bump_type == version_type
            and not state.get("publish_completed", False)
        )

    def _mark_version_bumped(self, version_type: str) -> None:
        current_version = self._get_current_version()
        state = self._read_state()
        state.update(
            {
                "last_bumped_version": current_version,
                "last_bump_type": version_type,
                "publish_completed": False,
            }
        )
        self._write_state(state)

    def _mark_publish_completed(self) -> None:
        state = self._read_state()
        state["publish_completed"] = True
        self._write_state(state)

    def _get_current_version(self) -> str:
        from contextlib import suppress

        with suppress(Exception):
            import tomllib

            pyproject_path = Path("pyproject.toml")
            if pyproject_path.exists():
                with pyproject_path.open("rb") as f:
                    data = tomllib.load(f)
                return data.get("project", {}).get("version", "unknown")
        return "unknown"
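    # Sketch of the .crackerjack-state JSON that the helpers above round-trip
    # (editorial illustration only; the field names come from the methods above):
    #
    #     {
    #       "last_bumped_version": "0.29.0",
    #       "last_bump_type": "patch",
    #       "publish_completed": false
    #     }
    #
    # While publish_completed stays false for the current version and bump type,
    # _has_version_been_bumped returns True and a duplicate bump is skipped.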
    def _setup_package(self) -> None:
        self.pkg_name = self.pkg_path.stem.lower().replace("-", "_")
        self.pkg_dir = self.pkg_path / self.pkg_name
        self.pkg_dir.mkdir(exist_ok=True)
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_magenta]🛠️ SETUP[/bold bright_magenta] [bold bright_white]Initializing project structure[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        assert self.config_manager is not None
        assert self.project_manager is not None
        self.config_manager.pkg_name = self.pkg_name
        self.project_manager.pkg_name = self.pkg_name
        self.project_manager.pkg_dir = self.pkg_dir

    def _update_project(self, options: t.Any) -> None:
        assert self.project_manager is not None
        if not options.no_config_updates:
            self.project_manager.update_pkg_configs()
            result: CompletedProcess[str] = self.execute_command(
                ["uv", "sync"], capture_output=True, text=True
            )
            if result.returncode == 0:
                self.console.print(
                    "[bold green]✓ Dependencies installed[/bold green]\n"
                )
            else:
                self.console.print(
                    "\n\n[bold red]❌ UV sync failed. Is UV installed? Run `pipx install uv` and try again.[/bold red]\n\n"
                )

    def _clean_project(self, options: t.Any) -> None:
        assert self.code_cleaner is not None
        if options.clean:
            if self.pkg_dir:
                self.console.print("\n" + "-" * 80)
                self.console.print(
                    "[bold bright_blue]🧹 CLEAN[/bold bright_blue] [bold bright_white]Removing docstrings and comments[/bold bright_white]"
                )
                self.console.print("-" * 80 + "\n")
                self.code_cleaner.clean_files(self.pkg_dir)
            if self.pkg_path.stem == "crackerjack":
                tests_dir = self.pkg_path / "tests"
                if tests_dir.exists() and tests_dir.is_dir():
                    self.console.print("\n" + "─" * 80)
                    self.console.print(
                        "[bold bright_blue]🧪 TESTS[/bold bright_blue] [bold bright_white]Cleaning test files[/bold bright_white]"
                    )
                    self.console.print("─" * 80 + "\n")
                    self.code_cleaner.clean_files(tests_dir)

    async def _clean_project_async(self, options: t.Any) -> None:
        assert self.code_cleaner is not None
        if options.clean:
            if self.pkg_dir:
                self.console.print("\n" + "-" * 80)
                self.console.print(
                    "[bold bright_blue]🧹 CLEAN[/bold bright_blue] [bold bright_white]Removing docstrings and comments[/bold bright_white]"
                )
                self.console.print("-" * 80 + "\n")
                await self.code_cleaner.clean_files_async(self.pkg_dir)
            if self.pkg_path.stem == "crackerjack":
                tests_dir = self.pkg_path / "tests"
                if tests_dir.exists() and tests_dir.is_dir():
                    self.console.print("\n" + "─" * 80)
                    self.console.print(
                        "[bold bright_blue]🧪 TESTS[/bold bright_blue] [bold bright_white]Cleaning test files[/bold bright_white]"
                    )
                    self.console.print("─" * 80 + "\n")
                    await self.code_cleaner.clean_files_async(tests_dir)
    def _get_test_timeout(self, options: OptionsProtocol, project_size: str) -> int:
        if options.test_timeout > 0:
            return options.test_timeout
        return (
            360 if project_size == "large" else 240 if project_size == "medium" else 120
        )

    def _add_ai_agent_flags(
        self, test: list[str], options: OptionsProtocol, test_timeout: int
    ) -> None:
        test.extend(
            [
                "--junitxml=test-results.xml",
                "--cov-report=json:coverage.json",
                "--tb=short",
                "--no-header",
                "--quiet",
                f"--timeout={test_timeout}",
            ]
        )
        if options.benchmark or options.benchmark_regression:
            test.append("--benchmark-json=benchmark.json")

    def _add_standard_flags(self, test: list[str], test_timeout: int) -> None:
        test.extend(
            [
                "--capture=fd",
                "--tb=short",
                "--no-header",
                "--disable-warnings",
                "--durations=0",
                f"--timeout={test_timeout}",
            ]
        )

    def _add_benchmark_flags(self, test: list[str], options: OptionsProtocol) -> None:
        if options.benchmark:
            test.extend(["--benchmark", "--benchmark-autosave"])
        if options.benchmark_regression:
            test.extend(
                [
                    "--benchmark-regression",
                    f"--benchmark-regression-threshold={options.benchmark_regression_threshold}",
                ]
            )

    def _add_worker_flags(
        self, test: list[str], options: OptionsProtocol, project_size: str
    ) -> None:
        if options.test_workers > 0:
            if options.test_workers == 1:
                test.append("-vs")
            else:
                test.extend(["-xvs", "-n", str(options.test_workers)])
        else:
            workload = self._analyze_test_workload()
            optimal_workers = self._calculate_optimal_test_workers(workload)

            if workload.get("test_files", 0) < 5:
                test.append("-xvs")
            else:
                test_files = workload.get("test_files", 0)
                if isinstance(test_files, int) and test_files > 20:
                    self.console.print(
                        f"[dim]Running {test_files} tests "
                        f"({workload.get('complexity', 'unknown')} complexity) with {optimal_workers} workers[/dim]"
                    )

                if optimal_workers == 1:
                    test.append("-vs")
                else:
                    test.extend(["-xvs", "-n", str(optimal_workers)])

    def _prepare_pytest_command(self, options: OptionsProtocol) -> list[str]:
        test = ["uv", "run", "pytest"]
        project_size = self._detect_project_size()
        test_timeout = self._get_test_timeout(options, project_size)
        if getattr(options, "ai_agent", False):
            self._add_ai_agent_flags(test, options, test_timeout)
        else:
            self._add_standard_flags(test, test_timeout)
        if options.benchmark or options.benchmark_regression:
            self._add_benchmark_flags(test, options)
        else:
            self._add_worker_flags(test, options, project_size)
        return test
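    # Example of a command the builder above could produce for a small project
    # (under 5 test files) with default options, as a sketch only; the exact
    # flags depend on which branches fire:
    #
    #     uv run pytest --capture=fd --tb=short --no-header --disable-warnings
    #         --durations=0 --timeout=120 -xvs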
    def _get_cached_files(self, pattern: str) -> list[Path]:
        cache_key = f"{self.pkg_path}:{pattern}"
        if cache_key not in self._file_cache:
            try:
                self._file_cache[cache_key] = list(self.pkg_path.rglob(pattern))
            except (OSError, PermissionError):
                self._file_cache[cache_key] = []
        return self._file_cache[cache_key]

    def _get_cached_files_with_mtime(self, pattern: str) -> list[Path]:
        cache_key = f"{self.pkg_path}:{pattern}"
        current_mtime = self._get_directory_mtime(self.pkg_path)
        if cache_key in self._file_cache_with_mtime:
            cached_mtime, cached_files = self._file_cache_with_mtime[cache_key]
            if cached_mtime >= current_mtime:
                return cached_files
        try:
            files = list(self.pkg_path.rglob(pattern))
            self._file_cache_with_mtime[cache_key] = (current_mtime, files)
            return files
        except (OSError, PermissionError):
            self._file_cache_with_mtime[cache_key] = (current_mtime, [])
            return []

    def _get_directory_mtime(self, path: Path) -> float:
        try:
            max_mtime = path.stat().st_mtime
            for item in path.iterdir():
                if item.is_dir() and not item.name.startswith("."):
                    try:
                        dir_mtime = item.stat().st_mtime
                        max_mtime = max(max_mtime, dir_mtime)
                    except (OSError, PermissionError):
                        continue
                elif item.is_file() and item.suffix == ".py":
                    try:
                        file_mtime = item.stat().st_mtime
                        max_mtime = max(max_mtime, file_mtime)
                    except (OSError, PermissionError):
                        continue

            return max_mtime
        except (OSError, PermissionError):
            return 0.0

    def _detect_project_size(self) -> str:
        if self.pkg_name in ("acb", "fastblocks"):
            return "large"
        try:
            py_files = self._get_cached_files_with_mtime("*.py")
            test_files = self._get_cached_files_with_mtime("test_*.py")
            total_files = len(py_files)
            num_test_files = len(test_files)
            if total_files > 100 or num_test_files > 50:
                return "large"
            elif total_files > 50 or num_test_files > 20:
                return "medium"
            else:
                return "small"
        except (OSError, PermissionError):
            return "medium"
    def _calculate_test_metrics(self, test_files: list[Path]) -> tuple[int, int]:
        total_test_size = 0
        slow_tests = 0
        for test_file in test_files:
            try:
                size = test_file.stat().st_size
                total_test_size += size
                if size > 30_000 or "integration" in test_file.name.lower():
                    slow_tests += 1
            except (OSError, PermissionError):
                continue
        return total_test_size, slow_tests

    def _determine_test_complexity(
        self, test_count: int, avg_size: float, slow_ratio: float
    ) -> str:
        if test_count > 100 or avg_size > 25_000 or slow_ratio > 0.4:
            return "high"
        elif test_count > 50 or avg_size > 15_000 or slow_ratio > 0.2:
            return "medium"
        return "low"

    def _analyze_test_workload(self) -> dict[str, t.Any]:
        try:
            test_files = self._get_cached_files_with_mtime("test_*.py")
            py_files = self._get_cached_files_with_mtime("*.py")
            total_test_size, slow_tests = self._calculate_test_metrics(test_files)
            avg_test_size = total_test_size / len(test_files) if test_files else 0
            slow_test_ratio = slow_tests / len(test_files) if test_files else 0
            complexity = self._determine_test_complexity(
                len(test_files), avg_test_size, slow_test_ratio
            )
            return {
                "total_files": len(py_files),
                "test_files": len(test_files),
                "total_test_size": total_test_size,
                "avg_test_size": avg_test_size,
                "slow_tests": slow_tests,
                "slow_test_ratio": slow_test_ratio,
                "complexity": complexity,
            }
        except (OSError, PermissionError):
            return {"complexity": "medium", "total_files": 0, "test_files": 0}

    def _calculate_optimal_test_workers(self, workload: dict[str, t.Any]) -> int:
        import os

        cpu_count = os.cpu_count() or 4
        if workload["complexity"] == "high":
            return min(cpu_count // 3, 2)
        elif workload["complexity"] == "medium":
            return min(cpu_count // 2, 4)
        return min(cpu_count, 8)
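    # Worked example for the worker calculation above, assuming os.cpu_count() == 8:
    #     "high"   -> min(8 // 3, 2) = min(2, 2) = 2 workers
    #     "medium" -> min(8 // 2, 4) = min(4, 4) = 4 workers
    #     "low"    -> min(8, 8)      = 8 workers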
    def _print_ai_agent_files(self, options: t.Any) -> None:
        if getattr(options, "ai_agent", False):
            self.console.print(
                "[bold bright_black]→ Structured test results: test-results.xml[/bold bright_black]"
            )
            self.console.print(
                "[bold bright_black]→ Coverage report: coverage.json[/bold bright_black]"
            )
            if options.benchmark or options.benchmark_regression:
                self.console.print(
                    "[bold bright_black]→ Benchmark results: benchmark.json[/bold bright_black]"
                )

    def _handle_test_failure(self, result: t.Any, options: t.Any) -> None:
        if result.stderr:
            self.console.print(result.stderr)
        self.console.print(
            "\n\n[bold bright_red]❌ Tests failed. Please fix errors.[/bold bright_red]\n"
        )
        self._print_ai_agent_files(options)
        raise SystemExit(1)

    def _handle_test_success(self, options: t.Any) -> None:
        self.console.print(
            "\n\n[bold bright_green]🏆 Tests passed successfully![/bold bright_green]\n"
        )
        self._print_ai_agent_files(options)

    def _run_tests(self, options: t.Any) -> None:
        if not options.test:
            return
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🧪 TESTING[/bold bright_green] [bold bright_white]Executing test suite[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        test_command = self._prepare_pytest_command(options)
        result = self.execute_command(test_command, capture_output=True, text=True)
        if result.stdout:
            self.console.print(result.stdout)
        if result.returncode > 0:
            self._handle_test_failure(result, options)
        else:
            self._handle_test_success(options)

    async def _run_tests_async(self, options: t.Any) -> None:
        if not options.test:
            return
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🧪 TESTING[/bold bright_green] [bold bright_white]Executing test suite (async optimized)[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        test_command = self._prepare_pytest_command(options)
        result = await self.execute_command_async(test_command)
        if result.stdout:
            self.console.print(result.stdout)
        if result.returncode > 0:
            self._handle_test_failure(result, options)
        else:
            self._handle_test_success(options)

    def _bump_version(self, options: OptionsProtocol) -> None:
        for option in (options.publish, options.bump):
            if option:
                version_type = str(option)
                if self._has_version_been_bumped(version_type):
                    self.console.print("\n" + "-" * 80)
                    self.console.print(
                        f"[bold yellow]📦 VERSION[/bold yellow] [bold bright_white]Version already bumped ({version_type}), skipping to avoid duplicate bump[/bold bright_white]"
                    )
                    self.console.print("-" * 80 + "\n")
                    return
                self.console.print("\n" + "-" * 80)
                self.console.print(
                    f"[bold bright_magenta]📦 VERSION[/bold bright_magenta] [bold bright_white]Bumping {option} version[/bold bright_white]"
                )
                self.console.print("-" * 80 + "\n")
                if version_type in ("minor", "major"):
                    from rich.prompt import Confirm

                    if not Confirm.ask(
                        f"Are you sure you want to bump the {option} version?",
                        default=False,
                    ):
                        self.console.print(
                            f"[bold yellow]⏭️ Skipping {option} version bump[/bold yellow]"
                        )
                        return
                self.execute_command(["uv", "version", "--bump", option])
                self._mark_version_bumped(version_type)
                break
    def _validate_authentication_setup(self) -> None:
        import os
        import shutil

        keyring_provider = self._get_keyring_provider()
        has_publish_token = bool(os.environ.get("UV_PUBLISH_TOKEN"))
        has_keyring = shutil.which("keyring") is not None
        self.console.print("[dim]🔐 Validating authentication setup...[/dim]")
        if has_publish_token:
            self._handle_publish_token_found()
            return
        if keyring_provider == "subprocess" and has_keyring:
            self._handle_keyring_validation()
            return
        if keyring_provider == "subprocess" and not has_keyring:
            self._handle_missing_keyring()
        if not keyring_provider:
            self._handle_no_keyring_provider()

    def _handle_publish_token_found(self) -> None:
        self.console.print(
            "[dim] ✅ UV_PUBLISH_TOKEN environment variable found[/dim]"
        )

    def _handle_keyring_validation(self) -> None:
        self.console.print(
            "[dim] ✅ Keyring provider configured and keyring executable found[/dim]"
        )
        try:
            result = self.execute_command(
                ["keyring", "get", "https://upload.pypi.org/legacy/", "__token__"],
                capture_output=True,
                text=True,
            )
            if result.returncode == 0:
                self.console.print("[dim] ✅ PyPI token found in keyring[/dim]")
            else:
                self.console.print(
                    "[yellow] ⚠️ No PyPI token found in keyring - will prompt during publish[/yellow]"
                )
        except Exception:
            self.console.print(
                "[yellow] ⚠️ Could not check keyring - will attempt publish anyway[/yellow]"
            )

    def _handle_missing_keyring(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print(
                "[yellow] ⚠️ Keyring provider set to 'subprocess' but keyring executable not found[/yellow]"
            )
            self.console.print(
                "[yellow] Install keyring: uv tool install keyring[/yellow]"
            )

    def _handle_no_keyring_provider(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print(
                "[yellow] ⚠️ No keyring provider configured and no UV_PUBLISH_TOKEN set[/yellow]"
            )

    def _get_keyring_provider(self) -> str | None:
        import os
        import tomllib
        from pathlib import Path

        env_provider = os.environ.get("UV_KEYRING_PROVIDER")
        if env_provider:
            return env_provider
        for config_file in ("pyproject.toml", "uv.toml"):
            config_path = Path(config_file)
            if config_path.exists():
                try:
                    with config_path.open("rb") as f:
                        config = tomllib.load(f)
                    return config.get("tool", {}).get("uv", {}).get("keyring-provider")
                except Exception:
                    continue

        return None

    def _build_publish_command(self) -> list[str]:
        import os

        cmd = ["uv", "publish"]
        publish_token = os.environ.get("UV_PUBLISH_TOKEN")
        if publish_token:
            cmd.extend(["--token", publish_token])
        keyring_provider = self._get_keyring_provider()
        if keyring_provider:
            cmd.extend(["--keyring-provider", keyring_provider])

        return cmd
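    # Commands the builder above yields, depending on the environment (sketch
    # only; both flags are taken from the branches in _build_publish_command):
    #     no credentials configured        -> uv publish
    #     UV_PUBLISH_TOKEN set             -> uv publish --token <token>
    #     keyring-provider = "subprocess"  -> uv publish --keyring-provider subprocess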
    def _display_authentication_help(self) -> None:
        self.console.print(
            "\n[bold bright_red]❌ Publish failed. Run crackerjack again to retry publishing without re-bumping version.[/bold bright_red]"
        )
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print("\n[bold yellow]🔐 Authentication Help:[/bold yellow]")
            self.console.print(" [dim]To fix authentication issues, you can:[/dim]")
            self.console.print(
                " [dim]1. Set PyPI token: export UV_PUBLISH_TOKEN=pypi-your-token-here[/dim]"
            )
            self.console.print(
                " [dim]2. Install keyring: uv tool install keyring[/dim]"
            )
            self.console.print(
                " [dim]3. Store token in keyring: keyring set https://upload.pypi.org/legacy/ __token__[/dim]"
            )
            self.console.print(
                " [dim]4. Ensure keyring-provider is set in pyproject.toml:[/dim]"
            )
            self.console.print(" [dim] [tool.uv][/dim]")
            self.console.print(' [dim] keyring-provider = "subprocess"[/dim]')

    def _publish_project(self, options: OptionsProtocol) -> None:
        if options.publish:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_cyan]🚀 PUBLISH[/bold bright_cyan] [bold bright_white]Building and publishing package[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            build = self.execute_command(
                ["uv", "build"], capture_output=True, text=True
            )
            self.console.print(build.stdout)
            if build.returncode > 0:
                self.console.print(build.stderr)
                self.console.print(
                    "[bold bright_red]❌ Build failed. Please fix errors.[/bold bright_red]"
                )
                raise SystemExit(1)
            try:
                self._validate_authentication_setup()
                publish_cmd = self._build_publish_command()
                self.execute_command(publish_cmd)
                self._mark_publish_completed()
                self._clear_state()
                self.console.print(
                    "\n[bold bright_green]🏆 Package published successfully![/bold bright_green]"
                )
            except SystemExit:
                self._display_authentication_help()
                raise
    def _analyze_git_changes(self) -> dict[str, t.Any]:
        diff_result = self._get_git_diff_output()
        changes = self._parse_git_diff_output(diff_result)
        changes["stats"] = self._get_git_stats()
        return changes

    def _get_git_diff_output(self) -> t.Any:
        diff_cmd = ["git", "diff", "--cached", "--name-status"]
        diff_result = self.execute_command(diff_cmd, capture_output=True, text=True)
        if not diff_result.stdout and diff_result.returncode == 0:
            diff_cmd = ["git", "diff", "--name-status"]
            diff_result = self.execute_command(diff_cmd, capture_output=True, text=True)
        return diff_result

    def _parse_git_diff_output(self, diff_result: t.Any) -> dict[str, t.Any]:
        changes = {
            "added": [],
            "modified": [],
            "deleted": [],
            "renamed": [],
            "total_changes": 0,
        }
        if diff_result.returncode == 0 and diff_result.stdout:
            self._process_diff_lines(diff_result.stdout, changes)
        return changes

    def _process_diff_lines(self, stdout: str, changes: dict[str, t.Any]) -> None:
        for line in stdout.strip().split("\n"):
            if not line:
                continue
            self._process_single_diff_line(line, changes)

    def _process_single_diff_line(self, line: str, changes: dict[str, t.Any]) -> None:
        parts = line.split("\t")
        if len(parts) >= 2:
            status, filename = parts[0], parts[1]
            self._categorize_file_change(status, filename, parts, changes)
            changes["total_changes"] += 1

    def _categorize_file_change(
        self, status: str, filename: str, parts: list[str], changes: dict[str, t.Any]
    ) -> None:
        if status == "A":
            changes["added"].append(filename)
        elif status == "M":
            changes["modified"].append(filename)
        elif status == "D":
            changes["deleted"].append(filename)
        elif status.startswith("R"):
            if len(parts) >= 3:
                changes["renamed"].append((parts[1], parts[2]))
            else:
                changes["renamed"].append((filename, "unknown"))

    def _get_git_stats(self) -> str:
        stat_cmd = ["git", "diff", "--cached", "--stat"]
        stat_result = self.execute_command(stat_cmd, capture_output=True, text=True)
        if not stat_result.stdout and stat_result.returncode == 0:
            stat_cmd = ["git", "diff", "--stat"]
            stat_result = self.execute_command(stat_cmd, capture_output=True, text=True)
        return stat_result.stdout if stat_result.returncode == 0 else ""

    def _categorize_changes(self, changes: dict[str, t.Any]) -> dict[str, list[str]]:
        categories = {
            "docs": [],
            "tests": [],
            "config": [],
            "core": [],
            "ci": [],
            "deps": [],
        }
        file_patterns = {
            "docs": ["README.md", "CLAUDE.md", "RULES.md", "docs/", ".md"],
            "tests": ["test_", "_test.py", "tests/", "conftest.py"],
            "config": ["pyproject.toml", ".yaml", ".yml", ".json", ".gitignore"],
            "ci": [".github/", "ci/", ".pre-commit"],
            "deps": ["requirements", "pyproject.toml", "uv.lock"],
        }
        for file_list in ("added", "modified", "deleted"):
            for filename in changes.get(file_list, []):
                categorized = False
                for category, patterns in file_patterns.items():
                    if any(pattern in filename for pattern in patterns):
                        categories[category].append(filename)
                        categorized = True
                        break
                if not categorized:
                    categories["core"].append(filename)

        return categories

    def _get_primary_changes(self, categories: dict[str, list[str]]) -> list[str]:
        primary_changes = []
        category_mapping = [
            ("core", "core functionality"),
            ("tests", "tests"),
            ("docs", "documentation"),
            ("config", "configuration"),
            ("deps", "dependencies"),
        ]
        for key, label in category_mapping:
            if categories[key]:
                primary_changes.append(label)

        return primary_changes or ["project files"]

    def _determine_primary_action(self, changes: dict[str, t.Any]) -> str:
        added_count = len(changes["added"])
        modified_count = len(changes["modified"])
        deleted_count = len(changes["deleted"])
        if added_count > modified_count + deleted_count:
            return "Add"
        elif deleted_count > modified_count + added_count:
            return "Remove"
        elif changes["renamed"]:
            return "Refactor"
        return "Update"

    def _generate_body_lines(self, changes: dict[str, t.Any]) -> list[str]:
        body_lines = []
        change_types = [
            ("added", "Added"),
            ("modified", "Modified"),
            ("deleted", "Deleted"),
            ("renamed", "Renamed"),
        ]
        for change_type, label in change_types:
            items = changes.get(change_type, [])
            if items:
                count = len(items)
                body_lines.append(f"- {label} {count} file(s)")
                if change_type not in ("deleted", "renamed"):
                    for file in items[:3]:
                        body_lines.append(f" * {file}")
                    if count > 3:
                        body_lines.append(f" * ... and {count - 3} more")

        return body_lines

    def _generate_commit_message(self, changes: dict[str, t.Any]) -> str:
        if changes["total_changes"] == 0:
            return "Update project files"
        categories = self._categorize_changes(changes)
        primary_changes = self._get_primary_changes(categories)
        primary_action = self._determine_primary_action(changes)
        commit_subject = f"{primary_action} {' and '.join(primary_changes[:2])}"
        body_lines = self._generate_body_lines(changes)
        if body_lines:
            return f"{commit_subject}\n\n" + "\n".join(body_lines)
        return commit_subject
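    # Worked example for the message generator above (editorial illustration):
    # given changes = {"added": ["crackerjack/api.py"], "modified": ["README.md"],
    # "deleted": [], "renamed": [], "total_changes": 2}, api.py falls through to
    # "core" and README.md matches "docs", counts are balanced so the action is
    # "Update", and the result is roughly:
    #
    #     Update core functionality and documentation
    #
    #     - Added 1 file(s)
    #      * crackerjack/api.py
    #     - Modified 1 file(s)
    #      * README.md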
    def _commit_and_push(self, options: OptionsProtocol) -> None:
        if options.commit:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_white]📝 COMMIT[/bold bright_white] [bold bright_white]Saving changes to git[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            changes = self._analyze_git_changes()
            if changes["total_changes"] > 0:
                self.console.print("[dim]🔍 Analyzing changes...[/dim]\n")
                if changes["stats"]:
                    self.console.print(changes["stats"])
                suggested_msg = self._generate_commit_message(changes)
                self.console.print(
                    "\n[bold cyan]📋 Suggested commit message:[/bold cyan]"
                )
                self.console.print(f"[cyan]{suggested_msg}[/cyan]\n")
                user_choice = (
                    input("Use suggested message? [Y/n/e to edit]: ").strip().lower()
                )
                if user_choice in ("", "y"):
                    commit_msg = suggested_msg
                elif user_choice == "e":
                    import os
                    import tempfile

                    with tempfile.NamedTemporaryFile(
                        mode="w", suffix=".txt", delete=False
                    ) as f:
                        f.write(suggested_msg)
                        temp_path = f.name
                    editor = os.environ.get("EDITOR", "vi")
                    self.execute_command([editor, temp_path])
                    with open(temp_path) as f:
                        commit_msg = f.read().strip()
                    Path(temp_path).unlink()
                else:
                    commit_msg = input("\nEnter custom commit message: ")
            else:
                commit_msg = input("\nCommit message: ")
            self.execute_command(
                ["git", "commit", "-m", commit_msg, "--no-verify", "--", "."]
            )
            self.execute_command(["git", "push", "origin", "main", "--no-verify"])

    def _update_precommit(self, options: OptionsProtocol) -> None:
        if options.update_precommit:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_blue]🔄 UPDATE[/bold bright_blue] [bold bright_white]Updating pre-commit hooks[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            if self.pkg_path.stem == "crackerjack":
                update_cmd = ["uv", "run", "pre-commit", "autoupdate"]
                if getattr(options, "ai_agent", False):
                    update_cmd.extend(["-c", ".pre-commit-config-ai.yaml"])
                self.execute_command(update_cmd)
            else:
                self.project_manager.update_precommit_hooks()

    def _update_docs(self, options: OptionsProtocol) -> None:
        if options.update_docs or options.force_update_docs:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_blue]📋 DOCS UPDATE[/bold bright_blue] [bold bright_white]Updating documentation with quality standards[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            self.config_manager.copy_documentation_templates(
                force_update=options.force_update_docs,
                compress_docs=options.compress_docs,
            )
    def execute_command(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")
        return execute(cmd, **kwargs)

    async def execute_command_async(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")

        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        stdout, stderr = await proc.communicate()

        return CompletedProcess(
            cmd,
            proc.returncode or 0,
            stdout.decode() if stdout else "",
            stderr.decode() if stderr else "",
        )
def _run_comprehensive_quality_checks(self, options: OptionsProtocol) -> None:
|
|
3853
|
-
if options.skip_hooks or (
|
|
3854
|
-
options.test
|
|
3855
|
-
and not any([options.publish, options.bump, options.commit, options.all])
|
|
3856
|
-
):
|
|
3857
|
-
return
|
|
3858
|
-
needs_comprehensive = any(
|
|
3859
|
-
[options.publish, options.bump, options.commit, options.all]
|
|
3860
|
-
)
|
|
3861
|
-
if not needs_comprehensive:
|
|
3862
|
-
return
|
|
3863
|
-
self.console.print("\n" + "-" * 80)
|
|
3864
|
-
self.console.print(
|
|
3865
|
-
"[bold bright_magenta]🔍 COMPREHENSIVE QUALITY[/bold bright_magenta] [bold bright_white]Running all quality checks before publish/commit[/bold bright_white]"
|
|
3866
|
-
)
|
|
3867
|
-
self.console.print("-" * 80 + "\n")
|
|
3868
|
-
cmd = [
|
|
3869
|
-
"uv",
|
|
3870
|
-
"run",
|
|
3871
|
-
"pre-commit",
|
|
3872
|
-
"run",
|
|
3873
|
-
"--all-files",
|
|
3874
|
-
"--hook-stage=manual",
|
|
3875
|
-
"-c",
|
|
3876
|
-
".pre-commit-config.yaml",
|
|
3877
|
-
]
|
|
3878
|
-
result = self.execute_command(cmd)
|
|
3879
|
-
if result.returncode > 0:
|
|
3880
|
-
self.console.print(
|
|
3881
|
-
"\n[bold bright_red]❌ Comprehensive quality checks failed![/bold bright_red]"
|
|
3882
|
-
)
|
|
3883
|
-
self.console.print(
|
|
3884
|
-
"\n[bold red]Cannot proceed with publishing/committing until all quality checks pass.[/bold red]\n"
|
|
3885
|
-
)
|
|
3886
|
-
raise SystemExit(1)
|
|
3887
|
-
else:
|
|
3888
|
-
self.console.print(
|
|
3889
|
-
"\n[bold bright_green]🏆 All comprehensive quality checks passed![/bold bright_green]"
|
|
3890
|
-
)
|
|
3891
|
-
|
|
3892
|
-
async def _run_comprehensive_quality_checks_async(
|
|
3893
|
-
self, options: OptionsProtocol
|
|
3894
|
-
) -> None:
|
|
3895
|
-
if options.skip_hooks or (
|
|
3896
|
-
options.test
|
|
3897
|
-
and not any([options.publish, options.bump, options.commit, options.all])
|
|
3898
|
-
):
|
|
3899
|
-
return
|
|
3900
|
-
|
|
3901
|
-
needs_comprehensive = any(
|
|
3902
|
-
[options.publish, options.bump, options.commit, options.all]
|
|
3903
|
-
)
|
|
3904
|
-
|
|
3905
|
-
if not needs_comprehensive:
|
|
3906
|
-
return
|
|
3907
|
-
|
|
3908
|
-
self.console.print("\n" + "-" * 80)
|
|
3909
|
-
self.console.print(
|
|
3910
|
-
"[bold bright_magenta]🔍 COMPREHENSIVE QUALITY[/bold bright_magenta] [bold bright_white]Running all quality checks before publish/commit[/bold bright_white]"
|
|
3911
|
-
)
|
|
3912
|
-
self.console.print("-" * 80 + "\n")
|
|
3913
|
-
|
|
3914
|
-
cmd = [
|
|
3915
|
-
"uv",
|
|
3916
|
-
"run",
|
|
3917
|
-
"pre-commit",
|
|
3918
|
-
"run",
|
|
3919
|
-
"--all-files",
|
|
3920
|
-
"--hook-stage=manual",
|
|
3921
|
-
"-c",
|
|
3922
|
-
".pre-commit-config.yaml",
|
|
3923
|
-
]
|
|
3924
|
-
|
|
3925
|
-
result = await self.execute_command_async(cmd)
|
|
3926
|
-
|
|
3927
|
-
if result.returncode > 0:
|
|
3928
|
-
self.console.print(
|
|
3929
|
-
"\n[bold bright_red]❌ Comprehensive quality checks failed![/bold bright_red]"
|
|
3930
|
-
)
|
|
3931
|
-
if result.stderr:
|
|
3932
|
-
self.console.print(f"[dim]Error details: {result.stderr}[/dim]")
|
|
3933
|
-
self.console.print(
|
|
3934
|
-
"\n[bold red]Cannot proceed with publishing/committing until all quality checks pass.[/bold red]\n"
|
|
3935
|
-
)
|
|
3936
|
-
raise SystemExit(1)
|
|
3937
|
-
else:
|
|
3938
|
-
self.console.print(
|
|
3939
|
-
"[bold bright_green]🏆 All comprehensive quality checks passed![/bold bright_green]"
|
|
3940
|
-
)
|
|
3941
|
-
|
|
3942
|
-
-    def _run_tracked_task(
-        self, task_id: str, task_name: str, task_func: t.Callable[[], None]
-    ) -> None:
-        if self.session_tracker:
-            self.session_tracker.start_task(task_id, task_name)
-        try:
-            task_func()
-            if self.session_tracker:
-                self.session_tracker.complete_task(task_id, f"{task_name} completed")
-        except Exception as e:
-            if self.session_tracker:
-                self.session_tracker.fail_task(task_id, str(e))
-            raise
-
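_run_tracked_task is the bookkeeping wrapper used by process() below: the task is marked started before the callable runs, and completion or failure is recorded afterwards; exceptions always propagate, so a failing phase still aborts the workflow. A standalone sketch of that contract (Recorder is a hypothetical stand-in for the SessionTracker interface used above):

import typing as t

class Recorder:
    def __init__(self) -> None:
        self.events: list[str] = []

    def start_task(self, task_id: str, name: str) -> None:
        self.events.append(f"start:{task_id}")

    def complete_task(self, task_id: str, msg: str) -> None:
        self.events.append(f"ok:{task_id}")

    def fail_task(self, task_id: str, err: str) -> None:
        self.events.append(f"fail:{task_id}")

def run_tracked(
    tracker: Recorder | None, task_id: str, name: str, fn: t.Callable[[], None]
) -> None:
    # Same shape as _run_tracked_task above.
    if tracker:
        tracker.start_task(task_id, name)
    try:
        fn()
        if tracker:
            tracker.complete_task(task_id, f"{name} completed")
    except Exception as e:
        if tracker:
            tracker.fail_task(task_id, str(e))
        raise

rec = Recorder()
try:
    run_tracked(rec, "demo", "Demo task", lambda: 1 / 0)
except ZeroDivisionError:
    pass  # the error still reaches the caller
assert rec.events == ["start:demo", "fail:demo"]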
-    def _run_pre_commit_task(self, options: OptionsProtocol) -> None:
-        if not options.skip_hooks:
-            if getattr(options, "ai_agent", False):
-                self.project_manager.run_pre_commit_with_analysis()
-            else:
-                self.project_manager.run_pre_commit()
-        else:
-            self.console.print(
-                "\n[bold bright_yellow]⏭️ Skipping pre-commit hooks...[/bold bright_yellow]\n"
-            )
-            if self.session_tracker:
-                self.session_tracker.skip_task("pre_commit", "Skipped by user request")
-
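getattr(options, "ai_agent", False) tolerates options objects that predate the AI-agent flag instead of raising AttributeError; a minimal illustration with a hypothetical legacy options class:

class LegacyOptions:
    skip_hooks = False  # hypothetical: no ai_agent attribute defined

# Falls back to the default rather than raising.
assert getattr(LegacyOptions(), "ai_agent", False) is False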
-    def _initialize_session_tracking(self, options: OptionsProtocol) -> None:
-        if options.resume_from:
-            try:
-                progress_file = Path(options.resume_from)
-                self.session_tracker = SessionTracker.resume_session(
-                    console=self.console,
-                    progress_file=progress_file,
-                )
-                return
-            except Exception as e:
-                self.console.print(
-                    f"[yellow]Warning: Failed to resume from {options.resume_from}: {e}[/yellow]"
-                )
-                self.session_tracker = None
-                return
-        if options.track_progress:
-            try:
-                auto_tracker = SessionTracker.auto_detect_session(self.console)
-                if auto_tracker:
-                    self.session_tracker = auto_tracker
-                    return
-                progress_file = (
-                    Path(options.progress_file) if options.progress_file else None
-                )
-                try:
-                    from importlib.metadata import version
-
-                    crackerjack_version = version("crackerjack")
-                except (ImportError, ModuleNotFoundError):
-                    crackerjack_version = "unknown"
-                metadata = {
-                    "working_dir": str(self.pkg_path),
-                    "python_version": self.python_version,
-                    "crackerjack_version": crackerjack_version,
-                    "cli_options": str(options),
-                }
-                self.session_tracker = SessionTracker.create_session(
-                    console=self.console,
-                    progress_file=progress_file,
-                    metadata=metadata,
-                )
-            except Exception as e:
-                self.console.print(
-                    f"[yellow]Warning: Failed to initialize session tracking: {e}[/yellow]"
-                )
-                self.session_tracker = None
-
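The inner try resolves the installed crackerjack version for the session metadata. importlib.metadata.version raises PackageNotFoundError, which subclasses ModuleNotFoundError, so the (ImportError, ModuleNotFoundError) catch above does cover the missing-distribution case; the probe as a standalone snippet:

from importlib.metadata import PackageNotFoundError, version

try:
    crackerjack_version = version("crackerjack")
except PackageNotFoundError:  # subclass of ModuleNotFoundError
    crackerjack_version = "unknown"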
-    def process(self, options: OptionsProtocol) -> None:
-        assert self.project_manager is not None
-        self._initialize_session_tracking(options)
-        self.console.print("\n" + "-" * 80)
-        self.console.print(
-            "[bold bright_cyan]⚒️ CRACKERJACKING[/bold bright_cyan] [bold bright_white]Starting workflow execution[/bold bright_white]"
-        )
-        self.console.print("-" * 80 + "\n")
-        if options.all:
-            options.clean = True
-            options.test = True
-            options.publish = options.all
-            options.commit = True
-        self._run_tracked_task(
-            "setup", "Initialize project structure", self._setup_package
-        )
-        self._run_tracked_task(
-            "update_project",
-            "Update project configuration",
-            lambda: self._update_project(options),
-        )
-        self._run_tracked_task(
-            "update_precommit",
-            "Update pre-commit hooks",
-            lambda: self._update_precommit(options),
-        )
-        self._run_tracked_task(
-            "update_docs",
-            "Update documentation templates",
-            lambda: self._update_docs(options),
-        )
-        self._run_tracked_task(
-            "clean_project", "Clean project code", lambda: self._clean_project(options)
-        )
-        if self.project_manager is not None:
-            self.project_manager.options = options
-        if not options.skip_hooks:
-            self._run_tracked_task(
-                "pre_commit",
-                "Run pre-commit hooks",
-                lambda: self._run_pre_commit_task(options),
-            )
-        else:
-            self._run_pre_commit_task(options)
-        self._run_tracked_task(
-            "run_tests", "Execute test suite", lambda: self._run_tests(options)
-        )
-        self._run_tracked_task(
-            "quality_checks",
-            "Run comprehensive quality checks",
-            lambda: self._run_comprehensive_quality_checks(options),
-        )
-        self._run_tracked_task(
-            "bump_version", "Bump version numbers", lambda: self._bump_version(options)
-        )
-        self._run_tracked_task(
-            "commit_push",
-            "Commit and push changes",
-            lambda: self._commit_and_push(options),
-        )
-        self._run_tracked_task(
-            "publish", "Publish project", lambda: self._publish_project(options)
-        )
-        self.console.print("\n" + "-" * 80)
-        self.console.print(
-            "[bold bright_green]🏆 CRACKERJACK COMPLETE[/bold bright_green] [bold bright_white]Workflow completed successfully![/bold bright_white]"
-        )
-        self.console.print("-" * 80 + "\n")
-
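Note how --all expands at the top of process(): cleaning, tests, and commit become plain booleans, while options.publish receives the value of options.all itself, suggesting all carries a bump level rather than a flag. An illustration under that assumption (field values hypothetical):

class Opts:
    all = "patch"  # hypothetical bump level
    clean = test = commit = False
    publish = None

o = Opts()
if o.all:
    o.clean = True
    o.test = True
    o.publish = o.all  # the bump level flows through to publishing
    o.commit = True
assert o.publish == "patch"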
-    async def process_async(self, options: OptionsProtocol) -> None:
-        assert self.project_manager is not None
-        self.console.print("\n" + "-" * 80)
-        self.console.print(
-            "[bold bright_cyan]⚒️ CRACKERJACKING[/bold bright_cyan] [bold bright_white]Starting workflow execution (async optimized)[/bold bright_white]"
-        )
-        self.console.print("-" * 80 + "\n")
-        if options.all:
-            options.clean = True
-            options.test = True
-            options.publish = options.all
-            options.commit = True
-        self._setup_package()
-        self._update_project(options)
-        self._update_precommit(options)
-        await self._clean_project_async(options)
-        if self.project_manager is not None:
-            self.project_manager.options = options
-        if not options.skip_hooks:
-            if getattr(options, "ai_agent", False):
-                await self.project_manager.run_pre_commit_with_analysis_async()
-            else:
-                await self.project_manager.run_pre_commit_async()
-        else:
-            self.console.print(
-                "\n[bold bright_yellow]⏭️ Skipping pre-commit hooks...[/bold bright_yellow]\n"
-            )
-        await self._run_tests_async(options)
-        await self._run_comprehensive_quality_checks_async(options)
-        self._bump_version(options)
-        self._commit_and_push(options)
-        self._publish_project(options)
-        self.console.print("\n" + "-" * 80)
-        self.console.print(
-            "[bold bright_green]🏆 CRACKERJACK COMPLETE[/bold bright_green] [bold bright_white]Workflow completed successfully![/bold bright_white]"
-        )
-        self.console.print("-" * 80 + "\n")
-
-
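Unlike process(), process_async() calls each phase directly: there is no _initialize_session_tracking call and no _run_tracked_task wrapping, so async runs produce no session progress records. A caller wanting parity could mirror the wrapper for coroutines; a hedged sketch (not part of this release):

import typing as t

async def run_tracked_async(
    tracker: t.Any,
    task_id: str,
    name: str,
    coro_func: t.Callable[[], t.Awaitable[None]],
) -> None:
    # Async mirror of _run_tracked_task above: same start/complete/fail
    # bookkeeping, exceptions still propagate.
    if tracker:
        tracker.start_task(task_id, name)
    try:
        await coro_func()
        if tracker:
            tracker.complete_task(task_id, f"{name} completed")
    except Exception as e:
        if tracker:
            tracker.fail_task(task_id, str(e))
        raise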
-crackerjack_it = Crackerjack().process
-
-
-def create_crackerjack_runner(
-    console: Console | None = None,
-    our_path: Path | None = None,
-    pkg_path: Path | None = None,
-    python_version: str = default_python_version,
-    dry_run: bool = False,
-) -> Crackerjack:
-    return Crackerjack(
-        console=console or Console(force_terminal=True),
-        our_path=our_path or Path(__file__).parent,
-        pkg_path=pkg_path or Path.cwd(),
-        python_version=python_version,
-        dry_run=dry_run,
-    )
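crackerjack_it binds the process method of a Crackerjack instance constructed at import time, whereas create_crackerjack_runner builds one on demand with explicit defaults. A hedged usage sketch of the factory (argument values illustrative):

from pathlib import Path
from rich.console import Console

runner = create_crackerjack_runner(
    console=Console(force_terminal=True),
    pkg_path=Path.cwd(),
    dry_run=True,
)
# runner.process(options) then drives the synchronous workflow end to end.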