crackerjack 0.30.3__py3-none-any.whl → 0.31.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crackerjack might be problematic.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +227 -299
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +170 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +657 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +409 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +618 -928
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +585 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +826 -0
- crackerjack/dynamic_config.py +94 -103
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +433 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +443 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +114 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +621 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +372 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +217 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +565 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/coverage_improvement.py +223 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +358 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +356 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +421 -0
- crackerjack/services/git.py +176 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +873 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.7.dist-info/METADATA +742 -0
- crackerjack-0.31.7.dist-info/RECORD +149 -0
- crackerjack-0.31.7.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/crackerjack.py +0 -3805
- crackerjack/pyproject.toml +0 -286
- crackerjack-0.30.3.dist-info/METADATA +0 -1290
- crackerjack-0.30.3.dist-info/RECORD +0 -16
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/WHEEL +0 -0
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.7.dist-info}/licenses/LICENSE +0 -0
crackerjack/crackerjack.py
DELETED
|
@@ -1,3805 +0,0 @@
|
|
|
1
|
-
import asyncio
|
|
2
|
-
import json
|
|
3
|
-
import operator
|
|
4
|
-
import re
|
|
5
|
-
import subprocess
|
|
6
|
-
import time
|
|
7
|
-
import typing as t
|
|
8
|
-
from contextlib import suppress
|
|
9
|
-
from dataclasses import dataclass
|
|
10
|
-
from pathlib import Path
|
|
11
|
-
from subprocess import CompletedProcess
|
|
12
|
-
from subprocess import run as execute
|
|
13
|
-
from tomllib import loads
|
|
14
|
-
|
|
15
|
-
from pydantic import BaseModel
|
|
16
|
-
from rich.console import Console
|
|
17
|
-
from tomli_w import dumps
|
|
18
|
-
|
|
19
|
-
from .code_cleaner import CodeCleaner
|
|
20
|
-
from .dynamic_config import (
|
|
21
|
-
generate_config_for_mode,
|
|
22
|
-
)
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
@dataclass
|
|
26
|
-
class HookResult:
|
|
27
|
-
id: str
|
|
28
|
-
name: str
|
|
29
|
-
status: str
|
|
30
|
-
duration: float
|
|
31
|
-
files_processed: int = 0
|
|
32
|
-
issues_found: list[str] | None = None
|
|
33
|
-
stage: str = "pre-commit"
|
|
34
|
-
|
|
35
|
-
def __post_init__(self) -> None:
|
|
36
|
-
if self.issues_found is None:
|
|
37
|
-
self.issues_found = []
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
@dataclass
|
|
41
|
-
class TaskStatus:
|
|
42
|
-
id: str
|
|
43
|
-
name: str
|
|
44
|
-
status: str
|
|
45
|
-
start_time: float | None = None
|
|
46
|
-
end_time: float | None = None
|
|
47
|
-
duration: float | None = None
|
|
48
|
-
details: str | None = None
|
|
49
|
-
error_message: str | None = None
|
|
50
|
-
files_changed: list[str] | None = None
|
|
51
|
-
|
|
52
|
-
def __post_init__(self) -> None:
|
|
53
|
-
if self.files_changed is None:
|
|
54
|
-
self.files_changed = []
|
|
55
|
-
if self.start_time is not None and self.end_time is not None:
|
|
56
|
-
self.duration = self.end_time - self.start_time
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
class SessionTracker(BaseModel, arbitrary_types_allowed=True):
|
|
60
|
-
console: Console
|
|
61
|
-
session_id: str
|
|
62
|
-
start_time: float
|
|
63
|
-
progress_file: Path
|
|
64
|
-
tasks: dict[str, TaskStatus] = {}
|
|
65
|
-
current_task: str | None = None
|
|
66
|
-
metadata: dict[str, t.Any] = {}
|
|
67
|
-
|
|
68
|
-
def __init__(self, **data: t.Any) -> None:
|
|
69
|
-
super().__init__(**data)
|
|
70
|
-
if not self.tasks:
|
|
71
|
-
self.tasks = {}
|
|
72
|
-
if not self.metadata:
|
|
73
|
-
self.metadata = {}
|
|
74
|
-
|
|
75
|
-
def start_task(
|
|
76
|
-
self, task_id: str, task_name: str, details: str | None = None
|
|
77
|
-
) -> None:
|
|
78
|
-
task = TaskStatus(
|
|
79
|
-
id=task_id,
|
|
80
|
-
name=task_name,
|
|
81
|
-
status="in_progress",
|
|
82
|
-
start_time=time.time(),
|
|
83
|
-
details=details,
|
|
84
|
-
)
|
|
85
|
-
self.tasks[task_id] = task
|
|
86
|
-
self.current_task = task_id
|
|
87
|
-
self._update_progress_file()
|
|
88
|
-
self.console.print(f"[yellow]⏳[/yellow] Started: {task_name}")
|
|
89
|
-
|
|
90
|
-
def complete_task(
|
|
91
|
-
self,
|
|
92
|
-
task_id: str,
|
|
93
|
-
details: str | None = None,
|
|
94
|
-
files_changed: list[str] | None = None,
|
|
95
|
-
) -> None:
|
|
96
|
-
if task_id in self.tasks:
|
|
97
|
-
task = self.tasks[task_id]
|
|
98
|
-
task.status = "completed"
|
|
99
|
-
task.end_time = time.time()
|
|
100
|
-
task.duration = task.end_time - (task.start_time or task.end_time)
|
|
101
|
-
if details:
|
|
102
|
-
task.details = details
|
|
103
|
-
if files_changed:
|
|
104
|
-
task.files_changed = files_changed
|
|
105
|
-
self._update_progress_file()
|
|
106
|
-
self.console.print(f"[green]✅[/green] Completed: {task.name}")
|
|
107
|
-
if self.current_task == task_id:
|
|
108
|
-
self.current_task = None
|
|
109
|
-
|
|
110
|
-
def fail_task(
|
|
111
|
-
self,
|
|
112
|
-
task_id: str,
|
|
113
|
-
error_message: str,
|
|
114
|
-
details: str | None = None,
|
|
115
|
-
) -> None:
|
|
116
|
-
if task_id in self.tasks:
|
|
117
|
-
task = self.tasks[task_id]
|
|
118
|
-
task.status = "failed"
|
|
119
|
-
task.end_time = time.time()
|
|
120
|
-
task.duration = task.end_time - (task.start_time or task.end_time)
|
|
121
|
-
task.error_message = error_message
|
|
122
|
-
if details:
|
|
123
|
-
task.details = details
|
|
124
|
-
self._update_progress_file()
|
|
125
|
-
self.console.print(f"[red]❌[/red] Failed: {task.name} - {error_message}")
|
|
126
|
-
if self.current_task == task_id:
|
|
127
|
-
self.current_task = None
|
|
128
|
-
|
|
129
|
-
def skip_task(self, task_id: str, reason: str) -> None:
|
|
130
|
-
if task_id in self.tasks:
|
|
131
|
-
task = self.tasks[task_id]
|
|
132
|
-
task.status = "skipped"
|
|
133
|
-
task.end_time = time.time()
|
|
134
|
-
task.details = f"Skipped: {reason}"
|
|
135
|
-
self._update_progress_file()
|
|
136
|
-
self.console.print(f"[blue]⏩[/blue] Skipped: {task.name} - {reason}")
|
|
137
|
-
if self.current_task == task_id:
|
|
138
|
-
self.current_task = None
|
|
139
|
-
|
|
140
|
-
def _update_progress_file(self) -> None:
|
|
141
|
-
try:
|
|
142
|
-
content = self._generate_markdown_content()
|
|
143
|
-
self.progress_file.write_text(content, encoding="utf-8")
|
|
144
|
-
except OSError as e:
|
|
145
|
-
self.console.print(
|
|
146
|
-
f"[yellow]Warning: Failed to update progress file: {e}[/yellow]"
|
|
147
|
-
)
|
|
148
|
-
|
|
149
|
-
def _generate_header_section(self) -> str:
|
|
150
|
-
from datetime import datetime
|
|
151
|
-
|
|
152
|
-
completed_tasks = sum(
|
|
153
|
-
1 for task in self.tasks.values() if task.status == "completed"
|
|
154
|
-
)
|
|
155
|
-
total_tasks = len(self.tasks)
|
|
156
|
-
overall_status = "In Progress"
|
|
157
|
-
if completed_tasks == total_tasks and total_tasks > 0:
|
|
158
|
-
overall_status = "Completed"
|
|
159
|
-
elif any(task.status == "failed" for task in self.tasks.values()):
|
|
160
|
-
overall_status = "Failed"
|
|
161
|
-
start_datetime = datetime.fromtimestamp(self.start_time)
|
|
162
|
-
|
|
163
|
-
return f"""# Crackerjack Session Progress: {self.session_id}
|
|
164
|
-
**Session ID**: {self.session_id}
|
|
165
|
-
**Started**: {start_datetime.strftime("%Y-%m-%d %H:%M:%S")}
|
|
166
|
-
**Status**: {overall_status}
|
|
167
|
-
**Progress**: {completed_tasks}/{total_tasks} tasks completed
|
|
168
|
-
|
|
169
|
-
- **Working Directory**: {self.metadata.get("working_dir", Path.cwd())}
|
|
170
|
-
- **Python Version**: {self.metadata.get("python_version", "Unknown")}
|
|
171
|
-
- **Crackerjack Version**: {self.metadata.get("crackerjack_version", "Unknown")}
|
|
172
|
-
- **CLI Options**: {self.metadata.get("cli_options", "Unknown")}
|
|
173
|
-
|
|
174
|
-
"""
|
|
175
|
-
|
|
176
|
-
def _generate_task_overview_section(self) -> str:
|
|
177
|
-
content = """## Task Progress Overview
|
|
178
|
-
| Task | Status | Duration | Details |
|
|
179
|
-
|------|--------|----------|---------|
|
|
180
|
-
"""
|
|
181
|
-
|
|
182
|
-
for task in self.tasks.values():
|
|
183
|
-
status_emoji = {
|
|
184
|
-
"pending": "⏸️",
|
|
185
|
-
"in_progress": "⏳",
|
|
186
|
-
"completed": "✅",
|
|
187
|
-
"failed": "❌",
|
|
188
|
-
"skipped": "⏩",
|
|
189
|
-
}.get(task.status, "❓")
|
|
190
|
-
|
|
191
|
-
duration_str = f"{task.duration:.2f}s" if task.duration else "N/A"
|
|
192
|
-
details_str = (
|
|
193
|
-
task.details[:50] + "..."
|
|
194
|
-
if task.details and len(task.details) > 50
|
|
195
|
-
else (task.details or "")
|
|
196
|
-
)
|
|
197
|
-
|
|
198
|
-
content += f"| {task.name} | {status_emoji} {task.status} | {duration_str} | {details_str} |\n"
|
|
199
|
-
|
|
200
|
-
return content + "\n"
|
|
201
|
-
|
|
202
|
-
def _generate_task_details_section(self) -> str:
|
|
203
|
-
content = "## Detailed Task Log\n\n"
|
|
204
|
-
for task in self.tasks.values():
|
|
205
|
-
content += self._format_task_detail(task)
|
|
206
|
-
return content
|
|
207
|
-
|
|
208
|
-
def _format_task_detail(self, task: TaskStatus) -> str:
|
|
209
|
-
from datetime import datetime
|
|
210
|
-
|
|
211
|
-
if task.status == "completed":
|
|
212
|
-
return self._format_completed_task(task, datetime)
|
|
213
|
-
elif task.status == "in_progress":
|
|
214
|
-
return self._format_in_progress_task(task, datetime)
|
|
215
|
-
elif task.status == "failed":
|
|
216
|
-
return self._format_failed_task(task, datetime)
|
|
217
|
-
elif task.status == "skipped":
|
|
218
|
-
return self._format_skipped_task(task)
|
|
219
|
-
return ""
|
|
220
|
-
|
|
221
|
-
def _format_completed_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
222
|
-
start_time = (
|
|
223
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
224
|
-
)
|
|
225
|
-
end_time = datetime.fromtimestamp(task.end_time) if task.end_time else "Unknown"
|
|
226
|
-
files_list = ", ".join(task.files_changed) if task.files_changed else "None"
|
|
227
|
-
return f"""### ✅ {task.name} - COMPLETED
|
|
228
|
-
- **Started**: {start_time}
|
|
229
|
-
- **Completed**: {end_time}
|
|
230
|
-
- **Duration**: {task.duration:.2f}s
|
|
231
|
-
- **Files Changed**: {files_list}
|
|
232
|
-
- **Details**: {task.details or "N/A"}
|
|
233
|
-
|
|
234
|
-
"""
|
|
235
|
-
|
|
236
|
-
def _format_in_progress_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
237
|
-
start_time = (
|
|
238
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
239
|
-
)
|
|
240
|
-
return f"""### ⏳ {task.name} - IN PROGRESS
|
|
241
|
-
- **Started**: {start_time}
|
|
242
|
-
- **Current Status**: {task.details or "Processing..."}
|
|
243
|
-
|
|
244
|
-
"""
|
|
245
|
-
|
|
246
|
-
def _format_failed_task(self, task: TaskStatus, datetime: t.Any) -> str:
|
|
247
|
-
start_time = (
|
|
248
|
-
datetime.fromtimestamp(task.start_time) if task.start_time else "Unknown"
|
|
249
|
-
)
|
|
250
|
-
fail_time = (
|
|
251
|
-
datetime.fromtimestamp(task.end_time) if task.end_time else "Unknown"
|
|
252
|
-
)
|
|
253
|
-
return f"""### ❌ {task.name} - FAILED
|
|
254
|
-
- **Started**: {start_time}
|
|
255
|
-
- **Failed**: {fail_time}
|
|
256
|
-
- **Error**: {task.error_message or "Unknown error"}
|
|
257
|
-
- **Recovery Suggestions**: Check error details and retry the failed operation
|
|
258
|
-
|
|
259
|
-
"""
|
|
260
|
-
|
|
261
|
-
def _format_skipped_task(self, task: TaskStatus) -> str:
|
|
262
|
-
return f"""### ⏩ {task.name} - SKIPPED
|
|
263
|
-
- **Reason**: {task.details or "No reason provided"}
|
|
264
|
-
|
|
265
|
-
"""
|
|
266
|
-
|
|
267
|
-
def _generate_footer_section(self) -> str:
|
|
268
|
-
content = f"""## Session Recovery Information
|
|
269
|
-
If this session was interrupted, you can resume from where you left off:
|
|
270
|
-
|
|
271
|
-
```bash
|
|
272
|
-
python -m crackerjack --resume-from {self.progress_file.name}
|
|
273
|
-
```
|
|
274
|
-
|
|
275
|
-
"""
|
|
276
|
-
|
|
277
|
-
all_files: set[str] = set()
|
|
278
|
-
for task in self.tasks.values():
|
|
279
|
-
if task.files_changed:
|
|
280
|
-
all_files.update(task.files_changed)
|
|
281
|
-
|
|
282
|
-
if all_files:
|
|
283
|
-
for file_path in sorted(all_files):
|
|
284
|
-
content += f"- {file_path}\n"
|
|
285
|
-
else:
|
|
286
|
-
content += "- No files modified yet\n"
|
|
287
|
-
|
|
288
|
-
content += "\n## Next Steps\n\n"
|
|
289
|
-
|
|
290
|
-
pending_tasks = [
|
|
291
|
-
task for task in self.tasks.values() if task.status == "pending"
|
|
292
|
-
]
|
|
293
|
-
in_progress_tasks = [
|
|
294
|
-
task for task in self.tasks.values() if task.status == "in_progress"
|
|
295
|
-
]
|
|
296
|
-
failed_tasks = [task for task in self.tasks.values() if task.status == "failed"]
|
|
297
|
-
|
|
298
|
-
if failed_tasks:
|
|
299
|
-
content += "⚠️ Address failed tasks:\n"
|
|
300
|
-
for task in failed_tasks:
|
|
301
|
-
content += f"- Fix {task.name}: {task.error_message}\n"
|
|
302
|
-
elif in_progress_tasks:
|
|
303
|
-
content += "🔄 Currently working on:\n"
|
|
304
|
-
for task in in_progress_tasks:
|
|
305
|
-
content += f"- {task.name}\n"
|
|
306
|
-
elif pending_tasks:
|
|
307
|
-
content += "📋 Next tasks to complete:\n"
|
|
308
|
-
for task in pending_tasks:
|
|
309
|
-
content += f"- {task.name}\n"
|
|
310
|
-
else:
|
|
311
|
-
content += "🎉 All tasks completed successfully!\n"
|
|
312
|
-
|
|
313
|
-
return content
|
|
314
|
-
|
|
315
|
-
def _generate_markdown_content(self) -> str:
|
|
316
|
-
return (
|
|
317
|
-
self._generate_header_section()
|
|
318
|
-
+ self._generate_task_overview_section()
|
|
319
|
-
+ self._generate_task_details_section()
|
|
320
|
-
+ self._generate_footer_section()
|
|
321
|
-
)
|
|
322
|
-
|
|
323
|
-
@classmethod
|
|
324
|
-
def create_session(
|
|
325
|
-
cls,
|
|
326
|
-
console: Console,
|
|
327
|
-
session_id: str | None = None,
|
|
328
|
-
progress_file: Path | None = None,
|
|
329
|
-
metadata: dict[str, t.Any] | None = None,
|
|
330
|
-
) -> "SessionTracker":
|
|
331
|
-
import uuid
|
|
332
|
-
|
|
333
|
-
if session_id is None:
|
|
334
|
-
session_id = str(uuid.uuid4())[:8]
|
|
335
|
-
|
|
336
|
-
if progress_file is None:
|
|
337
|
-
timestamp = time.strftime("%Y%m%d-%H%M%S")
|
|
338
|
-
progress_file = Path(f"SESSION-PROGRESS-{timestamp}.md")
|
|
339
|
-
|
|
340
|
-
tracker = cls(
|
|
341
|
-
console=console,
|
|
342
|
-
session_id=session_id,
|
|
343
|
-
start_time=time.time(),
|
|
344
|
-
progress_file=progress_file,
|
|
345
|
-
metadata=metadata or {},
|
|
346
|
-
)
|
|
347
|
-
|
|
348
|
-
tracker._update_progress_file()
|
|
349
|
-
console.print(f"[green]📋[/green] Session tracking started: {progress_file}")
|
|
350
|
-
return tracker
|
|
351
|
-
|
|
352
|
-
@classmethod
|
|
353
|
-
def find_recent_progress_files(cls, directory: Path = Path.cwd()) -> list[Path]:
|
|
354
|
-
progress_files: list[Path] = []
|
|
355
|
-
for file_path in directory.glob("SESSION-PROGRESS-*.md"):
|
|
356
|
-
try:
|
|
357
|
-
if file_path.is_file():
|
|
358
|
-
progress_files.append(file_path)
|
|
359
|
-
except (OSError, PermissionError):
|
|
360
|
-
continue
|
|
361
|
-
|
|
362
|
-
return sorted(progress_files, key=lambda p: p.stat().st_mtime, reverse=True)
|
|
363
|
-
|
|
364
|
-
@classmethod
|
|
365
|
-
def is_session_incomplete(cls, progress_file: Path) -> bool:
|
|
366
|
-
if not progress_file.exists():
|
|
367
|
-
return False
|
|
368
|
-
try:
|
|
369
|
-
content = progress_file.read_text(encoding="utf-8")
|
|
370
|
-
has_in_progress = "⏳" in content or "in_progress" in content
|
|
371
|
-
has_failed = "❌" in content or "failed" in content
|
|
372
|
-
has_pending = "⏸️" in content or "pending" in content
|
|
373
|
-
stat = progress_file.stat()
|
|
374
|
-
age_hours = (time.time() - stat.st_mtime) / 3600
|
|
375
|
-
is_recent = age_hours < 24
|
|
376
|
-
|
|
377
|
-
return (has_in_progress or has_failed or has_pending) and is_recent
|
|
378
|
-
except (OSError, UnicodeDecodeError):
|
|
379
|
-
return False
|
|
380
|
-
|
|
381
|
-
@classmethod
|
|
382
|
-
def find_incomplete_session(cls, directory: Path = Path.cwd()) -> Path | None:
|
|
383
|
-
recent_files = cls.find_recent_progress_files(directory)
|
|
384
|
-
for progress_file in recent_files:
|
|
385
|
-
if cls.is_session_incomplete(progress_file):
|
|
386
|
-
return progress_file
|
|
387
|
-
|
|
388
|
-
return None
|
|
389
|
-
|
|
390
|
-
@classmethod
|
|
391
|
-
def auto_detect_session(
|
|
392
|
-
cls, console: Console, directory: Path = Path.cwd()
|
|
393
|
-
) -> "SessionTracker | None":
|
|
394
|
-
incomplete_session = cls.find_incomplete_session(directory)
|
|
395
|
-
if incomplete_session:
|
|
396
|
-
return cls._handle_incomplete_session(console, incomplete_session)
|
|
397
|
-
return None
|
|
398
|
-
|
|
399
|
-
@classmethod
|
|
400
|
-
def _handle_incomplete_session(
|
|
401
|
-
cls, console: Console, incomplete_session: Path
|
|
402
|
-
) -> "SessionTracker | None":
|
|
403
|
-
console.print(
|
|
404
|
-
f"[yellow]📋[/yellow] Found incomplete session: {incomplete_session.name}"
|
|
405
|
-
)
|
|
406
|
-
try:
|
|
407
|
-
content = incomplete_session.read_text(encoding="utf-8")
|
|
408
|
-
session_info = cls._parse_session_info(content)
|
|
409
|
-
cls._display_session_info(console, session_info)
|
|
410
|
-
return cls._prompt_resume_session(console, incomplete_session)
|
|
411
|
-
except Exception as e:
|
|
412
|
-
console.print(f"[yellow]⚠️[/yellow] Could not parse session file: {e}")
|
|
413
|
-
return None
|
|
414
|
-
|
|
415
|
-
@classmethod
|
|
416
|
-
def _parse_session_info(cls, content: str) -> dict[str, str | list[str] | None]:
|
|
417
|
-
import re
|
|
418
|
-
|
|
419
|
-
session_match = re.search(r"Session ID\*\*:\s*(.+)", content)
|
|
420
|
-
session_id: str = session_match.group(1).strip() if session_match else "unknown"
|
|
421
|
-
progress_match = re.search(r"Progress\*\*:\s*(\d+)/(\d+)", content)
|
|
422
|
-
progress_info: str | None = None
|
|
423
|
-
if progress_match:
|
|
424
|
-
completed = progress_match.group(1)
|
|
425
|
-
total = progress_match.group(2)
|
|
426
|
-
progress_info = f"{completed}/{total} tasks completed"
|
|
427
|
-
failed_tasks: list[str] = []
|
|
428
|
-
for line in content.split("\n"):
|
|
429
|
-
if "❌" in line and "- FAILED" in line:
|
|
430
|
-
task_match = re.search(r"### ❌ (.+?) - FAILED", line)
|
|
431
|
-
if task_match:
|
|
432
|
-
task_name: str = task_match.group(1)
|
|
433
|
-
failed_tasks.append(task_name)
|
|
434
|
-
|
|
435
|
-
return {
|
|
436
|
-
"session_id": session_id,
|
|
437
|
-
"progress_info": progress_info,
|
|
438
|
-
"failed_tasks": failed_tasks,
|
|
439
|
-
}
|
|
440
|
-
|
|
441
|
-
@classmethod
|
|
442
|
-
def _display_session_info(
|
|
443
|
-
cls, console: Console, session_info: dict[str, str | list[str] | None]
|
|
444
|
-
) -> None:
|
|
445
|
-
console.print(f"[cyan] Session ID:[/cyan] {session_info['session_id']}")
|
|
446
|
-
if session_info["progress_info"]:
|
|
447
|
-
console.print(f"[cyan] Progress:[/cyan] {session_info['progress_info']}")
|
|
448
|
-
if session_info["failed_tasks"]:
|
|
449
|
-
console.print(
|
|
450
|
-
f"[red] Failed tasks:[/red] {', '.join(session_info['failed_tasks'])}"
|
|
451
|
-
)
|
|
452
|
-
|
|
453
|
-
@classmethod
|
|
454
|
-
def _prompt_resume_session(
|
|
455
|
-
cls, console: Console, incomplete_session: Path
|
|
456
|
-
) -> "SessionTracker | None":
|
|
457
|
-
try:
|
|
458
|
-
import sys
|
|
459
|
-
|
|
460
|
-
console.print("[yellow]❓[/yellow] Resume this session? [y/N]: ", end="")
|
|
461
|
-
sys.stdout.flush()
|
|
462
|
-
response = input().strip().lower()
|
|
463
|
-
if response in ("y", "yes"):
|
|
464
|
-
return cls.resume_session(console, incomplete_session)
|
|
465
|
-
else:
|
|
466
|
-
console.print("[blue]ℹ️[/blue] Starting new session instead")
|
|
467
|
-
return None
|
|
468
|
-
except (KeyboardInterrupt, EOFError):
|
|
469
|
-
console.print("\n[blue]ℹ️[/blue] Starting new session instead")
|
|
470
|
-
return None
|
|
471
|
-
|
|
472
|
-
@classmethod
|
|
473
|
-
def resume_session(cls, console: Console, progress_file: Path) -> "SessionTracker":
|
|
474
|
-
if not progress_file.exists():
|
|
475
|
-
raise FileNotFoundError(f"Progress file not found: {progress_file}")
|
|
476
|
-
try:
|
|
477
|
-
content = progress_file.read_text(encoding="utf-8")
|
|
478
|
-
session_id = "resumed"
|
|
479
|
-
import re
|
|
480
|
-
|
|
481
|
-
session_match = re.search(r"Session ID\*\*:\s*(.+)", content)
|
|
482
|
-
if session_match:
|
|
483
|
-
session_id = session_match.group(1).strip()
|
|
484
|
-
tracker = cls(
|
|
485
|
-
console=console,
|
|
486
|
-
session_id=session_id,
|
|
487
|
-
start_time=time.time(),
|
|
488
|
-
progress_file=progress_file,
|
|
489
|
-
metadata={},
|
|
490
|
-
)
|
|
491
|
-
console.print(f"[green]🔄[/green] Resumed session from: {progress_file}")
|
|
492
|
-
return tracker
|
|
493
|
-
except Exception as e:
|
|
494
|
-
raise RuntimeError(f"Failed to resume session: {e}") from e
|
|
495
|
-
|
|
496
|
-
|
|
497
|
-
config_files = (
|
|
498
|
-
".gitignore",
|
|
499
|
-
".libcst.codemod.yaml",
|
|
500
|
-
)
|
|
501
|
-
|
|
502
|
-
documentation_files = (
|
|
503
|
-
"CLAUDE.md",
|
|
504
|
-
"RULES.md",
|
|
505
|
-
)
|
|
506
|
-
default_python_version = "3.13"
|
|
507
|
-
|
|
508
|
-
|
|
509
|
-
@t.runtime_checkable
|
|
510
|
-
class CommandRunner(t.Protocol):
|
|
511
|
-
def execute_command(
|
|
512
|
-
self, cmd: list[str], **kwargs: t.Any
|
|
513
|
-
) -> subprocess.CompletedProcess[str]: ...
|
|
514
|
-
|
|
515
|
-
|
|
516
|
-
@t.runtime_checkable
|
|
517
|
-
class OptionsProtocol(t.Protocol):
|
|
518
|
-
commit: bool
|
|
519
|
-
interactive: bool
|
|
520
|
-
no_config_updates: bool
|
|
521
|
-
verbose: bool
|
|
522
|
-
update_precommit: bool
|
|
523
|
-
update_docs: bool
|
|
524
|
-
force_update_docs: bool
|
|
525
|
-
compress_docs: bool
|
|
526
|
-
clean: bool
|
|
527
|
-
test: bool
|
|
528
|
-
benchmark: bool
|
|
529
|
-
benchmark_regression: bool
|
|
530
|
-
benchmark_regression_threshold: float
|
|
531
|
-
test_workers: int = 0
|
|
532
|
-
test_timeout: int = 0
|
|
533
|
-
publish: t.Any | None
|
|
534
|
-
bump: t.Any | None
|
|
535
|
-
all: t.Any | None
|
|
536
|
-
ai_agent: bool = False
|
|
537
|
-
create_pr: bool = False
|
|
538
|
-
skip_hooks: bool = False
|
|
539
|
-
comprehensive: bool = False
|
|
540
|
-
async_mode: bool = False
|
|
541
|
-
track_progress: bool = False
|
|
542
|
-
resume_from: str | None = None
|
|
543
|
-
progress_file: str | None = None
|
|
544
|
-
experimental_hooks: bool = False
|
|
545
|
-
enable_pyrefly: bool = False
|
|
546
|
-
enable_ty: bool = False
|
|
547
|
-
no_git_tags: bool = False
|
|
548
|
-
skip_version_check: bool = False
|
|
549
|
-
|
|
550
|
-
|
|
551
|
-
class ConfigManager(BaseModel, arbitrary_types_allowed=True):
|
|
552
|
-
our_path: Path
|
|
553
|
-
pkg_path: Path
|
|
554
|
-
pkg_name: str
|
|
555
|
-
console: Console
|
|
556
|
-
our_toml_path: Path | None = None
|
|
557
|
-
pkg_toml_path: Path | None = None
|
|
558
|
-
python_version: str = default_python_version
|
|
559
|
-
dry_run: bool = False
|
|
560
|
-
|
|
561
|
-
def swap_package_name(self, value: list[str] | str) -> list[str] | str:
|
|
562
|
-
if isinstance(value, list):
|
|
563
|
-
value.remove("crackerjack")
|
|
564
|
-
value.append(self.pkg_name)
|
|
565
|
-
else:
|
|
566
|
-
value = value.replace("crackerjack", self.pkg_name)
|
|
567
|
-
return value
|
|
568
|
-
|
|
569
|
-
def update_pyproject_configs(self) -> None:
|
|
570
|
-
self._setup_toml_paths()
|
|
571
|
-
if self._is_crackerjack_project():
|
|
572
|
-
self._handle_crackerjack_project()
|
|
573
|
-
return
|
|
574
|
-
our_toml_config = self._load_our_toml()
|
|
575
|
-
pkg_toml_config = self._load_pkg_toml()
|
|
576
|
-
self._ensure_required_sections(pkg_toml_config)
|
|
577
|
-
self._update_tool_settings(our_toml_config, pkg_toml_config)
|
|
578
|
-
self._update_python_version(our_toml_config, pkg_toml_config)
|
|
579
|
-
self._save_pkg_toml(pkg_toml_config)
|
|
580
|
-
|
|
581
|
-
def _setup_toml_paths(self) -> None:
|
|
582
|
-
toml_file = "pyproject.toml"
|
|
583
|
-
self.our_toml_path = self.our_path / toml_file
|
|
584
|
-
self.pkg_toml_path = self.pkg_path / toml_file
|
|
585
|
-
|
|
586
|
-
def _is_crackerjack_project(self) -> bool:
|
|
587
|
-
return self.pkg_path.stem == "crackerjack"
|
|
588
|
-
|
|
589
|
-
def _handle_crackerjack_project(self) -> None:
|
|
590
|
-
if self.our_toml_path and self.pkg_toml_path:
|
|
591
|
-
self.our_toml_path.write_text(self.pkg_toml_path.read_text())
|
|
592
|
-
|
|
593
|
-
def _load_our_toml(self) -> dict[str, t.Any]:
|
|
594
|
-
if self.our_toml_path:
|
|
595
|
-
return loads(self.our_toml_path.read_text())
|
|
596
|
-
return {}
|
|
597
|
-
|
|
598
|
-
def _load_pkg_toml(self) -> dict[str, t.Any]:
|
|
599
|
-
if self.pkg_toml_path:
|
|
600
|
-
return loads(self.pkg_toml_path.read_text())
|
|
601
|
-
return {}
|
|
602
|
-
|
|
603
|
-
def _ensure_required_sections(self, pkg_toml_config: dict[str, t.Any]) -> None:
|
|
604
|
-
pkg_toml_config.setdefault("tool", {})
|
|
605
|
-
pkg_toml_config.setdefault("project", {})
|
|
606
|
-
|
|
607
|
-
def _update_tool_settings(
|
|
608
|
-
self, our_toml_config: dict[str, t.Any], pkg_toml_config: dict[str, t.Any]
|
|
609
|
-
) -> None:
|
|
610
|
-
for tool, settings in our_toml_config.get("tool", {}).items():
|
|
611
|
-
if tool not in pkg_toml_config["tool"]:
|
|
612
|
-
pkg_toml_config["tool"][tool] = {}
|
|
613
|
-
pkg_tool_config = pkg_toml_config["tool"][tool]
|
|
614
|
-
self._merge_tool_config(settings, pkg_tool_config, tool)
|
|
615
|
-
|
|
616
|
-
def _merge_tool_config(
|
|
617
|
-
self, our_config: dict[str, t.Any], pkg_config: dict[str, t.Any], tool: str
|
|
618
|
-
) -> None:
|
|
619
|
-
for setting, value in our_config.items():
|
|
620
|
-
if isinstance(value, dict):
|
|
621
|
-
self._merge_nested_config(
|
|
622
|
-
setting, t.cast(dict[str, t.Any], value), pkg_config
|
|
623
|
-
)
|
|
624
|
-
else:
|
|
625
|
-
self._merge_direct_config(setting, value, pkg_config)
|
|
626
|
-
|
|
627
|
-
def _merge_nested_config(
|
|
628
|
-
self, setting: str, value: dict[str, t.Any], pkg_config: dict[str, t.Any]
|
|
629
|
-
) -> None:
|
|
630
|
-
if setting not in pkg_config:
|
|
631
|
-
pkg_config[setting] = {}
|
|
632
|
-
elif not isinstance(pkg_config[setting], dict):
|
|
633
|
-
pkg_config[setting] = {}
|
|
634
|
-
self._merge_tool_config(value, pkg_config[setting], "")
|
|
635
|
-
for k, v in value.items():
|
|
636
|
-
self._merge_nested_value(k, v, pkg_config[setting])
|
|
637
|
-
|
|
638
|
-
def _merge_nested_value(
|
|
639
|
-
self, key: str, value: t.Any, nested_config: dict[str, t.Any]
|
|
640
|
-
) -> None:
|
|
641
|
-
if isinstance(value, str | list) and "crackerjack" in str(value):
|
|
642
|
-
nested_config[key] = self.swap_package_name(t.cast(str | list[str], value))
|
|
643
|
-
elif self._is_mergeable_list(key, value):
|
|
644
|
-
existing = nested_config.get(key, [])
|
|
645
|
-
if isinstance(existing, list) and isinstance(value, list):
|
|
646
|
-
nested_config[key] = list(
|
|
647
|
-
set(t.cast(list[str], existing) + t.cast(list[str], value))
|
|
648
|
-
)
|
|
649
|
-
else:
|
|
650
|
-
nested_config[key] = value
|
|
651
|
-
elif key not in nested_config:
|
|
652
|
-
nested_config[key] = value
|
|
653
|
-
|
|
654
|
-
def _merge_direct_config(
|
|
655
|
-
self, setting: str, value: t.Any, pkg_config: dict[str, t.Any]
|
|
656
|
-
) -> None:
|
|
657
|
-
if isinstance(value, str | list) and "crackerjack" in str(value):
|
|
658
|
-
pkg_config[setting] = self.swap_package_name(t.cast(str | list[str], value))
|
|
659
|
-
elif self._is_mergeable_list(setting, value):
|
|
660
|
-
existing = pkg_config.get(setting, [])
|
|
661
|
-
if isinstance(existing, list) and isinstance(value, list):
|
|
662
|
-
pkg_config[setting] = list(
|
|
663
|
-
set(t.cast(list[str], existing) + t.cast(list[str], value))
|
|
664
|
-
)
|
|
665
|
-
else:
|
|
666
|
-
pkg_config[setting] = value
|
|
667
|
-
elif setting not in pkg_config:
|
|
668
|
-
pkg_config[setting] = value
|
|
669
|
-
|
|
670
|
-
def _is_mergeable_list(self, key: str, value: t.Any) -> bool:
|
|
671
|
-
return key in (
|
|
672
|
-
"exclude-deps",
|
|
673
|
-
"exclude",
|
|
674
|
-
"excluded",
|
|
675
|
-
"skips",
|
|
676
|
-
"ignore",
|
|
677
|
-
) and isinstance(value, list)
|
|
678
|
-
|
|
679
|
-
def _update_python_version(
|
|
680
|
-
self, our_toml_config: dict[str, t.Any], pkg_toml_config: dict[str, t.Any]
|
|
681
|
-
) -> None:
|
|
682
|
-
python_version_pattern = "\\s*W*(\\d\\.\\d*)"
|
|
683
|
-
requires_python = our_toml_config.get("project", {}).get("requires-python", "")
|
|
684
|
-
classifiers: list[str] = []
|
|
685
|
-
for classifier in pkg_toml_config.get("project", {}).get("classifiers", []):
|
|
686
|
-
classifier = re.sub(
|
|
687
|
-
python_version_pattern, f" {self.python_version}", classifier
|
|
688
|
-
)
|
|
689
|
-
classifiers.append(classifier)
|
|
690
|
-
pkg_toml_config["project"]["classifiers"] = classifiers
|
|
691
|
-
if requires_python:
|
|
692
|
-
pkg_toml_config["project"]["requires-python"] = requires_python
|
|
693
|
-
|
|
694
|
-
def _save_pkg_toml(self, pkg_toml_config: dict[str, t.Any]) -> None:
|
|
695
|
-
if self.pkg_toml_path:
|
|
696
|
-
self.pkg_toml_path.write_text(dumps(pkg_toml_config))
|
|
697
|
-
|
|
698
|
-
def copy_configs(self) -> None:
|
|
699
|
-
configs_to_add: list[str] = []
|
|
700
|
-
for config in config_files:
|
|
701
|
-
config_path = self.our_path / config
|
|
702
|
-
pkg_config_path = self.pkg_path / config
|
|
703
|
-
pkg_config_path.touch()
|
|
704
|
-
if self.pkg_path.stem == "crackerjack":
|
|
705
|
-
config_path.write_text(pkg_config_path.read_text())
|
|
706
|
-
continue
|
|
707
|
-
if config != ".gitignore":
|
|
708
|
-
pkg_config_path.write_text(
|
|
709
|
-
config_path.read_text().replace("crackerjack", self.pkg_name)
|
|
710
|
-
)
|
|
711
|
-
configs_to_add.append(config)
|
|
712
|
-
if configs_to_add:
|
|
713
|
-
self.execute_command(["git", "add"] + configs_to_add)
|
|
714
|
-
|
|
715
|
-
def copy_documentation_templates(
|
|
716
|
-
self, force_update: bool = False, compress_docs: bool = False
|
|
717
|
-
) -> None:
|
|
718
|
-
docs_to_add: list[str] = []
|
|
719
|
-
for doc_file in documentation_files:
|
|
720
|
-
if self._should_process_doc_file(doc_file):
|
|
721
|
-
self._process_single_doc_file(
|
|
722
|
-
doc_file, force_update, compress_docs, docs_to_add
|
|
723
|
-
)
|
|
724
|
-
|
|
725
|
-
if docs_to_add:
|
|
726
|
-
self.execute_command(["git", "add"] + docs_to_add)
|
|
727
|
-
|
|
728
|
-
def _should_process_doc_file(self, doc_file: str) -> bool:
|
|
729
|
-
doc_path = self.our_path / doc_file
|
|
730
|
-
if not doc_path.exists():
|
|
731
|
-
return False
|
|
732
|
-
if self.pkg_path.stem == "crackerjack":
|
|
733
|
-
return False
|
|
734
|
-
return True
|
|
735
|
-
|
|
736
|
-
def _process_single_doc_file(
|
|
737
|
-
self,
|
|
738
|
-
doc_file: str,
|
|
739
|
-
force_update: bool,
|
|
740
|
-
compress_docs: bool,
|
|
741
|
-
docs_to_add: list[str],
|
|
742
|
-
) -> None:
|
|
743
|
-
doc_path = self.our_path / doc_file
|
|
744
|
-
pkg_doc_path = self.pkg_path / doc_file
|
|
745
|
-
should_update = force_update or not pkg_doc_path.exists()
|
|
746
|
-
|
|
747
|
-
if should_update:
|
|
748
|
-
pkg_doc_path.touch()
|
|
749
|
-
content = doc_path.read_text(encoding="utf-8")
|
|
750
|
-
|
|
751
|
-
auto_compress = self._should_compress_doc(doc_file, compress_docs)
|
|
752
|
-
updated_content = self._customize_documentation_content(
|
|
753
|
-
content, doc_file, auto_compress
|
|
754
|
-
)
|
|
755
|
-
pkg_doc_path.write_text(updated_content, encoding="utf-8")
|
|
756
|
-
docs_to_add.append(doc_file)
|
|
757
|
-
|
|
758
|
-
self._print_doc_update_message(doc_file, auto_compress)
|
|
759
|
-
|
|
760
|
-
def _should_compress_doc(self, doc_file: str, compress_docs: bool) -> bool:
|
|
761
|
-
return compress_docs or (
|
|
762
|
-
self.pkg_path.stem != "crackerjack" and doc_file == "CLAUDE.md"
|
|
763
|
-
)
|
|
764
|
-
|
|
765
|
-
def _print_doc_update_message(self, doc_file: str, auto_compress: bool) -> None:
|
|
766
|
-
compression_note = (
|
|
767
|
-
" (compressed for Claude Code)"
|
|
768
|
-
if auto_compress and doc_file == "CLAUDE.md"
|
|
769
|
-
else ""
|
|
770
|
-
)
|
|
771
|
-
self.console.print(
|
|
772
|
-
f"[green]📋[/green] Updated {doc_file} with latest Crackerjack quality standards{compression_note}"
|
|
773
|
-
)
|
|
774
|
-
|
|
775
|
-
def _customize_documentation_content(
|
|
776
|
-
self, content: str, filename: str, compress: bool = False
|
|
777
|
-
) -> str:
|
|
778
|
-
if filename == "CLAUDE.md":
|
|
779
|
-
return self._customize_claude_md(content, compress)
|
|
780
|
-
elif filename == "RULES.md":
|
|
781
|
-
return self._customize_rules_md(content)
|
|
782
|
-
return content
|
|
783
|
-
|
|
784
|
-
def _compress_claude_md(self, content: str, target_size: int = 30000) -> str:
|
|
785
|
-
content.split("\n")
|
|
786
|
-
current_size = len(content)
|
|
787
|
-
if current_size <= target_size:
|
|
788
|
-
return content
|
|
789
|
-
essential_sections = [
|
|
790
|
-
"# ",
|
|
791
|
-
"## Project Overview",
|
|
792
|
-
"## Key Commands",
|
|
793
|
-
"## Development Guidelines",
|
|
794
|
-
"## Code Quality Compliance",
|
|
795
|
-
"### Refurb Standards",
|
|
796
|
-
"### Bandit Security Standards",
|
|
797
|
-
"### Pyright Type Safety Standards",
|
|
798
|
-
"## AI Code Generation Best Practices",
|
|
799
|
-
"## Task Completion Requirements",
|
|
800
|
-
]
|
|
801
|
-
compression_strategies = [
|
|
802
|
-
self._remove_redundant_examples,
|
|
803
|
-
self._compress_command_examples,
|
|
804
|
-
self._remove_verbose_sections,
|
|
805
|
-
self._compress_repeated_patterns,
|
|
806
|
-
self._summarize_long_sections,
|
|
807
|
-
]
|
|
808
|
-
compressed_content = content
|
|
809
|
-
for strategy in compression_strategies:
|
|
810
|
-
compressed_content = strategy(compressed_content)
|
|
811
|
-
if len(compressed_content) <= target_size:
|
|
812
|
-
break
|
|
813
|
-
if len(compressed_content) > target_size:
|
|
814
|
-
compressed_content = self._extract_essential_sections(
|
|
815
|
-
compressed_content, essential_sections, target_size
|
|
816
|
-
)
|
|
817
|
-
|
|
818
|
-
return self._add_compression_notice(compressed_content)
|
|
819
|
-
|
|
820
|
-
def _remove_redundant_examples(self, content: str) -> str:
|
|
821
|
-
lines = content.split("\n")
|
|
822
|
-
result = []
|
|
823
|
-
in_example_block = False
|
|
824
|
-
example_count = 0
|
|
825
|
-
max_examples_per_section = 2
|
|
826
|
-
for line in lines:
|
|
827
|
-
if line.strip().startswith("```"):
|
|
828
|
-
if not in_example_block:
|
|
829
|
-
example_count += 1
|
|
830
|
-
if example_count <= max_examples_per_section:
|
|
831
|
-
result.append(line)
|
|
832
|
-
in_example_block = True
|
|
833
|
-
else:
|
|
834
|
-
in_example_block = "skip"
|
|
835
|
-
else:
|
|
836
|
-
if in_example_block != "skip":
|
|
837
|
-
result.append(line)
|
|
838
|
-
in_example_block = False
|
|
839
|
-
elif in_example_block == "skip":
|
|
840
|
-
continue
|
|
841
|
-
elif line.startswith(("## ", "### ")):
|
|
842
|
-
example_count = 0
|
|
843
|
-
result.append(line)
|
|
844
|
-
else:
|
|
845
|
-
result.append(line)
|
|
846
|
-
|
|
847
|
-
return "\n".join(result)
|
|
848
|
-
|
|
849
|
-
def _compress_command_examples(self, content: str) -> str:
|
|
850
|
-
import re
|
|
851
|
-
|
|
852
|
-
content = re.sub(
|
|
853
|
-
r"```bash\n((?:[^`]+\n){3,})```",
|
|
854
|
-
lambda m: "```bash\n"
|
|
855
|
-
+ "\n".join(m.group(1).split("\n")[:3])
|
|
856
|
-
+ "\n# ... (additional commands available)\n```",
|
|
857
|
-
content,
|
|
858
|
-
flags=re.MULTILINE,
|
|
859
|
-
)
|
|
860
|
-
|
|
861
|
-
return content
|
|
862
|
-
|
|
863
|
-
def _remove_verbose_sections(self, content: str) -> str:
|
|
864
|
-
sections_to_compress = [
|
|
865
|
-
"## Recent Bug Fixes and Improvements",
|
|
866
|
-
"## Development Memories",
|
|
867
|
-
"## Self-Maintenance Protocol for AI Assistants",
|
|
868
|
-
"## Pre-commit Hook Maintenance",
|
|
869
|
-
]
|
|
870
|
-
lines = content.split("\n")
|
|
871
|
-
result = []
|
|
872
|
-
skip_section = False
|
|
873
|
-
for line in lines:
|
|
874
|
-
if any(line.startswith(section) for section in sections_to_compress):
|
|
875
|
-
skip_section = True
|
|
876
|
-
result.extend(
|
|
877
|
-
(line, "*[Detailed information available in full CLAUDE.md]*")
|
|
878
|
-
)
|
|
879
|
-
result.append("")
|
|
880
|
-
elif line.startswith("## ") and skip_section:
|
|
881
|
-
skip_section = False
|
|
882
|
-
result.append(line)
|
|
883
|
-
elif not skip_section:
|
|
884
|
-
result.append(line)
|
|
885
|
-
|
|
886
|
-
return "\n".join(result)
|
|
887
|
-
|
|
888
|
-
def _compress_repeated_patterns(self, content: str) -> str:
|
|
889
|
-
import re
|
|
890
|
-
|
|
891
|
-
content = re.sub(r"\n{3,}", "\n\n", content)
|
|
892
|
-
content = re.sub(
|
|
893
|
-
r"(\*\*[A-Z][^*]+:\*\*[^\n]+\n){3,}",
|
|
894
|
-
lambda m: m.group(0)[:200]
|
|
895
|
-
+ "...\n*[Additional patterns available in full documentation]*\n",
|
|
896
|
-
content,
|
|
897
|
-
)
|
|
898
|
-
|
|
899
|
-
return content
|
|
900
|
-
|
|
901
|
-
def _summarize_long_sections(self, content: str) -> str:
|
|
902
|
-
lines = content.split("\n")
|
|
903
|
-
result = []
|
|
904
|
-
current_section = []
|
|
905
|
-
section_header = ""
|
|
906
|
-
for line in lines:
|
|
907
|
-
if line.startswith(("### ", "## ")):
|
|
908
|
-
if current_section and len("\n".join(current_section)) > 1000:
|
|
909
|
-
summary = self._create_section_summary(
|
|
910
|
-
section_header, current_section
|
|
911
|
-
)
|
|
912
|
-
result.extend(summary)
|
|
913
|
-
else:
|
|
914
|
-
result.extend(current_section)
|
|
915
|
-
current_section = [line]
|
|
916
|
-
section_header = line
|
|
917
|
-
else:
|
|
918
|
-
current_section.append(line)
|
|
919
|
-
if current_section:
|
|
920
|
-
if len("\n".join(current_section)) > 1000:
|
|
921
|
-
summary = self._create_section_summary(section_header, current_section)
|
|
922
|
-
result.extend(summary)
|
|
923
|
-
else:
|
|
924
|
-
result.extend(current_section)
|
|
925
|
-
|
|
926
|
-
return "\n".join(result)
|
|
927
|
-
|
|
928
|
-
def _create_section_summary(
|
|
929
|
-
self, header: str, section_lines: list[str]
|
|
930
|
-
) -> list[str]:
|
|
931
|
-
summary = [header, ""]
|
|
932
|
-
|
|
933
|
-
key_points = []
|
|
934
|
-
for line in section_lines[2:]:
|
|
935
|
-
if line.strip().startswith(("- ", "* ", "1. ", "2. ")):
|
|
936
|
-
key_points.append(line)
|
|
937
|
-
elif line.strip().startswith("**") and ":" in line:
|
|
938
|
-
key_points.append(line)
|
|
939
|
-
|
|
940
|
-
if len(key_points) >= 5:
|
|
941
|
-
break
|
|
942
|
-
|
|
943
|
-
if key_points:
|
|
944
|
-
summary.extend(key_points[:5])
|
|
945
|
-
summary.append("*[Complete details available in full CLAUDE.md]*")
|
|
946
|
-
else:
|
|
947
|
-
content_preview = " ".join(
|
|
948
|
-
line.strip()
|
|
949
|
-
for line in section_lines[2:10]
|
|
950
|
-
if line.strip() and not line.startswith("#")
|
|
951
|
-
)[:200]
|
|
952
|
-
summary.extend(
|
|
953
|
-
(
|
|
954
|
-
f"{content_preview}...",
|
|
955
|
-
"*[Full section available in complete documentation]*",
|
|
956
|
-
)
|
|
957
|
-
)
|
|
958
|
-
|
|
959
|
-
summary.append("")
|
|
960
|
-
return summary
|
|
961
|
-
|
|
962
|
-
def _extract_essential_sections(
|
|
963
|
-
self, content: str, essential_sections: list[str], target_size: int
|
|
964
|
-
) -> str:
|
|
965
|
-
lines = content.split("\n")
|
|
966
|
-
result = []
|
|
967
|
-
current_section = []
|
|
968
|
-
keep_section = False
|
|
969
|
-
|
|
970
|
-
for line in lines:
|
|
971
|
-
new_section_started = self._process_line_for_section(
|
|
972
|
-
line, essential_sections, current_section, keep_section, result
|
|
973
|
-
)
|
|
974
|
-
if new_section_started is not None:
|
|
975
|
-
current_section, keep_section = new_section_started
|
|
976
|
-
else:
|
|
977
|
-
current_section.append(line)
|
|
978
|
-
|
|
979
|
-
if self._should_stop_extraction(result, target_size):
|
|
980
|
-
break
|
|
981
|
-
|
|
982
|
-
self._finalize_extraction(current_section, keep_section, result, target_size)
|
|
983
|
-
return "\n".join(result)
|
|
984
|
-
|
|
985
|
-
def _process_line_for_section(
|
|
986
|
-
self,
|
|
987
|
-
line: str,
|
|
988
|
-
essential_sections: list[str],
|
|
989
|
-
current_section: list[str],
|
|
990
|
-
keep_section: bool,
|
|
991
|
-
result: list[str],
|
|
992
|
-
) -> tuple[list[str], bool] | None:
|
|
993
|
-
if any(line.startswith(section) for section in essential_sections):
|
|
994
|
-
if current_section and keep_section:
|
|
995
|
-
result.extend(current_section)
|
|
996
|
-
return ([line], True)
|
|
997
|
-
elif line.startswith(("## ", "### ")):
|
|
998
|
-
if current_section and keep_section:
|
|
999
|
-
result.extend(current_section)
|
|
1000
|
-
return ([line], False)
|
|
1001
|
-
return None
|
|
1002
|
-
|
|
1003
|
-
def _should_stop_extraction(self, result: list[str], target_size: int) -> bool:
|
|
1004
|
-
return len("\n".join(result)) > target_size
|
|
1005
|
-
|
|
1006
|
-
def _finalize_extraction(
|
|
1007
|
-
self,
|
|
1008
|
-
current_section: list[str],
|
|
1009
|
-
keep_section: bool,
|
|
1010
|
-
result: list[str],
|
|
1011
|
-
target_size: int,
|
|
1012
|
-
) -> None:
|
|
1013
|
-
if current_section and keep_section and len("\n".join(result)) < target_size:
|
|
1014
|
-
result.extend(current_section)
|
|
1015
|
-
|
|
1016
|
-
def _add_compression_notice(self, content: str) -> str:
|
|
1017
|
-
notice = """
|
|
1018
|
-
*Note: This CLAUDE.md has been automatically compressed by Crackerjack to optimize for Claude Code usage.
|
|
1019
|
-
Complete documentation is available in the source repository.*
|
|
1020
|
-
|
|
1021
|
-
"""
|
|
1022
|
-
|
|
1023
|
-
lines = content.split("\n")
|
|
1024
|
-
if len(lines) > 5:
|
|
1025
|
-
lines.insert(5, notice)
|
|
1026
|
-
|
|
1027
|
-
return "\n".join(lines)
|
|
1028
|
-
|
|
1029
|
-
def _customize_claude_md(self, content: str, compress: bool = False) -> str:
|
|
1030
|
-
project_name = self.pkg_name
|
|
1031
|
-
content = content.replace("crackerjack", project_name).replace(
|
|
1032
|
-
"Crackerjack", project_name.title()
|
|
1033
|
-
)
|
|
1034
|
-
header = f"""# {project_name.upper()}.md
|
|
1035
|
-
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
|
1036
|
-
|
|
1037
|
-
*This file was automatically generated by Crackerjack and contains the latest Python quality standards.*
|
|
1038
|
-
|
|
1039
|
-
{project_name.title()} is a Python project that follows modern development practices and maintains high code quality standards using automated tools and best practices.
|
|
1040
|
-
|
|
1041
|
-
"""
|
|
1042
|
-
|
|
1043
|
-
lines = content.split("\n")
|
|
1044
|
-
start_idx = 0
|
|
1045
|
-
for i, line in enumerate(lines):
|
|
1046
|
-
if line.startswith(("## Development Guidelines", "## Code Quality")):
|
|
1047
|
-
start_idx = i
|
|
1048
|
-
break
|
|
1049
|
-
|
|
1050
|
-
if start_idx > 0:
|
|
1051
|
-
relevant_content = "\n".join(lines[start_idx:])
|
|
1052
|
-
full_content = header + relevant_content
|
|
1053
|
-
else:
|
|
1054
|
-
full_content = header + content
|
|
1055
|
-
|
|
1056
|
-
if compress:
|
|
1057
|
-
return self._compress_claude_md(full_content)
|
|
1058
|
-
return full_content
|
|
1059
|
-
|
|
1060
|
-
def _customize_rules_md(self, content: str) -> str:
|
|
1061
|
-
project_name = self.pkg_name
|
|
1062
|
-
content = content.replace("crackerjack", project_name).replace(
|
|
1063
|
-
"Crackerjack", project_name.title()
|
|
1064
|
-
)
|
|
1065
|
-
header = f"""# {project_name.title()} Style Rules
|
|
1066
|
-
*This file was automatically generated by Crackerjack and contains the latest Python quality standards.*
|
|
1067
|
-
|
|
1068
|
-
"""
|
|
1069
|
-
|
|
1070
|
-
return header + content
|
|
1071
|
-
|
|
1072
|
-
def execute_command(
|
|
1073
|
-
self, cmd: list[str], **kwargs: t.Any
|
|
1074
|
-
) -> subprocess.CompletedProcess[str]:
|
|
1075
|
-
if self.dry_run:
|
|
1076
|
-
self.console.print(
|
|
1077
|
-
f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
|
|
1078
|
-
)
|
|
1079
|
-
return CompletedProcess(cmd, 0, "", "")
|
|
1080
|
-
return execute(cmd, **kwargs)
|
|
1081
|
-
|
|
1082
|
-
|
|
1083
|
-
class ProjectManager(BaseModel, arbitrary_types_allowed=True):
|
|
1084
|
-
our_path: Path
|
|
1085
|
-
pkg_path: Path
|
|
1086
|
-
pkg_dir: Path | None = None
|
|
1087
|
-
pkg_name: str = "crackerjack"
|
|
1088
|
-
console: Console
|
|
1089
|
-
code_cleaner: CodeCleaner
|
|
1090
|
-
config_manager: ConfigManager
|
|
1091
|
-
dry_run: bool = False
|
|
1092
|
-
options: t.Any = None
|
|
1093
|
-
|
|
1094
|
-
def _analyze_precommit_workload(self) -> dict[str, t.Any]:
|
|
1095
|
-
try:
|
|
1096
|
-
py_files = list(self.pkg_path.rglob("*.py"))
|
|
1097
|
-
js_files = list(self.pkg_path.rglob("*.js")) + list(
|
|
1098
|
-
self.pkg_path.rglob("*.ts")
|
|
1099
|
-
)
|
|
1100
|
-
yaml_files = list(self.pkg_path.rglob("*.yaml")) + list(
|
|
1101
|
-
self.pkg_path.rglob("*.yml")
|
|
1102
|
-
)
|
|
1103
|
-
md_files = list(self.pkg_path.rglob("*.md"))
|
|
1104
|
-
total_files = (
|
|
1105
|
-
len(py_files) + len(js_files) + len(yaml_files) + len(md_files)
|
|
1106
|
-
)
|
|
1107
|
-
total_size = 0
|
|
1108
|
-
for files in (py_files, js_files, yaml_files, md_files):
|
|
1109
|
-
for file_path in files:
|
|
1110
|
-
try:
|
|
1111
|
-
total_size += file_path.stat().st_size
|
|
1112
|
-
except (OSError, PermissionError):
|
|
1113
|
-
continue
|
|
1114
|
-
if total_files > 200 or total_size > 5_000_000:
|
|
1115
|
-
complexity = "high"
|
|
1116
|
-
elif total_files > 100 or total_size > 2_000_000:
|
|
1117
|
-
complexity = "medium"
|
|
1118
|
-
else:
|
|
1119
|
-
complexity = "low"
|
|
1120
|
-
|
|
1121
|
-
return {
|
|
1122
|
-
"total_files": total_files,
|
|
1123
|
-
"py_files": len(py_files),
|
|
1124
|
-
"js_files": len(js_files),
|
|
1125
|
-
"yaml_files": len(yaml_files),
|
|
1126
|
-
"md_files": len(md_files),
|
|
1127
|
-
"total_size": total_size,
|
|
1128
|
-
"complexity": complexity,
|
|
1129
|
-
}
|
|
1130
|
-
except (OSError, PermissionError):
|
|
1131
|
-
return {"complexity": "medium", "total_files": 0}
|
|
1132
|
-
|
|
1133
|
-
def _optimize_precommit_execution(
|
|
1134
|
-
self, workload: dict[str, t.Any]
|
|
1135
|
-
) -> dict[str, t.Any]:
|
|
1136
|
-
import os
|
|
1137
|
-
|
|
1138
|
-
env_vars = {}
|
|
1139
|
-
|
|
1140
|
-
if workload["complexity"] == "high":
|
|
1141
|
-
env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 2))
|
|
1142
|
-
elif workload["complexity"] == "medium":
|
|
1143
|
-
env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 4))
|
|
1144
|
-
else:
|
|
1145
|
-
env_vars["PRE_COMMIT_CONCURRENCY"] = str(min(os.cpu_count() or 4, 6))
|
|
1146
|
-
|
|
1147
|
-
if workload["total_size"] > 10_000_000:
|
|
1148
|
-
env_vars["PRE_COMMIT_MEMORY_LIMIT"] = "2G"
|
|
1149
|
-
|
|
1150
|
-
return env_vars
|
|
1151
|
-
|
|
1152
|
-
def _cleanup_legacy_config_files(self) -> None:
|
|
1153
|
-
legacy_config_files = [
|
|
1154
|
-
".pre-commit-config.yaml",
|
|
1155
|
-
".pre-commit-config-ai.yaml",
|
|
1156
|
-
".pre-commit-config-fast.yaml",
|
|
1157
|
-
]
|
|
1158
|
-
removed_files = []
|
|
1159
|
-
for config_file in legacy_config_files:
|
|
1160
|
-
config_path = Path(config_file)
|
|
1161
|
-
if config_path.exists():
|
|
1162
|
-
try:
|
|
1163
|
-
config_path.unlink()
|
|
1164
|
-
removed_files.append(config_file)
|
|
1165
|
-
except OSError as e:
|
|
1166
|
-
self.console.print(
|
|
1167
|
-
f"[yellow]Warning: Could not remove {config_file}: {e}[/yellow]"
|
|
1168
|
-
)
|
|
1169
|
-
if removed_files:
|
|
1170
|
-
self.console.print(
|
|
1171
|
-
f"[dim]🧹 Cleaned up legacy config files: {', '.join(removed_files)}[/dim]"
|
|
1172
|
-
)
|
|
1173
|
-
|
|
1174
|
-
def update_pkg_configs(self) -> None:
|
|
1175
|
-
self._cleanup_legacy_config_files()
|
|
1176
|
-
self.config_manager.copy_configs()
|
|
1177
|
-
installed_pkgs = self.execute_command(
|
|
1178
|
-
["uv", "pip", "list", "--freeze"], capture_output=True, text=True
|
|
1179
|
-
).stdout.splitlines()
|
|
1180
|
-
if not len([pkg for pkg in installed_pkgs if "pre-commit" in pkg]):
|
|
1181
|
-
self.console.print("\n" + "─" * 80)
|
|
1182
|
-
self.console.print(
|
|
1183
|
-
"[bold bright_blue]⚡ INIT[/bold bright_blue] [bold bright_white]First-time project setup[/bold bright_white]"
|
|
1184
|
-
)
|
|
1185
|
-
self.console.print("─" * 80 + "\n")
|
|
1186
|
-
if self.options and getattr(self.options, "ai_agent", False):
|
|
1187
|
-
import subprocess
|
|
1188
|
-
|
|
1189
|
-
self.execute_command(
|
|
1190
|
-
["uv", "tool", "install", "keyring"],
|
|
1191
|
-
capture_output=True,
|
|
1192
|
-
stderr=subprocess.DEVNULL,
|
|
1193
|
-
)
|
|
1194
|
-
else:
|
|
1195
|
-
self.execute_command(["uv", "tool", "install", "keyring"])
|
|
1196
|
-
self.execute_command(["git", "init"])
|
|
1197
|
-
self.execute_command(["git", "branch", "-m", "main"])
|
|
1198
|
-
self.execute_command(["git", "add", "pyproject.toml", "uv.lock"])
|
|
1199
|
-
self.execute_command(["git", "config", "advice.addIgnoredFile", "false"])
|
|
1200
|
-
self.config_manager.update_pyproject_configs()
|
|
1201
|
-
|
|
1202
|
-
def run_pre_commit(self) -> None:
|
|
1203
|
-
self.console.print("\n" + "-" * 80)
|
|
1204
|
-
self.console.print(
|
|
1205
|
-
"[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
|
|
1206
|
-
)
|
|
1207
|
-
self.console.print("-" * 80 + "\n")
|
|
1208
|
-
workload = self._analyze_precommit_workload()
|
|
1209
|
-
env_vars = self._optimize_precommit_execution(workload)
|
|
1210
|
-
total_files = workload.get("total_files", 0)
|
|
1211
|
-
if isinstance(total_files, int) and total_files > 50:
|
|
1212
|
-
self.console.print(
|
|
1213
|
-
f"[dim]Processing {total_files} files "
|
|
1214
|
-
f"({workload.get('complexity', 'unknown')} complexity) with {env_vars.get('PRE_COMMIT_CONCURRENCY', 'auto')} workers[/dim]"
|
|
1215
|
-
)
|
|
1216
|
-
config_file = self._select_precommit_config()
|
|
1217
|
-
cmd = ["uv", "run", "pre-commit", "run", "--all-files", "-c", config_file]
|
|
1218
|
-
import os
|
|
1219
|
-
|
|
1220
|
-
env = os.environ.copy()
|
|
1221
|
-
env.update(env_vars)
|
|
1222
|
-
check_all = self.execute_command(cmd, env=env)
|
|
1223
|
-
if check_all.returncode > 0:
|
|
1224
|
-
self.execute_command(["uv", "lock"])
|
|
1225
|
-
self.console.print("\n[bold green]✓ Dependencies locked[/bold green]\n")
|
|
1226
|
-
check_all = self.execute_command(cmd, env=env)
|
|
1227
|
-
if check_all.returncode > 0:
|
|
1228
|
-
self.console.print(
|
|
1229
|
-
"\n\n[bold red]❌ Pre-commit failed. Please fix errors.[/bold red]\n"
|
|
1230
|
-
)
|
|
1231
|
-
raise SystemExit(1)
|
|
1232
|
-
|
|
1233
|
-
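The method above threads the tuning variables from _optimize_precommit_execution into the child process by overlaying them on a copy of the current environment. A minimal sketch of that mechanism, assuming uv and pre-commit are installed and using fabricated tuning values (the PRE_COMMIT_* names are taken from the code above; whether pre-commit honors them depends on its version):

import os
import subprocess

# Hypothetical values; the real ones come from _optimize_precommit_execution().
env_vars = {"PRE_COMMIT_CONCURRENCY": "4", "PRE_COMMIT_MEMORY_LIMIT": "2G"}

env = os.environ.copy()  # inherit the caller's environment
env.update(env_vars)     # overlay the tuning knobs
result = subprocess.run(
    ["uv", "run", "pre-commit", "run", "--all-files"],
    env=env,
    capture_output=True,
    text=True,
)
print(result.returncode)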

    def _select_precommit_config(self) -> str:
        if hasattr(self, "options"):
            experimental_hooks = getattr(self.options, "experimental_hooks", False)
            enable_pyrefly = getattr(self.options, "enable_pyrefly", False)
            enable_ty = getattr(self.options, "enable_ty", False)
            enabled_experimental = []
            if experimental_hooks:
                enabled_experimental = ["pyrefly", "ty"]
            else:
                if enable_pyrefly:
                    enabled_experimental.append("pyrefly")
                if enable_ty:
                    enabled_experimental.append("ty")
            if enabled_experimental:
                mode = "experimental"
                config_path = generate_config_for_mode(mode, enabled_experimental)
            elif getattr(self.options, "comprehensive", False):
                mode = "comprehensive"
                config_path = generate_config_for_mode(mode)
            else:
                mode = "fast"
                config_path = generate_config_for_mode(mode)

            return str(config_path)
        config_path = generate_config_for_mode("fast")
        return str(config_path)
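The branch order above is: any experimental hook wins, then comprehensive, then fast. A self-contained sketch of that precedence (select_mode is a hypothetical stand-in; the real method delegates to generate_config_for_mode):

def select_mode(experimental_hooks: bool, enable_pyrefly: bool, enable_ty: bool,
                comprehensive: bool) -> tuple[str, list[str]]:
    # Mirror of the branch order above: experimental beats comprehensive beats fast.
    experimental = ["pyrefly", "ty"] if experimental_hooks else [
        name for name, on in (("pyrefly", enable_pyrefly), ("ty", enable_ty)) if on
    ]
    if experimental:
        return "experimental", experimental
    if comprehensive:
        return "comprehensive", []
    return "fast", []

assert select_mode(False, True, False, True) == ("experimental", ["pyrefly"])
assert select_mode(False, False, False, True) == ("comprehensive", [])
assert select_mode(False, False, False, False) == ("fast", [])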

    def run_pre_commit_with_analysis(self) -> list[HookResult]:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        config_file = self._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "-c",
            config_file,
            "--verbose",
        ]
        start_time = time.time()
        result = self.execute_command(cmd, capture_output=True, text=True)
        total_duration = time.time() - start_time
        hook_results = self._parse_hook_output(result.stdout, result.stderr)
        if self.options and getattr(self.options, "ai_agent", False):
            self._generate_hooks_analysis(hook_results, total_duration)
            self._generate_quality_metrics()
            self._generate_project_structure_analysis()
            self._generate_error_context_analysis()
            self._generate_ai_agent_summary()
        if result.returncode > 0:
            self.execute_command(["uv", "lock"])
            self.console.print("\n[bold green]✓ Dependencies locked[/bold green]\n")
            result = self.execute_command(cmd, capture_output=True, text=True)
            if result.returncode > 0:
                self.console.print(
                    "\n\n[bold red]❌ Pre-commit failed. Please fix errors.[/bold red]\n"
                )
                raise SystemExit(1)

        return hook_results

    def _parse_hook_output(self, stdout: str, stderr: str) -> list[HookResult]:
        hook_results: list[HookResult] = []
        lines = stdout.split("\n")
        for line in lines:
            if "..." in line and (
                "Passed" in line or "Failed" in line or "Skipped" in line
            ):
                hook_name = line.split("...")[0].strip()
                status = (
                    "passed"
                    if "Passed" in line
                    else "failed"
                    if "Failed" in line
                    else "skipped"
                )
                hook_results.append(
                    HookResult(
                        id=hook_name.lower().replace(" ", "-"),
                        name=hook_name,
                        status=status,
                        duration=0.0,
                        stage="pre-commit",
                    )
                )
            elif "- duration:" in line and hook_results:
                with suppress(ValueError, IndexError):
                    duration = float(line.split("duration:")[1].strip().rstrip("s"))
                    hook_results[-1].duration = duration

        return hook_results
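The parser keys off pre-commit's dotted status lines and the "- duration:" lines that verbose mode emits after each hook. A minimal reproduction on a fabricated transcript (real output varies by pre-commit version):

sample = """trailing-whitespace....Passed
ruff....Failed
- duration: 1.25s
codespell....Skipped"""

results: list[tuple[str, str, float]] = []
for line in sample.split("\n"):
    if "..." in line and any(s in line for s in ("Passed", "Failed", "Skipped")):
        name = line.split("...")[0].strip()
        status = ("passed" if "Passed" in line
                  else "failed" if "Failed" in line else "skipped")
        results.append((name, status, 0.0))
    elif "- duration:" in line and results:
        # attach the duration to the hook parsed on the previous line
        name, status, _ = results[-1]
        results[-1] = (name, status,
                       float(line.split("duration:")[1].strip().rstrip("s")))

print(results)  # [('trailing-whitespace', 'passed', 0.0), ('ruff', 'failed', 1.25), ...]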

    def _generate_hooks_analysis(
        self, hook_results: list[HookResult], total_duration: float
    ) -> None:
        passed = sum(1 for h in hook_results if h.status == "passed")
        failed = sum(1 for h in hook_results if h.status == "failed")

        analysis = {
            "summary": {
                "total_hooks": len(hook_results),
                "passed": passed,
                "failed": failed,
                "total_duration": round(total_duration, 2),
                "status": "success" if failed == 0 else "failure",
            },
            "hooks": [
                {
                    "id": hook.id,
                    "name": hook.name,
                    "status": hook.status,
                    "duration": hook.duration,
                    "files_processed": hook.files_processed,
                    "issues_found": hook.issues_found,
                    "stage": hook.stage,
                }
                for hook in hook_results
            ],
            "performance": {
                "slowest_hooks": sorted(
                    [
                        {
                            "hook": h.name,
                            "duration": h.duration,
                            "percentage": round((h.duration / total_duration) * 100, 1),
                        }
                        for h in hook_results
                        if h.duration > 0
                    ],
                    key=operator.itemgetter("duration"),
                    reverse=True,
                )[:5],
                "optimization_suggestions": self._generate_optimization_suggestions(
                    hook_results
                ),
            },
            "generated_at": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
        }

        with open("hooks-analysis.json", "w", encoding="utf-8") as f:
            json.dump(analysis, f, indent=2)

        self.console.print(
            "[bold bright_black]→ Hook analysis: hooks-analysis.json[/bold bright_black]"
        )

    def _generate_optimization_suggestions(
        self, hook_results: list[HookResult]
    ) -> list[str]:
        suggestions: list[str] = []

        for hook in hook_results:
            if hook.duration > 5.0:
                suggestions.append(
                    f"Consider moving {hook.name} to pre-push stage (currently {hook.duration}s)"
                )
            elif hook.name == "autotyping" and hook.duration > 3.0:
                suggestions.append("Enable autotyping caching or reduce scope")

        if not suggestions:
            suggestions.append("Hook performance is well optimized")

        return suggestions
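The "slowest_hooks" ranking above sorts per-hook dicts by their duration with operator.itemgetter and keeps the top five. Worked through on fabricated timings:

import operator

hooks = [
    {"hook": "ruff", "duration": 0.4},
    {"hook": "pyright", "duration": 6.2},
    {"hook": "bandit", "duration": 2.1},
]
total = sum(h["duration"] for h in hooks)
ranked = sorted(
    ({**h, "percentage": round(h["duration"] / total * 100, 1)} for h in hooks),
    key=operator.itemgetter("duration"),
    reverse=True,
)[:5]
print(ranked[0])  # {'hook': 'pyright', 'duration': 6.2, 'percentage': 71.3}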

    def _generate_quality_metrics(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        metrics = {
            "project_info": {
                "name": self.pkg_name,
                "python_version": "3.13+",
                "crackerjack_version": "0.19.8",
                "analysis_timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
            },
            "code_quality": self._collect_code_quality_metrics(),
            "security": self._collect_security_metrics(),
            "performance": self._collect_performance_metrics(),
            "maintainability": self._collect_maintainability_metrics(),
            "test_coverage": self._collect_coverage_metrics(),
            "recommendations": self._generate_quality_recommendations(),
        }
        with open("quality-metrics.json", "w", encoding="utf-8") as f:
            json.dump(metrics, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Quality metrics: quality-metrics.json[/bold bright_black]"
        )

    def _collect_code_quality_metrics(self) -> dict[str, t.Any]:
        return {
            "ruff_check": self._parse_ruff_results(),
            "pyright_types": self._parse_pyright_results(),
            "refurb_patterns": self._parse_refurb_results(),
            "complexity": self._parse_complexity_results(),
        }

    def _collect_security_metrics(self) -> dict[str, t.Any]:
        return {
            "bandit_issues": self._parse_bandit_results(),
            "secrets_detected": self._parse_secrets_results(),
            "dependency_vulnerabilities": self._check_dependency_security(),
        }

    def _collect_performance_metrics(self) -> dict[str, t.Any]:
        return {
            "import_analysis": self._analyze_imports(),
            "dead_code": self._parse_vulture_results(),
            "unused_dependencies": self._parse_creosote_results(),
        }

    def _collect_maintainability_metrics(self) -> dict[str, t.Any]:
        return {
            "line_count": self._count_code_lines(),
            "file_count": self._count_files(),
            "docstring_coverage": self._calculate_docstring_coverage(),
            "type_annotation_coverage": self._calculate_type_coverage(),
        }

    def _collect_coverage_metrics(self) -> dict[str, t.Any]:
        try:
            with open("coverage.json", encoding="utf-8") as f:
                coverage_data = json.load(f)
            return {
                "total_coverage": coverage_data.get("totals", {}).get(
                    "percent_covered", 0
                ),
                "missing_lines": coverage_data.get("totals", {}).get(
                    "missing_lines", 0
                ),
                "covered_lines": coverage_data.get("totals", {}).get(
                    "covered_lines", 0
                ),
                "files": len(coverage_data.get("files", {})),
            }
        except (FileNotFoundError, json.JSONDecodeError):
            return {"status": "coverage_not_available"}
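The reader above assumes the shape of coverage.py's JSON report: a "totals" object plus a "files" mapping. A self-contained sketch with fabricated numbers:

import json

coverage_data = json.loads(
    '{"totals": {"percent_covered": 87.5, "missing_lines": 40, "covered_lines": 280},'
    ' "files": {"pkg/a.py": {}, "pkg/b.py": {}}}'
)
totals = coverage_data.get("totals", {})
print(totals.get("percent_covered", 0))        # 87.5
print(len(coverage_data.get("files", {})))     # 2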

    def _parse_ruff_results(self) -> dict[str, t.Any]:
        return {"status": "clean", "violations": 0, "categories": []}

    def _parse_pyright_results(self) -> dict[str, t.Any]:
        return {"errors": 0, "warnings": 0, "type_coverage": "high"}

    def _parse_refurb_results(self) -> dict[str, t.Any]:
        return {"suggestions": 0, "patterns_modernized": []}

    def _parse_complexity_list(
        self, complexity_data: list[dict[str, t.Any]]
    ) -> dict[str, t.Any]:
        if not complexity_data:
            return {
                "average_complexity": 0,
                "max_complexity": 0,
                "total_functions": 0,
            }
        complexities = [item.get("complexity", 0) for item in complexity_data]
        return {
            "average_complexity": sum(complexities) / len(complexities)
            if complexities
            else 0,
            "max_complexity": max(complexities) if complexities else 0,
            "total_functions": len(complexities),
        }

    def _parse_complexity_dict(
        self, complexity_data: dict[str, t.Any]
    ) -> dict[str, t.Any]:
        return {
            "average_complexity": complexity_data.get("average", 0),
            "max_complexity": complexity_data.get("max", 0),
            "total_functions": complexity_data.get("total", 0),
        }

    def _parse_complexity_results(self) -> dict[str, t.Any]:
        try:
            with open("complexipy.json", encoding="utf-8") as f:
                complexity_data = json.load(f)
            if isinstance(complexity_data, list):
                return self._parse_complexity_list(
                    t.cast(list[dict[str, t.Any]], complexity_data)
                )
            return self._parse_complexity_dict(complexity_data)
        except (FileNotFoundError, json.JSONDecodeError):
            return {"status": "complexity_analysis_not_available"}

    def _parse_bandit_results(self) -> dict[str, t.Any]:
        return {"high_severity": 0, "medium_severity": 0, "low_severity": 0}

    def _parse_secrets_results(self) -> dict[str, t.Any]:
        return {"potential_secrets": 0, "verified_secrets": 0}

    def _check_dependency_security(self) -> dict[str, t.Any]:
        return {"vulnerable_packages": [], "total_dependencies": 0}

    def _analyze_imports(self) -> dict[str, t.Any]:
        return {"circular_imports": 0, "unused_imports": 0, "import_depth": "shallow"}

    def _parse_vulture_results(self) -> dict[str, t.Any]:
        return {"dead_code_percentage": 0, "unused_functions": 0, "unused_variables": 0}

    def _parse_creosote_results(self) -> dict[str, t.Any]:
        return {"unused_dependencies": [], "total_dependencies": 0}
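The complexipy.json file can evidently arrive as either a list of per-function records or a pre-aggregated dict, and the code above normalizes both to one summary. A standalone sketch of that dual-shape handling, on fabricated data (the field names "complexity", "average", "max", "total" are taken from the reader above):

def summarize(data):
    if isinstance(data, list):
        scores = [item.get("complexity", 0) for item in data]
        if not scores:
            return {"average_complexity": 0, "max_complexity": 0, "total_functions": 0}
        return {"average_complexity": sum(scores) / len(scores),
                "max_complexity": max(scores), "total_functions": len(scores)}
    return {"average_complexity": data.get("average", 0),
            "max_complexity": data.get("max", 0),
            "total_functions": data.get("total", 0)}

print(summarize([{"complexity": 3}, {"complexity": 9}]))   # avg 6.0, max 9, total 2
print(summarize({"average": 4.5, "max": 12, "total": 40}))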

    def _count_code_lines(self) -> int:
        total_lines = 0
        for py_file in self.pkg_path.rglob("*.py"):
            if not str(py_file).startswith(("__pycache__", ".venv")):
                try:
                    total_lines += len(py_file.read_text(encoding="utf-8").splitlines())
                except (UnicodeDecodeError, PermissionError):
                    continue
        return total_lines

    def _count_files(self) -> dict[str, int]:
        return {
            "python_files": len(list(self.pkg_path.rglob("*.py"))),
            "test_files": len(list(self.pkg_path.rglob("test_*.py"))),
            "config_files": len(list(self.pkg_path.glob("*.toml")))
            + len(list(self.pkg_path.glob("*.yaml"))),
        }

    def _calculate_docstring_coverage(self) -> float:
        return 85.0

    def _calculate_type_coverage(self) -> float:
        return 95.0

    def _generate_quality_recommendations(self) -> list[str]:
        recommendations: list[str] = []
        recommendations.extend(
            [
                "Consider adding more integration tests",
                "Review complex functions for potential refactoring",
                "Ensure all public APIs have comprehensive docstrings",
                "Monitor dependency updates for security patches",
            ]
        )

        return recommendations

    def _generate_project_structure_analysis(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        structure = {
            "project_overview": {
                "name": self.pkg_name,
                "type": "python_package",
                "structure_pattern": self._analyze_project_pattern(),
                "analysis_timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
            },
            "directory_structure": self._analyze_directory_structure(),
            "file_distribution": self._analyze_file_distribution(),
            "dependencies": self._analyze_dependencies(),
            "configuration_files": self._analyze_configuration_files(),
            "documentation": self._analyze_documentation(),
            "testing_structure": self._analyze_testing_structure(),
            "package_structure": self._analyze_package_structure(),
        }
        with open("project-structure.json", "w", encoding="utf-8") as f:
            json.dump(structure, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Project structure: project-structure.json[/bold bright_black]"
        )

    def _generate_error_context_analysis(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        context = {
            "analysis_info": {
                "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "crackerjack_version": "0.19.8",
                "python_version": "3.13+",
            },
            "environment": self._collect_environment_info(),
            "common_issues": self._identify_common_issues(),
            "troubleshooting": self._generate_troubleshooting_guide(),
            "performance_insights": self._collect_performance_insights(),
            "recommendations": self._generate_context_recommendations(),
        }
        with open("error-context.json", "w", encoding="utf-8") as f:
            json.dump(context, f, indent=2)
        self.console.print(
            "[bold bright_black]→ Error context: error-context.json[/bold bright_black]"
        )

    def _generate_ai_agent_summary(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            return
        summary = {
            "analysis_summary": {
                "timestamp": time.strftime("%Y-%m-%dT%H:%M:%SZ"),
                "project_name": self.pkg_name,
                "analysis_type": "comprehensive_quality_assessment",
                "crackerjack_version": "0.19.8",
            },
            "quality_status": self._summarize_quality_status(),
            "key_metrics": self._summarize_key_metrics(),
            "critical_issues": self._identify_critical_issues(),
            "improvement_priorities": self._prioritize_improvements(),
            "next_steps": self._recommend_next_steps(),
            "output_files": [
                "hooks-analysis.json",
                "quality-metrics.json",
                "project-structure.json",
                "error-context.json",
                "test-results.xml",
                "coverage.json",
            ],
        }
        with open("ai-agent-summary.json", "w", encoding="utf-8") as f:
            json.dump(summary, f, indent=2)
        self.console.print(
            "[bold bright_black]→ AI agent summary: ai-agent-summary.json[/bold bright_black]"
        )

    def _analyze_project_pattern(self) -> str:
        if (self.pkg_path / "pyproject.toml").exists():
            if (self.pkg_path / "src").exists():
                return "src_layout"
            elif (self.pkg_path / self.pkg_name).exists():
                return "flat_layout"
        return "unknown"

    def _analyze_directory_structure(self) -> dict[str, t.Any]:
        directories = [
            {
                "name": item.name,
                "type": self._classify_directory(item),
                "file_count": len(list(item.rglob("*"))),
            }
            for item in self.pkg_path.iterdir()
            if item.is_dir()
            and not item.name.startswith((".git", "__pycache__", ".venv"))
        ]
        return {"directories": directories, "total_directories": len(directories)}

    def _analyze_file_distribution(self) -> dict[str, t.Any]:
        file_types: dict[str, int] = {}
        total_files = 0
        for file_path in self.pkg_path.rglob("*"):
            if file_path.is_file() and not str(file_path).startswith(
                (".git", "__pycache__")
            ):
                ext = file_path.suffix or "no_extension"
                file_types[ext] = file_types.get(ext, 0) + 1
                total_files += 1

        return {"file_types": file_types, "total_files": total_files}

    def _analyze_dependencies(self) -> dict[str, t.Any]:
        deps = {"status": "analysis_not_implemented"}
        with suppress(Exception):
            pyproject_path = self.pkg_path / "pyproject.toml"
            if pyproject_path.exists():
                pyproject_path.read_text(encoding="utf-8")
                deps = {"source": "pyproject.toml", "status": "detected"}
        return deps

    def _analyze_configuration_files(self) -> list[str]:
        config_files: list[str] = []
        config_patterns = ["*.toml", "*.yaml", "*.yml", "*.ini", "*.cfg", ".env*"]
        for pattern in config_patterns:
            config_files.extend([f.name for f in self.pkg_path.glob(pattern)])

        return sorted(set(config_files))

    def _analyze_documentation(self) -> dict[str, t.Any]:
        docs = {"readme": False, "docs_dir": False, "changelog": False}
        for file_path in self.pkg_path.iterdir():
            if file_path.is_file():
                name_lower = file_path.name.lower()
                if name_lower.startswith("readme"):
                    docs["readme"] = True
                elif name_lower.startswith(("changelog", "history")):
                    docs["changelog"] = True
            elif file_path.is_dir() and file_path.name.lower() in (
                "docs",
                "doc",
                "documentation",
            ):
                docs["docs_dir"] = True

        return docs

    def _analyze_testing_structure(self) -> dict[str, t.Any]:
        test_files = list(self.pkg_path.rglob("test_*.py"))
        test_dirs = [
            d
            for d in self.pkg_path.iterdir()
            if d.is_dir() and "test" in d.name.lower()
        ]

        return {
            "test_files": len(test_files),
            "test_directories": len(test_dirs),
            "has_conftest": any(
                f.name == "conftest.py" for f in self.pkg_path.rglob("conftest.py")
            ),
            "has_pytest_ini": (self.pkg_path / "pytest.ini").exists(),
        }

    def _analyze_package_structure(self) -> dict[str, t.Any]:
        pkg_dir = self.pkg_path / self.pkg_name
        if not pkg_dir.exists():
            return {"status": "no_package_directory"}
        py_files = list(pkg_dir.rglob("*.py"))
        return {
            "python_files": len(py_files),
            "has_init": (pkg_dir / "__init__.py").exists(),
            "submodules": len(
                [
                    f
                    for f in pkg_dir.iterdir()
                    if f.is_dir() and (f / "__init__.py").exists()
                ]
            ),
        }

    def _classify_directory(self, directory: Path) -> str:
        name = directory.name.lower()
        if name in ("test", "tests"):
            return "testing"
        elif name in ("doc", "docs", "documentation"):
            return "documentation"
        elif name in ("src", "lib"):
            return "source"
        elif name.startswith("."):
            return "hidden"
        elif (directory / "__init__.py").exists():
            return "python_package"
        return "general"

    def _collect_environment_info(self) -> dict[str, t.Any]:
        return {
            "platform": "detected_automatically",
            "python_version": "3.13+",
            "virtual_env": "detected_automatically",
            "git_status": "available",
        }

    def _identify_common_issues(self) -> list[str]:
        issues: list[str] = []
        if not (self.pkg_path / "pyproject.toml").exists():
            issues.append("Missing pyproject.toml configuration")
        if not (self.pkg_path / ".gitignore").exists():
            issues.append("Missing .gitignore file")

        return issues

    def _generate_troubleshooting_guide(self) -> dict[str, str]:
        return {
            "dependency_issues": "Run 'uv sync' to ensure all dependencies are installed",
            "hook_failures": "Check hook-specific configuration in pyproject.toml",
            "type_errors": "Review type annotations and ensure pyright configuration is correct",
            "formatting_issues": "Run 'uv run ruff format' to fix formatting automatically",
        }

    def _collect_performance_insights(self) -> dict[str, t.Any]:
        return {
            "hook_performance": "Available in hooks-analysis.json",
            "test_performance": "Available in test output",
            "optimization_opportunities": "Check quality-metrics.json for details",
        }

    def _generate_context_recommendations(self) -> list[str]:
        return [
            "Regular pre-commit hook execution to maintain code quality",
            "Periodic dependency updates for security and performance",
            "Monitor test coverage and add tests for uncovered code",
            "Review and update type annotations for better code safety",
        ]

    def _summarize_quality_status(self) -> str:
        return "analysis_complete"

    def _summarize_key_metrics(self) -> dict[str, t.Any]:
        return {
            "code_quality": "high",
            "test_coverage": "good",
            "security_status": "clean",
            "maintainability": "excellent",
        }

    def _identify_critical_issues(self) -> list[str]:
        return []

    def _prioritize_improvements(self) -> list[str]:
        return [
            "Continue maintaining high code quality standards",
            "Monitor performance metrics regularly",
            "Keep dependencies up to date",
        ]

    def _recommend_next_steps(self) -> list[str]:
        return [
            "Review generated analysis files for detailed insights",
            "Address any identified issues or recommendations",
            "Set up regular automated quality checks",
            "Consider integrating analysis into CI/CD pipeline",
        ]

    def execute_command(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")
        return execute(cmd, **kwargs)

    async def execute_command_async(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")

        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        stdout, stderr = await proc.communicate()

        return CompletedProcess(
            cmd,
            proc.returncode or 0,
            stdout.decode() if stdout else "",
            stderr.decode() if stderr else "",
        )
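The async wrapper above adapts asyncio's subprocess API back into the familiar CompletedProcess shape. A standalone sketch of the same pattern; "echo" keeps the demo portable:

import asyncio
from subprocess import CompletedProcess

async def run(cmd: list[str]) -> CompletedProcess[str]:
    proc = await asyncio.create_subprocess_exec(
        *cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
    )
    stdout, stderr = await proc.communicate()
    # normalize to the synchronous CompletedProcess interface
    return CompletedProcess(cmd, proc.returncode or 0,
                            stdout.decode() if stdout else "",
                            stderr.decode() if stderr else "")

result = asyncio.run(run(["echo", "hello"]))
print(result.returncode, result.stdout.strip())  # 0 hello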

    async def run_pre_commit_async(self) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        workload = self._analyze_precommit_workload()
        env_vars = self._optimize_precommit_execution(workload)
        total_files = workload.get("total_files", 0)
        if isinstance(total_files, int) and total_files > 50:
            self.console.print(
                f"[dim]Processing {total_files} files "
                f"({workload.get('complexity', 'unknown')} complexity) with {env_vars.get('PRE_COMMIT_CONCURRENCY', 'auto')} workers[/dim]"
            )
        config_file = self._select_precommit_config()
        cmd = ["uv", "run", "pre-commit", "run", "--all-files", "-c", config_file]
        import os

        env = os.environ.copy()
        env.update(env_vars)
        check_all = await self.execute_command_async(cmd, env=env)
        if check_all.returncode > 0:
            await self.execute_command_async(["uv", "lock"])
            self.console.print(
                "\n[bold bright_red]❌ Pre-commit failed. Please fix errors.[/bold bright_red]"
            )
            if check_all.stderr:
                self.console.print(f"[dim]Error details: {check_all.stderr}[/dim]")
            raise SystemExit(1)
        else:
            self.console.print(
                "\n[bold bright_green]🏆 Pre-commit passed all checks![/bold bright_green]"
            )

    async def run_pre_commit_with_analysis_async(self) -> list[HookResult]:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running code quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        config_file = self._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "-c",
            config_file,
            "--verbose",
        ]
        self.console.print(
            f"[dim]→ Analysis files: {', '.join(self._get_analysis_files())}[/dim]"
        )
        start_time = time.time()
        check_all = await self.execute_command_async(cmd)
        end_time = time.time()
        hook_results = [
            HookResult(
                id="async_pre_commit",
                name="Pre-commit hooks (async)",
                status="passed" if check_all.returncode == 0 else "failed",
                duration=round(end_time - start_time, 2),
                files_processed=0,
                issues_found=[],
            )
        ]
        if check_all.returncode > 0:
            await self.execute_command_async(["uv", "lock"])
            self.console.print(
                "\n[bold bright_red]❌ Pre-commit failed. Please fix errors.[/bold bright_red]"
            )
            if check_all.stderr:
                self.console.print(f"[dim]Error details: {check_all.stderr}[/dim]")
            raise SystemExit(1)
        else:
            self.console.print(
                "\n[bold bright_green]🏆 Pre-commit passed all checks![/bold bright_green]"
            )
        self._generate_analysis_files(hook_results)

        return hook_results

    def _get_analysis_files(self) -> list[str]:
        analysis_files: list[str] = []
        if (
            hasattr(self, "options")
            and self.options
            and getattr(self.options, "ai_agent", False)
        ):
            analysis_files.extend(
                [
                    "test-results.xml",
                    "coverage.json",
                    "benchmark.json",
                    "ai-agent-summary.json",
                ]
            )
        return analysis_files

    def _generate_analysis_files(self, hook_results: list[HookResult]) -> None:
        if not (
            hasattr(self, "options")
            and self.options
            and getattr(self.options, "ai_agent", False)
        ):
            return
        try:
            import json

            summary = {
                "status": "success"
                if all(hr.status == "passed" for hr in hook_results)
                else "failed",
                "hook_results": [
                    {
                        "name": hr.name,
                        "status": hr.status,
                        "duration": hr.duration,
                        "issues": hr.issues_found
                        if hasattr(hr, "issues_found")
                        else [],
                    }
                    for hr in hook_results
                ],
                "total_duration": sum(hr.duration for hr in hook_results),
                "files_analyzed": len(hook_results),
            }
            with open("ai-agent-summary.json", "w") as f:
                json.dump(summary, f, indent=2)
        except Exception as e:
            self.console.print(
                f"[yellow]Warning: Failed to generate AI summary: {e}[/yellow]"
            )

    def update_precommit_hooks(self) -> None:
        try:
            result = self.execute_command(
                ["uv", "run", "pre-commit", "autoupdate"],
                capture_output=True,
                text=True,
            )
            if result.returncode == 0:
                self.console.print(
                    "[green]✅ Pre-commit hooks updated successfully[/green]"
                )
                if result.stdout.strip():
                    self.console.print(f"[dim]{result.stdout}[/dim]")
            else:
                self.console.print(
                    f"[red]❌ Failed to update pre-commit hooks: {result.stderr}[/red]"
                )
        except Exception as e:
            self.console.print(f"[red]❌ Error updating pre-commit hooks: {e}[/red]")

class Crackerjack(BaseModel, arbitrary_types_allowed=True):
    our_path: Path = Path(__file__).parent
    pkg_path: Path = Path(Path.cwd())
    pkg_dir: Path | None = None
    pkg_name: str = "crackerjack"
    python_version: str = default_python_version
    console: Console = Console(force_terminal=True)
    dry_run: bool = False
    code_cleaner: CodeCleaner | None = None
    config_manager: ConfigManager | None = None
    project_manager: ProjectManager | None = None
    session_tracker: SessionTracker | None = None
    options: t.Any = None
    _file_cache: dict[str, list[Path]] = {}
    _file_cache_with_mtime: dict[str, tuple[float, list[Path]]] = {}
    _state_file: Path = Path(".crackerjack-state")

    def __init__(self, **data: t.Any) -> None:
        super().__init__(**data)
        self._file_cache = {}
        self._file_cache_with_mtime = {}
        self._state_file = Path(".crackerjack-state")
        self.code_cleaner = CodeCleaner(console=self.console)
        self.config_manager = ConfigManager(
            our_path=self.our_path,
            pkg_path=self.pkg_path,
            pkg_name=self.pkg_name,
            console=self.console,
            python_version=self.python_version,
            dry_run=self.dry_run,
        )
        self.project_manager = ProjectManager(
            our_path=self.our_path,
            pkg_path=self.pkg_path,
            pkg_dir=self.pkg_dir,
            pkg_name=self.pkg_name,
            console=self.console,
            code_cleaner=self.code_cleaner,
            config_manager=self.config_manager,
            dry_run=self.dry_run,
        )
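The class header passes arbitrary_types_allowed as a class keyword so the model can hold non-pydantic types such as a rich Console. A minimal sketch of that pattern, assuming pydantic v2 and rich are installed (Runner is a hypothetical name):

from pathlib import Path
from pydantic import BaseModel
from rich.console import Console

class Runner(BaseModel, arbitrary_types_allowed=True):
    # Console is not a pydantic type; the class kwarg opts out of strict validation
    pkg_path: Path = Path.cwd()
    console: Console = Console(force_terminal=True)
    dry_run: bool = False

runner = Runner(dry_run=True)
runner.console.print(f"[dim]dry run: {runner.dry_run}[/dim]")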

    def _read_state(self) -> dict[str, t.Any]:
        import json

        if self._state_file.exists():
            try:
                return json.loads(self._state_file.read_text(encoding="utf-8"))
            except (json.JSONDecodeError, OSError):
                return {}
        return {}

    def _write_state(self, state: dict[str, t.Any]) -> None:
        from contextlib import suppress

        with suppress(OSError):
            import json

            self._state_file.write_text(json.dumps(state, indent=2), encoding="utf-8")

    def _get_state(self) -> dict[str, t.Any]:
        return self._read_state()

    def _save_state(self, state: dict[str, t.Any]) -> None:
        self._write_state(state)

    def _clear_state(self) -> None:
        if self._state_file.exists():
            from contextlib import suppress

            with suppress(OSError):
                self._state_file.unlink()
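The state helpers above treat the state file as advisory: a missing or corrupt file degrades to an empty dict, and write failures are swallowed. A round-trip sketch of the same behavior, using a scratch path:

import json
from contextlib import suppress
from pathlib import Path

state_file = Path(".crackerjack-state")

def read_state() -> dict:
    if state_file.exists():
        with suppress(json.JSONDecodeError, OSError):
            return json.loads(state_file.read_text(encoding="utf-8"))
    return {}  # missing or corrupt file degrades to an empty state

def write_state(state: dict) -> None:
    with suppress(OSError):  # state is advisory; failures are non-fatal
        state_file.write_text(json.dumps(state, indent=2), encoding="utf-8")

write_state({"last_bump_type": "patch", "publish_completed": False})
print(read_state()["last_bump_type"])  # patch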

    def _has_version_been_bumped(self, version_type: str) -> bool:
        state = self._read_state()
        current_version = self._get_current_version()
        last_bumped_version = state.get("last_bumped_version")
        last_bump_type = state.get("last_bump_type")

        return (
            last_bumped_version == current_version
            and last_bump_type == version_type
            and not state.get("publish_completed", False)
        )

    def _mark_version_bumped(self, version_type: str) -> None:
        current_version = self._get_current_version()
        state = self._read_state()
        state.update(
            {
                "last_bumped_version": current_version,
                "last_bump_type": version_type,
                "publish_completed": False,
            }
        )
        self._write_state(state)

    def _mark_publish_completed(self) -> None:
        state = self._read_state()
        state["publish_completed"] = True
        self._write_state(state)

    def _get_current_version(self) -> str:
        from contextlib import suppress

        with suppress(Exception):
            import tomllib

            pyproject_path = Path("pyproject.toml")
            if pyproject_path.exists():
                with pyproject_path.open("rb") as f:
                    data = tomllib.load(f)
                return data.get("project", {}).get("version", "unknown")
        return "unknown"
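Version discovery above goes through tomllib, which is in the standard library from Python 3.11 and requires the file opened in binary mode. A standalone version of the same read:

import tomllib  # stdlib on Python 3.11+
from pathlib import Path

def current_version(pyproject: Path = Path("pyproject.toml")) -> str:
    if pyproject.exists():
        with pyproject.open("rb") as f:  # tomllib requires a binary file
            data = tomllib.load(f)
        return data.get("project", {}).get("version", "unknown")
    return "unknown"

print(current_version())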

    def _create_git_tag(self, version: str | None = None) -> None:
        if version is None:
            version = self._get_current_version()
        if version == "unknown":
            self.console.print(
                "[bold yellow]⚠️ Warning: Could not determine version for tagging[/bold yellow]"
            )
            return
        tag_name = f"v{version}"
        result = self.execute_command(
            ["git", "tag", "-l", tag_name], capture_output=True, text=True
        )
        if result.stdout.strip():
            self.console.print(
                f"[bold yellow]⚠️ Tag {tag_name} already exists, skipping tag creation[/bold yellow]"
            )
            return
        self.console.print(
            f"[bold bright_cyan]🏷️ Creating git tag: {tag_name}[/bold bright_cyan]"
        )
        package_name = self.pkg_path.stem.lower().replace("-", "_")
        tag_message = f"Release {package_name} v{version}"
        self.execute_command(["git", "tag", "-a", tag_name, "-m", tag_message])
        self.console.print(f"[bold green]✅ Created tag: {tag_name}[/bold green]")

    def _push_git_tags(self) -> None:
        self.console.print(
            "[bold bright_cyan]🚀 Pushing tags to remote repository[/bold bright_cyan]"
        )
        try:
            self.execute_command(["git", "push", "origin", "--tags"])
            self.console.print("[bold green]✅ Tags pushed successfully[/bold green]")
        except Exception as e:
            self.console.print(
                f"[bold yellow]⚠️ Warning: Failed to push tags: {e}[/bold yellow]"
            )

    def _verify_version_consistency(self) -> bool:
        current_version = self._get_current_version()
        if current_version == "unknown":
            self.console.print(
                "[bold yellow]⚠️ Warning: Could not determine current version from pyproject.toml[/bold yellow]"
            )
            return False
        try:
            result = self.execute_command(
                ["git", "describe", "--tags", "--abbrev=0"],
                capture_output=True,
                text=True,
            )
            latest_tag = result.stdout.strip()
            if latest_tag.startswith("v"):
                tag_version = latest_tag[1:]
            else:
                tag_version = latest_tag
        except Exception:
            self.console.print(
                "[bold bright_cyan]ℹ️ No git tags found - this appears to be the first release[/bold bright_cyan]"
            )
            return True
        if current_version != tag_version:
            self.console.print(
                f"[bold red]❌ Version mismatch detected:[/bold red]\n"
                f" pyproject.toml version: {current_version}\n"
                f" Latest git tag version: {tag_version}\n"
                f" These should match before committing or publishing."
            )
            return False
        self.console.print(
            f"[bold green]✅ Version consistency verified: {current_version}[/bold green]"
        )
        return True

    def _setup_package(self) -> None:
        self.pkg_name = self.pkg_path.stem.lower().replace("-", "_")
        self.pkg_dir = self.pkg_path / self.pkg_name
        self.pkg_dir.mkdir(exist_ok=True)
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_magenta]🛠️ SETUP[/bold bright_magenta] [bold bright_white]Initializing project structure[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        assert self.config_manager is not None
        assert self.project_manager is not None
        self.config_manager.pkg_name = self.pkg_name
        self.project_manager.pkg_name = self.pkg_name
        self.project_manager.pkg_dir = self.pkg_dir

    def _update_project(self, options: t.Any) -> None:
        assert self.project_manager is not None
        if not options.no_config_updates:
            self.project_manager.update_pkg_configs()
            self._run_automatic_updates()
            if self.pkg_path.stem != "crackerjack":
                self._check_and_update_crackerjack()
            result: CompletedProcess[str] = self.execute_command(
                ["uv", "sync"], capture_output=True, text=True
            )
            if result.returncode == 0:
                self.console.print(
                    "[bold green]✓ Dependencies installed[/bold green]\n"
                )
            else:
                self.console.print(
                    "\n\n[bold red]❌ UV sync failed. Is UV installed? Run `pipx install uv` and try again.[/bold red]\n\n"
                )

    def _run_automatic_updates(self) -> None:
        self.console.print("[dim]🔄 Checking for updates...[/dim]")
        self._upgrade_dependencies()
        self._update_hooks_if_needed()

    def _upgrade_dependencies(self) -> None:
        try:
            result = self.execute_command(
                ["uv", "sync", "--upgrade"], capture_output=True, text=True
            )
            if result.returncode == 0:
                self._handle_upgrade_success(result)
            else:
                self.console.print(
                    f"[yellow]⚠️ Dependency upgrade failed: {result.stderr}[/yellow]"
                )
        except Exception as e:
            self.console.print(f"[yellow]⚠️ Error upgrading dependencies: {e}[/yellow]")

    def _handle_upgrade_success(
        self, result: "subprocess.CompletedProcess[str]"
    ) -> None:
        if "no changes" not in result.stdout.lower():
            self.console.print("[green]✅ Dependencies upgraded[/green]")
            self._show_upgrade_summary(result.stdout)
        else:
            self.console.print("[dim]✓ Dependencies already up to date[/dim]")

    def _show_upgrade_summary(self, stdout: str) -> None:
        if stdout.strip():
            upgrade_lines = [line for line in stdout.split("\n") if "->" in line]
            if upgrade_lines:
                self.console.print(f"[dim]{len(upgrade_lines)} packages upgraded[/dim]")

    def _update_hooks_if_needed(self) -> None:
        import time
        from pathlib import Path

        marker_file = Path(".crackerjack-hooks-updated")
        current_time = time.time()
        week_seconds = 7 * 24 * 60 * 60
        should_update = True
        if marker_file.exists():
            try:
                last_update = float(marker_file.read_text().strip())
                if current_time - last_update < week_seconds:
                    should_update = False
            except (ValueError, OSError):
                should_update = True
        if should_update:
            self._update_precommit_hooks()
            from contextlib import suppress

            with suppress(OSError):
                marker_file.write_text(str(current_time))
        else:
            self.console.print("[dim]✓ Pre-commit hooks recently updated[/dim]")
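The throttle above stores a plain epoch timestamp in a marker file and skips the update if less than a week has passed. Reduced to its essentials:

import time
from pathlib import Path

marker = Path(".crackerjack-hooks-updated")
week = 7 * 24 * 60 * 60

def due() -> bool:
    try:
        return time.time() - float(marker.read_text().strip()) >= week
    except (FileNotFoundError, ValueError, OSError):
        return True  # no marker, or a corrupt one, means "update now"

if due():
    marker.write_text(str(time.time()))  # the real code runs autoupdate first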

    def _update_precommit_hooks(self) -> None:
        try:
            result = self.execute_command(
                ["uv", "run", "pre-commit", "autoupdate"],
                capture_output=True,
                text=True,
            )
            if result.returncode == 0:
                if "updated" in result.stdout.lower():
                    self.console.print("[green]✅ Pre-commit hooks updated[/green]")
                    update_lines = [
                        line for line in result.stdout.split("\n") if "->" in line
                    ]
                    if update_lines:
                        self.console.print(
                            f"[dim]{len(update_lines)} hooks updated[/dim]"
                        )
                else:
                    self.console.print(
                        "[dim]✓ Pre-commit hooks already up to date[/dim]"
                    )
            else:
                self.console.print(
                    f"[yellow]⚠️ Pre-commit update failed: {result.stderr}[/yellow]"
                )
        except Exception as e:
            self.console.print(
                f"[yellow]⚠️ Error updating pre-commit hooks: {e}[/yellow]"
            )

    def _check_and_update_crackerjack(self) -> None:
        try:
            import tomllib
            from pathlib import Path

            pyproject_path = Path("pyproject.toml")
            if not pyproject_path.exists():
                return
            with pyproject_path.open("rb") as f:
                config = tomllib.load(f)
            dependencies = config.get("project", {}).get("dependencies", [])
            dev_dependencies = config.get("dependency-groups", {}).get("dev", [])
            has_crackerjack = any(
                dep.startswith("crackerjack") for dep in dependencies + dev_dependencies
            )
            if has_crackerjack:
                result = self.execute_command(
                    ["uv", "sync", "--upgrade", "--upgrade-package", "crackerjack"],
                    capture_output=True,
                    text=True,
                )
                if result.returncode == 0:
                    if "crackerjack" in result.stdout:
                        self.console.print(
                            "[green]✅ Crackerjack upgraded to latest version[/green]"
                        )
                    else:
                        self.console.print(
                            "[dim]✓ Crackerjack already up to date[/dim]"
                        )
                else:
                    self.console.print(
                        f"[yellow]⚠️ Crackerjack update check failed: {result.stderr}[/yellow]"
                    )
        except Exception as e:
            self.console.print(
                f"[yellow]⚠️ Error checking crackerjack updates: {e}[/yellow]"
            )

    def _clean_project(self, options: t.Any) -> None:
        assert self.code_cleaner is not None
        if options.clean:
            if self.pkg_dir:
                self.console.print("\n" + "-" * 80)
                self.console.print(
                    "[bold bright_blue]🧹 CLEAN[/bold bright_blue] [bold bright_white]Removing docstrings and comments[/bold bright_white]"
                )
                self.console.print("-" * 80 + "\n")
                self.code_cleaner.clean_files(self.pkg_dir)
            if self.pkg_path.stem == "crackerjack":
                tests_dir = self.pkg_path / "tests"
                if tests_dir.exists() and tests_dir.is_dir():
                    self.console.print("\n" + "─" * 80)
                    self.console.print(
                        "[bold bright_blue]🧪 TESTS[/bold bright_blue] [bold bright_white]Cleaning test files[/bold bright_white]"
                    )
                    self.console.print("─" * 80 + "\n")
                    self.code_cleaner.clean_files(tests_dir)

    async def _clean_project_async(self, options: t.Any) -> None:
        assert self.code_cleaner is not None
        if options.clean:
            if self.pkg_dir:
                self.console.print("\n" + "-" * 80)
                self.console.print(
                    "[bold bright_blue]🧹 CLEAN[/bold bright_blue] [bold bright_white]Removing docstrings and comments[/bold bright_white]"
                )
                self.console.print("-" * 80 + "\n")
                await self.code_cleaner.clean_files_async(self.pkg_dir)
            if self.pkg_path.stem == "crackerjack":
                tests_dir = self.pkg_path / "tests"
                if tests_dir.exists() and tests_dir.is_dir():
                    self.console.print("\n" + "─" * 80)
                    self.console.print(
                        "[bold bright_blue]🧪 TESTS[/bold bright_blue] [bold bright_white]Cleaning test files[/bold bright_white]"
                    )
                    self.console.print("─" * 80 + "\n")
                    await self.code_cleaner.clean_files_async(tests_dir)

    def _get_test_timeout(self, options: OptionsProtocol, project_size: str) -> int:
        if options.test_timeout > 0:
            return options.test_timeout
        return (
            360 if project_size == "large" else 240 if project_size == "medium" else 120
        )

    def _add_ai_agent_flags(
        self, test: list[str], options: OptionsProtocol, test_timeout: int
    ) -> None:
        test.extend(
            [
                "--junitxml=test-results.xml",
                "--cov-report=json:coverage.json",
                "--tb=short",
                "--no-header",
                "--quiet",
                f"--timeout={test_timeout}",
            ]
        )
        if options.benchmark or options.benchmark_regression:
            test.append("--benchmark-json=benchmark.json")

    def _add_standard_flags(self, test: list[str], test_timeout: int) -> None:
        test.extend(
            [
                "--capture=fd",
                "--tb=short",
                "--no-header",
                "--disable-warnings",
                "--durations=0",
                f"--timeout={test_timeout}",
            ]
        )

    def _add_benchmark_flags(self, test: list[str], options: OptionsProtocol) -> None:
        if options.benchmark:
            test.extend(["--benchmark", "--benchmark-autosave"])
        if options.benchmark_regression:
            test.extend(
                [
                    "--benchmark-regression",
                    f"--benchmark-regression-threshold={options.benchmark_regression_threshold}",
                ]
            )

    def _add_worker_flags(
        self, test: list[str], options: OptionsProtocol, project_size: str
    ) -> None:
        if options.test_workers > 0:
            if options.test_workers == 1:
                test.append("-vs")
            else:
                test.extend(["-xvs", "-n", str(options.test_workers)])
        else:
            workload = self._analyze_test_workload()
            optimal_workers = self._calculate_optimal_test_workers(workload)

            if workload.get("test_files", 0) < 5:
                test.append("-xvs")
            else:
                test_files = workload.get("test_files", 0)
                if isinstance(test_files, int) and test_files > 20:
                    self.console.print(
                        f"[dim]Running {test_files} tests "
                        f"({workload.get('complexity', 'unknown')} complexity) with {optimal_workers} workers[/dim]"
                    )

                if optimal_workers == 1:
                    test.append("-vs")
                else:
                    test.extend(["-xvs", "-n", str(optimal_workers)])

    def _prepare_pytest_command(self, options: OptionsProtocol) -> list[str]:
        test = ["uv", "run", "pytest"]
        project_size = self._detect_project_size()
        test_timeout = self._get_test_timeout(options, project_size)
        if getattr(options, "ai_agent", False):
            self._add_ai_agent_flags(test, options, test_timeout)
        else:
            self._add_standard_flags(test, test_timeout)
        if options.benchmark or options.benchmark_regression:
            self._add_benchmark_flags(test, options)
        else:
            self._add_worker_flags(test, options, project_size)
        return test

    def _get_cached_files(self, pattern: str) -> list[Path]:
        cache_key = f"{self.pkg_path}:{pattern}"
        if cache_key not in self._file_cache:
            try:
                self._file_cache[cache_key] = list(self.pkg_path.rglob(pattern))
            except (OSError, PermissionError):
                self._file_cache[cache_key] = []
        return self._file_cache[cache_key]

    def _get_cached_files_with_mtime(self, pattern: str) -> list[Path]:
        cache_key = f"{self.pkg_path}:{pattern}"
        current_mtime = self._get_directory_mtime(self.pkg_path)
        if cache_key in self._file_cache_with_mtime:
            cached_mtime, cached_files = self._file_cache_with_mtime[cache_key]
            if cached_mtime >= current_mtime:
                return cached_files
        try:
            files = list(self.pkg_path.rglob(pattern))
            self._file_cache_with_mtime[cache_key] = (current_mtime, files)
            return files
        except (OSError, PermissionError):
            self._file_cache_with_mtime[cache_key] = (current_mtime, [])
            return []
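The mtime-aware cache above stays valid until the directory tree's newest modification time moves forward. A toy version of that invalidation rule, keyed on an explicit timestamp (cached_scan is a hypothetical name):

import time

cache: dict[str, tuple[float, list[str]]] = {}

def cached_scan(key: str, current_mtime: float, scan) -> list[str]:
    if key in cache:
        cached_mtime, files = cache[key]
        if cached_mtime >= current_mtime:  # nothing changed since the last scan
            return files
    files = scan()
    cache[key] = (current_mtime, files)
    return files

t0 = time.time()
print(cached_scan("py", t0, lambda: ["a.py", "b.py"]))  # performs the scan
print(cached_scan("py", t0, lambda: ["unused"]))        # served from cache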

    def _get_directory_mtime(self, path: Path) -> float:
        try:
            max_mtime = path.stat().st_mtime
            for item in path.iterdir():
                if item.is_dir() and not item.name.startswith("."):
                    try:
                        dir_mtime = item.stat().st_mtime
                        max_mtime = max(max_mtime, dir_mtime)
                    except (OSError, PermissionError):
                        continue
                elif item.is_file() and item.suffix == ".py":
                    try:
                        file_mtime = item.stat().st_mtime
                        max_mtime = max(max_mtime, file_mtime)
                    except (OSError, PermissionError):
                        continue

            return max_mtime
        except (OSError, PermissionError):
            return 0.0

    def _detect_project_size(self) -> str:
        if self.pkg_name in ("acb", "fastblocks"):
            return "large"
        try:
            py_files = self._get_cached_files_with_mtime("*.py")
            test_files = self._get_cached_files_with_mtime("test_*.py")
            total_files = len(py_files)
            num_test_files = len(test_files)
            if total_files > 100 or num_test_files > 50:
                return "large"
            elif total_files > 50 or num_test_files > 20:
                return "medium"
            else:
                return "small"
        except (OSError, PermissionError):
            return "medium"

    def _calculate_test_metrics(self, test_files: list[Path]) -> tuple[int, int]:
        total_test_size = 0
        slow_tests = 0
        for test_file in test_files:
            try:
                size = test_file.stat().st_size
                total_test_size += size
                if size > 30_000 or "integration" in test_file.name.lower():
                    slow_tests += 1
            except (OSError, PermissionError):
                continue
        return total_test_size, slow_tests

    def _determine_test_complexity(
        self, test_count: int, avg_size: float, slow_ratio: float
    ) -> str:
        if test_count > 100 or avg_size > 25_000 or slow_ratio > 0.4:
            return "high"
        elif test_count > 50 or avg_size > 15_000 or slow_ratio > 0.2:
            return "medium"
        return "low"
def _analyze_test_workload(self) -> dict[str, t.Any]:
|
|
2589
|
-
try:
|
|
2590
|
-
test_files = self._get_cached_files_with_mtime("test_*.py")
|
|
2591
|
-
py_files = self._get_cached_files_with_mtime("*.py")
|
|
2592
|
-
total_test_size, slow_tests = self._calculate_test_metrics(test_files)
|
|
2593
|
-
avg_test_size = total_test_size / len(test_files) if test_files else 0
|
|
2594
|
-
slow_test_ratio = slow_tests / len(test_files) if test_files else 0
|
|
2595
|
-
complexity = self._determine_test_complexity(
|
|
2596
|
-
len(test_files), avg_test_size, slow_test_ratio
|
|
2597
|
-
)
|
|
2598
|
-
return {
|
|
2599
|
-
"total_files": len(py_files),
|
|
2600
|
-
"test_files": len(test_files),
|
|
2601
|
-
"total_test_size": total_test_size,
|
|
2602
|
-
"avg_test_size": avg_test_size,
|
|
2603
|
-
"slow_tests": slow_tests,
|
|
2604
|
-
"slow_test_ratio": slow_test_ratio,
|
|
2605
|
-
"complexity": complexity,
|
|
2606
|
-
}
|
|
2607
|
-
except (OSError, PermissionError):
|
|
2608
|
-
return {"complexity": "medium", "total_files": 0, "test_files": 0}
|
|
2609
|
-
|
|
2610
|
-
def _calculate_optimal_test_workers(self, workload: dict[str, t.Any]) -> int:
|
|
2611
|
-
import os
|
|
2612
|
-
|
|
2613
|
-
cpu_count = os.cpu_count() or 4
|
|
2614
|
-
if workload["complexity"] == "high":
|
|
2615
|
-
return min(cpu_count // 3, 2)
|
|
2616
|
-
elif workload["complexity"] == "medium":
|
|
2617
|
-
return min(cpu_count // 2, 4)
|
|
2618
|
-
return min(cpu_count, 8)
|
|
2619
|
-
|
|
2620
|
-
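
    # Worked example for _calculate_optimal_test_workers (illustrative): on an
    # 8-core machine, "high" complexity yields min(8 // 3, 2) == 2 workers,
    # "medium" yields min(8 // 2, 4) == 4, and "low" yields min(8, 8) == 8.
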
    def _print_ai_agent_files(self, options: t.Any) -> None:
        if getattr(options, "ai_agent", False):
            self.console.print(
                "[bold bright_black]→ Structured test results: test-results.xml[/bold bright_black]"
            )
            self.console.print(
                "[bold bright_black]→ Coverage report: coverage.json[/bold bright_black]"
            )
            if options.benchmark or options.benchmark_regression:
                self.console.print(
                    "[bold bright_black]→ Benchmark results: benchmark.json[/bold bright_black]"
                )

    def _handle_test_failure(self, result: t.Any, options: t.Any) -> None:
        if result.stderr:
            self.console.print(result.stderr)
        self.console.print(
            "\n\n[bold bright_red]❌ Tests failed. Please fix errors.[/bold bright_red]\n"
        )
        self._print_ai_agent_files(options)
        raise SystemExit(1)

    def _handle_test_success(self, options: t.Any) -> None:
        self.console.print(
            "\n\n[bold bright_green]🏆 Tests passed successfully![/bold bright_green]\n"
        )
        self._print_ai_agent_files(options)

    def _run_tests(self, options: t.Any) -> None:
        if not options.test:
            return
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🧪 TESTING[/bold bright_green] [bold bright_white]Executing test suite[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        test_command = self._prepare_pytest_command(options)
        result = self.execute_command(test_command, capture_output=True, text=True)
        if result.stdout:
            self.console.print(result.stdout)
        if result.returncode > 0:
            self._handle_test_failure(result, options)
        else:
            self._handle_test_success(options)

    async def _run_tests_async(self, options: t.Any) -> None:
        if not options.test:
            return
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🧪 TESTING[/bold bright_green] [bold bright_white]Executing test suite (async optimized)[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        test_command = self._prepare_pytest_command(options)
        result = await self.execute_command_async(test_command)
        if result.stdout:
            self.console.print(result.stdout)
        if result.returncode > 0:
            self._handle_test_failure(result, options)
        else:
            self._handle_test_success(options)
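
    # _run_tests and _run_tests_async are deliberate mirrors: the same command
    # from _prepare_pytest_command is executed via the sync or async executor,
    # and both paths funnel into the shared failure/success handlers above.
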
    def _prompt_version_selection(self) -> str:
        from rich.prompt import Prompt

        if self.options and getattr(self.options, "ai_agent", False):
            self.console.print(
                "[dim]AI agent mode: defaulting to patch version bump[/dim]"
            )
            return "patch"
        self.console.print(
            "\n[bold bright_yellow]📦 VERSION SELECTION[/bold bright_yellow]"
        )
        self.console.print("[dim]Select the type of version bump to perform:[/dim]\n")
        choices = {
            "1": ("patch", "Bug fixes and minor changes (0.1.0 → 0.1.1)"),
            "2": ("minor", "New features, backwards compatible (0.1.0 → 0.2.0)"),
            "3": ("major", "Breaking changes, major updates (0.1.0 → 1.0.0)"),
        }
        for key, (bump_type, description) in choices.items():
            self.console.print(
                f" [bold bright_cyan]{key}[/bold bright_cyan] {bump_type:<6} - {description}"
            )
        while True:
            choice = Prompt.ask(
                "\n[bold]Select version bump type",
                choices=list(choices.keys()),
                default="1",
                show_choices=False,
            )
            if choice in choices:
                selected_type = choices[choice][0]
                self.console.print(
                    f"[green]✓ Selected: {selected_type} version bump[/green]"
                )
                return selected_type
            else:
                self.console.print(
                    "[red]Invalid choice. Please select 1, 2, or 3.[/red]"
                )

    def _bump_version(self, options: OptionsProtocol) -> None:
        if options.publish and str(options.publish) == "interactive":
            return self._handle_interactive_version_selection(options)
        for option in (options.publish, options.bump):
            if option:
                version_type = str(option)
                if self._has_version_been_bumped(version_type):
                    self._display_version_already_bumped_message(version_type)
                    return
                self._display_version_bump_message(option)
                if not self._confirm_version_bump_if_needed(option, version_type):
                    return
                self.execute_command(["uv", "version", "--bump", option])
                self._mark_version_bumped(version_type)
                if not options.no_git_tags:
                    self._create_git_tag()
                break

    def _handle_interactive_version_selection(self, options: OptionsProtocol) -> None:
        selected_version = self._prompt_version_selection()
        from crackerjack.__main__ import BumpOption

        options_dict = vars(options).copy()
        options_dict["publish"] = BumpOption(selected_version)
        from types import SimpleNamespace

        temp_options = SimpleNamespace(**options_dict)

        return self._bump_version(temp_options)  # type: ignore[arg-type]

    def _display_version_already_bumped_message(self, version_type: str) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            f"[bold yellow]📦 VERSION[/bold yellow] [bold bright_white]Version already bumped ({version_type}), skipping to avoid duplicate bump[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")

    def _display_version_bump_message(self, option: t.Any) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            f"[bold bright_magenta]📦 VERSION[/bold bright_magenta] [bold bright_white]Bumping {option} version[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")

    def _confirm_version_bump_if_needed(self, option: t.Any, version_type: str) -> bool:
        if version_type in ("minor", "major"):
            from rich.prompt import Confirm

            if not Confirm.ask(
                f"Are you sure you want to bump the {option} version?",
                default=False,
            ):
                self.console.print(
                    f"[bold yellow]⏭️ Skipping {option} version bump[/bold yellow]"
                )
                return False
        return True
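
    # Version bump flow: _bump_version consults _has_version_been_bumped (a
    # state check defined elsewhere in this class) so a re-run after a failed
    # publish does not bump the version a second time; "interactive" publish
    # values are routed through the prompt above, and minor/major bumps ask
    # for explicit confirmation.
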
    def _validate_authentication_setup(self) -> None:
        import os
        import shutil

        keyring_provider = self._get_keyring_provider()
        has_publish_token = bool(os.environ.get("UV_PUBLISH_TOKEN"))
        has_keyring = shutil.which("keyring") is not None
        self.console.print("[dim]🔐 Validating authentication setup...[/dim]")
        if has_publish_token:
            self._handle_publish_token_found()
            return
        if keyring_provider == "subprocess" and has_keyring:
            self._handle_keyring_validation()
            return
        if keyring_provider == "subprocess" and not has_keyring:
            self._handle_missing_keyring()
        if not keyring_provider:
            self._handle_no_keyring_provider()

    def _handle_publish_token_found(self) -> None:
        self.console.print(
            "[dim] ✅ UV_PUBLISH_TOKEN environment variable found[/dim]"
        )

    def _handle_keyring_validation(self) -> None:
        self.console.print(
            "[dim] ✅ Keyring provider configured and keyring executable found[/dim]"
        )
        try:
            result = self.execute_command(
                ["keyring", "get", "https://upload.pypi.org/legacy/", "__token__"],
                capture_output=True,
                text=True,
            )
            if result.returncode == 0:
                self.console.print("[dim] ✅ PyPI token found in keyring[/dim]")
            else:
                self.console.print(
                    "[yellow] ⚠️ No PyPI token found in keyring - will prompt during publish[/yellow]"
                )
        except Exception:
            self.console.print(
                "[yellow] ⚠️ Could not check keyring - will attempt publish anyway[/yellow]"
            )

    def _handle_missing_keyring(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print(
                "[yellow] ⚠️ Keyring provider set to 'subprocess' but keyring executable not found[/yellow]"
            )
            self.console.print(
                "[yellow] Install keyring: uv tool install keyring[/yellow]"
            )

    def _handle_no_keyring_provider(self) -> None:
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print(
                "[yellow] ⚠️ No keyring provider configured and no UV_PUBLISH_TOKEN set[/yellow]"
            )

    def _get_keyring_provider(self) -> str | None:
        import os
        import tomllib
        from pathlib import Path

        env_provider = os.environ.get("UV_KEYRING_PROVIDER")
        if env_provider:
            return env_provider
        for config_file in ("pyproject.toml", "uv.toml"):
            config_path = Path(config_file)
            if config_path.exists():
                try:
                    with config_path.open("rb") as f:
                        config = tomllib.load(f)
                    return config.get("tool", {}).get("uv", {}).get("keyring-provider")
                except Exception:
                    continue

        return None
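
    # Authentication resolution order, per _validate_authentication_setup: an
    # explicit UV_PUBLISH_TOKEN wins, then a configured "subprocess" keyring
    # provider with a keyring executable on PATH; _get_keyring_provider checks
    # the UV_KEYRING_PROVIDER env var before falling back to the
    # [tool.uv] keyring-provider key in pyproject.toml or uv.toml.
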
    def _build_publish_command(self) -> list[str]:
        import os

        cmd = ["uv", "publish"]
        publish_token = os.environ.get("UV_PUBLISH_TOKEN")
        if publish_token:
            cmd.extend(["--token", publish_token])
        keyring_provider = self._get_keyring_provider()
        if keyring_provider:
            cmd.extend(["--keyring-provider", keyring_provider])

        return cmd

    def _publish_with_retry(self) -> None:
        max_retries = 2
        for attempt in range(max_retries):
            try:
                result = self._attempt_publish()
                if result.returncode == 0:
                    self._verify_pypi_upload()
                    return
                if not self._handle_publish_failure(result, attempt, max_retries):
                    raise SystemExit(1)
            except SystemExit:
                if attempt < max_retries - 1:
                    continue
                raise

    def _attempt_publish(self) -> "subprocess.CompletedProcess[str]":
        self._validate_authentication_setup()
        publish_cmd = self._build_publish_command()
        self.console.print("[dim]📤 Uploading package to PyPI...[/dim]")
        import subprocess
        import time
        from threading import Thread

        from rich.live import Live
        from rich.spinner import Spinner

        result: subprocess.CompletedProcess[str] | None = None
        start_time = time.time()

        def run_publish() -> None:
            nonlocal result
            result = self.execute_command(publish_cmd, capture_output=True, text=True)

        publish_thread = Thread(target=run_publish)
        publish_thread.start()

        elapsed_time = 0
        while publish_thread.is_alive():
            elapsed_time = time.time() - start_time

            if elapsed_time < 5:
                text = "[dim]📤 Uploading to PyPI...[/dim]"
            elif elapsed_time < 15:
                text = "[dim]📤 Uploading to PyPI... (this may take a moment)[/dim]"
            else:
                text = "[dim]📤 Uploading to PyPI... (large package or slow connection)[/dim]"

            spinner = Spinner("dots", text=text)
            with Live(spinner, refresh_per_second=10, transient=True):
                time.sleep(0.5)

            if not publish_thread.is_alive():
                break

        publish_thread.join()

        elapsed_time = time.time() - start_time

        if result and result.returncode == 0:
            self.console.print(
                f"[green]✅ Package uploaded successfully! ({elapsed_time:.1f}s)[/green]"
            )
        elif result and result.returncode != 0:
            self.console.print(f"[red]❌ Upload failed after {elapsed_time:.1f}s[/red]")
            if result.stdout:
                self.console.print(f"[dim]stdout: {result.stdout}[/dim]")
            if result.stderr:
                self.console.print(f"[red]stderr: {result.stderr}[/red]")

        if result is None:
            return subprocess.CompletedProcess(
                args=publish_cmd,
                returncode=1,
                stdout="",
                stderr="Thread execution failed",
            )

        return result
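
    # Design note on _attempt_publish: the upload runs in a worker thread so
    # the main thread can drive a transient rich spinner whose message
    # escalates at the 5s and 15s marks; `nonlocal result` is how the thread
    # hands the CompletedProcess back to the caller.
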
    def _verify_pypi_upload(self) -> None:
        if self.options and getattr(self.options, "ai_agent", False):
            return
        import time

        package_name = self._get_package_name()
        current_version = self._get_current_version()
        self.console.print(
            f"[dim]🔍 Verifying upload of {package_name} v{current_version}...[/dim]"
        )
        time.sleep(2)
        self._retry_pypi_verification(package_name, current_version)

    def _retry_pypi_verification(self, package_name: str, current_version: str) -> None:
        import time

        max_attempts = 3
        for attempt in range(max_attempts):
            try:
                if self._check_pypi_package_exists(package_name, current_version):
                    self._show_pypi_success(package_name, current_version)
                    return
                if attempt < max_attempts - 1:
                    self._show_pypi_retry_message(attempt, max_attempts)
                    time.sleep(5)
                    continue
                else:
                    self._show_pypi_not_visible(package_name, current_version)
                    return
            except Exception as e:
                if attempt < max_attempts - 1:
                    self._show_pypi_error_retry(attempt, max_attempts, e)
                    time.sleep(5)
                    continue
                else:
                    self._show_pypi_final_error(package_name, current_version, e)
                    return

    def _check_pypi_package_exists(
        self, package_name: str, current_version: str
    ) -> bool:
        import json
        import urllib.error
        import urllib.request

        url = f"https://pypi.org/pypi/{package_name}/{current_version}/json"
        try:
            with urllib.request.urlopen(url, timeout=10) as response:  # nosec B310
                data = json.loads(response.read().decode())
                return data.get("info", {}).get("version") == current_version
        except urllib.error.HTTPError as e:
            if e.code == 404:
                return False
            raise

    def _show_pypi_success(self, package_name: str, current_version: str) -> None:
        self.console.print(
            f"[green]✅ Verified: {package_name} v{current_version} is available on PyPI![/green]"
        )
        pypi_url = f"https://pypi.org/project/{package_name}/{current_version}/"
        self.console.print(f"[dim] 📦 Package URL: {pypi_url}[/dim]")

    def _show_pypi_retry_message(self, attempt: int, max_attempts: int) -> None:
        self.console.print(
            f"[yellow]⏳ Package not yet available on PyPI (attempt {attempt + 1}/{max_attempts}), retrying...[/yellow]"
        )

    def _show_pypi_not_visible(self, package_name: str, current_version: str) -> None:
        self.console.print(
            "[yellow]⚠️ Package uploaded but not yet visible on PyPI (this is normal)[/yellow]"
        )
        self.console.print(
            f"[dim] Check later at: https://pypi.org/project/{package_name}/{current_version}/[/dim]"
        )

    def _show_pypi_error_retry(
        self, attempt: int, max_attempts: int, error: Exception
    ) -> None:
        self.console.print(
            f"[yellow]⏳ Error checking PyPI (attempt {attempt + 1}/{max_attempts}): {error}[/yellow]"
        )

    def _show_pypi_final_error(
        self, package_name: str, current_version: str, error: Exception
    ) -> None:
        self.console.print(f"[yellow]⚠️ Could not verify PyPI upload: {error}[/yellow]")
        self.console.print(
            f"[dim] Check manually at: https://pypi.org/project/{package_name}/{current_version}/[/dim]"
        )

    def _get_package_name(self) -> str:
        import tomllib
        from pathlib import Path

        pyproject_path = Path("pyproject.toml")
        if pyproject_path.exists():
            with pyproject_path.open("rb") as f:
                data = tomllib.load(f)
            return data.get("project", {}).get("name", "unknown")
        return "unknown"
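
    # PyPI verification treats a 404 from the JSON API as "not visible yet"
    # (False) rather than an error, since freshly uploaded releases can take a
    # while to appear; any other HTTP error propagates and triggers the retry
    # messaging above.
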
    def _handle_publish_failure(
        self, result: "subprocess.CompletedProcess[str]", attempt: int, max_retries: int
    ) -> bool:
        if self._is_auth_error(result):
            return self._handle_auth_error(attempt, max_retries)
        else:
            self._handle_non_auth_error(result)
            return False

    def _handle_auth_error(self, attempt: int, max_retries: int) -> bool:
        if attempt < max_retries - 1:
            self.console.print(
                f"[yellow]⚠️ Authentication failed (attempt {attempt + 1}/{max_retries})[/yellow]"
            )
            return self._prompt_for_token()
        self._display_authentication_help()
        return False

    def _handle_non_auth_error(
        self, result: "subprocess.CompletedProcess[str]"
    ) -> None:
        self.console.print(result.stdout)
        self.console.print(result.stderr)

    def _is_auth_error(self, result: "subprocess.CompletedProcess[str]") -> bool:
        error_text = (result.stdout + result.stderr).lower()
        auth_indicators = (
            "authentication",
            "unauthorized",
            "403",
            "401",
            "invalid credentials",
            "token",
            "password",
            "username",
        )
        return any(indicator in error_text for indicator in auth_indicators)

    def _prompt_for_token(self) -> bool:
        import getpass
        import os
        import shutil

        if self.options and getattr(self.options, "ai_agent", False):
            return False
        self.console.print("\n[bold yellow]🔐 PyPI Token Required[/bold yellow]")
        self.console.print(
            "[dim]Please enter your PyPI token (starts with 'pypi-'):[/dim]"
        )
        try:
            token = getpass.getpass("PyPI Token: ")
            if not token or not token.startswith("pypi-"):
                self.console.print(
                    "[red]❌ Invalid token format. Token must start with 'pypi-'[/red]"
                )
                return False
            if shutil.which("keyring"):
                try:
                    result = self.execute_command(
                        [
                            "keyring",
                            "set",
                            "https://upload.pypi.org/legacy/",
                            "__token__",
                        ],
                        input=token,
                        capture_output=True,
                        text=True,
                    )
                    if result.returncode == 0:
                        self.console.print("[green]✅ Token stored in keyring[/green]")
                    else:
                        os.environ["UV_PUBLISH_TOKEN"] = token
                        self.console.print(
                            "[yellow]⚠️ Keyring storage failed, using environment variable[/yellow]"
                        )
                except Exception:
                    os.environ["UV_PUBLISH_TOKEN"] = token
                    self.console.print(
                        "[yellow]⚠️ Keyring storage failed, using environment variable[/yellow]"
                    )
            else:
                os.environ["UV_PUBLISH_TOKEN"] = token
                self.console.print(
                    "[yellow]⚠️ Keyring not available, using environment variable[/yellow]"
                )

            return True
        except KeyboardInterrupt:
            self.console.print("\n[yellow]⚠️ Token entry cancelled[/yellow]")
            return False
        except Exception as e:
            self.console.print(f"[red]❌ Error storing token: {e}[/red]")
            return False

    def _display_authentication_help(self) -> None:
        self.console.print(
            "\n[bold bright_red]❌ Publish failed. Run crackerjack again to retry publishing without re-bumping version.[/bold bright_red]"
        )
        if not (self.options and getattr(self.options, "ai_agent", False)):
            self.console.print("\n[bold yellow]🔐 Authentication Help:[/bold yellow]")
            self.console.print(" [dim]To fix authentication issues, you can:[/dim]")
            self.console.print(
                " [dim]1. Set PyPI token: export UV_PUBLISH_TOKEN=pypi-your-token-here[/dim]"
            )
            self.console.print(
                " [dim]2. Install keyring: uv tool install keyring[/dim]"
            )
            self.console.print(
                " [dim]3. Store token in keyring: keyring set https://upload.pypi.org/legacy/ __token__[/dim]"
            )
            self.console.print(
                " [dim]4. Ensure keyring-provider is set in pyproject.toml:[/dim]"
            )
            self.console.print(" [dim] [tool.uv][/dim]")
            self.console.print(' [dim] keyring-provider = "subprocess"[/dim]')
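
    # Caveat: _is_auth_error matches bare substrings such as "token" and
    # "password" anywhere in the combined stdout/stderr, so unrelated failures
    # that merely mention those words are also routed to the
    # authentication-retry path.
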
    def _publish_project(self, options: OptionsProtocol) -> None:
        if options.publish:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_cyan]🚀 PUBLISH[/bold bright_cyan] [bold bright_white]Building and publishing package[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            if not options.skip_version_check:
                if not self._verify_version_consistency():
                    self.console.print(
                        "[bold red]❌ Publishing aborted due to version mismatch. Please ensure pyproject.toml version matches git tag.[/bold red]"
                    )
                    raise SystemExit(1)
            state = self._get_state()
            if not state.get("build_completed", False):
                build = self.execute_command(
                    ["uv", "build"], capture_output=True, text=True
                )
                self.console.print(build.stdout)
                if build.returncode > 0:
                    self.console.print(build.stderr)
                    self.console.print(
                        "[bold bright_red]❌ Build failed. Please fix errors.[/bold bright_red]"
                    )
                    raise SystemExit(1)
                state["build_completed"] = True
                self._save_state(state)
            else:
                self.console.print(
                    "[dim]📦 Using existing build artifacts (retry mode)[/dim]"
                )
            self._publish_with_retry()
            self._mark_publish_completed()
            self._clear_state()
            self.console.print(
                "\n[bold bright_green]🏆 Package published successfully![/bold bright_green]"
            )
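
    # The "build_completed" flag persisted via _save_state is what makes the
    # retry flow work: a re-run after a failed upload reuses the existing
    # build artifacts instead of rebuilding (the "retry mode" branch above).
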
    def _analyze_git_changes(self) -> dict[str, t.Any]:
        diff_result = self._get_git_diff_output()
        changes = self._parse_git_diff_output(diff_result)
        changes["stats"] = self._get_git_stats()
        return changes

    def _get_git_diff_output(self) -> t.Any:
        diff_cmd = ["git", "diff", "--cached", "--name-status"]
        diff_result = self.execute_command(diff_cmd, capture_output=True, text=True)
        if not diff_result.stdout and diff_result.returncode == 0:
            diff_cmd = ["git", "diff", "--name-status"]
            diff_result = self.execute_command(diff_cmd, capture_output=True, text=True)
        return diff_result

    def _parse_git_diff_output(self, diff_result: t.Any) -> dict[str, t.Any]:
        changes = {
            "added": [],
            "modified": [],
            "deleted": [],
            "renamed": [],
            "total_changes": 0,
        }
        if diff_result.returncode == 0 and diff_result.stdout:
            self._process_diff_lines(diff_result.stdout, changes)
        return changes

    def _process_diff_lines(self, stdout: str, changes: dict[str, t.Any]) -> None:
        for line in stdout.strip().split("\n"):
            if not line:
                continue
            self._process_single_diff_line(line, changes)

    def _process_single_diff_line(self, line: str, changes: dict[str, t.Any]) -> None:
        parts = line.split("\t")
        if len(parts) >= 2:
            status, filename = parts[0], parts[1]
            self._categorize_file_change(status, filename, parts, changes)
            changes["total_changes"] += 1

    def _categorize_file_change(
        self, status: str, filename: str, parts: list[str], changes: dict[str, t.Any]
    ) -> None:
        if status == "A":
            changes["added"].append(filename)
        elif status == "M":
            changes["modified"].append(filename)
        elif status == "D":
            changes["deleted"].append(filename)
        elif status.startswith("R"):
            if len(parts) >= 3:
                changes["renamed"].append((parts[1], parts[2]))
            else:
                changes["renamed"].append((filename, "unknown"))

    def _get_git_stats(self) -> str:
        stat_cmd = ["git", "diff", "--cached", "--stat"]
        stat_result = self.execute_command(stat_cmd, capture_output=True, text=True)
        if not stat_result.stdout and stat_result.returncode == 0:
            stat_cmd = ["git", "diff", "--stat"]
            stat_result = self.execute_command(stat_cmd, capture_output=True, text=True)
        return stat_result.stdout if stat_result.returncode == 0 else ""

    def _categorize_changes(self, changes: dict[str, t.Any]) -> dict[str, list[str]]:
        categories = {
            "docs": [],
            "tests": [],
            "config": [],
            "core": [],
            "ci": [],
            "deps": [],
        }
        file_patterns = {
            "docs": ["README.md", "CLAUDE.md", "RULES.md", "docs/", ".md"],
            "tests": ["test_", "_test.py", "tests/", "conftest.py"],
            "config": ["pyproject.toml", ".yaml", ".yml", ".json", ".gitignore"],
            "ci": [".github/", "ci/", ".pre-commit"],
            "deps": ["requirements", "pyproject.toml", "uv.lock"],
        }
        for file_list in ("added", "modified", "deleted"):
            for filename in changes.get(file_list, []):
                categorized = False
                for category, patterns in file_patterns.items():
                    if any(pattern in filename for pattern in patterns):
                        categories[category].append(filename)
                        categorized = True
                        break
                if not categorized:
                    categories["core"].append(filename)

        return categories

    def _get_primary_changes(self, categories: dict[str, list[str]]) -> list[str]:
        primary_changes = []
        category_mapping = [
            ("core", "core functionality"),
            ("tests", "tests"),
            ("docs", "documentation"),
            ("config", "configuration"),
            ("deps", "dependencies"),
        ]
        for key, label in category_mapping:
            if categories[key]:
                primary_changes.append(label)

        return primary_changes or ["project files"]

    def _determine_primary_action(self, changes: dict[str, t.Any]) -> str:
        added_count = len(changes["added"])
        modified_count = len(changes["modified"])
        deleted_count = len(changes["deleted"])
        if added_count > modified_count + deleted_count:
            return "Add"
        elif deleted_count > modified_count + added_count:
            return "Remove"
        elif changes["renamed"]:
            return "Refactor"
        return "Update"

    def _generate_body_lines(self, changes: dict[str, t.Any]) -> list[str]:
        body_lines = []
        change_types = [
            ("added", "Added"),
            ("modified", "Modified"),
            ("deleted", "Deleted"),
            ("renamed", "Renamed"),
        ]
        for change_type, label in change_types:
            items = changes.get(change_type, [])
            if items:
                count = len(items)
                body_lines.append(f"- {label} {count} file(s)")
                if change_type not in ("deleted", "renamed"):
                    for file in items[:3]:
                        body_lines.append(f" * {file}")
                    if count > 3:
                        body_lines.append(f" * ... and {count - 3} more")

        return body_lines

    def _generate_commit_message(self, changes: dict[str, t.Any]) -> str:
        if changes["total_changes"] == 0:
            return "Update project files"
        categories = self._categorize_changes(changes)
        primary_changes = self._get_primary_changes(categories)
        primary_action = self._determine_primary_action(changes)
        commit_subject = f"{primary_action} {' and '.join(primary_changes[:2])}"
        body_lines = self._generate_body_lines(changes)
        if body_lines:
            return f"{commit_subject}\n\n" + "\n".join(body_lines)
        return commit_subject
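
    # Worked example (illustrative): staging two new files, test_api.py and
    # README.md, gives added=2 vs modified+deleted=0, so the action is "Add";
    # the files categorize as tests and docs, producing the subject
    # "Add tests and documentation" with a body listing each added file.
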
    def _commit_and_push(self, options: OptionsProtocol) -> None:
        if options.commit:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_white]📝 COMMIT[/bold bright_white] [bold bright_white]Saving changes to git[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            if not options.skip_version_check:
                if not self._verify_version_consistency():
                    self.console.print(
                        "[bold red]❌ Commit aborted due to version mismatch. Please ensure pyproject.toml version matches git tag.[/bold red]"
                    )
                    raise SystemExit(1)
            changes = self._analyze_git_changes()
            if changes["total_changes"] > 0:
                self.console.print("[dim]🔍 Analyzing changes...[/dim]\n")
                if changes["stats"]:
                    self.console.print(changes["stats"])
                suggested_msg = self._generate_commit_message(changes)
                self.console.print(
                    "\n[bold cyan]📋 Suggested commit message:[/bold cyan]"
                )
                self.console.print(f"[cyan]{suggested_msg}[/cyan]\n")
                user_choice = (
                    input("Use suggested message? [Y/n/e to edit]: ").strip().lower()
                )
                if user_choice in ("", "y"):
                    commit_msg = suggested_msg
                elif user_choice == "e":
                    import os
                    import tempfile

                    with tempfile.NamedTemporaryFile(
                        mode="w", suffix=".txt", delete=False
                    ) as f:
                        f.write(suggested_msg)
                        temp_path = f.name
                    editor = os.environ.get("EDITOR", "vi")
                    self.execute_command([editor, temp_path])
                    with open(temp_path) as f:
                        commit_msg = f.read().strip()
                    Path(temp_path).unlink()
                else:
                    commit_msg = input("\nEnter custom commit message: ")
            else:
                commit_msg = input("\nCommit message: ")
            self.execute_command(
                ["git", "commit", "-m", commit_msg, "--no-verify", "--", "."]
            )
            self.execute_command(["git", "push", "origin", "main", "--no-verify"])
            self._push_git_tags()

    def _update_precommit(self, options: OptionsProtocol) -> None:
        if options.update_precommit:
            self.console.print(
                "\n[bold yellow]⚠️ DEPRECATION WARNING[/bold yellow]: The --update-precommit (-u) flag is deprecated.\n"
                " Pre-commit hooks are now updated automatically on a weekly basis.\n"
                " This manual update will still work but is no longer needed.\n"
            )
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_blue]🔄 UPDATE[/bold bright_blue] [bold bright_white]Updating pre-commit hooks[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            if self.pkg_path.stem == "crackerjack":
                config_path = self.project_manager._select_precommit_config()
                update_cmd = [
                    "uv",
                    "run",
                    "pre-commit",
                    "autoupdate",
                    "-c",
                    config_path,
                ]
                self.execute_command(update_cmd)
            else:
                self.project_manager.update_precommit_hooks()

    def _update_docs(self, options: OptionsProtocol) -> None:
        if options.update_docs or options.force_update_docs:
            self.console.print("\n" + "-" * 80)
            self.console.print(
                "[bold bright_blue]📋 DOCS UPDATE[/bold bright_blue] [bold bright_white]Updating documentation with quality standards[/bold bright_white]"
            )
            self.console.print("-" * 80 + "\n")
            self.config_manager.copy_documentation_templates(
                force_update=options.force_update_docs,
                compress_docs=options.compress_docs,
            )
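
    # Both git commands above pass --no-verify, presumably because the
    # workflow's own quality gates have already run by this point; note the
    # push targets the "main" branch unconditionally.
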
    def execute_command(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")
        return execute(cmd, **kwargs)

    async def execute_command_async(
        self, cmd: list[str], **kwargs: t.Any
    ) -> subprocess.CompletedProcess[str]:
        if self.dry_run:
            self.console.print(
                f"[bold bright_black]→ {' '.join(cmd)}[/bold bright_black]"
            )
            return CompletedProcess(cmd, 0, "", "")

        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            **kwargs,
        )
        stdout, stderr = await proc.communicate()

        return CompletedProcess(
            cmd,
            proc.returncode or 0,
            stdout.decode() if stdout else "",
            stderr.decode() if stderr else "",
        )
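
    # In dry-run mode both executors print the command and return a synthetic
    # CompletedProcess(cmd, 0, "", ""), so every downstream returncode check
    # sees success without any subprocess being spawned.
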
    def _run_comprehensive_quality_checks(self, options: OptionsProtocol) -> None:
        if options.skip_hooks or (
            options.test
            and not any([options.publish, options.bump, options.commit, options.all])
        ):
            return
        needs_comprehensive = any(
            [options.publish, options.bump, options.commit, options.all]
        )
        if not needs_comprehensive:
            return
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_magenta]🔍 COMPREHENSIVE QUALITY[/bold bright_magenta] [bold bright_white]Running all quality checks before publish/commit[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        config_path = self.project_manager._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "--hook-stage=manual",
            "-c",
            config_path,
        ]
        result = self.execute_command(cmd)
        if result.returncode > 0:
            self.console.print(
                "\n[bold bright_red]❌ Comprehensive quality checks failed![/bold bright_red]"
            )
            self.console.print(
                "\n[bold red]Cannot proceed with publishing/committing until all quality checks pass.[/bold red]\n"
            )
            raise SystemExit(1)
        else:
            self.console.print(
                "\n[bold bright_green]🏆 All comprehensive quality checks passed![/bold bright_green]"
            )

    async def _run_comprehensive_quality_checks_async(
        self, options: OptionsProtocol
    ) -> None:
        if options.skip_hooks or (
            options.test
            and not any([options.publish, options.bump, options.commit, options.all])
        ):
            return

        needs_comprehensive = any(
            [options.publish, options.bump, options.commit, options.all]
        )

        if not needs_comprehensive:
            return

        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_magenta]🔍 COMPREHENSIVE QUALITY[/bold bright_magenta] [bold bright_white]Running all quality checks before publish/commit[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")

        config_path = self.project_manager._select_precommit_config()
        cmd = [
            "uv",
            "run",
            "pre-commit",
            "run",
            "--all-files",
            "--hook-stage=manual",
            "-c",
            config_path,
        ]

        result = await self.execute_command_async(cmd)

        if result.returncode > 0:
            self.console.print(
                "\n[bold bright_red]❌ Comprehensive quality checks failed![/bold bright_red]"
            )
            if result.stderr:
                self.console.print(f"[dim]Error details: {result.stderr}[/dim]")
            self.console.print(
                "\n[bold red]Cannot proceed with publishing/committing until all quality checks pass.[/bold red]\n"
            )
            raise SystemExit(1)
        else:
            self.console.print(
                "[bold bright_green]🏆 All comprehensive quality checks passed![/bold bright_green]"
            )
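
    # Quality-gate scoping: both variants skip entirely for test-only runs and
    # only engage when a publish, bump, commit, or --all workflow is requested;
    # the "--hook-stage=manual" invocation runs the hooks reserved for the
    # comprehensive stage.
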
    def _run_tracked_task(
        self, task_id: str, task_name: str, task_func: t.Callable[[], None]
    ) -> None:
        if self.session_tracker:
            self.session_tracker.start_task(task_id, task_name)
        try:
            task_func()
            if self.session_tracker:
                self.session_tracker.complete_task(task_id, f"{task_name} completed")
        except Exception as e:
            if self.session_tracker:
                self.session_tracker.fail_task(task_id, str(e))
            raise

    def _run_pre_commit_task(self, options: OptionsProtocol) -> None:
        if not options.skip_hooks:
            if getattr(options, "comprehensive", False):
                self._run_comprehensive_hooks()
            elif getattr(options, "ai_agent", False):
                self.project_manager.run_pre_commit_with_analysis()
            else:
                self.project_manager.run_pre_commit()
        else:
            self.console.print(
                "\n[bold bright_yellow]⏭️ Skipping pre-commit hooks...[/bold bright_yellow]\n"
            )
            if self.session_tracker:
                self.session_tracker.skip_task("pre_commit", "Skipped by user request")

    def _run_comprehensive_hooks(self) -> None:
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]🔍 HOOKS[/bold bright_cyan] [bold bright_white]Running comprehensive quality checks[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        stages = ["pre-commit", "pre-push", "manual"]
        config_path = self.project_manager._select_precommit_config()
        for stage in stages:
            self.console.print(f"[dim]Running {stage} stage hooks...[/dim]")
            cmd = [
                "uv",
                "run",
                "pre-commit",
                "run",
                "--all-files",
                f"--hook-stage={stage}",
                "-c",
                config_path,
            ]
            result = self.execute_command(cmd)
            if result.returncode > 0:
                self.console.print(
                    f"\n[bold red]❌ {stage} hooks failed. Please fix errors.[/bold red]\n"
                )
                raise SystemExit(1)

    def _initialize_session_tracking(self, options: OptionsProtocol) -> None:
        if options.resume_from:
            try:
                progress_file = Path(options.resume_from)
                self.session_tracker = SessionTracker.resume_session(
                    console=self.console,
                    progress_file=progress_file,
                )
                return
            except Exception as e:
                self.console.print(
                    f"[yellow]Warning: Failed to resume from {options.resume_from}: {e}[/yellow]"
                )
                self.session_tracker = None
                return
        if options.track_progress:
            try:
                auto_tracker = SessionTracker.auto_detect_session(self.console)
                if auto_tracker:
                    self.session_tracker = auto_tracker
                    return
                progress_file = (
                    Path(options.progress_file) if options.progress_file else None
                )
                try:
                    from importlib.metadata import version

                    crackerjack_version = version("crackerjack")
                except (ImportError, ModuleNotFoundError):
                    crackerjack_version = "unknown"
                metadata = {
                    "working_dir": str(self.pkg_path),
                    "python_version": self.python_version,
                    "crackerjack_version": crackerjack_version,
                    "cli_options": str(options),
                }
                self.session_tracker = SessionTracker.create_session(
                    console=self.console,
                    progress_file=progress_file,
                    metadata=metadata,
                )
            except Exception as e:
                self.console.print(
                    f"[yellow]Warning: Failed to initialize session tracking: {e}[/yellow]"
                )
                self.session_tracker = None
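
    # Session tracking precedence: an explicit --resume-from file wins; with
    # --track-progress, an existing session is auto-detected before a new one
    # is created with the working directory, Python and crackerjack versions,
    # and the CLI options recorded as metadata.
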
    def process(self, options: OptionsProtocol) -> None:
        assert self.project_manager is not None
        self._initialize_session_tracking(options)
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]⚒️ CRACKERJACKING[/bold bright_cyan] [bold bright_white]Starting workflow execution[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        if options.all:
            options.clean = True
            options.test = True
            options.publish = options.all
            options.commit = True
        if options.comprehensive:
            options.test = True
        self._run_tracked_task(
            "setup", "Initialize project structure", self._setup_package
        )
        self._run_tracked_task(
            "update_project",
            "Update project configuration",
            lambda: self._update_project(options),
        )
        self._run_tracked_task(
            "update_precommit",
            "Update pre-commit hooks",
            lambda: self._update_precommit(options),
        )
        self._run_tracked_task(
            "update_docs",
            "Update documentation templates",
            lambda: self._update_docs(options),
        )
        self._run_tracked_task(
            "clean_project", "Clean project code", lambda: self._clean_project(options)
        )
        if self.project_manager is not None:
            self.project_manager.options = options
        if not options.skip_hooks:
            self._run_tracked_task(
                "pre_commit",
                "Run pre-commit hooks",
                lambda: self._run_pre_commit_task(options),
            )
        else:
            self._run_pre_commit_task(options)
        self._run_tracked_task(
            "run_tests", "Execute test suite", lambda: self._run_tests(options)
        )
        self._run_tracked_task(
            "quality_checks",
            "Run comprehensive quality checks",
            lambda: self._run_comprehensive_quality_checks(options),
        )
        self._run_tracked_task(
            "bump_version", "Bump version numbers", lambda: self._bump_version(options)
        )
        self._run_tracked_task(
            "commit_push",
            "Commit and push changes",
            lambda: self._commit_and_push(options),
        )
        self._run_tracked_task(
            "publish", "Publish project", lambda: self._publish_project(options)
        )
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🏆 CRACKERJACK COMPLETE[/bold bright_green] [bold bright_white]Workflow completed successfully![/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")

    async def process_async(self, options: OptionsProtocol) -> None:
        assert self.project_manager is not None
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_cyan]⚒️ CRACKERJACKING[/bold bright_cyan] [bold bright_white]Starting workflow execution (async optimized)[/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")
        if options.all:
            options.clean = True
            options.test = True
            options.publish = options.all
            options.commit = True
        if options.comprehensive:
            options.test = True
        self._setup_package()
        self._update_project(options)
        self._update_precommit(options)
        await self._clean_project_async(options)
        if self.project_manager is not None:
            self.project_manager.options = options
        if not options.skip_hooks:
            if getattr(options, "ai_agent", False):
                await self.project_manager.run_pre_commit_with_analysis_async()
            else:
                await self.project_manager.run_pre_commit_async()
        else:
            self.console.print(
                "\n[bold bright_yellow]⏭️ Skipping pre-commit hooks...[/bold bright_yellow]\n"
            )
        await self._run_tests_async(options)
        await self._run_comprehensive_quality_checks_async(options)
        self._bump_version(options)
        self._commit_and_push(options)
        self._publish_project(options)
        self.console.print("\n" + "-" * 80)
        self.console.print(
            "[bold bright_green]🏆 CRACKERJACK COMPLETE[/bold bright_green] [bold bright_white]Workflow completed successfully![/bold bright_white]"
        )
        self.console.print("-" * 80 + "\n")


crackerjack_it = Crackerjack().process


def create_crackerjack_runner(
    console: Console | None = None,
    our_path: Path | None = None,
    pkg_path: Path | None = None,
    python_version: str = default_python_version,
    dry_run: bool = False,
) -> Crackerjack:
    return Crackerjack(
        console=console or Console(force_terminal=True),
        our_path=our_path or Path(__file__).parent,
        pkg_path=pkg_path or Path.cwd(),
        python_version=python_version,
        dry_run=dry_run,
    )
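
# Usage sketch (illustrative, not part of the released file): embedding the
# runner programmatically would have looked roughly like this, with `options`
# being any object satisfying OptionsProtocol:
#
#     runner = create_crackerjack_runner(dry_run=True)
#     runner.process(options)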