crackerjack 0.30.3__py3-none-any.whl → 0.31.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack may be problematic — click here for more details.
- crackerjack/CLAUDE.md +1005 -0
- crackerjack/RULES.md +380 -0
- crackerjack/__init__.py +42 -13
- crackerjack/__main__.py +225 -299
- crackerjack/agents/__init__.py +41 -0
- crackerjack/agents/architect_agent.py +281 -0
- crackerjack/agents/base.py +169 -0
- crackerjack/agents/coordinator.py +512 -0
- crackerjack/agents/documentation_agent.py +498 -0
- crackerjack/agents/dry_agent.py +388 -0
- crackerjack/agents/formatting_agent.py +245 -0
- crackerjack/agents/import_optimization_agent.py +281 -0
- crackerjack/agents/performance_agent.py +669 -0
- crackerjack/agents/proactive_agent.py +104 -0
- crackerjack/agents/refactoring_agent.py +788 -0
- crackerjack/agents/security_agent.py +529 -0
- crackerjack/agents/test_creation_agent.py +652 -0
- crackerjack/agents/test_specialist_agent.py +486 -0
- crackerjack/agents/tracker.py +212 -0
- crackerjack/api.py +560 -0
- crackerjack/cli/__init__.py +24 -0
- crackerjack/cli/facade.py +104 -0
- crackerjack/cli/handlers.py +267 -0
- crackerjack/cli/interactive.py +471 -0
- crackerjack/cli/options.py +401 -0
- crackerjack/cli/utils.py +18 -0
- crackerjack/code_cleaner.py +618 -928
- crackerjack/config/__init__.py +19 -0
- crackerjack/config/hooks.py +218 -0
- crackerjack/core/__init__.py +0 -0
- crackerjack/core/async_workflow_orchestrator.py +406 -0
- crackerjack/core/autofix_coordinator.py +200 -0
- crackerjack/core/container.py +104 -0
- crackerjack/core/enhanced_container.py +542 -0
- crackerjack/core/performance.py +243 -0
- crackerjack/core/phase_coordinator.py +561 -0
- crackerjack/core/proactive_workflow.py +316 -0
- crackerjack/core/session_coordinator.py +289 -0
- crackerjack/core/workflow_orchestrator.py +640 -0
- crackerjack/dynamic_config.py +94 -103
- crackerjack/errors.py +263 -41
- crackerjack/executors/__init__.py +11 -0
- crackerjack/executors/async_hook_executor.py +431 -0
- crackerjack/executors/cached_hook_executor.py +242 -0
- crackerjack/executors/hook_executor.py +345 -0
- crackerjack/executors/individual_hook_executor.py +669 -0
- crackerjack/intelligence/__init__.py +44 -0
- crackerjack/intelligence/adaptive_learning.py +751 -0
- crackerjack/intelligence/agent_orchestrator.py +551 -0
- crackerjack/intelligence/agent_registry.py +414 -0
- crackerjack/intelligence/agent_selector.py +502 -0
- crackerjack/intelligence/integration.py +290 -0
- crackerjack/interactive.py +576 -315
- crackerjack/managers/__init__.py +11 -0
- crackerjack/managers/async_hook_manager.py +135 -0
- crackerjack/managers/hook_manager.py +137 -0
- crackerjack/managers/publish_manager.py +411 -0
- crackerjack/managers/test_command_builder.py +151 -0
- crackerjack/managers/test_executor.py +435 -0
- crackerjack/managers/test_manager.py +258 -0
- crackerjack/managers/test_manager_backup.py +1124 -0
- crackerjack/managers/test_progress.py +144 -0
- crackerjack/mcp/__init__.py +0 -0
- crackerjack/mcp/cache.py +336 -0
- crackerjack/mcp/client_runner.py +104 -0
- crackerjack/mcp/context.py +615 -0
- crackerjack/mcp/dashboard.py +636 -0
- crackerjack/mcp/enhanced_progress_monitor.py +479 -0
- crackerjack/mcp/file_monitor.py +336 -0
- crackerjack/mcp/progress_components.py +569 -0
- crackerjack/mcp/progress_monitor.py +949 -0
- crackerjack/mcp/rate_limiter.py +332 -0
- crackerjack/mcp/server.py +22 -0
- crackerjack/mcp/server_core.py +244 -0
- crackerjack/mcp/service_watchdog.py +501 -0
- crackerjack/mcp/state.py +395 -0
- crackerjack/mcp/task_manager.py +257 -0
- crackerjack/mcp/tools/__init__.py +17 -0
- crackerjack/mcp/tools/core_tools.py +249 -0
- crackerjack/mcp/tools/error_analyzer.py +308 -0
- crackerjack/mcp/tools/execution_tools.py +370 -0
- crackerjack/mcp/tools/execution_tools_backup.py +1097 -0
- crackerjack/mcp/tools/intelligence_tool_registry.py +80 -0
- crackerjack/mcp/tools/intelligence_tools.py +314 -0
- crackerjack/mcp/tools/monitoring_tools.py +502 -0
- crackerjack/mcp/tools/proactive_tools.py +384 -0
- crackerjack/mcp/tools/progress_tools.py +141 -0
- crackerjack/mcp/tools/utility_tools.py +341 -0
- crackerjack/mcp/tools/workflow_executor.py +360 -0
- crackerjack/mcp/websocket/__init__.py +14 -0
- crackerjack/mcp/websocket/app.py +39 -0
- crackerjack/mcp/websocket/endpoints.py +559 -0
- crackerjack/mcp/websocket/jobs.py +253 -0
- crackerjack/mcp/websocket/server.py +116 -0
- crackerjack/mcp/websocket/websocket_handler.py +78 -0
- crackerjack/mcp/websocket_server.py +10 -0
- crackerjack/models/__init__.py +31 -0
- crackerjack/models/config.py +93 -0
- crackerjack/models/config_adapter.py +230 -0
- crackerjack/models/protocols.py +118 -0
- crackerjack/models/task.py +154 -0
- crackerjack/monitoring/ai_agent_watchdog.py +450 -0
- crackerjack/monitoring/regression_prevention.py +638 -0
- crackerjack/orchestration/__init__.py +0 -0
- crackerjack/orchestration/advanced_orchestrator.py +970 -0
- crackerjack/orchestration/execution_strategies.py +341 -0
- crackerjack/orchestration/test_progress_streamer.py +636 -0
- crackerjack/plugins/__init__.py +15 -0
- crackerjack/plugins/base.py +200 -0
- crackerjack/plugins/hooks.py +246 -0
- crackerjack/plugins/loader.py +335 -0
- crackerjack/plugins/managers.py +259 -0
- crackerjack/py313.py +8 -3
- crackerjack/services/__init__.py +22 -0
- crackerjack/services/cache.py +314 -0
- crackerjack/services/config.py +347 -0
- crackerjack/services/config_integrity.py +99 -0
- crackerjack/services/contextual_ai_assistant.py +516 -0
- crackerjack/services/coverage_ratchet.py +347 -0
- crackerjack/services/debug.py +736 -0
- crackerjack/services/dependency_monitor.py +617 -0
- crackerjack/services/enhanced_filesystem.py +439 -0
- crackerjack/services/file_hasher.py +151 -0
- crackerjack/services/filesystem.py +395 -0
- crackerjack/services/git.py +165 -0
- crackerjack/services/health_metrics.py +611 -0
- crackerjack/services/initialization.py +847 -0
- crackerjack/services/log_manager.py +286 -0
- crackerjack/services/logging.py +174 -0
- crackerjack/services/metrics.py +578 -0
- crackerjack/services/pattern_cache.py +362 -0
- crackerjack/services/pattern_detector.py +515 -0
- crackerjack/services/performance_benchmarks.py +653 -0
- crackerjack/services/security.py +163 -0
- crackerjack/services/server_manager.py +234 -0
- crackerjack/services/smart_scheduling.py +144 -0
- crackerjack/services/tool_version_service.py +61 -0
- crackerjack/services/unified_config.py +437 -0
- crackerjack/services/version_checker.py +248 -0
- crackerjack/slash_commands/__init__.py +14 -0
- crackerjack/slash_commands/init.md +122 -0
- crackerjack/slash_commands/run.md +163 -0
- crackerjack/slash_commands/status.md +127 -0
- crackerjack-0.31.4.dist-info/METADATA +742 -0
- crackerjack-0.31.4.dist-info/RECORD +148 -0
- crackerjack-0.31.4.dist-info/entry_points.txt +2 -0
- crackerjack/.gitignore +0 -34
- crackerjack/.libcst.codemod.yaml +0 -18
- crackerjack/.pdm.toml +0 -1
- crackerjack/crackerjack.py +0 -3805
- crackerjack/pyproject.toml +0 -286
- crackerjack-0.30.3.dist-info/METADATA +0 -1290
- crackerjack-0.30.3.dist-info/RECORD +0 -16
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/WHEEL +0 -0
- {crackerjack-0.30.3.dist-info → crackerjack-0.31.4.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,388 @@
|
|
|
1
|
+
import re
|
|
2
|
+
import typing as t
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
|
|
5
|
+
from .base import (
|
|
6
|
+
FixResult,
|
|
7
|
+
Issue,
|
|
8
|
+
IssueType,
|
|
9
|
+
SubAgent,
|
|
10
|
+
agent_registry,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class DRYAgent(SubAgent):
    """Agent specialized in detecting and fixing DRY (Don't Repeat Yourself) violations."""

    def get_supported_types(self) -> set[IssueType]:
        """Return the issue types this agent knows how to handle."""
        return {IssueType.DRY_VIOLATION}

    async def can_handle(self, issue: Issue) -> float:
        """Return a confidence score in [0.0, 1.0] that this agent can fix *issue*."""
        if issue.type == IssueType.DRY_VIOLATION:
            return 0.9
        return 0.0

    async def analyze_and_fix(self, issue: Issue) -> FixResult:
        """Validate *issue*, then detect and (where possible) fix DRY violations in its file."""
        self.log(f"Analyzing DRY violation: {issue.message}")

        validation_result = self._validate_dry_issue(issue)
        if validation_result:
            return validation_result

        # Redundant with _validate_dry_issue's truthiness check, but narrows
        # the Optional type for static checkers before constructing a Path.
        if issue.file_path is None:
            return self._create_dry_error_result(
                ValueError("File path is required for DRY violation"),
            )

        file_path = Path(issue.file_path)

        try:
            return await self._process_dry_violation(file_path)
        except Exception as e:
            # Any unexpected failure is reported as a non-success FixResult
            # rather than propagating to the coordinator.
            return self._create_dry_error_result(e)

    def _validate_dry_issue(self, issue: Issue) -> FixResult | None:
        """Validate the DRY violation issue has required information.

        Returns a failure FixResult when the issue is missing a file path or
        the file does not exist; returns None when the issue is valid.
        """
        if not issue.file_path:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=["No file path specified for DRY violation"],
            )

        # At this point, issue.file_path is not None due to the check above
        file_path = Path(issue.file_path)
        if not file_path.exists():
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"File not found: {file_path}"],
            )

        return None

    async def _process_dry_violation(self, file_path: Path) -> FixResult:
        """Process DRY violation detection and fixing for a file."""
        content = self.context.get_file_content(file_path)
        if not content:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"Could not read file: {file_path}"],
            )

        violations = self._detect_dry_violations(content, file_path)

        # No detected duplication is treated as a (moderately confident) success.
        if not violations:
            return FixResult(
                success=True,
                confidence=0.7,
                recommendations=["No DRY violations detected"],
            )

        return self._apply_and_save_dry_fixes(file_path, content, violations)

    def _apply_and_save_dry_fixes(
        self,
        file_path: Path,
        content: str,
        violations: list[dict[str, t.Any]],
    ) -> FixResult:
        """Apply DRY fixes and save changes.

        Writes the rewritten file back via the agent context; reports failure
        when no fix changed the content or the write itself failed.
        """
        fixed_content = self._apply_dry_fixes(content, violations)

        if fixed_content == content:
            return self._create_no_fixes_result()

        success = self.context.write_file_content(file_path, fixed_content)
        if not success:
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"Failed to write fixed file: {file_path}"],
            )

        return FixResult(
            success=True,
            confidence=0.8,
            fixes_applied=[
                f"Fixed {len(violations)} DRY violations",
                "Consolidated repetitive patterns",
            ],
            files_modified=[str(file_path)],
            recommendations=["Verify functionality after DRY fixes"],
        )

    def _create_no_fixes_result(self) -> FixResult:
        """Create result for when no fixes could be applied."""
        return FixResult(
            success=False,
            confidence=0.5,
            remaining_issues=["Could not automatically fix DRY violations"],
            recommendations=[
                "Manual refactoring required",
                "Consider extracting common patterns to utility functions",
                "Create base classes or mixins for repeated functionality",
            ],
        )

    def _create_dry_error_result(self, error: Exception) -> FixResult:
        """Create result for DRY processing errors."""
        return FixResult(
            success=False,
            confidence=0.0,
            remaining_issues=[f"Error processing file: {error}"],
        )

    def _detect_dry_violations(
        self,
        content: str,
        file_path: Path,
    ) -> list[dict[str, t.Any]]:
        """Detect various types of DRY violations in the code.

        Each violation dict carries a "type", the matched "instances"
        (line numbers + content), and a human-readable "suggestion".
        """
        violations: list[dict[str, t.Any]] = []

        # Detect error response patterns
        violations.extend(self._detect_error_response_patterns(content))

        # Detect path conversion patterns
        violations.extend(self._detect_path_conversion_patterns(content))

        # Detect file existence patterns
        violations.extend(self._detect_file_existence_patterns(content))

        # Detect exception handling patterns
        violations.extend(self._detect_exception_patterns(content))

        return violations

    def _detect_error_response_patterns(self, content: str) -> list[dict[str, t.Any]]:
        """Detect repetitive error response patterns."""
        violations: list[dict[str, t.Any]] = []
        lines = content.split("\n")

        # Pattern: return f'{"error": "message", "success": false}'
        error_pattern = re.compile(
            r'return\s+f?[\'\"]\{[\'\""]error[\'\""]:\s*[\'\""]([^\'\"]*)[\'\""].*\}[\'\""]',
        )

        error_responses: list[dict[str, t.Any]] = []
        for i, line in enumerate(lines):
            match = error_pattern.search(line.strip())
            if match:
                error_responses.append(
                    {
                        "line_number": i + 1,
                        "content": line.strip(),
                        "error_message": match.group(1),
                    },
                )

        if len(error_responses) >= 3:  # Only flag if 3+ similar patterns
            violations.append(
                {
                    "type": "error_response_pattern",
                    "instances": error_responses,
                    "suggestion": "Extract to error utility function",
                },
            )

        return violations

    def _detect_path_conversion_patterns(self, content: str) -> list[dict[str, t.Any]]:
        """Detect repetitive path conversion patterns."""
        violations: list[dict[str, t.Any]] = []
        lines = content.split("\n")

        # Pattern: Path(path) if isinstance(path, str) else path
        path_pattern = re.compile(
            r"Path\([^)]+\)\s+if\s+isinstance\([^)]+,\s*str\)\s+else\s+[^)]+",
        )

        path_conversions: list[dict[str, t.Any]] = [
            {
                "line_number": i + 1,
                "content": line.strip(),
            }
            for i, line in enumerate(lines)
            if path_pattern.search(line)
        ]

        if len(path_conversions) >= 2:
            violations.append(
                {
                    "type": "path_conversion_pattern",
                    "instances": path_conversions,
                    "suggestion": "Extract to path utility function",
                },
            )

        return violations

    def _detect_file_existence_patterns(self, content: str) -> list[dict[str, t.Any]]:
        """Detect repetitive file existence check patterns."""
        violations: list[dict[str, t.Any]] = []
        lines = content.split("\n")

        # Pattern: if not *.exists():
        existence_pattern = re.compile(r"if\s+not\s+\w+\.exists\(\):")

        existence_checks: list[dict[str, t.Any]] = [
            {
                "line_number": i + 1,
                "content": line.strip(),
            }
            for i, line in enumerate(lines)
            if existence_pattern.search(line.strip())
        ]

        if len(existence_checks) >= 3:
            violations.append(
                {
                    "type": "file_existence_pattern",
                    "instances": existence_checks,
                    "suggestion": "Extract to file validation utility",
                },
            )

        return violations

    def _detect_exception_patterns(self, content: str) -> list[dict[str, t.Any]]:
        """Detect repetitive exception handling patterns."""
        violations: list[dict[str, t.Any]] = []
        lines = content.split("\n")

        # Pattern: except Exception as e: return {"error": str(e)}
        exception_pattern = re.compile(r"except\s+\w*Exception\s+as\s+\w+:")

        exception_handlers: list[dict[str, t.Any]] = []
        for i, line in enumerate(lines):
            if exception_pattern.search(line.strip()):
                # Look ahead for error return pattern
                if (
                    i + 1 < len(lines)
                    and "error" in lines[i + 1]
                    and "str(" in lines[i + 1]
                ):
                    exception_handlers.append(
                        {
                            "line_number": i + 1,
                            "content": line.strip(),
                            "next_line": lines[i + 1].strip(),
                        },
                    )

        if len(exception_handlers) >= 3:
            violations.append(
                {
                    "type": "exception_handling_pattern",
                    "instances": exception_handlers,
                    "suggestion": "Extract to error handling utility or decorator",
                },
            )

        return violations

    def _apply_dry_fixes(self, content: str, violations: list[dict[str, t.Any]]) -> str:
        """Apply fixes for detected DRY violations.

        Only error-response and path-conversion patterns are auto-fixed;
        the other violation types surface as recommendations only.
        NOTE(review): when both fixable violation types are present, the
        second fix reuses line numbers recorded before the first fix's
        insertions, so its targets may be shifted — TODO confirm.
        """
        lines = content.split("\n")
        modified = False

        for violation in violations:
            if violation["type"] == "error_response_pattern":
                lines, changed = self._fix_error_response_pattern(lines, violation)
                modified = modified or changed
            elif violation["type"] == "path_conversion_pattern":
                lines, changed = self._fix_path_conversion_pattern(lines, violation)
                modified = modified or changed

        return "\n".join(lines) if modified else content

    def _fix_error_response_pattern(
        self,
        lines: list[str],
        violation: dict[str, t.Any],
    ) -> tuple[list[str], bool]:
        """Fix error response patterns by adding utility function."""
        # Add utility function at the top of the file (after imports)
        utility_function = '''
def _create_error_response(message: str, success: bool = False) -> str:
    """Utility function to create standardized error responses."""
    import json
    return json.dumps({"error": message, "success": success})
'''

        # Find the right place to insert (after imports)
        insert_pos = 0
        for i, line in enumerate(lines):
            if line.strip().startswith(("import ", "from ")):
                insert_pos = i + 1
            elif line.strip() and not line.strip().startswith("#"):
                break

        # Insert utility function
        utility_lines = utility_function.strip().split("\n")
        for i, util_line in enumerate(utility_lines):
            lines.insert(insert_pos + i, util_line)

        # Replace error response patterns
        for instance in violation["instances"]:
            line_number: int = int(instance["line_number"])
            line_idx = line_number - 1 + len(utility_lines)  # Adjust for inserted lines
            # NOTE(review): the offset above assumes every matched instance
            # sits below the insertion point; an instance above it would be
            # mis-addressed — TODO confirm against real inputs.
            if line_idx < len(lines):
                original_line: str = lines[line_idx]
                # Extract the error message
                error_msg: str = str(instance["error_message"])
                # Replace with utility function call
                indent = len(original_line) - len(original_line.lstrip())
                new_line = (
                    " " * indent + f'return _create_error_response("{error_msg}")'
                )
                lines[line_idx] = new_line

        # NOTE(review): reports a modification unconditionally, even when no
        # instance line was actually replaced above — TODO confirm intended.
        return lines, True

    def _fix_path_conversion_pattern(
        self,
        lines: list[str],
        violation: dict[str, t.Any],
    ) -> tuple[list[str], bool]:
        """Fix path conversion patterns by adding utility function."""
        # Add utility function
        utility_function = '''
def _ensure_path(path: str | Path) -> Path:
    """Utility function to ensure a path is a Path object."""
    return Path(path) if isinstance(path, str) else path
'''

        # Find insertion point (after imports)
        insert_pos = 0
        for i, line in enumerate(lines):
            if line.strip().startswith(("import ", "from ")):
                insert_pos = i + 1
            elif line.strip() and not line.strip().startswith("#"):
                break

        # Insert utility function
        utility_lines = utility_function.strip().split("\n")
        for i, util_line in enumerate(utility_lines):
            lines.insert(insert_pos + i, util_line)

        # Replace path conversion patterns
        path_pattern = re.compile(
            r"Path\([^)]+\)\s+if\s+isinstance\([^)]+,\s*str\)\s+else\s+([^)]+)",
        )

        for instance in violation["instances"]:
            line_number: int = int(instance["line_number"])
            line_idx = line_number - 1 + len(utility_lines)
            # NOTE(review): same below-the-insertion-point assumption as in
            # _fix_error_response_pattern — TODO confirm.
            if line_idx < len(lines):
                original_line: str = lines[line_idx]
                # Replace pattern with utility function call
                new_line: str = path_pattern.sub(r"_ensure_path(\1)", original_line)
                lines[line_idx] = new_line

        return lines, True


agent_registry.register(DRYAgent)
|
|
@@ -0,0 +1,245 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
|
|
4
|
+
from .base import (
|
|
5
|
+
AgentContext,
|
|
6
|
+
FixResult,
|
|
7
|
+
Issue,
|
|
8
|
+
IssueType,
|
|
9
|
+
SubAgent,
|
|
10
|
+
agent_registry,
|
|
11
|
+
)
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
class FormattingAgent(SubAgent):
    """Agent that repairs style problems: formatting, whitespace, and imports.

    Delegates the bulk of the work to ruff and pre-commit hooks, then applies
    lightweight in-process cleanups to a specific file when one is named.
    """

    def __init__(self, context: AgentContext) -> None:
        super().__init__(context)
        # Tools this agent drives; kept as an attribute for introspection.
        self.supported_tools = [
            "ruff",
            "trailing-whitespace",
            "end-of-file-fixer",
        ]

    def get_supported_types(self) -> set[IssueType]:
        """Issue types this agent is willing to handle."""
        return {IssueType.FORMATTING, IssueType.IMPORT_ERROR}

    async def can_handle(self, issue: Issue) -> float:
        """Score how confident this agent is about fixing *issue* (0.0-1.0)."""
        if issue.type not in self.get_supported_types():
            return 0.0

        text = issue.message.lower()

        # Messages that unmistakably come from the formatting toolchain.
        strong_markers = (
            "would reformat",
            "trailing whitespace",
            "missing newline",
            "import sorting",
            "unused import",
            "ruff",
            "format",
        )
        for marker in strong_markers:
            if marker in text:
                return 1.0

        # Looser style vocabulary: probably formatting-related.
        weak_markers = (
            "whitespace",
            "indent",
            "spacing",
            "line length",
            "import",
            "style",
            "format",
        )
        for marker in weak_markers:
            if marker in text:
                return 0.8

        # A formatting-typed issue with no recognizable wording still gets
        # a modest score; anything else is declined.
        return 0.6 if issue.type == IssueType.FORMATTING else 0.0

    async def analyze_and_fix(self, issue: Issue) -> FixResult:
        """Run the full formatting toolchain, plus targeted fixes for issue.file_path."""
        self.log(f"Analyzing formatting issue: {issue.message}")

        applied: list[str] = []
        touched: list[str] = []

        try:
            for batch in (
                await self._apply_ruff_fixes(),
                await self._apply_whitespace_fixes(),
                await self._apply_import_fixes(),
            ):
                applied.extend(batch)

            if issue.file_path:
                per_file = await self._fix_specific_file(issue.file_path, issue)
                applied.extend(per_file)
                if per_file:
                    touched.append(issue.file_path)

            succeeded = len(applied) > 0
            return FixResult(
                success=succeeded,
                confidence=0.9 if succeeded else 0.3,
                fixes_applied=applied,
                files_modified=touched,
                # Only recommend process changes when nothing was fixed.
                recommendations=[]
                if succeeded
                else [
                    "Run ruff format regularly for consistent styling",
                    "Configure pre-commit hooks for automatic formatting",
                ],
            )

        except Exception as e:
            self.log(f"Error fixing formatting issue: {e}", "ERROR")
            return FixResult(
                success=False,
                confidence=0.0,
                remaining_issues=[f"Failed to apply formatting fixes: {e}"],
            )

    async def _apply_ruff_fixes(self) -> list[str]:
        """Run `ruff format` then `ruff check --fix`; report what succeeded."""
        applied: list[str] = []

        code, _, err = await self.run_command(
            ["uv", "run", "ruff", "format", "."],
        )
        if code == 0:
            applied.append("Applied ruff code formatting")
            self.log("Successfully applied ruff formatting")
        else:
            self.log(f"Ruff format failed: {err}", "WARN")

        code, _, err = await self.run_command(
            ["uv", "run", "ruff", "check", ".", "--fix"],
        )
        if code == 0:
            applied.append("Applied ruff linting fixes")
            self.log("Successfully applied ruff linting fixes")
        else:
            self.log(f"Ruff check --fix had issues: {err}", "WARN")

        return applied

    async def _apply_whitespace_fixes(self) -> list[str]:
        """Run the whitespace-related pre-commit hooks across the repo."""
        applied: list[str] = []

        hooks = (
            ("trailing-whitespace", "Fixed trailing whitespace"),
            ("end-of-file-fixer", "Fixed end-of-file formatting"),
        )
        for hook_id, note in hooks:
            code, _, _ = await self.run_command(
                ["uv", "run", "pre-commit", "run", hook_id, "--all-files"],
            )
            if code == 0:
                applied.append(note)
                self.log(note)

        return applied

    async def _apply_import_fixes(self) -> list[str]:
        """Sort imports and drop unused ones via ruff's I and F401 rules."""
        applied: list[str] = []

        code, _, _ = await self.run_command(
            ["uv", "run", "ruff", "check", ".", "--select", "I,F401", "--fix"],
        )
        if code == 0:
            applied.append("Organized imports and removed unused imports")
            self.log("Fixed import organization")

        return applied

    async def _fix_specific_file(self, file_path: str, issue: Issue) -> list[str]:
        """Apply in-process text cleanups to a single file, if it changed anything."""
        applied: list[str] = []

        try:
            target = Path(file_path)
            original = self._validate_and_get_file_content(target)
            if not original:
                return applied

            reformatted = self._apply_content_formatting(original)

            if reformatted != original and self.context.write_file_content(
                target, reformatted
            ):
                applied.append(f"Fixed formatting in {file_path}")
                self.log(f"Applied file-specific fixes to {file_path}")

        except Exception as e:
            self.log(f"Error fixing file {file_path}: {e}", "ERROR")

        return applied

    def _validate_and_get_file_content(self, path: Path) -> str | None:
        """Return the file's content, or None when missing, empty, or not a regular file."""
        if path.exists() and path.is_file():
            return self.context.get_file_content(path) or None
        return None

    def _apply_content_formatting(self, content: str) -> str:
        """Strip trailing whitespace, fix the final newline, collapse blank runs, expand tabs."""
        content = re.sub(r"[ \t]+$", "", content, flags=re.MULTILINE)

        if content and not content.endswith("\n"):
            content += "\n"

        content = re.sub(r"\n{3,}", "\n\n", content)

        return self._convert_tabs_to_spaces(content)

    def _convert_tabs_to_spaces(self, content: str) -> str:
        """Expand tab characters to 4-column stops, line by line."""
        return "\n".join(part.expandtabs(4) for part in content.split("\n"))


agent_registry.register(FormattingAgent)
|