crackerjack 0.31.10__py3-none-any.whl → 0.31.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/CLAUDE.md +288 -705
- crackerjack/__main__.py +22 -8
- crackerjack/agents/__init__.py +0 -3
- crackerjack/agents/architect_agent.py +0 -43
- crackerjack/agents/base.py +1 -9
- crackerjack/agents/coordinator.py +2 -148
- crackerjack/agents/documentation_agent.py +109 -81
- crackerjack/agents/dry_agent.py +122 -97
- crackerjack/agents/formatting_agent.py +3 -16
- crackerjack/agents/import_optimization_agent.py +1174 -130
- crackerjack/agents/performance_agent.py +956 -188
- crackerjack/agents/performance_helpers.py +229 -0
- crackerjack/agents/proactive_agent.py +1 -48
- crackerjack/agents/refactoring_agent.py +516 -246
- crackerjack/agents/refactoring_helpers.py +282 -0
- crackerjack/agents/security_agent.py +393 -90
- crackerjack/agents/test_creation_agent.py +1776 -120
- crackerjack/agents/test_specialist_agent.py +59 -15
- crackerjack/agents/tracker.py +0 -102
- crackerjack/api.py +145 -37
- crackerjack/cli/handlers.py +48 -30
- crackerjack/cli/interactive.py +11 -11
- crackerjack/cli/options.py +66 -4
- crackerjack/code_cleaner.py +808 -148
- crackerjack/config/global_lock_config.py +110 -0
- crackerjack/config/hooks.py +43 -64
- crackerjack/core/async_workflow_orchestrator.py +247 -97
- crackerjack/core/autofix_coordinator.py +192 -109
- crackerjack/core/enhanced_container.py +46 -63
- crackerjack/core/file_lifecycle.py +549 -0
- crackerjack/core/performance.py +9 -8
- crackerjack/core/performance_monitor.py +395 -0
- crackerjack/core/phase_coordinator.py +281 -94
- crackerjack/core/proactive_workflow.py +9 -58
- crackerjack/core/resource_manager.py +501 -0
- crackerjack/core/service_watchdog.py +490 -0
- crackerjack/core/session_coordinator.py +4 -8
- crackerjack/core/timeout_manager.py +504 -0
- crackerjack/core/websocket_lifecycle.py +475 -0
- crackerjack/core/workflow_orchestrator.py +343 -209
- crackerjack/dynamic_config.py +47 -6
- crackerjack/errors.py +3 -4
- crackerjack/executors/async_hook_executor.py +63 -13
- crackerjack/executors/cached_hook_executor.py +14 -14
- crackerjack/executors/hook_executor.py +100 -37
- crackerjack/executors/hook_lock_manager.py +856 -0
- crackerjack/executors/individual_hook_executor.py +120 -86
- crackerjack/intelligence/__init__.py +0 -7
- crackerjack/intelligence/adaptive_learning.py +13 -86
- crackerjack/intelligence/agent_orchestrator.py +15 -78
- crackerjack/intelligence/agent_registry.py +12 -59
- crackerjack/intelligence/agent_selector.py +31 -92
- crackerjack/intelligence/integration.py +1 -41
- crackerjack/interactive.py +9 -9
- crackerjack/managers/async_hook_manager.py +25 -8
- crackerjack/managers/hook_manager.py +9 -9
- crackerjack/managers/publish_manager.py +57 -59
- crackerjack/managers/test_command_builder.py +6 -36
- crackerjack/managers/test_executor.py +9 -61
- crackerjack/managers/test_manager.py +17 -63
- crackerjack/managers/test_manager_backup.py +77 -127
- crackerjack/managers/test_progress.py +4 -23
- crackerjack/mcp/cache.py +5 -12
- crackerjack/mcp/client_runner.py +10 -10
- crackerjack/mcp/context.py +64 -6
- crackerjack/mcp/dashboard.py +14 -11
- crackerjack/mcp/enhanced_progress_monitor.py +55 -55
- crackerjack/mcp/file_monitor.py +72 -42
- crackerjack/mcp/progress_components.py +103 -84
- crackerjack/mcp/progress_monitor.py +122 -49
- crackerjack/mcp/rate_limiter.py +12 -12
- crackerjack/mcp/server_core.py +16 -22
- crackerjack/mcp/service_watchdog.py +26 -26
- crackerjack/mcp/state.py +15 -0
- crackerjack/mcp/tools/core_tools.py +95 -39
- crackerjack/mcp/tools/error_analyzer.py +6 -32
- crackerjack/mcp/tools/execution_tools.py +1 -56
- crackerjack/mcp/tools/execution_tools_backup.py +35 -131
- crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
- crackerjack/mcp/tools/intelligence_tools.py +2 -55
- crackerjack/mcp/tools/monitoring_tools.py +308 -145
- crackerjack/mcp/tools/proactive_tools.py +12 -42
- crackerjack/mcp/tools/progress_tools.py +23 -15
- crackerjack/mcp/tools/utility_tools.py +3 -40
- crackerjack/mcp/tools/workflow_executor.py +40 -60
- crackerjack/mcp/websocket/app.py +0 -3
- crackerjack/mcp/websocket/endpoints.py +206 -268
- crackerjack/mcp/websocket/jobs.py +213 -66
- crackerjack/mcp/websocket/server.py +84 -6
- crackerjack/mcp/websocket/websocket_handler.py +137 -29
- crackerjack/models/config_adapter.py +3 -16
- crackerjack/models/protocols.py +162 -3
- crackerjack/models/resource_protocols.py +454 -0
- crackerjack/models/task.py +3 -3
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +25 -71
- crackerjack/monitoring/regression_prevention.py +28 -87
- crackerjack/orchestration/advanced_orchestrator.py +44 -78
- crackerjack/orchestration/coverage_improvement.py +10 -60
- crackerjack/orchestration/execution_strategies.py +16 -16
- crackerjack/orchestration/test_progress_streamer.py +61 -53
- crackerjack/plugins/base.py +1 -1
- crackerjack/plugins/managers.py +22 -20
- crackerjack/py313.py +65 -21
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +627 -0
- crackerjack/services/cache.py +7 -9
- crackerjack/services/config.py +35 -52
- crackerjack/services/config_integrity.py +5 -16
- crackerjack/services/config_merge.py +542 -0
- crackerjack/services/contextual_ai_assistant.py +17 -19
- crackerjack/services/coverage_ratchet.py +44 -73
- crackerjack/services/debug.py +25 -39
- crackerjack/services/dependency_monitor.py +52 -50
- crackerjack/services/enhanced_filesystem.py +14 -11
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/filesystem.py +1 -12
- crackerjack/services/git.py +71 -47
- crackerjack/services/health_metrics.py +31 -27
- crackerjack/services/initialization.py +276 -428
- crackerjack/services/input_validator.py +760 -0
- crackerjack/services/log_manager.py +16 -16
- crackerjack/services/logging.py +7 -6
- crackerjack/services/metrics.py +43 -43
- crackerjack/services/pattern_cache.py +2 -31
- crackerjack/services/pattern_detector.py +26 -63
- crackerjack/services/performance_benchmarks.py +20 -45
- crackerjack/services/regex_patterns.py +2887 -0
- crackerjack/services/regex_utils.py +537 -0
- crackerjack/services/secure_path_utils.py +683 -0
- crackerjack/services/secure_status_formatter.py +534 -0
- crackerjack/services/secure_subprocess.py +605 -0
- crackerjack/services/security.py +47 -10
- crackerjack/services/security_logger.py +492 -0
- crackerjack/services/server_manager.py +109 -50
- crackerjack/services/smart_scheduling.py +8 -25
- crackerjack/services/status_authentication.py +603 -0
- crackerjack/services/status_security_manager.py +442 -0
- crackerjack/services/thread_safe_status_collector.py +546 -0
- crackerjack/services/tool_version_service.py +1 -23
- crackerjack/services/unified_config.py +36 -58
- crackerjack/services/validation_rate_limiter.py +269 -0
- crackerjack/services/version_checker.py +9 -40
- crackerjack/services/websocket_resource_limiter.py +572 -0
- crackerjack/slash_commands/__init__.py +52 -2
- crackerjack/tools/__init__.py +0 -0
- crackerjack/tools/validate_input_validator_patterns.py +262 -0
- crackerjack/tools/validate_regex_patterns.py +198 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/METADATA +197 -12
- crackerjack-0.31.12.dist-info/RECORD +178 -0
- crackerjack/cli/facade.py +0 -104
- crackerjack-0.31.10.dist-info/RECORD +0 -149
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.12.dist-info}/licenses/LICENSE +0 -0
|
@@ -1,8 +1,10 @@
|
|
|
1
1
|
import ast
|
|
2
|
+
import subprocess
|
|
2
3
|
import typing as t
|
|
3
4
|
from collections import defaultdict
|
|
4
5
|
from pathlib import Path
|
|
5
6
|
|
|
7
|
+
from ..services.regex_patterns import SAFE_PATTERNS
|
|
6
8
|
from .base import (
|
|
7
9
|
AgentContext,
|
|
8
10
|
FixResult,
|
|
@@ -17,7 +19,9 @@ class ImportAnalysis(t.NamedTuple):
|
|
|
17
19
|
file_path: Path
|
|
18
20
|
mixed_imports: list[str]
|
|
19
21
|
redundant_imports: list[str]
|
|
22
|
+
unused_imports: list[str]
|
|
20
23
|
optimization_opportunities: list[str]
|
|
24
|
+
import_violations: list[str]
|
|
21
25
|
|
|
22
26
|
|
|
23
27
|
class ImportOptimizationAgent(SubAgent):
|
|
@@ -25,14 +29,18 @@ class ImportOptimizationAgent(SubAgent):
|
|
|
25
29
|
|
|
26
30
|
def __init__(self, context: AgentContext) -> None:
|
|
27
31
|
super().__init__(context)
|
|
28
|
-
|
|
32
|
+
|
|
33
|
+
def log(self, message: str, level: str = "INFO") -> None:
|
|
34
|
+
"""Simple logging method for the agent."""
|
|
35
|
+
print(f"[{level}] ImportOptimizationAgent: {message}")
|
|
29
36
|
|
|
30
37
|
def get_supported_types(self) -> set[IssueType]:
|
|
31
38
|
return {IssueType.IMPORT_ERROR, IssueType.DEAD_CODE}
|
|
32
39
|
|
|
33
40
|
async def can_handle(self, issue: Issue) -> float:
|
|
41
|
+
"""Determine confidence level for handling import-related issues."""
|
|
34
42
|
if issue.type in self.get_supported_types():
|
|
35
|
-
return 0.
|
|
43
|
+
return 0.85
|
|
36
44
|
|
|
37
45
|
description_lower = issue.message.lower()
|
|
38
46
|
import_keywords = [
|
|
@@ -40,9 +48,21 @@ class ImportOptimizationAgent(SubAgent):
|
|
|
40
48
|
"unused import",
|
|
41
49
|
"redundant import",
|
|
42
50
|
"import style",
|
|
51
|
+
"mixed import",
|
|
52
|
+
"import organization",
|
|
53
|
+
"from import",
|
|
54
|
+
"star import",
|
|
55
|
+
"unused variable",
|
|
56
|
+
"defined but never used",
|
|
43
57
|
]
|
|
44
58
|
if any(keyword in description_lower for keyword in import_keywords):
|
|
45
|
-
return 0.
|
|
59
|
+
return 0.8
|
|
60
|
+
|
|
61
|
+
# Check for ruff/pyflakes import error codes
|
|
62
|
+
# Use safe pattern matching for error code detection
|
|
63
|
+
pattern_obj = SAFE_PATTERNS["match_error_code_patterns"]
|
|
64
|
+
if pattern_obj.test(issue.message):
|
|
65
|
+
return 0.85
|
|
46
66
|
|
|
47
67
|
return 0.0
|
|
48
68
|
|
|
@@ -50,65 +70,323 @@ class ImportOptimizationAgent(SubAgent):
|
|
|
50
70
|
return await self.fix_issue(issue)
|
|
51
71
|
|
|
52
72
|
async def analyze_file(self, file_path: Path) -> ImportAnalysis:
|
|
53
|
-
|
|
54
|
-
|
|
73
|
+
"""Comprehensive import analysis including vulture dead code detection."""
|
|
74
|
+
# Validate file
|
|
75
|
+
if not self._is_valid_python_file(file_path):
|
|
76
|
+
return self._create_empty_import_analysis(file_path)
|
|
77
|
+
|
|
78
|
+
# Parse file content
|
|
79
|
+
return await self._parse_and_analyze_file(file_path)
|
|
80
|
+
|
|
81
|
+
def _is_valid_python_file(self, file_path: Path) -> bool:
|
|
82
|
+
"""Check if the file is a valid Python file."""
|
|
83
|
+
return file_path.exists() and file_path.suffix == ".py"
|
|
84
|
+
|
|
85
|
+
def _create_empty_import_analysis(self, file_path: Path) -> ImportAnalysis:
|
|
86
|
+
"""Create an empty import analysis for invalid files."""
|
|
87
|
+
return ImportAnalysis(file_path, [], [], [], [], [])
|
|
55
88
|
|
|
89
|
+
async def _parse_and_analyze_file(self, file_path: Path) -> ImportAnalysis:
|
|
90
|
+
"""Parse and analyze a Python file."""
|
|
56
91
|
try:
|
|
57
92
|
with file_path.open(encoding="utf-8") as f:
|
|
58
|
-
|
|
93
|
+
content = f.read()
|
|
94
|
+
tree = ast.parse(content)
|
|
59
95
|
except (SyntaxError, OSError) as e:
|
|
60
|
-
self.
|
|
61
|
-
return ImportAnalysis(file_path, [], [], [])
|
|
96
|
+
return self._handle_parse_error(file_path, e)
|
|
62
97
|
|
|
63
|
-
|
|
98
|
+
# Get unused imports from vulture
|
|
99
|
+
unused_imports = await self._detect_unused_imports(file_path)
|
|
64
100
|
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
all_imports: list[dict[str, t.Any]] = []
|
|
101
|
+
# Analyze import structure
|
|
102
|
+
return self._analyze_imports(file_path, tree, content, unused_imports)
|
|
68
103
|
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
"type": "standard",
|
|
74
|
-
"module": alias.name,
|
|
75
|
-
"name": alias.asname or alias.name,
|
|
76
|
-
"line": node.lineno,
|
|
77
|
-
}
|
|
78
|
-
all_imports.append(import_info)
|
|
79
|
-
base_module = alias.name.split(".")[0]
|
|
80
|
-
module_imports[base_module].append(import_info)
|
|
81
|
-
|
|
82
|
-
elif isinstance(node, ast.ImportFrom) and node.module:
|
|
83
|
-
for alias in node.names:
|
|
84
|
-
import_info = {
|
|
85
|
-
"type": "from",
|
|
86
|
-
"module": node.module,
|
|
87
|
-
"name": alias.name,
|
|
88
|
-
"asname": alias.asname,
|
|
89
|
-
"line": node.lineno,
|
|
90
|
-
}
|
|
91
|
-
all_imports.append(import_info)
|
|
92
|
-
base_module = node.module.split(".")[0]
|
|
93
|
-
module_imports[base_module].append(import_info)
|
|
104
|
+
def _handle_parse_error(self, file_path: Path, e: Exception) -> ImportAnalysis:
|
|
105
|
+
"""Handle errors when parsing a file."""
|
|
106
|
+
self.log(f"Could not parse {file_path}: {e}", level="WARNING")
|
|
107
|
+
return ImportAnalysis(file_path, [], [], [], [], [])
|
|
94
108
|
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
109
|
+
async def _detect_unused_imports(self, file_path: Path) -> list[str]:
|
|
110
|
+
"""Use vulture to detect unused imports with intelligent filtering."""
|
|
111
|
+
try:
|
|
112
|
+
result = self._run_vulture_analysis(file_path)
|
|
113
|
+
return self._extract_unused_imports_from_result(result)
|
|
114
|
+
except (
|
|
115
|
+
subprocess.TimeoutExpired,
|
|
116
|
+
subprocess.SubprocessError,
|
|
117
|
+
FileNotFoundError,
|
|
118
|
+
):
|
|
119
|
+
# Fallback to basic AST analysis if vulture fails
|
|
120
|
+
return []
|
|
121
|
+
|
|
122
|
+
def _run_vulture_analysis(
|
|
123
|
+
self, file_path: Path
|
|
124
|
+
) -> subprocess.CompletedProcess[str]:
|
|
125
|
+
"""Run vulture analysis on a single file."""
|
|
126
|
+
return subprocess.run(
|
|
127
|
+
["uv", "run", "vulture", "--min-confidence", "80", str(file_path)],
|
|
128
|
+
capture_output=True,
|
|
129
|
+
text=True,
|
|
130
|
+
timeout=30,
|
|
131
|
+
cwd=self.context.project_path,
|
|
99
132
|
)
|
|
100
133
|
|
|
134
|
+
def _extract_unused_imports_from_result(
|
|
135
|
+
self, result: subprocess.CompletedProcess[str]
|
|
136
|
+
) -> list[str]:
|
|
137
|
+
"""Extract unused import names from vulture result."""
|
|
138
|
+
unused_imports = []
|
|
139
|
+
if not self._is_valid_vulture_result(result):
|
|
140
|
+
return unused_imports
|
|
141
|
+
|
|
142
|
+
for line in result.stdout.strip().split("\n"):
|
|
143
|
+
import_name = self._extract_import_name_from_line(line)
|
|
144
|
+
if import_name:
|
|
145
|
+
unused_imports.append(import_name)
|
|
146
|
+
|
|
147
|
+
return unused_imports
|
|
148
|
+
|
|
149
|
+
def _is_valid_vulture_result(
|
|
150
|
+
self, result: subprocess.CompletedProcess[str]
|
|
151
|
+
) -> bool:
|
|
152
|
+
"""Check if vulture result is valid and contains output."""
|
|
153
|
+
return result.returncode == 0 and bool(result.stdout)
|
|
154
|
+
|
|
155
|
+
def _extract_import_name_from_line(self, line: str) -> str | None:
|
|
156
|
+
"""Extract import name from a single vulture output line."""
|
|
157
|
+
if not line or "unused import" not in line.lower():
|
|
158
|
+
return None
|
|
159
|
+
|
|
160
|
+
# Extract import name from vulture output using safe patterns
|
|
161
|
+
# Format: "file.py:line: unused import 'name' (confidence: XX%)"
|
|
162
|
+
pattern_obj = SAFE_PATTERNS["extract_unused_import_name"]
|
|
163
|
+
if pattern_obj.test(line):
|
|
164
|
+
return pattern_obj.apply(line)
|
|
165
|
+
return None
|
|
166
|
+
|
|
167
|
+
def _analyze_imports(
|
|
168
|
+
self, file_path: Path, tree: ast.AST, content: str, unused_imports: list[str]
|
|
169
|
+
) -> ImportAnalysis:
|
|
170
|
+
"""Analyze imports in a Python file for various optimization opportunities."""
|
|
171
|
+
# Extract and analyze import information
|
|
172
|
+
analysis_results = self._perform_full_import_analysis(tree, content)
|
|
173
|
+
|
|
174
|
+
# Create the import analysis object
|
|
175
|
+
return self._create_import_analysis(file_path, analysis_results, unused_imports)
|
|
176
|
+
|
|
177
|
+
def _create_import_analysis(
|
|
178
|
+
self,
|
|
179
|
+
file_path: Path,
|
|
180
|
+
analysis_results: dict[str, list[str]],
|
|
181
|
+
unused_imports: list[str],
|
|
182
|
+
) -> ImportAnalysis:
|
|
183
|
+
"""Create an ImportAnalysis object from the analysis results."""
|
|
101
184
|
return ImportAnalysis(
|
|
102
185
|
file_path=file_path,
|
|
103
|
-
mixed_imports=mixed_imports,
|
|
104
|
-
redundant_imports=redundant_imports,
|
|
105
|
-
|
|
186
|
+
mixed_imports=analysis_results["mixed_imports"],
|
|
187
|
+
redundant_imports=analysis_results["redundant_imports"],
|
|
188
|
+
unused_imports=unused_imports,
|
|
189
|
+
optimization_opportunities=analysis_results["optimization_opportunities"],
|
|
190
|
+
import_violations=analysis_results["import_violations"],
|
|
191
|
+
)
|
|
192
|
+
|
|
193
|
+
def _perform_full_import_analysis(
|
|
194
|
+
self, tree: ast.AST, content: str
|
|
195
|
+
) -> dict[str, list[str]]:
|
|
196
|
+
"""Perform full import analysis on the AST tree."""
|
|
197
|
+
# Extract import information
|
|
198
|
+
module_imports, all_imports = self._extract_import_information(tree)
|
|
199
|
+
|
|
200
|
+
# Analyze different aspects of imports
|
|
201
|
+
return self._perform_import_analysis(module_imports, all_imports, content)
|
|
202
|
+
|
|
203
|
+
def _perform_import_analysis(
|
|
204
|
+
self,
|
|
205
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
206
|
+
all_imports: list[dict[str, t.Any]],
|
|
207
|
+
content: str,
|
|
208
|
+
) -> dict[str, list[str]]:
|
|
209
|
+
"""Perform comprehensive analysis of import patterns."""
|
|
210
|
+
# Analyze different aspects of imports
|
|
211
|
+
analysis_results = self._analyze_import_patterns(
|
|
212
|
+
module_imports, all_imports, content
|
|
213
|
+
)
|
|
214
|
+
|
|
215
|
+
return analysis_results
|
|
216
|
+
|
|
217
|
+
def _analyze_import_patterns(
|
|
218
|
+
self,
|
|
219
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
220
|
+
all_imports: list[dict[str, t.Any]],
|
|
221
|
+
content: str,
|
|
222
|
+
) -> dict[str, list[str]]:
|
|
223
|
+
"""Analyze various import patterns."""
|
|
224
|
+
# Analyze different aspects of imports
|
|
225
|
+
return self._analyze_import_aspects(module_imports, all_imports, content)
|
|
226
|
+
|
|
227
|
+
def _analyze_import_aspects(
|
|
228
|
+
self,
|
|
229
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
230
|
+
all_imports: list[dict[str, t.Any]],
|
|
231
|
+
content: str,
|
|
232
|
+
) -> dict[str, list[str]]:
|
|
233
|
+
"""Analyze different aspects of imports."""
|
|
234
|
+
# Analyze each aspect of imports separately
|
|
235
|
+
return self._analyze_each_import_aspect(module_imports, all_imports, content)
|
|
236
|
+
|
|
237
|
+
def _analyze_each_import_aspect(
|
|
238
|
+
self,
|
|
239
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
240
|
+
all_imports: list[dict[str, t.Any]],
|
|
241
|
+
content: str,
|
|
242
|
+
) -> dict[str, list[str]]:
|
|
243
|
+
"""Analyze each import aspect individually."""
|
|
244
|
+
mixed_imports = self._find_mixed_imports(module_imports)
|
|
245
|
+
redundant_imports = self._find_redundant_imports(all_imports)
|
|
246
|
+
optimization_opportunities = self._find_optimization_opportunities(
|
|
247
|
+
module_imports
|
|
106
248
|
)
|
|
249
|
+
import_violations = self._find_import_violations(content, all_imports)
|
|
250
|
+
|
|
251
|
+
return {
|
|
252
|
+
"mixed_imports": mixed_imports,
|
|
253
|
+
"redundant_imports": redundant_imports,
|
|
254
|
+
"optimization_opportunities": optimization_opportunities,
|
|
255
|
+
"import_violations": import_violations,
|
|
256
|
+
}
|
|
257
|
+
|
|
258
|
+
def _extract_import_information(
|
|
259
|
+
self, tree: ast.AST
|
|
260
|
+
) -> tuple[dict[str, list[dict[str, t.Any]]], list[dict[str, t.Any]]]:
|
|
261
|
+
"""Extract import information from the AST tree."""
|
|
262
|
+
module_imports: dict[str, list[dict[str, t.Any]]] = defaultdict(list)
|
|
263
|
+
all_imports: list[dict[str, t.Any]] = []
|
|
264
|
+
|
|
265
|
+
self._process_tree_imports(tree, all_imports, module_imports)
|
|
266
|
+
|
|
267
|
+
return module_imports, all_imports
|
|
268
|
+
|
|
269
|
+
def _initialize_import_containers(
|
|
270
|
+
self,
|
|
271
|
+
) -> tuple[dict[str, list[dict[str, t.Any]]], list[dict[str, t.Any]]]:
|
|
272
|
+
"""Initialize containers for import information."""
|
|
273
|
+
module_imports: dict[str, list[dict[str, t.Any]]] = defaultdict(list)
|
|
274
|
+
all_imports: list[dict[str, t.Any]] = []
|
|
275
|
+
return module_imports, all_imports
|
|
276
|
+
|
|
277
|
+
def _process_tree_imports(
|
|
278
|
+
self,
|
|
279
|
+
tree: ast.AST,
|
|
280
|
+
all_imports: list[dict[str, t.Any]],
|
|
281
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
282
|
+
) -> None:
|
|
283
|
+
"""Process all import statements in the AST tree."""
|
|
284
|
+
# Process all nodes in the tree
|
|
285
|
+
self._process_all_nodes(tree, all_imports, module_imports)
|
|
286
|
+
|
|
287
|
+
def _process_all_nodes(
|
|
288
|
+
self,
|
|
289
|
+
tree: ast.AST,
|
|
290
|
+
all_imports: list[dict[str, t.Any]],
|
|
291
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
292
|
+
) -> None:
|
|
293
|
+
"""Process all nodes in the AST tree."""
|
|
294
|
+
# Process all import statements in the tree
|
|
295
|
+
self._process_import_statements_in_tree(tree, all_imports, module_imports)
|
|
296
|
+
|
|
297
|
+
def _process_import_statements_in_tree(
|
|
298
|
+
self,
|
|
299
|
+
tree: ast.AST,
|
|
300
|
+
all_imports: list[dict[str, t.Any]],
|
|
301
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
302
|
+
) -> None:
|
|
303
|
+
"""Process all import statements in the AST tree."""
|
|
304
|
+
for node in ast.walk(tree):
|
|
305
|
+
self._process_node_if_import(node, all_imports, module_imports)
|
|
306
|
+
|
|
307
|
+
def _process_node_if_import(
|
|
308
|
+
self,
|
|
309
|
+
node: ast.AST,
|
|
310
|
+
all_imports: list[dict[str, t.Any]],
|
|
311
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
312
|
+
) -> None:
|
|
313
|
+
"""Process a node if it's an import statement."""
|
|
314
|
+
if isinstance(node, ast.Import):
|
|
315
|
+
self._process_standard_import(node, all_imports, module_imports)
|
|
316
|
+
elif isinstance(node, ast.ImportFrom) and node.module:
|
|
317
|
+
self._process_from_import(node, all_imports, module_imports)
|
|
318
|
+
|
|
319
|
+
def _process_standard_import(
|
|
320
|
+
self,
|
|
321
|
+
node: ast.Import,
|
|
322
|
+
all_imports: list[dict[str, t.Any]],
|
|
323
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
324
|
+
) -> None:
|
|
325
|
+
"""Process standard import statements."""
|
|
326
|
+
# Process all aliases in the import
|
|
327
|
+
self._process_standard_import_aliases(node, all_imports, module_imports)
|
|
328
|
+
|
|
329
|
+
def _process_standard_import_aliases(
|
|
330
|
+
self,
|
|
331
|
+
node: ast.Import,
|
|
332
|
+
all_imports: list[dict[str, t.Any]],
|
|
333
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
334
|
+
) -> None:
|
|
335
|
+
"""Process all aliases in a standard import statement."""
|
|
336
|
+
for alias in node.names:
|
|
337
|
+
import_info = {
|
|
338
|
+
"type": "standard",
|
|
339
|
+
"module": alias.name,
|
|
340
|
+
"name": alias.asname or alias.name,
|
|
341
|
+
"line": node.lineno,
|
|
342
|
+
}
|
|
343
|
+
all_imports.append(import_info)
|
|
344
|
+
base_module = alias.name.split(".")[0]
|
|
345
|
+
module_imports[base_module].append(import_info)
|
|
346
|
+
|
|
347
|
+
def _process_from_import(
|
|
348
|
+
self,
|
|
349
|
+
node: ast.ImportFrom,
|
|
350
|
+
all_imports: list[dict[str, t.Any]],
|
|
351
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
352
|
+
) -> None:
|
|
353
|
+
"""Process from import statements."""
|
|
354
|
+
# Process all aliases in the from import
|
|
355
|
+
self._process_from_import_aliases(node, all_imports, module_imports)
|
|
356
|
+
|
|
357
|
+
def _process_from_import_aliases(
|
|
358
|
+
self,
|
|
359
|
+
node: ast.ImportFrom,
|
|
360
|
+
all_imports: list[dict[str, t.Any]],
|
|
361
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
362
|
+
) -> None:
|
|
363
|
+
"""Process all aliases in a from import statement."""
|
|
364
|
+
for alias in node.names:
|
|
365
|
+
import_info = {
|
|
366
|
+
"type": "from",
|
|
367
|
+
"module": node.module,
|
|
368
|
+
"name": alias.name,
|
|
369
|
+
"asname": alias.asname,
|
|
370
|
+
"line": node.lineno,
|
|
371
|
+
}
|
|
372
|
+
all_imports.append(import_info)
|
|
373
|
+
base_module = node.module.split(".")[0]
|
|
374
|
+
module_imports[base_module].append(import_info)
|
|
107
375
|
|
|
108
376
|
def _find_mixed_imports(
|
|
109
377
|
self,
|
|
110
378
|
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
111
379
|
) -> list[str]:
|
|
380
|
+
mixed: list[str] = []
|
|
381
|
+
# Check each module for mixed import types
|
|
382
|
+
mixed.extend(self._check_mixed_imports_per_module(module_imports))
|
|
383
|
+
return mixed
|
|
384
|
+
|
|
385
|
+
def _check_mixed_imports_per_module(
|
|
386
|
+
self,
|
|
387
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
388
|
+
) -> list[str]:
|
|
389
|
+
"""Check each module for mixed import types."""
|
|
112
390
|
mixed: list[str] = []
|
|
113
391
|
for module, imports in module_imports.items():
|
|
114
392
|
types = {imp["type"] for imp in imports}
|
|
@@ -120,6 +398,17 @@ class ImportOptimizationAgent(SubAgent):
|
|
|
120
398
|
seen_modules: set[str] = set()
|
|
121
399
|
redundant: list[str] = []
|
|
122
400
|
|
|
401
|
+
# Check each import for redundancy
|
|
402
|
+
redundant.extend(self._check_redundant_imports(all_imports, seen_modules))
|
|
403
|
+
|
|
404
|
+
return redundant
|
|
405
|
+
|
|
406
|
+
def _check_redundant_imports(
|
|
407
|
+
self, all_imports: list[dict[str, t.Any]], seen_modules: set[str]
|
|
408
|
+
) -> list[str]:
|
|
409
|
+
"""Check each import for redundancy."""
|
|
410
|
+
redundant: list[str] = []
|
|
411
|
+
|
|
123
412
|
for imp in all_imports:
|
|
124
413
|
module_key = f"{imp['module']}: {imp['name']}"
|
|
125
414
|
if module_key in seen_modules:
|
|
@@ -132,150 +421,905 @@ class ImportOptimizationAgent(SubAgent):
|
|
|
132
421
|
self,
|
|
133
422
|
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
134
423
|
) -> list[str]:
|
|
424
|
+
"""Find import consolidation and optimization opportunities."""
|
|
425
|
+
# Find different types of optimization opportunities
|
|
426
|
+
return self._find_consolidation_opportunities(module_imports)
|
|
427
|
+
|
|
428
|
+
def _find_consolidation_opportunities(
|
|
429
|
+
self,
|
|
430
|
+
module_imports: dict[str, list[dict[str, t.Any]]],
|
|
431
|
+
) -> list[str]:
|
|
432
|
+
"""Find opportunities to consolidate imports."""
|
|
135
433
|
opportunities: list[str] = []
|
|
136
434
|
|
|
137
435
|
for module, imports in module_imports.items():
|
|
138
436
|
standard_imports = [imp for imp in imports if imp["type"] == "standard"]
|
|
437
|
+
from_imports = [imp for imp in imports if imp["type"] == "from"]
|
|
438
|
+
|
|
439
|
+
# Recommend consolidating multiple standard imports to from-imports
|
|
139
440
|
if len(standard_imports) >= 2:
|
|
140
441
|
opportunities.append(
|
|
141
|
-
f"
|
|
142
|
-
f"from {module}
|
|
442
|
+
f"Consolidate {len(standard_imports)} standard imports "
|
|
443
|
+
f"from '{module}' into from-import style",
|
|
444
|
+
)
|
|
445
|
+
|
|
446
|
+
# Recommend combining from-imports from same module
|
|
447
|
+
if len(from_imports) >= 3:
|
|
448
|
+
opportunities.append(
|
|
449
|
+
f"Consider combining {len(from_imports)} from-imports "
|
|
450
|
+
f"from '{module}' into fewer lines",
|
|
143
451
|
)
|
|
144
452
|
|
|
145
453
|
return opportunities
|
|
146
454
|
|
|
455
|
+
def _find_import_violations(
|
|
456
|
+
self, content: str, all_imports: list[dict[str, t.Any]]
|
|
457
|
+
) -> list[str]:
|
|
458
|
+
"""Find PEP 8 import organization violations."""
|
|
459
|
+
# Categorize imports and check ordering
|
|
460
|
+
violations = self._check_import_ordering(all_imports)
|
|
461
|
+
|
|
462
|
+
# Check for star imports
|
|
463
|
+
violations.extend(self._check_star_imports(content))
|
|
464
|
+
|
|
465
|
+
return violations
|
|
466
|
+
|
|
467
|
+
def _check_import_ordering(self, all_imports: list[dict[str, t.Any]]) -> list[str]:
|
|
468
|
+
"""Check if imports are in proper PEP 8 order."""
|
|
469
|
+
violations: list[str] = []
|
|
470
|
+
|
|
471
|
+
# Check for import organization (stdlib, third-party, local)
|
|
472
|
+
self._categorize_imports(all_imports)
|
|
473
|
+
|
|
474
|
+
# Find imports that are not in PEP 8 order
|
|
475
|
+
violations.extend(self._find_pep8_order_violations(all_imports))
|
|
476
|
+
|
|
477
|
+
return violations
|
|
478
|
+
|
|
479
|
+
def _find_pep8_order_violations(
|
|
480
|
+
self, all_imports: list[dict[str, t.Any]]
|
|
481
|
+
) -> list[str]:
|
|
482
|
+
"""Find imports that are not in PEP 8 order."""
|
|
483
|
+
violations: list[str] = []
|
|
484
|
+
prev_category = 0
|
|
485
|
+
|
|
486
|
+
for imp in all_imports:
|
|
487
|
+
module = imp.get("module", "")
|
|
488
|
+
category = self._get_import_category(module)
|
|
489
|
+
|
|
490
|
+
if category < prev_category:
|
|
491
|
+
violations.append(
|
|
492
|
+
f"Import '{module}' should come before previous imports (PEP 8 ordering)"
|
|
493
|
+
)
|
|
494
|
+
prev_category = max(prev_category, category)
|
|
495
|
+
|
|
496
|
+
return violations
|
|
497
|
+
|
|
498
|
+
def _check_star_imports(self, content: str) -> list[str]:
|
|
499
|
+
"""Check for star imports which should be avoided."""
|
|
500
|
+
violations: list[str] = []
|
|
501
|
+
lines = content.splitlines()
|
|
502
|
+
|
|
503
|
+
# Check for star imports
|
|
504
|
+
for line_num, line in enumerate(lines, 1):
|
|
505
|
+
# Use safe pattern matching for star import detection
|
|
506
|
+
if SAFE_PATTERNS["match_star_import"].test(line.strip()):
|
|
507
|
+
violations.append(f"Line {line_num}: Avoid star imports")
|
|
508
|
+
|
|
509
|
+
return violations
|
|
510
|
+
|
|
511
|
+
def _categorize_imports(
|
|
512
|
+
self, all_imports: list[dict[str, t.Any]]
|
|
513
|
+
) -> dict[int, list[dict[str, t.Any]]]:
|
|
514
|
+
"""Categorize imports by PEP 8 standards: 1=stdlib, 2=third-party, 3=local."""
|
|
515
|
+
categories: dict[int, list[dict[str, t.Any]]] = defaultdict(list)
|
|
516
|
+
|
|
517
|
+
for imp in all_imports:
|
|
518
|
+
module = imp.get("module", "")
|
|
519
|
+
category = self._get_import_category(module)
|
|
520
|
+
categories[category].append(imp)
|
|
521
|
+
|
|
522
|
+
return categories
|
|
523
|
+
|
|
524
|
+
def _get_import_category(self, module: str) -> int:
|
|
525
|
+
"""Determine import category: 1=stdlib, 2=third-party, 3=local."""
|
|
526
|
+
if not module:
|
|
527
|
+
return 3
|
|
528
|
+
|
|
529
|
+
# Determine category based on module type
|
|
530
|
+
return self._determine_module_category(module)
|
|
531
|
+
|
|
532
|
+
def _determine_module_category(self, module: str) -> int:
|
|
533
|
+
"""Determine the category of a module."""
|
|
534
|
+
base_module = module.split(".")[0]
|
|
535
|
+
|
|
536
|
+
# Check if it's a standard library module
|
|
537
|
+
if self._is_stdlib_module(base_module):
|
|
538
|
+
return 1
|
|
539
|
+
|
|
540
|
+
# Check if it's a local import
|
|
541
|
+
if self._is_local_import(module, base_module):
|
|
542
|
+
return 3
|
|
543
|
+
|
|
544
|
+
# Otherwise assume third-party
|
|
545
|
+
return 2
|
|
546
|
+
|
|
547
|
+
def _is_stdlib_module(self, base_module: str) -> bool:
|
|
548
|
+
"""Check if a module is a standard library module."""
|
|
549
|
+
# Get the set of standard library modules
|
|
550
|
+
stdlib_modules = self._get_stdlib_modules()
|
|
551
|
+
return base_module in stdlib_modules
|
|
552
|
+
|
|
553
|
+
def _get_stdlib_modules(self) -> set[str]:
|
|
554
|
+
"""Get the set of standard library modules."""
|
|
555
|
+
return {
|
|
556
|
+
"os",
|
|
557
|
+
"sys",
|
|
558
|
+
"json",
|
|
559
|
+
"ast",
|
|
560
|
+
"re",
|
|
561
|
+
"pathlib",
|
|
562
|
+
"subprocess",
|
|
563
|
+
"typing",
|
|
564
|
+
"collections",
|
|
565
|
+
"functools",
|
|
566
|
+
"itertools",
|
|
567
|
+
"tempfile",
|
|
568
|
+
"contextlib",
|
|
569
|
+
"dataclasses",
|
|
570
|
+
"enum",
|
|
571
|
+
"abc",
|
|
572
|
+
"asyncio",
|
|
573
|
+
"concurrent",
|
|
574
|
+
"urllib",
|
|
575
|
+
"http",
|
|
576
|
+
"socket",
|
|
577
|
+
"ssl",
|
|
578
|
+
"time",
|
|
579
|
+
"datetime",
|
|
580
|
+
"calendar",
|
|
581
|
+
"math",
|
|
582
|
+
"random",
|
|
583
|
+
"hashlib",
|
|
584
|
+
"hmac",
|
|
585
|
+
"base64",
|
|
586
|
+
"uuid",
|
|
587
|
+
"logging",
|
|
588
|
+
"warnings",
|
|
589
|
+
}
|
|
590
|
+
|
|
591
|
+
def _is_local_import(self, module: str, base_module: str) -> bool:
|
|
592
|
+
"""Check if a module is a local import."""
|
|
593
|
+
return module.startswith(".") or base_module == "crackerjack"
|
|
594
|
+
|
|
147
595
|
async def fix_issue(self, issue: Issue) -> FixResult:
|
|
596
|
+
# Validate input
|
|
597
|
+
validation_result = self._validate_issue(issue)
|
|
598
|
+
if validation_result:
|
|
599
|
+
return validation_result
|
|
600
|
+
|
|
601
|
+
# Process the issue
|
|
602
|
+
return await self._process_import_optimization_issue(issue)
|
|
603
|
+
|
|
604
|
+
async def _process_import_optimization_issue(self, issue: Issue) -> FixResult:
|
|
605
|
+
# At this point, issue.file_path is guaranteed to be a string, not None
|
|
606
|
+
file_path = Path(issue.file_path) # type: ignore
|
|
607
|
+
|
|
608
|
+
# Analyze the file
|
|
609
|
+
analysis = await self.analyze_file(file_path)
|
|
610
|
+
|
|
611
|
+
# Check if optimizations are needed
|
|
612
|
+
if not self._are_optimizations_needed(analysis):
|
|
613
|
+
return self._create_no_optimization_needed_result()
|
|
614
|
+
|
|
615
|
+
# Apply optimizations and prepare results
|
|
616
|
+
return await self._apply_optimizations_and_prepare_results(file_path, analysis)
|
|
617
|
+
|
|
618
|
+
def _create_no_optimization_needed_result(self) -> FixResult:
|
|
619
|
+
return FixResult(
|
|
620
|
+
success=True,
|
|
621
|
+
confidence=1.0,
|
|
622
|
+
fixes_applied=["No import optimizations needed"],
|
|
623
|
+
remaining_issues=[],
|
|
624
|
+
recommendations=["Import patterns are already optimal"],
|
|
625
|
+
files_modified=[],
|
|
626
|
+
)
|
|
627
|
+
|
|
628
|
+
def _validate_issue(self, issue: Issue) -> FixResult | None:
|
|
148
629
|
if issue.file_path is None:
|
|
149
630
|
return FixResult(
|
|
150
631
|
success=False,
|
|
151
632
|
confidence=0.0,
|
|
152
633
|
remaining_issues=["No file path provided for import optimization"],
|
|
153
634
|
)
|
|
154
|
-
|
|
635
|
+
return None
|
|
155
636
|
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
if not any(
|
|
637
|
+
def _are_optimizations_needed(self, analysis: ImportAnalysis) -> bool:
|
|
638
|
+
return any(
|
|
159
639
|
[
|
|
160
640
|
analysis.mixed_imports,
|
|
161
641
|
analysis.redundant_imports,
|
|
642
|
+
analysis.unused_imports,
|
|
162
643
|
analysis.optimization_opportunities,
|
|
644
|
+
analysis.import_violations,
|
|
163
645
|
],
|
|
164
|
-
)
|
|
165
|
-
return FixResult(
|
|
166
|
-
success=True,
|
|
167
|
-
confidence=1.0,
|
|
168
|
-
fixes_applied=["No import optimizations needed"],
|
|
169
|
-
remaining_issues=[],
|
|
170
|
-
recommendations=["Import patterns are already optimal"],
|
|
171
|
-
files_modified=[],
|
|
172
|
-
)
|
|
646
|
+
)
|
|
173
647
|
|
|
648
|
+
async def _apply_optimizations_and_prepare_results(
|
|
649
|
+
self, file_path: Path, analysis: ImportAnalysis
|
|
650
|
+
) -> FixResult:
|
|
174
651
|
try:
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
optimized_content = await self._optimize_imports(original_content, analysis)
|
|
179
|
-
|
|
180
|
-
with file_path.open("w", encoding="utf-8") as f:
|
|
181
|
-
f.write(optimized_content)
|
|
652
|
+
optimized_content = await self._read_and_optimize_file(file_path, analysis)
|
|
653
|
+
await self._write_optimized_content(file_path, optimized_content)
|
|
182
654
|
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
if analysis.redundant_imports:
|
|
189
|
-
changes.append(
|
|
190
|
-
f"Removed {len(analysis.redundant_imports)} redundant imports",
|
|
191
|
-
)
|
|
192
|
-
if analysis.optimization_opportunities:
|
|
193
|
-
changes.append(
|
|
194
|
-
f"Applied {len(analysis.optimization_opportunities)} optimizations",
|
|
195
|
-
)
|
|
655
|
+
# Prepare results
|
|
656
|
+
changes, remaining_issues = self._prepare_fix_results(analysis)
|
|
657
|
+
recommendations = self._prepare_recommendations(
|
|
658
|
+
file_path.name, remaining_issues
|
|
659
|
+
)
|
|
196
660
|
|
|
197
661
|
return FixResult(
|
|
198
662
|
success=True,
|
|
199
|
-
confidence=0.
|
|
663
|
+
confidence=0.85,
|
|
200
664
|
fixes_applied=changes,
|
|
201
|
-
remaining_issues=
|
|
202
|
-
recommendations=
|
|
665
|
+
remaining_issues=remaining_issues,
|
|
666
|
+
recommendations=recommendations,
|
|
203
667
|
files_modified=[str(file_path)],
|
|
204
668
|
)
|
|
205
669
|
|
|
206
670
|
except Exception as e:
|
|
207
|
-
return
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
671
|
+
return self._handle_optimization_error(e)
|
|
672
|
+
|
|
673
|
+
async def _read_and_optimize_file(
|
|
674
|
+
self, file_path: Path, analysis: ImportAnalysis
|
|
675
|
+
) -> str:
|
|
676
|
+
with file_path.open(encoding="utf-8") as f:
|
|
677
|
+
original_content = f.read()
|
|
678
|
+
return await self._optimize_imports(original_content, analysis)
|
|
679
|
+
|
|
680
|
+
async def _write_optimized_content(
|
|
681
|
+
self, file_path: Path, optimized_content: str
|
|
682
|
+
) -> None:
|
|
683
|
+
with file_path.open("w", encoding="utf-8") as f:
|
|
684
|
+
f.write(optimized_content)
|
|
685
|
+
|
|
686
|
+
def _handle_optimization_error(self, e: Exception) -> FixResult:
|
|
687
|
+
return FixResult(
|
|
688
|
+
success=False,
|
|
689
|
+
confidence=0.0,
|
|
690
|
+
fixes_applied=[],
|
|
691
|
+
remaining_issues=[f"Failed to optimize imports: {e}"],
|
|
692
|
+
recommendations=["Manual import review needed"],
|
|
693
|
+
files_modified=[],
|
|
694
|
+
)
|
|
695
|
+
|
|
696
|
+
def _prepare_fix_results(
|
|
697
|
+
self, analysis: ImportAnalysis
|
|
698
|
+
) -> tuple[list[str], list[str]]:
|
|
699
|
+
changes: list[str] = []
|
|
700
|
+
remaining_issues: list[str] = []
|
|
701
|
+
|
|
702
|
+
# Add changes for different types of optimizations
|
|
703
|
+
changes.extend(self._get_mixed_import_changes(analysis.mixed_imports))
|
|
704
|
+
changes.extend(self._get_redundant_import_changes(analysis.redundant_imports))
|
|
705
|
+
changes.extend(self._get_unused_import_changes(analysis.unused_imports))
|
|
706
|
+
changes.extend(
|
|
707
|
+
self._get_optimization_opportunity_changes(
|
|
708
|
+
analysis.optimization_opportunities
|
|
709
|
+
)
|
|
710
|
+
)
|
|
711
|
+
|
|
712
|
+
# Report violations that couldn't be auto-fixed
|
|
713
|
+
remaining_issues.extend(
|
|
714
|
+
self._get_remaining_violations(analysis.import_violations)
|
|
715
|
+
)
|
|
716
|
+
|
|
717
|
+
return changes, remaining_issues
|
|
718
|
+
|
|
719
|
+
def _get_mixed_import_changes(self, mixed_imports: list[str]) -> list[str]:
|
|
720
|
+
changes: list[str] = []
|
|
721
|
+
if mixed_imports:
|
|
722
|
+
changes.append(
|
|
723
|
+
f"Standardized mixed imports for modules: {', '.join(mixed_imports)}",
|
|
214
724
|
)
|
|
725
|
+
return changes
|
|
726
|
+
|
|
727
|
+
def _get_redundant_import_changes(self, redundant_imports: list[str]) -> list[str]:
|
|
728
|
+
changes: list[str] = []
|
|
729
|
+
if redundant_imports:
|
|
730
|
+
changes.append(
|
|
731
|
+
f"Removed {len(redundant_imports)} redundant imports",
|
|
732
|
+
)
|
|
733
|
+
return changes
|
|
734
|
+
|
|
735
|
+
def _get_unused_import_changes(self, unused_imports: list[str]) -> list[str]:
|
|
736
|
+
changes: list[str] = []
|
|
737
|
+
if unused_imports:
|
|
738
|
+
changes.append(
|
|
739
|
+
f"Removed {len(unused_imports)} unused imports: {', '.join(unused_imports[:3])}"
|
|
740
|
+
+ ("..." if len(unused_imports) > 3 else ""),
|
|
741
|
+
)
|
|
742
|
+
return changes
|
|
743
|
+
|
|
744
|
+
def _get_optimization_opportunity_changes(
|
|
745
|
+
self, optimization_opportunities: list[str]
|
|
746
|
+
) -> list[str]:
|
|
747
|
+
changes: list[str] = []
|
|
748
|
+
if optimization_opportunities:
|
|
749
|
+
changes.append(
|
|
750
|
+
f"Applied {len(optimization_opportunities)} import consolidations",
|
|
751
|
+
)
|
|
752
|
+
return changes
|
|
753
|
+
|
|
754
|
+
def _get_remaining_violations(self, import_violations: list[str]) -> list[str]:
|
|
755
|
+
remaining_issues: list[str] = []
|
|
756
|
+
if import_violations:
|
|
757
|
+
remaining_issues.extend(import_violations[:3]) # Limit to top 3
|
|
758
|
+
return remaining_issues
|
|
759
|
+
|
|
760
|
+
def _prepare_recommendations(
|
|
761
|
+
self, file_name: str, remaining_issues: list[str]
|
|
762
|
+
) -> list[str]:
|
|
763
|
+
recommendations = [f"Optimized import statements in {file_name}"]
|
|
764
|
+
if remaining_issues:
|
|
765
|
+
recommendations.append(
|
|
766
|
+
"Consider manual review for remaining PEP 8 violations"
|
|
767
|
+
)
|
|
768
|
+
return recommendations
|
|
215
769
|
|
|
216
770
|
async def _optimize_imports(self, content: str, analysis: ImportAnalysis) -> str:
|
|
771
|
+
"""Apply comprehensive import optimizations."""
|
|
217
772
|
lines = content.splitlines()
|
|
218
773
|
|
|
219
|
-
|
|
220
|
-
if module == "typing":
|
|
221
|
-
lines = self._consolidate_typing_imports(lines)
|
|
774
|
+
lines = self._apply_import_optimizations(lines, analysis)
|
|
222
775
|
|
|
223
776
|
return "\n".join(lines)
|
|
224
777
|
|
|
225
|
-
def
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
778
|
+
def _apply_import_optimizations(
|
|
779
|
+
self, lines: list[str], analysis: ImportAnalysis
|
|
780
|
+
) -> list[str]:
|
|
781
|
+
"""Apply all import optimization steps in sequence."""
|
|
782
|
+
# Apply all optimization steps
|
|
783
|
+
lines = self._apply_all_optimization_steps(lines, analysis)
|
|
784
|
+
return lines
|
|
785
|
+
|
|
786
|
+
def _apply_all_optimization_steps(
|
|
787
|
+
self, lines: list[str], analysis: ImportAnalysis
|
|
788
|
+
) -> list[str]:
|
|
789
|
+
# Remove unused imports first
|
|
790
|
+
lines = self._remove_unused_imports(lines, analysis.unused_imports)
|
|
791
|
+
|
|
792
|
+
# Consolidate mixed imports to from-import style
|
|
793
|
+
lines = self._consolidate_mixed_imports(lines, analysis.mixed_imports)
|
|
794
|
+
|
|
795
|
+
# Remove redundant imports
|
|
796
|
+
lines = self._remove_redundant_imports(lines, analysis.redundant_imports)
|
|
797
|
+
|
|
798
|
+
# Apply PEP 8 import organization
|
|
799
|
+
lines = self._organize_imports_pep8(lines)
|
|
800
|
+
|
|
801
|
+
return lines
|
|
802
|
+
|
|
803
|
+
def _remove_unused_imports(
|
|
804
|
+
self, lines: list[str], unused_imports: list[str]
|
|
805
|
+
) -> list[str]:
|
|
806
|
+
"""Remove unused imports identified by vulture."""
|
|
807
|
+
if not unused_imports:
|
|
808
|
+
return lines
|
|
809
|
+
|
|
810
|
+
unused_patterns = self._create_unused_import_patterns(unused_imports)
|
|
811
|
+
return self._filter_unused_import_lines(lines, unused_patterns, unused_imports)
|
|
812
|
+
|
|
813
|
+
def _create_unused_import_patterns(
|
|
814
|
+
self, unused_imports: list[str]
|
|
815
|
+
) -> list[t.Pattern[str]]:
|
|
816
|
+
"""Create regex patterns for unused import detection."""
|
|
817
|
+
import re # Import needed for pattern compilation
|
|
818
|
+
|
|
819
|
+
unused_patterns = []
|
|
820
|
+
for unused in unused_imports:
|
|
821
|
+
# Use dynamic pattern creation with escaping
|
|
822
|
+
escaped_unused = re.escape(unused)
|
|
823
|
+
# Create compiled regex patterns
|
|
824
|
+
unused_patterns.extend(
|
|
825
|
+
(
|
|
826
|
+
re.compile(f"^\\s*import\\s+{escaped_unused}\\s*$"),
|
|
827
|
+
re.compile(
|
|
828
|
+
f"^\\s*from\\s+\\w+\\s+import\\s+.*\\b{escaped_unused}\\b"
|
|
829
|
+
),
|
|
830
|
+
)
|
|
831
|
+
)
|
|
832
|
+
return unused_patterns
|
|
833
|
+
|
|
834
|
+
def _filter_unused_import_lines(
|
|
835
|
+
self,
|
|
836
|
+
lines: list[str],
|
|
837
|
+
unused_patterns: list[t.Pattern[str]],
|
|
838
|
+
unused_imports: list[str],
|
|
839
|
+
) -> list[str]:
|
|
840
|
+
"""Filter out lines containing unused imports."""
|
|
841
|
+
filtered_lines = []
|
|
842
|
+
for line in lines:
|
|
843
|
+
should_remove = False
|
|
844
|
+
for pattern in unused_patterns:
|
|
845
|
+
if pattern.search(line):
|
|
846
|
+
if self._is_multi_import_line(line):
|
|
847
|
+
# Only remove the specific unused import, not the whole line
|
|
848
|
+
line = self._remove_from_import_list(line, unused_imports)
|
|
849
|
+
else:
|
|
850
|
+
should_remove = True
|
|
851
|
+
break
|
|
852
|
+
|
|
853
|
+
if not should_remove and line.strip(): # Keep non-empty lines
|
|
854
|
+
filtered_lines.append(line)
|
|
855
|
+
|
|
856
|
+
return filtered_lines
|
|
857
|
+
|
|
858
|
+
def _is_multi_import_line(self, line: str) -> bool:
|
|
859
|
+
"""Check if line contains multiple imports."""
|
|
860
|
+
return "import" in line and "," in line
|
|
861
|
+
|
|
862
|
+
def _remove_from_import_list(self, line: str, unused_imports: list[str]) -> str:
|
|
863
|
+
"""Remove specific imports from a multi-import line."""
|
|
864
|
+
for unused in unused_imports:
|
|
865
|
+
# Remove 'unused_name,' or ', unused_name' using safe pattern approach
|
|
866
|
+
import re # REGEX OK: temporary for escaping in dynamic removal
|
|
867
|
+
|
|
868
|
+
escaped_unused = re.escape(unused)
|
|
869
|
+
line = re.sub(
|
|
870
|
+
rf",?\s*{escaped_unused}\s*,?", ", ", line
|
|
871
|
+
) # REGEX OK: dynamic removal with escaping
|
|
872
|
+
|
|
873
|
+
# Clean up using safe patterns
|
|
874
|
+
line = SAFE_PATTERNS["clean_import_commas"].apply(line)
|
|
875
|
+
line = SAFE_PATTERNS["clean_trailing_import_comma"].apply(line)
|
|
876
|
+
line = SAFE_PATTERNS["clean_import_prefix"].apply(line)
|
|
877
|
+
return line
|
|
878
|
+
|
|
879
|
+
def _consolidate_mixed_imports(
|
|
880
|
+
self, lines: list[str], mixed_modules: list[str]
|
|
881
|
+
) -> list[str]:
|
|
882
|
+
"""Consolidate mixed import styles to prefer from-import format."""
|
|
883
|
+
if not mixed_modules:
|
|
884
|
+
return lines
|
|
885
|
+
|
|
886
|
+
import_data = self._collect_mixed_module_imports(lines, mixed_modules)
|
|
887
|
+
lines = self._remove_old_mixed_imports(lines, import_data["lines_to_remove"])
|
|
888
|
+
lines = self._insert_consolidated_imports(lines, import_data)
|
|
889
|
+
|
|
890
|
+
return lines
|
|
891
|
+
|
|
892
|
+
def _collect_mixed_module_imports(
|
|
893
|
+
self, lines: list[str], mixed_modules: list[str]
|
|
894
|
+
) -> dict[str, t.Any]:
|
|
895
|
+
"""Collect import information for mixed modules."""
|
|
896
|
+
import_collector = self._create_import_collector()
|
|
229
897
|
|
|
230
898
|
for i, line in enumerate(lines):
|
|
231
|
-
|
|
232
|
-
|
|
233
|
-
|
|
234
|
-
|
|
235
|
-
|
|
236
|
-
|
|
237
|
-
|
|
238
|
-
|
|
239
|
-
|
|
240
|
-
|
|
241
|
-
|
|
242
|
-
|
|
243
|
-
|
|
244
|
-
|
|
899
|
+
stripped_line = line.strip()
|
|
900
|
+
for module in mixed_modules:
|
|
901
|
+
self._process_mixed_module_line(
|
|
902
|
+
stripped_line, module, i, import_collector
|
|
903
|
+
)
|
|
904
|
+
|
|
905
|
+
return self._finalize_import_collection(import_collector)
|
|
906
|
+
|
|
907
|
+
def _create_import_collector(self) -> dict[str, t.Any]:
|
|
908
|
+
"""Create containers for collecting import information."""
|
|
909
|
+
return {
|
|
910
|
+
"module_imports": defaultdict(set),
|
|
911
|
+
"lines_to_remove": set(),
|
|
912
|
+
"insert_positions": {},
|
|
913
|
+
}
|
|
914
|
+
|
|
915
|
+
def _finalize_import_collection(
|
|
916
|
+
self, collector: dict[str, t.Any]
|
|
917
|
+
) -> dict[str, t.Any]:
|
|
918
|
+
"""Finalize the collected import information."""
|
|
919
|
+
return {
|
|
920
|
+
"module_imports": collector["module_imports"],
|
|
921
|
+
"lines_to_remove": collector["lines_to_remove"],
|
|
922
|
+
"insert_positions": collector["insert_positions"],
|
|
923
|
+
}
|
|
924
|
+
|
|
925
|
+
def _process_mixed_module_line(
|
|
926
|
+
self,
|
|
927
|
+
line: str,
|
|
928
|
+
module: str,
|
|
929
|
+
line_index: int,
|
|
930
|
+
import_collector: dict[str, t.Any],
|
|
931
|
+
) -> None:
|
|
932
|
+
"""Process a single line for mixed module imports."""
|
|
933
|
+
if self._is_standard_import_line(line, module):
|
|
934
|
+
self._handle_standard_import(line, module, line_index, import_collector)
|
|
935
|
+
elif self._is_from_import_line(line, module):
|
|
936
|
+
self._handle_from_import(line, module, line_index, import_collector)
|
|
937
|
+
|
|
938
|
+
def _is_standard_import_line(self, line: str, module: str) -> bool:
|
|
939
|
+
"""Check if line is a standard import for the module."""
|
|
940
|
+
import re # REGEX OK: localized for pattern matching
|
|
941
|
+
|
|
942
|
+
return bool(
|
|
943
|
+
re.match(rf"^\s*import\s+{re.escape(module)}(?:\.\w+)*\s*$", line)
|
|
944
|
+
) # REGEX OK: dynamic module matching with escaping
|
|
945
|
+
|
|
946
|
+
def _is_from_import_line(self, line: str, module: str) -> bool:
|
|
947
|
+
"""Check if line is a from-import for the module."""
|
|
948
|
+
import re # REGEX OK: localized for pattern matching
|
|
949
|
+
|
|
950
|
+
return bool(
|
|
951
|
+
re.match(rf"^\s*from\s+{re.escape(module)}\s+import\s+", line)
|
|
952
|
+
) # REGEX OK: dynamic from import matching with escaping
|
|
953
|
+
|
|
954
|
+
def _handle_standard_import(
|
|
955
|
+
self,
|
|
956
|
+
line: str,
|
|
957
|
+
module: str,
|
|
958
|
+
line_index: int,
|
|
959
|
+
import_collector: dict[str, t.Any],
|
|
960
|
+
) -> None:
|
|
961
|
+
"""Handle standard import statement."""
|
|
962
|
+
import_name = self._extract_import_name_from_standard(line, module)
|
|
963
|
+
if import_name:
|
|
964
|
+
import_to_add = self._determine_import_name(import_name, module)
|
|
965
|
+
self._add_import_to_collector(
|
|
966
|
+
module, import_to_add, line_index, import_collector
|
|
967
|
+
)
|
|
968
|
+
|
|
969
|
+
def _extract_import_name_from_standard(self, line: str, module: str) -> str | None:
|
|
970
|
+
"""Extract the import name from a standard import line."""
|
|
971
|
+
import re # REGEX OK: localized for pattern matching
|
|
972
|
+
|
|
973
|
+
match = re.search(rf"import\s+({re.escape(module)}(?:\.\w+)*)", line)
|
|
974
|
+
return match.group(1) if match else None
|
|
975
|
+
|
|
976
|
+
def _determine_import_name(self, import_name: str, module: str) -> str:
|
|
977
|
+
"""Determine what name to import based on the import statement."""
|
|
978
|
+
if "." in import_name:
|
|
979
|
+
# For submodules, import the submodule name
|
|
980
|
+
return import_name.split(".")[-1]
|
|
981
|
+
return module
|
|
982
|
+
|
|
983
|
+
def _add_import_to_collector(
|
|
984
|
+
self,
|
|
985
|
+
module: str,
|
|
986
|
+
import_name: str,
|
|
987
|
+
line_index: int,
|
|
988
|
+
import_collector: dict[str, t.Any],
|
|
989
|
+
) -> None:
|
|
990
|
+
"""Add import information to the collector."""
|
|
991
|
+
import_collector["module_imports"][module].add(import_name)
|
|
992
|
+
import_collector["lines_to_remove"].add(line_index)
|
|
993
|
+
if module not in import_collector["insert_positions"]:
|
|
994
|
+
import_collector["insert_positions"][module] = line_index
|
|
995
|
+
|
|
996
|
+
def _handle_from_import(
|
|
997
|
+
self,
|
|
998
|
+
line: str,
|
|
999
|
+
module: str,
|
|
1000
|
+
line_index: int,
|
|
1001
|
+
import_collector: dict[str, t.Any],
|
|
1002
|
+
) -> None:
|
|
1003
|
+
"""Handle from-import statement."""
|
|
1004
|
+
import_names = self._extract_import_names_from_from_import(line, module)
|
|
1005
|
+
import_collector["module_imports"][module].update(import_names)
|
|
1006
|
+
import_collector["lines_to_remove"].add(line_index)
|
|
1007
|
+
if module not in import_collector["insert_positions"]:
|
|
1008
|
+
import_collector["insert_positions"][module] = line_index
|
|
1009
|
+
|
|
1010
|
+
def _extract_import_names_from_from_import(
|
|
1011
|
+
self, line: str, module: str
|
|
1012
|
+
) -> list[str]:
|
|
1013
|
+
"""Extract import names from a from-import line."""
|
|
1014
|
+
import re # REGEX OK: localized for pattern matching
|
|
1015
|
+
|
|
1016
|
+
import_part = re.sub(rf"^\s*from\s+{re.escape(module)}\s+import\s+", "", line)
|
|
1017
|
+
return [name.strip() for name in import_part.split(",")]
|
|
1018
|
+
|
|
1019
|
+
def _remove_old_mixed_imports(
|
|
1020
|
+
self, lines: list[str], lines_to_remove: set[int]
|
|
1021
|
+
) -> list[str]:
|
|
1022
|
+
"""Remove old import lines in reverse order to preserve indices."""
|
|
1023
|
+
for i in sorted(lines_to_remove, reverse=True):
|
|
245
1024
|
del lines[i]
|
|
1025
|
+
return lines
|
|
246
1026
|
|
|
247
|
-
|
|
248
|
-
|
|
249
|
-
|
|
1027
|
+
def _insert_consolidated_imports(
|
|
1028
|
+
self, lines: list[str], import_data: dict[str, t.Any]
|
|
1029
|
+
) -> list[str]:
|
|
1030
|
+
"""Insert consolidated from-imports."""
|
|
1031
|
+
module_imports = import_data["module_imports"]
|
|
1032
|
+
insert_positions = import_data["insert_positions"]
|
|
1033
|
+
lines_to_remove = import_data["lines_to_remove"]
|
|
250
1034
|
|
|
1035
|
+
offset = 0
|
|
1036
|
+
for module, imports in module_imports.items():
|
|
1037
|
+
if module in insert_positions:
|
|
1038
|
+
imports_list = sorted(imports)
|
|
1039
|
+
consolidated = f"from {module} import {', '.join(imports_list)}"
|
|
1040
|
+
insert_pos = insert_positions[module] - offset
|
|
1041
|
+
lines.insert(insert_pos, consolidated)
|
|
1042
|
+
offset += (
|
|
1043
|
+
len([i for i in lines_to_remove if i <= insert_positions[module]])
|
|
1044
|
+
- 1
|
|
1045
|
+
)
|
|
251
1046
|
return lines
|
|
252
1047
|
|
|
1048
|
+
def _remove_redundant_imports(
|
|
1049
|
+
self, lines: list[str], redundant_imports: list[str]
|
|
1050
|
+
) -> list[str]:
|
|
1051
|
+
"""Remove redundant/duplicate import statements."""
|
|
1052
|
+
if not redundant_imports:
|
|
1053
|
+
return lines
|
|
1054
|
+
|
|
1055
|
+
seen_imports: set[str] = set()
|
|
1056
|
+
filtered_lines = []
|
|
1057
|
+
|
|
1058
|
+
for line in lines:
|
|
1059
|
+
# Normalize the import line for comparison using safe patterns
|
|
1060
|
+
normalized = SAFE_PATTERNS["normalize_whitespace"].apply(line.strip())
|
|
1061
|
+
|
|
1062
|
+
if normalized.startswith(("import ", "from ")):
|
|
1063
|
+
if normalized not in seen_imports:
|
|
1064
|
+
seen_imports.add(normalized)
|
|
1065
|
+
filtered_lines.append(line)
|
|
1066
|
+
# Skip redundant imports
|
|
1067
|
+
else:
|
|
1068
|
+
filtered_lines.append(line)
|
|
1069
|
+
|
|
1070
|
+
return filtered_lines
|
|
1071
|
+
|
|
1072
|
+
def _organize_imports_pep8(self, lines: list[str]) -> list[str]:
|
|
1073
|
+
"""Organize imports according to PEP 8 standards."""
|
|
1074
|
+
parsed_data = self._parse_import_lines(lines)
|
|
1075
|
+
import_data, other_lines, import_bounds = parsed_data
|
|
1076
|
+
|
|
1077
|
+
if not import_data:
|
|
1078
|
+
return lines
|
|
1079
|
+
|
|
1080
|
+
sorted_imports = self._sort_imports_by_pep8_standards(import_data)
|
|
1081
|
+
return self._rebuild_with_organized_imports(
|
|
1082
|
+
sorted_imports, other_lines, import_bounds
|
|
1083
|
+
)
|
|
1084
|
+
|
|
1085
|
+
def _sort_imports_by_pep8_standards(
|
|
1086
|
+
self, import_data: list[tuple[int, str, str]]
|
|
1087
|
+
) -> list[tuple[int, str, str]]:
|
|
1088
|
+
"""Sort imports by PEP 8 standards: category first, then alphabetically."""
|
|
1089
|
+
return sorted(import_data, key=lambda x: (x[0], x[2].lower()))
|
|
1090
|
+
|
|
1091
|
+
def _parse_import_lines(
|
|
1092
|
+
self, lines: list[str]
|
|
1093
|
+
) -> tuple[list[tuple[int, str, str]], list[tuple[int, str]], tuple[int, int]]:
|
|
1094
|
+
"""Parse lines to separate imports from other code."""
|
|
1095
|
+
parser_state = self._initialize_parser_state()
|
|
1096
|
+
|
|
1097
|
+
for i, line in enumerate(lines):
|
|
1098
|
+
stripped = line.strip()
|
|
1099
|
+
if self._is_import_line(stripped):
|
|
1100
|
+
self._process_import_line(i, line, stripped, parser_state)
|
|
1101
|
+
else:
|
|
1102
|
+
self._process_non_import_line(i, line, stripped, parser_state)
|
|
1103
|
+
|
|
1104
|
+
return (
|
|
1105
|
+
parser_state["import_lines"],
|
|
1106
|
+
parser_state["other_lines"],
|
|
1107
|
+
(parser_state["import_start"], parser_state["import_end"]),
|
|
1108
|
+
)
|
|
1109
|
+
|
|
1110
|
+
def _initialize_parser_state(self) -> dict[str, t.Any]:
|
|
1111
|
+
"""Initialize parser state for import line processing."""
|
|
1112
|
+
return {
|
|
1113
|
+
"import_lines": [], # (category, line, original)
|
|
1114
|
+
"other_lines": [],
|
|
1115
|
+
"import_start": -1,
|
|
1116
|
+
"import_end": -1,
|
|
1117
|
+
}
|
|
1118
|
+
|
|
1119
|
+
def _process_import_line(
|
|
1120
|
+
self, i: int, line: str, stripped: str, parser_state: dict[str, t.Any]
|
|
1121
|
+
) -> None:
|
|
1122
|
+
"""Process a line that contains an import statement."""
|
|
1123
|
+
if parser_state["import_start"] == -1:
|
|
1124
|
+
parser_state["import_start"] = i
|
|
1125
|
+
parser_state["import_end"] = i
|
|
1126
|
+
|
|
1127
|
+
module = self._extract_module_name(stripped)
|
|
1128
|
+
category = self._get_import_category(module)
|
|
1129
|
+
parser_state["import_lines"].append((category, line, stripped))
|
|
1130
|
+
|
|
1131
|
+
def _process_non_import_line(
|
|
1132
|
+
self, i: int, line: str, stripped: str, parser_state: dict[str, t.Any]
|
|
1133
|
+
) -> None:
|
|
1134
|
+
"""Process a line that is not an import statement."""
|
|
1135
|
+
self._categorize_non_import_line(
|
|
1136
|
+
i,
|
|
1137
|
+
line,
|
|
1138
|
+
stripped,
|
|
1139
|
+
parser_state["import_start"],
|
|
1140
|
+
parser_state["import_end"],
|
|
1141
|
+
parser_state["other_lines"],
|
|
1142
|
+
)
|
|
1143
|
+
|
|
1144
|
+
def _is_import_line(self, stripped: str) -> bool:
|
|
1145
|
+
"""Check if line is an import statement."""
|
|
1146
|
+
return stripped.startswith(("import ", "from ")) and not stripped.startswith(
|
|
1147
|
+
"#"
|
|
1148
|
+
)
|
|
1149
|
+
|
|
1150
|
+
def _extract_module_name(self, stripped: str) -> str:
|
|
1151
|
+
"""Extract module name from import statement."""
|
|
1152
|
+
if stripped.startswith("import "):
|
|
1153
|
+
return stripped.split()[1].split(".")[0]
|
|
1154
|
+
# from import
|
|
1155
|
+
return stripped.split()[1]
|
|
1156
|
+
|
|
1157
|
+
def _categorize_non_import_line(
|
|
1158
|
+
self,
|
|
1159
|
+
i: int,
|
|
1160
|
+
line: str,
|
|
1161
|
+
stripped: str,
|
|
1162
|
+
import_start: int,
|
|
1163
|
+
import_end: int,
|
|
1164
|
+
other_lines: list[tuple[int, str]],
|
|
1165
|
+
) -> None:
|
|
1166
|
+
"""Categorize non-import lines for later reconstruction."""
|
|
1167
|
+
if import_start != -1 and import_end != -1 and i > import_end:
|
|
1168
|
+
# We've passed the import section
|
|
1169
|
+
other_lines.append((i, line))
|
|
1170
|
+
elif import_start == -1:
|
|
1171
|
+
# We haven't reached imports yet
|
|
1172
|
+
other_lines.append((i, line))
|
|
1173
|
+
elif stripped == "" and import_start <= i <= import_end:
|
|
1174
|
+
# Empty line within import section - we'll reorganize these
|
|
1175
|
+
return
|
|
1176
|
+
else:
|
|
1177
|
+
other_lines.append((i, line))
|
|
1178
|
+
|
|
1179
|
+
def _rebuild_with_organized_imports(
|
|
1180
|
+
self,
|
|
1181
|
+
import_data: list[tuple[int, str, str]],
|
|
1182
|
+
other_lines: list[tuple[int, str]],
|
|
1183
|
+
import_bounds: tuple[int, int],
|
|
1184
|
+
) -> list[str]:
|
|
1185
|
+
"""Rebuild file with organized imports and proper spacing."""
|
|
1186
|
+
result_lines = []
|
|
1187
|
+
import_start, import_end = import_bounds
|
|
1188
|
+
|
|
1189
|
+
# Add lines before imports
|
|
1190
|
+
self._add_lines_before_imports(result_lines, other_lines, import_start)
|
|
1191
|
+
|
|
1192
|
+
# Add organized imports with proper spacing
|
|
1193
|
+
self._add_organized_imports(result_lines, import_data)
|
|
1194
|
+
|
|
1195
|
+
# Add lines after imports
|
|
1196
|
+
self._add_lines_after_imports(result_lines, other_lines, import_end)
|
|
1197
|
+
|
|
1198
|
+
return result_lines
|
|
1199
|
+
|
|
1200
|
+
def _add_lines_before_imports(
|
|
1201
|
+
self,
|
|
1202
|
+
result_lines: list[str],
|
|
1203
|
+
other_lines: list[tuple[int, str]],
|
|
1204
|
+
import_start: int,
|
|
1205
|
+
) -> None:
|
|
1206
|
+
"""Add lines that appear before import section."""
|
|
1207
|
+
for i, line in other_lines:
|
|
1208
|
+
if i < import_start:
|
|
1209
|
+
result_lines.append(line)
|
|
1210
|
+
|
|
1211
|
+
def _add_organized_imports(
|
|
1212
|
+
self, result_lines: list[str], import_data: list[tuple[int, str, str]]
|
|
1213
|
+
) -> None:
|
|
1214
|
+
"""Add imports with proper category spacing."""
|
|
1215
|
+
current_category = 0
|
|
1216
|
+
for category, line, _ in import_data:
|
|
1217
|
+
if category > current_category and current_category > 0:
|
|
1218
|
+
result_lines.append("") # Add blank line between categories
|
|
1219
|
+
result_lines.append(line)
|
|
1220
|
+
current_category = category
|
|
1221
|
+
|
|
1222
|
+
def _add_lines_after_imports(
|
|
1223
|
+
self,
|
|
1224
|
+
result_lines: list[str],
|
|
1225
|
+
other_lines: list[tuple[int, str]],
|
|
1226
|
+
import_end: int,
|
|
1227
|
+
) -> None:
|
|
1228
|
+
"""Add lines that appear after import section."""
|
|
1229
|
+
if any(i > import_end for i, _ in other_lines):
|
|
1230
|
+
result_lines.append("") # Blank line after imports
|
|
1231
|
+
for i, line in other_lines:
|
|
1232
|
+
if i > import_end:
|
|
1233
|
+
result_lines.append(line)
|
|
1234
|
+
|
|
253
1235
|
async def get_diagnostics(self) -> dict[str, t.Any]:
|
|
254
|
-
diagnostics
|
|
255
|
-
|
|
1236
|
+
"""Provide comprehensive diagnostics about import analysis across the project."""
|
|
1237
|
+
try:
|
|
1238
|
+
python_files = self._get_python_files()
|
|
1239
|
+
metrics = await self._analyze_file_sample(python_files[:10])
|
|
1240
|
+
return self._build_success_diagnostics(len(python_files), metrics)
|
|
1241
|
+
except Exception as e:
|
|
1242
|
+
return self._build_error_diagnostics(str(e))
|
|
1243
|
+
|
|
1244
|
+
def _get_python_files(self) -> list[Path]:
|
|
1245
|
+
"""Get all Python files in the project."""
|
|
1246
|
+
return list(self.context.project_path.rglob("*.py"))
|
|
1247
|
+
|
|
1248
|
+
async def _analyze_file_sample(self, python_files: list[Path]) -> dict[str, int]:
|
|
1249
|
+
"""Analyze a sample of files for comprehensive import metrics."""
|
|
1250
|
+
metrics = {
|
|
256
1251
|
"mixed_import_files": 0,
|
|
257
1252
|
"total_mixed_modules": 0,
|
|
258
|
-
"
|
|
259
|
-
"
|
|
1253
|
+
"unused_import_files": 0,
|
|
1254
|
+
"total_unused_imports": 0,
|
|
1255
|
+
"pep8_violations": 0,
|
|
260
1256
|
}
|
|
261
1257
|
|
|
262
|
-
for
|
|
263
|
-
|
|
264
|
-
|
|
1258
|
+
for file_path in python_files:
|
|
1259
|
+
file_metrics = await self._analyze_single_file_metrics(file_path)
|
|
1260
|
+
if file_metrics:
|
|
1261
|
+
self._update_metrics(metrics, file_metrics)
|
|
265
1262
|
|
|
266
|
-
|
|
267
|
-
diagnostics["files_analyzed"] += 1
|
|
1263
|
+
return metrics
|
|
268
1264
|
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
1265
|
+
async def _analyze_single_file_metrics(
|
|
1266
|
+
self, file_path: Path
|
|
1267
|
+
) -> dict[str, int] | None:
|
|
1268
|
+
"""Analyze a single file and return its metrics, or None if analysis fails."""
|
|
1269
|
+
try:
|
|
1270
|
+
analysis = await self.analyze_file(file_path)
|
|
1271
|
+
return self._extract_file_metrics(analysis)
|
|
1272
|
+
except Exception as e:
|
|
1273
|
+
self.log(f"Could not analyze {file_path}: {e}")
|
|
1274
|
+
return None
|
|
272
1275
|
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
|
|
276
|
-
|
|
1276
|
+
def _extract_file_metrics(self, analysis: ImportAnalysis) -> dict[str, int]:
|
|
1277
|
+
"""Extract metrics from a single file analysis."""
|
|
1278
|
+
metrics = {
|
|
1279
|
+
"mixed_import_files": 1 if analysis.mixed_imports else 0,
|
|
1280
|
+
"total_mixed_modules": len(analysis.mixed_imports),
|
|
1281
|
+
"unused_import_files": 1 if analysis.unused_imports else 0,
|
|
1282
|
+
"total_unused_imports": len(analysis.unused_imports),
|
|
1283
|
+
"pep8_violations": len(analysis.import_violations),
|
|
1284
|
+
}
|
|
1285
|
+
return metrics
|
|
1286
|
+
|
|
1287
|
+
def _update_metrics(
|
|
1288
|
+
self, metrics: dict[str, int], file_metrics: dict[str, int]
|
|
1289
|
+
) -> None:
|
|
1290
|
+
"""Update overall metrics with single file metrics."""
|
|
1291
|
+
for key, value in file_metrics.items():
|
|
1292
|
+
metrics[key] += value
|
|
1293
|
+
|
|
1294
|
+
def _build_success_diagnostics(
|
|
1295
|
+
self, files_analyzed: int, metrics: dict[str, int]
|
|
1296
|
+
) -> dict[str, t.Any]:
|
|
1297
|
+
"""Build successful diagnostics response."""
|
|
1298
|
+
return {
|
|
1299
|
+
"files_analyzed": files_analyzed,
|
|
1300
|
+
**metrics,
|
|
1301
|
+
"agent": "ImportOptimizationAgent",
|
|
1302
|
+
"capabilities": [
|
|
1303
|
+
"Mixed import style consolidation",
|
|
1304
|
+
"Unused import detection with vulture",
|
|
1305
|
+
"PEP 8 import organization",
|
|
1306
|
+
"Redundant import removal",
|
|
1307
|
+
"Intelligent context-aware analysis",
|
|
1308
|
+
],
|
|
1309
|
+
}
|
|
277
1310
|
|
|
278
|
-
|
|
1311
|
+
def _build_error_diagnostics(self, error: str) -> dict[str, t.Any]:
|
|
1312
|
+
"""Build error diagnostics response."""
|
|
1313
|
+
return {
|
|
1314
|
+
"files_analyzed": 0,
|
|
1315
|
+
"mixed_import_files": 0,
|
|
1316
|
+
"total_mixed_modules": 0,
|
|
1317
|
+
"unused_import_files": 0,
|
|
1318
|
+
"total_unused_imports": 0,
|
|
1319
|
+
"pep8_violations": 0,
|
|
1320
|
+
"agent": "ImportOptimizationAgent",
|
|
1321
|
+
"error": error,
|
|
1322
|
+
}
|
|
279
1323
|
|
|
280
1324
|
|
|
281
1325
|
agent_registry.register(ImportOptimizationAgent)
|