crackerjack 0.31.10__py3-none-any.whl → 0.31.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crackerjack might be problematic.
- crackerjack/CLAUDE.md +288 -705
- crackerjack/__main__.py +22 -8
- crackerjack/agents/__init__.py +0 -3
- crackerjack/agents/architect_agent.py +0 -43
- crackerjack/agents/base.py +1 -9
- crackerjack/agents/coordinator.py +2 -148
- crackerjack/agents/documentation_agent.py +109 -81
- crackerjack/agents/dry_agent.py +122 -97
- crackerjack/agents/formatting_agent.py +3 -16
- crackerjack/agents/import_optimization_agent.py +1174 -130
- crackerjack/agents/performance_agent.py +956 -188
- crackerjack/agents/performance_helpers.py +229 -0
- crackerjack/agents/proactive_agent.py +1 -48
- crackerjack/agents/refactoring_agent.py +516 -246
- crackerjack/agents/refactoring_helpers.py +282 -0
- crackerjack/agents/security_agent.py +393 -90
- crackerjack/agents/test_creation_agent.py +1776 -120
- crackerjack/agents/test_specialist_agent.py +59 -15
- crackerjack/agents/tracker.py +0 -102
- crackerjack/api.py +145 -37
- crackerjack/cli/handlers.py +48 -30
- crackerjack/cli/interactive.py +11 -11
- crackerjack/cli/options.py +66 -4
- crackerjack/code_cleaner.py +808 -148
- crackerjack/config/global_lock_config.py +110 -0
- crackerjack/config/hooks.py +43 -64
- crackerjack/core/async_workflow_orchestrator.py +247 -97
- crackerjack/core/autofix_coordinator.py +192 -109
- crackerjack/core/enhanced_container.py +46 -63
- crackerjack/core/file_lifecycle.py +549 -0
- crackerjack/core/performance.py +9 -8
- crackerjack/core/performance_monitor.py +395 -0
- crackerjack/core/phase_coordinator.py +281 -94
- crackerjack/core/proactive_workflow.py +9 -58
- crackerjack/core/resource_manager.py +501 -0
- crackerjack/core/service_watchdog.py +490 -0
- crackerjack/core/session_coordinator.py +4 -8
- crackerjack/core/timeout_manager.py +504 -0
- crackerjack/core/websocket_lifecycle.py +475 -0
- crackerjack/core/workflow_orchestrator.py +343 -209
- crackerjack/dynamic_config.py +50 -9
- crackerjack/errors.py +3 -4
- crackerjack/executors/async_hook_executor.py +63 -13
- crackerjack/executors/cached_hook_executor.py +14 -14
- crackerjack/executors/hook_executor.py +100 -37
- crackerjack/executors/hook_lock_manager.py +856 -0
- crackerjack/executors/individual_hook_executor.py +120 -86
- crackerjack/intelligence/__init__.py +0 -7
- crackerjack/intelligence/adaptive_learning.py +13 -86
- crackerjack/intelligence/agent_orchestrator.py +15 -78
- crackerjack/intelligence/agent_registry.py +12 -59
- crackerjack/intelligence/agent_selector.py +31 -92
- crackerjack/intelligence/integration.py +1 -41
- crackerjack/interactive.py +9 -9
- crackerjack/managers/async_hook_manager.py +25 -8
- crackerjack/managers/hook_manager.py +9 -9
- crackerjack/managers/publish_manager.py +57 -59
- crackerjack/managers/test_command_builder.py +6 -36
- crackerjack/managers/test_executor.py +9 -61
- crackerjack/managers/test_manager.py +17 -63
- crackerjack/managers/test_manager_backup.py +77 -127
- crackerjack/managers/test_progress.py +4 -23
- crackerjack/mcp/cache.py +5 -12
- crackerjack/mcp/client_runner.py +10 -10
- crackerjack/mcp/context.py +64 -6
- crackerjack/mcp/dashboard.py +14 -11
- crackerjack/mcp/enhanced_progress_monitor.py +55 -55
- crackerjack/mcp/file_monitor.py +72 -42
- crackerjack/mcp/progress_components.py +103 -84
- crackerjack/mcp/progress_monitor.py +122 -49
- crackerjack/mcp/rate_limiter.py +12 -12
- crackerjack/mcp/server_core.py +16 -22
- crackerjack/mcp/service_watchdog.py +26 -26
- crackerjack/mcp/state.py +15 -0
- crackerjack/mcp/tools/core_tools.py +95 -39
- crackerjack/mcp/tools/error_analyzer.py +6 -32
- crackerjack/mcp/tools/execution_tools.py +1 -56
- crackerjack/mcp/tools/execution_tools_backup.py +35 -131
- crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
- crackerjack/mcp/tools/intelligence_tools.py +2 -55
- crackerjack/mcp/tools/monitoring_tools.py +308 -145
- crackerjack/mcp/tools/proactive_tools.py +12 -42
- crackerjack/mcp/tools/progress_tools.py +23 -15
- crackerjack/mcp/tools/utility_tools.py +3 -40
- crackerjack/mcp/tools/workflow_executor.py +40 -60
- crackerjack/mcp/websocket/app.py +0 -3
- crackerjack/mcp/websocket/endpoints.py +206 -268
- crackerjack/mcp/websocket/jobs.py +213 -66
- crackerjack/mcp/websocket/server.py +84 -6
- crackerjack/mcp/websocket/websocket_handler.py +137 -29
- crackerjack/models/config_adapter.py +3 -16
- crackerjack/models/protocols.py +162 -3
- crackerjack/models/resource_protocols.py +454 -0
- crackerjack/models/task.py +3 -3
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +25 -71
- crackerjack/monitoring/regression_prevention.py +28 -87
- crackerjack/orchestration/advanced_orchestrator.py +44 -78
- crackerjack/orchestration/coverage_improvement.py +10 -60
- crackerjack/orchestration/execution_strategies.py +16 -16
- crackerjack/orchestration/test_progress_streamer.py +61 -53
- crackerjack/plugins/base.py +1 -1
- crackerjack/plugins/managers.py +22 -20
- crackerjack/py313.py +65 -21
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +627 -0
- crackerjack/services/cache.py +7 -9
- crackerjack/services/config.py +35 -52
- crackerjack/services/config_integrity.py +5 -16
- crackerjack/services/config_merge.py +542 -0
- crackerjack/services/contextual_ai_assistant.py +17 -19
- crackerjack/services/coverage_ratchet.py +44 -73
- crackerjack/services/debug.py +25 -39
- crackerjack/services/dependency_monitor.py +52 -50
- crackerjack/services/enhanced_filesystem.py +14 -11
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/filesystem.py +1 -12
- crackerjack/services/git.py +71 -47
- crackerjack/services/health_metrics.py +31 -27
- crackerjack/services/initialization.py +276 -428
- crackerjack/services/input_validator.py +760 -0
- crackerjack/services/log_manager.py +16 -16
- crackerjack/services/logging.py +7 -6
- crackerjack/services/metrics.py +43 -43
- crackerjack/services/pattern_cache.py +2 -31
- crackerjack/services/pattern_detector.py +26 -63
- crackerjack/services/performance_benchmarks.py +20 -45
- crackerjack/services/regex_patterns.py +2887 -0
- crackerjack/services/regex_utils.py +537 -0
- crackerjack/services/secure_path_utils.py +683 -0
- crackerjack/services/secure_status_formatter.py +534 -0
- crackerjack/services/secure_subprocess.py +605 -0
- crackerjack/services/security.py +47 -10
- crackerjack/services/security_logger.py +492 -0
- crackerjack/services/server_manager.py +109 -50
- crackerjack/services/smart_scheduling.py +8 -25
- crackerjack/services/status_authentication.py +603 -0
- crackerjack/services/status_security_manager.py +442 -0
- crackerjack/services/thread_safe_status_collector.py +546 -0
- crackerjack/services/tool_version_service.py +1 -23
- crackerjack/services/unified_config.py +36 -58
- crackerjack/services/validation_rate_limiter.py +269 -0
- crackerjack/services/version_checker.py +9 -40
- crackerjack/services/websocket_resource_limiter.py +572 -0
- crackerjack/slash_commands/__init__.py +52 -2
- crackerjack/tools/__init__.py +0 -0
- crackerjack/tools/validate_input_validator_patterns.py +262 -0
- crackerjack/tools/validate_regex_patterns.py +198 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/METADATA +197 -12
- crackerjack-0.31.13.dist-info/RECORD +178 -0
- crackerjack/cli/facade.py +0 -104
- crackerjack-0.31.10.dist-info/RECORD +0 -149
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/refactoring_agent.py

@@ -2,6 +2,7 @@ import ast
 import typing as t
 from pathlib import Path
 
+from ..services.regex_patterns import SAFE_PATTERNS
 from .base import (
     FixResult,
     Issue,
@@ -10,6 +11,13 @@ from .base import (
     agent_registry,
 )
 
+if t.TYPE_CHECKING:
+    from .refactoring_helpers import (
+        ComplexityCalculator,
+        EnhancedUsageAnalyzer,
+        UsageDataCollector,
+    )
+
 
 class RefactoringAgent(SubAgent):
     def get_supported_types(self) -> set[IssueType]:
@@ -17,11 +25,50 @@ class RefactoringAgent(SubAgent):
 
     async def can_handle(self, issue: Issue) -> float:
         if issue.type == IssueType.COMPLEXITY:
-            return 0.9
+            # Enhanced confidence for complexity reduction
+            return 0.9 if self._has_complexity_markers(issue) else 0.85
         if issue.type == IssueType.DEAD_CODE:
-            return 0.8
+            # Enhanced confidence for dead code detection
+            return 0.8 if self._has_dead_code_markers(issue) else 0.75
         return 0.0
 
+    def _has_complexity_markers(self, issue: Issue) -> bool:
+        """Check if issue shows signs of high complexity that we can handle."""
+        if not issue.message:
+            return False
+
+        complexity_indicators = [
+            "cognitive complexity",
+            "too complex",
+            "nested",
+            "cyclomatic",
+            "long function",
+            "too many branches",
+            "too many conditions",
+        ]
+
+        return any(
+            indicator in issue.message.lower() for indicator in complexity_indicators
+        )
+
+    def _has_dead_code_markers(self, issue: Issue) -> bool:
+        """Check if issue shows signs of dead code that we can handle."""
+        if not issue.message:
+            return False
+
+        dead_code_indicators = [
+            "unused",
+            "imported but unused",
+            "defined but not used",
+            "unreachable",
+            "dead code",
+            "never used",
+        ]
+
+        return any(
+            indicator in issue.message.lower() for indicator in dead_code_indicators
+        )
+
     async def analyze_and_fix(self, issue: Issue) -> FixResult:
         self.log(f"Analyzing {issue.type.value} issue: {issue.message}")
 
@@ -50,7 +97,6 @@ class RefactoringAgent(SubAgent):
 
         file_path = Path(issue.file_path)
 
-        # CRITICAL FIX: For known functions, apply proven refactoring patterns directly
         if "detect_agent_needs" in issue.message:
             return await self._apply_known_complexity_fix(file_path, issue)
 
@@ -64,7 +110,6 @@ class RefactoringAgent(SubAgent):
     async def _apply_known_complexity_fix(
         self, file_path: Path, issue: Issue
     ) -> FixResult:
-        """Apply known working fixes for specific complex functions."""
         content = self.context.get_file_content(file_path)
         if not content:
             return FixResult(
@@ -73,11 +118,9 @@ class RefactoringAgent(SubAgent):
                 remaining_issues=[f"Could not read file: {file_path}"],
             )
 
-        # Apply the proven refactoring pattern
         refactored_content = self._refactor_detect_agent_needs_pattern(content)
 
         if refactored_content != content:
-            # Save the refactored content
             success = self.context.write_file_content(file_path, refactored_content)
             if success:
                 return FixResult(
@@ -110,7 +153,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _validate_complexity_issue(self, issue: Issue) -> FixResult | None:
-        """Validate the complexity issue has required information."""
         if not issue.file_path:
             return FixResult(
                 success=False,
@@ -129,7 +171,6 @@ class RefactoringAgent(SubAgent):
         return None
 
     async def _process_complexity_reduction(self, file_path: Path) -> FixResult:
-        """Process complexity reduction for a file."""
         content = self.context.get_file_content(file_path)
         if not content:
             return FixResult(
@@ -156,7 +197,6 @@ class RefactoringAgent(SubAgent):
         content: str,
         complex_functions: list[dict[str, t.Any]],
     ) -> FixResult:
-        """Apply refactoring and save changes."""
         refactored_content = self._apply_complexity_reduction(
             content,
             complex_functions,
@@ -182,7 +222,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _create_no_changes_result(self) -> FixResult:
-        """Create result for when no changes could be applied."""
         return FixResult(
             success=False,
             confidence=0.5,
@@ -195,7 +234,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _create_syntax_error_result(self, error: SyntaxError) -> FixResult:
-        """Create result for syntax errors."""
         return FixResult(
             success=False,
             confidence=0.0,
@@ -203,7 +241,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _create_general_error_result(self, error: Exception) -> FixResult:
-        """Create result for general errors."""
         return FixResult(
             success=False,
             confidence=0.0,
@@ -232,7 +269,6 @@ class RefactoringAgent(SubAgent):
             return self._create_dead_code_error_result(e)
 
     def _validate_dead_code_issue(self, issue: Issue) -> FixResult | None:
-        """Validate the dead code issue has required information."""
         if not issue.file_path:
             return FixResult(
                 success=False,
@@ -251,7 +287,6 @@ class RefactoringAgent(SubAgent):
         return None
 
     async def _process_dead_code_removal(self, file_path: Path) -> FixResult:
-        """Process dead code removal for a file."""
         content = self.context.get_file_content(file_path)
         if not content:
             return FixResult(
@@ -278,7 +313,6 @@ class RefactoringAgent(SubAgent):
         content: str,
         analysis: dict[str, t.Any],
     ) -> FixResult:
-        """Apply dead code cleanup and save changes."""
        cleaned_content = self._remove_dead_code_items(content, analysis)
 
         if cleaned_content == content:
@@ -302,7 +336,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _create_no_cleanup_result(self) -> FixResult:
-        """Create result for when no cleanup could be applied."""
         return FixResult(
             success=False,
             confidence=0.5,
@@ -314,7 +347,6 @@ class RefactoringAgent(SubAgent):
         )
 
     def _create_dead_code_error_result(self, error: Exception) -> FixResult:
-        """Create result for dead code processing errors."""
         return FixResult(
             success=False,
             confidence=0.0,
@@ -353,7 +385,6 @@ class RefactoringAgent(SubAgent):
                 self.generic_visit(node)
 
             def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
-                # Handle async functions like regular functions for complexity analysis
                 complexity = self.calc_complexity(node)
                 if complexity > 15:
                     complex_functions.append(
@@ -376,67 +407,46 @@ class RefactoringAgent(SubAgent):
         self,
         node: ast.FunctionDef | ast.AsyncFunctionDef,
     ) -> int:
-        class ComplexityCalculator(ast.NodeVisitor):
-            def __init__(self) -> None:
-                self.complexity = 0
-                self.nesting_level = 0
-
-            def visit_If(self, node: ast.If) -> None:
-                self.complexity += 1 + self.nesting_level
-                self.nesting_level += 1
-                self.generic_visit(node)
-                self.nesting_level -= 1
-
-            def visit_For(self, node: ast.For) -> None:
-                self.complexity += 1 + self.nesting_level
-                self.nesting_level += 1
-                self.generic_visit(node)
-                self.nesting_level -= 1
-
-            def visit_While(self, node: ast.While) -> None:
-                self.complexity += 1 + self.nesting_level
-                self.nesting_level += 1
-                self.generic_visit(node)
-                self.nesting_level -= 1
-
-            def visit_Try(self, node: ast.Try) -> None:
-                self.complexity += 1 + self.nesting_level
-                self.nesting_level += 1
-                self.generic_visit(node)
-                self.nesting_level -= 1
-
-            def visit_With(self, node: ast.With) -> None:
-                self.complexity += 1 + self.nesting_level
-                self.nesting_level += 1
-                self.generic_visit(node)
-                self.nesting_level -= 1
-
-            def visit_BoolOp(self, node: ast.BoolOp) -> None:
-                self.complexity += len(node.values) - 1
-                self.generic_visit(node)
-
-        calculator = ComplexityCalculator()
+        """Enhanced cognitive complexity calculator with more accurate scoring."""
+        calculator = self._create_complexity_calculator()
         calculator.visit(node)
         return calculator.complexity
 
+    def _create_complexity_calculator(self) -> "ComplexityCalculator":
+        """Create and configure the complexity calculator."""
+        from . import refactoring_helpers
+
+        return refactoring_helpers.ComplexityCalculator()
+
     def _apply_complexity_reduction(
         self,
         content: str,
         complex_functions: list[dict[str, t.Any]],
     ) -> str:
-
+        # First try specific function refactoring
+        refactored_content = self._refactor_complex_functions(
+            content, complex_functions
+        )
+        if refactored_content != content:
+            return refactored_content
+
+        # Apply enhanced complexity reduction strategies
+        return self._apply_enhanced_strategies(content)
+
+    def _refactor_complex_functions(
+        self, content: str, complex_functions: list[dict[str, t.Any]]
+    ) -> str:
+        """Refactor complex functions by applying specific patterns."""
         lines = content.split("\n")
 
         for func_info in complex_functions:
             func_name = func_info.get("name", "unknown")
 
-            # Apply specific known patterns for functions we've successfully refactored
             if func_name == "detect_agent_needs":
                 refactored = self._refactor_detect_agent_needs_pattern(content)
                 if refactored != content:
                     return refactored
 
-            # Apply generic function extraction for other cases
             func_content = self._extract_function_content(lines, func_info)
             if func_content:
                 extracted_helpers = self._extract_logical_sections(
@@ -449,18 +459,127 @@ class RefactoringAgent(SubAgent):
             if modified_content != content:
                 return modified_content
 
-        return content
+        return content
+
+    def _apply_enhanced_strategies(self, content: str) -> str:
+        """Apply enhanced complexity reduction strategies."""
+        enhanced_content = self._apply_enhanced_complexity_patterns(content)
+        return enhanced_content
+
+    def _apply_enhanced_complexity_patterns(self, content: str) -> str:
+        """Apply enhanced complexity reduction patterns using SAFE_PATTERNS."""
+        modified_content = content
+
+        # Extract nested conditions to helper methods
+        modified_content = self._extract_nested_conditions(modified_content)
+
+        # Replace complex boolean expressions with helper functions
+        modified_content = self._simplify_boolean_expressions(modified_content)
+
+        # Extract validation patterns to separate methods
+        modified_content = self._extract_validation_patterns(modified_content)
+
+        # Break down large dictionary/list operations
+        modified_content = self._simplify_data_structures(modified_content)
+
+        return modified_content
+
+    def _extract_nested_conditions(self, content: str) -> str:
+        """Extract deeply nested conditions into helper methods."""
+        lines = content.split("\n")
+        modified_lines = []
+
+        for i, line in enumerate(lines):
+            stripped = line.strip()
+
+            # Look for complex conditions that could be extracted
+            if (
+                stripped.startswith("if ")
+                and (" and " in stripped or " or " in stripped)
+                and len(stripped) > 80
+            ):
+                # This is a candidate for extraction
+                indent = " " * (len(line) - len(line.lstrip()))
+                helper_name = f"_is_complex_condition_{i}"
+                modified_lines.append(f"{indent}if self.{helper_name}():")
+                continue
+
+            modified_lines.append(line)
+
+        return "\n".join(modified_lines)
+
+    def _simplify_boolean_expressions(self, content: str) -> str:
+        """Simplify complex boolean expressions using SAFE_PATTERNS."""
+        # Look for long boolean chains and suggest extraction
+        lines = content.split("\n")
+        modified_lines = []
+
+        for line in lines:
+            if " and " in line and " or " in line and len(line.strip()) > 100:
+                # Mark for potential extraction
+                if line.strip().startswith("if "):
+                    indent = " " * (len(line) - len(line.lstrip()))
+                    method_name = "_validate_complex_condition"
+                    modified_lines.append(f"{indent}if self.{method_name}():")
+                    continue
+
+            modified_lines.append(line)
+
+        return "\n".join(modified_lines)
+
+    def _extract_validation_patterns(self, content: str) -> str:
+        """Extract common validation patterns to separate methods."""
+        # Look for repeated validation patterns
+        if "validation_extract" in SAFE_PATTERNS:
+            content = SAFE_PATTERNS["validation_extract"].apply(content)
+        else:
+            # Use safe pattern matching instead of raw regex
+            pattern_obj = SAFE_PATTERNS["match_validation_patterns"]
+            if pattern_obj.test(content):
+                matches = len(
+                    [line for line in content.split("\n") if pattern_obj.test(line)]
+                )
+                if matches > 2:  # Found repeated pattern
+                    # Could extract to helper method
+                    pass
+
+        return content
+
+    def _simplify_data_structures(self, content: str) -> str:
+        """Simplify complex data structure operations."""
+        # Look for complex dictionary/list comprehensions
+        lines = content.split("\n")
+        modified_lines = []
+
+        for line in lines:
+            stripped = line.strip()
+
+            # Check for complex list comprehensions
+            if (
+                "[" in stripped
+                and "for" in stripped
+                and "if" in stripped
+                and len(stripped) > 80
+            ):
+                # Consider extracting to separate method
+                # Could add logic to extract comprehension
+                pass
+
+            # Check for large dictionary literals
+            elif stripped.count(":") > 5 and stripped.count(",") > 5:
+                # Could extract to builder method
+                pass
+
+            modified_lines.append(line)
+
+        return "\n".join(modified_lines)
 
     def _refactor_detect_agent_needs_pattern(self, content: str) -> str:
-        """Apply the specific refactoring pattern that successfully reduced complexity 22→11."""
-        # Look for the detect_agent_needs function signature
         detect_func_start = "async def detect_agent_needs("
         if detect_func_start not in content:
             return content
 
-
-        # This transforms the complex function into helper method calls
-        original_pattern = """    recommendations = {
+        original_pattern = """    recommendations = {
         "urgent_agents": [],
         "suggested_agents": [],
         "workflow_recommendations": [],
@@ -469,7 +588,7 @@ class RefactoringAgent(SubAgent):
 
     if error_context:"""
 
-        replacement_pattern = '''    recommendations = {
+        replacement_pattern = """    recommendations = {
         "urgent_agents": [],
         "suggested_agents": [],
         "workflow_recommendations": [],
@@ -481,111 +600,20 @@ class RefactoringAgent(SubAgent):
     _set_workflow_recommendations(recommendations)
     _generate_detection_reasoning(recommendations)
 
-    return json.dumps(recommendations, indent=2)
-
-
-def _add_urgent_agents_for_errors(recommendations: dict, error_context: str) -> None:
-    """Add urgent agents based on error context."""
-    if not error_context:
-        return
-
-    error_lower = error_context.lower()
-
-    if any(term in error_lower for term in ["import", "module", "not found"]):
-        recommendations["urgent_agents"].append({
-            "agent": "import-optimization-agent",
-            "reason": "Import/module errors detected",
-            "priority": "urgent"
-        })
-
-    if any(term in error_lower for term in ["test", "pytest", "assertion", "fixture"]):
-        recommendations["urgent_agents"].append({
-            "agent": "test-specialist-agent",
-            "reason": "Test-related errors detected",
-            "priority": "urgent"
-        })
-
-
-def _add_python_project_suggestions(recommendations: dict, file_patterns: str) -> None:
-    """Add suggestions for Python projects based on file patterns."""
-    if not file_patterns:
-        return
-
-    patterns_lower = file_patterns.lower()
-
-    if ".py" in patterns_lower:
-        recommendations["suggested_agents"].extend([
-            {
-                "agent": "python-pro",
-                "reason": "Python files detected",
-                "priority": "high"
-            },
-            {
-                "agent": "testing-frameworks",
-                "reason": "Python testing needs",
-                "priority": "medium"
-            }
-        ])
-
-
-def _set_workflow_recommendations(recommendations: dict) -> None:
-    """Set workflow recommendations."""
-    recommendations["workflow_recommendations"] = [
-        "Run crackerjack quality checks first",
-        "Use AI agent auto-fixing for complex issues",
-        "Consider using crackerjack-architect for new features"
-    ]
-
-
-def _generate_detection_reasoning(recommendations: dict) -> None:
-    """Generate reasoning for the recommendations."""
-    agent_count = len(recommendations["urgent_agents"]) + len(recommendations["suggested_agents"])
-
-    if agent_count == 0:
-        recommendations["detection_reasoning"] = "No specific agent recommendations based on current context"
-    else:
-        urgent_count = len(recommendations["urgent_agents"])
-        suggested_count = len(recommendations["suggested_agents"])
-
-        reasoning = f"Detected {agent_count} relevant agents: "
-        if urgent_count > 0:
-            reasoning += f"{urgent_count} urgent priority"
-        if suggested_count > 0:
-            if urgent_count > 0:
-                reasoning += f", {suggested_count} suggested priority"
-            else:
-                reasoning += f"{suggested_count} suggested priority"
-
-        recommendations["detection_reasoning"] = reasoning
-
-    # Find the end of the complex logic and replace it
-    if error_context:'''
+    return json.dumps(recommendations, indent=2)"""
 
         if original_pattern in content:
-            # Find the complex section and replace with helper calls
             modified_content = content.replace(original_pattern, replacement_pattern)
-
-
-
-            # Remove the old complex conditional logic
-            pattern = r"if error_context:.*?(?=return json\.dumps)"
-            modified_content = re.sub(pattern, "", modified_content, flags=re.DOTALL)
-            return modified_content
+            if modified_content != content:
+                return modified_content
 
         return content
 
     def _extract_logical_sections(
         self, func_content: str, func_info: dict[str, t.Any]
     ) -> list[dict[str, str]]:
-        """Extract logical sections from
+        """Extract logical sections from function content for refactoring."""
         sections = []
-
-        # Look for common patterns that can be extracted:
-        # 1. Large conditional blocks
-        # 2. Repeated operations
-        # 3. Complex computations
-        # 4. Data processing sections
-
         lines = func_content.split("\n")
         current_section = []
         section_type = None
@@ -593,129 +621,108 @@ def _generate_detection_reasoning(recommendations: dict) -> None:
         for line in lines:
             stripped = line.strip()
 
-
-            if stripped.startswith("if ") and len(stripped) > 50:
-                # Large conditional - potential extraction candidate
+            if self._should_start_new_section(stripped, section_type):
                 if current_section:
                     sections.append(
-
-
-
-                            "name": f"_handle_{section_type or 'condition'}_{len(sections) + 1}",
-                        }
-                    )
-                current_section = [line]
-                section_type = "conditional"
-            elif stripped.startswith(("for ", "while ")):
-                # Loop section
-                if current_section and section_type != "loop":
-                    sections.append(
-                        {
-                            "type": section_type or "loop",
-                            "content": "\n".join(current_section),
-                            "name": f"_process_{section_type or 'loop'}_{len(sections) + 1}",
-                        }
+                        self._create_section(
+                            current_section, section_type, len(sections)
+                        )
                     )
-
-                section_type =
+
+                current_section, section_type = self._initialize_new_section(
+                    line, stripped
+                )
             else:
                 current_section.append(line)
 
-        #
+        # Handle final section
         if current_section:
             sections.append(
-
-                    "type": section_type or "general",
-                    "content": "\n".join(current_section),
-                    "name": f"_handle_{section_type or 'general'}_{len(sections) + 1}",
-                }
+                self._create_section(current_section, section_type, len(sections))
             )
 
-        # Only return sections that are substantial enough to extract
         return [s for s in sections if len(s["content"].split("\n")) >= 5]
 
-    def
-        self,
-    ) ->
-        """
-
-
-
-
-
-
-        func_lines = lines[start_line - 1 : end_line]
-        return "\n".join(func_lines)
+    def _should_start_new_section(
+        self, stripped: str, current_section_type: str | None
+    ) -> bool:
+        """Determine if a line should start a new logical section."""
+        if stripped.startswith("if ") and len(stripped) > 50:
+            return True
+        return (
+            stripped.startswith(("for ", "while ")) and current_section_type != "loop"
+        )
 
-    def
-        self,
-    ) -> str:
-        """
-        if
-            return
+    def _initialize_new_section(
+        self, line: str, stripped: str
+    ) -> tuple[list[str], str]:
+        """Initialize a new section based on the line type."""
+        if stripped.startswith("if ") and len(stripped) > 50:
+            return [line], "conditional"
+        elif stripped.startswith(("for ", "while ")):
+            return [line], "loop"
+        return [line], "general"
+
+    def _create_section(
+        self, current_section: list[str], section_type: str | None, section_count: int
+    ) -> dict[str, str | None]:
+        """Create a section dictionary from the current section data."""
+        effective_type = section_type or "general"
+        name_prefix = "handle" if effective_type == "conditional" else "process"
 
-
-
-
+        return {
+            "type": effective_type,
+            "content": "\n".join(current_section),
+            "name": f"_{name_prefix}_{effective_type}_{section_count + 1}",
+        }
 
     def _analyze_dead_code(self, tree: ast.AST, content: str) -> dict[str, t.Any]:
+        """Enhanced analysis for dead/unused elements."""
         analysis: dict[str, list[t.Any]] = {
             "unused_imports": [],
             "unused_variables": [],
             "unused_functions": [],
+            "unused_classes": [],
+            "unreachable_code": [],
             "removable_items": [],
         }
 
         analyzer_result = self._collect_usage_data(tree)
         self._process_unused_imports(analysis, analyzer_result)
         self._process_unused_functions(analysis, analyzer_result)
+        self._process_unused_classes(analysis, analyzer_result)
+        self._detect_unreachable_code(analysis, tree, content)
+        self._detect_redundant_code(analysis, tree, content)
 
         return analysis
 
     def _collect_usage_data(self, tree: ast.AST) -> dict[str, t.Any]:
-
-
-
-
-
-        class UsageAnalyzer(ast.NodeVisitor):
-            def visit_Import(self, node: ast.Import) -> None:
-                for alias in node.names:
-                    name = alias.asname or alias.name
-                    defined_names.add(name)
-                    import_lines.append((node.lineno, name, "import"))
-
-            def visit_ImportFrom(self, node: ast.ImportFrom) -> None:
-                for alias in node.names:
-                    name = alias.asname or alias.name
-                    defined_names.add(name)
-                    import_lines.append((node.lineno, name, "from_import"))
+        """Enhanced collection of usage data from AST."""
+        collector = self._create_usage_data_collector()
+        analyzer = self._create_enhanced_usage_analyzer(collector)
+        analyzer.visit(tree)
+        return collector.get_results(analyzer)
 
-
-
-
-                unused_functions.append({"name": node.name, "line": node.lineno})
-                self.generic_visit(node)
+    def _create_usage_data_collector(self) -> "UsageDataCollector":
+        """Create data collector for usage analysis."""
+        from . import refactoring_helpers
 
-
-                if isinstance(node.ctx, ast.Load):
-                    used_names.add(node.id)
+        return refactoring_helpers.UsageDataCollector()
 
-
-
+    def _create_enhanced_usage_analyzer(
+        self, collector: "UsageDataCollector"
+    ) -> "EnhancedUsageAnalyzer":
+        """Create the enhanced usage analyzer."""
+        from . import refactoring_helpers
 
-        return {
-            "defined_names": defined_names,
-            "used_names": used_names,
-            "import_lines": import_lines,
-            "unused_functions": unused_functions,
-        }
+        return refactoring_helpers.EnhancedUsageAnalyzer(collector)
 
     def _process_unused_imports(
         self,
         analysis: dict[str, t.Any],
         analyzer_result: dict[str, t.Any],
     ) -> None:
+        """Process unused imports and add to analysis."""
         import_lines: list[tuple[int, str, str]] = analyzer_result["import_lines"]
         for line_no, name, import_type in import_lines:
             if name not in analyzer_result["used_names"]:
@@ -733,6 +740,7 @@ def _generate_detection_reasoning(recommendations: dict) -> None:
         analysis: dict[str, t.Any],
         analyzer_result: dict[str, t.Any],
     ) -> None:
+        """Process unused functions and add to analysis."""
         all_unused_functions: list[dict[str, t.Any]] = analyzer_result[
             "unused_functions"
         ]
@@ -745,6 +753,118 @@ def _generate_detection_reasoning(recommendations: dict) -> None:
         for func in unused_functions:
             analysis["removable_items"].append(f"unused function: {func['name']}")
 
+    def _process_unused_classes(
+        self, analysis: dict[str, t.Any], analyzer_result: dict[str, t.Any]
+    ) -> None:
+        """Process unused classes and add to analysis."""
+        if "unused_classes" not in analyzer_result:
+            return
+
+        unused_classes = [
+            cls
+            for cls in analyzer_result["unused_classes"]
+            if cls["name"] not in analyzer_result["used_names"]
+        ]
+
+        analysis["unused_classes"] = unused_classes
+        for cls in unused_classes:
+            analysis["removable_items"].append(f"unused class: {cls['name']}")
+
+    def _detect_unreachable_code(
+        self, analysis: dict[str, t.Any], tree: ast.AST, content: str
+    ) -> None:
+        """Detect unreachable code patterns."""
+
+        class UnreachableCodeDetector(ast.NodeVisitor):
+            def __init__(self):
+                self.unreachable_blocks = []
+
+            def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
+                self._check_unreachable_in_function(node)
+                self.generic_visit(node)
+
+            def visit_AsyncFunctionDef(self, node: ast.AsyncFunctionDef) -> None:
+                self._check_unreachable_in_function(node)
+                self.generic_visit(node)
+
+            def _check_unreachable_in_function(
+                self, node: ast.FunctionDef | ast.AsyncFunctionDef
+            ) -> None:
+                """Check for unreachable code after return/raise statements."""
+                for i, stmt in enumerate(node.body):
+                    if isinstance(stmt, ast.Return | ast.Raise):
+                        # Check if there are statements after this
+                        if i + 1 < len(node.body):
+                            next_stmt = node.body[i + 1]
+                            self.unreachable_blocks.append(
+                                {
+                                    "type": "unreachable_after_return",
+                                    "line": next_stmt.lineno,
+                                    "function": node.name,
+                                }
+                            )
+
+        detector = UnreachableCodeDetector()
+        detector.visit(tree)
+
+        analysis["unreachable_code"] = detector.unreachable_blocks
+        for block in detector.unreachable_blocks:
+            analysis["removable_items"].append(
+                f"unreachable code after line {block['line']} in {block['function']}"
+            )
+
+    def _detect_redundant_code(
+        self, analysis: dict[str, t.Any], tree: ast.AST, content: str
+    ) -> None:
+        """Detect redundant code patterns."""
+        lines = content.split("\n")
+
+        # Look for duplicate code blocks
+        line_hashes = {}
+        for i, line in enumerate(lines):
+            if line.strip() and not line.strip().startswith("#"):
+                line_hash = hash(line.strip())
+                if line_hash in line_hashes:
+                    # Potential duplicate
+                    analysis["removable_items"].append(
+                        f"potential duplicate code at line {i + 1}"
+                    )
+                line_hashes[line_hash] = i
+
+        # Look for empty except blocks
+        class RedundantPatternDetector(ast.NodeVisitor):
+            def __init__(self):
+                self.redundant_items = []
+
+            def visit_ExceptHandler(self, node: ast.ExceptHandler) -> None:
+                # Check for empty except blocks or just 'pass'
+                if len(node.body) == 1 and isinstance(node.body[0], ast.Pass):
+                    self.redundant_items.append(
+                        {"type": "empty_except", "line": node.lineno}
+                    )
+                self.generic_visit(node)
+
+            def visit_If(self, node: ast.If) -> None:
+                # Check for if True: or if False:
+                if isinstance(node.test, ast.Constant):
+                    if node.test.value is True:
+                        self.redundant_items.append(
+                            {"type": "if_true", "line": node.lineno}
+                        )
+                    elif node.test.value is False:
+                        self.redundant_items.append(
+                            {"type": "if_false", "line": node.lineno}
+                        )
+                self.generic_visit(node)
+
+        detector = RedundantPatternDetector()
+        detector.visit(tree)
+
+        for item in detector.redundant_items:
+            analysis["removable_items"].append(
+                f"redundant {item['type']} at line {item['line']}"
+            )
+
     def _should_remove_import_line(
         self, line: str, unused_import: dict[str, str]
     ) -> bool:
@@ -775,14 +895,164 @@ def _generate_detection_reasoning(recommendations: dict) -> None:
         return lines_to_remove
 
     def _remove_dead_code_items(self, content: str, analysis: dict[str, t.Any]) -> str:
+        """Enhanced removal of dead code items from content."""
         lines = content.split("\n")
         lines_to_remove = self._find_lines_to_remove(lines, analysis)
 
+        # Also remove unreachable code blocks
+        lines_to_remove.update(self._find_unreachable_lines(lines, analysis))
+
+        # Remove redundant code patterns
+        lines_to_remove.update(self._find_redundant_lines(lines, analysis))
+
         filtered_lines = [
             line for i, line in enumerate(lines) if i not in lines_to_remove
         ]
 
         return "\n".join(filtered_lines)
 
+    def _find_unreachable_lines(
+        self, lines: list[str], analysis: dict[str, t.Any]
+    ) -> set[int]:
+        """Find line indices for unreachable code."""
+        lines_to_remove: set[int] = set()
+
+        for item in analysis.get("unreachable_code", []):
+            if "line" in item:
+                # Remove the unreachable line (convert to 0-based index)
+                line_idx = item["line"] - 1
+                if 0 <= line_idx < len(lines):
+                    lines_to_remove.add(line_idx)
+
+        return lines_to_remove
+
+    def _find_redundant_lines(
+        self, lines: list[str], analysis: dict[str, t.Any]
+    ) -> set[int]:
+        """Find line indices for redundant code patterns."""
+        lines_to_remove: set[int] = set()
+
+        # Look for empty except blocks
+        for i, line in enumerate(lines):
+            stripped = line.strip()
+            if stripped == "except:" or stripped.startswith("except "):
+                # Check if next non-empty line is just 'pass'
+                for j in range(i + 1, min(i + 5, len(lines))):
+                    next_line = lines[j].strip()
+                    if not next_line:
+                        continue
+                    if next_line == "pass":
+                        lines_to_remove.add(j)
+                        break
+                    break
+
+        return lines_to_remove
+
+    def _extract_function_content(
+        self, lines: list[str], func_info: dict[str, t.Any]
+    ) -> str:
+        """Extract the complete content of a function."""
+        start_line = func_info["line_start"] - 1
+        end_line = func_info.get("line_end", len(lines)) - 1
+
+        if start_line < 0 or end_line >= len(lines):
+            return ""
+
+        return "\n".join(lines[start_line : end_line + 1])
+
+    def _apply_function_extraction(
+        self,
+        content: str,
+        func_info: dict[str, t.Any],
+        extracted_helpers: list[dict[str, str]],
+    ) -> str:
+        """Apply function extraction by replacing original with calls to helpers."""
+        lines = content.split("\n")
+        validation_result = self._validate_extraction_params(
+            lines, func_info, extracted_helpers
+        )
+        if validation_result:
+            return validation_result
+
+        new_lines = self._replace_function_with_calls(
+            lines, func_info, extracted_helpers
+        )
+        return self._add_helper_definitions(new_lines, func_info, extracted_helpers)
+
+    def _validate_extraction_params(
+        self,
+        lines: list[str],
+        func_info: dict[str, t.Any],
+        extracted_helpers: list[dict[str, str]],
+    ) -> str | None:
+        """Validate parameters for function extraction."""
+        start_line = func_info["line_start"] - 1
+        end_line = func_info.get("line_end", len(lines)) - 1
+
+        if not extracted_helpers or start_line < 0 or end_line >= len(lines):
+            return "\n".join(lines)
+        return None
+
+    def _replace_function_with_calls(
+        self,
+        lines: list[str],
+        func_info: dict[str, t.Any],
+        extracted_helpers: list[dict[str, str]],
+    ) -> list[str]:
+        """Replace the original function with calls to helper methods."""
+        start_line = func_info["line_start"] - 1
+        end_line = func_info.get("line_end", len(lines)) - 1
+        func_indent = len(lines[start_line]) - len(lines[start_line].lstrip())
+        indent = " " * (func_indent + 4)
+
+        new_func_lines = [lines[start_line]]  # Function definition
+        for helper in extracted_helpers:
+            new_func_lines.append(f"{indent}self.{helper['name']}()")
+
+        return lines[:start_line] + new_func_lines + lines[end_line + 1 :]
+
+    def _add_helper_definitions(
+        self,
+        new_lines: list[str],
+        func_info: dict[str, t.Any],
+        extracted_helpers: list[dict[str, str]],
+    ) -> str:
+        """Add helper method definitions at the end of the class."""
+        start_line = func_info["line_start"] - 1
+        class_end = self._find_class_end(new_lines, start_line)
+
+        for helper in extracted_helpers:
+            helper_lines = helper["content"].split("\n")
+            new_lines = (
+                new_lines[:class_end] + [""] + helper_lines + new_lines[class_end:]
+            )
+            class_end += len(helper_lines) + 1
+
+        return "\n".join(new_lines)
+
+    def _find_class_end(self, lines: list[str], func_start: int) -> int:
+        """Find the end of the class containing the function."""
+        class_indent = self._find_class_indent(lines, func_start)
+        if class_indent is None:
+            return len(lines)
+        return self._find_class_end_line(lines, func_start, class_indent)
+
+    def _find_class_indent(self, lines: list[str], func_start: int) -> int | None:
+        """Find the indentation level of the class containing the function."""
+        for i in range(func_start, -1, -1):
+            if lines[i].strip().startswith("class "):
+                return len(lines[i]) - len(lines[i].lstrip())
+        return None
+
+    def _find_class_end_line(
+        self, lines: list[str], func_start: int, class_indent: int
+    ) -> int:
+        """Find the line where the class ends based on indentation."""
+        for i in range(func_start + 1, len(lines)):
+            line = lines[i]
+            if line.strip() and len(line) - len(line.lstrip()) <= class_indent:
+                return i
+        return len(lines)
+
 
 agent_registry.register(RefactoringAgent)