crackerjack 0.31.10__py3-none-any.whl → 0.31.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of crackerjack might be problematic.
- crackerjack/CLAUDE.md +288 -705
- crackerjack/__main__.py +22 -8
- crackerjack/agents/__init__.py +0 -3
- crackerjack/agents/architect_agent.py +0 -43
- crackerjack/agents/base.py +1 -9
- crackerjack/agents/coordinator.py +2 -148
- crackerjack/agents/documentation_agent.py +109 -81
- crackerjack/agents/dry_agent.py +122 -97
- crackerjack/agents/formatting_agent.py +3 -16
- crackerjack/agents/import_optimization_agent.py +1174 -130
- crackerjack/agents/performance_agent.py +956 -188
- crackerjack/agents/performance_helpers.py +229 -0
- crackerjack/agents/proactive_agent.py +1 -48
- crackerjack/agents/refactoring_agent.py +516 -246
- crackerjack/agents/refactoring_helpers.py +282 -0
- crackerjack/agents/security_agent.py +393 -90
- crackerjack/agents/test_creation_agent.py +1776 -120
- crackerjack/agents/test_specialist_agent.py +59 -15
- crackerjack/agents/tracker.py +0 -102
- crackerjack/api.py +145 -37
- crackerjack/cli/handlers.py +48 -30
- crackerjack/cli/interactive.py +11 -11
- crackerjack/cli/options.py +66 -4
- crackerjack/code_cleaner.py +808 -148
- crackerjack/config/global_lock_config.py +110 -0
- crackerjack/config/hooks.py +43 -64
- crackerjack/core/async_workflow_orchestrator.py +247 -97
- crackerjack/core/autofix_coordinator.py +192 -109
- crackerjack/core/enhanced_container.py +46 -63
- crackerjack/core/file_lifecycle.py +549 -0
- crackerjack/core/performance.py +9 -8
- crackerjack/core/performance_monitor.py +395 -0
- crackerjack/core/phase_coordinator.py +281 -94
- crackerjack/core/proactive_workflow.py +9 -58
- crackerjack/core/resource_manager.py +501 -0
- crackerjack/core/service_watchdog.py +490 -0
- crackerjack/core/session_coordinator.py +4 -8
- crackerjack/core/timeout_manager.py +504 -0
- crackerjack/core/websocket_lifecycle.py +475 -0
- crackerjack/core/workflow_orchestrator.py +343 -209
- crackerjack/dynamic_config.py +50 -9
- crackerjack/errors.py +3 -4
- crackerjack/executors/async_hook_executor.py +63 -13
- crackerjack/executors/cached_hook_executor.py +14 -14
- crackerjack/executors/hook_executor.py +100 -37
- crackerjack/executors/hook_lock_manager.py +856 -0
- crackerjack/executors/individual_hook_executor.py +120 -86
- crackerjack/intelligence/__init__.py +0 -7
- crackerjack/intelligence/adaptive_learning.py +13 -86
- crackerjack/intelligence/agent_orchestrator.py +15 -78
- crackerjack/intelligence/agent_registry.py +12 -59
- crackerjack/intelligence/agent_selector.py +31 -92
- crackerjack/intelligence/integration.py +1 -41
- crackerjack/interactive.py +9 -9
- crackerjack/managers/async_hook_manager.py +25 -8
- crackerjack/managers/hook_manager.py +9 -9
- crackerjack/managers/publish_manager.py +57 -59
- crackerjack/managers/test_command_builder.py +6 -36
- crackerjack/managers/test_executor.py +9 -61
- crackerjack/managers/test_manager.py +17 -63
- crackerjack/managers/test_manager_backup.py +77 -127
- crackerjack/managers/test_progress.py +4 -23
- crackerjack/mcp/cache.py +5 -12
- crackerjack/mcp/client_runner.py +10 -10
- crackerjack/mcp/context.py +64 -6
- crackerjack/mcp/dashboard.py +14 -11
- crackerjack/mcp/enhanced_progress_monitor.py +55 -55
- crackerjack/mcp/file_monitor.py +72 -42
- crackerjack/mcp/progress_components.py +103 -84
- crackerjack/mcp/progress_monitor.py +122 -49
- crackerjack/mcp/rate_limiter.py +12 -12
- crackerjack/mcp/server_core.py +16 -22
- crackerjack/mcp/service_watchdog.py +26 -26
- crackerjack/mcp/state.py +15 -0
- crackerjack/mcp/tools/core_tools.py +95 -39
- crackerjack/mcp/tools/error_analyzer.py +6 -32
- crackerjack/mcp/tools/execution_tools.py +1 -56
- crackerjack/mcp/tools/execution_tools_backup.py +35 -131
- crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
- crackerjack/mcp/tools/intelligence_tools.py +2 -55
- crackerjack/mcp/tools/monitoring_tools.py +308 -145
- crackerjack/mcp/tools/proactive_tools.py +12 -42
- crackerjack/mcp/tools/progress_tools.py +23 -15
- crackerjack/mcp/tools/utility_tools.py +3 -40
- crackerjack/mcp/tools/workflow_executor.py +40 -60
- crackerjack/mcp/websocket/app.py +0 -3
- crackerjack/mcp/websocket/endpoints.py +206 -268
- crackerjack/mcp/websocket/jobs.py +213 -66
- crackerjack/mcp/websocket/server.py +84 -6
- crackerjack/mcp/websocket/websocket_handler.py +137 -29
- crackerjack/models/config_adapter.py +3 -16
- crackerjack/models/protocols.py +162 -3
- crackerjack/models/resource_protocols.py +454 -0
- crackerjack/models/task.py +3 -3
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +25 -71
- crackerjack/monitoring/regression_prevention.py +28 -87
- crackerjack/orchestration/advanced_orchestrator.py +44 -78
- crackerjack/orchestration/coverage_improvement.py +10 -60
- crackerjack/orchestration/execution_strategies.py +16 -16
- crackerjack/orchestration/test_progress_streamer.py +61 -53
- crackerjack/plugins/base.py +1 -1
- crackerjack/plugins/managers.py +22 -20
- crackerjack/py313.py +65 -21
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +627 -0
- crackerjack/services/cache.py +7 -9
- crackerjack/services/config.py +35 -52
- crackerjack/services/config_integrity.py +5 -16
- crackerjack/services/config_merge.py +542 -0
- crackerjack/services/contextual_ai_assistant.py +17 -19
- crackerjack/services/coverage_ratchet.py +44 -73
- crackerjack/services/debug.py +25 -39
- crackerjack/services/dependency_monitor.py +52 -50
- crackerjack/services/enhanced_filesystem.py +14 -11
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/filesystem.py +1 -12
- crackerjack/services/git.py +71 -47
- crackerjack/services/health_metrics.py +31 -27
- crackerjack/services/initialization.py +276 -428
- crackerjack/services/input_validator.py +760 -0
- crackerjack/services/log_manager.py +16 -16
- crackerjack/services/logging.py +7 -6
- crackerjack/services/metrics.py +43 -43
- crackerjack/services/pattern_cache.py +2 -31
- crackerjack/services/pattern_detector.py +26 -63
- crackerjack/services/performance_benchmarks.py +20 -45
- crackerjack/services/regex_patterns.py +2887 -0
- crackerjack/services/regex_utils.py +537 -0
- crackerjack/services/secure_path_utils.py +683 -0
- crackerjack/services/secure_status_formatter.py +534 -0
- crackerjack/services/secure_subprocess.py +605 -0
- crackerjack/services/security.py +47 -10
- crackerjack/services/security_logger.py +492 -0
- crackerjack/services/server_manager.py +109 -50
- crackerjack/services/smart_scheduling.py +8 -25
- crackerjack/services/status_authentication.py +603 -0
- crackerjack/services/status_security_manager.py +442 -0
- crackerjack/services/thread_safe_status_collector.py +546 -0
- crackerjack/services/tool_version_service.py +1 -23
- crackerjack/services/unified_config.py +36 -58
- crackerjack/services/validation_rate_limiter.py +269 -0
- crackerjack/services/version_checker.py +9 -40
- crackerjack/services/websocket_resource_limiter.py +572 -0
- crackerjack/slash_commands/__init__.py +52 -2
- crackerjack/tools/__init__.py +0 -0
- crackerjack/tools/validate_input_validator_patterns.py +262 -0
- crackerjack/tools/validate_regex_patterns.py +198 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/METADATA +197 -12
- crackerjack-0.31.13.dist-info/RECORD +178 -0
- crackerjack/cli/facade.py +0 -104
- crackerjack-0.31.10.dist-info/RECORD +0 -149
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/licenses/LICENSE +0 -0
crackerjack/agents/performance_helpers.py (new file)

@@ -0,0 +1,229 @@
+"""Performance analysis helper classes and utilities."""
+
+import ast
+import typing as t
+from dataclasses import dataclass
+
+
+@dataclass
+class OptimizationResult:
+    """Result of an optimization operation."""
+
+    lines: list[str]
+    modified: bool
+    optimization_description: str | None = None
+
+
+class EnhancedNestedLoopAnalyzer(ast.NodeVisitor):
+    """Analyzer for detecting nested loops with complexity analysis."""
+
+    def __init__(self) -> None:
+        self.loop_stack: list[tuple[str, ast.AST, int]] = []
+        self.nested_loops: list[dict[str, t.Any]] = []
+        self.complexity_hotspots: list[dict[str, t.Any]] = []
+
+    def visit_For(self, node: ast.For) -> None:
+        self._process_loop_node(node, "nested_for_loop")
+
+    def visit_While(self, node: ast.While) -> None:
+        self._process_loop_node(node, "nested_while_loop")
+
+    def _process_loop_node(self, node: ast.For | ast.While, loop_type: str) -> None:
+        """Process a loop node and track nesting information."""
+        current_depth = len(self.loop_stack) + 1
+        self.loop_stack.append((loop_type.split("_")[1], node, current_depth))
+
+        if current_depth > 1:
+            loop_info = self._create_loop_info(node, loop_type, current_depth)
+            self.nested_loops.append(loop_info)
+            self._check_complexity_hotspot(loop_info, current_depth)
+
+        self.generic_visit(node)
+        self.loop_stack.pop()
+
+    def _create_loop_info(
+        self, node: ast.For | ast.While, loop_type: str, current_depth: int
+    ) -> dict[str, t.Any]:
+        """Create loop information dictionary."""
+        loop_info: dict[str, t.Any] = {
+            "line_number": node.lineno,
+            "type": loop_type,
+            "depth": current_depth,
+            "complexity": f"O(n^{current_depth})",
+            "complexity_factor": self._calculate_complexity_factor(current_depth),
+            "priority": self._get_optimization_priority(current_depth),
+            "node": node,
+        }
+
+        if isinstance(node, ast.For):
+            loop_info["iterable"] = self._extract_iterable_info(node)
+
+        return loop_info
+
+    def _check_complexity_hotspot(
+        self, loop_info: dict[str, t.Any], current_depth: int
+    ) -> None:
+        """Check if loop is a complexity hotspot and add to hotspots list."""
+        if current_depth >= 3:  # O(n³) or higher
+            self.complexity_hotspots.append(
+                loop_info
+                | {
+                    "severity": "high",
+                    "suggestion": "Critical: Consider algorithmic improvements (memoization, caching, different data structures)",
+                }
+            )
+
+    def _calculate_complexity_factor(self, depth: int) -> int:
+        """Calculate relative complexity factor for optimization prioritization."""
+        return depth**2  # Exponential growth factor
+
+    def _get_optimization_priority(self, depth: int) -> str:
+        """Determine optimization priority based on nesting depth."""
+        if depth >= 4:
+            return "critical"
+        elif depth == 3:
+            return "high"
+        elif depth == 2:
+            return "medium"
+        return "low"
+
+    def _extract_iterable_info(self, node: ast.For) -> dict[str, t.Any]:
+        """Extract information about the iterable for optimization hints."""
+        iterable_info = {"type": "unknown", "name": None}
+
+        if isinstance(node.iter, ast.Name):
+            iterable_info = {"type": "variable", "name": node.iter.id}
+        elif isinstance(node.iter, ast.Call) and isinstance(node.iter.func, ast.Name):
+            iterable_info = {
+                "type": "function_call",
+                "name": node.iter.func.id,
+            }
+            if node.iter.func.id == "range":
+                iterable_info["optimization_hint"] = (
+                    "Consider list comprehension or vectorization"
+                )
+
+        return iterable_info
+
+
+class EnhancedListOpAnalyzer(ast.NodeVisitor):
+    """Analyzer for detecting inefficient list operations in loops."""
+
+    def __init__(self) -> None:
+        self.in_loop = False
+        self.loop_depth = 0
+        self.list_ops: list[dict[str, t.Any]] = []
+        self.current_loop_node: ast.For | ast.While | None = None
+
+    def visit_For(self, node: ast.For) -> None:
+        self._enter_loop_context(node)
+        self.generic_visit(node)
+        self._exit_loop_context()
+
+    def visit_While(self, node: ast.While) -> None:
+        self._enter_loop_context(node)
+        self.generic_visit(node)
+        self._exit_loop_context()
+
+    def visit_AugAssign(self, node: ast.AugAssign) -> None:
+        if self._should_analyze_aug_assign(node):
+            self._analyze_aug_assign_node(node)
+        self.generic_visit(node)
+
+    def _enter_loop_context(self, node: ast.For | ast.While) -> None:
+        """Enter loop context and save previous state."""
+        self._old_state = (self.in_loop, self.loop_depth, self.current_loop_node)
+        self.in_loop = True
+        self.loop_depth += 1
+        self.current_loop_node = node
+
+    def _exit_loop_context(self) -> None:
+        """Exit loop context and restore previous state."""
+        self.in_loop, self.loop_depth, self.current_loop_node = self._old_state
+
+    def _should_analyze_aug_assign(self, node: ast.AugAssign) -> bool:
+        """Check if this augmented assignment should be analyzed."""
+        return self.in_loop and isinstance(node.op, ast.Add)
+
+    def _analyze_aug_assign_node(self, node: ast.AugAssign) -> None:
+        """Analyze an augmented assignment node for inefficiencies."""
+        impact_factor = self._calculate_performance_impact()
+
+        if isinstance(node.value, ast.List):
+            self._handle_list_concat(node, impact_factor)
+        elif isinstance(node.value, ast.Name):
+            self._handle_variable_concat(node, impact_factor)
+
+    def _handle_list_concat(self, node: ast.AugAssign, impact_factor: int) -> None:
+        """Handle list concatenation with literal list."""
+        # Type narrowing to help pyright understand that node.value is an ast.List
+        assert isinstance(node.value, ast.List)
+        list_size = len(node.value.elts)
+
+        self.list_ops.append(
+            {
+                "line_number": node.lineno,
+                "type": "list_concat_in_loop",
+                "pattern": f"list += [{list_size} items]",
+                "loop_depth": self.loop_depth,
+                "impact_factor": impact_factor,
+                "optimization": "append" if list_size == 1 else "extend",
+                "performance_gain": f"{impact_factor * 2}x"
+                if list_size > 1
+                else "2-3x",
+            }
+        )
+
+    def _handle_variable_concat(self, node: ast.AugAssign, impact_factor: int) -> None:
+        """Handle list concatenation with variable."""
+        var_name = getattr(node.value, "id", "unknown")
+        self.list_ops.append(
+            {
+                "line_number": node.lineno,
+                "type": "list_concat_variable",
+                "pattern": f"list += {var_name}",
+                "loop_depth": self.loop_depth,
+                "impact_factor": impact_factor,
+                "optimization": "extend",
+                "performance_gain": f"{impact_factor * 3}x",
+            }
+        )
+
+    def _calculate_performance_impact(self) -> int:
+        """Calculate expected performance impact based on context."""
+        base_impact = 2  # Baseline improvement factor
+
+        if self.loop_depth > 1:
+            base_impact *= self.loop_depth**2
+
+        if self._is_hot_loop():
+            base_impact *= 5
+
+        return min(base_impact, 50)  # Cap at 50x impact
+
+    def _is_hot_loop(self) -> bool:
+        """Check if current loop is a hot loop with large range."""
+        if not (self.current_loop_node and isinstance(self.current_loop_node, ast.For)):
+            return False
+
+        return self._has_large_range_iterator()
+
+    def _has_large_range_iterator(self) -> bool:
+        """Check if the for loop uses a large range."""
+        if not isinstance(self.current_loop_node, ast.For):
+            return False
+
+        iter_node = self.current_loop_node.iter
+        if not (
+            isinstance(iter_node, ast.Call)
+            and isinstance(iter_node.func, ast.Name)
+            and iter_node.func.id == "range"
+        ):
+            return False
+
+        args = iter_node.args
+        if not (args and isinstance(args[0], ast.Constant)):
+            return False
+
+        value = args[0].value
+        return isinstance(value, int | float) and value > 100
crackerjack/agents/proactive_agent.py

@@ -5,12 +5,6 @@ from .base import AgentContext, FixResult, Issue, SubAgent
 
 
 class ProactiveAgent(SubAgent):
-    """Base class for agents that can plan before executing fixes.
-
-    Proactive agents analyze the codebase and create architectural plans
-    before applying fixes, preventing violations rather than just fixing them.
-    """
-
     def __init__(self, context: AgentContext) -> None:
         super().__init__(context)
         self._planning_cache: dict[str, dict[str, t.Any]] = {}
@@ -18,25 +12,9 @@ class ProactiveAgent(SubAgent):
 
     @abstractmethod
     async def plan_before_action(self, issue: Issue) -> dict[str, t.Any]:
-        """Create an architectural plan before fixing the issue.
-
-        Returns a plan dictionary with:
-        - strategy: How to approach the fix
-        - patterns: Recommended patterns to use
-        - dependencies: Other changes needed
-        - risks: Potential issues to watch for
-        """
         pass
 
     async def analyze_and_fix_proactively(self, issue: Issue) -> FixResult:
-        """Execute proactive fix with planning phase.
-
-        1. Create architectural plan
-        2. Apply fix following the plan
-        3. Validate against plan
-        4. Cache successful patterns
-        """
-        # Check planning cache first
         cache_key = self._get_planning_cache_key(issue)
         if cache_key in self._planning_cache:
             plan = self._planning_cache[cache_key]
@@ -46,10 +24,8 @@ class ProactiveAgent(SubAgent):
             self._planning_cache[cache_key] = plan
             self.log(f"Created new plan for {cache_key}")
 
-        # Execute the fix with the plan
         result = await self._execute_with_plan(issue, plan)
 
-        # Cache successful patterns
         if result.success and result.confidence >= 0.8:
             self._cache_successful_pattern(issue, plan, result)
 
@@ -58,19 +34,14 @@ class ProactiveAgent(SubAgent):
     async def _execute_with_plan(
         self, issue: Issue, plan: dict[str, t.Any]
     ) -> FixResult:
-        """Execute the fix following the architectural plan."""
-        # Default implementation falls back to standard analyze_and_fix
-        # Subclasses should override to use the plan
         return await self.analyze_and_fix(issue)
 
     def _get_planning_cache_key(self, issue: Issue) -> str:
-        """Generate cache key for planning."""
         return f"{issue.type.value}:{issue.file_path}:{issue.line_number}"
 
     def _cache_successful_pattern(
         self, issue: Issue, plan: dict[str, t.Any], result: FixResult
     ) -> None:
-        """Cache successful patterns for future reuse."""
         pattern_key = f"{issue.type.value}_{plan.get('strategy', 'default')}"
         self._pattern_cache[pattern_key] = {
             "plan": plan,
@@ -81,24 +52,6 @@ class ProactiveAgent(SubAgent):
         self.log(f"Cached successful pattern: {pattern_key}")
 
     def get_cached_patterns(self) -> dict[str, t.Any]:
-        """Get all cached patterns for inspection."""
         return self._pattern_cache.copy()
 
-    def clear_pattern_cache(self) -> None:
-        """Clear the pattern cache."""
-        self._pattern_cache.clear()
-        self.log("Cleared pattern cache")
-
-    def get_planning_confidence(self, issue: Issue) -> float:
-        """Get confidence in planning ability for this issue."""
-        # Check if we have cached patterns for this issue type
-        issue_patterns = [
-            key for key in self._pattern_cache if key.startswith(issue.type.value)
-        ]
-
-        if issue_patterns:
-            # Higher confidence if we have successful patterns
-            return min(0.9, 0.6 + (len(issue_patterns) * 0.1))
-
-        # Base confidence from can_handle
-        return 0.5
+    # Removed unused methods: clear_pattern_cache, get_planning_confidence