crackerjack 0.33.0__py3-none-any.whl → 0.33.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic.
- crackerjack/__main__.py +1350 -34
- crackerjack/adapters/__init__.py +17 -0
- crackerjack/adapters/lsp_client.py +358 -0
- crackerjack/adapters/rust_tool_adapter.py +194 -0
- crackerjack/adapters/rust_tool_manager.py +193 -0
- crackerjack/adapters/skylos_adapter.py +231 -0
- crackerjack/adapters/zuban_adapter.py +560 -0
- crackerjack/agents/base.py +7 -3
- crackerjack/agents/coordinator.py +271 -33
- crackerjack/agents/documentation_agent.py +9 -15
- crackerjack/agents/dry_agent.py +3 -15
- crackerjack/agents/formatting_agent.py +1 -1
- crackerjack/agents/import_optimization_agent.py +36 -180
- crackerjack/agents/performance_agent.py +17 -98
- crackerjack/agents/performance_helpers.py +7 -31
- crackerjack/agents/proactive_agent.py +1 -3
- crackerjack/agents/refactoring_agent.py +16 -85
- crackerjack/agents/refactoring_helpers.py +7 -42
- crackerjack/agents/security_agent.py +9 -48
- crackerjack/agents/test_creation_agent.py +356 -513
- crackerjack/agents/test_specialist_agent.py +0 -4
- crackerjack/api.py +6 -25
- crackerjack/cli/cache_handlers.py +204 -0
- crackerjack/cli/cache_handlers_enhanced.py +683 -0
- crackerjack/cli/facade.py +100 -0
- crackerjack/cli/handlers.py +224 -9
- crackerjack/cli/interactive.py +6 -4
- crackerjack/cli/options.py +642 -55
- crackerjack/cli/utils.py +2 -1
- crackerjack/code_cleaner.py +58 -117
- crackerjack/config/global_lock_config.py +8 -48
- crackerjack/config/hooks.py +53 -62
- crackerjack/core/async_workflow_orchestrator.py +24 -34
- crackerjack/core/autofix_coordinator.py +3 -17
- crackerjack/core/enhanced_container.py +4 -13
- crackerjack/core/file_lifecycle.py +12 -89
- crackerjack/core/performance.py +2 -2
- crackerjack/core/performance_monitor.py +15 -55
- crackerjack/core/phase_coordinator.py +104 -204
- crackerjack/core/resource_manager.py +14 -90
- crackerjack/core/service_watchdog.py +62 -95
- crackerjack/core/session_coordinator.py +149 -0
- crackerjack/core/timeout_manager.py +14 -72
- crackerjack/core/websocket_lifecycle.py +13 -78
- crackerjack/core/workflow_orchestrator.py +171 -174
- crackerjack/docs/INDEX.md +11 -0
- crackerjack/docs/generated/api/API_REFERENCE.md +10895 -0
- crackerjack/docs/generated/api/CLI_REFERENCE.md +109 -0
- crackerjack/docs/generated/api/CROSS_REFERENCES.md +1755 -0
- crackerjack/docs/generated/api/PROTOCOLS.md +3 -0
- crackerjack/docs/generated/api/SERVICES.md +1252 -0
- crackerjack/documentation/__init__.py +31 -0
- crackerjack/documentation/ai_templates.py +756 -0
- crackerjack/documentation/dual_output_generator.py +765 -0
- crackerjack/documentation/mkdocs_integration.py +518 -0
- crackerjack/documentation/reference_generator.py +977 -0
- crackerjack/dynamic_config.py +55 -50
- crackerjack/executors/async_hook_executor.py +10 -15
- crackerjack/executors/cached_hook_executor.py +117 -43
- crackerjack/executors/hook_executor.py +8 -34
- crackerjack/executors/hook_lock_manager.py +26 -183
- crackerjack/executors/individual_hook_executor.py +13 -11
- crackerjack/executors/lsp_aware_hook_executor.py +270 -0
- crackerjack/executors/tool_proxy.py +417 -0
- crackerjack/hooks/lsp_hook.py +79 -0
- crackerjack/intelligence/adaptive_learning.py +25 -10
- crackerjack/intelligence/agent_orchestrator.py +2 -5
- crackerjack/intelligence/agent_registry.py +34 -24
- crackerjack/intelligence/agent_selector.py +5 -7
- crackerjack/interactive.py +17 -6
- crackerjack/managers/async_hook_manager.py +0 -1
- crackerjack/managers/hook_manager.py +79 -1
- crackerjack/managers/publish_manager.py +44 -8
- crackerjack/managers/test_command_builder.py +1 -15
- crackerjack/managers/test_executor.py +1 -3
- crackerjack/managers/test_manager.py +98 -7
- crackerjack/managers/test_manager_backup.py +10 -9
- crackerjack/mcp/cache.py +2 -2
- crackerjack/mcp/client_runner.py +1 -1
- crackerjack/mcp/context.py +191 -68
- crackerjack/mcp/dashboard.py +7 -5
- crackerjack/mcp/enhanced_progress_monitor.py +31 -28
- crackerjack/mcp/file_monitor.py +30 -23
- crackerjack/mcp/progress_components.py +31 -21
- crackerjack/mcp/progress_monitor.py +50 -53
- crackerjack/mcp/rate_limiter.py +6 -6
- crackerjack/mcp/server_core.py +17 -16
- crackerjack/mcp/service_watchdog.py +2 -1
- crackerjack/mcp/state.py +4 -7
- crackerjack/mcp/task_manager.py +11 -9
- crackerjack/mcp/tools/core_tools.py +173 -32
- crackerjack/mcp/tools/error_analyzer.py +3 -2
- crackerjack/mcp/tools/execution_tools.py +8 -10
- crackerjack/mcp/tools/execution_tools_backup.py +42 -30
- crackerjack/mcp/tools/intelligence_tool_registry.py +7 -5
- crackerjack/mcp/tools/intelligence_tools.py +5 -2
- crackerjack/mcp/tools/monitoring_tools.py +33 -70
- crackerjack/mcp/tools/proactive_tools.py +24 -11
- crackerjack/mcp/tools/progress_tools.py +5 -8
- crackerjack/mcp/tools/utility_tools.py +20 -14
- crackerjack/mcp/tools/workflow_executor.py +62 -40
- crackerjack/mcp/websocket/app.py +8 -0
- crackerjack/mcp/websocket/endpoints.py +352 -357
- crackerjack/mcp/websocket/jobs.py +40 -57
- crackerjack/mcp/websocket/monitoring_endpoints.py +2935 -0
- crackerjack/mcp/websocket/server.py +7 -25
- crackerjack/mcp/websocket/websocket_handler.py +6 -17
- crackerjack/mixins/__init__.py +0 -2
- crackerjack/mixins/error_handling.py +1 -70
- crackerjack/models/config.py +12 -1
- crackerjack/models/config_adapter.py +49 -1
- crackerjack/models/protocols.py +122 -122
- crackerjack/models/resource_protocols.py +55 -210
- crackerjack/monitoring/ai_agent_watchdog.py +13 -13
- crackerjack/monitoring/metrics_collector.py +426 -0
- crackerjack/monitoring/regression_prevention.py +8 -8
- crackerjack/monitoring/websocket_server.py +643 -0
- crackerjack/orchestration/advanced_orchestrator.py +11 -6
- crackerjack/orchestration/coverage_improvement.py +3 -3
- crackerjack/orchestration/execution_strategies.py +26 -6
- crackerjack/orchestration/test_progress_streamer.py +8 -5
- crackerjack/plugins/base.py +2 -2
- crackerjack/plugins/hooks.py +7 -0
- crackerjack/plugins/managers.py +11 -8
- crackerjack/security/__init__.py +0 -1
- crackerjack/security/audit.py +6 -35
- crackerjack/services/anomaly_detector.py +392 -0
- crackerjack/services/api_extractor.py +615 -0
- crackerjack/services/backup_service.py +2 -2
- crackerjack/services/bounded_status_operations.py +15 -152
- crackerjack/services/cache.py +127 -1
- crackerjack/services/changelog_automation.py +395 -0
- crackerjack/services/config.py +15 -9
- crackerjack/services/config_merge.py +19 -80
- crackerjack/services/config_template.py +506 -0
- crackerjack/services/contextual_ai_assistant.py +48 -22
- crackerjack/services/coverage_badge_service.py +171 -0
- crackerjack/services/coverage_ratchet.py +27 -25
- crackerjack/services/debug.py +3 -3
- crackerjack/services/dependency_analyzer.py +460 -0
- crackerjack/services/dependency_monitor.py +14 -11
- crackerjack/services/documentation_generator.py +491 -0
- crackerjack/services/documentation_service.py +675 -0
- crackerjack/services/enhanced_filesystem.py +6 -5
- crackerjack/services/enterprise_optimizer.py +865 -0
- crackerjack/services/error_pattern_analyzer.py +676 -0
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/git.py +8 -25
- crackerjack/services/health_metrics.py +10 -8
- crackerjack/services/heatmap_generator.py +735 -0
- crackerjack/services/initialization.py +11 -30
- crackerjack/services/input_validator.py +5 -97
- crackerjack/services/intelligent_commit.py +327 -0
- crackerjack/services/log_manager.py +15 -12
- crackerjack/services/logging.py +4 -3
- crackerjack/services/lsp_client.py +628 -0
- crackerjack/services/memory_optimizer.py +19 -87
- crackerjack/services/metrics.py +42 -33
- crackerjack/services/parallel_executor.py +9 -67
- crackerjack/services/pattern_cache.py +1 -1
- crackerjack/services/pattern_detector.py +6 -6
- crackerjack/services/performance_benchmarks.py +18 -59
- crackerjack/services/performance_cache.py +20 -81
- crackerjack/services/performance_monitor.py +27 -95
- crackerjack/services/predictive_analytics.py +510 -0
- crackerjack/services/quality_baseline.py +234 -0
- crackerjack/services/quality_baseline_enhanced.py +646 -0
- crackerjack/services/quality_intelligence.py +785 -0
- crackerjack/services/regex_patterns.py +618 -524
- crackerjack/services/regex_utils.py +43 -123
- crackerjack/services/secure_path_utils.py +5 -164
- crackerjack/services/secure_status_formatter.py +30 -141
- crackerjack/services/secure_subprocess.py +11 -92
- crackerjack/services/security.py +9 -41
- crackerjack/services/security_logger.py +12 -24
- crackerjack/services/server_manager.py +124 -16
- crackerjack/services/status_authentication.py +16 -159
- crackerjack/services/status_security_manager.py +4 -131
- crackerjack/services/thread_safe_status_collector.py +19 -125
- crackerjack/services/unified_config.py +21 -13
- crackerjack/services/validation_rate_limiter.py +5 -54
- crackerjack/services/version_analyzer.py +459 -0
- crackerjack/services/version_checker.py +1 -1
- crackerjack/services/websocket_resource_limiter.py +10 -144
- crackerjack/services/zuban_lsp_service.py +390 -0
- crackerjack/slash_commands/__init__.py +2 -7
- crackerjack/slash_commands/run.md +2 -2
- crackerjack/tools/validate_input_validator_patterns.py +14 -40
- crackerjack/tools/validate_regex_patterns.py +19 -48
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/METADATA +196 -25
- crackerjack-0.33.2.dist-info/RECORD +229 -0
- crackerjack/CLAUDE.md +0 -207
- crackerjack/RULES.md +0 -380
- crackerjack/py313.py +0 -234
- crackerjack-0.33.0.dist-info/RECORD +0 -187
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/WHEEL +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.33.0.dist-info → crackerjack-0.33.2.dist-info}/licenses/LICENSE +0 -0
crackerjack/executors/tool_proxy.py (new file)
@@ -0,0 +1,417 @@
+#!/usr/bin/env python3
+"""Tool proxy that routes tool calls through adapters with health checks and graceful degradation."""
+
+import asyncio
+import sys
+import time
+import typing as t
+from dataclasses import dataclass, field
+from pathlib import Path
+
+from rich.console import Console
+
+
+@dataclass
+class ToolHealthStatus:
+    """Health status of a tool."""
+
+    is_healthy: bool
+    last_check: float
+    consecutive_failures: int = 0
+    last_error: str | None = None
+    fallback_recommendations: list[str] = field(default_factory=list)
+
+
+@dataclass
+class CircuitBreakerState:
+    """Circuit breaker state for a tool."""
+
+    is_open: bool = False
+    failure_count: int = 0
+    last_failure_time: float = 0
+    next_retry_time: float = 0
+
+    # Circuit breaker thresholds
+    failure_threshold: int = 3
+    retry_timeout: float = 120  # 2 minutes
+
+    def should_attempt(self) -> bool:
+        """Check if we should attempt to use this tool."""
+        if not self.is_open:
+            return True
+
+        # Allow retry after timeout
+        return time.time() >= self.next_retry_time
+
+    def record_failure(self) -> None:
+        """Record a tool failure."""
+        self.failure_count += 1
+        self.last_failure_time = time.time()
+
+        if self.failure_count >= self.failure_threshold:
+            self.is_open = True
+            self.next_retry_time = time.time() + self.retry_timeout
+
+    def record_success(self) -> None:
+        """Record a tool success."""
+        self.failure_count = 0
+        self.is_open = False
+        self.next_retry_time = 0
+
+
+class ToolProxy:
+    """Proxy that routes tool calls through adapters with health checks."""
+
+    def __init__(self, console: Console | None = None):
+        self.console = console or Console()
+        self.health_status: dict[str, ToolHealthStatus] = {}
+        self.circuit_breakers: dict[str, CircuitBreakerState] = {}
+
+        # Tool mappings to adapters
+        self.tool_adapters = {
+            "zuban": self._create_zuban_adapter,
+            "skylos": self._create_skylos_adapter,
+            "ruff": self._create_ruff_adapter,
+            "bandit": self._create_bandit_adapter,
+        }
+
+        # Fallback recommendations
+        self.fallback_tools = {
+            "zuban": ["pyright", "mypy"],
+            "skylos": ["vulture"],
+            "ruff": [],  # Ruff is usually reliable
+            "bandit": [],  # Skip security checks if bandit fails
+        }
+
+    def execute_tool(self, tool_name: str, args: list[str]) -> int:
+        """Execute a tool through its adapter with health checks.
+
+        Args:
+            tool_name: Name of the tool to execute
+            args: Arguments to pass to the tool
+
+        Returns:
+            Exit code (0 for success, non-zero for failure)
+        """
+        try:
+            # Check circuit breaker
+            circuit_breaker = self._get_circuit_breaker(tool_name)
+
+            if not circuit_breaker.should_attempt():
+                self._handle_circuit_breaker_open(tool_name)
+                return self._try_fallback_tools(tool_name, args)
+
+            # Check tool health
+            if not self._check_tool_health(tool_name):
+                self._handle_unhealthy_tool(tool_name)
+                circuit_breaker.record_failure()
+                return self._try_fallback_tools(tool_name, args)
+
+            # Execute through adapter
+            result = self._execute_through_adapter(tool_name, args)
+
+            if result == 0:
+                circuit_breaker.record_success()
+            else:
+                circuit_breaker.record_failure()
+
+            return result
+
+        except Exception as e:
+            self.console.print(f"[red]Tool proxy error for {tool_name}: {e}[/red]")
+            self._get_circuit_breaker(tool_name).record_failure()
+            return self._try_fallback_tools(tool_name, args)
+
+    def _get_circuit_breaker(self, tool_name: str) -> CircuitBreakerState:
+        """Get or create circuit breaker for tool."""
+        if tool_name not in self.circuit_breakers:
+            self.circuit_breakers[tool_name] = CircuitBreakerState()
+        return self.circuit_breakers[tool_name]
+
+    def _check_tool_health(self, tool_name: str) -> bool:
+        """Check if a tool is healthy."""
+        current_time = time.time()
+
+        # Use cached health status if recent (within 30 seconds)
+        if tool_name in self.health_status:
+            status = self.health_status[tool_name]
+            if current_time - status.last_check < 30:
+                return status.is_healthy
+
+        # Perform actual health check
+        is_healthy = self._perform_health_check(tool_name)
+
+        self.health_status[tool_name] = ToolHealthStatus(
+            is_healthy=is_healthy,
+            last_check=current_time,
+            fallback_recommendations=list(self.fallback_tools.get(tool_name, [])),
+        )
+
+        return is_healthy
+
+    def _perform_health_check(self, tool_name: str) -> bool:
+        """Perform actual health check for a tool."""
+        try:
+            if tool_name in self.tool_adapters:
+                # Use adapter health check if available
+                adapter = self.tool_adapters[tool_name]()
+                if adapter and hasattr(adapter, "check_tool_health"):
+                    return bool(adapter.check_tool_health())
+
+            # Tool-specific health checks for known problematic tools
+            if tool_name == "zuban":
+                return self._check_zuban_health()
+            elif tool_name == "skylos":
+                return self._check_skylos_health()
+
+            # Fallback to basic version check
+            import subprocess
+
+            result = subprocess.run(
+                ["uv", "run", tool_name, "--version"],
+                capture_output=True,
+                timeout=10,
+                text=True,
+            )
+            return result.returncode == 0
+
+        except Exception:
+            return False
+
+    def _check_zuban_health(self) -> bool:
+        """Specific health check for Zuban that tests TOML parsing."""
+        import subprocess
+        import tempfile
+
+        try:
+            # Test basic version first
+            result = subprocess.run(
+                ["uv", "run", "zuban", "--version"],
+                capture_output=True,
+                timeout=10,
+                text=True,
+            )
+            if result.returncode != 0:
+                return False
+
+            # Test actual type checking on a minimal file - this triggers TOML parsing
+            with tempfile.TemporaryDirectory() as temp_dir:
+                temp_file = Path(temp_dir) / "test.py"
+                temp_file.write_text("x: int = 1\n")
+
+                # This should trigger the TOML parsing bug if it exists
+                result = subprocess.run(
+                    ["uv", "run", "zuban", "check", str(temp_file)],
+                    capture_output=True,
+                    timeout=5,  # Short timeout to catch panics
+                    text=True,
+                    cwd=Path.cwd(),  # Run from our directory with problematic pyproject.toml
+                )
+
+                # If it exits cleanly (regardless of type errors), tool is healthy
+                # The key is that it doesn't panic with TOML parsing errors
+                return result.returncode in (0, 1)  # 0=no errors, 1=type errors found
+
+        except (subprocess.TimeoutExpired, subprocess.CalledProcessError):
+            # Tool timed out or crashed - likely the TOML parsing bug
+            return False
+        except Exception:
+            return False
+
+    def _check_skylos_health(self) -> bool:
+        """Specific health check for Skylos."""
+        import subprocess
+
+        try:
+            result = subprocess.run(
+                ["uv", "run", "skylos", "--version"],
+                capture_output=True,
+                timeout=10,
+                text=True,
+            )
+            return result.returncode == 0
+        except Exception:
+            return False
+
+    def _execute_through_adapter(self, tool_name: str, args: list[str]) -> int:
+        """Execute tool through its adapter if available."""
+        if tool_name in self.tool_adapters:
+            try:
+                # Use async adapter if available
+                return asyncio.run(self._execute_adapter_async(tool_name, args))
+            except Exception as e:
+                self.console.print(
+                    f"[yellow]Adapter execution failed for {tool_name}: {e}[/yellow]"
+                )
+                # Fall back to direct execution
+                pass
+
+        # Direct execution as fallback
+        return self._execute_direct(tool_name, args)
+
+    async def _execute_adapter_async(self, tool_name: str, args: list[str]) -> int:
+        """Execute tool through async adapter."""
+        adapter_factory = self.tool_adapters[tool_name]
+        adapter = adapter_factory()
+
+        # Convert args to file paths for adapter
+        target_files = self._args_to_file_paths(args)
+
+        if hasattr(adapter, "check_with_lsp_or_fallback"):
+            result = await adapter.check_with_lsp_or_fallback(target_files)
+            return 0 if result.success else 1
+
+        return self._execute_direct(tool_name, args)
+
+    def _execute_direct(self, tool_name: str, args: list[str]) -> int:
+        """Execute tool directly without adapter."""
+        import subprocess
+
+        try:
+            cmd = ["uv", "run", tool_name] + args
+            result = subprocess.run(cmd, timeout=300)
+            return result.returncode
+
+        except subprocess.TimeoutExpired:
+            self.console.print(f"[red]Tool {tool_name} timed out[/red]")
+            return 1
+        except Exception as e:
+            self.console.print(
+                f"[red]Direct execution failed for {tool_name}: {e}[/red]"
+            )
+            return 1
+
+    def _args_to_file_paths(self, args: list[str]) -> list[Path]:
+        """Convert command line arguments to file paths."""
+        file_paths = [
+            Path(arg) for arg in args if not arg.startswith("-") and Path(arg).exists()
+        ]
+
+        # Default to current directory if no files specified
+        if not file_paths:
+            file_paths = [Path()]
+
+        return file_paths
+
+    def _try_fallback_tools(self, tool_name: str, args: list[str]) -> int:
+        """Try fallback tools when primary tool fails."""
+        fallbacks = self.fallback_tools.get(tool_name, [])
+
+        if not fallbacks:
+            self.console.print(
+                f"[yellow]No fallback available for {tool_name}. Skipping with warning.[/yellow]"
+            )
+            return 0  # Skip with success to not block workflow
+
+        self.console.print(
+            f"[yellow]Trying fallback tools for {tool_name}: {', '.join(fallbacks)}[/yellow]"
+        )
+
+        for fallback in fallbacks:
+            try:
+                # Check if fallback is healthy
+                if self._check_tool_health(fallback):
+                    result = self._execute_direct(fallback, args)
+                    if result == 0:
+                        self.console.print(
+                            f"[green]Fallback {fallback} succeeded[/green]"
+                        )
+                        return 0
+            except Exception:
+                continue
+
+        self.console.print(
+            f"[yellow]All fallbacks failed for {tool_name}. Continuing...[/yellow]"
+        )
+        return 0  # Don't block workflow on tool failures
+
+    def _handle_circuit_breaker_open(self, tool_name: str) -> None:
+        """Handle when circuit breaker is open."""
+        circuit_breaker = self.circuit_breakers[tool_name]
+        retry_minutes = int((circuit_breaker.next_retry_time - time.time()) / 60)
+
+        self.console.print(
+            f"[yellow]Circuit breaker open for {tool_name}. "
+            f"Will retry in {retry_minutes} minutes.[/yellow]"
+        )
+
+    def _handle_unhealthy_tool(self, tool_name: str) -> None:
+        """Handle when tool is detected as unhealthy."""
+        self.console.print(
+            f"[yellow]Tool {tool_name} is unhealthy. Trying fallbacks...[/yellow]"
+        )
+
+    def _create_zuban_adapter(self) -> t.Any | None:
+        """Create Zuban adapter instance."""
+        try:
+            from crackerjack.adapters.zuban_adapter import ZubanAdapter
+            from crackerjack.models.config import Options
+            from crackerjack.orchestration.execution_strategies import ExecutionContext
+
+            # Create minimal context for adapter
+            options = Options()
+            context = ExecutionContext(pkg_path=Path.cwd(), options=options)
+            return ZubanAdapter(context)
+        except (ImportError, Exception):
+            return None
+
+    def _create_skylos_adapter(self) -> t.Any | None:
+        """Create Skylos adapter instance."""
+        try:
+            from crackerjack.adapters.skylos_adapter import SkylosAdapter
+            from crackerjack.models.config import Options
+            from crackerjack.orchestration.execution_strategies import ExecutionContext
+
+            options = Options()
+            context = ExecutionContext(pkg_path=Path.cwd(), options=options)
+            return SkylosAdapter(context)
+        except (ImportError, Exception):
+            return None
+
+    def _create_ruff_adapter(self) -> t.Any | None:
+        """Create Ruff adapter instance."""
+        # Ruff doesn't have an adapter yet, return None for direct execution
+        return None
+
+    def _create_bandit_adapter(self) -> t.Any | None:
+        """Create Bandit adapter instance."""
+        # Bandit doesn't have an adapter yet, return None for direct execution
+        return None
+
+    def get_tool_status(self) -> dict[str, dict[str, t.Any]]:
+        """Get status of all tools for monitoring."""
+        status = {}
+
+        for tool_name in self.tool_adapters.keys():
+            circuit_breaker = self._get_circuit_breaker(tool_name)
+            health_status = self.health_status.get(tool_name)
+
+            status[tool_name] = {
+                "circuit_breaker_open": circuit_breaker.is_open,
+                "failure_count": circuit_breaker.failure_count,
+                "is_healthy": health_status.is_healthy if health_status else None,
+                "last_health_check": health_status.last_check
+                if health_status
+                else None,
+                "fallback_tools": self.fallback_tools.get(tool_name, []),
+            }
+
+        return status
+
+
+def main() -> None:
+    """Main entry point for tool proxy CLI."""
+    if len(sys.argv) < 2:
+        print("Usage: python -m crackerjack.executors.tool_proxy <tool_name> [args...]")
+        sys.exit(1)
+
+    tool_name = sys.argv[1]
+    args = sys.argv[2:]
+
+    proxy = ToolProxy()
+    exit_code = proxy.execute_tool(tool_name, args)
+    sys.exit(exit_code)
+
+
+if __name__ == "__main__":
+    main()
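Taken together, the proxy wraps each tool invocation in a health check plus a per-tool circuit breaker, and degrades to fallback tools (or skips with a warning) rather than failing the workflow. Below is a minimal usage sketch, not part of the package itself: the ToolProxy, execute_tool, and get_tool_status names come from the diff above, while the "zuban check ." invocation and the monitoring loop are only illustrative.

# Rough sketch of driving the new ToolProxy programmatically; the tool name
# and arguments are illustrative, the API names come from the diff above.
from crackerjack.executors.tool_proxy import ToolProxy

proxy = ToolProxy()

# Route one tool run through the proxy; an unhealthy or circuit-broken tool
# falls back to its configured alternatives (e.g. pyright/mypy for zuban).
exit_code = proxy.execute_tool("zuban", ["check", "."])

# Inspect circuit-breaker and health state, e.g. for a monitoring endpoint.
for tool, state in proxy.get_tool_status().items():
    print(tool, state["circuit_breaker_open"], state["failure_count"])

raise SystemExit(exit_code)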
crackerjack/hooks/lsp_hook.py (new file)
@@ -0,0 +1,79 @@
+#!/usr/bin/env python3
+"""
+LSP-aware type checking hook for crackerjack.
+
+This hook communicates with a running Zuban LSP server to perform type checking
+instead of spawning a separate zuban process, providing faster and more efficient
+type checking during pre-commit hooks.
+"""
+
+import sys
+from pathlib import Path
+
+from crackerjack.services.lsp_client import LSPClient
+from rich.console import Console
+
+
+def main() -> int:
+    """Main entry point for LSP hook."""
+    console = Console()
+
+    # Get files to check from command line arguments
+    files_to_check = sys.argv[1:] if len(sys.argv) > 1 else []
+
+    # If no files specified, check project files
+    if not files_to_check:
+        project_path = Path.cwd()
+        lsp_client = LSPClient(console)
+        files_to_check = lsp_client.get_project_files(project_path)
+
+    if not files_to_check:
+        console.print("🔍 No Python files to check")
+        return 0
+
+    # Initialize LSP client
+    lsp_client = LSPClient(console)
+
+    # Check if LSP server is running
+    if not lsp_client.is_server_running():
+        console.print(
+            "⚠️ Zuban LSP server not running, falling back to direct zuban check"
+        )
+        # Fall back to regular zuban execution
+        import subprocess
+
+        try:
+            result = subprocess.run(
+                ["zuban", "check"] + files_to_check,
+                capture_output=True,
+                text=True,
+                timeout=120,
+            )
+            if result.stdout:
+                console.print(result.stdout)
+            if result.stderr:
+                console.print(result.stderr, style="red")
+            return result.returncode
+        except (subprocess.TimeoutExpired, FileNotFoundError) as e:
+            console.print(f"❌ Error running zuban: {e}", style="red")
+            return 1
+
+    # Use LSP server for type checking
+    server_info = lsp_client.get_server_info()
+    if server_info:
+        console.print(f"🔍 Using Zuban LSP server (PID: {server_info['pid']})")
+
+    # Check files via LSP
+    diagnostics = lsp_client.check_files(files_to_check)
+
+    # Display results
+    output = lsp_client.format_diagnostics(diagnostics)
+    console.print(output)
+
+    # Return appropriate exit code
+    has_errors = any(diags for diags in diagnostics.values())
+    return 1 if has_errors else 0
+
+
+if __name__ == "__main__":
+    sys.exit(main())
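The hook's control flow reduces to: reuse the running Zuban LSP server when one is available, otherwise shell out to zuban check directly. The condensed sketch below restates that decision using only the LSPClient calls that appear in the hook above (is_server_running, check_files, format_diagnostics); the example file path is hypothetical.

# Rough sketch of the hook's core decision, based on the LSPClient calls shown
# above; the checked file path is a hypothetical example.
import subprocess

from rich.console import Console

from crackerjack.services.lsp_client import LSPClient

console = Console()
client = LSPClient(console)
files = ["crackerjack/api.py"]  # example input

if client.is_server_running():
    # Fast path: reuse the running Zuban LSP server for diagnostics.
    diagnostics = client.check_files(files)
    console.print(client.format_diagnostics(diagnostics))
    failed = any(diagnostics.values())
else:
    # Slow path: spawn zuban directly, as the hook does when no server is up.
    failed = subprocess.run(["zuban", "check", *files], timeout=120).returncode != 0

raise SystemExit(1 if failed else 0)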
crackerjack/intelligence/adaptive_learning.py
@@ -34,7 +34,7 @@ class AgentPerformanceMetrics:
     average_execution_time: float = 0.0
     average_confidence: float = 0.0
     success_rate: float = 0.0
-    capability_success_rates: dict[str, float] = field(default_factory=dict)
+    capability_success_rates: dict[str, float] = field(default_factory=dict[str, t.Any])
     recent_performance_trend: float = 0.0
     last_updated: datetime = field(default_factory=datetime.now)
 
@@ -357,7 +357,12 @@ class AdaptiveLearningSystem:
         return insights
 
     def _group_capability_performance(self) -> dict[str, dict[str, list[bool]]]:
-
+        def make_inner_defaultdict() -> defaultdict[str, list[bool]]:
+            return defaultdict(list)
+
+        capability_performance: dict[str, dict[str, list[bool]]] = defaultdict(
+            make_inner_defaultdict
+        )
 
         for record in self._execution_records[-100:]:
             for capability in record.task_capabilities:
@@ -365,7 +370,7 @@ class AdaptiveLearningSystem:
                     record.success
                 )
 
-        return dict(capability_performance)
+        return dict[str, t.Any](capability_performance)
 
     def _find_capability_experts(
         self, capability: str, agents: dict[str, list[bool]]
@@ -398,7 +403,12 @@ class AdaptiveLearningSystem:
         return self._extract_significant_failure_insights(failure_patterns)
 
     def _group_failure_patterns(self) -> dict[str, dict[str, int]]:
-
+        def make_inner_defaultdict() -> defaultdict[str, int]:
+            return defaultdict(int)
+
+        failure_patterns: dict[str, dict[str, int]] = defaultdict(
+            make_inner_defaultdict
+        )
 
         for record in self._execution_records[-100:]:
             if not record.success and record.error_message:
@@ -406,7 +416,7 @@ class AdaptiveLearningSystem:
                 failure_patterns[record.agent_name][error_type] += 1
 
         return {
-            agent_name: dict(patterns)
+            agent_name: dict[str, t.Any](patterns)
            for agent_name, patterns in failure_patterns.items()
        }
 
@@ -471,7 +481,12 @@ class AdaptiveLearningSystem:
         return insights
 
     def _group_task_performance(self) -> dict[str, dict[str, list[bool]]]:
-
+        def make_inner_defaultdict() -> defaultdict[str, list[bool]]:
+            return defaultdict(list)
+
+        task_performance: dict[str, dict[str, list[bool]]] = defaultdict(
+            make_inner_defaultdict
+        )
 
         for record in self._execution_records[-100:]:
             if record.task_hash:
@@ -479,7 +494,7 @@ class AdaptiveLearningSystem:
                     record.success
                 )
 
-        return dict(task_performance)
+        return dict[str, t.Any](task_performance)
 
     def _find_best_performing_agent(
         self, agents: dict[str, list[bool]]
@@ -521,7 +536,7 @@ class AdaptiveLearningSystem:
                     "task_pattern": task_hash,
                     "success_rate": best_rate,
                     "example_task": example_task,
-                    "competing_agents": list(agents.keys()),
+                    "competing_agents": list[t.Any](agents.keys()),
                 },
             )
 
@@ -647,7 +662,7 @@ class AdaptiveLearningSystem:
                 "trend": metrics.recent_performance_trend,
             }
 
-        insights_by_type = defaultdict(int)
+        insights_by_type: dict[str, int] = defaultdict(int)
         for insight in self._learning_insights:
             insights_by_type[insight.insight_type] += 1
 
@@ -657,7 +672,7 @@ class AdaptiveLearningSystem:
             "recent_success_rate": recent_success_rate,
             "agents_tracked": len(self._agent_metrics),
             "insights_discovered": len(self._learning_insights),
-            "insights_by_type": dict(insights_by_type),
+            "insights_by_type": dict[str, t.Any](insights_by_type),
             "top_performers": sorted(
                 agent_summary.items(),
                 key=lambda x: x[1]["success_rate"],
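The recurring change in these hunks replaces untyped defaultdict factories with small named, annotated factory functions and parameterized constructors, which appears intended to give static type checkers (such as the Zuban/LSP tooling introduced in this release) concrete types to infer for the nested mappings. A self-contained illustration of the pattern follows; the make_inner_defaultdict name and the outer annotation mirror the diff, while the sample keys are invented for the example.

# Illustration of the typed nested-defaultdict pattern adopted above; the
# sample keys ("refactoring", "RefactoringAgent") are hypothetical.
from collections import defaultdict


def make_inner_defaultdict() -> defaultdict[str, list[bool]]:
    return defaultdict(list)


capability_performance: dict[str, dict[str, list[bool]]] = defaultdict(
    make_inner_defaultdict
)

# Missing keys are created on demand at both levels, with value types the
# checker can verify because the factory is annotated rather than a bare lambda.
capability_performance["refactoring"]["RefactoringAgent"].append(True)
assert capability_performance["refactoring"]["RefactoringAgent"] == [True]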
crackerjack/intelligence/agent_orchestrator.py
@@ -90,8 +90,6 @@ class AgentOrchestrator:
             result = await self._execute_sequential(request, candidates)
         elif request.strategy == ExecutionStrategy.CONSENSUS:
             result = await self._execute_consensus(request, candidates)
-        else:
-            result = await self._execute_single_best(request, candidates)
 
         execution_time = asyncio.get_event_loop().time() - start_time
         result.execution_time = execution_time
@@ -243,7 +241,6 @@ class AgentOrchestrator:
                 self.logger.warning(
                     f"Sequential agent {candidate.agent.metadata.name} failed: {e}"
                 )
-                continue
 
         return ExecutionResult(
             success=False,
@@ -368,7 +365,7 @@ class AgentOrchestrator:
 
         return result
 
-    def _map_task_to_issue_type(self, task: TaskDescription):
+    def _map_task_to_issue_type(self, task: TaskDescription) -> t.Any:
         from crackerjack.agents.base import IssueType
 
         context_map = {
@@ -395,7 +392,7 @@ class AgentOrchestrator:
 
         return IssueType.FORMATTING
 
-    def _map_task_priority_to_severity(self, task: TaskDescription):
+    def _map_task_priority_to_severity(self, task: TaskDescription) -> t.Any:
         from crackerjack.agents.base import Priority
 
         if task.priority >= 80: