crackerjack 0.31.10__py3-none-any.whl → 0.31.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of crackerjack might be problematic. Click here for more details.
- crackerjack/CLAUDE.md +288 -705
- crackerjack/__main__.py +22 -8
- crackerjack/agents/__init__.py +0 -3
- crackerjack/agents/architect_agent.py +0 -43
- crackerjack/agents/base.py +1 -9
- crackerjack/agents/coordinator.py +2 -148
- crackerjack/agents/documentation_agent.py +109 -81
- crackerjack/agents/dry_agent.py +122 -97
- crackerjack/agents/formatting_agent.py +3 -16
- crackerjack/agents/import_optimization_agent.py +1174 -130
- crackerjack/agents/performance_agent.py +956 -188
- crackerjack/agents/performance_helpers.py +229 -0
- crackerjack/agents/proactive_agent.py +1 -48
- crackerjack/agents/refactoring_agent.py +516 -246
- crackerjack/agents/refactoring_helpers.py +282 -0
- crackerjack/agents/security_agent.py +393 -90
- crackerjack/agents/test_creation_agent.py +1776 -120
- crackerjack/agents/test_specialist_agent.py +59 -15
- crackerjack/agents/tracker.py +0 -102
- crackerjack/api.py +145 -37
- crackerjack/cli/handlers.py +48 -30
- crackerjack/cli/interactive.py +11 -11
- crackerjack/cli/options.py +66 -4
- crackerjack/code_cleaner.py +808 -148
- crackerjack/config/global_lock_config.py +110 -0
- crackerjack/config/hooks.py +43 -64
- crackerjack/core/async_workflow_orchestrator.py +247 -97
- crackerjack/core/autofix_coordinator.py +192 -109
- crackerjack/core/enhanced_container.py +46 -63
- crackerjack/core/file_lifecycle.py +549 -0
- crackerjack/core/performance.py +9 -8
- crackerjack/core/performance_monitor.py +395 -0
- crackerjack/core/phase_coordinator.py +281 -94
- crackerjack/core/proactive_workflow.py +9 -58
- crackerjack/core/resource_manager.py +501 -0
- crackerjack/core/service_watchdog.py +490 -0
- crackerjack/core/session_coordinator.py +4 -8
- crackerjack/core/timeout_manager.py +504 -0
- crackerjack/core/websocket_lifecycle.py +475 -0
- crackerjack/core/workflow_orchestrator.py +343 -209
- crackerjack/dynamic_config.py +50 -9
- crackerjack/errors.py +3 -4
- crackerjack/executors/async_hook_executor.py +63 -13
- crackerjack/executors/cached_hook_executor.py +14 -14
- crackerjack/executors/hook_executor.py +100 -37
- crackerjack/executors/hook_lock_manager.py +856 -0
- crackerjack/executors/individual_hook_executor.py +120 -86
- crackerjack/intelligence/__init__.py +0 -7
- crackerjack/intelligence/adaptive_learning.py +13 -86
- crackerjack/intelligence/agent_orchestrator.py +15 -78
- crackerjack/intelligence/agent_registry.py +12 -59
- crackerjack/intelligence/agent_selector.py +31 -92
- crackerjack/intelligence/integration.py +1 -41
- crackerjack/interactive.py +9 -9
- crackerjack/managers/async_hook_manager.py +25 -8
- crackerjack/managers/hook_manager.py +9 -9
- crackerjack/managers/publish_manager.py +57 -59
- crackerjack/managers/test_command_builder.py +6 -36
- crackerjack/managers/test_executor.py +9 -61
- crackerjack/managers/test_manager.py +17 -63
- crackerjack/managers/test_manager_backup.py +77 -127
- crackerjack/managers/test_progress.py +4 -23
- crackerjack/mcp/cache.py +5 -12
- crackerjack/mcp/client_runner.py +10 -10
- crackerjack/mcp/context.py +64 -6
- crackerjack/mcp/dashboard.py +14 -11
- crackerjack/mcp/enhanced_progress_monitor.py +55 -55
- crackerjack/mcp/file_monitor.py +72 -42
- crackerjack/mcp/progress_components.py +103 -84
- crackerjack/mcp/progress_monitor.py +122 -49
- crackerjack/mcp/rate_limiter.py +12 -12
- crackerjack/mcp/server_core.py +16 -22
- crackerjack/mcp/service_watchdog.py +26 -26
- crackerjack/mcp/state.py +15 -0
- crackerjack/mcp/tools/core_tools.py +95 -39
- crackerjack/mcp/tools/error_analyzer.py +6 -32
- crackerjack/mcp/tools/execution_tools.py +1 -56
- crackerjack/mcp/tools/execution_tools_backup.py +35 -131
- crackerjack/mcp/tools/intelligence_tool_registry.py +0 -36
- crackerjack/mcp/tools/intelligence_tools.py +2 -55
- crackerjack/mcp/tools/monitoring_tools.py +308 -145
- crackerjack/mcp/tools/proactive_tools.py +12 -42
- crackerjack/mcp/tools/progress_tools.py +23 -15
- crackerjack/mcp/tools/utility_tools.py +3 -40
- crackerjack/mcp/tools/workflow_executor.py +40 -60
- crackerjack/mcp/websocket/app.py +0 -3
- crackerjack/mcp/websocket/endpoints.py +206 -268
- crackerjack/mcp/websocket/jobs.py +213 -66
- crackerjack/mcp/websocket/server.py +84 -6
- crackerjack/mcp/websocket/websocket_handler.py +137 -29
- crackerjack/models/config_adapter.py +3 -16
- crackerjack/models/protocols.py +162 -3
- crackerjack/models/resource_protocols.py +454 -0
- crackerjack/models/task.py +3 -3
- crackerjack/monitoring/__init__.py +0 -0
- crackerjack/monitoring/ai_agent_watchdog.py +25 -71
- crackerjack/monitoring/regression_prevention.py +28 -87
- crackerjack/orchestration/advanced_orchestrator.py +44 -78
- crackerjack/orchestration/coverage_improvement.py +10 -60
- crackerjack/orchestration/execution_strategies.py +16 -16
- crackerjack/orchestration/test_progress_streamer.py +61 -53
- crackerjack/plugins/base.py +1 -1
- crackerjack/plugins/managers.py +22 -20
- crackerjack/py313.py +65 -21
- crackerjack/services/backup_service.py +467 -0
- crackerjack/services/bounded_status_operations.py +627 -0
- crackerjack/services/cache.py +7 -9
- crackerjack/services/config.py +35 -52
- crackerjack/services/config_integrity.py +5 -16
- crackerjack/services/config_merge.py +542 -0
- crackerjack/services/contextual_ai_assistant.py +17 -19
- crackerjack/services/coverage_ratchet.py +44 -73
- crackerjack/services/debug.py +25 -39
- crackerjack/services/dependency_monitor.py +52 -50
- crackerjack/services/enhanced_filesystem.py +14 -11
- crackerjack/services/file_hasher.py +1 -1
- crackerjack/services/filesystem.py +1 -12
- crackerjack/services/git.py +71 -47
- crackerjack/services/health_metrics.py +31 -27
- crackerjack/services/initialization.py +276 -428
- crackerjack/services/input_validator.py +760 -0
- crackerjack/services/log_manager.py +16 -16
- crackerjack/services/logging.py +7 -6
- crackerjack/services/metrics.py +43 -43
- crackerjack/services/pattern_cache.py +2 -31
- crackerjack/services/pattern_detector.py +26 -63
- crackerjack/services/performance_benchmarks.py +20 -45
- crackerjack/services/regex_patterns.py +2887 -0
- crackerjack/services/regex_utils.py +537 -0
- crackerjack/services/secure_path_utils.py +683 -0
- crackerjack/services/secure_status_formatter.py +534 -0
- crackerjack/services/secure_subprocess.py +605 -0
- crackerjack/services/security.py +47 -10
- crackerjack/services/security_logger.py +492 -0
- crackerjack/services/server_manager.py +109 -50
- crackerjack/services/smart_scheduling.py +8 -25
- crackerjack/services/status_authentication.py +603 -0
- crackerjack/services/status_security_manager.py +442 -0
- crackerjack/services/thread_safe_status_collector.py +546 -0
- crackerjack/services/tool_version_service.py +1 -23
- crackerjack/services/unified_config.py +36 -58
- crackerjack/services/validation_rate_limiter.py +269 -0
- crackerjack/services/version_checker.py +9 -40
- crackerjack/services/websocket_resource_limiter.py +572 -0
- crackerjack/slash_commands/__init__.py +52 -2
- crackerjack/tools/__init__.py +0 -0
- crackerjack/tools/validate_input_validator_patterns.py +262 -0
- crackerjack/tools/validate_regex_patterns.py +198 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/METADATA +197 -12
- crackerjack-0.31.13.dist-info/RECORD +178 -0
- crackerjack/cli/facade.py +0 -104
- crackerjack-0.31.10.dist-info/RECORD +0 -149
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/WHEEL +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/entry_points.txt +0 -0
- {crackerjack-0.31.10.dist-info → crackerjack-0.31.13.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,627 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Bounded Status Operations to prevent DoS attacks.
|
|
3
|
+
|
|
4
|
+
Provides comprehensive operation limits, circuit breakers, and
|
|
5
|
+
resource protection for status collection operations.
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import asyncio
|
|
9
|
+
import time
|
|
10
|
+
import typing as t
|
|
11
|
+
from collections import defaultdict, deque
|
|
12
|
+
from dataclasses import dataclass, field
|
|
13
|
+
from enum import Enum
|
|
14
|
+
from threading import RLock
|
|
15
|
+
|
|
16
|
+
from .security_logger import SecurityEventLevel, SecurityEventType, get_security_logger
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class OperationState(str, Enum):
    """Circuit-breaker state for a single operation type.

    Subclasses ``str`` so values serialize directly into JSON status
    payloads.
    """

    # Requests flow normally.
    CLOSED = "closed"
    # Breaker tripped: requests are rejected until the timeout elapses.
    OPEN = "open"
    # Probing: a request is allowed through to test recovery.
    HALF_OPEN = "half_open"
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
@dataclass
class OperationLimits:
    """Configurable ceilings applied to status operations.

    All defaults are deliberately conservative; callers may pass a
    customized instance to relax or tighten individual limits.
    """

    max_concurrent_operations: int = 10  # simultaneous in-flight operations
    max_operations_per_minute: int = 60  # per-client sliding-window rate
    max_operation_duration: float = 30.0  # seconds before monitor flags it
    max_memory_usage_mb: int = 50  # aggregate memory budget
    max_cpu_time_seconds: float = 5.0  # per-operation CPU budget
    max_file_operations: int = 100  # per-operation file-op budget
    timeout_seconds: float = 15.0  # hard asyncio timeout per operation
    circuit_breaker_threshold: int = 5  # failures before the breaker opens
    circuit_breaker_timeout: float = 60.0  # seconds before half-open probe
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
@dataclass
|
|
43
|
+
class OperationMetrics:
|
|
44
|
+
"""Metrics for operation tracking."""
|
|
45
|
+
|
|
46
|
+
operation_id: str
|
|
47
|
+
operation_type: str
|
|
48
|
+
client_id: str
|
|
49
|
+
start_time: float = field(default_factory=time.time)
|
|
50
|
+
end_time: float | None = None
|
|
51
|
+
memory_usage: int = 0
|
|
52
|
+
cpu_time: float = 0.0
|
|
53
|
+
file_operations: int = 0
|
|
54
|
+
success: bool | None = None
|
|
55
|
+
error_message: str | None = None
|
|
56
|
+
|
|
57
|
+
@property
|
|
58
|
+
def duration(self) -> float:
|
|
59
|
+
"""Get operation duration in seconds."""
|
|
60
|
+
end = self.end_time or time.time()
|
|
61
|
+
return end - self.start_time
|
|
62
|
+
|
|
63
|
+
@property
|
|
64
|
+
def is_completed(self) -> bool:
|
|
65
|
+
"""Check if operation is completed."""
|
|
66
|
+
return self.end_time is not None
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class OperationLimitExceededError(Exception):
    """Signals that a concurrency, rate, or resource limit was exceeded."""
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
class CircuitBreakerOpenError(Exception):
    """Signals that the circuit breaker for an operation type is open."""
|
|
79
|
+
|
|
80
|
+
|
|
81
|
+
class BoundedStatusOperations:
    """
    Bounded operations manager for status collection.

    Features:
    - Concurrent operation limits
    - Rate limiting per client (sliding one-minute window)
    - Memory and CPU usage monitoring
    - Circuit breaker pattern for fault tolerance (per operation type)
    - Operation timeout enforcement
    - Comprehensive metrics collection

    Thread-safety: all shared state is guarded by a re-entrant lock so the
    synchronous bookkeeping methods may be called from any thread.
    """

    def __init__(self, limits: OperationLimits | None = None) -> None:
        """
        Initialize bounded operations manager.

        Args:
            limits: Operation limits configuration; defaults are used when
                None.
        """
        self.limits = limits or OperationLimits()
        self.security_logger = get_security_logger()

        # Thread-safe operation tracking.
        self._lock = RLock()
        self._active_operations: dict[str, OperationMetrics] = {}
        # Per-client timestamps of recent operation starts; bounded so a
        # single client cannot grow memory without limit.
        self._client_operations: dict[str, deque[float]] = defaultdict(
            lambda: deque(maxlen=100)
        )
        self._operation_history: list[OperationMetrics] = []

        # Circuit breaker tracking, keyed by operation type.
        self._circuit_states: dict[str, OperationState] = defaultdict(
            lambda: OperationState.CLOSED
        )
        self._failure_counts: dict[str, int] = defaultdict(int)
        self._last_failure_times: dict[str, float] = {}

        # Aggregate resource usage tracking.  Memory is a point-in-time
        # figure derived from in-flight operations; CPU time and file
        # operations accumulate over the manager's lifetime.
        self._total_memory_usage = 0
        self._total_cpu_time = 0.0
        self._total_file_operations = 0

        # Known operation types (informational registry).
        self._operation_types = {
            "collect_status": "Status collection operation",
            "get_jobs": "Job information retrieval",
            "get_services": "Service status check",
            "get_metrics": "Metrics collection",
            "health_check": "Health check operation",
        }

    async def execute_bounded_operation(
        self,
        operation_type: str,
        client_id: str,
        operation_func: t.Callable[..., t.Awaitable[t.Any]],
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """
        Execute a bounded status operation with comprehensive limits.

        Args:
            operation_type: Type of operation being performed
            client_id: Client identifier
            operation_func: Async function to execute
            *args: Positional arguments for operation_func
            **kwargs: Keyword arguments for operation_func

        Returns:
            Result of the operation

        Raises:
            OperationLimitExceededError: If operation limits exceeded
            CircuitBreakerOpenError: If circuit breaker is open
            asyncio.TimeoutError: If operation times out
        """
        # Reject immediately if the breaker for this operation type is open.
        self._check_circuit_breaker(operation_type)

        # Validate limits and reserve a slot; raises on violation.
        operation_id = self._validate_and_reserve_operation(operation_type, client_id)

        metrics = OperationMetrics(
            operation_id=operation_id,
            operation_type=operation_type,
            client_id=client_id,
        )

        try:
            # Register as in-flight so concurrency checks see it.
            with self._lock:
                self._active_operations[operation_id] = metrics

            # Execute with timeout enforcement and resource monitoring.
            result = await self._execute_with_monitoring(
                operation_func, metrics, *args, **kwargs
            )

            metrics.success = True
            metrics.end_time = time.time()

            # A success may close a half-open breaker.
            self._record_operation_success(operation_type)

            self.security_logger.log_security_event(
                event_type=SecurityEventType.OPERATION_SUCCESS,
                level=SecurityEventLevel.LOW,
                message=f"Operation completed: {operation_type}",
                client_id=client_id,
                operation=operation_type,
                additional_data={
                    "operation_id": operation_id,
                    "duration": metrics.duration,
                    "memory_usage": metrics.memory_usage,
                    "cpu_time": metrics.cpu_time,
                    "file_operations": metrics.file_operations,
                },
            )

            return result

        except Exception as e:
            metrics.success = False
            metrics.end_time = time.time()
            metrics.error_message = str(e)

            # A failure may trip the breaker for this operation type.
            self._record_operation_failure(operation_type)

            self.security_logger.log_security_event(
                event_type=SecurityEventType.OPERATION_FAILURE,
                level=SecurityEventLevel.HIGH,
                message=f"Operation failed: {operation_type} - {e}",
                client_id=client_id,
                operation=operation_type,
                additional_data={
                    "operation_id": operation_id,
                    "duration": metrics.duration,
                    "error": str(e),
                },
            )

            raise

        finally:
            # Always remove from the active set and record history.
            self._cleanup_operation(operation_id, metrics)

    def _check_circuit_breaker(self, operation_type: str) -> None:
        """Check if circuit breaker allows operation; raise if open.

        Transitions OPEN -> HALF_OPEN once ``circuit_breaker_timeout``
        seconds have elapsed since the last failure.

        NOTE(review): in HALF_OPEN state every concurrent caller is let
        through, not just a single probe request — confirm this is the
        intended (more permissive) variant of the pattern.
        """
        current_time = time.time()

        with self._lock:
            state = self._circuit_states[operation_type]
            failure_count = self._failure_counts[operation_type]
            last_failure = self._last_failure_times.get(operation_type, 0)

            if state == OperationState.OPEN:
                if current_time - last_failure >= self.limits.circuit_breaker_timeout:
                    # Cool-down elapsed: allow a trial request.
                    self._circuit_states[operation_type] = OperationState.HALF_OPEN
                    self.security_logger.log_security_event(
                        event_type=SecurityEventType.CIRCUIT_BREAKER_HALF_OPEN,
                        level=SecurityEventLevel.LOW,
                        message=f"Circuit breaker half-open: {operation_type}",
                        operation=operation_type,
                    )
                else:
                    # Still cooling down: reject.
                    self.security_logger.log_security_event(
                        event_type=SecurityEventType.CIRCUIT_BREAKER_OPEN,
                        level=SecurityEventLevel.MEDIUM,
                        message=f"Circuit breaker open: {operation_type}",
                        operation=operation_type,
                        additional_data={
                            "failure_count": failure_count,
                            "time_remaining": self.limits.circuit_breaker_timeout
                            - (current_time - last_failure),
                        },
                    )
                    raise CircuitBreakerOpenError(
                        f"Circuit breaker open for {operation_type}"
                    )

    def _validate_and_reserve_operation(
        self, operation_type: str, client_id: str
    ) -> str:
        """Validate operation can be started and reserve resources.

        Checks concurrency, per-client rate, and memory limits in turn,
        logging and raising ``OperationLimitExceededError`` on violation.

        Returns:
            A unique operation id for the admitted operation.
        """
        current_time = time.time()
        # NOTE(review): millisecond-resolution ids can collide if the same
        # client starts two operations of the same type within 1 ms.
        operation_id = f"{operation_type}_{client_id}_{int(current_time * 1000)}"

        with self._lock:
            # Concurrent operation limit.
            if len(self._active_operations) >= self.limits.max_concurrent_operations:
                self.security_logger.log_security_event(
                    event_type=SecurityEventType.RATE_LIMIT_EXCEEDED,
                    level=SecurityEventLevel.MEDIUM,
                    message=f"Max concurrent operations exceeded: {len(self._active_operations)}",
                    client_id=client_id,
                    operation=operation_type,
                )
                raise OperationLimitExceededError(
                    f"Maximum concurrent operations exceeded: {len(self._active_operations)}"
                )

            # Per-client sliding-window rate limit.
            client_ops = self._client_operations[client_id]

            # Drop timestamps older than one minute.
            while client_ops and current_time - client_ops[0] > 60.0:
                client_ops.popleft()

            if len(client_ops) >= self.limits.max_operations_per_minute:
                self.security_logger.log_security_event(
                    event_type=SecurityEventType.RATE_LIMIT_EXCEEDED,
                    level=SecurityEventLevel.MEDIUM,
                    message=f"Client operation rate limit exceeded: {len(client_ops)}",
                    client_id=client_id,
                    operation=operation_type,
                )
                raise OperationLimitExceededError(
                    f"Operation rate limit exceeded: {len(client_ops)} operations/min"
                )

            # Memory limit.  FIX: the previous implementation compared
            # against self._total_memory_usage, which was only ever
            # decremented (never incremented), so this guard could never
            # trip.  Use the live figures of in-flight operations instead.
            active_memory = sum(
                m.memory_usage for m in self._active_operations.values()
            )
            if active_memory >= self.limits.max_memory_usage_mb * 1024 * 1024:
                self.security_logger.log_security_event(
                    event_type=SecurityEventType.RESOURCE_EXHAUSTED,
                    level=SecurityEventLevel.HIGH,
                    message=f"Memory limit exceeded: {active_memory / 1024 / 1024:.1f}MB",
                    client_id=client_id,
                    operation=operation_type,
                )
                raise OperationLimitExceededError(
                    f"Memory limit exceeded: {active_memory / 1024 / 1024:.1f}MB"
                )

            # Admitted: count this start against the client's window.
            client_ops.append(current_time)

        return operation_id

    async def _execute_with_monitoring(
        self,
        operation_func: t.Callable[..., t.Awaitable[t.Any]],
        metrics: OperationMetrics,
        *args: t.Any,
        **kwargs: t.Any,
    ) -> t.Any:
        """Execute operation with resource monitoring and timeout.

        Runs a background monitor task that samples CPU/memory while the
        operation executes; the monitor is always cancelled afterwards.
        """
        monitor_task = asyncio.create_task(self._monitor_operation(metrics))

        try:
            result = await asyncio.wait_for(
                operation_func(*args, **kwargs),
                timeout=self.limits.timeout_seconds,
            )

            return result

        except TimeoutError:
            self.security_logger.log_security_event(
                event_type=SecurityEventType.OPERATION_TIMEOUT,
                level=SecurityEventLevel.HIGH,
                message=f"Operation timeout: {metrics.operation_type}",
                client_id=metrics.client_id,
                operation=metrics.operation_type,
                additional_data={
                    "operation_id": metrics.operation_id,
                    "timeout": self.limits.timeout_seconds,
                },
            )
            raise TimeoutError(
                f"Operation timed out after {self.limits.timeout_seconds}s"
            )

        finally:
            # Stop sampling regardless of outcome.
            monitor_task.cancel()
            try:
                await monitor_task
            except asyncio.CancelledError:
                pass

    async def _monitor_operation(self, metrics: OperationMetrics) -> None:
        """Monitor operation resource usage every 100ms until completion.

        Updates ``metrics.cpu_time`` and ``metrics.memory_usage`` in place
        and logs (but does not abort the operation) when per-operation CPU
        or duration limits are exceeded.
        """
        # Imported lazily so psutil is only required when monitoring runs.
        import os

        import psutil

        try:
            process = psutil.Process(os.getpid())
            initial_cpu_time = process.cpu_times().user + process.cpu_times().system

            while not metrics.is_completed:
                try:
                    # CPU time consumed since monitoring began.
                    current_cpu_time = (
                        process.cpu_times().user + process.cpu_times().system
                    )
                    metrics.cpu_time = current_cpu_time - initial_cpu_time

                    # NOTE(review): this records whole-process RSS, not the
                    # operation's own footprint — concurrent operations all
                    # report the same number.  Confirm this attribution is
                    # acceptable for the memory-limit guard.
                    metrics.memory_usage = process.memory_info().rss

                    # Soft limits: log and stop sampling, do not abort.
                    if metrics.cpu_time > self.limits.max_cpu_time_seconds:
                        self.security_logger.log_security_event(
                            event_type=SecurityEventType.RESOURCE_LIMIT_EXCEEDED,
                            level=SecurityEventLevel.MEDIUM,
                            message=f"CPU time limit exceeded: {metrics.cpu_time:.2f}s",
                            client_id=metrics.client_id,
                            operation=metrics.operation_type,
                            additional_data={"operation_id": metrics.operation_id},
                        )
                        break

                    if metrics.duration > self.limits.max_operation_duration:
                        self.security_logger.log_security_event(
                            event_type=SecurityEventType.OPERATION_DURATION_EXCEEDED,
                            level=SecurityEventLevel.MEDIUM,
                            message=f"Operation duration limit exceeded: {metrics.duration:.2f}s",
                            client_id=metrics.client_id,
                            operation=metrics.operation_type,
                            additional_data={"operation_id": metrics.operation_id},
                        )
                        break

                    await asyncio.sleep(0.1)  # Monitor every 100ms

                except psutil.NoSuchProcess:
                    break

        except Exception as e:
            # Monitoring failure shouldn't stop the operation.
            self.security_logger.log_security_event(
                event_type=SecurityEventType.MONITORING_ERROR,
                level=SecurityEventLevel.MEDIUM,
                message=f"Operation monitoring failed: {e}",
                client_id=metrics.client_id,
                operation=metrics.operation_type,
            )

    def _record_operation_success(self, operation_type: str) -> None:
        """Record successful operation for circuit breaker bookkeeping."""
        with self._lock:
            state = self._circuit_states[operation_type]

            if state == OperationState.HALF_OPEN:
                # Probe succeeded: close the breaker and reset failures.
                self._circuit_states[operation_type] = OperationState.CLOSED
                self._failure_counts[operation_type] = 0

                self.security_logger.log_security_event(
                    event_type=SecurityEventType.CIRCUIT_BREAKER_CLOSED,
                    level=SecurityEventLevel.LOW,
                    message=f"Circuit breaker closed: {operation_type}",
                    operation=operation_type,
                )
            elif state == OperationState.CLOSED:
                # Any success while closed resets the failure streak.
                self._failure_counts[operation_type] = 0

    def _record_operation_failure(self, operation_type: str) -> None:
        """Record failed operation; trips the breaker past the threshold."""
        current_time = time.time()

        with self._lock:
            self._failure_counts[operation_type] += 1
            self._last_failure_times[operation_type] = current_time

            state = self._circuit_states[operation_type]
            failure_count = self._failure_counts[operation_type]

            if failure_count >= self.limits.circuit_breaker_threshold:
                if state != OperationState.OPEN:
                    # Threshold crossed: open the breaker.
                    self._circuit_states[operation_type] = OperationState.OPEN

                    self.security_logger.log_security_event(
                        event_type=SecurityEventType.CIRCUIT_BREAKER_OPEN,
                        level=SecurityEventLevel.HIGH,
                        message=f"Circuit breaker opened: {operation_type}",
                        operation=operation_type,
                        additional_data={
                            "failure_count": failure_count,
                            "threshold": self.limits.circuit_breaker_threshold,
                        },
                    )

    def _cleanup_operation(self, operation_id: str, metrics: OperationMetrics) -> None:
        """Clean up a finished operation and update aggregate metrics."""
        with self._lock:
            # Remove from active operations.
            if operation_id in self._active_operations:
                del self._active_operations[operation_id]

            # Add to history (keep last 1000 operations).
            self._operation_history.append(metrics)
            if len(self._operation_history) > 1000:
                self._operation_history.pop(0)

            # FIX: the previous implementation only ever subtracted from
            # _total_memory_usage (nothing added it), leaving the figure
            # pinned at 0.  Recompute it as the live total of in-flight
            # operations; CPU time and file operations accumulate.
            self._total_memory_usage = sum(
                m.memory_usage for m in self._active_operations.values()
            )
            self._total_cpu_time += metrics.cpu_time
            self._total_file_operations += metrics.file_operations

    def get_operation_status(self) -> dict[str, t.Any]:
        """Get current operation status, circuit breakers, and limits.

        Returns:
            A JSON-serializable snapshot covering active-operation counts,
            five-minute success/failure stats, breaker states, aggregate
            resource usage, and the configured limits.
        """
        with self._lock:
            current_time = time.time()

            # Stats over the last five minutes of history.
            recent_ops = [
                m
                for m in self._operation_history
                if current_time - m.start_time < 300  # Last 5 minutes
            ]

            successful_ops = [m for m in recent_ops if m.success is True]
            failed_ops = [m for m in recent_ops if m.success is False]

            return {
                "active_operations": len(self._active_operations),
                "max_concurrent": self.limits.max_concurrent_operations,
                "recent_operations": {
                    "total": len(recent_ops),
                    "successful": len(successful_ops),
                    "failed": len(failed_ops),
                    "success_rate": len(successful_ops) / len(recent_ops)
                    if recent_ops
                    else 1.0,
                },
                "circuit_breakers": {
                    op_type: {
                        "state": state.value,
                        "failure_count": self._failure_counts.get(op_type, 0),
                        "last_failure": self._last_failure_times.get(op_type),
                    }
                    for op_type, state in self._circuit_states.items()
                },
                "resource_usage": {
                    "memory_usage_mb": self._total_memory_usage / 1024 / 1024,
                    "memory_limit_mb": self.limits.max_memory_usage_mb,
                    "total_cpu_time": self._total_cpu_time,
                    "total_file_operations": self._total_file_operations,
                },
                "limits": {
                    "max_concurrent_operations": self.limits.max_concurrent_operations,
                    "max_operations_per_minute": self.limits.max_operations_per_minute,
                    "max_operation_duration": self.limits.max_operation_duration,
                    "timeout_seconds": self.limits.timeout_seconds,
                },
            }

    def reset_circuit_breaker(self, operation_type: str) -> bool:
        """
        Manually reset a circuit breaker.

        Args:
            operation_type: Type of operation

        Returns:
            True if circuit breaker was reset
        """
        with self._lock:
            if operation_type in self._circuit_states:
                self._circuit_states[operation_type] = OperationState.CLOSED
                self._failure_counts[operation_type] = 0
                # FIX: also drop the stale failure timestamp so status
                # reports don't show a last_failure for a reset breaker.
                self._last_failure_times.pop(operation_type, None)

                self.security_logger.log_security_event(
                    event_type=SecurityEventType.CIRCUIT_BREAKER_RESET,
                    level=SecurityEventLevel.LOW,
                    message=f"Circuit breaker manually reset: {operation_type}",
                    operation=operation_type,
                )

                return True

        return False
|
|
586
|
+
|
|
587
|
+
|
|
588
|
+
# Global singleton instance
|
|
589
|
+
# Global singleton instance, created lazily on first access.
_bounded_operations: BoundedStatusOperations | None = None


def get_bounded_status_operations(
    limits: OperationLimits | None = None,
) -> BoundedStatusOperations:
    """Return the process-wide bounded status operations instance.

    The instance is created on first call.  NOTE(review): ``limits`` is
    only honored by that first call — subsequent callers receive the
    existing singleton unchanged; confirm this is intended.
    """
    global _bounded_operations
    if _bounded_operations is None:
        _bounded_operations = BoundedStatusOperations(limits)
    return _bounded_operations
|
|
601
|
+
|
|
602
|
+
|
|
603
|
+
async def execute_bounded_status_operation(
    operation_type: str,
    client_id: str,
    operation_func: t.Callable[..., t.Awaitable[t.Any]],
    *args: t.Any,
    **kwargs: t.Any,
) -> t.Any:
    """
    Convenience function for bounded operation execution.

    Delegates to the global ``BoundedStatusOperations`` singleton.

    Args:
        operation_type: Type of operation
        client_id: Client identifier
        operation_func: Async function to execute
        *args: Positional arguments
        **kwargs: Keyword arguments

    Returns:
        Operation result
    """
    manager = get_bounded_status_operations()
    return await manager.execute_bounded_operation(
        operation_type,
        client_id,
        operation_func,
        *args,
        **kwargs,
    )
|
crackerjack/services/cache.py
CHANGED
|
@@ -30,12 +30,10 @@ class CacheEntry:
|
|
|
30
30
|
self.access_count += 1
|
|
31
31
|
|
|
32
32
|
def to_dict(self) -> dict[str, t.Any]:
|
|
33
|
-
"""Convert to JSON-serializable dict."""
|
|
34
33
|
return asdict(self)
|
|
35
34
|
|
|
36
35
|
@classmethod
|
|
37
36
|
def from_dict(cls, data: dict[str, t.Any]) -> "CacheEntry":
|
|
38
|
-
"""Create from dict loaded from JSON."""
|
|
39
37
|
return cls(**data)
|
|
40
38
|
|
|
41
39
|
|
|
@@ -256,26 +254,26 @@ class CrackerjackCache:
|
|
|
256
254
|
|
|
257
255
|
def get_file_hash(self, file_path: Path) -> str | None:
|
|
258
256
|
stat = file_path.stat()
|
|
259
|
-
cache_key = f"file_hash:{file_path}:{stat.st_mtime}:{stat.st_size}"
|
|
257
|
+
cache_key = f"file_hash: {file_path}: {stat.st_mtime}: {stat.st_size}"
|
|
260
258
|
return self.file_hash_cache.get(cache_key)
|
|
261
259
|
|
|
262
260
|
def set_file_hash(self, file_path: Path, file_hash: str) -> None:
|
|
263
261
|
stat = file_path.stat()
|
|
264
|
-
cache_key = f"file_hash:{file_path}:{stat.st_mtime}:{stat.st_size}"
|
|
262
|
+
cache_key = f"file_hash: {file_path}: {stat.st_mtime}: {stat.st_size}"
|
|
265
263
|
self.file_hash_cache.set(cache_key, file_hash, ttl_seconds=3600)
|
|
266
264
|
|
|
267
265
|
def get_config_data(self, config_key: str) -> t.Any | None:
|
|
268
|
-
return self.config_cache.get(f"config:{config_key}")
|
|
266
|
+
return self.config_cache.get(f"config: {config_key}")
|
|
269
267
|
|
|
270
268
|
def set_config_data(self, config_key: str, data: t.Any) -> None:
|
|
271
|
-
self.config_cache.set(f"config:{config_key}", data, ttl_seconds=7200)
|
|
269
|
+
self.config_cache.set(f"config: {config_key}", data, ttl_seconds=7200)
|
|
272
270
|
|
|
273
271
|
def invalidate_hook_cache(self, hook_name: str | None = None) -> None:
|
|
274
272
|
if hook_name:
|
|
275
273
|
keys_to_remove = [
|
|
276
274
|
key
|
|
277
275
|
for key in self.hook_results_cache._cache
|
|
278
|
-
if key.startswith(f"hook_result:{hook_name}:")
|
|
276
|
+
if key.startswith(f"hook_result: {hook_name}: ")
|
|
279
277
|
]
|
|
280
278
|
for key in keys_to_remove:
|
|
281
279
|
self.hook_results_cache.invalidate(key)
|
|
@@ -308,7 +306,7 @@ class CrackerjackCache:
|
|
|
308
306
|
|
|
309
307
|
def _get_hook_cache_key(self, hook_name: str, file_hashes: list[str]) -> str:
|
|
310
308
|
hash_signature = hashlib.md5(
|
|
311
|
-
",".join(sorted(file_hashes)).encode(),
|
|
309
|
+
", ".join(sorted(file_hashes)).encode(),
|
|
312
310
|
usedforsecurity=False,
|
|
313
311
|
).hexdigest()
|
|
314
|
-
return f"hook_result:{hook_name}:{hash_signature}"
|
|
312
|
+
return f"hook_result: {hook_name}: {hash_signature}"
|