claude-mpm 4.2.13__py3-none-any.whl → 4.2.14__py3-none-any.whl
This diff compares two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- claude_mpm/core/constants.py +65 -0
- claude_mpm/core/error_handler.py +625 -0
- claude_mpm/core/file_utils.py +770 -0
- claude_mpm/core/logging_utils.py +502 -0
- claude_mpm/dashboard/static/dist/components/code-tree.js +1 -1
- claude_mpm/dashboard/static/dist/components/file-viewer.js +1 -1
- claude_mpm/dashboard/static/dist/dashboard.js +1 -1
- claude_mpm/dashboard/static/dist/socket-client.js +1 -1
- claude_mpm/dashboard/static/js/components/code-simple.js +44 -1
- claude_mpm/dashboard/static/js/components/code-tree/tree-breadcrumb.js +353 -0
- claude_mpm/dashboard/static/js/components/code-tree/tree-constants.js +235 -0
- claude_mpm/dashboard/static/js/components/code-tree/tree-search.js +409 -0
- claude_mpm/dashboard/static/js/components/code-tree/tree-utils.js +435 -0
- claude_mpm/dashboard/static/js/components/code-tree.js +29 -5
- claude_mpm/dashboard/static/js/components/file-viewer.js +69 -27
- claude_mpm/dashboard/static/js/components/session-manager.js +1 -1
- claude_mpm/dashboard/static/js/components/working-directory.js +18 -9
- claude_mpm/dashboard/static/js/dashboard.js +55 -9
- claude_mpm/dashboard/static/js/shared/dom-helpers.js +396 -0
- claude_mpm/dashboard/static/js/shared/event-bus.js +330 -0
- claude_mpm/dashboard/static/js/shared/logger.js +385 -0
- claude_mpm/dashboard/static/js/shared/tooltip-service.js +253 -0
- claude_mpm/dashboard/static/js/socket-client.js +18 -0
- claude_mpm/dashboard/templates/index.html +21 -8
- claude_mpm/services/monitor/handlers/__init__.py +2 -1
- claude_mpm/services/monitor/handlers/file.py +263 -0
- claude_mpm/services/monitor/server.py +81 -1
- claude_mpm/services/socketio/handlers/file.py +40 -5
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/METADATA +1 -1
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/RECORD +34 -22
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/WHEEL +0 -0
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.2.13.dist-info → claude_mpm-4.2.14.dist-info}/top_level.txt +0 -0
claude_mpm/core/constants.py
CHANGED
@@ -4,6 +4,7 @@ This module consolidates all magic numbers and configuration constants
 that were previously scattered throughout the codebase.
 """

+from pathlib import Path
 from typing import Tuple


@@ -294,3 +295,67 @@ SOCKETIO_PORT_RANGE = NetworkConfig.SOCKETIO_PORT_RANGE
 QUERY_TIMEOUT = TimeoutConfig.QUERY_TIMEOUT
 MAX_RETRIES = RetryConfig.MAX_RETRIES
 DEFAULT_TIMEOUT = TimeoutConfig.DEFAULT_TIMEOUT
+
+
+# ==============================================================================
+# NEW ORGANIZED CONSTANTS (Phase 1 Refactoring)
+# ==============================================================================
+
+
+class NetworkPorts:
+    """Network port configuration."""
+
+    # Use existing values from NetworkConfig
+    DEFAULT_SOCKETIO = NetworkConfig.DEFAULT_SOCKETIO_PORT
+    DEFAULT_DASHBOARD = NetworkConfig.DEFAULT_DASHBOARD_PORT
+    PORT_RANGE_START = NetworkConfig.SOCKETIO_PORT_RANGE[0]
+    PORT_RANGE_END = NetworkConfig.SOCKETIO_PORT_RANGE[1]
+
+    @classmethod
+    def get_port_range(cls) -> range:
+        """Get the valid port range."""
+        return range(cls.PORT_RANGE_START, cls.PORT_RANGE_END + 1)
+
+
+class ProjectPaths:
+    """Project-specific paths and directories."""
+
+    # Claude directories
+    CLAUDE_DIR = ".claude"
+    CLAUDE_AGENTS_DIR = ".claude/agents"
+    CLAUDE_CONFIG_FILE = ".claude/config.yaml"
+
+    # MPM directories
+    MPM_DIR = ".claude-mpm"
+    MPM_SESSION_DIR = ".claude-mpm/session"
+    MPM_PROMPTS_DIR = ".claude-mpm/prompts"
+    MPM_LOGS_DIR = ".claude-mpm/logs"
+    MPM_CONFIG_DIR = ".claude-mpm/config"
+    MPM_MEMORY_DIR = ".claude-mpm/memory"
+    MPM_CACHE_DIR = ".claude-mpm/cache"
+
+    # Config files
+    MPM_CONFIG_FILE = "config.yaml"
+    AGENT_CONFIG_FILE = "agent_config.yaml"
+    EXPERIMENTAL_CONFIG = "experimental.json"
+    SOCKETIO_CONFIG = "socketio_config.yaml"
+
+    # Special files
+    EXPERIMENTAL_ACCEPTED = ".experimental_accepted"
+    VERSION_FILE = "VERSION"
+    BUILD_NUMBER_FILE = "BUILD_NUMBER"
+
+    @classmethod
+    def get_mpm_home(cls) -> Path:
+        """Get the MPM home directory."""
+        return Path.home() / cls.MPM_DIR
+
+    @classmethod
+    def get_project_mpm_dir(cls) -> Path:
+        """Get the project-specific MPM directory."""
+        return Path.cwd() / cls.MPM_DIR
+
+    @classmethod
+    def get_claude_dir(cls) -> Path:
+        """Get the Claude directory."""
+        return Path.cwd() / cls.CLAUDE_DIR
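
A minimal usage sketch for the new constants classes (illustrative only, not part of the diff; the port-probing and directory-resolution callers are assumptions):

    from claude_mpm.core.constants import NetworkPorts, ProjectPaths

    # Enumerate the configured SocketIO port range, e.g. when probing for a free port.
    candidate_ports = list(NetworkPorts.get_port_range())

    # Resolve per-project and per-user MPM directories from the path constants.
    project_dir = ProjectPaths.get_project_mpm_dir()  # <cwd>/.claude-mpm
    home_dir = ProjectPaths.get_mpm_home()            # ~/.claude-mpm
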
claude_mpm/core/error_handler.py
ADDED

@@ -0,0 +1,625 @@
+"""Centralized error handling framework for Claude MPM.
+
+This module provides standardized error handling utilities to replace
+200+ generic try/except blocks across the codebase with context-aware,
+properly logged, and recoverable error handling.
+"""
+
+import functools
+import sys
+import traceback
+from contextlib import contextmanager
+from enum import Enum
+from typing import Any, Callable, Dict, List, Optional, Type, TypeVar, Union
+
+from claude_mpm.core.constants import RetryConfig
+from claude_mpm.core.exceptions import MPMError
+from claude_mpm.core.logging_utils import get_logger
+
+logger = get_logger(__name__)
+
+T = TypeVar("T")
+
+
+# ==============================================================================
+# ERROR HANDLING STRATEGIES
+# ==============================================================================
+
+
+class ErrorStrategy(Enum):
+    """Error handling strategies."""
+
+    IGNORE = "ignore"  # Log and continue
+    RETRY = "retry"  # Retry the operation
+    FALLBACK = "fallback"  # Use fallback value/function
+    ESCALATE = "escalate"  # Re-raise the exception
+    RECOVER = "recover"  # Attempt recovery
+    TERMINATE = "terminate"  # Terminate the process
+
+
+class ErrorSeverity(Enum):
+    """Error severity levels."""
+
+    DEBUG = "debug"
+    INFO = "info"
+    WARNING = "warning"
+    ERROR = "error"
+    CRITICAL = "critical"
+
+
+# ==============================================================================
+# ERROR HANDLER
+# ==============================================================================
+
+
+class ErrorHandler:
+    """Centralized error handler with context and recovery capabilities."""
+
+    def __init__(
+        self,
+        logger: Optional[Any] = None,
+        default_strategy: ErrorStrategy = ErrorStrategy.ESCALATE,
+        capture_traceback: bool = True,
+    ):
+        """Initialize error handler.
+
+        Args:
+            logger: Logger instance (uses module logger if None)
+            default_strategy: Default error handling strategy
+            capture_traceback: Whether to capture full tracebacks
+        """
+        self.logger = logger or get_logger(__name__)
+        self.default_strategy = default_strategy
+        self.capture_traceback = capture_traceback
+        self.error_history: List[Dict[str, Any]] = []
+        self.recovery_handlers: Dict[Type[Exception], Callable] = {}
+
+    def handle(
+        self,
+        error: Exception,
+        context: Optional[str] = None,
+        operation: Optional[str] = None,
+        strategy: Optional[ErrorStrategy] = None,
+        severity: ErrorSeverity = ErrorSeverity.ERROR,
+        fallback_value: Any = None,
+        max_retries: int = 0,
+        **extra_context,
+    ) -> Any:
+        """Handle an error with the specified strategy.
+
+        Args:
+            error: The exception to handle
+            context: Context description for logging
+            operation: Operation name that failed
+            strategy: Error handling strategy (uses default if None)
+            severity: Error severity level
+            fallback_value: Value to return for FALLBACK strategy
+            max_retries: Maximum retries for RETRY strategy
+            **extra_context: Additional context key-value pairs
+
+        Returns:
+            Result based on strategy (fallback value, None, or re-raises)
+        """
+        strategy = strategy or self.default_strategy
+
+        # Build error context
+        error_context = {
+            "error_type": type(error).__name__,
+            "error_message": str(error),
+            "operation": operation or "unknown",
+            "strategy": strategy.value,
+            "severity": severity.value,
+        }
+        error_context.update(extra_context)
+
+        # Add MPM error context if available
+        if isinstance(error, MPMError) and error.context:
+            error_context["mpm_context"] = error.context
+
+        # Capture traceback if enabled
+        if self.capture_traceback:
+            error_context["traceback"] = traceback.format_exc()
+
+        # Store in history
+        self.error_history.append(error_context)
+
+        # Log the error
+        self._log_error(error, context, severity, error_context)
+
+        # Apply strategy
+        if strategy == ErrorStrategy.IGNORE:
+            return None
+
+        if strategy == ErrorStrategy.RETRY:
+            return self._handle_retry(error, operation, max_retries, extra_context)
+
+        if strategy == ErrorStrategy.FALLBACK:
+            return fallback_value
+
+        if strategy == ErrorStrategy.ESCALATE:
+            raise error
+
+        if strategy == ErrorStrategy.RECOVER:
+            return self._handle_recovery(error, error_context)
+
+        if strategy == ErrorStrategy.TERMINATE:
+            self.logger.critical(f"Terminating due to critical error: {error}")
+            sys.exit(1)
+
+    def _log_error(
+        self,
+        error: Exception,
+        context: Optional[str],
+        severity: ErrorSeverity,
+        error_context: Dict[str, Any],
+    ) -> None:
+        """Log error with appropriate severity.
+
+        Args:
+            error: The exception
+            context: Context description
+            severity: Error severity
+            error_context: Full error context
+        """
+        message = f"{context or 'Error occurred'}: {error}"
+
+        if severity == ErrorSeverity.DEBUG:
+            self.logger.debug(message, extra=error_context)
+        elif severity == ErrorSeverity.INFO:
+            self.logger.info(message, extra=error_context)
+        elif severity == ErrorSeverity.WARNING:
+            self.logger.warning(message, extra=error_context)
+        elif severity == ErrorSeverity.ERROR:
+            self.logger.error(message, extra=error_context)
+        elif severity == ErrorSeverity.CRITICAL:
+            self.logger.critical(message, extra=error_context)
+
+    def _handle_retry(
+        self,
+        error: Exception,
+        operation: Optional[str],
+        max_retries: int,
+        context: Dict[str, Any],
+    ) -> None:
+        """Handle retry strategy.
+
+        Args:
+            error: The exception
+            operation: Operation name
+            max_retries: Maximum retries
+            context: Error context
+        """
+        # This would need to be called from a retry decorator
+        # For now, just log and escalate
+        self.logger.warning(f"Retry requested for {operation} but not in retry context")
+        raise error
+
+    def _handle_recovery(
+        self,
+        error: Exception,
+        error_context: Dict[str, Any],
+    ) -> Any:
+        """Attempt to recover from error.
+
+        Args:
+            error: The exception
+            error_context: Full error context
+
+        Returns:
+            Recovery result or re-raises
+        """
+        error_type = type(error)
+
+        # Check for registered recovery handler
+        if error_type in self.recovery_handlers:
+            try:
+                handler = self.recovery_handlers[error_type]
+                return handler(error, error_context)
+            except Exception as recovery_error:
+                self.logger.error(f"Recovery failed: {recovery_error}")
+                raise error
+
+        # No recovery handler available
+        self.logger.warning(f"No recovery handler for {error_type.__name__}")
+        raise error
+
+    def register_recovery(
+        self,
+        error_type: Type[Exception],
+        handler: Callable[[Exception, Dict], Any],
+    ) -> None:
+        """Register a recovery handler for an error type.
+
+        Args:
+            error_type: Exception class to handle
+            handler: Recovery function
+        """
+        self.recovery_handlers[error_type] = handler
+
+    def get_error_history(
+        self,
+        limit: Optional[int] = None,
+        operation: Optional[str] = None,
+    ) -> List[Dict[str, Any]]:
+        """Get error history.
+
+        Args:
+            limit: Maximum number of errors to return
+            operation: Filter by operation name
+
+        Returns:
+            List of error contexts
+        """
+        history = self.error_history
+
+        if operation:
+            history = [e for e in history if e.get("operation") == operation]
+
+        if limit:
+            history = history[-limit:]
+
+        return history
+
+    def clear_history(self) -> None:
+        """Clear error history."""
+        self.error_history.clear()
+
+
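
A usage sketch for the ErrorHandler class above (illustrative, not part of the diff; the config.json path and the recovery lambda are assumptions):

    import json
    from pathlib import Path

    from claude_mpm.core.error_handler import ErrorHandler, ErrorSeverity, ErrorStrategy

    handler = ErrorHandler(default_strategy=ErrorStrategy.FALLBACK)

    # Recovery handlers receive the exception and the collected error-context dict.
    handler.register_recovery(FileNotFoundError, lambda err, ctx: {})

    try:
        config = json.loads(Path("config.json").read_text())  # hypothetical config file
    except Exception as e:
        # With the FALLBACK default, handle() logs the error, records it in
        # error_history, and returns the fallback value instead of re-raising.
        config = handler.handle(
            e,
            context="loading configuration",
            operation="load_config",
            severity=ErrorSeverity.WARNING,
            fallback_value={},
        )
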
+# ==============================================================================
+# GLOBAL ERROR HANDLER
+# ==============================================================================
+
+# Global error handler instance
+_global_handler = ErrorHandler()
+
+
+def handle_error(
+    error: Exception,
+    context: Optional[str] = None,
+    operation: Optional[str] = None,
+    strategy: Optional[ErrorStrategy] = None,
+    severity: ErrorSeverity = ErrorSeverity.ERROR,
+    fallback_value: Any = None,
+    logger: Optional[Any] = None,
+    **extra_context,
+) -> Any:
+    """Handle an error using the global error handler.
+
+    This is the main function to use for error handling throughout the codebase.
+
+    Example:
+        try:
+            # operation
+        except SpecificError as e:
+            handle_error(e, context="operation_name", strategy=ErrorStrategy.FALLBACK,
+                         fallback_value=default_value)
+
+    Args:
+        error: The exception to handle
+        context: Context description for logging
+        operation: Operation name that failed
+        strategy: Error handling strategy
+        severity: Error severity level
+        fallback_value: Value to return for FALLBACK strategy
+        logger: Optional specific logger to use
+        **extra_context: Additional context key-value pairs
+
+    Returns:
+        Result based on strategy
+    """
+    if logger:
+        handler = ErrorHandler(logger=logger)
+    else:
+        handler = _global_handler
+
+    return handler.handle(
+        error=error,
+        context=context,
+        operation=operation,
+        strategy=strategy,
+        severity=severity,
+        fallback_value=fallback_value,
+        **extra_context,
+    )
+
+
+def register_recovery_handler(
+    error_type: Type[Exception],
+    handler: Callable[[Exception, Dict], Any],
+) -> None:
+    """Register a global recovery handler for an error type.
+
+    Args:
+        error_type: Exception class to handle
+        handler: Recovery function
+    """
+    _global_handler.register_recovery(error_type, handler)
+
+
+# ==============================================================================
+# ERROR HANDLING DECORATORS
+# ==============================================================================
+
+
+def with_error_handling(
+    strategy: ErrorStrategy = ErrorStrategy.ESCALATE,
+    severity: ErrorSeverity = ErrorSeverity.ERROR,
+    fallback_value: Any = None,
+    max_retries: int = 0,
+    retry_delay: float = 1.0,
+    exceptions: Union[Type[Exception], tuple] = Exception,
+):
+    """Decorator for automatic error handling.
+
+    Args:
+        strategy: Error handling strategy
+        severity: Error severity level
+        fallback_value: Fallback value or callable
+        max_retries: Maximum retry attempts
+        retry_delay: Delay between retries (seconds)
+        exceptions: Exception types to catch
+    """
+
+    def decorator(func: Callable[..., T]) -> Callable[..., T]:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs) -> T:
+            operation = func.__name__
+            retries = 0
+
+            while retries <= max_retries:
+                try:
+                    return func(*args, **kwargs)
+                except exceptions as e:
+                    if strategy == ErrorStrategy.RETRY and retries < max_retries:
+                        retries += 1
+                        logger.warning(
+                            f"Retrying {operation} (attempt {retries}/{max_retries})"
+                        )
+                        if retry_delay > 0:
+                            import time
+
+                            time.sleep(retry_delay)
+                        continue
+
+                    # Use fallback if it's callable
+                    fb_value = fallback_value
+                    if callable(fallback_value):
+                        try:
+                            fb_value = fallback_value(*args, **kwargs)
+                        except:
+                            fb_value = None
+
+                    return handle_error(
+                        error=e,
+                        operation=operation,
+                        strategy=strategy,
+                        severity=severity,
+                        fallback_value=fb_value,
+                    )
+
+            # Max retries exceeded
+            raise Exception(f"Max retries ({max_retries}) exceeded for {operation}")
+
+        return wrapper
+
+    return decorator
+
+
+def safe_operation(
+    fallback_value: Any = None,
+    log_errors: bool = True,
+    exceptions: Union[Type[Exception], tuple] = Exception,
+):
+    """Decorator for safe operations that should never fail.
+
+    Args:
+        fallback_value: Value to return on error
+        log_errors: Whether to log errors
+        exceptions: Exception types to catch
+    """
+
+    def decorator(func: Callable[..., T]) -> Callable[..., T]:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs) -> T:
+            try:
+                return func(*args, **kwargs)
+            except exceptions as e:
+                if log_errors:
+                    logger.warning(f"Safe operation {func.__name__} failed: {e}")
+
+                if callable(fallback_value):
+                    try:
+                        return fallback_value(*args, **kwargs)
+                    except:
+                        return None
+                return fallback_value
+
+        return wrapper
+
+    return decorator
+
+
+def retry_on_error(
+    max_retries: int = RetryConfig.MAX_RETRIES,
+    delay: float = RetryConfig.INITIAL_RETRY_DELAY,
+    exponential_backoff: bool = True,
+    exceptions: Union[Type[Exception], tuple] = Exception,
+):
+    """Decorator for automatic retry with exponential backoff.
+
+    Args:
+        max_retries: Maximum retry attempts
+        delay: Initial delay between retries (seconds)
+        exponential_backoff: Use exponential backoff
+        exceptions: Exception types to retry on
+    """
+
+    def decorator(func: Callable[..., T]) -> Callable[..., T]:
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs) -> T:
+            current_delay = delay
+            last_exception = None
+
+            for attempt in range(max_retries + 1):
+                try:
+                    return func(*args, **kwargs)
+                except exceptions as e:
+                    last_exception = e
+
+                    if attempt < max_retries:
+                        logger.debug(
+                            f"Retry {attempt + 1}/{max_retries} for {func.__name__} "
+                            f"after {current_delay:.2f}s"
+                        )
+
+                        import time
+
+                        time.sleep(current_delay)
+
+                        if exponential_backoff:
+                            current_delay = min(
+                                current_delay * RetryConfig.EXPONENTIAL_BASE,
+                                RetryConfig.MAX_RETRY_DELAY,
+                            )
+
+            # All retries exhausted
+            logger.error(f"All {max_retries} retries failed for {func.__name__}")
+            raise last_exception
+
+        return wrapper
+
+    return decorator
+
+
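
An illustrative sketch of the decorators defined above (not part of the diff; fetch_status, parse_count, and the URL handling are assumptions):

    from urllib.request import urlopen

    from claude_mpm.core.error_handler import retry_on_error, safe_operation

    @retry_on_error(max_retries=3, delay=0.5, exceptions=(OSError,))
    def fetch_status(url: str) -> int:
        # Retried up to three times, with exponential backoff, on network errors.
        with urlopen(url) as resp:
            return resp.status

    @safe_operation(fallback_value=0)
    def parse_count(raw: str) -> int:
        # Returns the fallback value 0 instead of raising on malformed input.
        return int(raw)
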
+# ==============================================================================
+# CONTEXT MANAGERS
+# ==============================================================================
+
+
+@contextmanager
+def error_context(
+    operation: str,
+    strategy: ErrorStrategy = ErrorStrategy.ESCALATE,
+    severity: ErrorSeverity = ErrorSeverity.ERROR,
+    fallback_value: Any = None,
+    **context,
+):
+    """Context manager for error handling.
+
+    Example:
+        with error_context("file_operation", strategy=ErrorStrategy.FALLBACK,
+                           fallback_value=None, file=filename):
+            # operations that might fail
+            pass
+
+    Args:
+        operation: Operation name
+        strategy: Error handling strategy
+        severity: Error severity
+        fallback_value: Fallback value
+        **context: Additional context
+    """
+    try:
+        yield
+    except Exception as e:
+        handle_error(
+            error=e,
+            operation=operation,
+            strategy=strategy,
+            severity=severity,
+            fallback_value=fallback_value,
+            **context,
+        )
+        if strategy == ErrorStrategy.FALLBACK:
+            return fallback_value
+        elif strategy != ErrorStrategy.ESCALATE:
+            return
+
+
+@contextmanager
+def suppress_errors(
+    *exceptions: Type[Exception],
+    log: bool = True,
+    fallback: Any = None,
+):
+    """Context manager to suppress specific errors.
+
+    Example:
+        with suppress_errors(FileNotFoundError, log=True):
+            # operations that might fail
+            pass
+
+    Args:
+        *exceptions: Exception types to suppress
+        log: Whether to log suppressed errors
+        fallback: Fallback value to yield on error
+    """
+    try:
+        yield fallback
+    except exceptions as e:
+        if log:
+            logger.debug(f"Suppressed error: {e}")
+        yield fallback
+
+
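
A sketch of the error_context manager above (illustrative, not part of the diff; settings.json is a hypothetical path):

    import json
    from pathlib import Path

    from claude_mpm.core.error_handler import ErrorStrategy, error_context

    settings = {}
    # Any failure inside the block is logged and handled with the FALLBACK strategy,
    # so the caller keeps the default settings instead of seeing the exception.
    with error_context(
        "load_settings",
        strategy=ErrorStrategy.FALLBACK,
        fallback_value={},
        file="settings.json",  # extra context recorded alongside the error
    ):
        settings = json.loads(Path("settings.json").read_text())
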
+# ==============================================================================
+# ERROR AGGREGATION
+# ==============================================================================
+
+
+class ErrorCollector:
+    """Collect errors without immediately failing."""
+
+    def __init__(self, fail_fast: bool = False):
+        """Initialize error collector.
+
+        Args:
+            fail_fast: Raise first error immediately
+        """
+        self.errors: List[Exception] = []
+        self.fail_fast = fail_fast
+
+    def collect(self, error: Exception) -> None:
+        """Collect an error.
+
+        Args:
+            error: Exception to collect
+        """
+        self.errors.append(error)
+        if self.fail_fast:
+            raise error
+
+    def has_errors(self) -> bool:
+        """Check if any errors were collected."""
+        return len(self.errors) > 0
+
+    def raise_if_errors(self, message: str = "Multiple errors occurred") -> None:
+        """Raise an exception if any errors were collected."""
+        if self.errors:
+            if len(self.errors) == 1:
+                raise self.errors[0]
+            error_messages = [str(e) for e in self.errors]
+            raise Exception(f"{message}: {'; '.join(error_messages)}")
+
+    def clear(self) -> None:
+        """Clear collected errors."""
+        self.errors.clear()
+
+    @contextmanager
+    def collecting(self):
+        """Context manager for error collection.
+
+        Example:
+            collector = ErrorCollector()
+            with collector.collecting():
+                for item in items:
+                    try:
+                        process(item)
+                    except Exception as e:
+                        collector.collect(e)
+            collector.raise_if_errors()
+        """
+        try:
+            yield self
+        finally:
+            pass  # Errors are handled by caller
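
A sketch of ErrorCollector for batch work, mirroring the docstring example above (process_item and items are assumptions):

    from claude_mpm.core.error_handler import ErrorCollector

    def process_item(item: int) -> None:
        if item % 2:  # hypothetical failure condition
            raise ValueError(f"odd item: {item}")

    items = [1, 2, 3, 4]
    collector = ErrorCollector()
    with collector.collecting():
        for item in items:
            try:
                process_item(item)
            except Exception as e:
                collector.collect(e)

    # Raises a single aggregated exception once the whole batch has been attempted.
    collector.raise_if_errors("Batch processing failed")
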