claude-mpm 4.4.3__py3-none-any.whl → 4.4.4__py3-none-any.whl
This diff shows the changes between publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
- claude_mpm/VERSION +1 -1
- claude_mpm/agents/agent_loader.py +3 -2
- claude_mpm/agents/agent_loader_integration.py +2 -1
- claude_mpm/agents/async_agent_loader.py +2 -2
- claude_mpm/agents/base_agent_loader.py +2 -2
- claude_mpm/agents/frontmatter_validator.py +1 -0
- claude_mpm/agents/system_agent_config.py +2 -1
- claude_mpm/cli/commands/doctor.py +44 -5
- claude_mpm/cli/commands/mpm_init.py +116 -62
- claude_mpm/cli/parsers/configure_parser.py +3 -1
- claude_mpm/cli/startup_logging.py +1 -3
- claude_mpm/config/agent_config.py +1 -1
- claude_mpm/config/paths.py +2 -1
- claude_mpm/core/agent_name_normalizer.py +1 -0
- claude_mpm/core/config.py +2 -1
- claude_mpm/core/config_aliases.py +2 -1
- claude_mpm/core/file_utils.py +0 -1
- claude_mpm/core/framework/__init__.py +6 -6
- claude_mpm/core/framework/formatters/__init__.py +2 -2
- claude_mpm/core/framework/formatters/capability_generator.py +19 -8
- claude_mpm/core/framework/formatters/content_formatter.py +8 -3
- claude_mpm/core/framework/formatters/context_generator.py +7 -3
- claude_mpm/core/framework/loaders/__init__.py +3 -3
- claude_mpm/core/framework/loaders/agent_loader.py +7 -3
- claude_mpm/core/framework/loaders/file_loader.py +16 -6
- claude_mpm/core/framework/loaders/instruction_loader.py +16 -6
- claude_mpm/core/framework/loaders/packaged_loader.py +36 -12
- claude_mpm/core/framework/processors/__init__.py +2 -2
- claude_mpm/core/framework/processors/memory_processor.py +14 -6
- claude_mpm/core/framework/processors/metadata_processor.py +5 -5
- claude_mpm/core/framework/processors/template_processor.py +12 -6
- claude_mpm/core/framework_loader.py +44 -20
- claude_mpm/core/log_manager.py +2 -1
- claude_mpm/core/tool_access_control.py +1 -0
- claude_mpm/core/unified_agent_registry.py +2 -1
- claude_mpm/core/unified_paths.py +1 -0
- claude_mpm/experimental/cli_enhancements.py +1 -0
- claude_mpm/hooks/base_hook.py +1 -0
- claude_mpm/hooks/instruction_reinforcement.py +1 -0
- claude_mpm/hooks/kuzu_memory_hook.py +20 -13
- claude_mpm/hooks/validation_hooks.py +1 -1
- claude_mpm/scripts/mpm_doctor.py +1 -0
- claude_mpm/services/agents/loading/agent_profile_loader.py +1 -1
- claude_mpm/services/agents/loading/base_agent_manager.py +1 -1
- claude_mpm/services/agents/loading/framework_agent_loader.py +1 -1
- claude_mpm/services/agents/management/agent_capabilities_generator.py +1 -0
- claude_mpm/services/agents/management/agent_management_service.py +1 -1
- claude_mpm/services/agents/memory/memory_categorization_service.py +0 -1
- claude_mpm/services/agents/memory/memory_file_service.py +6 -2
- claude_mpm/services/agents/memory/memory_format_service.py +0 -1
- claude_mpm/services/agents/registry/deployed_agent_discovery.py +1 -1
- claude_mpm/services/async_session_logger.py +1 -1
- claude_mpm/services/claude_session_logger.py +1 -0
- claude_mpm/services/core/path_resolver.py +1 -0
- claude_mpm/services/diagnostics/checks/__init__.py +2 -0
- claude_mpm/services/diagnostics/checks/installation_check.py +126 -25
- claude_mpm/services/diagnostics/checks/mcp_services_check.py +399 -0
- claude_mpm/services/diagnostics/diagnostic_runner.py +3 -0
- claude_mpm/services/diagnostics/doctor_reporter.py +259 -32
- claude_mpm/services/event_bus/direct_relay.py +2 -1
- claude_mpm/services/event_bus/event_bus.py +1 -0
- claude_mpm/services/event_bus/relay.py +3 -2
- claude_mpm/services/framework_claude_md_generator/content_assembler.py +1 -1
- claude_mpm/services/infrastructure/daemon_manager.py +1 -1
- claude_mpm/services/mcp_config_manager.py +10 -10
- claude_mpm/services/mcp_gateway/core/process_pool.py +62 -23
- claude_mpm/services/mcp_gateway/tools/__init__.py +6 -5
- claude_mpm/services/mcp_gateway/tools/external_mcp_services.py +3 -1
- claude_mpm/services/mcp_gateway/tools/kuzu_memory_service.py +16 -31
- claude_mpm/services/memory/cache/simple_cache.py +1 -1
- claude_mpm/services/project/archive_manager.py +159 -96
- claude_mpm/services/project/documentation_manager.py +64 -45
- claude_mpm/services/project/enhanced_analyzer.py +132 -89
- claude_mpm/services/project/project_organizer.py +225 -131
- claude_mpm/services/response_tracker.py +1 -1
- claude_mpm/services/socketio/server/eventbus_integration.py +1 -1
- claude_mpm/services/unified/__init__.py +1 -1
- claude_mpm/services/unified/analyzer_strategies/__init__.py +3 -3
- claude_mpm/services/unified/analyzer_strategies/code_analyzer.py +97 -53
- claude_mpm/services/unified/analyzer_strategies/dependency_analyzer.py +81 -40
- claude_mpm/services/unified/analyzer_strategies/performance_analyzer.py +277 -178
- claude_mpm/services/unified/analyzer_strategies/security_analyzer.py +196 -112
- claude_mpm/services/unified/analyzer_strategies/structure_analyzer.py +83 -49
- claude_mpm/services/unified/config_strategies/__init__.py +111 -126
- claude_mpm/services/unified/config_strategies/config_schema.py +157 -111
- claude_mpm/services/unified/config_strategies/context_strategy.py +91 -89
- claude_mpm/services/unified/config_strategies/error_handling_strategy.py +183 -173
- claude_mpm/services/unified/config_strategies/file_loader_strategy.py +160 -152
- claude_mpm/services/unified/config_strategies/unified_config_service.py +124 -112
- claude_mpm/services/unified/config_strategies/validation_strategy.py +298 -259
- claude_mpm/services/unified/deployment_strategies/__init__.py +7 -7
- claude_mpm/services/unified/deployment_strategies/base.py +24 -28
- claude_mpm/services/unified/deployment_strategies/cloud_strategies.py +168 -88
- claude_mpm/services/unified/deployment_strategies/local.py +49 -34
- claude_mpm/services/unified/deployment_strategies/utils.py +39 -43
- claude_mpm/services/unified/deployment_strategies/vercel.py +30 -24
- claude_mpm/services/unified/interfaces.py +0 -26
- claude_mpm/services/unified/migration.py +17 -40
- claude_mpm/services/unified/strategies.py +9 -26
- claude_mpm/services/unified/unified_analyzer.py +48 -44
- claude_mpm/services/unified/unified_config.py +21 -19
- claude_mpm/services/unified/unified_deployment.py +21 -26
- claude_mpm/storage/state_storage.py +1 -0
- claude_mpm/utils/agent_dependency_loader.py +18 -6
- claude_mpm/utils/common.py +14 -12
- claude_mpm/utils/database_connector.py +15 -12
- claude_mpm/utils/error_handler.py +1 -0
- claude_mpm/utils/log_cleanup.py +1 -0
- claude_mpm/utils/path_operations.py +1 -0
- claude_mpm/utils/session_logging.py +1 -1
- claude_mpm/utils/subprocess_utils.py +1 -0
- claude_mpm/validation/agent_validator.py +1 -1
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/METADATA +9 -3
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/RECORD +118 -117
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/WHEEL +0 -0
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/entry_points.txt +0 -0
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/licenses/LICENSE +0 -0
- {claude_mpm-4.4.3.dist-info → claude_mpm-4.4.4.dist-info}/top_level.txt +0 -0
claude_mpm/services/unified/config_strategies/error_handling_strategy.py
@@ -3,31 +3,33 @@ Error Handling Strategy - Unifies 99 error handling patterns into composable han
 Part of Phase 3 Configuration Consolidation
 """

+import json
+import traceback
 from abc import ABC, abstractmethod
-from typing import Any, Dict, List, Optional, Union, Callable, Type
 from dataclasses import dataclass, field
-from enum import Enum
-import traceback
-import sys
 from datetime import datetime
+from enum import Enum
 from pathlib import Path
-import
+from typing import Any, Callable, Dict, List, Optional, Type, Union

 from claude_mpm.core.logging_utils import get_logger
+
 from .unified_config_service import IConfigStrategy


 class ErrorSeverity(Enum):
     """Error severity levels"""
+
     CRITICAL = "critical"  # System failure
-    ERROR = "error"
-    WARNING = "warning"
-    INFO = "info"
-    DEBUG = "debug"
+    ERROR = "error"  # Operation failure
+    WARNING = "warning"  # Recoverable issue
+    INFO = "info"  # Informational
+    DEBUG = "debug"  # Debug information


 class ErrorCategory(Enum):
     """Categories of errors for handling strategy"""
+
     FILE_IO = "file_io"
     PARSING = "parsing"
     VALIDATION = "validation"
@@ -43,6 +45,7 @@ class ErrorCategory(Enum):
 @dataclass
 class ErrorContext:
     """Context information for error handling"""
+
     error: Exception
     category: ErrorCategory
     severity: ErrorSeverity
@@ -58,6 +61,7 @@ class ErrorContext:
 @dataclass
 class ErrorHandlingResult:
     """Result of error handling operation"""
+
     handled: bool
     recovered: bool = False
     fallback_value: Any = None
@@ -77,12 +81,10 @@ class BaseErrorHandler(ABC):
     @abstractmethod
     def can_handle(self, context: ErrorContext) -> bool:
         """Check if this handler can handle the error"""
-        pass

     @abstractmethod
     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
         """Handle the error"""
-        pass

     def log_error(self, context: ErrorContext, message: str = None):
         """Log error with appropriate level"""
@@ -109,14 +111,13 @@ class FileIOErrorHandler(BaseErrorHandler):
         IsADirectoryError: "Path is a directory",
         NotADirectoryError: "Path is not a directory",
         IOError: "I/O operation failed",
-        OSError: "Operating system error"
+        OSError: "Operating system error",
     }

     def can_handle(self, context: ErrorContext) -> bool:
         """Check if error is file I/O related"""
-        return (
-            context.
-            isinstance(context.error, (FileNotFoundError, PermissionError, IOError, OSError))
+        return context.category == ErrorCategory.FILE_IO or isinstance(
+            context.error, (FileNotFoundError, PermissionError, IOError, OSError)
         )

     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
@@ -145,8 +146,8 @@ class FileIOErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Check for fallback locations
-        if context.metadata.get(
-            for fallback in context.metadata[
+        if context.metadata.get("fallback_paths"):
+            for fallback in context.metadata["fallback_paths"]:
                 fallback_path = Path(fallback)
                 if fallback_path.exists():
                     result.recovered = True
@@ -156,22 +157,22 @@ class FileIOErrorHandler(BaseErrorHandler):
                     return result

         # Check for default values
-        if context.metadata.get(
+        if context.metadata.get("default_config"):
             result.recovered = True
-            result.fallback_value = context.metadata[
+            result.fallback_value = context.metadata["default_config"]
             result.actions_taken.append("Used default configuration")
             return result

         # Create file if requested
-        if context.metadata.get(
+        if context.metadata.get("create_if_missing"):
             path = Path(context.source)
             try:
                 path.parent.mkdir(parents=True, exist_ok=True)

                 # Create with default content
-                default_content = context.metadata.get(
+                default_content = context.metadata.get("default_content", {})

-                if path.suffix ==
+                if path.suffix == ".json":
                     path.write_text(json.dumps(default_content, indent=2))
                 else:
                     path.write_text(str(default_content))
@@ -192,12 +193,12 @@ class FileIOErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Try alternative location
-        if context.metadata.get(
-            alt_path = Path(context.metadata[
+        if context.metadata.get("alt_location"):
+            alt_path = Path(context.metadata["alt_location"])
             try:
                 # Test write permission
                 alt_path.parent.mkdir(parents=True, exist_ok=True)
-                test_file = alt_path.parent /
+                test_file = alt_path.parent / ".test_write"
                 test_file.touch()
                 test_file.unlink()

@@ -209,9 +210,9 @@ class FileIOErrorHandler(BaseErrorHandler):
                 result.should_escalate = True

         # Use read-only mode if applicable
-        elif context.metadata.get(
+        elif context.metadata.get("allow_readonly"):
             result.recovered = True
-            result.fallback_value = {
+            result.fallback_value = {"readonly": True}
             result.actions_taken.append("Switched to read-only mode")

         return result
@@ -221,13 +222,15 @@ class FileIOErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Retry with exponential backoff
-        retry_count = context.metadata.get(
-        max_retries = context.metadata.get(
+        retry_count = context.metadata.get("retry_count", 0)
+        max_retries = context.metadata.get("max_retries", 3)

         if retry_count < max_retries:
             result.should_retry = True
-            result.retry_after = 2
-            result.actions_taken.append(
+            result.retry_after = 2**retry_count  # Exponential backoff
+            result.actions_taken.append(
+                f"Retry {retry_count + 1}/{max_retries} after {result.retry_after}s"
+            )
         else:
             result.should_escalate = True
             result.message = f"Failed after {max_retries} retries"
@@ -241,16 +244,16 @@ class ParsingErrorHandler(BaseErrorHandler):
     PARSER_ERRORS = {
         json.JSONDecodeError: ErrorCategory.PARSING,
         ValueError: ErrorCategory.PARSING,  # Common for parsing
-        SyntaxError: ErrorCategory.PARSING
+        SyntaxError: ErrorCategory.PARSING,
     }

     def can_handle(self, context: ErrorContext) -> bool:
         """Check if error is parsing related"""
         return (
-            context.category == ErrorCategory.PARSING
-            type(context.error) in self.PARSER_ERRORS
-
-
+            context.category == ErrorCategory.PARSING
+            or type(context.error) in self.PARSER_ERRORS
+            or "parse" in str(context.error).lower()
+            or "decode" in str(context.error).lower()
         )

     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
@@ -260,7 +263,7 @@ class ParsingErrorHandler(BaseErrorHandler):
         # Try recovery strategies based on error type
         if isinstance(context.error, json.JSONDecodeError):
             result = self._handle_json_error(context)
-        elif
+        elif "yaml" in str(context.error).lower():
             result = self._handle_yaml_error(context)
         else:
             result = self._handle_generic_parse_error(context)
@@ -271,14 +274,14 @@ class ParsingErrorHandler(BaseErrorHandler):
         """Handle JSON parsing errors"""
         result = ErrorHandlingResult(handled=True)

-        content = context.metadata.get(
+        content = context.metadata.get("content", "")

         # Try to fix common JSON issues
         fixes = [
             self._fix_json_comments,
             self._fix_json_quotes,
             self._fix_json_trailing_commas,
-            self._fix_json_unquoted_keys
+            self._fix_json_unquoted_keys,
         ]

         for fix_func in fixes:
@@ -294,7 +297,7 @@ class ParsingErrorHandler(BaseErrorHandler):
                 continue

         # Use lenient parser if available
-        if context.metadata.get(
+        if context.metadata.get("allow_lenient"):
             result = self._parse_lenient_json(content, result)

         return result
@@ -302,15 +305,17 @@ class ParsingErrorHandler(BaseErrorHandler):
     def _fix_json_comments(self, content: str) -> str:
         """Remove comments from JSON"""
         import re
+
         # Remove single-line comments
-        content = re.sub(r
+        content = re.sub(r"//.*?$", "", content, flags=re.MULTILINE)
         # Remove multi-line comments
-        content = re.sub(r
+        content = re.sub(r"/\*.*?\*/", "", content, flags=re.DOTALL)
         return content

     def _fix_json_quotes(self, content: str) -> str:
         """Fix quote issues in JSON"""
         import re
+
         # Replace single quotes with double quotes (careful with values)
         # This is a simple approach - more sophisticated parsing might be needed
         content = re.sub(r"'([^']*)':", r'"\1":', content)  # Keys
@@ -320,22 +325,27 @@ class ParsingErrorHandler(BaseErrorHandler):
     def _fix_json_trailing_commas(self, content: str) -> str:
         """Remove trailing commas"""
         import re
-
-        content = re.sub(r
+
+        content = re.sub(r",\s*}", "}", content)
+        content = re.sub(r",\s*]", "]", content)
         return content

     def _fix_json_unquoted_keys(self, content: str) -> str:
         """Add quotes to unquoted keys"""
         import re
+
         # Match unquoted keys (word characters followed by colon)
-        content = re.sub(r
+        content = re.sub(r"(\w+):", r'"\1":', content)
         return content

-    def _parse_lenient_json(
+    def _parse_lenient_json(
+        self, content: str, result: ErrorHandlingResult
+    ) -> ErrorHandlingResult:
         """Parse JSON leniently"""
         try:
             # Try using ast.literal_eval for Python literals
             import ast
+
             parsed = ast.literal_eval(content)
             result.recovered = True
             result.fallback_value = parsed
@@ -352,7 +362,7 @@ class ParsingErrorHandler(BaseErrorHandler):
         """Handle YAML parsing errors"""
         result = ErrorHandlingResult(handled=True)

-        content = context.metadata.get(
+        content = context.metadata.get("content", "")

         # Try to fix common YAML issues
         try:
@@ -366,7 +376,7 @@ class ParsingErrorHandler(BaseErrorHandler):

         except:
             # Try to fix tabs
-            content = content.replace(
+            content = content.replace("\t", " ")
             try:
                 parsed = yaml.safe_load(content)
                 result.recovered = True
@@ -383,13 +393,13 @@ class ParsingErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Try alternative formats
-        content = context.metadata.get(
+        content = context.metadata.get("content", "")

         formats = [
-            (
-            (
-            (
-            (
+            ("json", json.loads),
+            ("yaml", self._try_yaml),
+            ("ini", self._try_ini),
+            ("properties", self._try_properties),
         ]

         for format_name, parser in formats:
@@ -405,7 +415,7 @@ class ParsingErrorHandler(BaseErrorHandler):

         # Use default/empty config
         result.recovered = True
-        result.fallback_value = context.metadata.get(
+        result.fallback_value = context.metadata.get("default_config", {})
         result.actions_taken.append("Used default configuration")

         return result
@@ -413,11 +423,13 @@ class ParsingErrorHandler(BaseErrorHandler):
     def _try_yaml(self, content: str) -> Dict:
         """Try parsing as YAML"""
         import yaml
+
         return yaml.safe_load(content)

     def _try_ini(self, content: str) -> Dict:
         """Try parsing as INI"""
         import configparser
+
         parser = configparser.ConfigParser()
         parser.read_string(content)
         return {s: dict(parser.items(s)) for s in parser.sections()}
@@ -427,8 +439,8 @@ class ParsingErrorHandler(BaseErrorHandler):
         result = {}
         for line in content.splitlines():
             line = line.strip()
-            if line and not line.startswith(
-                key, value = line.split(
+            if line and not line.startswith("#") and "=" in line:
+                key, value = line.split("=", 1)
                 result[key.strip()] = value.strip()
         return result

@@ -439,10 +451,10 @@ class ValidationErrorHandler(BaseErrorHandler):
     def can_handle(self, context: ErrorContext) -> bool:
         """Check if error is validation related"""
         return (
-            context.category == ErrorCategory.VALIDATION
-
-
-
+            context.category == ErrorCategory.VALIDATION
+            or "validation" in str(context.error).lower()
+            or "invalid" in str(context.error).lower()
+            or "constraint" in str(context.error).lower()
         )

     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
@@ -450,9 +462,9 @@ class ValidationErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Get validation details
-        field = context.metadata.get(
-        value = context.metadata.get(
-        schema = context.metadata.get(
+        field = context.metadata.get("field")
+        value = context.metadata.get("value")
+        schema = context.metadata.get("schema")

         # Try to fix or provide default
         if field and schema:
@@ -463,18 +475,14 @@ class ValidationErrorHandler(BaseErrorHandler):
         return result

     def _fix_validation_error(
-        self,
-        field: str,
-        value: Any,
-        schema: Dict,
-        result: ErrorHandlingResult
+        self, field: str, value: Any, schema: Dict, result: ErrorHandlingResult
     ) -> ErrorHandlingResult:
         """Try to fix validation error"""
-        field_schema = schema.get(
+        field_schema = schema.get("properties", {}).get(field, {})

         # Try type coercion
-        if
-            expected_type = field_schema[
+        if "type" in field_schema:
+            expected_type = field_schema["type"]
             coerced = self._coerce_type(value, expected_type)

             if coerced is not None:
@@ -484,24 +492,24 @@ class ValidationErrorHandler(BaseErrorHandler):
                 return result

         # Use default value if available
-        if
+        if "default" in field_schema:
             result.recovered = True
-            result.fallback_value = {field: field_schema[
+            result.fallback_value = {field: field_schema["default"]}
             result.actions_taken.append(f"Used default value for {field}")
             return result

         # Use minimum/maximum for range errors
-        if
-            if value < field_schema[
+        if "minimum" in field_schema and isinstance(value, (int, float)):
+            if value < field_schema["minimum"]:
                 result.recovered = True
-                result.fallback_value = {field: field_schema[
+                result.fallback_value = {field: field_schema["minimum"]}
                 result.actions_taken.append(f"Clamped {field} to minimum")
                 return result

-        if
-            if value > field_schema[
+        if "maximum" in field_schema and isinstance(value, (int, float)):
+            if value > field_schema["maximum"]:
                 result.recovered = True
-                result.fallback_value = {field: field_schema[
+                result.fallback_value = {field: field_schema["maximum"]}
                 result.actions_taken.append(f"Clamped {field} to maximum")
                 return result

@@ -510,22 +518,22 @@ class ValidationErrorHandler(BaseErrorHandler):
     def _coerce_type(self, value: Any, expected_type: str) -> Any:
         """Attempt to coerce value to expected type"""
         try:
-            if expected_type ==
+            if expected_type == "string":
                 return str(value)
-
+            if expected_type == "integer":
                 return int(value)
-
+            if expected_type == "number":
                 return float(value)
-
+            if expected_type == "boolean":
                 if isinstance(value, str):
-                    return value.lower() in [
+                    return value.lower() in ["true", "yes", "1", "on"]
                 return bool(value)
-
+            if expected_type == "array":
                 if isinstance(value, str):
                     # Try comma-separated
-                    return [v.strip() for v in value.split(
+                    return [v.strip() for v in value.split(",")]
                 return list(value)
-
+            if expected_type == "object":
                 if isinstance(value, str):
                     return json.loads(value)
                 return dict(value)
@@ -533,21 +541,21 @@ class ValidationErrorHandler(BaseErrorHandler):
            return None

     def _handle_generic_validation(
-        self,
-        context: ErrorContext,
-        result: ErrorHandlingResult
+        self, context: ErrorContext, result: ErrorHandlingResult
     ) -> ErrorHandlingResult:
         """Handle generic validation errors"""
         # Use strict vs lenient mode
-        if context.metadata.get(
+        if context.metadata.get("strict", True):
             result.should_escalate = True
             result.message = "Validation failed in strict mode"
         else:
             # In lenient mode, use config as-is with warnings
             result.recovered = True
-            result.fallback_value = context.metadata.get(
+            result.fallback_value = context.metadata.get("config", {})
             result.actions_taken.append("Accepted configuration in lenient mode")
-            self.logger.warning(
+            self.logger.warning(
+                f"Validation error ignored in lenient mode: {context.error}"
+            )

         return result

@@ -560,16 +568,16 @@ class NetworkErrorHandler(BaseErrorHandler):
         TimeoutError,
         ConnectionRefusedError,
         ConnectionResetError,
-        BrokenPipeError
+        BrokenPipeError,
     ]

     def can_handle(self, context: ErrorContext) -> bool:
         """Check if error is network related"""
         return (
-            context.category == ErrorCategory.NETWORK
-            any(isinstance(context.error, err) for err in self.NETWORK_ERRORS)
-
-
+            context.category == ErrorCategory.NETWORK
+            or any(isinstance(context.error, err) for err in self.NETWORK_ERRORS)
+            or "connection" in str(context.error).lower()
+            or "timeout" in str(context.error).lower()
         )

     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
@@ -577,29 +585,31 @@ class NetworkErrorHandler(BaseErrorHandler):
         result = ErrorHandlingResult(handled=True)

         # Implement exponential backoff retry
-        retry_count = context.metadata.get(
-        max_retries = context.metadata.get(
+        retry_count = context.metadata.get("retry_count", 0)
+        max_retries = context.metadata.get("max_retries", 5)

         if retry_count < max_retries:
             # Calculate backoff time
-            backoff = min(300, 2
+            backoff = min(300, 2**retry_count)  # Max 5 minutes
             result.should_retry = True
             result.retry_after = backoff
-            result.actions_taken.append(
+            result.actions_taken.append(
+                f"Retry {retry_count + 1}/{max_retries} after {backoff}s"
+            )

             # Add jitter to prevent thundering herd
             import random
+
             result.retry_after += random.uniform(0, backoff * 0.1)

+        # Try offline/cached mode
+        elif context.metadata.get("cache_available"):
+            result.recovered = True
+            result.fallback_value = context.metadata.get("cached_config")
+            result.actions_taken.append("Using cached configuration")
         else:
-
-
-                result.recovered = True
-                result.fallback_value = context.metadata.get('cached_config')
-                result.actions_taken.append("Using cached configuration")
-            else:
-                result.should_escalate = True
-                result.message = f"Network error after {max_retries} retries"
+            result.should_escalate = True
+            result.message = f"Network error after {max_retries} retries"

         return result

@@ -610,18 +620,18 @@ class TypeConversionErrorHandler(BaseErrorHandler):
     def can_handle(self, context: ErrorContext) -> bool:
         """Check if error is type conversion related"""
         return (
-            context.category == ErrorCategory.TYPE_CONVERSION
-            isinstance(context.error, (TypeError, ValueError))
-
-
+            context.category == ErrorCategory.TYPE_CONVERSION
+            or isinstance(context.error, (TypeError, ValueError))
+            or "type" in str(context.error).lower()
+            or "convert" in str(context.error).lower()
        )

     def handle(self, context: ErrorContext) -> ErrorHandlingResult:
         """Handle type conversion errors"""
         result = ErrorHandlingResult(handled=True)

-        source_value = context.metadata.get(
-        target_type = context.metadata.get(
+        source_value = context.metadata.get("value")
+        target_type = context.metadata.get("target_type")

         if source_value is not None and target_type:
             # Try intelligent conversion
@@ -648,7 +658,7 @@ class TypeConversionErrorHandler(BaseErrorHandler):
             float: self._to_float,
             bool: self._to_bool,
             list: self._to_list,
-            dict: self._to_dict
+            dict: self._to_dict,
         }

         converter = converters.get(target_type)
@@ -663,7 +673,7 @@ class TypeConversionErrorHandler(BaseErrorHandler):
     def _to_string(self, value: Any) -> str:
         """Convert to string"""
         if isinstance(value, bytes):
-            return value.decode(
+            return value.decode("utf-8", errors="replace")
         return str(value)

     def _to_int(self, value: Any) -> int:
@@ -671,7 +681,8 @@ class TypeConversionErrorHandler(BaseErrorHandler):
         if isinstance(value, str):
             # Try to extract number from string
             import re
-
+
+            match = re.search(r"-?\d+", value)
             if match:
                 return int(match.group())
         return int(float(value))
@@ -680,30 +691,30 @@ class TypeConversionErrorHandler(BaseErrorHandler):
         """Convert to float"""
         if isinstance(value, str):
             # Handle percentage
-            if
-                return float(value.replace(
+            if "%" in value:
+                return float(value.replace("%", "")) / 100
             # Handle comma as decimal separator
-            value = value.replace(
+            value = value.replace(",", ".")
         return float(value)

     def _to_bool(self, value: Any) -> bool:
         """Convert to boolean"""
         if isinstance(value, str):
-            return value.lower() in [
+            return value.lower() in ["true", "yes", "1", "on", "enabled"]
         return bool(value)

     def _to_list(self, value: Any) -> list:
         """Convert to list"""
         if isinstance(value, str):
             # Try JSON array
-            if value.startswith(
+            if value.startswith("["):
                 try:
                     return json.loads(value)
                 except:
                     pass
             # Try comma-separated
-            return [v.strip() for v in value.split(
-
+            return [v.strip() for v in value.split(",")]
+        if hasattr(value, "__iter__") and not isinstance(value, (str, bytes, dict)):
             return list(value)
         return [value]

@@ -717,27 +728,27 @@ class TypeConversionErrorHandler(BaseErrorHandler):
                 pass
             # Try key=value pairs
             result = {}
-            for pair in value.split(
-                if
-                    k, v = pair.split(
+            for pair in value.split(","):
+                if "=" in pair:
+                    k, v = pair.split("=", 1)
                     result[k.strip()] = v.strip()
             return result
-
+        if hasattr(value, "__dict__"):
             return vars(value)
         return {}

     def _get_type_default(self, target_type: Type) -> Any:
         """Get default value for type"""
         defaults = {
-            str:
+            str: "",
             int: 0,
             float: 0.0,
             bool: False,
             list: [],
             dict: {},
-            type(None): None
+            type(None): None,
         }
-        return defaults.get(target_type
+        return defaults.get(target_type)


 class CompositeErrorHandler(BaseErrorHandler):
@@ -750,7 +761,7 @@ class CompositeErrorHandler(BaseErrorHandler):
             ParsingErrorHandler(),
             ValidationErrorHandler(),
             NetworkErrorHandler(),
-            TypeConversionErrorHandler()
+            TypeConversionErrorHandler(),
         ]

     def can_handle(self, context: ErrorContext) -> bool:
@@ -776,16 +787,15 @@ class CompositeErrorHandler(BaseErrorHandler):

         # Log the full error
         self.logger.error(
-            f"Unknown error in {context.operation}: {context.error}",
-            exc_info=True
+            f"Unknown error in {context.operation}: {context.error}", exc_info=True
         )

         # Try generic recovery strategies
-        if context.metadata.get(
+        if context.metadata.get("default_config"):
             result.recovered = True
-            result.fallback_value = context.metadata[
+            result.fallback_value = context.metadata["default_config"]
             result.actions_taken.append("Used default configuration for unknown error")
-        elif context.metadata.get(
+        elif context.metadata.get("skip_on_error"):
             result.recovered = True
             result.fallback_value = {}
             result.actions_taken.append("Skipped configuration due to error")
@@ -829,7 +839,7 @@ class ErrorHandlingStrategy(IConfigStrategy):
         error: Exception,
         source: Optional[str] = None,
         operation: Optional[str] = None,
-        **metadata
+        **metadata,
     ) -> ErrorHandlingResult:
         """Main error handling entry point"""
         # Categorize error
@@ -844,7 +854,7 @@ class ErrorHandlingStrategy(IConfigStrategy):
             source=source,
             operation=operation,
             traceback=traceback.format_exc(),
-            metadata=metadata
+            metadata=metadata,
         )

         # Record in history
@@ -873,7 +883,7 @@ class ErrorHandlingStrategy(IConfigStrategy):

         # Parsing errors
         if isinstance(error, (json.JSONDecodeError, ValueError, SyntaxError)):
-            if
+            if "parse" in str(error).lower() or "decode" in str(error).lower():
                 return ErrorCategory.PARSING

         # Network errors
@@ -887,18 +897,20 @@ class ErrorHandlingStrategy(IConfigStrategy):
         # Check error message for hints
         error_msg = str(error).lower()

-        if
+        if "validation" in error_msg or "invalid" in error_msg:
             return ErrorCategory.VALIDATION
-
+        if "permission" in error_msg or "access" in error_msg:
             return ErrorCategory.PERMISSION
-
+        if "not found" in error_msg or "missing" in error_msg:
             return ErrorCategory.MISSING_DEPENDENCY
-
+        if "config" in error_msg or "setting" in error_msg:
             return ErrorCategory.CONFIGURATION

         return ErrorCategory.UNKNOWN

-    def _determine_severity(
+    def _determine_severity(
+        self, error: Exception, category: ErrorCategory
+    ) -> ErrorSeverity:
         """Determine error severity"""
         # Critical errors
         critical_types = [MemoryError, SystemError, KeyboardInterrupt]
@@ -916,15 +928,13 @@ class ErrorHandlingStrategy(IConfigStrategy):
             ErrorCategory.MISSING_DEPENDENCY: ErrorSeverity.ERROR,
             ErrorCategory.CONFIGURATION: ErrorSeverity.ERROR,
             ErrorCategory.RUNTIME: ErrorSeverity.ERROR,
-            ErrorCategory.UNKNOWN: ErrorSeverity.ERROR
+            ErrorCategory.UNKNOWN: ErrorSeverity.ERROR,
         }

         return severity_map.get(category, ErrorSeverity.ERROR)

     def _apply_recovery_strategies(
-        self,
-        context: ErrorContext,
-        result: ErrorHandlingResult
+        self, context: ErrorContext, result: ErrorHandlingResult
     ) -> ErrorHandlingResult:
         """Apply custom recovery strategies"""
         for name, strategy in self.recovery_strategies.items():
@@ -949,10 +959,10 @@ class ErrorHandlingStrategy(IConfigStrategy):
         """Get error handling statistics"""
         if not self.error_history:
             return {
-
-
-
-
+                "total_errors": 0,
+                "categories": {},
+                "severities": {},
+                "recovery_rate": 0.0,
             }

         total = len(self.error_history)
@@ -971,29 +981,29 @@ class ErrorHandlingStrategy(IConfigStrategy):
             severities[sev_name] = severities.get(sev_name, 0) + 1

         return {
-
-
-
-
-
-
+            "total_errors": total,
+            "recovered": recovered,
+            "recovery_rate": (recovered / total) * 100 if total > 0 else 0,
+            "categories": categories,
+            "severities": severities,
+            "recent_errors": [
                 {
-
-
-
-
-
+                    "timestamp": e.timestamp.isoformat(),
+                    "category": e.category.value,
+                    "severity": e.severity.value,
+                    "operation": e.operation,
+                    "recovered": e.recovery_successful,
                 }
                 for e in self.error_history[-10:]  # Last 10 errors
-            ]
+            ],
         }


 # Export main components
 __all__ = [
-
-
-
-
-
-]
+    "ErrorCategory",
+    "ErrorContext",
+    "ErrorHandlingResult",
+    "ErrorHandlingStrategy",
+    "ErrorSeverity",
+]