ai_coding_assistant-0.5.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (89)
  1. ai_coding_assistant-0.5.0.dist-info/METADATA +226 -0
  2. ai_coding_assistant-0.5.0.dist-info/RECORD +89 -0
  3. ai_coding_assistant-0.5.0.dist-info/WHEEL +4 -0
  4. ai_coding_assistant-0.5.0.dist-info/entry_points.txt +3 -0
  5. ai_coding_assistant-0.5.0.dist-info/licenses/LICENSE +21 -0
  6. coding_assistant/__init__.py +3 -0
  7. coding_assistant/__main__.py +19 -0
  8. coding_assistant/cli/__init__.py +1 -0
  9. coding_assistant/cli/app.py +158 -0
  10. coding_assistant/cli/commands/__init__.py +19 -0
  11. coding_assistant/cli/commands/ask.py +178 -0
  12. coding_assistant/cli/commands/config.py +438 -0
  13. coding_assistant/cli/commands/diagram.py +267 -0
  14. coding_assistant/cli/commands/document.py +410 -0
  15. coding_assistant/cli/commands/explain.py +192 -0
  16. coding_assistant/cli/commands/fix.py +249 -0
  17. coding_assistant/cli/commands/index.py +162 -0
  18. coding_assistant/cli/commands/refactor.py +245 -0
  19. coding_assistant/cli/commands/search.py +182 -0
  20. coding_assistant/cli/commands/serve_docs.py +128 -0
  21. coding_assistant/cli/repl.py +381 -0
  22. coding_assistant/cli/theme.py +90 -0
  23. coding_assistant/codebase/__init__.py +1 -0
  24. coding_assistant/codebase/crawler.py +93 -0
  25. coding_assistant/codebase/parser.py +266 -0
  26. coding_assistant/config/__init__.py +25 -0
  27. coding_assistant/config/config_manager.py +615 -0
  28. coding_assistant/config/settings.py +82 -0
  29. coding_assistant/context/__init__.py +19 -0
  30. coding_assistant/context/chunker.py +443 -0
  31. coding_assistant/context/enhanced_retriever.py +322 -0
  32. coding_assistant/context/hybrid_search.py +311 -0
  33. coding_assistant/context/ranker.py +355 -0
  34. coding_assistant/context/retriever.py +119 -0
  35. coding_assistant/context/window.py +362 -0
  36. coding_assistant/documentation/__init__.py +23 -0
  37. coding_assistant/documentation/agents/__init__.py +27 -0
  38. coding_assistant/documentation/agents/coordinator.py +510 -0
  39. coding_assistant/documentation/agents/module_documenter.py +111 -0
  40. coding_assistant/documentation/agents/synthesizer.py +139 -0
  41. coding_assistant/documentation/agents/task_delegator.py +100 -0
  42. coding_assistant/documentation/decomposition/__init__.py +21 -0
  43. coding_assistant/documentation/decomposition/context_preserver.py +477 -0
  44. coding_assistant/documentation/decomposition/module_detector.py +302 -0
  45. coding_assistant/documentation/decomposition/partitioner.py +621 -0
  46. coding_assistant/documentation/generators/__init__.py +14 -0
  47. coding_assistant/documentation/generators/dataflow_generator.py +440 -0
  48. coding_assistant/documentation/generators/diagram_generator.py +511 -0
  49. coding_assistant/documentation/graph/__init__.py +13 -0
  50. coding_assistant/documentation/graph/dependency_builder.py +468 -0
  51. coding_assistant/documentation/graph/module_analyzer.py +475 -0
  52. coding_assistant/documentation/writers/__init__.py +11 -0
  53. coding_assistant/documentation/writers/markdown_writer.py +322 -0
  54. coding_assistant/embeddings/__init__.py +0 -0
  55. coding_assistant/embeddings/generator.py +89 -0
  56. coding_assistant/embeddings/store.py +187 -0
  57. coding_assistant/exceptions/__init__.py +50 -0
  58. coding_assistant/exceptions/base.py +110 -0
  59. coding_assistant/exceptions/llm.py +249 -0
  60. coding_assistant/exceptions/recovery.py +263 -0
  61. coding_assistant/exceptions/storage.py +213 -0
  62. coding_assistant/exceptions/validation.py +230 -0
  63. coding_assistant/llm/__init__.py +1 -0
  64. coding_assistant/llm/client.py +277 -0
  65. coding_assistant/llm/gemini_client.py +181 -0
  66. coding_assistant/llm/groq_client.py +160 -0
  67. coding_assistant/llm/prompts.py +98 -0
  68. coding_assistant/llm/together_client.py +160 -0
  69. coding_assistant/operations/__init__.py +13 -0
  70. coding_assistant/operations/differ.py +369 -0
  71. coding_assistant/operations/generator.py +347 -0
  72. coding_assistant/operations/linter.py +430 -0
  73. coding_assistant/operations/validator.py +406 -0
  74. coding_assistant/storage/__init__.py +9 -0
  75. coding_assistant/storage/database.py +363 -0
  76. coding_assistant/storage/session.py +231 -0
  77. coding_assistant/utils/__init__.py +31 -0
  78. coding_assistant/utils/cache.py +477 -0
  79. coding_assistant/utils/hardware.py +132 -0
  80. coding_assistant/utils/keystore.py +206 -0
  81. coding_assistant/utils/logger.py +32 -0
  82. coding_assistant/utils/progress.py +311 -0
  83. coding_assistant/validation/__init__.py +13 -0
  84. coding_assistant/validation/files.py +305 -0
  85. coding_assistant/validation/inputs.py +335 -0
  86. coding_assistant/validation/params.py +280 -0
  87. coding_assistant/validation/sanitizers.py +243 -0
  88. coding_assistant/vcs/__init__.py +5 -0
  89. coding_assistant/vcs/git.py +269 -0
coding_assistant/exceptions/base.py
@@ -0,0 +1,110 @@
+"""Base exception classes for the AI Coding Assistant."""
+
+from typing import Optional, Dict, Any
+
+
+class AssistantError(Exception):
+    """
+    Base exception for all assistant errors.
+
+    Attributes:
+        message: Human-readable error message
+        recoverable: Whether the error can be recovered from
+        suggestion: Suggested action to fix the error
+        details: Additional error details
+        error_code: Unique error code for categorization
+    """
+
+    def __init__(
+        self,
+        message: str,
+        recoverable: bool = True,
+        suggestion: Optional[str] = None,
+        details: Optional[Dict[str, Any]] = None,
+        error_code: Optional[str] = None
+    ):
+        """
+        Initialize assistant error.
+
+        Args:
+            message: Error message
+            recoverable: Whether error can be recovered from
+            suggestion: Suggestion for fixing the error
+            details: Additional error details
+            error_code: Unique error code
+        """
+        super().__init__(message)
+        self.message = message
+        self.recoverable = recoverable
+        self.suggestion = suggestion
+        self.details = details or {}
+        self.error_code = error_code or self.__class__.__name__
+
+    def format_message(self, color: bool = True) -> str:
+        """
+        Format error message for display.
+
+        Args:
+            color: Whether to include Rich color markup
+
+        Returns:
+            Formatted error message
+        """
+        if color:
+            lines = [f"[red]❌ Error: {self.message}[/red]"]
+
+            if self.suggestion:
+                lines.append(f"\n[yellow]💡 Suggestion: {self.suggestion}[/yellow]")
+
+            if self.details:
+                lines.append("\n[dim]Details:[/dim]")
+                for key, value in self.details.items():
+                    lines.append(f" [dim]{key}: {value}[/dim]")
+
+            if not self.recoverable:
+                lines.append("\n[red bold]⚠️ This error is not recoverable.[/red bold]")
+
+        else:
+            lines = [f"Error: {self.message}"]
+
+            if self.suggestion:
+                lines.append(f"\nSuggestion: {self.suggestion}")
+
+            if self.details:
+                lines.append("\nDetails:")
+                for key, value in self.details.items():
+                    lines.append(f" {key}: {value}")
+
+            if not self.recoverable:
+                lines.append("\n⚠️ This error is not recoverable.")
+
+        return "\n".join(lines)
+
+    def __str__(self) -> str:
+        """String representation."""
+        return self.format_message(color=False)
+
+    def __repr__(self) -> str:
+        """Developer representation."""
+        return (
+            f"{self.__class__.__name__}("
+            f"message={self.message!r}, "
+            f"recoverable={self.recoverable}, "
+            f"error_code={self.error_code})"
+        )
+
+    def to_dict(self) -> Dict[str, Any]:
+        """
+        Convert error to dictionary.
+
+        Returns:
+            Dictionary representation of error
+        """
+        return {
+            'error_code': self.error_code,
+            'message': self.message,
+            'recoverable': self.recoverable,
+            'suggestion': self.suggestion,
+            'details': self.details,
+            'error_type': self.__class__.__name__
+        }
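
For orientation, a minimal usage sketch of the base class added above: it shows how a caller might subclass AssistantError and use its formatting helpers. The DatabaseLockedError class, its message, and its values are hypothetical, chosen only for illustration; they are not part of the published wheel.

# Illustrative sketch only; DatabaseLockedError is a hypothetical subclass, not shipped in the package.
from coding_assistant.exceptions.base import AssistantError


class DatabaseLockedError(AssistantError):
    """Hypothetical error used to demonstrate the base-class helpers."""

    def __init__(self, path: str):
        super().__init__(
            message=f"Index database is locked: {path}",
            recoverable=True,
            suggestion="Close other assistant processes and retry",
            details={"path": path},
            error_code="DB_LOCKED",
        )


try:
    raise DatabaseLockedError("/tmp/index.db")
except AssistantError as err:
    print(err)                          # plain text via __str__ -> format_message(color=False)
    print(err.format_message())         # Rich markup, suitable for console.print()
    print(err.to_dict()["error_code"])  # structured form for logging -> 'DB_LOCKED'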
coding_assistant/exceptions/llm.py
@@ -0,0 +1,249 @@
+"""LLM-related exceptions."""
+
+from typing import Optional
+from coding_assistant.exceptions.base import AssistantError
+
+
+class LLMError(AssistantError):
+    """Base exception for LLM-related errors."""
+
+    def __init__(
+        self,
+        message: str,
+        recoverable: bool = True,
+        suggestion: Optional[str] = None,
+        **kwargs
+    ):
+        super().__init__(
+            message=message,
+            recoverable=recoverable,
+            suggestion=suggestion,
+            error_code="LLM_ERROR",
+            **kwargs
+        )
+
+
+class LLMConnectionError(LLMError):
+    """
+    LLM connection failed.
+
+    Raised when cannot connect to LLM provider.
+    """
+
+    def __init__(self, provider: str, endpoint: str, reason: Optional[str] = None):
+        """
+        Initialize connection error.
+
+        Args:
+            provider: LLM provider name (ollama, openai, etc.)
+            endpoint: Connection endpoint
+            reason: Optional reason for failure
+        """
+        message = f"Failed to connect to {provider} LLM at {endpoint}"
+        if reason:
+            message += f": {reason}"
+
+        suggestion = self._get_suggestion(provider, endpoint)
+
+        super().__init__(
+            message=message,
+            recoverable=True,
+            suggestion=suggestion,
+            details={
+                'provider': provider,
+                'endpoint': endpoint,
+                'reason': reason
+            }
+        )
+
+    @staticmethod
+    def _get_suggestion(provider: str, endpoint: str) -> str:
+        """Get provider-specific suggestion."""
+        if provider.lower() == "ollama":
+            return (
+                f"Make sure Ollama is running:\n"
+                f" 1. Install Ollama: https://ollama.ai\n"
+                f" 2. Start Ollama service: ollama serve\n"
+                f" 3. Check it's running: curl {endpoint}"
+            )
+        elif provider.lower() == "groq":
+            return (
+                "Check your Groq API configuration:\n"
+                " 1. Get free API key: https://console.groq.com\n"
+                " 2. Set API key: assistant config set-api-key groq <your-key>\n"
+                " 3. Or set env var: GROQ_API_KEY=<your-key>"
+            )
+        elif provider.lower() == "together":
+            return (
+                "Check your Together AI configuration:\n"
+                " 1. Get API key ($25 free trial): https://api.together.xyz\n"
+                " 2. Set API key: assistant config set-api-key together <your-key>\n"
+                " 3. Or set env var: TOGETHER_API_KEY=<your-key>"
+            )
+        elif provider.lower() == "gemini":
+            return (
+                "Check your Google Gemini configuration:\n"
+                " 1. Get free API key: https://makersuite.google.com/app/apikey\n"
+                " 2. Set API key: assistant config set-api-key gemini <your-key>\n"
+                " 3. Or set env var: GEMINI_API_KEY=<your-key>"
+            )
+        elif provider.lower() in ["openai", "claude"]:
+            return (
+                f"Check your API configuration:\n"
+                f" 1. Verify API key is set\n"
+                f" 2. Check network connectivity\n"
+                f" 3. Verify endpoint: {endpoint}"
+            )
+        else:
+            return f"Check that {provider} is running at {endpoint}"
+
+
+class LLMResponseError(LLMError):
+    """
+    LLM returned invalid or error response.
+
+    Raised when LLM response is malformed or indicates an error.
+    """
+
+    def __init__(
+        self,
+        message: str,
+        provider: str,
+        status_code: Optional[int] = None,
+        response_text: Optional[str] = None
+    ):
+        """
+        Initialize response error.
+
+        Args:
+            message: Error message
+            provider: LLM provider
+            status_code: HTTP status code if applicable
+            response_text: Raw response text
+        """
+        suggestion = "Try again or switch to a different LLM provider"
+
+        if status_code == 429:
+            suggestion = "Rate limit exceeded. Wait a moment and try again."
+        elif status_code == 401:
+            suggestion = "Authentication failed. Check your API key."
+        elif status_code == 503:
+            suggestion = "Service unavailable. Try again later."
+
+        super().__init__(
+            message=message,
+            recoverable=True,
+            suggestion=suggestion,
+            details={
+                'provider': provider,
+                'status_code': status_code,
+                'response_preview': response_text[:200] if response_text else None
+            }
+        )
+
+
+class LLMTimeoutError(LLMError):
+    """
+    LLM request timed out.
+
+    Raised when LLM takes too long to respond.
+    """
+
+    def __init__(self, provider: str, timeout_seconds: int):
+        """
+        Initialize timeout error.
+
+        Args:
+            provider: LLM provider
+            timeout_seconds: Timeout duration
+        """
+        super().__init__(
+            message=f"LLM request to {provider} timed out after {timeout_seconds}s",
+            recoverable=True,
+            suggestion=(
+                "Try one of the following:\n"
+                " 1. Increase timeout in settings\n"
+                " 2. Reduce context size (use fewer code chunks)\n"
+                " 3. Try a faster model"
+            ),
+            details={
+                'provider': provider,
+                'timeout_seconds': timeout_seconds
+            }
+        )
+
+
+class LLMModelNotFoundError(LLMError):
+    """
+    Requested LLM model not found.
+
+    Raised when specified model is not available.
+    """
+
+    def __init__(self, provider: str, model: str, available_models: Optional[list] = None):
+        """
+        Initialize model not found error.
+
+        Args:
+            provider: LLM provider
+            model: Requested model name
+            available_models: List of available models
+        """
+        message = f"Model '{model}' not found for provider '{provider}'"
+
+        suggestion = f"Available models:\n"
+        if available_models:
+            for m in available_models[:5]:  # Show first 5
+                suggestion += f" - {m}\n"
+            if len(available_models) > 5:
+                suggestion += f" ... and {len(available_models) - 5} more"
+        else:
+            suggestion = f"Check model name or pull the model:\n ollama pull {model}"
+
+        super().__init__(
+            message=message,
+            recoverable=True,
+            suggestion=suggestion,
+            details={
+                'provider': provider,
+                'requested_model': model,
+                'available_models': available_models
+            }
+        )
+
+
+class LLMContextTooLargeError(LLMError):
+    """
+    Context exceeds model's token limit.
+
+    Raised when total context is too large for model.
+    """
+
+    def __init__(self, tokens_used: int, max_tokens: int, model: str):
+        """
+        Initialize context too large error.
+
+        Args:
+            tokens_used: Number of tokens used
+            max_tokens: Maximum tokens allowed
+            model: Model name
+        """
+        super().__init__(
+            message=(
+                f"Context size ({tokens_used:,} tokens) exceeds "
+                f"model limit ({max_tokens:,} tokens)"
+            ),
+            recoverable=True,
+            suggestion=(
+                "Reduce context size:\n"
+                " 1. Use fewer code chunks (--top-k parameter)\n"
+                " 2. Clear conversation history (/reset in chat)\n"
+                " 3. Use a model with larger context window"
+            ),
+            details={
+                'tokens_used': tokens_used,
+                'max_tokens': max_tokens,
+                'model': model,
+                'overflow': tokens_used - max_tokens
+            }
+        )
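
To show where these exception types might be raised, here is a minimal caller sketch assuming an HTTP client such as requests. The call_chat_api function, the endpoint, and the request payload are hypothetical illustrations, not taken from the package's own client modules.

# Illustrative sketch; call_chat_api, the endpoint, and the payload shape are hypothetical.
import requests

from coding_assistant.exceptions.llm import (
    LLMConnectionError,
    LLMResponseError,
    LLMTimeoutError,
)


def call_chat_api(prompt: str, endpoint: str = "http://localhost:11434/api/generate") -> str:
    try:
        resp = requests.post(endpoint, json={"prompt": prompt}, timeout=30)
    except requests.Timeout:
        raise LLMTimeoutError(provider="ollama", timeout_seconds=30)
    except requests.ConnectionError as exc:
        # Carries a provider-specific setup suggestion (ollama serve, API keys, ...).
        raise LLMConnectionError(provider="ollama", endpoint=endpoint, reason=str(exc))

    if resp.status_code != 200:
        # 429/401/503 are mapped to status-specific suggestions by LLMResponseError.
        raise LLMResponseError(
            message=f"Provider returned HTTP {resp.status_code}",
            provider="ollama",
            status_code=resp.status_code,
            response_text=resp.text,
        )
    return resp.text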
coding_assistant/exceptions/recovery.py
@@ -0,0 +1,263 @@
+"""Error recovery utilities."""
+
+import time
+from typing import Callable, Any, Optional, Type
+from functools import wraps
+from rich.console import Console
+
+from coding_assistant.exceptions.base import AssistantError
+
+console = Console()
+
+
+class ErrorRecovery:
+    """
+    Utilities for error recovery and retry logic.
+
+    Provides decorators and helpers for handling transient failures.
+    """
+
+    @staticmethod
+    def retry(
+        max_attempts: int = 3,
+        delay: float = 1.0,
+        backoff: float = 2.0,
+        exceptions: tuple = (Exception,),
+        on_retry: Optional[Callable] = None
+    ):
+        """
+        Retry decorator with exponential backoff.
+
+        Args:
+            max_attempts: Maximum number of attempts
+            delay: Initial delay between retries (seconds)
+            backoff: Backoff multiplier
+            exceptions: Exceptions to catch and retry
+            on_retry: Callback function called on each retry
+
+        Usage:
+            @ErrorRecovery.retry(max_attempts=3, delay=1.0)
+            def flaky_function():
+                # May fail occasionally
+                pass
+        """
+        def decorator(func: Callable) -> Callable:
+            @wraps(func)
+            def wrapper(*args, **kwargs) -> Any:
+                current_delay = delay
+
+                for attempt in range(1, max_attempts + 1):
+                    try:
+                        return func(*args, **kwargs)
+
+                    except exceptions as e:
+                        if attempt == max_attempts:
+                            # Last attempt failed, re-raise
+                            raise
+
+                        # Log retry
+                        if on_retry:
+                            on_retry(attempt, max_attempts, e)
+                        else:
+                            console.print(
+                                f"[yellow]⚠️ Attempt {attempt}/{max_attempts} failed: {e}[/yellow]"
+                            )
+                            console.print(
+                                f"[yellow] Retrying in {current_delay:.1f}s...[/yellow]"
+                            )
+
+                        # Wait before retry
+                        time.sleep(current_delay)
+                        current_delay *= backoff
+
+            return wrapper
+        return decorator
+
+    @staticmethod
+    def fallback(
+        fallback_func: Callable,
+        exceptions: tuple = (Exception,),
+        log_error: bool = True
+    ):
+        """
+        Fallback decorator - use fallback function on error.
+
+        Args:
+            fallback_func: Function to call on error
+            exceptions: Exceptions to catch
+            log_error: Whether to log the error
+
+        Usage:
+            def fallback_handler(*args, **kwargs):
+                return default_value
+
+            @ErrorRecovery.fallback(fallback_handler)
+            def risky_function():
+                # May fail
+                pass
+        """
+        def decorator(func: Callable) -> Callable:
+            @wraps(func)
+            def wrapper(*args, **kwargs) -> Any:
+                try:
+                    return func(*args, **kwargs)
+
+                except exceptions as e:
+                    if log_error:
+                        console.print(
+                            f"[yellow]⚠️ {func.__name__} failed: {e}[/yellow]"
+                        )
+                        console.print(
+                            f"[yellow] Using fallback...[/yellow]"
+                        )
+
+                    return fallback_func(*args, **kwargs)
+
+            return wrapper
+        return decorator
+
+    @staticmethod
+    def graceful_degradation(
+        default_value: Any = None,
+        exceptions: tuple = (Exception,),
+        log_error: bool = True
+    ):
+        """
+        Graceful degradation - return default value on error.
+
+        Args:
+            default_value: Value to return on error
+            exceptions: Exceptions to catch
+            log_error: Whether to log the error
+
+        Usage:
+            @ErrorRecovery.graceful_degradation(default_value=[])
+            def get_items():
+                # May fail
+                return items
+        """
+        def decorator(func: Callable) -> Callable:
+            @wraps(func)
+            def wrapper(*args, **kwargs) -> Any:
+                try:
+                    return func(*args, **kwargs)
+
+                except exceptions as e:
+                    if log_error:
+                        console.print(
+                            f"[yellow]⚠️ {func.__name__} failed: {e}[/yellow]"
+                        )
+                        console.print(
+                            f"[yellow] Returning default value: {default_value}[/yellow]"
+                        )
+
+                    return default_value
+
+            return wrapper
+        return decorator
+
+    @staticmethod
+    def handle_error(
+        error: Exception,
+        console: Optional[Console] = None,
+        exit_on_fatal: bool = True
+    ) -> None:
+        """
+        Handle error with appropriate user feedback.
+
+        Args:
+            error: Error to handle
+            console: Rich console for output
+            exit_on_fatal: Whether to exit on non-recoverable errors
+        """
+        if console is None:
+            console = Console()
+
+        # Check if it's our custom error
+        if isinstance(error, AssistantError):
+            # Show formatted error message
+            console.print(error.format_message())
+
+            # Exit if not recoverable
+            if not error.recoverable and exit_on_fatal:
+                console.print("\n[red]Exiting due to fatal error.[/red]")
+                raise SystemExit(1)
+
+        else:
+            # Generic error
+            console.print(f"[red]❌ Unexpected error: {error}[/red]")
+            console.print(f"[dim]Error type: {type(error).__name__}[/dim]")
+
+            if exit_on_fatal:
+                console.print("\n[yellow]💡 Suggestion: Run with --verbose for more details[/yellow]")
+                raise SystemExit(1)
+
+    @staticmethod
+    def safe_execute(
+        func: Callable,
+        *args,
+        default_value: Any = None,
+        log_error: bool = True,
+        **kwargs
+    ) -> Any:
+        """
+        Safely execute function, returning default value on error.
+
+        Args:
+            func: Function to execute
+            *args: Function arguments
+            default_value: Value to return on error
+            log_error: Whether to log errors
+            **kwargs: Function keyword arguments
+
+        Returns:
+            Function result or default value
+        """
+        try:
+            return func(*args, **kwargs)
+
+        except Exception as e:
+            if log_error:
+                console.print(
+                    f"[yellow]⚠️ {func.__name__} failed: {e}[/yellow]"
+                )
+
+            return default_value
+
+    @staticmethod
+    def validate_or_exit(
+        condition: bool,
+        error_message: str,
+        suggestion: Optional[str] = None
+    ) -> None:
+        """
+        Validate condition or exit with error message.
+
+        Args:
+            condition: Condition to check
+            error_message: Error message if condition fails
+            suggestion: Optional suggestion
+        """
+        if not condition:
+            console.print(f"[red]❌ Error: {error_message}[/red]")
+
+            if suggestion:
+                console.print(f"[yellow]💡 Suggestion: {suggestion}[/yellow]")
+
+            raise SystemExit(1)
+
+
+# Convenience decorators
+def retry_on_failure(max_attempts: int = 3, delay: float = 1.0):
+    """Shorthand for retry decorator."""
+    return ErrorRecovery.retry(max_attempts=max_attempts, delay=delay)
+
+
+def with_fallback(fallback_func: Callable):
+    """Shorthand for fallback decorator."""
+    return ErrorRecovery.fallback(fallback_func)
+
+
+def ignore_errors(default_value: Any = None):
+    """Shorthand for graceful degradation decorator."""
+    return ErrorRecovery.graceful_degradation(default_value=default_value)
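
Putting the pieces together, a minimal sketch of how the retry shorthand and handle_error from this module might be combined with the LLM exceptions above. The fetch_embeddings function and its forced failure are hypothetical, included only to show the control flow.

# Illustrative sketch; fetch_embeddings and its failure mode are hypothetical.
from coding_assistant.exceptions.llm import LLMConnectionError
from coding_assistant.exceptions.recovery import ErrorRecovery, retry_on_failure


@retry_on_failure(max_attempts=3, delay=0.5)  # waits 0.5s, then 1.0s (x2 backoff), then re-raises
def fetch_embeddings(chunks):
    # Stand-in for a call to a flaky local model server.
    raise LLMConnectionError("ollama", "http://localhost:11434", reason="connection refused")


try:
    fetch_embeddings(["def foo(): ..."])
except Exception as err:
    # Prints the Rich-formatted message; recoverable AssistantErrors never trigger SystemExit.
    ErrorRecovery.handle_error(err, exit_on_fatal=False)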