pygeai-orchestration 0.1.0b2 (pygeai_orchestration-0.1.0b2-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pygeai_orchestration/__init__.py +99 -0
- pygeai_orchestration/cli/__init__.py +7 -0
- pygeai_orchestration/cli/__main__.py +11 -0
- pygeai_orchestration/cli/commands/__init__.py +13 -0
- pygeai_orchestration/cli/commands/base.py +192 -0
- pygeai_orchestration/cli/error_handler.py +123 -0
- pygeai_orchestration/cli/formatters.py +419 -0
- pygeai_orchestration/cli/geai_orch.py +270 -0
- pygeai_orchestration/cli/interactive.py +265 -0
- pygeai_orchestration/cli/texts/help.py +169 -0
- pygeai_orchestration/core/__init__.py +130 -0
- pygeai_orchestration/core/base/__init__.py +23 -0
- pygeai_orchestration/core/base/agent.py +121 -0
- pygeai_orchestration/core/base/geai_agent.py +144 -0
- pygeai_orchestration/core/base/geai_orchestrator.py +77 -0
- pygeai_orchestration/core/base/orchestrator.py +142 -0
- pygeai_orchestration/core/base/pattern.py +161 -0
- pygeai_orchestration/core/base/tool.py +149 -0
- pygeai_orchestration/core/common/__init__.py +18 -0
- pygeai_orchestration/core/common/context.py +140 -0
- pygeai_orchestration/core/common/memory.py +176 -0
- pygeai_orchestration/core/common/message.py +50 -0
- pygeai_orchestration/core/common/state.py +181 -0
- pygeai_orchestration/core/composition.py +190 -0
- pygeai_orchestration/core/config.py +356 -0
- pygeai_orchestration/core/exceptions.py +400 -0
- pygeai_orchestration/core/handlers.py +380 -0
- pygeai_orchestration/core/utils/__init__.py +37 -0
- pygeai_orchestration/core/utils/cache.py +138 -0
- pygeai_orchestration/core/utils/config.py +94 -0
- pygeai_orchestration/core/utils/logging.py +57 -0
- pygeai_orchestration/core/utils/metrics.py +184 -0
- pygeai_orchestration/core/utils/validators.py +140 -0
- pygeai_orchestration/dev/__init__.py +15 -0
- pygeai_orchestration/dev/debug.py +288 -0
- pygeai_orchestration/dev/templates.py +321 -0
- pygeai_orchestration/dev/testing.py +301 -0
- pygeai_orchestration/patterns/__init__.py +15 -0
- pygeai_orchestration/patterns/multi_agent.py +237 -0
- pygeai_orchestration/patterns/planning.py +219 -0
- pygeai_orchestration/patterns/react.py +221 -0
- pygeai_orchestration/patterns/reflection.py +134 -0
- pygeai_orchestration/patterns/tool_use.py +170 -0
- pygeai_orchestration/tests/__init__.py +1 -0
- pygeai_orchestration/tests/test_base_classes.py +187 -0
- pygeai_orchestration/tests/test_cache.py +184 -0
- pygeai_orchestration/tests/test_cli_formatters.py +232 -0
- pygeai_orchestration/tests/test_common.py +214 -0
- pygeai_orchestration/tests/test_composition.py +265 -0
- pygeai_orchestration/tests/test_config.py +301 -0
- pygeai_orchestration/tests/test_dev_utils.py +337 -0
- pygeai_orchestration/tests/test_exceptions.py +327 -0
- pygeai_orchestration/tests/test_handlers.py +307 -0
- pygeai_orchestration/tests/test_metrics.py +171 -0
- pygeai_orchestration/tests/test_patterns.py +165 -0
- pygeai_orchestration-0.1.0b2.dist-info/METADATA +290 -0
- pygeai_orchestration-0.1.0b2.dist-info/RECORD +61 -0
- pygeai_orchestration-0.1.0b2.dist-info/WHEEL +5 -0
- pygeai_orchestration-0.1.0b2.dist-info/entry_points.txt +2 -0
- pygeai_orchestration-0.1.0b2.dist-info/licenses/LICENSE +8 -0
- pygeai_orchestration-0.1.0b2.dist-info/top_level.txt +1 -0
pygeai_orchestration/core/handlers.py
@@ -0,0 +1,380 @@
"""
Error handling utilities for pygeai-orchestration.

This module provides centralized error handling functionality following
the PyGEAI pattern. The ErrorHandler class offers consistent error detection,
extraction, and processing across the orchestration package.
"""

from typing import Any, Dict, Optional
import logging

from .exceptions import OrchestrationError


logger = logging.getLogger("pygeai-orchestration")


class ErrorHandler:
    """
    Centralized error handler for orchestration operations.

    This class provides static methods for detecting, extracting, and handling
    errors in a consistent manner across the package. It follows the PyGEAI
    ErrorHandler pattern for uniform error processing.

    The ErrorHandler can process various response formats and extract error
    information, making it easier to handle errors from different sources
    (agents, tools, external APIs, etc.).

    Example:
        >>> response = {"error": "Agent timeout", "code": 500}
        >>> if ErrorHandler.has_errors(response):
        ...     error = ErrorHandler.extract_error(response)
        ...     ErrorHandler.handle_error(error, context={"operation": "generate"})
    """

    @staticmethod
    def has_errors(response: Any) -> bool:
        """
        Check if a response contains error information.

        Detects errors in various response formats including:
        - Dictionary with 'error' or 'errors' key
        - Dictionary with 'success': False
        - Dictionary with 'status' indicating error
        - Exception objects

        :param response: Any - The response to check for errors.
        :return: bool - True if errors are detected, False otherwise.

        Example:
            >>> ErrorHandler.has_errors({"error": "Failed"})
            True
            >>> ErrorHandler.has_errors({"success": True})
            False
        """
        if response is None:
            return False

        if isinstance(response, Exception):
            return True

        if isinstance(response, dict):
            # Check for explicit error keys
            if "error" in response and response["error"]:
                return True
            if "errors" in response and response["errors"]:
                return True

            # Check for success flag
            if "success" in response and response["success"] is False:
                return True

            # Check for error status codes
            if "status" in response:
                status = response["status"]
                if isinstance(status, int) and status >= 400:
                    return True
                if isinstance(status, str) and status.lower() in ["error", "failed", "failure"]:
                    return True

            # Check for code indicating error
            if "code" in response:
                code = response["code"]
                if isinstance(code, int) and code >= 400:
                    return True

        return False

    @staticmethod
    def extract_error(response: Any) -> str:
        """
        Extract error message from a response.

        Attempts to extract a meaningful error message from various response
        formats. Returns a descriptive error message or a generic message if
        the specific error cannot be determined.

        :param response: Any - The response containing error information.
        :return: str - The extracted error message.

        Example:
            >>> response = {"error": "Connection timeout", "code": 504}
            >>> ErrorHandler.extract_error(response)
            'Connection timeout'
        """
        if response is None:
            return "Unknown error: None response"

        if isinstance(response, Exception):
            return str(response)

        if isinstance(response, str):
            return response

        if isinstance(response, dict):
            # Try to extract from common error keys
            if "error" in response:
                error = response["error"]
                if isinstance(error, dict):
                    # Handle nested error object
                    return error.get("message", str(error))
                return str(error)

            if "errors" in response:
                errors = response["errors"]
                if isinstance(errors, list) and errors:
                    # Return first error or join multiple
                    if len(errors) == 1:
                        return str(errors[0])
                    return "; ".join(str(e) for e in errors)
                return str(errors)

            if "message" in response:
                return str(response["message"])

            # Try to construct message from status/code
            if "status" in response or "code" in response:
                parts = []
                if "status" in response:
                    parts.append(f"Status: {response['status']}")
                if "code" in response:
                    parts.append(f"Code: {response['code']}")
                if parts:
                    return " | ".join(parts)

            # Last resort: stringify the entire response
            return f"Error in response: {response}"

        return f"Unknown error: {type(response).__name__}"

    @staticmethod
    def handle_error(
        error: Any, context: Optional[Dict[str, Any]] = None, log_level: int = logging.ERROR
    ) -> str:
        """
        Handle an error with logging and context.

        Processes an error by extracting the error message, logging it with
        context, and returning a formatted error message. This provides a
        centralized point for error processing across the package.

        :param error: Any - The error to handle (Exception, dict, or string).
        :param context: Optional[Dict[str, Any]] - Additional context for the error.
        :param log_level: int - Logging level to use. Defaults to logging.ERROR.
        :return: str - The formatted error message.

        Example:
            >>> error = {"error": "Pattern failed", "iteration": 5}
            >>> msg = ErrorHandler.handle_error(
            ...     error,
            ...     context={"pattern": "reflection", "task": "summarize"},
            ...     log_level=logging.WARNING
            ... )
        """
        # Extract error message
        error_message = ErrorHandler.extract_error(error)

        # Build context string
        context_str = ""
        if context:
            context_parts = [f"{k}={v}" for k, v in context.items()]
            context_str = f" [{', '.join(context_parts)}]"

        # Format full message
        full_message = f"{error_message}{context_str}"

        # Log the error
        logger.log(log_level, full_message)

        return full_message

    @staticmethod
    def wrap_exception(
        error: Exception,
        exception_class: type = OrchestrationError,
        message_prefix: Optional[str] = None,
        **kwargs,
    ) -> OrchestrationError:
        """
        Wrap an exception in a custom orchestration exception.

        Converts generic exceptions into specific orchestration exceptions
        while preserving the original error message and adding context.

        :param error: Exception - The original exception to wrap.
        :param exception_class: type - The exception class to wrap with. Defaults to OrchestrationError.
        :param message_prefix: Optional[str] - Prefix to add to error message.
        :param kwargs: Additional keyword arguments for the exception class.
        :return: OrchestrationError - The wrapped exception.

        Example:
            >>> try:
            ...     risky_operation()
            ... except Exception as e:
            ...     raise ErrorHandler.wrap_exception(
            ...         e,
            ...         PatternExecutionError,
            ...         message_prefix="Pattern execution failed",
            ...         pattern_name="reflection",
            ...         iteration=3
            ...     )
        """
        original_message = str(error)

        if message_prefix:
            message = f"{message_prefix}: {original_message}"
        else:
            message = original_message

        # Create the wrapped exception
        wrapped = exception_class(message, **kwargs)

        # Preserve the original exception as context
        wrapped.__cause__ = error

        return wrapped

    @staticmethod
    def is_recoverable(error: Any) -> bool:
        """
        Determine if an error is potentially recoverable.

        Analyzes an error to determine if the operation could potentially
        succeed if retried. This helps in implementing retry logic.

        :param error: Any - The error to analyze.
        :return: bool - True if the error might be recoverable, False otherwise.

        Example:
            >>> error = {"error": "Timeout", "code": 504}
            >>> if ErrorHandler.is_recoverable(error):
            ...     retry_operation()
        """
        if isinstance(error, dict):
            # Check status/code for recoverable errors
            code = error.get("code", 0)
            if isinstance(code, int):
                # Timeout and rate limit errors are often recoverable
                if code in [408, 429, 503, 504]:
                    return True

            # Check for specific error types
            error_msg = str(ErrorHandler.extract_error(error)).lower()
            recoverable_keywords = [
                "timeout",
                "rate limit",
                "too many requests",
                "service unavailable",
                "connection",
                "temporary",
            ]

            return any(keyword in error_msg for keyword in recoverable_keywords)

        if isinstance(error, Exception):
            error_str = str(error).lower()
            recoverable_types = [
                "timeout",
                "connection",
                "temporary",
            ]
            return any(err_type in error_str for err_type in recoverable_types)

        return False

    @staticmethod
    def format_error_response(error: Any, include_details: bool = True) -> Dict[str, Any]:
        """
        Format an error as a standardized response dictionary.

        Converts various error formats into a consistent dictionary structure
        that can be used across the package.

        :param error: Any - The error to format.
        :param include_details: bool - Whether to include detailed error information.
        :return: Dict[str, Any] - Standardized error response.

        Example:
            >>> error = ValueError("Invalid input")
            >>> response = ErrorHandler.format_error_response(error)
            >>> print(response)
            {'success': False, 'error': 'Invalid input', 'error_type': 'ValueError'}
        """
        response = {
            "success": False,
            "error": ErrorHandler.extract_error(error),
        }

        if include_details:
            if isinstance(error, Exception):
                response["error_type"] = type(error).__name__
                if hasattr(error, "__cause__") and error.__cause__:
                    response["original_error"] = str(error.__cause__)

            if isinstance(error, dict):
                # Include additional fields from error dict
                for key in ["code", "status", "details"]:
                    if key in error:
                        response[key] = error[key]

        return response


class RetryHandler:
    """
    Handler for implementing retry logic with error handling.

    This class provides utilities for retrying operations that may fail
    transiently, with configurable retry strategies and error handling.

    Example:
        >>> handler = RetryHandler(max_retries=3, delay=1.0)
        >>> result = await handler.execute_with_retry(risky_operation, arg1, arg2)
    """

    def __init__(
        self,
        max_retries: int = 3,
        delay: float = 1.0,
        backoff_factor: float = 2.0,
        recoverable_only: bool = True,
    ):
        """
        Initialize the RetryHandler.

        :param max_retries: int - Maximum number of retry attempts. Defaults to 3.
        :param delay: float - Initial delay between retries in seconds. Defaults to 1.0.
        :param backoff_factor: float - Multiplier for delay on each retry. Defaults to 2.0.
        :param recoverable_only: bool - Only retry recoverable errors. Defaults to True.
        """
        self.max_retries = max_retries
        self.delay = delay
        self.backoff_factor = backoff_factor
        self.recoverable_only = recoverable_only

    def should_retry(self, error: Any, attempt: int) -> bool:
        """
        Determine if an operation should be retried.

        :param error: Any - The error that occurred.
        :param attempt: int - Current attempt number (0-indexed).
        :return: bool - True if should retry, False otherwise.
        """
        if attempt >= self.max_retries:
            return False

        if self.recoverable_only:
            return ErrorHandler.is_recoverable(error)

        return True

    def get_delay(self, attempt: int) -> float:
        """
        Calculate delay before next retry.

        :param attempt: int - Current attempt number (0-indexed).
        :return: float - Delay in seconds.
        """
        return self.delay * (self.backoff_factor**attempt)
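
A minimal usage sketch (not part of the package) showing how the two classes in this hunk could be combined, assuming they are importable from pygeai_orchestration.core.handlers as the +380 line count in the file listing suggests. The RetryHandler docstring mentions an async execute_with_retry helper that does not appear in this hunk, so the sketch drives the retry loop by hand with should_retry and get_delay; call_agent is a hypothetical placeholder for any callable that returns a response dict or raises.

# Hypothetical sketch: retry a flaky operation with ErrorHandler + RetryHandler.
import time

from pygeai_orchestration.core.handlers import ErrorHandler, RetryHandler


def call_agent() -> dict:
    # Placeholder operation; imagine this calls an external agent API.
    return {"success": True, "output": "done"}


def run_with_retries() -> dict:
    handler = RetryHandler(max_retries=3, delay=1.0, backoff_factor=2.0)
    attempt = 0
    while True:
        try:
            response = call_agent()
        except Exception as exc:  # any failure becomes a candidate for retry
            response = exc
        if not ErrorHandler.has_errors(response):
            return response
        if not handler.should_retry(response, attempt):
            # Give up: log with context and return a standardized error payload.
            ErrorHandler.handle_error(
                response, context={"operation": "call_agent", "attempt": attempt}
            )
            return ErrorHandler.format_error_response(response)
        time.sleep(handler.get_delay(attempt))  # 1s, 2s, 4s, ... exponential backoff
        attempt += 1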
pygeai_orchestration/core/utils/__init__.py
@@ -0,0 +1,37 @@
from .logging import OrchestrationLogger, get_logger
from .config import ConfigManager, get_config
from .validators import (
    ValidationResult,
    validate_agent_config,
    validate_pattern_config,
    validate_tool_config,
    validate_pydantic_model,
)
from .cache import CacheEntry, LRUCache, PatternCache
from .metrics import (
    Metric,
    MetricType,
    MetricsCollector,
    GlobalMetrics,
    TimerContext,
)

__all__ = [
    "OrchestrationLogger",
    "get_logger",
    "ConfigManager",
    "get_config",
    "ValidationResult",
    "validate_agent_config",
    "validate_pattern_config",
    "validate_tool_config",
    "validate_pydantic_model",
    "CacheEntry",
    "LRUCache",
    "PatternCache",
    "Metric",
    "MetricType",
    "MetricsCollector",
    "GlobalMetrics",
    "TimerContext",
]
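
Because the package __init__ above re-exports these names, downstream code can import the utilities from the package namespace rather than the individual modules. A brief, hedged example:

# Hypothetical sketch: importing re-exported utilities from the package namespace.
from pygeai_orchestration.core.utils import LRUCache, get_config, get_logger

cache = LRUCache(max_size=10)
config = get_config()
logger = get_logger()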
pygeai_orchestration/core/utils/cache.py
@@ -0,0 +1,138 @@
import hashlib
import json
import time
from collections import OrderedDict
from typing import Any, Optional


class CacheEntry:
    def __init__(self, value: Any, ttl: Optional[float] = None):
        self.value = value
        self.created_at = time.time()
        self.ttl = ttl
        self.access_count = 0
        self.last_accessed = self.created_at

    def is_expired(self) -> bool:
        if self.ttl is None:
            return False
        return (time.time() - self.created_at) > self.ttl

    def access(self) -> Any:
        self.access_count += 1
        self.last_accessed = time.time()
        return self.value


class LRUCache:
    def __init__(self, max_size: int = 100, default_ttl: Optional[float] = None):
        self.max_size = max_size
        self.default_ttl = default_ttl
        self._cache: OrderedDict[str, CacheEntry] = OrderedDict()
        self._hits = 0
        self._misses = 0

    def _generate_key(self, key_data: Any) -> str:
        if isinstance(key_data, str):
            return hashlib.sha256(key_data.encode()).hexdigest()
        try:
            serialized = json.dumps(key_data, sort_keys=True)
            return hashlib.sha256(serialized.encode()).hexdigest()
        except (TypeError, ValueError):
            return hashlib.sha256(str(key_data).encode()).hexdigest()

    def get(self, key: Any) -> Optional[Any]:
        cache_key = self._generate_key(key)

        if cache_key not in self._cache:
            self._misses += 1
            return None

        entry = self._cache[cache_key]

        if entry.is_expired():
            del self._cache[cache_key]
            self._misses += 1
            return None

        self._cache.move_to_end(cache_key)
        self._hits += 1
        return entry.access()

    def set(self, key: Any, value: Any, ttl: Optional[float] = None) -> None:
        cache_key = self._generate_key(key)

        if len(self._cache) >= self.max_size and cache_key not in self._cache:
            self._cache.popitem(last=False)

        effective_ttl = ttl if ttl is not None else self.default_ttl
        self._cache[cache_key] = CacheEntry(value, effective_ttl)
        self._cache.move_to_end(cache_key)

    def invalidate(self, key: Any) -> bool:
        cache_key = self._generate_key(key)
        if cache_key in self._cache:
            del self._cache[cache_key]
            return True
        return False

    def clear(self) -> None:
        self._cache.clear()
        self._hits = 0
        self._misses = 0

    def size(self) -> int:
        self._cleanup_expired()
        return len(self._cache)

    def _cleanup_expired(self) -> None:
        expired_keys = [
            key for key, entry in self._cache.items() if entry.is_expired()
        ]
        for key in expired_keys:
            del self._cache[key]

    def get_stats(self) -> dict[str, Any]:
        self._cleanup_expired()
        total_requests = self._hits + self._misses
        hit_rate = (self._hits / total_requests * 100) if total_requests > 0 else 0

        return {
            "size": len(self._cache),
            "max_size": self.max_size,
            "hits": self._hits,
            "misses": self._misses,
            "hit_rate": hit_rate,
            "total_requests": total_requests,
        }


class PatternCache:
    _instance: Optional["PatternCache"] = None
    _cache: Optional[LRUCache] = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._cache = None
        return cls._instance

    def initialize(
        self, max_size: int = 100, default_ttl: Optional[float] = 3600.0
    ) -> None:
        if self._cache is None:
            self._cache = LRUCache(max_size=max_size, default_ttl=default_ttl)

    def get_cache(self) -> LRUCache:
        if self._cache is None:
            self.initialize()
        return self._cache

    def clear(self) -> None:
        if self._cache is not None:
            self._cache.clear()

    @classmethod
    def reset(cls) -> None:
        cls._instance = None
        cls._cache = None
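
A hedged usage sketch for the cache module above (not part of the package). Keys may be any JSON-serializable value because _generate_key hashes them with SHA-256, so a dict of pattern parameters works as a key; entries expire after default_ttl seconds, and PatternCache is a process-wide singleton wrapping one shared LRUCache.

# Hypothetical sketch of LRUCache and PatternCache behavior.
import time

from pygeai_orchestration.core.utils.cache import LRUCache, PatternCache

cache = LRUCache(max_size=2, default_ttl=0.5)
cache.set({"pattern": "react", "task": "summarize"}, "cached result")
assert cache.get({"pattern": "react", "task": "summarize"}) == "cached result"

time.sleep(0.6)  # default_ttl is 0.5 s, so the entry has expired
assert cache.get({"pattern": "react", "task": "summarize"}) is None
print(cache.get_stats())  # size, max_size, hits, misses, hit_rate, total_requests

# Every PatternCache() call returns the same instance and shared LRUCache.
shared = PatternCache()
shared.initialize(max_size=100, default_ttl=3600.0)
assert shared.get_cache() is PatternCache().get_cache()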
pygeai_orchestration/core/utils/config.py
@@ -0,0 +1,94 @@
from typing import Any, Dict, Optional
from pathlib import Path
import json


class ConfigManager:
    _instance: Optional["ConfigManager"] = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        if not hasattr(self, "_initialized"):
            self._config: Dict[str, Any] = {}
            self._config_path: Optional[Path] = None
            self._load_defaults()
            self._initialized = True

    def _load_defaults(self) -> None:
        self._config = {
            "orchestration": {
                "max_iterations": 10,
                "timeout": 300.0,
                "verbose": False,
                "retry_attempts": 3,
            },
            "agent": {
                "temperature": 0.7,
                "max_tokens": 4096,
            },
            "logging": {
                "level": "info",
            },
        }

    def load_from_file(self, config_path: str) -> None:
        path = Path(config_path)
        if not path.exists():
            raise FileNotFoundError(f"Config file not found: {config_path}")

        with open(path, "r") as f:
            file_config = json.load(f)

        self._merge_config(file_config)
        self._config_path = path

    def _merge_config(self, new_config: Dict[str, Any]) -> None:
        for key, value in new_config.items():
            if (
                key in self._config
                and isinstance(self._config[key], dict)
                and isinstance(value, dict)
            ):
                self._config[key].update(value)
            else:
                self._config[key] = value

    def get(self, key: str, default: Any = None) -> Any:
        keys = key.split(".")
        value = self._config
        for k in keys:
            if isinstance(value, dict):
                value = value.get(k)
                if value is None:
                    return default
            else:
                return default
        return value

    def set(self, key: str, value: Any) -> None:
        keys = key.split(".")
        config = self._config
        for k in keys[:-1]:
            if k not in config:
                config[k] = {}
            config = config[k]
        config[keys[-1]] = value

    def get_all(self) -> Dict[str, Any]:
        return self._config.copy()

    def save_to_file(self, config_path: Optional[str] = None) -> None:
        path = Path(config_path) if config_path else self._config_path
        if path is None:
            raise ValueError("No config path specified")

        with open(path, "w") as f:
            json.dump(self._config, f, indent=2)


def get_config() -> ConfigManager:
    return ConfigManager()
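
A hedged sketch of ConfigManager usage (not part of the package): dot-separated keys walk the nested defaults defined in _load_defaults, missing keys fall back to a default, and get_config() always hands back the same singleton instance.

# Hypothetical sketch of dotted-key access on the ConfigManager singleton.
from pygeai_orchestration.core.utils.config import get_config

config = get_config()
assert config.get("orchestration.max_iterations") == 10
assert config.get("agent.temperature") == 0.7
assert config.get("missing.key", default="fallback") == "fallback"

config.set("agent.temperature", 0.2)        # update a nested value in place
config.set("patterns.react.max_steps", 8)   # intermediate dicts are created as needed
assert get_config() is config               # singleton: same instance everywhere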
pygeai_orchestration/core/utils/logging.py
@@ -0,0 +1,57 @@
import logging
import sys
from typing import Optional


class OrchestrationLogger:
    _instance: Optional["OrchestrationLogger"] = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        if not hasattr(self, "_initialized"):
            self._logger = logging.getLogger("pygeai_orchestration")
            self._logger.setLevel(logging.INFO)

            if not self._logger.handlers:
                handler = logging.StreamHandler(sys.stdout)
                formatter = logging.Formatter(
                    "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                )
                handler.setFormatter(formatter)
                self._logger.addHandler(handler)

            self._initialized = True

    def set_level(self, level: str) -> None:
        level_map = {
            "debug": logging.DEBUG,
            "info": logging.INFO,
            "warning": logging.WARNING,
            "error": logging.ERROR,
            "critical": logging.CRITICAL,
        }
        self._logger.setLevel(level_map.get(level.lower(), logging.INFO))

    def debug(self, message: str, **kwargs) -> None:
        self._logger.debug(message, **kwargs)

    def info(self, message: str, **kwargs) -> None:
        self._logger.info(message, **kwargs)

    def warning(self, message: str, **kwargs) -> None:
        self._logger.warning(message, **kwargs)

    def error(self, message: str, **kwargs) -> None:
        self._logger.error(message, **kwargs)

    def critical(self, message: str, **kwargs) -> None:
        self._logger.critical(message, **kwargs)


def get_logger() -> OrchestrationLogger:
    return OrchestrationLogger()
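
A short, hedged sketch of the logger above (not part of the package): the singleton writes timestamped records to stdout, set_level accepts level names case-insensitively, and repeated get_logger() calls share one configured logger. The sample output line in the comment is illustrative only.

# Hypothetical sketch of OrchestrationLogger usage.
from pygeai_orchestration.core.utils.logging import get_logger

log = get_logger()
log.set_level("debug")             # accepts debug/info/warning/error/critical
log.info("orchestration started")  # e.g. "2025-06-01 12:00:00 - pygeai_orchestration - INFO - orchestration started"
assert get_logger() is log         # singleton: one shared, configured logger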