aiecs-1.0.0-py3-none-any.whl
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of aiecs might be problematic.
- aiecs/__init__.py +75 -0
- aiecs/__main__.py +41 -0
- aiecs/aiecs_client.py +295 -0
- aiecs/application/__init__.py +10 -0
- aiecs/application/executors/__init__.py +10 -0
- aiecs/application/executors/operation_executor.py +341 -0
- aiecs/config/__init__.py +15 -0
- aiecs/config/config.py +117 -0
- aiecs/config/registry.py +19 -0
- aiecs/core/__init__.py +46 -0
- aiecs/core/interface/__init__.py +34 -0
- aiecs/core/interface/execution_interface.py +150 -0
- aiecs/core/interface/storage_interface.py +214 -0
- aiecs/domain/__init__.py +20 -0
- aiecs/domain/context/__init__.py +28 -0
- aiecs/domain/context/content_engine.py +982 -0
- aiecs/domain/context/conversation_models.py +306 -0
- aiecs/domain/execution/__init__.py +12 -0
- aiecs/domain/execution/model.py +49 -0
- aiecs/domain/task/__init__.py +13 -0
- aiecs/domain/task/dsl_processor.py +460 -0
- aiecs/domain/task/model.py +50 -0
- aiecs/domain/task/task_context.py +257 -0
- aiecs/infrastructure/__init__.py +26 -0
- aiecs/infrastructure/messaging/__init__.py +13 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +341 -0
- aiecs/infrastructure/messaging/websocket_manager.py +289 -0
- aiecs/infrastructure/monitoring/__init__.py +12 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +138 -0
- aiecs/infrastructure/monitoring/structured_logger.py +50 -0
- aiecs/infrastructure/monitoring/tracing_manager.py +376 -0
- aiecs/infrastructure/persistence/__init__.py +12 -0
- aiecs/infrastructure/persistence/database_manager.py +286 -0
- aiecs/infrastructure/persistence/file_storage.py +671 -0
- aiecs/infrastructure/persistence/redis_client.py +162 -0
- aiecs/llm/__init__.py +54 -0
- aiecs/llm/base_client.py +99 -0
- aiecs/llm/client_factory.py +339 -0
- aiecs/llm/custom_callbacks.py +228 -0
- aiecs/llm/openai_client.py +125 -0
- aiecs/llm/vertex_client.py +186 -0
- aiecs/llm/xai_client.py +184 -0
- aiecs/main.py +351 -0
- aiecs/scripts/DEPENDENCY_SYSTEM_SUMMARY.md +241 -0
- aiecs/scripts/README_DEPENDENCY_CHECKER.md +309 -0
- aiecs/scripts/README_WEASEL_PATCH.md +126 -0
- aiecs/scripts/__init__.py +3 -0
- aiecs/scripts/dependency_checker.py +825 -0
- aiecs/scripts/dependency_fixer.py +348 -0
- aiecs/scripts/download_nlp_data.py +348 -0
- aiecs/scripts/fix_weasel_validator.py +121 -0
- aiecs/scripts/fix_weasel_validator.sh +82 -0
- aiecs/scripts/patch_weasel_library.sh +188 -0
- aiecs/scripts/quick_dependency_check.py +269 -0
- aiecs/scripts/run_weasel_patch.sh +41 -0
- aiecs/scripts/setup_nlp_data.sh +217 -0
- aiecs/tasks/__init__.py +2 -0
- aiecs/tasks/worker.py +111 -0
- aiecs/tools/__init__.py +196 -0
- aiecs/tools/base_tool.py +202 -0
- aiecs/tools/langchain_adapter.py +361 -0
- aiecs/tools/task_tools/__init__.py +82 -0
- aiecs/tools/task_tools/chart_tool.py +704 -0
- aiecs/tools/task_tools/classfire_tool.py +901 -0
- aiecs/tools/task_tools/image_tool.py +397 -0
- aiecs/tools/task_tools/office_tool.py +600 -0
- aiecs/tools/task_tools/pandas_tool.py +565 -0
- aiecs/tools/task_tools/report_tool.py +499 -0
- aiecs/tools/task_tools/research_tool.py +363 -0
- aiecs/tools/task_tools/scraper_tool.py +548 -0
- aiecs/tools/task_tools/search_api.py +7 -0
- aiecs/tools/task_tools/stats_tool.py +513 -0
- aiecs/tools/temp_file_manager.py +126 -0
- aiecs/tools/tool_executor/__init__.py +35 -0
- aiecs/tools/tool_executor/tool_executor.py +518 -0
- aiecs/utils/LLM_output_structor.py +409 -0
- aiecs/utils/__init__.py +23 -0
- aiecs/utils/base_callback.py +50 -0
- aiecs/utils/execution_utils.py +158 -0
- aiecs/utils/logging.py +1 -0
- aiecs/utils/prompt_loader.py +13 -0
- aiecs/utils/token_usage_repository.py +279 -0
- aiecs/ws/__init__.py +0 -0
- aiecs/ws/socket_server.py +41 -0
- aiecs-1.0.0.dist-info/METADATA +610 -0
- aiecs-1.0.0.dist-info/RECORD +90 -0
- aiecs-1.0.0.dist-info/WHEEL +5 -0
- aiecs-1.0.0.dist-info/entry_points.txt +7 -0
- aiecs-1.0.0.dist-info/licenses/LICENSE +225 -0
- aiecs-1.0.0.dist-info/top_level.txt +1 -0
aiecs/tools/tool_executor/tool_executor.py

@@ -0,0 +1,518 @@

import os
import asyncio
import functools
import hashlib
import inspect
import json
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Type, Union, get_type_hints
from contextlib import contextmanager

from cachetools import LRUCache
from aiecs.utils.execution_utils import ExecutionUtils
import re
from pydantic import BaseModel, ValidationError, ConfigDict

logger = logging.getLogger(__name__)

# Base exception hierarchy
class ToolExecutionError(Exception):
    """Base exception for all tool execution errors."""
    pass

class InputValidationError(ToolExecutionError):
    """Error in validating input parameters."""
    pass

class SecurityError(ToolExecutionError):
    """Security-related error."""
    pass

class OperationError(ToolExecutionError):
    """Error during operation execution."""
    pass

class TimeoutError(ToolExecutionError):
    """Operation timed out."""
    pass

# Configuration for the executor
class ExecutorConfig(BaseModel):
    """
    Configuration for the ToolExecutor.

    Attributes:
        enable_cache (bool): Enable caching of operation results.
        cache_size (int): Maximum number of cache entries.
        cache_ttl (int): Cache time-to-live in seconds.
        max_workers (int): Maximum number of thread pool workers.
        io_concurrency (int): Maximum concurrent I/O operations.
        chunk_size (int): Chunk size for processing large data.
        max_file_size (int): Maximum file size in bytes.
        log_level (str): Logging level (e.g., 'INFO', 'DEBUG').
        log_execution_time (bool): Log execution time for operations.
        enable_security_checks (bool): Enable security checks for inputs.
        retry_attempts (int): Number of retry attempts for transient errors.
        retry_backoff (float): Backoff factor for retries.
        timeout (int): Timeout for operations in seconds.
    """
    enable_cache: bool = True
    cache_size: int = 100
    cache_ttl: int = 3600
    max_workers: int = 4
    io_concurrency: int = 8
    chunk_size: int = 10000
    max_file_size: int = 1000000
    log_level: str = "INFO"
    log_execution_time: bool = True
    enable_security_checks: bool = True
    retry_attempts: int = 3
    retry_backoff: float = 1.0
    timeout: int = 30

    model_config = ConfigDict(env_prefix="TOOL_EXECUTOR_")

# Metrics counter
class ExecutorMetrics:
    """
    Tracks executor performance metrics.
    """
    def __init__(self):
        self.requests: int = 0
        self.failures: int = 0
        self.cache_hits: int = 0
        self.processing_times: List[float] = []

    def record_request(self, processing_time: float):
        self.requests += 1
        self.processing_times.append(processing_time)

    def record_failure(self):
        self.failures += 1

    def record_cache_hit(self):
        self.cache_hits += 1

    def to_dict(self) -> Dict[str, Any]:
        return {
            'requests': self.requests,
            'failures': self.failures,
            'cache_hits': self.cache_hits,
            'avg_processing_time': sum(self.processing_times) / len(self.processing_times) if self.processing_times else 0.0
        }

# Decorators for tool methods
def validate_input(schema_class: Type[BaseModel]) -> Callable:
    """
    Decorator to validate input using a Pydantic schema.

    Args:
        schema_class (Type[BaseModel]): Pydantic schema class for validation.

    Returns:
        Callable: Decorated function with validated inputs.

    Raises:
        InputValidationError: If input validation fails.
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                schema = schema_class(**kwargs)
                validated_kwargs = schema.model_dump(exclude_unset=True)
                return func(self, **validated_kwargs)
            except ValidationError as e:
                raise InputValidationError(f"Invalid input parameters: {e}")
        return wrapper
    return decorator

def cache_result(ttl: Optional[int] = None) -> Callable:
    """
    Decorator to cache function results with optional TTL.

    Args:
        ttl (Optional[int]): Time-to-live for cache entry in seconds.

    Returns:
        Callable: Decorated function with caching.
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if not hasattr(self, '_executor') or not self._executor.config.enable_cache:
                return func(self, *args, **kwargs)
            cache_key = self._executor._get_cache_key(func.__name__, args, kwargs)
            result = self._executor._get_from_cache(cache_key)
            if result is not None:
                logger.debug(f"Cache hit for {func.__name__}")
                self._executor._metrics.record_cache_hit()
                return result
            result = func(self, *args, **kwargs)
            self._executor._add_to_cache(cache_key, result, ttl)
            return result
        return wrapper
    return decorator

def run_in_executor(func: Callable) -> Callable:
    """
    Decorator to run a synchronous function in the thread pool executor.

    Args:
        func (Callable): Function to execute.

    Returns:
        Callable: Async wrapper for the function.
    """
    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        if not hasattr(self, '_executor'):
            return await func(self, *args, **kwargs)
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            self._executor._thread_pool,
            functools.partial(func, self, *args, **kwargs)
        )
    return wrapper

def measure_execution_time(func: Callable) -> Callable:
    """
    Decorator to measure and log execution time.

    Args:
        func (Callable): Function to measure.

    Returns:
        Callable: Decorated function with timing.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, '_executor') or not self._executor.config.log_execution_time:
            return func(self, *args, **kwargs)
        start_time = time.time()
        try:
            result = func(self, *args, **kwargs)
            execution_time = time.time() - start_time
            logger.info(f"{func.__name__} executed in {execution_time:.4f} seconds")
            return result
        except Exception as e:
            execution_time = time.time() - start_time
            logger.error(f"{func.__name__} failed after {execution_time:.4f} seconds: {e}")
            raise
    return wrapper

def sanitize_input(func: Callable) -> Callable:
    """
    Decorator to sanitize input parameters for security.

    Args:
        func (Callable): Function to sanitize inputs for.

    Returns:
        Callable: Decorated function with sanitized inputs.
    """
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, '_executor') or not self._executor.config.enable_security_checks:
            return func(self, *args, **kwargs)
        sanitized_kwargs = {}
        for k, v in kwargs.items():
            if isinstance(v, str) and re.search(r'(\bSELECT\b|\bINSERT\b|--|;|/\*)', v, re.IGNORECASE):
                raise SecurityError(f"Input parameter '{k}' contains potentially malicious content")
            sanitized_kwargs[k] = v
        return func(self, *args, **sanitized_kwargs)
    return wrapper

class ToolExecutor:
    """
    Centralized executor for tool operations, handling:
    - Input validation
    - Caching with TTL and content-based keys
    - Concurrency with dynamic thread pool
    - Error handling with retries
    - Performance optimization with metrics
    - Structured logging

    Example:
        executor = ToolExecutor(config={'max_workers': 8})
        result = executor.execute(tool_instance, 'operation_name', param1='value')
    """
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the executor with optional configuration.

        Args:
            config (Dict[str, Any], optional): Configuration overrides for ExecutorConfig.

        Raises:
            ValueError: If config is invalid.
        """
        self.config = ExecutorConfig(**(config or {}))
        logging.basicConfig(
            level=getattr(logging, self.config.log_level),
            format='%(asctime)s %(levelname)s %(name)s: %(message)s'
        )
        self._thread_pool = ThreadPoolExecutor(max_workers=max(os.cpu_count() or 4, self.config.max_workers))
        self._locks: Dict[str, threading.Lock] = {}
        self._metrics = ExecutorMetrics()
        self.execution_utils = ExecutionUtils(
            cache_size=self.config.cache_size,
            cache_ttl=self.config.cache_ttl,
            retry_attempts=self.config.retry_attempts,
            retry_backoff=self.config.retry_backoff
        )

    def _get_cache_key(self, func_name: str, args: tuple, kwargs: Dict[str, Any]) -> str:
        """
        Generate a context-aware cache key from function name, user ID, task ID, and arguments.

        Args:
            func_name (str): Name of the function.
            args (tuple): Positional arguments.
            kwargs (Dict[str, Any]): Keyword arguments.

        Returns:
            str: Cache key.
        """
        user_id = kwargs.get("user_id", "anonymous")
        task_id = kwargs.get("task_id", "none")
        return self.execution_utils.generate_cache_key(func_name, user_id, task_id, args, kwargs)

    def _get_from_cache(self, cache_key: str) -> Optional[Any]:
        """
        Get a result from cache if it exists and is not expired.

        Args:
            cache_key (str): Cache key.

        Returns:
            Optional[Any]: Cached result or None.
        """
        if not self.config.enable_cache:
            return None
        return self.execution_utils.get_from_cache(cache_key)

    def _add_to_cache(self, cache_key: str, result: Any, ttl: Optional[int] = None) -> None:
        """
        Add a result to the cache with optional TTL.

        Args:
            cache_key (str): Cache key.
            result (Any): Result to cache.
            ttl (Optional[int]): Time-to-live in seconds.
        """
        if not self.config.enable_cache:
            return
        self.execution_utils.add_to_cache(cache_key, result, ttl)

    def get_lock(self, resource_id: str) -> threading.Lock:
        """
        Get or create a lock for a specific resource.

        Args:
            resource_id (str): Resource identifier.

        Returns:
            threading.Lock: Lock for the resource.
        """
        if resource_id not in self._locks:
            self._locks[resource_id] = threading.Lock()
        return self._locks[resource_id]

    def get_metrics(self) -> Dict[str, Any]:
        """
        Get current executor metrics.

        Returns:
            Dict[str, Any]: Metrics including request count, failures, cache hits, and average processing time.
        """
        return self._metrics.to_dict()

    @contextmanager
    def timeout_context(self, seconds: int):
        """
        Context manager for enforcing operation timeouts.

        Args:
            seconds (int): Timeout duration in seconds.

        Raises:
            TimeoutError: If operation exceeds timeout.
        """
        return self.execution_utils.timeout_context(seconds)

    async def _retry_operation(self, func: Callable, *args, **kwargs) -> Any:
        """
        Execute an operation with retries for transient errors.

        Args:
            func (Callable): Function to execute.
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            Any: Result of the operation.

        Raises:
            OperationError: If all retries fail.
        """
        return await self.execution_utils.execute_with_retry_and_timeout(func, self.config.timeout, *args, **kwargs)

    def execute(self, tool_instance: Any, operation: str, **kwargs) -> Any:
        """
        Execute a synchronous tool operation with parameters.

        Args:
            tool_instance (Any): The tool instance to execute the operation on.
            operation (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        method = getattr(tool_instance, operation, None)
        if not method or not callable(method) or operation.startswith('_'):
            available_ops = [m for m in dir(tool_instance) if not m.startswith('_') and callable(getattr(tool_instance, m))]
            raise ToolExecutionError(f"Unsupported operation: {operation}. Available operations: {', '.join(available_ops)}")
        logger.info(f"Executing {tool_instance.__class__.__name__}.{operation} with params: {kwargs}")
        start_time = time.time()
        try:
            # Sanitize inputs
            if self.config.enable_security_checks:
                for k, v in kwargs.items():
                    if isinstance(v, str) and re.search(r'(\bSELECT\b|\bINSERT\b|--|;|/\*)', v, re.IGNORECASE):
                        raise SecurityError(f"Input parameter '{k}' contains potentially malicious content")
            # Use cache if enabled
            if self.config.enable_cache:
                cache_key = self._get_cache_key(operation, (), kwargs)
                cached_result = self._get_from_cache(cache_key)
                if cached_result is not None:
                    self._metrics.record_cache_hit()
                    logger.debug(f"Cache hit for {operation}")
                    return cached_result

            result = method(**kwargs)
            self._metrics.record_request(time.time() - start_time)
            if self.config.log_execution_time:
                logger.info(f"{tool_instance.__class__.__name__}.{operation} executed in {time.time() - start_time:.4f} seconds")

            # Cache result if enabled
            if self.config.enable_cache:
                self._add_to_cache(cache_key, result)
            return result
        except Exception as e:
            self._metrics.record_failure()
            logger.error(f"Error executing {tool_instance.__class__.__name__}.{operation}: {str(e)}", exc_info=True)
            raise OperationError(f"Error executing {operation}: {str(e)}") from e

    async def execute_async(self, tool_instance: Any, operation: str, **kwargs) -> Any:
        """
        Execute an asynchronous tool operation with parameters.

        Args:
            tool_instance (Any): The tool instance to execute the operation on.
            operation (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        method = getattr(tool_instance, operation, None)
        if not method or not callable(method) or operation.startswith('_'):
            available_ops = [m for m in dir(tool_instance) if not m.startswith('_') and callable(getattr(tool_instance, m))]
            raise ToolExecutionError(f"Unsupported operation: {operation}. Available operations: {', '.join(available_ops)}")
        is_async = inspect.iscoroutinefunction(method)
        logger.info(f"Executing async {tool_instance.__class__.__name__}.{operation} with params: {kwargs}")
        start_time = time.time()
        try:
            # Sanitize inputs
            if self.config.enable_security_checks:
                for k, v in kwargs.items():
                    if isinstance(v, str) and re.search(r'(\bSELECT\b|\bINSERT\b|--|;|/\*)', v, re.IGNORECASE):
                        raise SecurityError(f"Input parameter '{k}' contains potentially malicious content")
            # Use cache if enabled
            if self.config.enable_cache:
                cache_key = self._get_cache_key(operation, (), kwargs)
                cached_result = self._get_from_cache(cache_key)
                if cached_result is not None:
                    self._metrics.record_cache_hit()
                    logger.debug(f"Cache hit for {operation}")
                    return cached_result

            async def _execute():
                if is_async:
                    return await method(**kwargs)
                loop = asyncio.get_event_loop()
                return await loop.run_in_executor(self._thread_pool, functools.partial(method, **kwargs))
            result = await self._retry_operation(_execute)
            self._metrics.record_request(time.time() - start_time)
            if self.config.log_execution_time:
                logger.info(f"{tool_instance.__class__.__name__}.{operation} executed in {time.time() - start_time:.4f} seconds")

            # Cache result if enabled
            if self.config.enable_cache:
                self._add_to_cache(cache_key, result)
            return result
        except Exception as e:
            self._metrics.record_failure()
            logger.error(f"Error executing {tool_instance.__class__.__name__}.{operation}: {str(e)}", exc_info=True)
            raise OperationError(f"Error executing {operation}: {str(e)}") from e

    async def execute_batch(self, tool_instance: Any, operations: List[Dict[str, Any]]) -> List[Any]:
        """
        Execute multiple tool operations in parallel.

        Args:
            tool_instance (Any): The tool instance to execute operations on.
            operations (List[Dict[str, Any]]): List of operation dictionaries with 'op' and 'kwargs'.

        Returns:
            List[Any]: List of operation results.

        Raises:
            ToolExecutionError: If any operation fails.
            InputValidationError: If input parameters are invalid.
        """
        tasks = []
        for op_data in operations:
            op = op_data.get('op')
            kwargs = op_data.get('kwargs', {})
            if not op:
                raise InputValidationError("Operation name missing in batch request")
            tasks.append(self.execute_async(tool_instance, op, **kwargs))
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"Batch operation {operations[i]['op']} failed: {result}")
        return results

# Singleton executor instance
_default_executor = None

def get_executor(config: Optional[Dict[str, Any]] = None) -> ToolExecutor:
    """
    Get or create the default executor instance.

    Args:
        config (Dict[str, Any], optional): Configuration overrides.

    Returns:
        ToolExecutor: Singleton executor instance.
    """
    global _default_executor
    if _default_executor is None:
        _default_executor = ToolExecutor(config)
    return _default_executor
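
The ToolExecutor docstring above gives only a one-line example. Below is a minimal, self-contained sketch of the direct execution paths, assuming the module is importable as aiecs.tools.tool_executor.tool_executor (the package __init__ may re-export a shorter path); EchoTool is a hypothetical stand-in for a real tool class.

import asyncio

from aiecs.tools.tool_executor.tool_executor import get_executor

class EchoTool:
    """Hypothetical tool: any object with public keyword-argument methods works."""
    def shout(self, message: str = "") -> str:
        return message.upper()

    async def whisper(self, message: str = "") -> str:
        return message.lower()

executor = get_executor({"max_workers": 8, "cache_ttl": 60})
tool = EchoTool()

# Synchronous path: security-screened, cached, timed, and metered.
print(executor.execute(tool, "shout", message="hello"))   # HELLO
print(executor.execute(tool, "shout", message="hello"))   # repeat call should be served from cache

async def main():
    # Coroutine methods are awaited directly; sync methods are offloaded to
    # the executor's thread pool, both wrapped in retry-with-timeout.
    print(await executor.execute_async(tool, "whisper", message="WORLD"))
    # Batch execution fans out via asyncio.gather; failures come back as
    # exception objects in the result list rather than being raised.
    results = await executor.execute_batch(tool, [
        {"op": "shout", "kwargs": {"message": "one"}},
        {"op": "whisper", "kwargs": {"message": "TWO"}},
    ])
    print(results)

asyncio.run(main())
print(executor.get_metrics())

Note that the built-in sanitizer is blunt: when security checks are enabled, any string argument containing SELECT, INSERT, --, ;, or /* raises SecurityError, so even an innocuous message like "done; next" would be rejected.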
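The module-level decorators (validate_input, cache_result, measure_execution_time, sanitize_input) all look for a ToolExecutor stored at self._executor and fall back to a plain call when it is absent. A sketch of that contract under the same import-path assumption, with a hypothetical SearchTool and schema:

from pydantic import BaseModel

from aiecs.tools.tool_executor.tool_executor import (
    ToolExecutor,
    cache_result,
    measure_execution_time,
    validate_input,
)

class SearchParams(BaseModel):
    query: str
    limit: int = 10

class SearchTool:
    def __init__(self):
        # The decorators read config, cache, and metrics from this attribute;
        # without it they degrade to direct calls.
        self._executor = ToolExecutor({"cache_ttl": 60})

    @validate_input(SearchParams)   # bad kwargs -> InputValidationError
    @cache_result(ttl=60)           # keyed on method name, user_id/task_id, and args
    @measure_execution_time         # logs wall-clock duration at INFO level
    def search(self, query: str, limit: int = 10) -> list:
        return [f"result {i} for {query!r}" for i in range(limit)]

tool = SearchTool()
print(tool.search(query="tool executors", limit=3))
print(tool.search(query="tool executors", limit=3))  # second call should hit the cache

One subtlety of this stacking: validate_input passes model_dump(exclude_unset=True) downstream, so omitting a default (query only) and passing it explicitly (query plus limit=10) produce different keyword sets and therefore different cache keys, even though the calls are equivalent.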