aiecs-1.5.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiecs/__init__.py +72 -0
- aiecs/__main__.py +41 -0
- aiecs/aiecs_client.py +469 -0
- aiecs/application/__init__.py +10 -0
- aiecs/application/executors/__init__.py +10 -0
- aiecs/application/executors/operation_executor.py +363 -0
- aiecs/application/knowledge_graph/__init__.py +7 -0
- aiecs/application/knowledge_graph/builder/__init__.py +37 -0
- aiecs/application/knowledge_graph/builder/document_builder.py +375 -0
- aiecs/application/knowledge_graph/builder/graph_builder.py +356 -0
- aiecs/application/knowledge_graph/builder/schema_mapping.py +531 -0
- aiecs/application/knowledge_graph/builder/structured_pipeline.py +443 -0
- aiecs/application/knowledge_graph/builder/text_chunker.py +319 -0
- aiecs/application/knowledge_graph/extractors/__init__.py +27 -0
- aiecs/application/knowledge_graph/extractors/base.py +100 -0
- aiecs/application/knowledge_graph/extractors/llm_entity_extractor.py +327 -0
- aiecs/application/knowledge_graph/extractors/llm_relation_extractor.py +349 -0
- aiecs/application/knowledge_graph/extractors/ner_entity_extractor.py +244 -0
- aiecs/application/knowledge_graph/fusion/__init__.py +23 -0
- aiecs/application/knowledge_graph/fusion/entity_deduplicator.py +387 -0
- aiecs/application/knowledge_graph/fusion/entity_linker.py +343 -0
- aiecs/application/knowledge_graph/fusion/knowledge_fusion.py +580 -0
- aiecs/application/knowledge_graph/fusion/relation_deduplicator.py +189 -0
- aiecs/application/knowledge_graph/pattern_matching/__init__.py +21 -0
- aiecs/application/knowledge_graph/pattern_matching/pattern_matcher.py +344 -0
- aiecs/application/knowledge_graph/pattern_matching/query_executor.py +378 -0
- aiecs/application/knowledge_graph/profiling/__init__.py +12 -0
- aiecs/application/knowledge_graph/profiling/query_plan_visualizer.py +199 -0
- aiecs/application/knowledge_graph/profiling/query_profiler.py +223 -0
- aiecs/application/knowledge_graph/reasoning/__init__.py +27 -0
- aiecs/application/knowledge_graph/reasoning/evidence_synthesis.py +347 -0
- aiecs/application/knowledge_graph/reasoning/inference_engine.py +504 -0
- aiecs/application/knowledge_graph/reasoning/logic_form_parser.py +167 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/__init__.py +79 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_builder.py +513 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_nodes.py +630 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/ast_validator.py +654 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/error_handler.py +477 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/parser.py +390 -0
- aiecs/application/knowledge_graph/reasoning/logic_parser/query_context.py +217 -0
- aiecs/application/knowledge_graph/reasoning/logic_query_integration.py +169 -0
- aiecs/application/knowledge_graph/reasoning/query_planner.py +872 -0
- aiecs/application/knowledge_graph/reasoning/reasoning_engine.py +554 -0
- aiecs/application/knowledge_graph/retrieval/__init__.py +19 -0
- aiecs/application/knowledge_graph/retrieval/retrieval_strategies.py +596 -0
- aiecs/application/knowledge_graph/search/__init__.py +59 -0
- aiecs/application/knowledge_graph/search/hybrid_search.py +423 -0
- aiecs/application/knowledge_graph/search/reranker.py +295 -0
- aiecs/application/knowledge_graph/search/reranker_strategies.py +553 -0
- aiecs/application/knowledge_graph/search/text_similarity.py +398 -0
- aiecs/application/knowledge_graph/traversal/__init__.py +15 -0
- aiecs/application/knowledge_graph/traversal/enhanced_traversal.py +329 -0
- aiecs/application/knowledge_graph/traversal/path_scorer.py +269 -0
- aiecs/application/knowledge_graph/validators/__init__.py +13 -0
- aiecs/application/knowledge_graph/validators/relation_validator.py +189 -0
- aiecs/application/knowledge_graph/visualization/__init__.py +11 -0
- aiecs/application/knowledge_graph/visualization/graph_visualizer.py +321 -0
- aiecs/common/__init__.py +9 -0
- aiecs/common/knowledge_graph/__init__.py +17 -0
- aiecs/common/knowledge_graph/runnable.py +484 -0
- aiecs/config/__init__.py +16 -0
- aiecs/config/config.py +498 -0
- aiecs/config/graph_config.py +137 -0
- aiecs/config/registry.py +23 -0
- aiecs/core/__init__.py +46 -0
- aiecs/core/interface/__init__.py +34 -0
- aiecs/core/interface/execution_interface.py +152 -0
- aiecs/core/interface/storage_interface.py +171 -0
- aiecs/domain/__init__.py +289 -0
- aiecs/domain/agent/__init__.py +189 -0
- aiecs/domain/agent/base_agent.py +697 -0
- aiecs/domain/agent/exceptions.py +103 -0
- aiecs/domain/agent/graph_aware_mixin.py +559 -0
- aiecs/domain/agent/hybrid_agent.py +490 -0
- aiecs/domain/agent/integration/__init__.py +26 -0
- aiecs/domain/agent/integration/context_compressor.py +222 -0
- aiecs/domain/agent/integration/context_engine_adapter.py +252 -0
- aiecs/domain/agent/integration/retry_policy.py +219 -0
- aiecs/domain/agent/integration/role_config.py +213 -0
- aiecs/domain/agent/knowledge_aware_agent.py +646 -0
- aiecs/domain/agent/lifecycle.py +296 -0
- aiecs/domain/agent/llm_agent.py +300 -0
- aiecs/domain/agent/memory/__init__.py +12 -0
- aiecs/domain/agent/memory/conversation.py +197 -0
- aiecs/domain/agent/migration/__init__.py +14 -0
- aiecs/domain/agent/migration/conversion.py +160 -0
- aiecs/domain/agent/migration/legacy_wrapper.py +90 -0
- aiecs/domain/agent/models.py +317 -0
- aiecs/domain/agent/observability.py +407 -0
- aiecs/domain/agent/persistence.py +289 -0
- aiecs/domain/agent/prompts/__init__.py +29 -0
- aiecs/domain/agent/prompts/builder.py +161 -0
- aiecs/domain/agent/prompts/formatters.py +189 -0
- aiecs/domain/agent/prompts/template.py +255 -0
- aiecs/domain/agent/registry.py +260 -0
- aiecs/domain/agent/tool_agent.py +257 -0
- aiecs/domain/agent/tools/__init__.py +12 -0
- aiecs/domain/agent/tools/schema_generator.py +221 -0
- aiecs/domain/community/__init__.py +155 -0
- aiecs/domain/community/agent_adapter.py +477 -0
- aiecs/domain/community/analytics.py +481 -0
- aiecs/domain/community/collaborative_workflow.py +642 -0
- aiecs/domain/community/communication_hub.py +645 -0
- aiecs/domain/community/community_builder.py +320 -0
- aiecs/domain/community/community_integration.py +800 -0
- aiecs/domain/community/community_manager.py +813 -0
- aiecs/domain/community/decision_engine.py +879 -0
- aiecs/domain/community/exceptions.py +225 -0
- aiecs/domain/community/models/__init__.py +33 -0
- aiecs/domain/community/models/community_models.py +268 -0
- aiecs/domain/community/resource_manager.py +457 -0
- aiecs/domain/community/shared_context_manager.py +603 -0
- aiecs/domain/context/__init__.py +58 -0
- aiecs/domain/context/context_engine.py +989 -0
- aiecs/domain/context/conversation_models.py +354 -0
- aiecs/domain/context/graph_memory.py +467 -0
- aiecs/domain/execution/__init__.py +12 -0
- aiecs/domain/execution/model.py +57 -0
- aiecs/domain/knowledge_graph/__init__.py +19 -0
- aiecs/domain/knowledge_graph/models/__init__.py +52 -0
- aiecs/domain/knowledge_graph/models/entity.py +130 -0
- aiecs/domain/knowledge_graph/models/evidence.py +194 -0
- aiecs/domain/knowledge_graph/models/inference_rule.py +186 -0
- aiecs/domain/knowledge_graph/models/path.py +179 -0
- aiecs/domain/knowledge_graph/models/path_pattern.py +173 -0
- aiecs/domain/knowledge_graph/models/query.py +272 -0
- aiecs/domain/knowledge_graph/models/query_plan.py +187 -0
- aiecs/domain/knowledge_graph/models/relation.py +136 -0
- aiecs/domain/knowledge_graph/schema/__init__.py +23 -0
- aiecs/domain/knowledge_graph/schema/entity_type.py +135 -0
- aiecs/domain/knowledge_graph/schema/graph_schema.py +271 -0
- aiecs/domain/knowledge_graph/schema/property_schema.py +155 -0
- aiecs/domain/knowledge_graph/schema/relation_type.py +171 -0
- aiecs/domain/knowledge_graph/schema/schema_manager.py +496 -0
- aiecs/domain/knowledge_graph/schema/type_enums.py +205 -0
- aiecs/domain/task/__init__.py +13 -0
- aiecs/domain/task/dsl_processor.py +613 -0
- aiecs/domain/task/model.py +62 -0
- aiecs/domain/task/task_context.py +268 -0
- aiecs/infrastructure/__init__.py +24 -0
- aiecs/infrastructure/graph_storage/__init__.py +11 -0
- aiecs/infrastructure/graph_storage/base.py +601 -0
- aiecs/infrastructure/graph_storage/batch_operations.py +449 -0
- aiecs/infrastructure/graph_storage/cache.py +429 -0
- aiecs/infrastructure/graph_storage/distributed.py +226 -0
- aiecs/infrastructure/graph_storage/error_handling.py +390 -0
- aiecs/infrastructure/graph_storage/graceful_degradation.py +306 -0
- aiecs/infrastructure/graph_storage/health_checks.py +378 -0
- aiecs/infrastructure/graph_storage/in_memory.py +514 -0
- aiecs/infrastructure/graph_storage/index_optimization.py +483 -0
- aiecs/infrastructure/graph_storage/lazy_loading.py +410 -0
- aiecs/infrastructure/graph_storage/metrics.py +357 -0
- aiecs/infrastructure/graph_storage/migration.py +413 -0
- aiecs/infrastructure/graph_storage/pagination.py +471 -0
- aiecs/infrastructure/graph_storage/performance_monitoring.py +466 -0
- aiecs/infrastructure/graph_storage/postgres.py +871 -0
- aiecs/infrastructure/graph_storage/query_optimizer.py +635 -0
- aiecs/infrastructure/graph_storage/schema_cache.py +290 -0
- aiecs/infrastructure/graph_storage/sqlite.py +623 -0
- aiecs/infrastructure/graph_storage/streaming.py +495 -0
- aiecs/infrastructure/messaging/__init__.py +13 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +383 -0
- aiecs/infrastructure/messaging/websocket_manager.py +298 -0
- aiecs/infrastructure/monitoring/__init__.py +34 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +174 -0
- aiecs/infrastructure/monitoring/global_metrics_manager.py +213 -0
- aiecs/infrastructure/monitoring/structured_logger.py +48 -0
- aiecs/infrastructure/monitoring/tracing_manager.py +410 -0
- aiecs/infrastructure/persistence/__init__.py +24 -0
- aiecs/infrastructure/persistence/context_engine_client.py +187 -0
- aiecs/infrastructure/persistence/database_manager.py +333 -0
- aiecs/infrastructure/persistence/file_storage.py +754 -0
- aiecs/infrastructure/persistence/redis_client.py +220 -0
- aiecs/llm/__init__.py +86 -0
- aiecs/llm/callbacks/__init__.py +11 -0
- aiecs/llm/callbacks/custom_callbacks.py +264 -0
- aiecs/llm/client_factory.py +420 -0
- aiecs/llm/clients/__init__.py +33 -0
- aiecs/llm/clients/base_client.py +193 -0
- aiecs/llm/clients/googleai_client.py +181 -0
- aiecs/llm/clients/openai_client.py +131 -0
- aiecs/llm/clients/vertex_client.py +437 -0
- aiecs/llm/clients/xai_client.py +184 -0
- aiecs/llm/config/__init__.py +51 -0
- aiecs/llm/config/config_loader.py +275 -0
- aiecs/llm/config/config_validator.py +236 -0
- aiecs/llm/config/model_config.py +151 -0
- aiecs/llm/utils/__init__.py +10 -0
- aiecs/llm/utils/validate_config.py +91 -0
- aiecs/main.py +363 -0
- aiecs/scripts/__init__.py +3 -0
- aiecs/scripts/aid/VERSION_MANAGEMENT.md +97 -0
- aiecs/scripts/aid/__init__.py +19 -0
- aiecs/scripts/aid/version_manager.py +215 -0
- aiecs/scripts/dependance_check/DEPENDENCY_SYSTEM_SUMMARY.md +242 -0
- aiecs/scripts/dependance_check/README_DEPENDENCY_CHECKER.md +310 -0
- aiecs/scripts/dependance_check/__init__.py +17 -0
- aiecs/scripts/dependance_check/dependency_checker.py +938 -0
- aiecs/scripts/dependance_check/dependency_fixer.py +391 -0
- aiecs/scripts/dependance_check/download_nlp_data.py +396 -0
- aiecs/scripts/dependance_check/quick_dependency_check.py +270 -0
- aiecs/scripts/dependance_check/setup_nlp_data.sh +217 -0
- aiecs/scripts/dependance_patch/__init__.py +7 -0
- aiecs/scripts/dependance_patch/fix_weasel/README_WEASEL_PATCH.md +126 -0
- aiecs/scripts/dependance_patch/fix_weasel/__init__.py +11 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.py +128 -0
- aiecs/scripts/dependance_patch/fix_weasel/fix_weasel_validator.sh +82 -0
- aiecs/scripts/dependance_patch/fix_weasel/patch_weasel_library.sh +188 -0
- aiecs/scripts/dependance_patch/fix_weasel/run_weasel_patch.sh +41 -0
- aiecs/scripts/tools_develop/README.md +449 -0
- aiecs/scripts/tools_develop/TOOL_AUTO_DISCOVERY.md +234 -0
- aiecs/scripts/tools_develop/__init__.py +21 -0
- aiecs/scripts/tools_develop/check_type_annotations.py +259 -0
- aiecs/scripts/tools_develop/validate_tool_schemas.py +422 -0
- aiecs/scripts/tools_develop/verify_tools.py +356 -0
- aiecs/tasks/__init__.py +1 -0
- aiecs/tasks/worker.py +172 -0
- aiecs/tools/__init__.py +299 -0
- aiecs/tools/apisource/__init__.py +99 -0
- aiecs/tools/apisource/intelligence/__init__.py +19 -0
- aiecs/tools/apisource/intelligence/data_fusion.py +381 -0
- aiecs/tools/apisource/intelligence/query_analyzer.py +413 -0
- aiecs/tools/apisource/intelligence/search_enhancer.py +388 -0
- aiecs/tools/apisource/monitoring/__init__.py +9 -0
- aiecs/tools/apisource/monitoring/metrics.py +303 -0
- aiecs/tools/apisource/providers/__init__.py +115 -0
- aiecs/tools/apisource/providers/base.py +664 -0
- aiecs/tools/apisource/providers/census.py +401 -0
- aiecs/tools/apisource/providers/fred.py +564 -0
- aiecs/tools/apisource/providers/newsapi.py +412 -0
- aiecs/tools/apisource/providers/worldbank.py +357 -0
- aiecs/tools/apisource/reliability/__init__.py +12 -0
- aiecs/tools/apisource/reliability/error_handler.py +375 -0
- aiecs/tools/apisource/reliability/fallback_strategy.py +391 -0
- aiecs/tools/apisource/tool.py +850 -0
- aiecs/tools/apisource/utils/__init__.py +9 -0
- aiecs/tools/apisource/utils/validators.py +338 -0
- aiecs/tools/base_tool.py +201 -0
- aiecs/tools/docs/__init__.py +121 -0
- aiecs/tools/docs/ai_document_orchestrator.py +599 -0
- aiecs/tools/docs/ai_document_writer_orchestrator.py +2403 -0
- aiecs/tools/docs/content_insertion_tool.py +1333 -0
- aiecs/tools/docs/document_creator_tool.py +1317 -0
- aiecs/tools/docs/document_layout_tool.py +1166 -0
- aiecs/tools/docs/document_parser_tool.py +994 -0
- aiecs/tools/docs/document_writer_tool.py +1818 -0
- aiecs/tools/knowledge_graph/__init__.py +17 -0
- aiecs/tools/knowledge_graph/graph_reasoning_tool.py +734 -0
- aiecs/tools/knowledge_graph/graph_search_tool.py +923 -0
- aiecs/tools/knowledge_graph/kg_builder_tool.py +476 -0
- aiecs/tools/langchain_adapter.py +542 -0
- aiecs/tools/schema_generator.py +275 -0
- aiecs/tools/search_tool/__init__.py +100 -0
- aiecs/tools/search_tool/analyzers.py +589 -0
- aiecs/tools/search_tool/cache.py +260 -0
- aiecs/tools/search_tool/constants.py +128 -0
- aiecs/tools/search_tool/context.py +216 -0
- aiecs/tools/search_tool/core.py +749 -0
- aiecs/tools/search_tool/deduplicator.py +123 -0
- aiecs/tools/search_tool/error_handler.py +271 -0
- aiecs/tools/search_tool/metrics.py +371 -0
- aiecs/tools/search_tool/rate_limiter.py +178 -0
- aiecs/tools/search_tool/schemas.py +277 -0
- aiecs/tools/statistics/__init__.py +80 -0
- aiecs/tools/statistics/ai_data_analysis_orchestrator.py +643 -0
- aiecs/tools/statistics/ai_insight_generator_tool.py +505 -0
- aiecs/tools/statistics/ai_report_orchestrator_tool.py +694 -0
- aiecs/tools/statistics/data_loader_tool.py +564 -0
- aiecs/tools/statistics/data_profiler_tool.py +658 -0
- aiecs/tools/statistics/data_transformer_tool.py +573 -0
- aiecs/tools/statistics/data_visualizer_tool.py +495 -0
- aiecs/tools/statistics/model_trainer_tool.py +487 -0
- aiecs/tools/statistics/statistical_analyzer_tool.py +459 -0
- aiecs/tools/task_tools/__init__.py +86 -0
- aiecs/tools/task_tools/chart_tool.py +732 -0
- aiecs/tools/task_tools/classfire_tool.py +922 -0
- aiecs/tools/task_tools/image_tool.py +447 -0
- aiecs/tools/task_tools/office_tool.py +684 -0
- aiecs/tools/task_tools/pandas_tool.py +635 -0
- aiecs/tools/task_tools/report_tool.py +635 -0
- aiecs/tools/task_tools/research_tool.py +392 -0
- aiecs/tools/task_tools/scraper_tool.py +715 -0
- aiecs/tools/task_tools/stats_tool.py +688 -0
- aiecs/tools/temp_file_manager.py +130 -0
- aiecs/tools/tool_executor/__init__.py +37 -0
- aiecs/tools/tool_executor/tool_executor.py +881 -0
- aiecs/utils/LLM_output_structor.py +445 -0
- aiecs/utils/__init__.py +34 -0
- aiecs/utils/base_callback.py +47 -0
- aiecs/utils/cache_provider.py +695 -0
- aiecs/utils/execution_utils.py +184 -0
- aiecs/utils/logging.py +1 -0
- aiecs/utils/prompt_loader.py +14 -0
- aiecs/utils/token_usage_repository.py +323 -0
- aiecs/ws/__init__.py +0 -0
- aiecs/ws/socket_server.py +52 -0
- aiecs-1.5.1.dist-info/METADATA +608 -0
- aiecs-1.5.1.dist-info/RECORD +302 -0
- aiecs-1.5.1.dist-info/WHEEL +5 -0
- aiecs-1.5.1.dist-info/entry_points.txt +10 -0
- aiecs-1.5.1.dist-info/licenses/LICENSE +225 -0
- aiecs-1.5.1.dist-info/top_level.txt +1 -0
aiecs/tools/tool_executor/tool_executor.py
@@ -0,0 +1,881 @@
import os
import asyncio
import functools
import inspect
import logging
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Callable, Dict, List, Optional, Type, Union
from contextlib import contextmanager

from aiecs.utils.execution_utils import ExecutionUtils
from aiecs.utils.cache_provider import ICacheProvider, LRUCacheProvider
import re
from pydantic import BaseModel, ValidationError, ConfigDict

logger = logging.getLogger(__name__)

# Base exception hierarchy


class ToolExecutionError(Exception):
    """Base exception for all tool execution errors."""


class InputValidationError(ToolExecutionError):
    """Error in validating input parameters."""


class SecurityError(ToolExecutionError):
    """Security-related error."""


class OperationError(ToolExecutionError):
    """Error during operation execution."""


class TimeoutError(ToolExecutionError):
    """Operation timed out."""


# Configuration for the executor


class ExecutorConfig(BaseModel):
    """
    Configuration for the ToolExecutor.

    Attributes:
        enable_cache (bool): Enable caching of operation results.
        cache_size (int): Maximum number of cache entries.
        cache_ttl (int): Cache time-to-live in seconds.
        max_workers (int): Maximum number of thread pool workers.
        io_concurrency (int): Maximum concurrent I/O operations.
        chunk_size (int): Chunk size for processing large data.
        max_file_size (int): Maximum file size in bytes.
        log_level (str): Logging level (e.g., 'INFO', 'DEBUG').
        log_execution_time (bool): Log execution time for operations.
        enable_security_checks (bool): Enable security checks for inputs.
        retry_attempts (int): Number of retry attempts for transient errors.
        retry_backoff (float): Backoff factor for retries.
        timeout (int): Timeout for operations in seconds.
        enable_dual_cache (bool): Enable dual-layer caching (L1: LRU + L2: Redis).
        enable_redis_cache (bool): Enable Redis as L2 cache (requires enable_dual_cache=True).
        redis_cache_ttl (int): Redis cache TTL in seconds (for L2 cache).
        l1_cache_ttl (int): L1 cache TTL in seconds (for dual-layer cache).
    """

    enable_cache: bool = True
    cache_size: int = 100
    cache_ttl: int = 3600
    max_workers: int = 4
    io_concurrency: int = 8
    chunk_size: int = 10000
    max_file_size: int = 1000000
    log_level: str = "INFO"
    log_execution_time: bool = True
    enable_security_checks: bool = True
    retry_attempts: int = 3
    retry_backoff: float = 1.0
    timeout: int = 30

    # Dual-layer cache configuration
    enable_dual_cache: bool = False
    enable_redis_cache: bool = False
    redis_cache_ttl: int = 86400  # 1 day
    l1_cache_ttl: int = 300  # 5 minutes

    model_config = ConfigDict(env_prefix="TOOL_EXECUTOR_")


# Metrics counter


class ToolExecutorStats:
    """
    Tracks tool executor performance statistics.
    """

    def __init__(self):
        self.requests: int = 0
        self.failures: int = 0
        self.cache_hits: int = 0
        self.processing_times: List[float] = []

    def record_request(self, processing_time: float):
        self.requests += 1
        self.processing_times.append(processing_time)

    def record_failure(self):
        self.failures += 1

    def record_cache_hit(self):
        self.cache_hits += 1

    def to_dict(self) -> Dict[str, Any]:
        return {
            "requests": self.requests,
            "failures": self.failures,
            "cache_hits": self.cache_hits,
            "avg_processing_time": (
                sum(self.processing_times) / len(self.processing_times)
                if self.processing_times
                else 0.0
            ),
        }


# Decorators for tool methods


def validate_input(schema_class: Type[BaseModel]) -> Callable:
    """
    Decorator to validate input using a Pydantic schema.

    Args:
        schema_class (Type[BaseModel]): Pydantic schema class for validation.

    Returns:
        Callable: Decorated function with validated inputs.

    Raises:
        InputValidationError: If input validation fails.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                schema = schema_class(**kwargs)
                validated_kwargs = schema.model_dump(exclude_unset=True)
                return func(self, **validated_kwargs)
            except ValidationError as e:
                raise InputValidationError(f"Invalid input parameters: {e}")

        return wrapper

    return decorator


def cache_result(ttl: Optional[int] = None) -> Callable:
    """
    Decorator to cache function results with optional TTL.

    Args:
        ttl (Optional[int]): Time-to-live for cache entry in seconds.

    Returns:
        Callable: Decorated function with caching.
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if not hasattr(self, "_executor") or not self._executor.config.enable_cache:
                return func(self, *args, **kwargs)
            cache_key = self._executor._get_cache_key(func.__name__, args, kwargs)
            result = self._executor._get_from_cache(cache_key)
            if result is not None:
                logger.debug(f"Cache hit for {func.__name__}")
                self._executor._metrics.record_cache_hit()
                return result
            result = func(self, *args, **kwargs)
            self._executor._add_to_cache(cache_key, result, ttl)
            return result

        return wrapper

    return decorator


def cache_result_with_strategy(
    ttl_strategy: Optional[Union[int, Callable]] = None,
) -> Callable:
    """
    Decorator to cache function results with flexible TTL strategy.

    Supports multiple TTL strategy types:
    1. Fixed TTL (int): Static TTL in seconds
    2. Callable strategy: Function that calculates TTL based on result and context
    3. None: Use default TTL from executor config

    Args:
        ttl_strategy: TTL strategy, can be:
            - int: Fixed TTL in seconds
            - Callable[[Any, tuple, dict], int]: Function(result, args, kwargs) -> ttl_seconds
            - None: Use default TTL

    Returns:
        Callable: Decorated function with intelligent caching.

    Example:
        # Fixed TTL
        @cache_result_with_strategy(ttl_strategy=3600)
        def simple_operation(self, data):
            return process(data)

        # Dynamic TTL based on result
        def calculate_ttl(result, args, kwargs):
            if result.get('type') == 'static':
                return 86400  # 1 day
            return 3600  # 1 hour

        @cache_result_with_strategy(ttl_strategy=calculate_ttl)
        def smart_operation(self, query):
            return search(query)
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            if not hasattr(self, "_executor") or not self._executor.config.enable_cache:
                return func(self, *args, **kwargs)

            # Generate cache key
            cache_key = self._executor._get_cache_key(func.__name__, args, kwargs)

            # Check cache
            cached = self._executor._get_from_cache(cache_key)
            if cached is not None:
                logger.debug(f"Cache hit for {func.__name__}")
                self._executor._metrics.record_cache_hit()
                return cached

            # Execute function
            result = func(self, *args, **kwargs)

            # Calculate TTL based on strategy
            # Support both regular callables and lambdas that need self
            if callable(ttl_strategy):
                try:
                    # Try calling with self first (for lambda self, result,
                    # args, kwargs)
                    import inspect

                    sig = inspect.signature(ttl_strategy)
                    if len(sig.parameters) == 4:  # self, result, args, kwargs
                        ttl = ttl_strategy(self, result, args, kwargs)
                    else:  # result, args, kwargs
                        ttl = ttl_strategy(result, args, kwargs)

                    if not isinstance(ttl, int) or ttl < 0:
                        logger.warning(
                            f"TTL strategy returned invalid value: {ttl}. "
                            f"Expected positive integer. Using default TTL."
                        )
                        ttl = None
                except Exception as e:
                    logger.error(f"Error calculating TTL from strategy: {e}. Using default TTL.")
                    ttl = None
            else:
                ttl = self._executor._calculate_ttl_from_strategy(
                    ttl_strategy, result, args, kwargs
                )

            # Cache with calculated TTL
            self._executor._add_to_cache(cache_key, result, ttl)
            return result

        return wrapper

    return decorator


def run_in_executor(func: Callable) -> Callable:
    """
    Decorator to run a synchronous function in the thread pool executor.

    Args:
        func (Callable): Function to execute.

    Returns:
        Callable: Async wrapper for the function.
    """

    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        if not hasattr(self, "_executor"):
            return await func(self, *args, **kwargs)
        loop = asyncio.get_event_loop()
        return await loop.run_in_executor(
            self._executor._thread_pool,
            functools.partial(func, self, *args, **kwargs),
        )

    return wrapper


def measure_execution_time(func: Callable) -> Callable:
    """
    Decorator to measure and log execution time.

    Args:
        func (Callable): Function to measure.

    Returns:
        Callable: Decorated function with timing.
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, "_executor") or not self._executor.config.log_execution_time:
            return func(self, *args, **kwargs)
        start_time = time.time()
        try:
            result = func(self, *args, **kwargs)
            execution_time = time.time() - start_time
            logger.info(f"{func.__name__} executed in {execution_time:.4f} seconds")
            return result
        except Exception as e:
            execution_time = time.time() - start_time
            logger.error(f"{func.__name__} failed after {execution_time:.4f} seconds: {e}")
            raise

    return wrapper


def sanitize_input(func: Callable) -> Callable:
    """
    Decorator to sanitize input parameters for security.

    Args:
        func (Callable): Function to sanitize inputs for.

    Returns:
        Callable: Decorated function with sanitized inputs.
    """

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not hasattr(self, "_executor") or not self._executor.config.enable_security_checks:
            return func(self, *args, **kwargs)
        sanitized_kwargs = {}
        for k, v in kwargs.items():
            if isinstance(v, str) and re.search(
                r"(\bSELECT\b|\bINSERT\b|--|;|/\*)", v, re.IGNORECASE
            ):
                raise SecurityError(f"Input parameter '{k}' contains potentially malicious content")
            sanitized_kwargs[k] = v
        return func(self, *args, **sanitized_kwargs)

    return wrapper


class ToolExecutor:
    """
    Centralized executor for tool operations, handling:
    - Input validation
    - Caching with TTL and content-based keys
    - Concurrency with dynamic thread pool
    - Error handling with retries
    - Performance optimization with metrics
    - Structured logging

    Example:
        executor = ToolExecutor(config={'max_workers': 8})
        result = executor.execute(tool_instance, 'operation_name', param1='value')
    """

    def __init__(
        self,
        config: Optional[Dict[str, Any]] = None,
        cache_provider: Optional[ICacheProvider] = None,
    ):
        """
        Initialize the executor with optional configuration.

        Args:
            config (Dict[str, Any], optional): Configuration overrides for ExecutorConfig.
            cache_provider (ICacheProvider, optional): Custom cache provider. If None, uses default based on config.

        Raises:
            ValueError: If config is invalid.
        """
        self.config = ExecutorConfig(**(config or {}))
        logging.basicConfig(
            level=getattr(logging, self.config.log_level),
            format="%(asctime)s %(levelname)s %(name)s: %(message)s",
        )
        self._thread_pool = ThreadPoolExecutor(
            max_workers=max(os.cpu_count() or 4, self.config.max_workers)
        )
        self._locks: Dict[str, threading.Lock] = {}
        self._metrics = ToolExecutorStats()
        self.execution_utils = ExecutionUtils(
            cache_size=self.config.cache_size,
            cache_ttl=self.config.cache_ttl,
            retry_attempts=self.config.retry_attempts,
            retry_backoff=self.config.retry_backoff,
        )

        # Support pluggable cache provider
        if cache_provider is not None:
            # User provided custom cache provider
            self.cache_provider = cache_provider
            logger.info(f"Using custom cache provider: {cache_provider.__class__.__name__}")
        elif self.config.enable_dual_cache and self.config.enable_redis_cache:
            # Enable dual-layer cache (L1: LRU + L2: Redis)
            self.cache_provider = self._initialize_dual_cache()
        else:
            # Default: use LRUCacheProvider wrapping ExecutionUtils
            self.cache_provider = LRUCacheProvider(self.execution_utils)
            logger.debug("Using default LRUCacheProvider")

    def _initialize_dual_cache(self) -> ICacheProvider:
        """
        Initialize dual-layer cache (L1: LRU + L2: Redis).

        Returns:
            DualLayerCacheProvider instance or fallback to LRUCacheProvider
        """
        try:
            from aiecs.utils.cache_provider import (
                DualLayerCacheProvider,
                RedisCacheProvider,
            )

            # Create L1 cache (LRU)
            l1_cache = LRUCacheProvider(self.execution_utils)

            # Create L2 cache (Redis) - this requires async initialization
            # We'll use a lazy initialization approach
            try:
                # Try to get global Redis client synchronously
                # Note: This assumes Redis client is already initialized
                from aiecs.infrastructure.persistence import redis_client

                if redis_client is not None:
                    l2_cache = RedisCacheProvider(
                        redis_client,
                        prefix="tool_executor:",
                        default_ttl=self.config.redis_cache_ttl,
                    )

                    dual_cache = DualLayerCacheProvider(
                        l1_provider=l1_cache,
                        l2_provider=l2_cache,
                        l1_ttl=self.config.l1_cache_ttl,
                    )

                    logger.info("Dual-layer cache enabled (L1: LRU + L2: Redis)")
                    return dual_cache
                else:
                    logger.warning("Redis client not initialized, falling back to LRU cache")
                    return l1_cache

            except ImportError:
                logger.warning("Redis client not available, falling back to LRU cache")
                return l1_cache

        except Exception as e:
            logger.warning(f"Failed to initialize dual-layer cache: {e}, falling back to LRU")
            return LRUCacheProvider(self.execution_utils)

    def _get_cache_key(self, func_name: str, args: tuple, kwargs: Dict[str, Any]) -> str:
        """
        Generate a context-aware cache key from function name, user ID, task ID, and arguments.

        Args:
            func_name (str): Name of the function.
            args (tuple): Positional arguments.
            kwargs (Dict[str, Any]): Keyword arguments.

        Returns:
            str: Cache key.
        """
        user_id = kwargs.get("user_id", "anonymous")
        task_id = kwargs.get("task_id", "none")
        return self.execution_utils.generate_cache_key(func_name, user_id, task_id, args, kwargs)

    def _calculate_ttl_from_strategy(
        self,
        ttl_strategy: Optional[Union[int, Callable]],
        result: Any,
        args: tuple,
        kwargs: Dict[str, Any],
    ) -> Optional[int]:
        """
        Calculate TTL based on the provided strategy.

        Supports multiple strategy types:
        1. None: Use default TTL from config
        2. int: Fixed TTL in seconds
        3. Callable: Dynamic TTL calculation function

        Args:
            ttl_strategy: TTL strategy (None, int, or Callable)
            result: Function execution result
            args: Function positional arguments
            kwargs: Function keyword arguments

        Returns:
            Optional[int]: Calculated TTL in seconds, or None for default

        Example:
            # Strategy function signature
            def my_ttl_strategy(result: Any, args: tuple, kwargs: dict) -> int:
                if result.get('type') == 'permanent':
                    return 86400 * 30  # 30 days
                return 3600  # 1 hour
        """
        # Case 1: No strategy - use default
        if ttl_strategy is None:
            return None

        # Case 2: Fixed TTL (integer)
        if isinstance(ttl_strategy, int):
            return ttl_strategy

        # Case 3: Callable strategy - dynamic calculation
        if callable(ttl_strategy):
            try:
                calculated_ttl = ttl_strategy(result, args, kwargs)
                if not isinstance(calculated_ttl, int) or calculated_ttl < 0:
                    logger.warning(
                        f"TTL strategy returned invalid value: {calculated_ttl}. "
                        f"Expected positive integer. Using default TTL."
                    )
                    return None
                return calculated_ttl
            except Exception as e:
                logger.error(f"Error calculating TTL from strategy: {e}. Using default TTL.")
                return None

        # Invalid strategy type
        logger.warning(
            f"Invalid TTL strategy type: {type(ttl_strategy)}. "
            f"Expected None, int, or Callable. Using default TTL."
        )
        return None

    def _get_from_cache(self, cache_key: str) -> Optional[Any]:
        """
        Get a result from cache if it exists and is not expired (synchronous).

        Args:
            cache_key (str): Cache key.

        Returns:
            Optional[Any]: Cached result or None.
        """
        if not self.config.enable_cache:
            return None
        return self.cache_provider.get(cache_key)

    def _add_to_cache(self, cache_key: str, result: Any, ttl: Optional[int] = None) -> None:
        """
        Add a result to the cache with optional TTL (synchronous).

        Args:
            cache_key (str): Cache key.
            result (Any): Result to cache.
            ttl (Optional[int]): Time-to-live in seconds.
        """
        if not self.config.enable_cache:
            return
        self.cache_provider.set(cache_key, result, ttl)

    async def _get_from_cache_async(self, cache_key: str) -> Optional[Any]:
        """
        Get a result from cache if it exists and is not expired (asynchronous).

        Args:
            cache_key (str): Cache key.

        Returns:
            Optional[Any]: Cached result or None.
        """
        if not self.config.enable_cache:
            return None

        # Use async interface if available
        if hasattr(self.cache_provider, "get_async"):
            return await self.cache_provider.get_async(cache_key)
        else:
            # Fallback to sync interface
            return self.cache_provider.get(cache_key)

    async def _add_to_cache_async(
        self, cache_key: str, result: Any, ttl: Optional[int] = None
    ) -> None:
        """
        Add a result to the cache with optional TTL (asynchronous).

        Args:
            cache_key (str): Cache key.
            result (Any): Result to cache.
            ttl (Optional[int]): Time-to-live in seconds.
        """
        if not self.config.enable_cache:
            return

        # Use async interface if available
        if hasattr(self.cache_provider, "set_async"):
            await self.cache_provider.set_async(cache_key, result, ttl)
        else:
            # Fallback to sync interface
            self.cache_provider.set(cache_key, result, ttl)

    def get_lock(self, resource_id: str) -> threading.Lock:
        """
        Get or create a lock for a specific resource.

        Args:
            resource_id (str): Resource identifier.

        Returns:
            threading.Lock: Lock for the resource.
        """
        if resource_id not in self._locks:
            self._locks[resource_id] = threading.Lock()
        return self._locks[resource_id]

    def get_metrics(self) -> Dict[str, Any]:
        """
        Get current executor metrics.

        Returns:
            Dict[str, Any]: Metrics including request count, failures, cache hits, and average processing time.
        """
        return self._metrics.to_dict()

    @contextmanager
    def timeout_context(self, seconds: int):
        """
        Context manager for enforcing operation timeouts.

        Args:
            seconds (int): Timeout duration in seconds.

        Raises:
            TimeoutError: If operation exceeds timeout.
        """
        return self.execution_utils.timeout_context(seconds)

    async def _retry_operation(self, func: Callable, *args, **kwargs) -> Any:
        """
        Execute an operation with retries for transient errors.

        Args:
            func (Callable): Function to execute.
            *args: Positional arguments.
            **kwargs: Keyword arguments.

        Returns:
            Any: Result of the operation.

        Raises:
            OperationError: If all retries fail.
        """
        return await self.execution_utils.execute_with_retry_and_timeout(
            func, self.config.timeout, *args, **kwargs
        )

    def execute(self, tool_instance: Any, operation: str, **kwargs) -> Any:
        """
        Execute a synchronous tool operation with parameters.

        Args:
            tool_instance (Any): The tool instance to execute the operation on.
            operation (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        method = getattr(tool_instance, operation, None)
        if not method or not callable(method) or operation.startswith("_"):
            available_ops = [
                m
                for m in dir(tool_instance)
                if not m.startswith("_") and callable(getattr(tool_instance, m))
            ]
            raise ToolExecutionError(
                f"Unsupported operation: {operation}. Available operations: {', '.join(available_ops)}"
            )
        logger.info(
            f"Executing {tool_instance.__class__.__name__}.{operation} with params: {kwargs}"
        )
        start_time = time.time()
        try:
            # Sanitize inputs
            if self.config.enable_security_checks:
                for k, v in kwargs.items():
                    if isinstance(v, str) and re.search(
                        r"(\bSELECT\b|\bINSERT\b|--|;|/\*)", v, re.IGNORECASE
                    ):
                        raise SecurityError(
                            f"Input parameter '{k}' contains potentially malicious content"
                        )
            # Use cache if enabled
            if self.config.enable_cache:
                cache_key = self._get_cache_key(operation, (), kwargs)
                cached_result = self._get_from_cache(cache_key)
                if cached_result is not None:
                    self._metrics.record_cache_hit()
                    logger.debug(f"Cache hit for {operation}")
                    return cached_result

            result = method(**kwargs)
            self._metrics.record_request(time.time() - start_time)
            if self.config.log_execution_time:
                logger.info(
                    f"{tool_instance.__class__.__name__}.{operation} executed in {time.time() - start_time:.4f} seconds"
                )

            # Cache result if enabled
            if self.config.enable_cache:
                self._add_to_cache(cache_key, result)
            return result
        except Exception as e:
            self._metrics.record_failure()
            logger.error(
                f"Error executing {tool_instance.__class__.__name__}.{operation}: {str(e)}",
                exc_info=True,
            )
            raise OperationError(f"Error executing {operation}: {str(e)}") from e

    async def execute_async(self, tool_instance: Any, operation: str, **kwargs) -> Any:
        """
        Execute an asynchronous tool operation with parameters.

        Args:
            tool_instance (Any): The tool instance to execute the operation on.
            operation (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        method = getattr(tool_instance, operation, None)
        if not method or not callable(method) or operation.startswith("_"):
            available_ops = [
                m
                for m in dir(tool_instance)
                if not m.startswith("_") and callable(getattr(tool_instance, m))
            ]
            raise ToolExecutionError(
                f"Unsupported operation: {operation}. Available operations: {', '.join(available_ops)}"
            )
        is_async = inspect.iscoroutinefunction(method)
        logger.info(
            f"Executing async {tool_instance.__class__.__name__}.{operation} with params: {kwargs}"
        )
        start_time = time.time()
        try:
            # Sanitize inputs
            if self.config.enable_security_checks:
                for k, v in kwargs.items():
                    if isinstance(v, str) and re.search(
                        r"(\bSELECT\b|\bINSERT\b|--|;|/\*)", v, re.IGNORECASE
                    ):
                        raise SecurityError(
                            f"Input parameter '{k}' contains potentially malicious content"
                        )
            # Use cache if enabled (async)
            if self.config.enable_cache:
                cache_key = self._get_cache_key(operation, (), kwargs)
                cached_result = await self._get_from_cache_async(cache_key)
                if cached_result is not None:
                    self._metrics.record_cache_hit()
                    logger.debug(f"Cache hit for {operation} (async)")
                    return cached_result

            async def _execute():
                if is_async:
                    return await method(**kwargs)
                loop = asyncio.get_event_loop()
                return await loop.run_in_executor(
                    self._thread_pool, functools.partial(method, **kwargs)
                )

            result = await self._retry_operation(_execute)
            self._metrics.record_request(time.time() - start_time)
            if self.config.log_execution_time:
                logger.info(
                    f"{tool_instance.__class__.__name__}.{operation} executed in {time.time() - start_time:.4f} seconds"
                )

            # Cache result if enabled (async)
            if self.config.enable_cache:
                await self._add_to_cache_async(cache_key, result)
            return result
        except Exception as e:
            self._metrics.record_failure()
            logger.error(
                f"Error executing {tool_instance.__class__.__name__}.{operation}: {str(e)}",
                exc_info=True,
            )
            raise OperationError(f"Error executing {operation}: {str(e)}") from e

    async def execute_batch(
        self, tool_instance: Any, operations: List[Dict[str, Any]]
    ) -> List[Any]:
        """
        Execute multiple tool operations in parallel.

        Args:
            tool_instance (Any): The tool instance to execute operations on.
            operations (List[Dict[str, Any]]): List of operation dictionaries with 'op' and 'kwargs'.

        Returns:
            List[Any]: List of operation results.

        Raises:
            ToolExecutionError: If any operation fails.
            InputValidationError: If input parameters are invalid.
        """
        tasks = []
        for op_data in operations:
            op = op_data.get("op")
            kwargs = op_data.get("kwargs", {})
            if not op:
                raise InputValidationError("Operation name missing in batch request")
            tasks.append(self.execute_async(tool_instance, op, **kwargs))
        results = await asyncio.gather(*tasks, return_exceptions=True)
        for i, result in enumerate(results):
            if isinstance(result, Exception):
                logger.error(f"Batch operation {operations[i]['op']} failed: {result}")
        return results


# Singleton executor instance (for backward compatibility)
_default_executor = None


def get_executor(config: Optional[Dict[str, Any]] = None) -> ToolExecutor:
    """
    Get or create executor instance.

    If config is provided, creates a new executor with that config.
    If config is None, returns the default singleton executor.

    Args:
        config (Dict[str, Any], optional): Configuration overrides.
            If provided, creates a new executor instance.
            If None, returns the default singleton.

    Returns:
        ToolExecutor: Executor instance.
    """
    global _default_executor

    # If config is provided, create a new executor with that config
    if config is not None:
        return ToolExecutor(config)

    # Otherwise, return the default singleton
    if _default_executor is None:
        _default_executor = ToolExecutor()
    return _default_executor