aiecs 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of aiecs might be problematic.
- aiecs/__init__.py +75 -0
- aiecs/__main__.py +41 -0
- aiecs/aiecs_client.py +295 -0
- aiecs/application/__init__.py +10 -0
- aiecs/application/executors/__init__.py +10 -0
- aiecs/application/executors/operation_executor.py +341 -0
- aiecs/config/__init__.py +15 -0
- aiecs/config/config.py +117 -0
- aiecs/config/registry.py +19 -0
- aiecs/core/__init__.py +46 -0
- aiecs/core/interface/__init__.py +34 -0
- aiecs/core/interface/execution_interface.py +150 -0
- aiecs/core/interface/storage_interface.py +214 -0
- aiecs/domain/__init__.py +20 -0
- aiecs/domain/context/__init__.py +28 -0
- aiecs/domain/context/content_engine.py +982 -0
- aiecs/domain/context/conversation_models.py +306 -0
- aiecs/domain/execution/__init__.py +12 -0
- aiecs/domain/execution/model.py +49 -0
- aiecs/domain/task/__init__.py +13 -0
- aiecs/domain/task/dsl_processor.py +460 -0
- aiecs/domain/task/model.py +50 -0
- aiecs/domain/task/task_context.py +257 -0
- aiecs/infrastructure/__init__.py +26 -0
- aiecs/infrastructure/messaging/__init__.py +13 -0
- aiecs/infrastructure/messaging/celery_task_manager.py +341 -0
- aiecs/infrastructure/messaging/websocket_manager.py +289 -0
- aiecs/infrastructure/monitoring/__init__.py +12 -0
- aiecs/infrastructure/monitoring/executor_metrics.py +138 -0
- aiecs/infrastructure/monitoring/structured_logger.py +50 -0
- aiecs/infrastructure/monitoring/tracing_manager.py +376 -0
- aiecs/infrastructure/persistence/__init__.py +12 -0
- aiecs/infrastructure/persistence/database_manager.py +286 -0
- aiecs/infrastructure/persistence/file_storage.py +671 -0
- aiecs/infrastructure/persistence/redis_client.py +162 -0
- aiecs/llm/__init__.py +54 -0
- aiecs/llm/base_client.py +99 -0
- aiecs/llm/client_factory.py +339 -0
- aiecs/llm/custom_callbacks.py +228 -0
- aiecs/llm/openai_client.py +125 -0
- aiecs/llm/vertex_client.py +186 -0
- aiecs/llm/xai_client.py +184 -0
- aiecs/main.py +351 -0
- aiecs/scripts/DEPENDENCY_SYSTEM_SUMMARY.md +241 -0
- aiecs/scripts/README_DEPENDENCY_CHECKER.md +309 -0
- aiecs/scripts/README_WEASEL_PATCH.md +126 -0
- aiecs/scripts/__init__.py +3 -0
- aiecs/scripts/dependency_checker.py +825 -0
- aiecs/scripts/dependency_fixer.py +348 -0
- aiecs/scripts/download_nlp_data.py +348 -0
- aiecs/scripts/fix_weasel_validator.py +121 -0
- aiecs/scripts/fix_weasel_validator.sh +82 -0
- aiecs/scripts/patch_weasel_library.sh +188 -0
- aiecs/scripts/quick_dependency_check.py +269 -0
- aiecs/scripts/run_weasel_patch.sh +41 -0
- aiecs/scripts/setup_nlp_data.sh +217 -0
- aiecs/tasks/__init__.py +2 -0
- aiecs/tasks/worker.py +111 -0
- aiecs/tools/__init__.py +196 -0
- aiecs/tools/base_tool.py +202 -0
- aiecs/tools/langchain_adapter.py +361 -0
- aiecs/tools/task_tools/__init__.py +82 -0
- aiecs/tools/task_tools/chart_tool.py +704 -0
- aiecs/tools/task_tools/classfire_tool.py +901 -0
- aiecs/tools/task_tools/image_tool.py +397 -0
- aiecs/tools/task_tools/office_tool.py +600 -0
- aiecs/tools/task_tools/pandas_tool.py +565 -0
- aiecs/tools/task_tools/report_tool.py +499 -0
- aiecs/tools/task_tools/research_tool.py +363 -0
- aiecs/tools/task_tools/scraper_tool.py +548 -0
- aiecs/tools/task_tools/search_api.py +7 -0
- aiecs/tools/task_tools/stats_tool.py +513 -0
- aiecs/tools/temp_file_manager.py +126 -0
- aiecs/tools/tool_executor/__init__.py +35 -0
- aiecs/tools/tool_executor/tool_executor.py +518 -0
- aiecs/utils/LLM_output_structor.py +409 -0
- aiecs/utils/__init__.py +23 -0
- aiecs/utils/base_callback.py +50 -0
- aiecs/utils/execution_utils.py +158 -0
- aiecs/utils/logging.py +1 -0
- aiecs/utils/prompt_loader.py +13 -0
- aiecs/utils/token_usage_repository.py +279 -0
- aiecs/ws/__init__.py +0 -0
- aiecs/ws/socket_server.py +41 -0
- aiecs-1.0.0.dist-info/METADATA +610 -0
- aiecs-1.0.0.dist-info/RECORD +90 -0
- aiecs-1.0.0.dist-info/WHEEL +5 -0
- aiecs-1.0.0.dist-info/entry_points.txt +7 -0
- aiecs-1.0.0.dist-info/licenses/LICENSE +225 -0
- aiecs-1.0.0.dist-info/top_level.txt +1 -0
aiecs/tools/base_tool.py
ADDED
@@ -0,0 +1,202 @@
import asyncio
import inspect
import logging
from typing import Any, Dict, List, Optional, Type, Union

from pydantic import BaseModel, ValidationError
import re

from aiecs.tools.tool_executor import (
    ToolExecutor,
    ToolExecutionError,
    InputValidationError,
    OperationError,
    SecurityError,
    get_executor,
    validate_input,
    cache_result,
    run_in_executor,
    measure_execution_time,
    sanitize_input
)

logger = logging.getLogger(__name__)

class BaseTool:
    """
    Base class for all tools, providing common functionality:
    - Input validation with Pydantic schemas
    - Caching with TTL and content-based keys
    - Concurrency with async/sync execution
    - Error handling with retries and context
    - Performance optimization with metrics
    - Logging with structured output

    Tools inheriting from this class focus on business logic, leveraging
    the executor's cross-cutting concerns.

    Example:
        class MyTool(BaseTool):
            class ReadSchema(BaseModel):
                path: str

            @validate_input(ReadSchema)
            @cache_result(ttl=300)
            @run_in_executor
            @measure_execution_time
            @sanitize_input
            def read(self, path: str):
                # Implementation
                pass
    """
    def __init__(self, config: Optional[Dict[str, Any]] = None):
        """
        Initialize the tool with optional configuration.

        Args:
            config (Dict[str, Any], optional): Tool-specific configuration.

        Raises:
            ValueError: If config is invalid.
        """
        self._executor = get_executor(config)
        self._config = config or {}
        self._schemas: Dict[str, Type[BaseModel]] = {}
        self._async_methods: List[str] = []
        self._register_schemas()
        self._register_async_methods()

    def _register_schemas(self) -> None:
        """
        Register Pydantic schemas for operations by inspecting inner Schema classes.

        Example:
            class MyTool(BaseTool):
                class ReadSchema(BaseModel):
                    path: str
                def read(self, path: str):
                    pass
            # Registers 'read' -> ReadSchema
        """
        for attr_name in dir(self.__class__):
            attr = getattr(self.__class__, attr_name)
            if isinstance(attr, type) and issubclass(attr, BaseModel) and attr.__name__.endswith('Schema'):
                op_name = attr.__name__.replace('Schema', '').lower()
                self._schemas[op_name] = attr

    def _register_async_methods(self) -> None:
        """
        Register async methods for proper execution handling.
        """
        for attr_name in dir(self.__class__):
            attr = getattr(self.__class__, attr_name)
            if inspect.iscoroutinefunction(attr) and not attr_name.startswith('_'):
                self._async_methods.append(attr_name)

    def _sanitize_kwargs(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
        """
        Sanitize keyword arguments to prevent injection attacks.

        Args:
            kwargs (Dict[str, Any]): Input keyword arguments.

        Returns:
            Dict[str, Any]: Sanitized keyword arguments.

        Raises:
            SecurityError: If kwargs contain malicious content.
        """
        sanitized = {}
        for k, v in kwargs.items():
            if isinstance(v, str) and re.search(r'(\bSELECT\b|\bINSERT\b|--|;|/\*)', v, re.IGNORECASE):
                raise SecurityError(f"Input parameter '{k}' contains potentially malicious content")
            sanitized[k] = v
        return sanitized

    def run(self, op: str, **kwargs) -> Any:
        """
        Execute a synchronous operation with parameters.

        Args:
            op (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        schema_class = self._schemas.get(op)
        if schema_class:
            try:
                schema = schema_class(**kwargs)
                kwargs = schema.model_dump(exclude_unset=True)
            except ValidationError as e:
                raise InputValidationError(f"Invalid input parameters: {e}")
        kwargs = self._sanitize_kwargs(kwargs)
        return self._executor.execute(self, op, **kwargs)

    async def run_async(self, op: str, **kwargs) -> Any:
        """
        Execute an asynchronous operation with parameters.

        Args:
            op (str): The name of the operation to execute.
            **kwargs: The parameters to pass to the operation.

        Returns:
            Any: The result of the operation.

        Raises:
            ToolExecutionError: If the operation fails.
            InputValidationError: If input parameters are invalid.
            SecurityError: If inputs contain malicious content.
        """
        schema_class = self._schemas.get(op)
        if schema_class:
            try:
                schema = schema_class(**kwargs)
                kwargs = schema.model_dump(exclude_unset=True)
            except ValidationError as e:
                raise InputValidationError(f"Invalid input parameters: {e}")
        kwargs = self._sanitize_kwargs(kwargs)
        return await self._executor.execute_async(self, op, **kwargs)

    async def run_batch(self, operations: List[Dict[str, Any]]) -> List[Any]:
        """
        Execute multiple operations in parallel.

        Args:
            operations (List[Dict[str, Any]]): List of operation dictionaries with 'op' and 'kwargs'.

        Returns:
            List[Any]: List of operation results.

        Raises:
            ToolExecutionError: If any operation fails.
            InputValidationError: If input parameters are invalid.
        """
        return await self._executor.execute_batch(self, operations)

    def _get_method_schema(self, method_name: str) -> Optional[Type[BaseModel]]:
        """
        Get the schema for a method if it exists.

        Args:
            method_name (str): The name of the method.

        Returns:
            Optional[Type[BaseModel]]: The schema class or None.
        """
        if method_name in self._schemas:
            return self._schemas[method_name]
        schema_name = method_name[0].upper() + method_name[1:] + 'Schema'
        for attr_name in dir(self.__class__):
            if attr_name == schema_name:
                attr = getattr(self.__class__, attr_name)
                if isinstance(attr, type) and issubclass(attr, BaseModel):
                    return attr
        return None
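Usage sketch (not part of the wheel): the class above registers any inner `*Schema` model against the operation of the same lower-cased name and routes `run()`/`run_async()` through the shared executor. The subclass below is a minimal, hypothetical illustration of that flow; `EchoTool`, `EchoSchema`, and the `echo` operation are invented names, and the sketch assumes the executor obtained from `aiecs.tools.tool_executor.get_executor()` dispatches `execute(tool, op, **kwargs)` to the method of the same name.

from pydantic import BaseModel
from aiecs.tools.base_tool import BaseTool

class EchoTool(BaseTool):
    # An inner class named "<Op>Schema" is picked up by _register_schemas()
    # and used to validate kwargs for the operation "echo".
    class EchoSchema(BaseModel):
        text: str

    def echo(self, text: str) -> str:
        # Business logic only; validation, sanitization, caching and metrics
        # are applied around this call by BaseTool.run() and the executor.
        return text.upper()

tool = EchoTool()
print(tool.run("echo", text="hello"))        # validated by EchoSchema, then dispatched
# tool.run("echo", text="1; DROP TABLE --")  # would raise SecurityError in _sanitize_kwargs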
aiecs/tools/langchain_adapter.py
ADDED
@@ -0,0 +1,361 @@
"""
Langchain Adapter: Converts BaseTool and its sub-functions into Langchain ReAct Agent compatible tool collections

Main Features:
1. Automatically discover all operation methods of BaseTool
2. Create independent Langchain Tool for each operation
3. Maintain all original functionality features (caching, validation, security, etc.)
4. Support synchronous and asynchronous execution
"""

import inspect
import logging
from typing import Any, Dict, List, Optional, Type, Union, get_type_hints
from pydantic import BaseModel, Field

try:
    from langchain.tools import BaseTool as LangchainBaseTool
    from langchain.callbacks.manager import CallbackManagerForToolRun, AsyncCallbackManagerForToolRun
    LANGCHAIN_AVAILABLE = True
except ImportError:
    # If langchain is not installed, create simple base class for type checking
    class LangchainBaseTool:
        pass
    CallbackManagerForToolRun = None
    AsyncCallbackManagerForToolRun = None
    LANGCHAIN_AVAILABLE = False

from aiecs.tools.base_tool import BaseTool
from aiecs.tools import get_tool, list_tools, TOOL_CLASSES

logger = logging.getLogger(__name__)

class LangchainToolAdapter(LangchainBaseTool):
    """
    Langchain tool adapter for a single operation

    Wraps one operation method of BaseTool as an independent Langchain tool
    """

    # Define class attributes
    name: str = ""
    description: str = ""

    def __init__(
        self,
        base_tool_name: str,
        operation_name: str,
        operation_schema: Optional[Type[BaseModel]] = None,
        description: Optional[str] = None
    ):
        """
        Initialize adapter

        Args:
            base_tool_name: Original tool name
            operation_name: Operation name
            operation_schema: Pydantic Schema for the operation
            description: Tool description
        """
        # Construct tool name and description
        self.name = f"{base_tool_name}_{operation_name}"
        self.description = description or f"Execute {operation_name} operation from {base_tool_name} tool"

        # Store tool information (set via self.__dict__ directly to avoid pydantic validation)
        self.__dict__['base_tool_name'] = base_tool_name
        self.__dict__['operation_name'] = operation_name
        self.__dict__['operation_schema'] = operation_schema

        # Set parameter Schema
        if operation_schema:
            self.args_schema = operation_schema

        super().__init__()

    def _run(
        self,
        run_manager: Optional[CallbackManagerForToolRun] = None,
        **kwargs: Any
    ) -> Any:
        """Execute operation synchronously"""
        try:
            # Get original tool instance
            base_tool = get_tool(self.__dict__['base_tool_name'])

            # Execute operation
            result = base_tool.run(self.__dict__['operation_name'], **kwargs)

            logger.info(f"Successfully executed {self.name} with result type: {type(result)}")
            return result

        except Exception as e:
            logger.error(f"Error executing {self.name}: {str(e)}")
            raise

    async def _arun(
        self,
        run_manager: Optional[AsyncCallbackManagerForToolRun] = None,
        **kwargs: Any
    ) -> Any:
        """Execute operation asynchronously"""
        try:
            # Get original tool instance
            base_tool = get_tool(self.__dict__['base_tool_name'])

            # Execute asynchronous operation
            result = await base_tool.run_async(self.__dict__['operation_name'], **kwargs)

            logger.info(f"Successfully executed {self.name} async with result type: {type(result)}")
            return result

        except Exception as e:
            logger.error(f"Error executing {self.name} async: {str(e)}")
            raise

class ToolRegistry:
    """Tool Registry: Manages conversion from BaseTool to Langchain tools"""

    def __init__(self):
        self._langchain_tools: Dict[str, LangchainToolAdapter] = {}

    def discover_operations(self, base_tool_class: Type[BaseTool]) -> List[Dict[str, Any]]:
        """
        Discover all operation methods and Schemas of a BaseTool class

        Args:
            base_tool_class: BaseTool subclass

        Returns:
            List of operation information, including method names, Schemas, descriptions, etc.
        """
        operations = []

        # Get all Schema classes
        schemas = {}
        for attr_name in dir(base_tool_class):
            attr = getattr(base_tool_class, attr_name)
            if isinstance(attr, type) and issubclass(attr, BaseModel) and attr.__name__.endswith('Schema'):
                op_name = attr.__name__.replace('Schema', '').lower()
                schemas[op_name] = attr

        # Get all public methods
        for method_name in dir(base_tool_class):
            if method_name.startswith('_'):
                continue

            method = getattr(base_tool_class, method_name)
            if not callable(method):
                continue

            # Skip base class methods
            if method_name in ['run', 'run_async', 'run_batch']:
                continue

            # Get method information
            operation_info = {
                'name': method_name,
                'method': method,
                'schema': schemas.get(method_name),
                'description': inspect.getdoc(method) or f"Execute {method_name} operation",
                'is_async': inspect.iscoroutinefunction(method)
            }

            operations.append(operation_info)

        return operations

    def _extract_description(self, method, base_tool_name: str, operation_name: str, schema: Optional[Type[BaseModel]] = None) -> str:
        """Extract detailed description from method docstring and schema"""
        doc = inspect.getdoc(method)

        # Base description
        if doc:
            base_desc = doc.split('\n')[0].strip()
        else:
            base_desc = f"Execute {operation_name} operation"

        # Enhanced description - add specific tool functionality description
        enhanced_desc = f"{base_desc}"

        # Add specific descriptions based on tool name and operation
        if base_tool_name == "chart":
            if operation_name == "read_data":
                enhanced_desc = "Read and analyze data files in multiple formats (CSV, Excel, JSON, Parquet, etc.). Returns data structure summary, preview, and optional export functionality."
            elif operation_name == "visualize":
                enhanced_desc = "Create data visualizations including histograms, scatter plots, bar charts, line charts, heatmaps, and pair plots. Supports customizable styling, colors, and high-resolution output."
            elif operation_name == "export_data":
                enhanced_desc = "Export data to various formats (JSON, CSV, HTML, Excel, Markdown) with optional variable selection and path customization."
        elif base_tool_name == "pandas":
            enhanced_desc = f"Pandas data manipulation: {base_desc}. Supports DataFrame operations with built-in validation and error handling."
        elif base_tool_name == "stats":
            enhanced_desc = f"Statistical analysis: {base_desc}. Provides statistical tests, regression analysis, and data preprocessing capabilities."

        # Add parameter information
        if schema:
            try:
                fields = schema.__fields__ if hasattr(schema, '__fields__') else {}
                if fields:
                    required_params = [name for name, field in fields.items() if field.is_required()]
                    optional_params = [name for name, field in fields.items() if not field.is_required()]

                    param_desc = ""
                    if required_params:
                        param_desc += f" Required: {', '.join(required_params)}."
                    if optional_params:
                        param_desc += f" Optional: {', '.join(optional_params)}."

                    enhanced_desc += param_desc
            except Exception:
                pass

        return enhanced_desc

    def create_langchain_tools(self, tool_name: str) -> List[LangchainToolAdapter]:
        """
        Create all Langchain adapters for the specified tool

        Args:
            tool_name: Tool name

        Returns:
            List of Langchain tool adapters
        """
        if not LANGCHAIN_AVAILABLE:
            raise ImportError("langchain is not installed. Please install it to use this adapter.")

        if tool_name not in TOOL_CLASSES:
            raise ValueError(f"Tool '{tool_name}' not found in registry")

        base_tool_class = TOOL_CLASSES[tool_name]
        operations = self.discover_operations(base_tool_class)

        langchain_tools = []
        for op_info in operations:
            # Generate enhanced description
            enhanced_description = self._extract_description(
                op_info['method'],
                tool_name,
                op_info['name'],
                op_info['schema']
            )

            adapter = LangchainToolAdapter(
                base_tool_name=tool_name,
                operation_name=op_info['name'],
                operation_schema=op_info['schema'],
                description=enhanced_description
            )

            langchain_tools.append(adapter)
            self._langchain_tools[adapter.name] = adapter

        logger.info(f"Created {len(langchain_tools)} Langchain tools for {tool_name}")
        return langchain_tools

    def create_all_langchain_tools(self) -> List[LangchainToolAdapter]:
        """
        Create Langchain adapters for all registered BaseTools

        Returns:
            List of all Langchain tool adapters
        """
        all_tools = []

        for tool_name in list_tools():
            try:
                tools = self.create_langchain_tools(tool_name)
                all_tools.extend(tools)
            except Exception as e:
                logger.error(f"Failed to create Langchain tools for {tool_name}: {e}")

        logger.info(f"Created total {len(all_tools)} Langchain tools from {len(list_tools())} base tools")
        return all_tools

    def get_tool(self, name: str) -> Optional[LangchainToolAdapter]:
        """Get Langchain tool with the specified name"""
        return self._langchain_tools.get(name)

    def list_langchain_tools(self) -> List[str]:
        """List all Langchain tool names"""
        return list(self._langchain_tools.keys())

# Global registry instance
tool_registry = ToolRegistry()

def get_langchain_tools(tool_names: Optional[List[str]] = None) -> List[LangchainToolAdapter]:
    """
    Get Langchain tool collection

    Args:
        tool_names: List of tool names to convert; None means convert all tools

    Returns:
        List of Langchain tool adapters
    """
    if tool_names is None:
        return tool_registry.create_all_langchain_tools()

    all_tools = []
    for tool_name in tool_names:
        tools = tool_registry.create_langchain_tools(tool_name)
        all_tools.extend(tools)

    return all_tools

def create_react_agent_tools() -> List[LangchainToolAdapter]:
    """
    Create complete tool collection for a ReAct Agent

    Returns:
        List of adapted Langchain tools
    """
    return get_langchain_tools()

def create_tool_calling_agent_tools() -> List[LangchainToolAdapter]:
    """
    Create complete tool collection for a Tool Calling Agent

    Returns:
        List of adapted Langchain tools optimized for tool calling
    """
    return get_langchain_tools()

# Compatibility check functionality
def check_langchain_compatibility() -> Dict[str, Any]:
    """
    Check compatibility between the current environment and Langchain

    Returns:
        Compatibility check results
    """
    result = {
        'langchain_available': LANGCHAIN_AVAILABLE,
        'total_base_tools': len(list_tools()),
        'compatible_tools': [],
        'incompatible_tools': [],
        'total_operations': 0
    }

    if not LANGCHAIN_AVAILABLE:
        result['error'] = 'Langchain not installed'
        return result

    for tool_name in list_tools():
        try:
            tool_class = TOOL_CLASSES[tool_name]
            operations = tool_registry.discover_operations(tool_class)

            result['compatible_tools'].append({
                'name': tool_name,
                'operations_count': len(operations),
                'operations': [op['name'] for op in operations]
            })
            result['total_operations'] += len(operations)

        except Exception as e:
            result['incompatible_tools'].append({
                'name': tool_name,
                'error': str(e)
            })

    return result
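A small sketch of how the adapter might be driven, assuming langchain is installed and that tools named "pandas" and "stats" are registered in TOOL_CLASSES (those names are suggested by the description branches above but are not guaranteed here):

from aiecs.tools.langchain_adapter import (
    get_langchain_tools,
    check_langchain_compatibility,
)

# Inspect what can be wrapped before building an agent.
compat = check_langchain_compatibility()
print(compat["langchain_available"], compat["total_operations"])

# Wrap selected base tools; each public operation becomes one LangChain tool
# named "<tool>_<operation>" carrying the enhanced description built above.
tools = get_langchain_tools(["pandas", "stats"])
for t in tools:
    print(t.name, "-", t.description[:60])

# The resulting adapters can then be passed to a LangChain agent as its tool list.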
aiecs/tools/task_tools/__init__.py
ADDED
@@ -0,0 +1,82 @@
# python-middleware/app/tools/task_tools/__init__.py

"""
Task Tools Module

This module contains specialized tools for various task-oriented operations:
- chart_tool: Chart and visualization operations
- classfire_tool: Classification and categorization operations
- image_tool: Image processing and manipulation operations
- office_tool: Office document processing operations
- pandas_tool: Data analysis and manipulation operations
- report_tool: Report generation and formatting operations
- research_tool: Research and information gathering operations
- scraper_tool: Web scraping and data extraction operations
- search_api: Search API integration operations
- stats_tool: Statistical analysis and computation operations
"""

# Lazy import all task tools to avoid heavy dependencies at import time
import os

# Define available tools for lazy loading
_AVAILABLE_TOOLS = [
    'chart_tool',
    'classfire_tool',
    'image_tool',
    'pandas_tool',
    'report_tool',
    'research_tool',
    'scraper_tool',
    'search_api',
    'stats_tool'
]

# Add office_tool conditionally
if not os.getenv('SKIP_OFFICE_TOOL', '').lower() in ('true', '1', 'yes'):
    _AVAILABLE_TOOLS.append('office_tool')

# Track which tools have been loaded
_LOADED_TOOLS = set()

def _lazy_load_tool(tool_name: str):
    """Lazy load a specific tool module"""
    if tool_name in _LOADED_TOOLS:
        return

    try:
        if tool_name == 'chart_tool':
            from . import chart_tool
        elif tool_name == 'classfire_tool':
            from . import classfire_tool
        elif tool_name == 'image_tool':
            from . import image_tool
        elif tool_name == 'office_tool':
            from . import office_tool
        elif tool_name == 'pandas_tool':
            from . import pandas_tool
        elif tool_name == 'report_tool':
            from . import report_tool
        elif tool_name == 'research_tool':
            from . import research_tool
        elif tool_name == 'scraper_tool':
            from . import scraper_tool
        elif tool_name == 'search_api':
            from . import search_api
        elif tool_name == 'stats_tool':
            from . import stats_tool

        _LOADED_TOOLS.add(tool_name)

    except Exception as e:
        import logging
        logger = logging.getLogger(__name__)
        logger.warning(f"Failed to load tool {tool_name}: {e}")

def load_all_tools():
    """Load all available tools (for backward compatibility)"""
    for tool_name in _AVAILABLE_TOOLS:
        _lazy_load_tool(tool_name)

# Export the tool modules for external access
__all__ = _AVAILABLE_TOOLS + ['load_all_tools', '_lazy_load_tool']
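The lazy-loading hooks above can be exercised roughly as sketched below; the sketch assumes that importing a tool module is what registers that tool with the wider aiecs.tools registry, and that SKIP_OFFICE_TOOL must be set before the package is imported, since the flag is read at import time of this module:

import os

# Opt out of the office_tool import before aiecs.tools.task_tools is loaded.
os.environ["SKIP_OFFICE_TOOL"] = "true"

from aiecs.tools import task_tools

# Import just one tool module on demand instead of pulling in every dependency.
task_tools._lazy_load_tool("pandas_tool")

# Or eagerly import everything in _AVAILABLE_TOOLS (backward-compatible path).
task_tools.load_all_tools()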