chuk-tool-processor 0.1.5__py3-none-any.whl → 0.1.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of chuk-tool-processor has been flagged by the registry as possibly problematic; consult the registry's advisory page for details.
- chuk_tool_processor/core/processor.py +345 -132
- chuk_tool_processor/execution/strategies/inprocess_strategy.py +512 -68
- chuk_tool_processor/execution/strategies/subprocess_strategy.py +523 -63
- chuk_tool_processor/execution/tool_executor.py +282 -24
- chuk_tool_processor/execution/wrappers/caching.py +465 -123
- chuk_tool_processor/execution/wrappers/rate_limiting.py +199 -86
- chuk_tool_processor/execution/wrappers/retry.py +133 -23
- chuk_tool_processor/logging/__init__.py +83 -10
- chuk_tool_processor/logging/context.py +218 -22
- chuk_tool_processor/logging/formatter.py +56 -13
- chuk_tool_processor/logging/helpers.py +91 -16
- chuk_tool_processor/logging/metrics.py +75 -6
- chuk_tool_processor/mcp/mcp_tool.py +80 -35
- chuk_tool_processor/mcp/register_mcp_tools.py +74 -56
- chuk_tool_processor/mcp/setup_mcp_sse.py +41 -36
- chuk_tool_processor/mcp/setup_mcp_stdio.py +39 -37
- chuk_tool_processor/mcp/stream_manager.py +28 -0
- chuk_tool_processor/models/execution_strategy.py +52 -3
- chuk_tool_processor/models/streaming_tool.py +110 -0
- chuk_tool_processor/models/tool_call.py +56 -4
- chuk_tool_processor/models/tool_result.py +115 -9
- chuk_tool_processor/models/validated_tool.py +15 -13
- chuk_tool_processor/plugins/discovery.py +115 -70
- chuk_tool_processor/plugins/parsers/base.py +13 -5
- chuk_tool_processor/plugins/parsers/{function_call_tool_plugin.py → function_call_tool.py} +39 -20
- chuk_tool_processor/plugins/parsers/json_tool.py +50 -0
- chuk_tool_processor/plugins/parsers/openai_tool.py +88 -0
- chuk_tool_processor/plugins/parsers/xml_tool.py +74 -20
- chuk_tool_processor/registry/__init__.py +46 -7
- chuk_tool_processor/registry/auto_register.py +92 -28
- chuk_tool_processor/registry/decorators.py +134 -11
- chuk_tool_processor/registry/interface.py +48 -14
- chuk_tool_processor/registry/metadata.py +52 -6
- chuk_tool_processor/registry/provider.py +75 -36
- chuk_tool_processor/registry/providers/__init__.py +49 -10
- chuk_tool_processor/registry/providers/memory.py +59 -48
- chuk_tool_processor/registry/tool_export.py +208 -39
- chuk_tool_processor/utils/validation.py +18 -13
- chuk_tool_processor-0.1.7.dist-info/METADATA +401 -0
- chuk_tool_processor-0.1.7.dist-info/RECORD +58 -0
- {chuk_tool_processor-0.1.5.dist-info → chuk_tool_processor-0.1.7.dist-info}/WHEEL +1 -1
- chuk_tool_processor/plugins/parsers/json_tool_plugin.py +0 -38
- chuk_tool_processor/plugins/parsers/openai_tool_plugin.py +0 -76
- chuk_tool_processor-0.1.5.dist-info/METADATA +0 -462
- chuk_tool_processor-0.1.5.dist-info/RECORD +0 -57
- {chuk_tool_processor-0.1.5.dist-info → chuk_tool_processor-0.1.7.dist-info}/top_level.txt +0 -0
|
@@ -1,14 +1,21 @@
|
|
|
1
1
|
# chuk_tool_processor/models/execution_strategy.py
|
|
2
|
+
"""
|
|
3
|
+
Abstract base class for tool execution strategies.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
2
7
|
from abc import ABC, abstractmethod
|
|
3
|
-
from typing import List, Optional
|
|
8
|
+
from typing import List, Optional, Dict, Any, AsyncIterator
|
|
4
9
|
|
|
5
10
|
from chuk_tool_processor.models.tool_call import ToolCall
|
|
6
11
|
from chuk_tool_processor.models.tool_result import ToolResult
|
|
7
12
|
|
|
8
|
-
|
|
9
13
|
class ExecutionStrategy(ABC):
    """Strategy interface for executing ToolCall objects.

    Concrete strategies must provide ``run``; they may additionally
    override ``stream_run`` (and ``supports_streaming``) to offer true
    streaming behaviour.
    """

    @abstractmethod
    async def run(
        self,
        calls: "List[ToolCall]",
        timeout: "Optional[float]" = None,
    ) -> "List[ToolResult]":
        """Execute a list of tool calls and return their results.

        Args:
            calls: List of ToolCall objects to execute.
            timeout: Optional timeout in seconds for each call.

        Returns:
            List of ToolResult objects in the same order as the calls.
        """

    async def stream_run(
        self,
        calls: "List[ToolCall]",
        timeout: "Optional[float]" = None,
    ) -> "AsyncIterator[ToolResult]":
        """Execute tool calls and yield results as they become available.

        The default implementation simply awaits :meth:`run` and then
        yields each result in order; streaming-capable subclasses should
        override this for incremental delivery.

        Args:
            calls: List of ToolCall objects to execute.
            timeout: Optional timeout in seconds for each call.

        Yields:
            ToolResult objects as they become available.
        """
        for completed in await self.run(calls, timeout=timeout):
            yield completed

    @property
    def supports_streaming(self) -> bool:
        """Whether this strategy implements true streaming.

        Returns ``False`` here; streaming-capable strategies override
        this to return ``True``.
        """
        return False
|
|
@@ -0,0 +1,110 @@
|
|
|
1
|
+
#!/usr/bin/env python
|
|
2
|
+
# chuk_tool_processor/models/streaming_tool.py
|
|
3
|
+
"""
|
|
4
|
+
Base class for tools that support streaming results.
|
|
5
|
+
|
|
6
|
+
This enables tools to yield incremental results during their execution,
|
|
7
|
+
which is useful for long-running operations or real-time data processing.
|
|
8
|
+
"""
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import asyncio
|
|
12
|
+
from abc import abstractmethod
|
|
13
|
+
from typing import Any, AsyncIterator, List, TypeVar, Generic, ClassVar, Optional, Dict
|
|
14
|
+
|
|
15
|
+
from pydantic import BaseModel, ConfigDict
|
|
16
|
+
|
|
17
|
+
from chuk_tool_processor.models.validated_tool import ValidatedTool
|
|
18
|
+
|
|
19
|
+
T = TypeVar('T')
|
|
20
|
+
|
|
21
|
+
class StreamingTool(ValidatedTool):
    """
    Base class for tools that support streaming responses.

    Subclasses must implement ``_stream_execute``, an async generator that
    yields results one by one.  Executors should consume ``stream_execute``
    directly for true streaming; ``execute`` remains available and simply
    collects the whole stream into a list.

    Example:
        ```python
        class Counter(StreamingTool):
            class Arguments(BaseModel):
                count: int = 10
                delay: float = 0.5

            class Result(BaseModel):
                value: int

            async def _stream_execute(self, count: int, delay: float) -> AsyncIterator[Result]:
                for i in range(count):
                    await asyncio.sleep(delay)
                    yield self.Result(value=i)
        ```

    Streaming usage:
        ```python
        counter_tool = Counter()
        async for result in counter_tool.stream_execute(count=5, delay=0.1):
            print(f"Count: {result.value}")
        ```
    """
    # ClassVar so Pydantic does not treat this flag as a model field.
    supports_streaming: ClassVar[bool] = True

    # Allow non-Pydantic types (e.g. async iterators) in annotations.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    async def stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
        """
        Execute the tool and stream results incrementally.

        Validates *kwargs* against the ``Arguments`` model, then delegates
        to ``_stream_execute``.  The executor should use this entry point
        directly to support true streaming.

        Args:
            **kwargs: Keyword arguments for the tool.

        Yields:
            Results as they are generated by the tool.
        """
        # Validate arguments using the Arguments model (raises on bad input).
        args = self.Arguments(**kwargs)

        # Stream results directly from _stream_execute.
        async for result in self._stream_execute(**args.model_dump()):
            yield result

    async def execute(self, **kwargs: Any) -> Any:
        """
        Execute the tool and collect all results.

        For streaming tools this drains stream_execute into a list, for
        compatibility with the regular (non-streaming) execution model.

        Args:
            **kwargs: Keyword arguments for the tool.

        Returns:
            List of all streamed results.
        """
        results: List[Any] = []
        async for chunk in self.stream_execute(**kwargs):
            results.append(chunk)
        return results

    @abstractmethod
    async def _stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
        """
        Execute the tool and yield results incrementally.

        This must be implemented by streaming tool subclasses.

        Args:
            **kwargs: Tool-specific arguments.

        Yields:
            Results as they are generated.
        """
        # Fix: previously this yielded the ``NotImplemented`` sentinel into
        # the stream if it was ever invoked (e.g. via super()).  Raise
        # instead; the unreachable ``yield`` keeps this an async generator
        # so the abstract signature matches concrete implementations.
        raise NotImplementedError("StreamingTool subclasses must implement _stream_execute()")
        yield  # pragma: no cover
|
|
@@ -1,7 +1,59 @@
|
|
|
1
1
|
# chuk_tool_processor/models/tool_call.py
|
|
2
|
-
|
|
3
|
-
|
|
2
|
+
"""
|
|
3
|
+
Model representing a tool call with arguments.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
7
|
+
import uuid
|
|
8
|
+
from typing import Any, Dict, Optional
|
|
9
|
+
|
|
10
|
+
from pydantic import BaseModel, Field, ConfigDict
|
|
4
11
|
|
|
5
12
|
class ToolCall(BaseModel):
    """
    A single invocation of a named tool with keyword arguments.

    Attributes:
        id: Unique identifier for the tool call
        tool: Name of the tool to call
        namespace: Namespace the tool belongs to
        arguments: Arguments to pass to the tool
    """
    model_config = ConfigDict(extra='ignore')

    id: str = Field(
        default_factory=lambda: str(uuid.uuid4()),
        description="Unique identifier for the tool call",
    )
    tool: str = Field(
        ...,
        min_length=1,
        description="Name of the tool to call; must be non-empty",
    )
    namespace: str = Field(
        default="default",
        description="Namespace the tool belongs to",
    )
    arguments: Dict[str, Any] = Field(
        default_factory=dict,
        description="Arguments to pass to the tool",
    )

    async def to_dict(self) -> Dict[str, Any]:
        """Convert to a dictionary for serialization."""
        return dict(
            id=self.id,
            tool=self.tool,
            namespace=self.namespace,
            arguments=self.arguments,
        )

    @classmethod
    async def from_dict(cls, data: Dict[str, Any]) -> "ToolCall":
        """Create a ToolCall from a dictionary."""
        return cls(**data)

    def __str__(self) -> str:
        """String representation of the tool call."""
        rendered = [f"{k}={v!r}" for k, v in self.arguments.items()]
        return f"ToolCall({self.tool}, {', '.join(rendered)})"
|
|
@@ -1,23 +1,44 @@
|
|
|
1
1
|
# chuk_tool_processor/models/tool_result.py
|
|
2
|
+
"""
|
|
3
|
+
Model representing the result of a tool execution.
|
|
4
|
+
"""
|
|
5
|
+
from __future__ import annotations
|
|
6
|
+
|
|
2
7
|
import os
|
|
3
|
-
|
|
4
|
-
from typing import Any, Optional
|
|
8
|
+
import uuid
|
|
5
9
|
from datetime import datetime, timezone
|
|
10
|
+
from typing import Any, Dict, Optional, List
|
|
11
|
+
|
|
12
|
+
from pydantic import BaseModel, Field, ConfigDict
|
|
6
13
|
|
|
7
14
|
class ToolResult(BaseModel):
|
|
8
15
|
"""
|
|
9
16
|
Represents the result of executing a tool.
|
|
10
|
-
|
|
17
|
+
|
|
18
|
+
Includes timing, host, and process metadata for diagnostics and tracing.
|
|
19
|
+
|
|
20
|
+
Attributes:
|
|
21
|
+
id: Unique identifier for the result
|
|
22
|
+
tool: Name of the tool that was executed
|
|
23
|
+
result: Return value from the tool execution
|
|
24
|
+
error: Error message if execution failed
|
|
25
|
+
start_time: UTC timestamp when execution started
|
|
26
|
+
end_time: UTC timestamp when execution finished
|
|
27
|
+
machine: Hostname where the tool ran
|
|
28
|
+
pid: Process ID of the worker
|
|
29
|
+
cached: Flag indicating if the result was retrieved from cache
|
|
30
|
+
attempts: Number of execution attempts made
|
|
31
|
+
stream_id: Optional identifier for streaming results
|
|
32
|
+
is_partial: Whether this is a partial streaming result
|
|
11
33
|
"""
|
|
12
|
-
# Configure Pydantic to ignore any extra fields
|
|
13
34
|
model_config = ConfigDict(extra='ignore')
|
|
14
35
|
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
description="True if this result was retrieved from cache"
|
|
36
|
+
id: str = Field(
|
|
37
|
+
default_factory=lambda: str(uuid.uuid4()),
|
|
38
|
+
description="Unique identifier for this result"
|
|
19
39
|
)
|
|
20
|
-
|
|
40
|
+
|
|
41
|
+
# Core fields
|
|
21
42
|
tool: str = Field(
|
|
22
43
|
...,
|
|
23
44
|
min_length=1,
|
|
@@ -31,6 +52,8 @@ class ToolResult(BaseModel):
|
|
|
31
52
|
None,
|
|
32
53
|
description="Error message if execution failed"
|
|
33
54
|
)
|
|
55
|
+
|
|
56
|
+
# Execution metadata
|
|
34
57
|
start_time: datetime = Field(
|
|
35
58
|
default_factory=lambda: datetime.now(timezone.utc),
|
|
36
59
|
description="UTC timestamp when execution started"
|
|
@@ -47,3 +70,86 @@ class ToolResult(BaseModel):
|
|
|
47
70
|
default_factory=lambda: os.getpid(),
|
|
48
71
|
description="Process ID of the worker"
|
|
49
72
|
)
|
|
73
|
+
|
|
74
|
+
# Extended features
|
|
75
|
+
cached: bool = Field(
|
|
76
|
+
default=False,
|
|
77
|
+
description="True if this result was retrieved from cache"
|
|
78
|
+
)
|
|
79
|
+
attempts: int = Field(
|
|
80
|
+
default=1,
|
|
81
|
+
description="Number of execution attempts made"
|
|
82
|
+
)
|
|
83
|
+
|
|
84
|
+
# Streaming support
|
|
85
|
+
stream_id: Optional[str] = Field(
|
|
86
|
+
default=None,
|
|
87
|
+
description="Identifier for this stream of results (for streaming tools)"
|
|
88
|
+
)
|
|
89
|
+
is_partial: bool = Field(
|
|
90
|
+
default=False,
|
|
91
|
+
description="True if this is a partial result in a stream"
|
|
92
|
+
)
|
|
93
|
+
|
|
94
|
+
@property
|
|
95
|
+
def is_success(self) -> bool:
|
|
96
|
+
"""Check if the execution was successful (no error)."""
|
|
97
|
+
return self.error is None
|
|
98
|
+
|
|
99
|
+
@property
|
|
100
|
+
def duration(self) -> float:
|
|
101
|
+
"""Calculate the execution duration in seconds."""
|
|
102
|
+
if not self.start_time or not self.end_time:
|
|
103
|
+
return 0.0
|
|
104
|
+
return (self.end_time - self.start_time).total_seconds()
|
|
105
|
+
|
|
106
|
+
async def to_dict(self) -> Dict[str, Any]:
|
|
107
|
+
"""Convert to a dictionary for serialization."""
|
|
108
|
+
return {
|
|
109
|
+
"id": self.id,
|
|
110
|
+
"tool": self.tool,
|
|
111
|
+
"result": self.result,
|
|
112
|
+
"error": self.error,
|
|
113
|
+
"success": self.is_success,
|
|
114
|
+
"duration": self.duration,
|
|
115
|
+
"start_time": self.start_time.isoformat(),
|
|
116
|
+
"end_time": self.end_time.isoformat(),
|
|
117
|
+
"machine": self.machine,
|
|
118
|
+
"pid": self.pid,
|
|
119
|
+
"cached": self.cached,
|
|
120
|
+
"attempts": self.attempts,
|
|
121
|
+
"stream_id": self.stream_id,
|
|
122
|
+
"is_partial": self.is_partial
|
|
123
|
+
}
|
|
124
|
+
|
|
125
|
+
@classmethod
|
|
126
|
+
def create_stream_chunk(
|
|
127
|
+
cls,
|
|
128
|
+
tool: str,
|
|
129
|
+
result: Any,
|
|
130
|
+
stream_id: Optional[str] = None
|
|
131
|
+
) -> ToolResult:
|
|
132
|
+
"""Create a partial streaming result."""
|
|
133
|
+
stream_id = stream_id or str(uuid.uuid4())
|
|
134
|
+
return cls(
|
|
135
|
+
tool=tool,
|
|
136
|
+
result=result,
|
|
137
|
+
stream_id=stream_id,
|
|
138
|
+
is_partial=True
|
|
139
|
+
)
|
|
140
|
+
|
|
141
|
+
@classmethod
|
|
142
|
+
async def from_dict(cls, data: Dict[str, Any]) -> ToolResult:
|
|
143
|
+
"""Create a ToolResult from a dictionary."""
|
|
144
|
+
# Handle datetime fields
|
|
145
|
+
if isinstance(data.get("start_time"), str):
|
|
146
|
+
data["start_time"] = datetime.fromisoformat(data["start_time"])
|
|
147
|
+
if isinstance(data.get("end_time"), str):
|
|
148
|
+
data["end_time"] = datetime.fromisoformat(data["end_time"])
|
|
149
|
+
|
|
150
|
+
return cls(**data)
|
|
151
|
+
|
|
152
|
+
def __str__(self) -> str:
|
|
153
|
+
"""String representation of the tool result."""
|
|
154
|
+
status = "success" if self.is_success else f"error: {self.error}"
|
|
155
|
+
return f"ToolResult({self.tool}, {status}, duration={self.duration:.3f}s)"
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
# chuk_tool_processor/models/validated_tool.py
|
|
2
2
|
"""
|
|
3
|
-
Self-contained base-class for *declarative* tools.
|
|
3
|
+
Self-contained base-class for *declarative* async-native tools.
|
|
4
4
|
|
|
5
5
|
Subclass it like so:
|
|
6
6
|
|
|
@@ -12,7 +12,7 @@ Subclass it like so:
|
|
|
12
12
|
class Result(BaseModel):
|
|
13
13
|
sum: int
|
|
14
14
|
|
|
15
|
-
def _execute(self, *, x: int, y: int) -> Result:
|
|
15
|
+
async def _execute(self, *, x: int, y: int) -> Result:
|
|
16
16
|
return self.Result(sum=x + y)
|
|
17
17
|
"""
|
|
18
18
|
from __future__ import annotations
|
|
@@ -93,7 +93,7 @@ class _ExportMixin:
|
|
|
93
93
|
# The public validated base-class
|
|
94
94
|
# --------------------------------------------------------------------------- #
|
|
95
95
|
class ValidatedTool(_ExportMixin, BaseModel):
|
|
96
|
-
"""Pydantic-validated base for new tools."""
|
|
96
|
+
"""Pydantic-validated base for new async-native tools."""
|
|
97
97
|
|
|
98
98
|
# ------------------------------------------------------------------ #
|
|
99
99
|
# Inner models – override in subclasses
|
|
@@ -107,11 +107,11 @@ class ValidatedTool(_ExportMixin, BaseModel):
|
|
|
107
107
|
# ------------------------------------------------------------------ #
|
|
108
108
|
# Public entry-point called by the processor
|
|
109
109
|
# ------------------------------------------------------------------ #
|
|
110
|
-
def execute(self: T_Validated, **kwargs: Any) -> BaseModel:
|
|
110
|
+
async def execute(self: T_Validated, **kwargs: Any) -> BaseModel:
|
|
111
111
|
"""Validate *kwargs*, run `_execute`, validate the result."""
|
|
112
112
|
try:
|
|
113
113
|
args = self.Arguments(**kwargs) # type: ignore[arg-type]
|
|
114
|
-
res = self._execute(**args.model_dump()) # type: ignore[arg-type]
|
|
114
|
+
res = await self._execute(**args.model_dump()) # type: ignore[arg-type]
|
|
115
115
|
|
|
116
116
|
return (
|
|
117
117
|
res
|
|
@@ -124,16 +124,16 @@ class ValidatedTool(_ExportMixin, BaseModel):
|
|
|
124
124
|
# ------------------------------------------------------------------ #
|
|
125
125
|
# Sub-classes must implement this
|
|
126
126
|
# ------------------------------------------------------------------ #
|
|
127
|
-
def _execute(self, **_kwargs: Any): # noqa: D401 – expected override
|
|
128
|
-
raise NotImplementedError("Tool must implement _execute()")
|
|
127
|
+
async def _execute(self, **_kwargs: Any): # noqa: D401 – expected override
|
|
128
|
+
raise NotImplementedError("Tool must implement async _execute()")
|
|
129
129
|
|
|
130
130
|
|
|
131
131
|
# --------------------------------------------------------------------------- #
|
|
132
|
-
# Decorator to retrofit validation onto classic
|
|
132
|
+
# Decorator to retrofit validation onto classic "imperative" tools
|
|
133
133
|
# --------------------------------------------------------------------------- #
|
|
134
134
|
def with_validation(cls): # noqa: D401 – factory
|
|
135
135
|
"""
|
|
136
|
-
Decorator that wraps an existing ``execute`` method with:
|
|
136
|
+
Decorator that wraps an existing async ``execute`` method with:
|
|
137
137
|
|
|
138
138
|
* argument validation (based on type hints)
|
|
139
139
|
* result validation (based on return annotation)
|
|
@@ -143,13 +143,15 @@ def with_validation(cls): # noqa: D401 – factory
|
|
|
143
143
|
validate_result,
|
|
144
144
|
)
|
|
145
145
|
|
|
146
|
-
original
|
|
146
|
+
original = cls.execute # type: ignore[attr-defined]
|
|
147
|
+
if not inspect.iscoroutinefunction(original):
|
|
148
|
+
raise TypeError(f"Tool {cls.__name__} must have an async execute method")
|
|
147
149
|
|
|
148
|
-
def
|
|
150
|
+
async def _async_wrapper(self, **kwargs): # type: ignore[override]
|
|
149
151
|
tool_name = cls.__name__
|
|
150
152
|
validated = validate_arguments(tool_name, original, kwargs)
|
|
151
|
-
result = original(self, **validated)
|
|
153
|
+
result = await original(self, **validated)
|
|
152
154
|
return validate_result(tool_name, original, result)
|
|
153
155
|
|
|
154
|
-
cls.execute =
|
|
156
|
+
cls.execute = _async_wrapper # type: ignore[assignment]
|
|
155
157
|
return cls
|