chuk-tool-processor 0.6.4__py3-none-any.whl → 0.9.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- chuk_tool_processor/core/__init__.py +32 -1
- chuk_tool_processor/core/exceptions.py +225 -13
- chuk_tool_processor/core/processor.py +135 -104
- chuk_tool_processor/execution/strategies/__init__.py +6 -0
- chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
- chuk_tool_processor/execution/strategies/subprocess_strategy.py +202 -206
- chuk_tool_processor/execution/tool_executor.py +82 -84
- chuk_tool_processor/execution/wrappers/__init__.py +42 -0
- chuk_tool_processor/execution/wrappers/caching.py +150 -116
- chuk_tool_processor/execution/wrappers/circuit_breaker.py +370 -0
- chuk_tool_processor/execution/wrappers/rate_limiting.py +76 -43
- chuk_tool_processor/execution/wrappers/retry.py +116 -78
- chuk_tool_processor/logging/__init__.py +23 -17
- chuk_tool_processor/logging/context.py +40 -45
- chuk_tool_processor/logging/formatter.py +22 -21
- chuk_tool_processor/logging/helpers.py +28 -42
- chuk_tool_processor/logging/metrics.py +13 -15
- chuk_tool_processor/mcp/__init__.py +8 -12
- chuk_tool_processor/mcp/mcp_tool.py +158 -114
- chuk_tool_processor/mcp/register_mcp_tools.py +22 -22
- chuk_tool_processor/mcp/setup_mcp_http_streamable.py +57 -17
- chuk_tool_processor/mcp/setup_mcp_sse.py +57 -17
- chuk_tool_processor/mcp/setup_mcp_stdio.py +11 -11
- chuk_tool_processor/mcp/stream_manager.py +333 -276
- chuk_tool_processor/mcp/transport/__init__.py +22 -29
- chuk_tool_processor/mcp/transport/base_transport.py +180 -44
- chuk_tool_processor/mcp/transport/http_streamable_transport.py +505 -325
- chuk_tool_processor/mcp/transport/models.py +100 -0
- chuk_tool_processor/mcp/transport/sse_transport.py +607 -276
- chuk_tool_processor/mcp/transport/stdio_transport.py +597 -116
- chuk_tool_processor/models/__init__.py +21 -1
- chuk_tool_processor/models/execution_strategy.py +16 -21
- chuk_tool_processor/models/streaming_tool.py +28 -25
- chuk_tool_processor/models/tool_call.py +49 -31
- chuk_tool_processor/models/tool_export_mixin.py +22 -8
- chuk_tool_processor/models/tool_result.py +40 -77
- chuk_tool_processor/models/tool_spec.py +350 -0
- chuk_tool_processor/models/validated_tool.py +36 -18
- chuk_tool_processor/observability/__init__.py +30 -0
- chuk_tool_processor/observability/metrics.py +312 -0
- chuk_tool_processor/observability/setup.py +105 -0
- chuk_tool_processor/observability/tracing.py +345 -0
- chuk_tool_processor/plugins/__init__.py +1 -1
- chuk_tool_processor/plugins/discovery.py +11 -11
- chuk_tool_processor/plugins/parsers/__init__.py +1 -1
- chuk_tool_processor/plugins/parsers/base.py +1 -2
- chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
- chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
- chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
- chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
- chuk_tool_processor/registry/__init__.py +12 -12
- chuk_tool_processor/registry/auto_register.py +22 -30
- chuk_tool_processor/registry/decorators.py +127 -129
- chuk_tool_processor/registry/interface.py +26 -23
- chuk_tool_processor/registry/metadata.py +27 -22
- chuk_tool_processor/registry/provider.py +17 -18
- chuk_tool_processor/registry/providers/__init__.py +16 -19
- chuk_tool_processor/registry/providers/memory.py +18 -25
- chuk_tool_processor/registry/tool_export.py +42 -51
- chuk_tool_processor/utils/validation.py +15 -16
- chuk_tool_processor-0.9.7.dist-info/METADATA +1813 -0
- chuk_tool_processor-0.9.7.dist-info/RECORD +67 -0
- chuk_tool_processor-0.6.4.dist-info/METADATA +0 -697
- chuk_tool_processor-0.6.4.dist-info/RECORD +0 -60
- {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/WHEEL +0 -0
- {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/top_level.txt +0 -0
chuk_tool_processor/models/__init__.py

@@ -1 +1,21 @@
-# chuk_tool_processor/models/__init__.py
+# chuk_tool_processor/models/__init__.py
+"""Data models for the tool processor."""
+
+from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
+from chuk_tool_processor.models.streaming_tool import StreamingTool
+from chuk_tool_processor.models.tool_call import ToolCall
+from chuk_tool_processor.models.tool_result import ToolResult
+from chuk_tool_processor.models.tool_spec import ToolCapability, ToolSpec, tool_spec
+from chuk_tool_processor.models.validated_tool import ValidatedTool, with_validation
+
+__all__ = [
+    "ExecutionStrategy",
+    "StreamingTool",
+    "ToolCall",
+    "ToolResult",
+    "ToolSpec",
+    "ToolCapability",
+    "tool_spec",
+    "ValidatedTool",
+    "with_validation",
+]
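With these re-exports in place, downstream code can pull the core models straight from the package root. A minimal sketch (assumed usage, based only on the re-exports above):

from chuk_tool_processor.models import ToolCall, ToolResult

call = ToolCall(tool="search", arguments={"query": "python"})
result = ToolResult(tool=call.tool, result={"hits": 3})
print(call.namespace, result.is_success)  # "default" True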
chuk_tool_processor/models/execution_strategy.py

@@ -2,67 +2,62 @@
 """
 Abstract base class for tool execution strategies.
 """
+
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from
+from collections.abc import AsyncIterator
 
 from chuk_tool_processor.models.tool_call import ToolCall
 from chuk_tool_processor.models.tool_result import ToolResult
 
+
 class ExecutionStrategy(ABC):
     """
     Strategy interface for executing ToolCall objects.
-
+
     All execution strategies must implement at least the run method,
     and optionally stream_run for streaming support.
     """
+
     @abstractmethod
-    async def run(
-        self,
-        calls: List[ToolCall],
-        timeout: Optional[float] = None
-    ) -> List[ToolResult]:
+    async def run(self, calls: list[ToolCall], timeout: float | None = None) -> list[ToolResult]:
         """
         Execute a list of tool calls and return their results.
-
+
         Args:
             calls: List of ToolCall objects to execute
             timeout: Optional timeout in seconds for each call
-
+
         Returns:
             List of ToolResult objects in the same order as the calls
         """
         pass
-
-    async def stream_run(
-        self,
-        calls: List[ToolCall],
-        timeout: Optional[float] = None
-    ) -> AsyncIterator[ToolResult]:
+
+    async def stream_run(self, calls: list[ToolCall], timeout: float | None = None) -> AsyncIterator[ToolResult]:
         """
         Execute tool calls and yield results as they become available.
-
+
         Default implementation executes all calls with run() and yields the results.
         Subclasses can override for true streaming behavior.
-
+
         Args:
             calls: List of ToolCall objects to execute
             timeout: Optional timeout in seconds for each call
-
+
         Yields:
             ToolResult objects as they become available
         """
         results = await self.run(calls, timeout=timeout)
         for result in results:
             yield result
-
+
     @property
     def supports_streaming(self) -> bool:
         """
         Check if this strategy supports true streaming.
-
+
         Default implementation returns False. Streaming-capable strategies
         should override this to return True.
         """
-        return False
+        return False
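To illustrate the new single-line, lower-case generic signatures, here is a minimal sketch of a custom strategy. The EchoStrategy class and its trivial behaviour are illustrative only, not part of the package:

import asyncio

from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
from chuk_tool_processor.models.tool_call import ToolCall
from chuk_tool_processor.models.tool_result import ToolResult


class EchoStrategy(ExecutionStrategy):
    """Hypothetical strategy that 'executes' each call by echoing its arguments."""

    async def run(self, calls: list[ToolCall], timeout: float | None = None) -> list[ToolResult]:
        # Results must come back in the same order as the calls.
        return [ToolResult(tool=call.tool, result=call.arguments) for call in calls]


async def main() -> None:
    strategy = EchoStrategy()
    # stream_run falls back to run() here, since supports_streaming stays False.
    async for result in strategy.stream_run([ToolCall(tool="echo", arguments={"x": 1})]):
        print(result.is_success, result.result)  # True {'x': 1}


asyncio.run(main())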
chuk_tool_processor/models/streaming_tool.py

@@ -6,41 +6,43 @@ Base class for tools that support streaming results.
 This enables tools to yield incremental results during their execution,
 which is useful for long-running operations or real-time data processing.
 """
+
 from __future__ import annotations
 
-import asyncio
 from abc import abstractmethod
-from
+from collections.abc import AsyncIterator
+from typing import Any, ClassVar, TypeVar
 
-from pydantic import
+from pydantic import ConfigDict
 
 from chuk_tool_processor.models.validated_tool import ValidatedTool
 
-T = TypeVar(
+T = TypeVar("T")
+
 
 class StreamingTool(ValidatedTool):
     """
     Base class for tools that support streaming responses.
-
+
     Subclasses must implement _stream_execute which yields results one by one.
     The executor should use stream_execute to access streaming results directly.
-
+
     Example:
         ```python
         class Counter(StreamingTool):
             class Arguments(BaseModel):
                 count: int = 10
                 delay: float = 0.5
-
+
             class Result(BaseModel):
                 value: int
-
+
             async def _stream_execute(self, count: int, delay: float) -> AsyncIterator[Result]:
                 for i in range(count):
                     await asyncio.sleep(delay)
                     yield self.Result(value=i)
         ```
-
+
     Streaming usage:
         ```python
         counter_tool = Counter()
@@ -48,42 +50,43 @@ class StreamingTool(ValidatedTool):
             print(f"Count: {result.value}")
         ```
     """
+
     # Mark this as a ClassVar so Pydantic doesn't treat it as a field
     supports_streaming: ClassVar[bool] = True
-
+
     # Use ConfigDict to configure model behavior
     model_config = ConfigDict(arbitrary_types_allowed=True)
-
+
     async def stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
         """
         Execute the tool and stream results incrementally.
-
+
         This public method validates arguments and then delegates to _stream_execute.
         It should be used directly by the executor to support true streaming.
-
+
         Args:
             **kwargs: Keyword arguments for the tool
-
+
         Yields:
             Results as they are generated by the tool
         """
         # Validate arguments using the Arguments model
         args = self.Arguments(**kwargs)
-
+
         # Stream results directly from _stream_execute
         async for result in self._stream_execute(**args.model_dump()):
             yield result
-
+
     async def execute(self, **kwargs: Any) -> Any:
         """
         Execute the tool and collect all results.
-
+
         For streaming tools, this collects all results from stream_execute
         into a list for compatibility with the regular execution model.
-
+
         Args:
             **kwargs: Keyword arguments for the tool
-
+
         Returns:
             List of all streamed results
         """
@@ -91,20 +94,20 @@ class StreamingTool(ValidatedTool):
         results = []
         async for chunk in self.stream_execute(**kwargs):
             results.append(chunk)
-
+
         return results
-
+
     @abstractmethod
     async def _stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
         """
         Execute the tool and yield results incrementally.
-
+
         This must be implemented by streaming tool subclasses.
-
+
         Args:
             **kwargs: Tool-specific arguments
-
+
         Yields:
             Results as they are generated
         """
-        yield NotImplemented
+        yield NotImplemented
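A runnable version of the Counter example from the docstring above, kept as a sketch and assuming (as the docstring suggests) that a StreamingTool subclass can be instantiated with no constructor arguments:

import asyncio

from pydantic import BaseModel

from chuk_tool_processor.models.streaming_tool import StreamingTool


class Counter(StreamingTool):
    """Counts up to `count`, emitting one partial result per step."""

    class Arguments(BaseModel):
        count: int = 3
        delay: float = 0.1

    class Result(BaseModel):
        value: int

    async def _stream_execute(self, count: int, delay: float):
        for i in range(count):
            await asyncio.sleep(delay)
            yield self.Result(value=i)


async def main() -> None:
    counter = Counter()
    # stream_execute validates kwargs via Arguments, then yields incrementally.
    async for chunk in counter.stream_execute(count=3, delay=0.0):
        print(chunk.value)  # 0, 1, 2
    # execute() collects the same stream into a list.
    print(len(await counter.execute(count=2, delay=0.0)))  # 2


asyncio.run(main())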
chuk_tool_processor/models/tool_call.py

@@ -2,58 +2,76 @@
 """
 Model representing a tool call with arguments.
 """
+
 from __future__ import annotations
 
+import hashlib
+import json
 import uuid
-from typing import Any
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field, model_validator
 
-from pydantic import BaseModel, Field, ConfigDict
 
 class ToolCall(BaseModel):
     """
     Represents a call to a tool with arguments.
-
+
     Attributes:
         id: Unique identifier for the tool call
         tool: Name of the tool to call
         namespace: Namespace the tool belongs to
         arguments: Arguments to pass to the tool
+        idempotency_key: Optional key for deduplicating duplicate calls (auto-generated)
     """
-
-
-
-
-
-    )
-
-
-
-        description="
-    )
-    namespace: str = Field(
-        default="default",
-        description="Namespace the tool belongs to"
-    )
-    arguments: Dict[str, Any] = Field(
-        default_factory=dict,
-        description="Arguments to pass to the tool"
+
+    model_config = ConfigDict(extra="ignore")
+
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for the tool call")
+    tool: str = Field(..., min_length=1, description="Name of the tool to call; must be non-empty")
+    namespace: str = Field(default="default", description="Namespace the tool belongs to")
+    arguments: dict[str, Any] = Field(default_factory=dict, description="Arguments to pass to the tool")
+    idempotency_key: str | None = Field(
+        None,
+        description="Idempotency key for deduplication. Auto-generated if not provided.",
     )
-
-
-
-
-
+
+    @model_validator(mode="after")
+    def generate_idempotency_key(self) -> ToolCall:
+        """Generate idempotency key if not provided."""
+        if self.idempotency_key is None:
+            self.idempotency_key = self._compute_idempotency_key()
+        return self
+
+    def _compute_idempotency_key(self) -> str:
+        """
+        Compute a stable idempotency key from tool name, namespace, and arguments.
+
+        Uses SHA256 hash of the sorted JSON representation.
+        Returns first 16 characters of the hex digest for brevity.
+        """
+        # Create a stable representation
+        payload = {
             "tool": self.tool,
             "namespace": self.namespace,
-            "arguments": self.arguments
+            "arguments": self.arguments,
         }
-
+        # Sort keys for stability
+        json_str = json.dumps(payload, sort_keys=True, default=str)
+        # Hash it
+        hash_obj = hashlib.sha256(json_str.encode(), usedforsecurity=False)
+        return hash_obj.hexdigest()[:16]  # Use first 16 chars for brevity
+
+    async def to_dict(self) -> dict[str, Any]:
+        """Convert to a dictionary for serialization."""
+        return {"id": self.id, "tool": self.tool, "namespace": self.namespace, "arguments": self.arguments}
+
     @classmethod
-    async def from_dict(cls, data:
+    async def from_dict(cls, data: dict[str, Any]) -> ToolCall:
         """Create a ToolCall from a dictionary."""
         return cls(**data)
-
+
     def __str__(self) -> str:
         """String representation of the tool call."""
         args_str = ", ".join(f"{k}={v!r}" for k, v in self.arguments.items())
-        return f"ToolCall({self.tool}, {args_str})"
+        return f"ToolCall({self.tool}, {args_str})"
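Because the idempotency key is derived only from tool, namespace, and arguments, two logically identical calls share a key even though their ids differ. A small sketch against the model shown above:

from chuk_tool_processor.models import ToolCall

a = ToolCall(tool="search", arguments={"query": "python"})
b = ToolCall(tool="search", arguments={"query": "python"})

print(a.id == b.id)                            # False - ids are random UUIDs
print(a.idempotency_key == b.idempotency_key)  # True - same tool/namespace/arguments
print(len(a.idempotency_key))                  # 16 - truncated SHA256 hex digest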
chuk_tool_processor/models/tool_export_mixin.py

@@ -1,29 +1,43 @@
 # chuk_tool_processor/models/tool_export_mix_in.py
-
+
+from typing import Any, Protocol, runtime_checkable
+
+from pydantic import BaseModel
+
+
+@runtime_checkable
+class HasArguments(Protocol):
+    """Protocol for classes that have an Arguments attribute."""
+
+    Arguments: type[BaseModel]
+
 
 class ToolExportMixin:
     """Mixin that lets any ValidatedTool advertise its schema."""
 
     @classmethod
-    def to_openai(cls) ->
-
+    def to_openai(cls) -> dict[str, Any]:
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
+        schema = cls.Arguments.model_json_schema()  # type: ignore[attr-defined]
         return {
             "type": "function",
             "function": {
-                "name": cls.__name__.removesuffix("Tool").lower(),
+                "name": cls.__name__.removesuffix("Tool").lower(),  # or keep explicit name
                 "description": (cls.__doc__ or "").strip(),
                 "parameters": schema,
             },
         }
 
     @classmethod
-    def to_json_schema(cls) ->
-
+    def to_json_schema(cls) -> dict[str, Any]:
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
+        return cls.Arguments.model_json_schema()  # type: ignore[attr-defined, no-any-return]
 
     @classmethod
     def to_xml(cls) -> str:
         """Very small helper so existing XML-based parsers still work."""
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
         name = cls.__name__.removesuffix("Tool").lower()
-        params = cls.Arguments.model_json_schema()["properties"]
+        params = cls.Arguments.model_json_schema()["properties"]  # type: ignore[attr-defined]
         args = ", ".join(params)
-        return f
+        return f'<tool name="{name}" args="{{{args}}}"/>'
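To show what the export helpers produce, a small sketch; the EchoTool class is hypothetical and simply supplies the nested Arguments model the mixin now asserts on:

from pydantic import BaseModel

from chuk_tool_processor.models.tool_export_mixin import ToolExportMixin


class EchoTool(ToolExportMixin):
    """Echo the given text back to the caller."""

    class Arguments(BaseModel):
        text: str


spec = EchoTool.to_openai()
print(spec["function"]["name"])        # "echo" - class name minus the "Tool" suffix
print(spec["function"]["parameters"])  # JSON schema generated from Arguments
print(EchoTool.to_xml())               # <tool name="echo" args="{text}"/>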
chuk_tool_processor/models/tool_result.py

@@ -2,21 +2,24 @@
 """
 Model representing the result of a tool execution.
 """
+
 from __future__ import annotations
 
 import os
+import platform
 import uuid
-from datetime import
-from typing import Any
+from datetime import UTC, datetime
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field
 
-from pydantic import BaseModel, Field, ConfigDict
 
 class ToolResult(BaseModel):
     """
     Represents the result of executing a tool.
-
+
     Includes timing, host, and process metadata for diagnostics and tracing.
-
+
     Attributes:
         id: Unique identifier for the result
         tool: Name of the tool that was executed
@@ -31,79 +34,49 @@ class ToolResult(BaseModel):
         stream_id: Optional identifier for streaming results
         is_partial: Whether this is a partial streaming result
     """
-    model_config = ConfigDict(extra='ignore')
 
-
-
-
-
-
+    model_config = ConfigDict(extra="ignore")
+
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this result")
+
     # Core fields
-    tool: str = Field(
-
-
-
-    )
-    result: Any = Field(
-        None,
-        description="Return value from the tool execution"
-    )
-    error: Optional[str] = Field(
-        None,
-        description="Error message if execution failed"
-    )
-
+    tool: str = Field(..., min_length=1, description="Name of the tool; must be non-empty")
+    result: Any = Field(None, description="Return value from the tool execution")
+    error: str | None = Field(None, description="Error message if execution failed")
+
     # Execution metadata
     start_time: datetime = Field(
-        default_factory=lambda: datetime.now(
-        description="UTC timestamp when execution started"
+        default_factory=lambda: datetime.now(UTC), description="UTC timestamp when execution started"
     )
     end_time: datetime = Field(
-        default_factory=lambda: datetime.now(
-        description="UTC timestamp when execution finished"
-    )
-    machine: str = Field(
-        default_factory=lambda: os.uname().nodename,
-        description="Hostname where the tool ran"
-    )
-    pid: int = Field(
-        default_factory=lambda: os.getpid(),
-        description="Process ID of the worker"
+        default_factory=lambda: datetime.now(UTC), description="UTC timestamp when execution finished"
     )
-
+    machine: str = Field(default_factory=lambda: platform.node(), description="Hostname where the tool ran")
+    pid: int = Field(default_factory=lambda: os.getpid(), description="Process ID of the worker")
+
     # Extended features
-    cached: bool = Field(
-
-
-    )
-    attempts: int = Field(
-        default=1,
-        description="Number of execution attempts made"
-    )
-
+    cached: bool = Field(default=False, description="True if this result was retrieved from cache")
+    attempts: int = Field(default=1, description="Number of execution attempts made")
+
     # Streaming support
-    stream_id:
-        default=None,
-        description="Identifier for this stream of results (for streaming tools)"
-    )
-    is_partial: bool = Field(
-        default=False,
-        description="True if this is a partial result in a stream"
+    stream_id: str | None = Field(
+        default=None, description="Identifier for this stream of results (for streaming tools)"
     )
-
+    is_partial: bool = Field(default=False, description="True if this is a partial result in a stream")
+
     @property
     def is_success(self) -> bool:
         """Check if the execution was successful (no error)."""
         return self.error is None
-
+
     @property
     def duration(self) -> float:
         """Calculate the execution duration in seconds."""
         if not self.start_time or not self.end_time:
             return 0.0
         return (self.end_time - self.start_time).total_seconds()
-
-    async def to_dict(self) ->
+
+    async def to_dict(self) -> dict[str, Any]:
         """Convert to a dictionary for serialization."""
         return {
             "id": self.id,
@@ -119,37 +92,27 @@ class ToolResult(BaseModel):
             "cached": self.cached,
             "attempts": self.attempts,
             "stream_id": self.stream_id,
-            "is_partial": self.is_partial
+            "is_partial": self.is_partial,
         }
-
+
     @classmethod
-    def create_stream_chunk(
-        cls,
-        tool: str,
-        result: Any,
-        stream_id: Optional[str] = None
-    ) -> ToolResult:
+    def create_stream_chunk(cls, tool: str, result: Any, stream_id: str | None = None) -> ToolResult:
         """Create a partial streaming result."""
         stream_id = stream_id or str(uuid.uuid4())
-        return cls(
-
-            result=result,
-            stream_id=stream_id,
-            is_partial=True
-        )
-
+        return cls(tool=tool, result=result, error=None, stream_id=stream_id, is_partial=True)
+
     @classmethod
-    async def from_dict(cls, data:
+    async def from_dict(cls, data: dict[str, Any]) -> ToolResult:
         """Create a ToolResult from a dictionary."""
         # Handle datetime fields
        if isinstance(data.get("start_time"), str):
             data["start_time"] = datetime.fromisoformat(data["start_time"])
         if isinstance(data.get("end_time"), str):
             data["end_time"] = datetime.fromisoformat(data["end_time"])
-
+
         return cls(**data)
-
+
     def __str__(self) -> str:
         """String representation of the tool result."""
         status = "success" if self.is_success else f"error: {self.error}"
-        return f"ToolResult({self.tool}, {status}, duration={self.duration:.3f}s)"
+        return f"ToolResult({self.tool}, {status}, duration={self.duration:.3f}s)"
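As a sanity check on the refactored model, a small sketch of the success/error flags and the streaming helper, using only the fields and methods shown in the diff above:

from chuk_tool_processor.models import ToolResult

ok = ToolResult(tool="search", result={"hits": 3})
failed = ToolResult(tool="search", error="upstream timeout")
print(ok.is_success, failed.is_success)  # True False
print(ok.duration >= 0.0)                # duration derives from start/end timestamps

chunk = ToolResult.create_stream_chunk(tool="search", result={"hit": 1})
print(chunk.is_partial, chunk.stream_id is not None)  # True True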