chuk-tool-processor 0.6.12__py3-none-any.whl → 0.6.13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of chuk-tool-processor might be problematic.

Files changed (56)
  1. chuk_tool_processor/core/__init__.py +1 -1
  2. chuk_tool_processor/core/exceptions.py +10 -4
  3. chuk_tool_processor/core/processor.py +97 -97
  4. chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
  5. chuk_tool_processor/execution/strategies/subprocess_strategy.py +200 -205
  6. chuk_tool_processor/execution/tool_executor.py +82 -84
  7. chuk_tool_processor/execution/wrappers/caching.py +102 -103
  8. chuk_tool_processor/execution/wrappers/rate_limiting.py +45 -42
  9. chuk_tool_processor/execution/wrappers/retry.py +23 -25
  10. chuk_tool_processor/logging/__init__.py +23 -17
  11. chuk_tool_processor/logging/context.py +40 -45
  12. chuk_tool_processor/logging/formatter.py +22 -21
  13. chuk_tool_processor/logging/helpers.py +24 -38
  14. chuk_tool_processor/logging/metrics.py +11 -13
  15. chuk_tool_processor/mcp/__init__.py +8 -12
  16. chuk_tool_processor/mcp/mcp_tool.py +124 -112
  17. chuk_tool_processor/mcp/register_mcp_tools.py +17 -17
  18. chuk_tool_processor/mcp/setup_mcp_http_streamable.py +11 -13
  19. chuk_tool_processor/mcp/setup_mcp_sse.py +11 -13
  20. chuk_tool_processor/mcp/setup_mcp_stdio.py +7 -9
  21. chuk_tool_processor/mcp/stream_manager.py +168 -204
  22. chuk_tool_processor/mcp/transport/__init__.py +4 -4
  23. chuk_tool_processor/mcp/transport/base_transport.py +43 -58
  24. chuk_tool_processor/mcp/transport/http_streamable_transport.py +145 -163
  25. chuk_tool_processor/mcp/transport/sse_transport.py +217 -255
  26. chuk_tool_processor/mcp/transport/stdio_transport.py +171 -189
  27. chuk_tool_processor/models/__init__.py +1 -1
  28. chuk_tool_processor/models/execution_strategy.py +16 -21
  29. chuk_tool_processor/models/streaming_tool.py +28 -25
  30. chuk_tool_processor/models/tool_call.py +19 -34
  31. chuk_tool_processor/models/tool_export_mixin.py +22 -8
  32. chuk_tool_processor/models/tool_result.py +40 -77
  33. chuk_tool_processor/models/validated_tool.py +14 -16
  34. chuk_tool_processor/plugins/__init__.py +1 -1
  35. chuk_tool_processor/plugins/discovery.py +10 -10
  36. chuk_tool_processor/plugins/parsers/__init__.py +1 -1
  37. chuk_tool_processor/plugins/parsers/base.py +1 -2
  38. chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
  39. chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
  40. chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
  41. chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
  42. chuk_tool_processor/registry/__init__.py +12 -12
  43. chuk_tool_processor/registry/auto_register.py +22 -30
  44. chuk_tool_processor/registry/decorators.py +127 -129
  45. chuk_tool_processor/registry/interface.py +26 -23
  46. chuk_tool_processor/registry/metadata.py +27 -22
  47. chuk_tool_processor/registry/provider.py +17 -18
  48. chuk_tool_processor/registry/providers/__init__.py +16 -19
  49. chuk_tool_processor/registry/providers/memory.py +18 -25
  50. chuk_tool_processor/registry/tool_export.py +42 -51
  51. chuk_tool_processor/utils/validation.py +15 -16
  52. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/METADATA +1 -1
  53. chuk_tool_processor-0.6.13.dist-info/RECORD +60 -0
  54. chuk_tool_processor-0.6.12.dist-info/RECORD +0 -60
  55. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/WHEEL +0 -0
  56. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/top_level.txt +0 -0
@@ -1 +1 @@
-# chuk_tool_processor/models/__init__.py
+# chuk_tool_processor/models/__init__.py
@@ -2,67 +2,62 @@
 """
 Abstract base class for tool execution strategies.
 """
+
 from __future__ import annotations
 
 from abc import ABC, abstractmethod
-from typing import List, Optional, Dict, Any, AsyncIterator
+from collections.abc import AsyncIterator
 
 from chuk_tool_processor.models.tool_call import ToolCall
 from chuk_tool_processor.models.tool_result import ToolResult
 
+
 class ExecutionStrategy(ABC):
     """
     Strategy interface for executing ToolCall objects.
-
+
     All execution strategies must implement at least the run method,
     and optionally stream_run for streaming support.
     """
+
     @abstractmethod
-    async def run(
-        self,
-        calls: List[ToolCall],
-        timeout: Optional[float] = None
-    ) -> List[ToolResult]:
+    async def run(self, calls: list[ToolCall], timeout: float | None = None) -> list[ToolResult]:
         """
         Execute a list of tool calls and return their results.
-
+
         Args:
             calls: List of ToolCall objects to execute
             timeout: Optional timeout in seconds for each call
-
+
         Returns:
             List of ToolResult objects in the same order as the calls
         """
         pass
-
-    async def stream_run(
-        self,
-        calls: List[ToolCall],
-        timeout: Optional[float] = None
-    ) -> AsyncIterator[ToolResult]:
+
+    async def stream_run(self, calls: list[ToolCall], timeout: float | None = None) -> AsyncIterator[ToolResult]:
         """
         Execute tool calls and yield results as they become available.
-
+
         Default implementation executes all calls with run() and yields the results.
         Subclasses can override for true streaming behavior.
-
+
         Args:
             calls: List of ToolCall objects to execute
             timeout: Optional timeout in seconds for each call
-
+
         Yields:
             ToolResult objects as they become available
         """
         results = await self.run(calls, timeout=timeout)
         for result in results:
             yield result
-
+
     @property
     def supports_streaming(self) -> bool:
         """
         Check if this strategy supports true streaming.
-
+
         Default implementation returns False. Streaming-capable strategies
         should override this to return True.
         """
-        return False
+        return False
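To make the interface above concrete, here is a minimal sketch of a custom strategy. The import paths are assumed from the "Files changed" list; the EchoStrategy class itself is purely illustrative and not part of the package. Because stream_run() has a default implementation that wraps run(), a subclass only needs run().

```python
# Hypothetical strategy, shown only to illustrate the ABC in the hunk above.
import asyncio

from chuk_tool_processor.models.execution_strategy import ExecutionStrategy
from chuk_tool_processor.models.tool_call import ToolCall
from chuk_tool_processor.models.tool_result import ToolResult


class EchoStrategy(ExecutionStrategy):
    """Resolve every call locally by echoing its arguments back."""

    async def run(self, calls: list[ToolCall], timeout: float | None = None) -> list[ToolResult]:
        # Results must come back in the same order as the calls; the timeout
        # is ignored in this toy example.
        return [ToolResult(tool=call.tool, result=call.arguments) for call in calls]


async def main() -> None:
    results = await EchoStrategy().run([ToolCall(tool="echo", arguments={"text": "hi"})])
    for result in results:
        print(result)  # uses ToolResult.__str__ from the diff below


asyncio.run(main())
```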
@@ -6,41 +6,43 @@ Base class for tools that support streaming results.
 This enables tools to yield incremental results during their execution,
 which is useful for long-running operations or real-time data processing.
 """
+
 from __future__ import annotations
 
-import asyncio
 from abc import abstractmethod
-from typing import Any, AsyncIterator, List, TypeVar, Generic, ClassVar, Optional, Dict
+from collections.abc import AsyncIterator
+from typing import Any, ClassVar, TypeVar
 
-from pydantic import BaseModel, ConfigDict
+from pydantic import ConfigDict
 
 from chuk_tool_processor.models.validated_tool import ValidatedTool
 
-T = TypeVar('T')
+T = TypeVar("T")
+
 
 class StreamingTool(ValidatedTool):
     """
     Base class for tools that support streaming responses.
-
+
     Subclasses must implement _stream_execute which yields results one by one.
     The executor should use stream_execute to access streaming results directly.
-
+
     Example:
         ```python
         class Counter(StreamingTool):
             class Arguments(BaseModel):
                 count: int = 10
                 delay: float = 0.5
-
+
             class Result(BaseModel):
                 value: int
-
+
             async def _stream_execute(self, count: int, delay: float) -> AsyncIterator[Result]:
                 for i in range(count):
                     await asyncio.sleep(delay)
                     yield self.Result(value=i)
         ```
-
+
     Streaming usage:
         ```python
         counter_tool = Counter()
@@ -48,42 +50,43 @@ class StreamingTool(ValidatedTool):
             print(f"Count: {result.value}")
         ```
     """
+
     # Mark this as a ClassVar so Pydantic doesn't treat it as a field
     supports_streaming: ClassVar[bool] = True
-
+
     # Use ConfigDict to configure model behavior
     model_config = ConfigDict(arbitrary_types_allowed=True)
-
+
    async def stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
        """
        Execute the tool and stream results incrementally.
-
+
        This public method validates arguments and then delegates to _stream_execute.
        It should be used directly by the executor to support true streaming.
-
+
        Args:
            **kwargs: Keyword arguments for the tool
-
+
        Yields:
            Results as they are generated by the tool
        """
        # Validate arguments using the Arguments model
        args = self.Arguments(**kwargs)
-
+
        # Stream results directly from _stream_execute
        async for result in self._stream_execute(**args.model_dump()):
            yield result
-
+
    async def execute(self, **kwargs: Any) -> Any:
        """
        Execute the tool and collect all results.
-
+
        For streaming tools, this collects all results from stream_execute
        into a list for compatibility with the regular execution model.
-
+
        Args:
            **kwargs: Keyword arguments for the tool
-
+
        Returns:
            List of all streamed results
        """
@@ -91,20 +94,20 @@ class StreamingTool(ValidatedTool):
        results = []
        async for chunk in self.stream_execute(**kwargs):
            results.append(chunk)
-
+
        return results
-
+
    @abstractmethod
    async def _stream_execute(self, **kwargs: Any) -> AsyncIterator[Any]:
        """
        Execute the tool and yield results incrementally.
-
+
        This must be implemented by streaming tool subclasses.
-
+
        Args:
            **kwargs: Tool-specific arguments
-
+
        Yields:
            Results as they are generated
        """
-        yield NotImplemented
+        yield NotImplemented
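A small consumer-side sketch of the two entry points shown above: stream_execute() yields chunks as they arrive, while execute() collects them into a list. The Ticker tool is made up; only the StreamingTool base class and its methods come from the diff, and the import path is assumed from the file list.

```python
# Ticker is a hypothetical tool used only to drive StreamingTool's API.
import asyncio

from pydantic import BaseModel

from chuk_tool_processor.models.streaming_tool import StreamingTool


class Ticker(StreamingTool):
    """Emit an increasing counter."""

    class Arguments(BaseModel):
        count: int = 3

    class Result(BaseModel):
        value: int

    async def _stream_execute(self, count: int):
        for i in range(count):
            await asyncio.sleep(0)  # stand-in for real work
            yield self.Result(value=i)


async def main() -> None:
    tool = Ticker()
    async for chunk in tool.stream_execute(count=2):  # incremental results
        print("chunk:", chunk.value)
    print("collected:", await tool.execute(count=2))  # list of Result objects


asyncio.run(main())
```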
@@ -2,58 +2,43 @@
 """
 Model representing a tool call with arguments.
 """
+
 from __future__ import annotations
 
 import uuid
-from typing import Any, Dict, Optional
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field
 
-from pydantic import BaseModel, Field, ConfigDict
 
 class ToolCall(BaseModel):
     """
     Represents a call to a tool with arguments.
-
+
     Attributes:
         id: Unique identifier for the tool call
         tool: Name of the tool to call
         namespace: Namespace the tool belongs to
         arguments: Arguments to pass to the tool
     """
-    model_config = ConfigDict(extra='ignore')
-
-    id: str = Field(
-        default_factory=lambda: str(uuid.uuid4()),
-        description="Unique identifier for the tool call"
-    )
-    tool: str = Field(
-        ...,
-        min_length=1,
-        description="Name of the tool to call; must be non-empty"
-    )
-    namespace: str = Field(
-        default="default",
-        description="Namespace the tool belongs to"
-    )
-    arguments: Dict[str, Any] = Field(
-        default_factory=dict,
-        description="Arguments to pass to the tool"
-    )
-
-    async def to_dict(self) -> Dict[str, Any]:
+
+    model_config = ConfigDict(extra="ignore")
+
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for the tool call")
+    tool: str = Field(..., min_length=1, description="Name of the tool to call; must be non-empty")
+    namespace: str = Field(default="default", description="Namespace the tool belongs to")
+    arguments: dict[str, Any] = Field(default_factory=dict, description="Arguments to pass to the tool")
+
+    async def to_dict(self) -> dict[str, Any]:
         """Convert to a dictionary for serialization."""
-        return {
-            "id": self.id,
-            "tool": self.tool,
-            "namespace": self.namespace,
-            "arguments": self.arguments
-        }
-
+        return {"id": self.id, "tool": self.tool, "namespace": self.namespace, "arguments": self.arguments}
+
     @classmethod
-    async def from_dict(cls, data: Dict[str, Any]) -> ToolCall:
+    async def from_dict(cls, data: dict[str, Any]) -> ToolCall:
         """Create a ToolCall from a dictionary."""
         return cls(**data)
-
+
     def __str__(self) -> str:
         """String representation of the tool call."""
         args_str = ", ".join(f"{k}={v!r}" for k, v in self.arguments.items())
-        return f"ToolCall({self.tool}, {args_str})"
+        return f"ToolCall({self.tool}, {args_str})"
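For reference, a sketch of the ToolCall round trip defined above; note that to_dict() and from_dict() are coroutines in this codebase, so they must be awaited. The argument values are arbitrary.

```python
# Round-trip sketch; only the ToolCall model itself comes from the diff.
import asyncio

from chuk_tool_processor.models.tool_call import ToolCall


async def main() -> None:
    call = ToolCall(tool="search", namespace="web", arguments={"query": "pydantic"})
    payload = await call.to_dict()           # plain dict, safe to serialize
    restored = await ToolCall.from_dict(payload)
    print(restored)                          # ToolCall(search, query='pydantic')


asyncio.run(main())
```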
@@ -1,29 +1,43 @@
 # chuk_tool_processor/models/tool_export_mix_in.py
-from typing import Dict
+
+from typing import Any, Protocol, runtime_checkable
+
+from pydantic import BaseModel
+
+
+@runtime_checkable
+class HasArguments(Protocol):
+    """Protocol for classes that have an Arguments attribute."""
+
+    Arguments: type[BaseModel]
+
 
 class ToolExportMixin:
     """Mixin that lets any ValidatedTool advertise its schema."""
 
     @classmethod
-    def to_openai(cls) -> Dict:
-        schema = cls.Arguments.model_json_schema()
+    def to_openai(cls) -> dict[str, Any]:
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
+        schema = cls.Arguments.model_json_schema()  # type: ignore[attr-defined]
         return {
             "type": "function",
             "function": {
-                "name": cls.__name__.removesuffix("Tool").lower(), # or keep explicit name
+                "name": cls.__name__.removesuffix("Tool").lower(),  # or keep explicit name
                 "description": (cls.__doc__ or "").strip(),
                 "parameters": schema,
             },
         }
 
     @classmethod
-    def to_json_schema(cls) -> Dict:
-        return cls.Arguments.model_json_schema()
+    def to_json_schema(cls) -> dict[str, Any]:
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
+        return cls.Arguments.model_json_schema()  # type: ignore[attr-defined, no-any-return]
 
     @classmethod
     def to_xml(cls) -> str:
         """Very small helper so existing XML-based parsers still work."""
+        assert hasattr(cls, "Arguments"), f"{cls.__name__} must have an Arguments attribute"
         name = cls.__name__.removesuffix("Tool").lower()
-        params = cls.Arguments.model_json_schema()["properties"]
+        params = cls.Arguments.model_json_schema()["properties"]  # type: ignore[attr-defined]
         args = ", ".join(params)
-        return f"<tool name=\"{name}\" args=\"{{{args}}}\"/>"
+        return f'<tool name="{name}" args="{{{args}}}"/>'
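An illustrative sketch of the export mixin above. The SearchTool class is hypothetical, and the import path chuk_tool_processor.models.tool_export_mixin is assumed from the file list (the header comment inside the file itself says tool_export_mix_in.py).

```python
# SearchTool is hypothetical; the mixin and its three export methods are from the diff.
from pydantic import BaseModel

from chuk_tool_processor.models.tool_export_mixin import ToolExportMixin


class SearchTool(ToolExportMixin):
    """Search the web for a query."""

    class Arguments(BaseModel):
        query: str
        limit: int = 5


# The "Tool" suffix is stripped and the name lower-cased, as in the hunk above.
print(SearchTool.to_openai()["function"]["name"])  # search
print(SearchTool.to_xml())                         # <tool name="search" args="{query, limit}"/>
```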
@@ -2,21 +2,24 @@
 """
 Model representing the result of a tool execution.
 """
+
 from __future__ import annotations
 
 import os
+import platform
 import uuid
-from datetime import datetime, timezone
-from typing import Any, Dict, Optional, List
+from datetime import UTC, datetime
+from typing import Any
+
+from pydantic import BaseModel, ConfigDict, Field
 
-from pydantic import BaseModel, Field, ConfigDict
 
 class ToolResult(BaseModel):
     """
     Represents the result of executing a tool.
-
+
     Includes timing, host, and process metadata for diagnostics and tracing.
-
+
     Attributes:
         id: Unique identifier for the result
         tool: Name of the tool that was executed
@@ -31,79 +34,49 @@ class ToolResult(BaseModel):
         stream_id: Optional identifier for streaming results
         is_partial: Whether this is a partial streaming result
     """
-    model_config = ConfigDict(extra='ignore')
 
-    id: str = Field(
-        default_factory=lambda: str(uuid.uuid4()),
-        description="Unique identifier for this result"
-    )
-
+    model_config = ConfigDict(extra="ignore")
+
+    id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="Unique identifier for this result")
+
     # Core fields
-    tool: str = Field(
-        ...,
-        min_length=1,
-        description="Name of the tool; must be non-empty"
-    )
-    result: Any = Field(
-        None,
-        description="Return value from the tool execution"
-    )
-    error: Optional[str] = Field(
-        None,
-        description="Error message if execution failed"
-    )
-
+    tool: str = Field(..., min_length=1, description="Name of the tool; must be non-empty")
+    result: Any = Field(None, description="Return value from the tool execution")
+    error: str | None = Field(None, description="Error message if execution failed")
+
     # Execution metadata
     start_time: datetime = Field(
-        default_factory=lambda: datetime.now(timezone.utc),
-        description="UTC timestamp when execution started"
+        default_factory=lambda: datetime.now(UTC), description="UTC timestamp when execution started"
     )
     end_time: datetime = Field(
-        default_factory=lambda: datetime.now(timezone.utc),
-        description="UTC timestamp when execution finished"
-    )
-    machine: str = Field(
-        default_factory=lambda: os.uname().nodename,
-        description="Hostname where the tool ran"
-    )
-    pid: int = Field(
-        default_factory=lambda: os.getpid(),
-        description="Process ID of the worker"
+        default_factory=lambda: datetime.now(UTC), description="UTC timestamp when execution finished"
    )
-
+    machine: str = Field(default_factory=lambda: platform.node(), description="Hostname where the tool ran")
+    pid: int = Field(default_factory=lambda: os.getpid(), description="Process ID of the worker")
+
     # Extended features
-    cached: bool = Field(
-        default=False,
-        description="True if this result was retrieved from cache"
-    )
-    attempts: int = Field(
-        default=1,
-        description="Number of execution attempts made"
-    )
-
+    cached: bool = Field(default=False, description="True if this result was retrieved from cache")
+    attempts: int = Field(default=1, description="Number of execution attempts made")
+
     # Streaming support
-    stream_id: Optional[str] = Field(
-        default=None,
-        description="Identifier for this stream of results (for streaming tools)"
-    )
-    is_partial: bool = Field(
-        default=False,
-        description="True if this is a partial result in a stream"
+    stream_id: str | None = Field(
+        default=None, description="Identifier for this stream of results (for streaming tools)"
     )
-
+    is_partial: bool = Field(default=False, description="True if this is a partial result in a stream")
+
     @property
     def is_success(self) -> bool:
         """Check if the execution was successful (no error)."""
         return self.error is None
-
+
     @property
     def duration(self) -> float:
         """Calculate the execution duration in seconds."""
         if not self.start_time or not self.end_time:
             return 0.0
         return (self.end_time - self.start_time).total_seconds()
-
-    async def to_dict(self) -> Dict[str, Any]:
+
+    async def to_dict(self) -> dict[str, Any]:
         """Convert to a dictionary for serialization."""
         return {
             "id": self.id,
@@ -119,37 +92,27 @@ class ToolResult(BaseModel):
             "cached": self.cached,
             "attempts": self.attempts,
             "stream_id": self.stream_id,
-            "is_partial": self.is_partial
+            "is_partial": self.is_partial,
         }
-
+
     @classmethod
-    def create_stream_chunk(
-        cls,
-        tool: str,
-        result: Any,
-        stream_id: Optional[str] = None
-    ) -> ToolResult:
+    def create_stream_chunk(cls, tool: str, result: Any, stream_id: str | None = None) -> ToolResult:
         """Create a partial streaming result."""
         stream_id = stream_id or str(uuid.uuid4())
-        return cls(
-            tool=tool,
-            result=result,
-            stream_id=stream_id,
-            is_partial=True
-        )
-
+        return cls(tool=tool, result=result, error=None, stream_id=stream_id, is_partial=True)
+
     @classmethod
-    async def from_dict(cls, data: Dict[str, Any]) -> ToolResult:
+    async def from_dict(cls, data: dict[str, Any]) -> ToolResult:
         """Create a ToolResult from a dictionary."""
         # Handle datetime fields
         if isinstance(data.get("start_time"), str):
             data["start_time"] = datetime.fromisoformat(data["start_time"])
         if isinstance(data.get("end_time"), str):
             data["end_time"] = datetime.fromisoformat(data["end_time"])
-
+
         return cls(**data)
-
+
     def __str__(self) -> str:
         """String representation of the tool result."""
         status = "success" if self.is_success else f"error: {self.error}"
-        return f"ToolResult({self.tool}, {status}, duration={self.duration:.3f}s)"
+        return f"ToolResult({self.tool}, {status}, duration={self.duration:.3f}s)"
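A short sketch exercising the ToolResult helpers above (is_success, duration, create_stream_chunk, and the async to_dict). The values are made up; only the fields and methods come from the diff.

```python
# Values are arbitrary; fields and helpers come from the hunks above.
import asyncio

from chuk_tool_processor.models.tool_result import ToolResult


async def main() -> None:
    ok = ToolResult(tool="search", result={"hits": 3})
    failed = ToolResult(tool="search", error="timeout")
    print(ok.is_success, failed.is_success)   # True False
    print(f"{ok.duration:.3f}s")              # ~0.000s: start/end default to "now"

    chunk = ToolResult.create_stream_chunk(tool="search", result={"hit": 1})
    print(chunk.is_partial, chunk.stream_id is not None)  # True True

    payload = await ok.to_dict()              # to_dict is a coroutine here
    print(payload["cached"], payload["attempts"], payload["is_partial"])  # False 1 False


asyncio.run(main())
```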
@@ -15,12 +15,13 @@ Subclass it like so:
     async def _execute(self, *, x: int, y: int) -> Result:
         return self.Result(sum=x + y)
 """
+
 from __future__ import annotations
 
 import html
 import inspect
 import json
-from typing import Any, Dict, TypeVar, Callable
+from typing import Any, TypeVar
 
 from pydantic import BaseModel, ValidationError
 
@@ -48,7 +49,7 @@ class _ExportMixin:
         cls: type[T_Validated],
         *,
         registry_name: str | None = None,
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         """
         Build the structure expected by `tools=[…]`.
 
@@ -67,7 +68,7 @@
             "function": {
                 "name": fn_name,
                 "description": description,
-                "parameters": cls.Arguments.model_json_schema(),  # type: ignore[attr-defined]
+                "parameters": cls.Arguments.model_json_schema(),
             },
         }
 
@@ -75,18 +76,15 @@
     # ------------------------------------------------------------------ #
     # Plain JSON schema (arguments only)
     # ------------------------------------------------------------------ #
     @classmethod
-    def to_json_schema(cls: type[T_Validated]) -> Dict[str, Any]:
-        return cls.Arguments.model_json_schema()  # type: ignore[attr-defined]
+    def to_json_schema(cls: type[T_Validated]) -> dict[str, Any]:
+        return cls.Arguments.model_json_schema()
 
     # ------------------------------------------------------------------ #
     # Tiny XML tag - handy for unit-tests / demos
     # ------------------------------------------------------------------ #
     @classmethod
-    def to_xml_tag(cls: type[T_Validated], **arguments: Any) -> str:
-        return (
-            f'<tool name="{html.escape(cls.__name__)}" '
-            f"args='{html.escape(json.dumps(arguments))}'/>"
-        )
+    def to_xml_tag(cls: type[T_Validated], **arguments: Any) -> str:  # type: ignore[misc]
+        return f"<tool name=\"{html.escape(cls.__name__)}\" args='{html.escape(json.dumps(arguments))}'/>"
 
 
 # --------------------------------------------------------------------------- #
@@ -110,8 +108,8 @@ class ValidatedTool(_ExportMixin, BaseModel):
     async def execute(self: T_Validated, **kwargs: Any) -> BaseModel:
         """Validate *kwargs*, run `_execute`, validate the result."""
         try:
-            args = self.Arguments(**kwargs)  # type: ignore[arg-type]
-            res = await self._execute(**args.model_dump())  # type: ignore[arg-type]
+            args = self.Arguments(**kwargs)
+            res = await self._execute(**args.model_dump())
 
             return (
                 res
@@ -119,7 +117,7 @@ class ValidatedTool(_ExportMixin, BaseModel):
                 else self.Result(**(res if isinstance(res, dict) else {"value": res}))
             )
         except ValidationError as exc:
-            raise ToolValidationError(self.__class__.__name__, exc.errors()) from exc
+            raise ToolValidationError(self.__class__.__name__, {"errors": exc.errors()}) from exc
 
     # ------------------------------------------------------------------ #
     # Sub-classes must implement this
@@ -143,15 +141,15 @@ def with_validation(cls): # noqa: D401 - factory
         validate_result,
     )
 
-    original = cls.execute  # type: ignore[attr-defined]
+    original = cls.execute
     if not inspect.iscoroutinefunction(original):
         raise TypeError(f"Tool {cls.__name__} must have an async execute method")
 
-    async def _async_wrapper(self, **kwargs):  # type: ignore[override]
+    async def _async_wrapper(self, **kwargs):
         tool_name = cls.__name__
         validated = validate_arguments(tool_name, original, kwargs)
         result = await original(self, **validated)
         return validate_result(tool_name, original, result)
 
-    cls.execute = _async_wrapper  # type: ignore[assignment]
+    cls.execute = _async_wrapper
     return cls
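Finally, a sketch following the "Subclass it like so" pattern from the module docstring in the first hunk above: nested Arguments and Result models plus an async _execute. AddTool is illustrative and not part of the package; execute() validates the keyword arguments with pydantic and returns a Result instance.

```python
# AddTool mirrors the docstring example; the import path is assumed from the file list.
import asyncio

from pydantic import BaseModel

from chuk_tool_processor.models.validated_tool import ValidatedTool


class AddTool(ValidatedTool):
    """Add two integers."""

    class Arguments(BaseModel):
        x: int
        y: int

    class Result(BaseModel):
        sum: int

    async def _execute(self, *, x: int, y: int) -> Result:
        return self.Result(sum=x + y)


async def main() -> None:
    result = await AddTool().execute(x=2, y=3)   # arguments validated by pydantic
    print(result.sum)                            # 5
    print(AddTool.to_json_schema()["required"])  # ['x', 'y']


asyncio.run(main())
```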
@@ -1 +1 @@
-# chuk_tool_processor/plugins/parsers__init__.py
+# chuk_tool_processor/plugins/parsers__init__.py