chuk-tool-processor 0.1.6__py3-none-any.whl → 0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of chuk-tool-processor might be problematic.

Files changed (46)
  1. chuk_tool_processor/core/processor.py +345 -132
  2. chuk_tool_processor/execution/strategies/inprocess_strategy.py +522 -71
  3. chuk_tool_processor/execution/strategies/subprocess_strategy.py +559 -64
  4. chuk_tool_processor/execution/tool_executor.py +282 -24
  5. chuk_tool_processor/execution/wrappers/caching.py +465 -123
  6. chuk_tool_processor/execution/wrappers/rate_limiting.py +199 -86
  7. chuk_tool_processor/execution/wrappers/retry.py +133 -23
  8. chuk_tool_processor/logging/__init__.py +83 -10
  9. chuk_tool_processor/logging/context.py +218 -22
  10. chuk_tool_processor/logging/formatter.py +56 -13
  11. chuk_tool_processor/logging/helpers.py +91 -16
  12. chuk_tool_processor/logging/metrics.py +75 -6
  13. chuk_tool_processor/mcp/mcp_tool.py +80 -35
  14. chuk_tool_processor/mcp/register_mcp_tools.py +74 -56
  15. chuk_tool_processor/mcp/setup_mcp_sse.py +41 -36
  16. chuk_tool_processor/mcp/setup_mcp_stdio.py +39 -37
  17. chuk_tool_processor/mcp/transport/sse_transport.py +351 -105
  18. chuk_tool_processor/models/execution_strategy.py +52 -3
  19. chuk_tool_processor/models/streaming_tool.py +110 -0
  20. chuk_tool_processor/models/tool_call.py +56 -4
  21. chuk_tool_processor/models/tool_result.py +115 -9
  22. chuk_tool_processor/models/validated_tool.py +15 -13
  23. chuk_tool_processor/plugins/discovery.py +115 -70
  24. chuk_tool_processor/plugins/parsers/base.py +13 -5
  25. chuk_tool_processor/plugins/parsers/{function_call_tool_plugin.py → function_call_tool.py} +39 -20
  26. chuk_tool_processor/plugins/parsers/json_tool.py +50 -0
  27. chuk_tool_processor/plugins/parsers/openai_tool.py +88 -0
  28. chuk_tool_processor/plugins/parsers/xml_tool.py +74 -20
  29. chuk_tool_processor/registry/__init__.py +46 -7
  30. chuk_tool_processor/registry/auto_register.py +92 -28
  31. chuk_tool_processor/registry/decorators.py +134 -11
  32. chuk_tool_processor/registry/interface.py +48 -14
  33. chuk_tool_processor/registry/metadata.py +52 -6
  34. chuk_tool_processor/registry/provider.py +75 -36
  35. chuk_tool_processor/registry/providers/__init__.py +49 -10
  36. chuk_tool_processor/registry/providers/memory.py +59 -48
  37. chuk_tool_processor/registry/tool_export.py +208 -39
  38. chuk_tool_processor/utils/validation.py +18 -13
  39. chuk_tool_processor-0.2.dist-info/METADATA +401 -0
  40. chuk_tool_processor-0.2.dist-info/RECORD +58 -0
  41. {chuk_tool_processor-0.1.6.dist-info → chuk_tool_processor-0.2.dist-info}/WHEEL +1 -1
  42. chuk_tool_processor/plugins/parsers/json_tool_plugin.py +0 -38
  43. chuk_tool_processor/plugins/parsers/openai_tool_plugin.py +0 -76
  44. chuk_tool_processor-0.1.6.dist-info/METADATA +0 -462
  45. chuk_tool_processor-0.1.6.dist-info/RECORD +0 -57
  46. {chuk_tool_processor-0.1.6.dist-info → chuk_tool_processor-0.2.dist-info}/top_level.txt +0 -0
chuk_tool_processor/execution/wrappers/rate_limiting.py
@@ -1,149 +1,262 @@
 # chuk_tool_processor/execution/wrappers/rate_limiting.py
+"""
+Async-native rate-limiting wrapper.
+
+Two layers of limits are enforced:
+
+* **Global** - ``<N requests> / <period>`` over *all* tools.
+* **Per-tool** - independent ``<N requests> / <period>`` windows.
+
+A simple sliding-window algorithm with timestamp queues is used.
+`asyncio.Lock` guards shared state so the wrapper can be used safely from
+multiple coroutines.
+"""
+from __future__ import annotations
+
 import asyncio
+import inspect
 import time
-from datetime import datetime
-from typing import Dict, Optional, List, Any, Tuple
+from typing import Any, Dict, List, Optional, Tuple, Union

-# imports
 from chuk_tool_processor.models.tool_call import ToolCall
 from chuk_tool_processor.models.tool_result import ToolResult
-from chuk_tool_processor.core.exceptions import ToolExecutionError
+from chuk_tool_processor.logging import get_logger

+logger = get_logger("chuk_tool_processor.execution.wrappers.rate_limiting")

+# --------------------------------------------------------------------------- #
+# Core limiter
+# --------------------------------------------------------------------------- #
 class RateLimiter:
     """
-    Rate limiter for tool executions.
-    Supports per-tool rate limits and global rate limits.
+    Async-native rate limiter for controlling execution frequency.
+
+    Implements a sliding window algorithm to enforce rate limits both globally
+    and per-tool. All operations are thread-safe using asyncio locks.
     """
+
     def __init__(
         self,
+        *,
         global_limit: Optional[int] = None,
         global_period: float = 60.0,
-        tool_limits: Optional[Dict[str, Tuple[int, float]]] = None
-    ):
+        tool_limits: Optional[Dict[str, Tuple[int, float]]] = None,
+    ) -> None:
         """
         Initialize the rate limiter.
+
+        Args:
+            global_limit: Maximum global requests per period (None = no limit)
+            global_period: Time period in seconds for the global limit
+            tool_limits: Dict mapping tool names to (limit, period) tuples
         """
         self.global_limit = global_limit
         self.global_period = global_period
         self.tool_limits = tool_limits or {}
-
-        # Track request timestamps
-        self._global_timestamps: List[float] = []
-        self._tool_timestamps: Dict[str, List[float]] = {}
-
-        # Locks for concurrency safety
+
+        # Timestamp queues
+        self._global_ts: List[float] = []
+        self._tool_ts: Dict[str, List[float]] = {}
+
+        # Locks for thread safety
         self._global_lock = asyncio.Lock()
         self._tool_locks: Dict[str, asyncio.Lock] = {}
-
-    async def _wait_for_global_limit(self) -> None:
-        """
-        Wait until global rate limit allows another request.
-        """
+
+        logger.debug(
+            f"Initialized rate limiter: global={global_limit}/{global_period}s, "
+            f"tool-specific={len(self.tool_limits)} tools"
+        )
+
+    # --------------------- helpers -------------------- #
+    async def _acquire_global(self) -> None:
+        """Block until a global slot is available."""
         if self.global_limit is None:
             return
-
+
         while True:
-            # Acquire lock to check and possibly record
             async with self._global_lock:
-                now = time.time()
-                # Remove expired timestamps
+                now = time.monotonic()
                 cutoff = now - self.global_period
-                self._global_timestamps = [ts for ts in self._global_timestamps if ts > cutoff]
-                # If under limit, record and proceed
-                if len(self._global_timestamps) < self.global_limit:
-                    self._global_timestamps.append(now)
+
+                # Prune expired timestamps
+                self._global_ts = [t for t in self._global_ts if t > cutoff]
+
+                # Check if we're under the limit
+                if len(self._global_ts) < self.global_limit:
+                    self._global_ts.append(now)
                     return
-                # Otherwise compute wait time
-                oldest = min(self._global_timestamps)
-                wait_time = (oldest + self.global_period) - now
-            # Sleep outside lock
-            if wait_time > 0:
-                await asyncio.sleep(wait_time)
-            else:
-                # retry immediately
-                continue
-
-    async def _wait_for_tool_limit(self, tool: str) -> None:
-        """
-        Wait until tool-specific rate limit allows another request.
-        """
-        # Check if tool has a limit
+
+                # Calculate wait time until a slot becomes available
+                wait = (self._global_ts[0] + self.global_period) - now
+
+            logger.debug(f"Global rate limit reached, waiting {wait:.2f}s")
+            await asyncio.sleep(wait)
+
+    async def _acquire_tool(self, tool: str) -> None:
+        """Block until a per-tool slot is available (if the tool has a limit)."""
         if tool not in self.tool_limits:
             return
+
         limit, period = self.tool_limits[tool]
-
-        # Initialize lock and timestamps list if needed
-        if tool not in self._tool_locks:
-            self._tool_locks[tool] = asyncio.Lock()
-        if tool not in self._tool_timestamps:
-            self._tool_timestamps[tool] = []
-
+        lock = self._tool_locks.setdefault(tool, asyncio.Lock())
+        buf = self._tool_ts.setdefault(tool, [])
+
         while True:
-            async with self._tool_locks[tool]:
-                now = time.time()
-                # Remove expired timestamps
+            async with lock:
+                now = time.monotonic()
                 cutoff = now - period
-                self._tool_timestamps[tool] = [ts for ts in self._tool_timestamps[tool] if ts > cutoff]
-                # If under limit, record and proceed
-                if len(self._tool_timestamps[tool]) < limit:
-                    self._tool_timestamps[tool].append(now)
+
+                # Prune expired timestamps in-place
+                buf[:] = [t for t in buf if t > cutoff]
+
+                # Check if we're under the limit
+                if len(buf) < limit:
+                    buf.append(now)
                     return
-                # Otherwise compute wait time
-                oldest = min(self._tool_timestamps[tool])
-                wait_time = (oldest + period) - now
-            # Sleep outside lock
-            if wait_time > 0:
-                await asyncio.sleep(wait_time)
-            else:
-                continue
-
+
+                # Calculate wait time until a slot becomes available
+                wait = (buf[0] + period) - now
+
+            logger.debug(f"Tool '{tool}' rate limit reached, waiting {wait:.2f}s")
+            await asyncio.sleep(wait)
+
+    # ----------------------- public -------------------- #
     async def wait(self, tool: str) -> None:
         """
-        Wait until rate limits allow execution of the given tool.
+        Block until rate limits allow execution.
+
+        This method blocks until both global and tool-specific rate limits
+        allow one more execution of the specified tool.
+
+        Args:
+            tool: Name of the tool being executed
+        """
+        await self._acquire_global()
+        await self._acquire_tool(tool)
+
+    async def check_limits(self, tool: str) -> Tuple[bool, bool]:
+        """
+        Check if the tool would be rate limited without consuming a slot.
+
+        This is a non-blocking method useful for checking limits without
+        affecting the rate limiting state.
+
+        Args:
+            tool: Name of the tool to check
+
+        Returns:
+            Tuple of (global_limit_reached, tool_limit_reached)
         """
-        # Wait for global limit first
-        await self._wait_for_global_limit()
-        # Then wait for tool-specific limit
-        await self._wait_for_tool_limit(tool)
+        global_limited = False
+        tool_limited = False
+
+        # Check global limit
+        if self.global_limit is not None:
+            async with self._global_lock:
+                now = time.monotonic()
+                cutoff = now - self.global_period
+                active_ts = [t for t in self._global_ts if t > cutoff]
+                global_limited = len(active_ts) >= self.global_limit
+
+        # Check tool limit
+        if tool in self.tool_limits:
+            limit, period = self.tool_limits[tool]
+            async with self._tool_locks.setdefault(tool, asyncio.Lock()):
+                now = time.monotonic()
+                cutoff = now - period
+                buf = self._tool_ts.get(tool, [])
+                active_ts = [t for t in buf if t > cutoff]
+                tool_limited = len(active_ts) >= limit
+
+        return global_limited, tool_limited


+# --------------------------------------------------------------------------- #
+# Executor wrapper
+# --------------------------------------------------------------------------- #
 class RateLimitedToolExecutor:
     """
-    Wrapper for a tool executor that applies rate limiting.
+    Executor wrapper that applies rate limiting to tool executions.
+
+    This wrapper delegates to another executor but ensures that all
+    tool calls respect the configured rate limits.
     """
-    def __init__(
-        self,
-        executor: Any,
-        rate_limiter: RateLimiter
-    ):
+
+    def __init__(self, executor: Any, limiter: RateLimiter) -> None:
         """
         Initialize the rate-limited executor.
+
+        Args:
+            executor: The underlying executor to wrap
+            limiter: The RateLimiter that controls execution frequency
         """
         self.executor = executor
-        self.rate_limiter = rate_limiter
-
+        self.limiter = limiter
+        logger.debug(f"Initialized rate-limited executor")
+
     async def execute(
         self,
         calls: List[ToolCall],
-        timeout: Optional[float] = None
+        timeout: Optional[float] = None,
+        use_cache: bool = True,
     ) -> List[ToolResult]:
         """
-        Execute tool calls with rate limiting.
+        Execute tool calls while respecting rate limits.
+
+        This method blocks until rate limits allow execution, then delegates
+        to the underlying executor.
+
+        Args:
+            calls: List of tool calls to execute
+            timeout: Optional timeout for execution
+            use_cache: Whether to use cached results (forwarded to underlying executor)
+
+        Returns:
+            List of tool results
         """
-        # Apply rate limiting to each call
-        for call in calls:
-            await self.rate_limiter.wait(call.tool)
-        # Delegate to inner executor
+        if not calls:
+            return []
+
+        # Block for each call *before* dispatching to the wrapped executor
+        for c in calls:
+            await self.limiter.wait(c.tool)
+
+        # Check if the executor has a use_cache parameter
+        if hasattr(self.executor, "execute"):
+            sig = inspect.signature(self.executor.execute)
+            if "use_cache" in sig.parameters:
+                return await self.executor.execute(calls, timeout=timeout, use_cache=use_cache)
+
+        # Fall back to standard execute method
         return await self.executor.execute(calls, timeout=timeout)


+# --------------------------------------------------------------------------- #
+# Convenience decorator for tools
+# --------------------------------------------------------------------------- #
 def rate_limited(limit: int, period: float = 60.0):
     """
-    Decorator to specify rate limits for a tool class.
+    Class decorator that marks a Tool with default rate-limit metadata.
+
+    This allows higher-level code to detect and configure rate limiting
+    for the tool class.
+
+    Example:
+        @rate_limited(limit=10, period=60.0)
+        class WeatherTool:
+            async def execute(self, location: str) -> Dict[str, Any]:
+                # Implementation
+
+    Args:
+        limit: Maximum number of calls allowed in the period
+        period: Time period in seconds
+
+    Returns:
+        Decorated class with rate limit metadata
     """
     def decorator(cls):
         cls._rate_limit = limit
         cls._rate_period = period
         return cls
-    return decorator
+
+    return decorator
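
In short, the 0.2 limiter keeps a queue of `time.monotonic()` timestamps per window, prunes entries older than the period, and sleeps until the oldest remaining timestamp ages out. Below is a minimal usage sketch based only on the API visible in this diff (`RateLimiter`, its keyword-only constructor, `wait`, and the new `check_limits`); the "search" tool name, the loop, and the timings are illustrative, not part of the package:

    # Minimal sketch of the new rate-limiting API from the diff above.
    import asyncio
    import time

    from chuk_tool_processor.execution.wrappers.rate_limiting import RateLimiter


    async def main() -> None:
        # Keyword-only constructor: one global window plus a tighter per-tool window.
        limiter = RateLimiter(
            global_limit=10,                    # at most 10 calls per 60 s overall
            global_period=60.0,
            tool_limits={"search": (2, 5.0)},   # and at most 2 "search" calls per 5 s
        )

        start = time.monotonic()
        for i in range(4):
            # The 3rd and 4th calls block until earlier timestamps age out of the window.
            await limiter.wait("search")
            print(f"call {i} admitted at +{time.monotonic() - start:.1f}s")

        # New in 0.2: non-blocking probe that does not consume a slot.
        global_hit, tool_hit = await limiter.check_limits("search")
        print(f"global limited: {global_hit}, tool limited: {tool_hit}")


    asyncio.run(main())

The executor wrapper is constructed as `RateLimitedToolExecutor(executor, limiter)`, where `executor` is anything exposing `async def execute(calls, timeout=None)`; in 0.2 it also forwards `use_cache` when `inspect.signature` shows that the wrapped `execute` accepts it.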
chuk_tool_processor/execution/wrappers/retry.py
@@ -1,20 +1,36 @@
-# chuk_tool_processor/retry.py
+# chuk_tool_processor/execution/wrappers/retry.py
+"""
+Async-native retry wrapper for tool execution.
+
+This module provides a retry mechanism for tool calls that can automatically
+retry failed executions based on configurable criteria and backoff strategies.
+"""
+from __future__ import annotations
+
 import asyncio
 import logging
 import random
 from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional, Type
+from typing import Any, Dict, List, Optional, Type, Union

-# imports
 from chuk_tool_processor.models.tool_call import ToolCall
 from chuk_tool_processor.models.tool_result import ToolResult
+from chuk_tool_processor.logging import get_logger

-logger = logging.getLogger(__name__)
+logger = get_logger("chuk_tool_processor.execution.wrappers.retry")


 class RetryConfig:
     """
     Configuration for retry behavior.
+
+    Attributes:
+        max_retries: Maximum number of retry attempts
+        base_delay: Base delay between retries in seconds
+        max_delay: Maximum delay between retries in seconds
+        jitter: Whether to add random jitter to delays
+        retry_on_exceptions: List of exception types to retry on
+        retry_on_error_substrings: List of error message substrings to retry on
     """
     def __init__(
         self,
@@ -33,6 +49,17 @@ class RetryConfig:
         self.retry_on_error_substrings = retry_on_error_substrings or []

     def should_retry(self, attempt: int, error: Optional[Exception] = None, error_str: Optional[str] = None) -> bool:
+        """
+        Determine if a retry should be attempted.
+
+        Args:
+            attempt: Current attempt number (0-based)
+            error: Exception that caused the failure, if any
+            error_str: Error message string, if any
+
+        Returns:
+            True if a retry should be attempted, False otherwise
+        """
         if attempt >= self.max_retries:
             return False
         if not self.retry_on_exceptions and not self.retry_on_error_substrings:
@@ -44,6 +71,15 @@ class RetryConfig:
             return False

     def get_delay(self, attempt: int) -> float:
+        """
+        Calculate the delay for the current attempt with exponential backoff.
+
+        Args:
+            attempt: Current attempt number (0-based)
+
+        Returns:
+            Delay in seconds
+        """
         delay = min(self.base_delay * (2 ** attempt), self.max_delay)
         if self.jitter:
             delay *= (0.5 + random.random())
@@ -53,29 +89,58 @@ class RetryConfig:
 class RetryableToolExecutor:
     """
     Wrapper for a tool executor that applies retry logic.
+
+    This executor wraps another executor and automatically retries failed
+    tool calls based on configured retry policies.
     """
     def __init__(
         self,
         executor: Any,
-        default_config: RetryConfig = None,
-        tool_configs: Dict[str, RetryConfig] = None
+        default_config: Optional[RetryConfig] = None,
+        tool_configs: Optional[Dict[str, RetryConfig]] = None
     ):
+        """
+        Initialize the retryable executor.
+
+        Args:
+            executor: The underlying executor to wrap
+            default_config: Default retry configuration for all tools
+            tool_configs: Tool-specific retry configurations
+        """
         self.executor = executor
         self.default_config = default_config or RetryConfig()
         self.tool_configs = tool_configs or {}

     def _get_config(self, tool: str) -> RetryConfig:
+        """Get the retry configuration for a specific tool."""
         return self.tool_configs.get(tool, self.default_config)

     async def execute(
         self,
         calls: List[ToolCall],
-        timeout: Optional[float] = None
+        timeout: Optional[float] = None,
+        use_cache: bool = True
     ) -> List[ToolResult]:
+        """
+        Execute tool calls with retry logic.
+
+        Args:
+            calls: List of tool calls to execute
+            timeout: Optional timeout for each execution
+            use_cache: Whether to use cached results (passed to underlying executor)
+
+        Returns:
+            List of tool results
+        """
+        # Handle empty calls list
+        if not calls:
+            return []
+
+        # Execute each call with retries
         results: List[ToolResult] = []
         for call in calls:
             config = self._get_config(call.tool)
-            result = await self._execute_with_retry(call, config, timeout)
+            result = await self._execute_with_retry(call, config, timeout, use_cache)
             results.append(result)
         return results

@@ -83,8 +148,21 @@ class RetryableToolExecutor:
         self,
         call: ToolCall,
         config: RetryConfig,
-        timeout: Optional[float]
+        timeout: Optional[float],
+        use_cache: bool
     ) -> ToolResult:
+        """
+        Execute a single tool call with retries.
+
+        Args:
+            call: Tool call to execute
+            config: Retry configuration to use
+            timeout: Optional timeout for execution
+            use_cache: Whether to use cached results
+
+        Returns:
+            Tool result after retries
+        """
         attempt = 0
         last_error: Optional[str] = None
         pid = 0
@@ -92,24 +170,31 @@ class RetryableToolExecutor:

         while True:
             start_time = datetime.now(timezone.utc)
+
             try:
-                # execute call
-                tool_results = await self.executor.execute([call], timeout=timeout)
+                # Pass the use_cache parameter if the executor supports it
+                executor_kwargs = {"timeout": timeout}
+                if hasattr(self.executor, "use_cache"):
+                    executor_kwargs["use_cache"] = use_cache
+
+                # Execute call
+                tool_results = await self.executor.execute([call], **executor_kwargs)
                 result = tool_results[0]
                 pid = result.pid
                 machine = result.machine

-                # error in result
+                # Check for error in result
                 if result.error:
                     last_error = result.error
                     if config.should_retry(attempt, error_str=result.error):
                         logger.debug(
-                            f"Retrying tool {call.tool} after error: {result.error} (attempt {attempt + 1})"
+                            f"Retrying tool {call.tool} after error: {result.error} (attempt {attempt + 1}/{config.max_retries})"
                         )
                         await asyncio.sleep(config.get_delay(attempt))
                         attempt += 1
                         continue
-                    # no retry: if any retries happened, wrap final error
+
+                    # No retry: if any retries happened, wrap final error
                     if attempt > 0:
                         end_time = datetime.now(timezone.utc)
                         final = ToolResult(
@@ -121,26 +206,31 @@ class RetryableToolExecutor:
                             machine=machine,
                             pid=pid
                         )
-                        # attach attempts
-                        object.__setattr__(final, 'attempts', attempt)
+                        # Attach attempts
+                        final.attempts = attempt + 1  # Include the original attempt
                         return final
-                    # no retries occurred, return the original failure
+
+                    # No retries occurred, return the original failure
+                    result.attempts = 1
                     return result

-                # success: attach attempts and return
-                object.__setattr__(result, 'attempts', attempt)
+                # Success: attach attempts and return
+                result.attempts = attempt + 1  # Include the original attempt
                 return result
+
             except Exception as e:
                 err_str = str(e)
                 last_error = err_str
+
                 if config.should_retry(attempt, error=e):
                     logger.info(
-                        f"Retrying tool {call.tool} after exception: {err_str} (attempt {attempt + 1})"
+                        f"Retrying tool {call.tool} after exception: {err_str} (attempt {attempt + 1}/{config.max_retries})"
                     )
                     await asyncio.sleep(config.get_delay(attempt))
                     attempt += 1
                     continue
-                # no more retries: return error result
+
+                # No more retries: return error result
                 end_time = datetime.now(timezone.utc)
                 final_exc = ToolResult(
                     tool=call.tool,
@@ -151,7 +241,7 @@ class RetryableToolExecutor:
                     machine=machine,
                     pid=pid
                 )
-                object.__setattr__(final_exc, 'attempts', attempt + 1)
+                final_exc.attempts = attempt + 1  # Include the original attempt
                 return final_exc


@@ -163,6 +253,26 @@ def retryable(
     retry_on_exceptions: Optional[List[Type[Exception]]] = None,
     retry_on_error_substrings: Optional[List[str]] = None
 ):
+    """
+    Decorator for tool classes to configure retry behavior.
+
+    Example:
+        @retryable(max_retries=5, base_delay=2.0)
+        class MyTool:
+            async def execute(self, x: int, y: int) -> int:
+                return x + y
+
+    Args:
+        max_retries: Maximum number of retry attempts
+        base_delay: Base delay between retries in seconds
+        max_delay: Maximum delay between retries in seconds
+        jitter: Whether to add random jitter to delays
+        retry_on_exceptions: List of exception types to retry on
+        retry_on_error_substrings: List of error message substrings to retry on
+
+    Returns:
+        Decorated class with retry configuration
+    """
     def decorator(cls):
         cls._retry_config = RetryConfig(
             max_retries=max_retries,
@@ -173,4 +283,4 @@ def retryable(
             retry_on_error_substrings=retry_on_error_substrings
        )
         return cls
-    return decorator
+    return decorator
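
The retry wrapper backs off exponentially (`base_delay * 2 ** attempt`, capped at `max_delay` and optionally jittered) and stamps the attempt count onto the returned `ToolResult`. A minimal configuration sketch follows, using only `RetryConfig`, `RetryableToolExecutor`, and `@retryable` from this diff; the helper function, the wrapped executor, and `SearchTool` are illustrative placeholders, not part of the package:

    # Minimal sketch of the retry wrapper configuration from the diff above.
    from chuk_tool_processor.execution.wrappers.retry import (
        RetryConfig,
        RetryableToolExecutor,
        retryable,
    )


    def build_retrying_executor(inner_executor):
        """Wrap an existing executor with retry behaviour (illustrative helper)."""
        default_config = RetryConfig(
            max_retries=5,
            base_delay=1.0,       # 1 s, 2 s, 4 s, ... capped at max_delay
            max_delay=30.0,
            jitter=True,
            retry_on_exceptions=[TimeoutError, ConnectionError],
            retry_on_error_substrings=["rate limit", "temporarily unavailable"],
        )
        # Per-tool override: an idempotent tool can afford more attempts.
        tool_configs = {"search": RetryConfig(max_retries=10, base_delay=0.5)}
        return RetryableToolExecutor(
            inner_executor,       # anything exposing `async def execute(calls, timeout=None)`
            default_config=default_config,
            tool_configs=tool_configs,
        )


    # Alternatively, attach default retry metadata directly to a tool class;
    # SearchTool is a made-up example, not part of the package.
    @retryable(max_retries=3, base_delay=2.0, retry_on_error_substrings=["timeout"])
    class SearchTool:
        async def execute(self, query: str) -> str:
            ...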