chuk-tool-processor 0.3-py3-none-any.whl → 0.4.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. chuk_tool_processor/core/processor.py +1 -1
  2. chuk_tool_processor/execution/strategies/inprocess_strategy.py +1 -1
  3. chuk_tool_processor/execution/tool_executor.py +47 -9
  4. chuk_tool_processor/execution/wrappers/caching.py +3 -3
  5. chuk_tool_processor/execution/wrappers/retry.py +163 -174
  6. chuk_tool_processor/logging/context.py +6 -6
  7. chuk_tool_processor/mcp/mcp_tool.py +48 -36
  8. chuk_tool_processor/mcp/register_mcp_tools.py +3 -3
  9. chuk_tool_processor/mcp/setup_mcp_sse.py +4 -4
  10. chuk_tool_processor/mcp/setup_mcp_stdio.py +2 -2
  11. chuk_tool_processor/mcp/stream_manager.py +72 -16
  12. chuk_tool_processor/mcp/transport/base_transport.py +2 -2
  13. chuk_tool_processor/mcp/transport/sse_transport.py +68 -13
  14. chuk_tool_processor/mcp/transport/stdio_transport.py +2 -2
  15. chuk_tool_processor/models/validated_tool.py +6 -6
  16. chuk_tool_processor/plugins/discovery.py +3 -3
  17. chuk_tool_processor/plugins/parsers/base.py +1 -1
  18. chuk_tool_processor/plugins/parsers/xml_tool.py +2 -2
  19. chuk_tool_processor/registry/auto_register.py +5 -5
  20. chuk_tool_processor/registry/interface.py +2 -2
  21. chuk_tool_processor/registry/providers/memory.py +2 -2
  22. chuk_tool_processor/utils/validation.py +1 -1
  23. chuk_tool_processor-0.4.1.dist-info/METADATA +831 -0
  24. {chuk_tool_processor-0.3.dist-info → chuk_tool_processor-0.4.1.dist-info}/RECORD +26 -26
  25. chuk_tool_processor-0.3.dist-info/METADATA +0 -401
  26. {chuk_tool_processor-0.3.dist-info → chuk_tool_processor-0.4.1.dist-info}/WHEEL +0 -0
  27. {chuk_tool_processor-0.3.dist-info → chuk_tool_processor-0.4.1.dist-info}/top_level.txt +0 -0
--- a/chuk_tool_processor/core/processor.py
+++ b/chuk_tool_processor/core/processor.py
@@ -367,7 +367,7 @@ class ToolProcessor:
         all_calls.extend(result)

         # ------------------------------------------------------------------ #
-        # Remove duplicates use a stable digest instead of hashing a
+        # Remove duplicates - use a stable digest instead of hashing a
         # frozenset of argument items (which breaks on unhashable types).
         # ------------------------------------------------------------------ #
         def _args_digest(args: Dict[str, Any]) -> str:
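The digest in question has to be stable under dict ordering and tolerate unhashable argument values such as lists or nested dicts. A minimal sketch of one way to build such a helper (the `json.dumps`-plus-`hashlib` approach shown here is an assumption; the hunk above only shows the function signature):

```python
import hashlib
import json
from typing import Any, Dict


def _args_digest(args: Dict[str, Any]) -> str:
    # sort_keys makes the digest independent of insertion order;
    # default=str degrades gracefully for non-JSON-serializable values.
    canonical = json.dumps(args, sort_keys=True, default=str)
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()


# Same arguments in a different order hash identically:
assert _args_digest({"a": 1, "b": [2, 3]}) == _args_digest({"b": [2, 3], "a": 1})
```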
--- a/chuk_tool_processor/execution/strategies/inprocess_strategy.py
+++ b/chuk_tool_processor/execution/strategies/inprocess_strategy.py
@@ -393,7 +393,7 @@ class InProcessStrategy(ExecutionStrategy):
        """
        Execute a single tool call with guaranteed timeout.

-        The entire invocation including argument validation is wrapped
+        The entire invocation - including argument validation - is wrapped
        by the semaphore to honour *max_concurrency*.

        Args:
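The behaviour this docstring describes, acquiring the concurrency semaphore around the whole invocation and enforcing the timeout inside it, follows a standard asyncio pattern. A self-contained sketch under that assumption (none of these names come from the package):

```python
import asyncio


async def run_bounded(tool_coro_factory, semaphore: asyncio.Semaphore, timeout: float):
    # The semaphore is held for the entire invocation, validation included,
    # so at most `max_concurrency` calls are ever in flight.
    async with semaphore:
        return await asyncio.wait_for(tool_coro_factory(), timeout)


async def main():
    semaphore = asyncio.Semaphore(2)  # max_concurrency = 2

    async def fake_tool():
        await asyncio.sleep(0.1)
        return "ok"

    print(await asyncio.gather(*(run_bounded(fake_tool, semaphore, 1.0) for _ in range(5))))


asyncio.run(main())
```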
--- a/chuk_tool_processor/execution/tool_executor.py
+++ b/chuk_tool_processor/execution/tool_executor.py
@@ -1,10 +1,12 @@
 #!/usr/bin/env python
 # chuk_tool_processor/execution/tool_executor.py
 """
-Modified ToolExecutor with true streaming support and duplicate prevention.
+Modified ToolExecutor with true streaming support and proper timeout handling.

 This version accesses streaming tools' stream_execute method directly
 to enable true item-by-item streaming behavior, while preventing duplicates.
+
+FIXED: Proper timeout precedence - respects strategy's default_timeout when available.
 """
 import asyncio
 from datetime import datetime, timezone
@@ -25,12 +27,14 @@ class ToolExecutor:

     This class provides a unified interface for executing tools using different
     execution strategies, with special support for streaming tools.
+
+    FIXED: Proper timeout handling that respects strategy's default_timeout.
     """

     def __init__(
         self,
         registry: Optional[ToolRegistryInterface] = None,
-        default_timeout: float = 10.0,
+        default_timeout: Optional[float] = None,  # Made optional to allow strategy precedence
         strategy: Optional[ExecutionStrategy] = None,
         strategy_kwargs: Optional[Dict[str, Any]] = None,
     ) -> None:
@@ -39,12 +43,12 @@

         Args:
             registry: Tool registry to use for tool lookups
-            default_timeout: Default timeout for tool execution
+            default_timeout: Default timeout for tool execution (optional)
+                If None, will use strategy's default_timeout if available
             strategy: Optional execution strategy (default: InProcessStrategy)
             strategy_kwargs: Additional arguments for the strategy constructor
         """
         self.registry = registry
-        self.default_timeout = default_timeout

         # Create strategy if not provided
         if strategy is None:
@@ -55,13 +59,31 @@
                raise ValueError("Registry must be provided if strategy is not")

            strategy_kwargs = strategy_kwargs or {}
+
+            # If no default_timeout specified, use a reasonable default for the strategy
+            strategy_timeout = default_timeout if default_timeout is not None else 30.0
+
            strategy = _inprocess_mod.InProcessStrategy(
                registry,
-                default_timeout=default_timeout,
+                default_timeout=strategy_timeout,
                **strategy_kwargs,
            )

        self.strategy = strategy
+
+        # Set default timeout with proper precedence:
+        # 1. Explicit default_timeout parameter
+        # 2. Strategy's default_timeout (if available and not None)
+        # 3. Fallback to 30.0 seconds
+        if default_timeout is not None:
+            self.default_timeout = default_timeout
+            logger.debug(f"Using explicit default_timeout: {self.default_timeout}s")
+        elif hasattr(strategy, 'default_timeout') and strategy.default_timeout is not None:
+            self.default_timeout = strategy.default_timeout
+            logger.debug(f"Using strategy's default_timeout: {self.default_timeout}s")
+        else:
+            self.default_timeout = 30.0  # Conservative fallback
+            logger.debug(f"Using fallback default_timeout: {self.default_timeout}s")

    @property
    def supports_streaming(self) -> bool:
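The effect of the precedence chain, sketched with the class names from this diff (assuming a `registry` object is already constructed; the 55-second figure is arbitrary):

```python
from chuk_tool_processor.execution.strategies.inprocess_strategy import InProcessStrategy
from chuk_tool_processor.execution.tool_executor import ToolExecutor

strategy = InProcessStrategy(registry, default_timeout=55.0)

ToolExecutor(registry, default_timeout=5.0, strategy=strategy)  # -> 5.0, explicit parameter wins
executor = ToolExecutor(registry, strategy=strategy)            # -> 55.0, strategy's value adopted
ToolExecutor(registry)                                          # -> 30.0 via the fallback chain
```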
@@ -79,7 +101,7 @@

        Args:
            calls: List of tool calls to execute
-            timeout: Optional timeout for execution (overrides default_timeout)
+            timeout: Optional timeout for execution (overrides all defaults)
            use_cache: Whether to use cached results (for caching wrappers)

        Returns:
@@ -88,10 +110,13 @@
        if not calls:
            return []

-        # Use the provided timeout or fall back to default
+        # Timeout precedence:
+        # 1. Explicit timeout parameter (highest priority)
+        # 2. Executor's default_timeout (which already considers strategy's timeout)
        effective_timeout = timeout if timeout is not None else self.default_timeout

-        logger.debug(f"Executing {len(calls)} tool calls with timeout {effective_timeout}s")
+        logger.debug(f"Executing {len(calls)} tool calls with timeout {effective_timeout}s "
+                     f"(explicit: {timeout is not None})")

        # Delegate to the strategy
        return await self.strategy.run(calls, timeout=effective_timeout)
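Callers can still pin a per-invocation limit; an explicit `timeout=` bypasses both the executor's and the strategy's defaults. A hedged usage sketch reusing the `executor` from the sketch above (the tool name and `ToolCall` field names are illustrative):

```python
from chuk_tool_processor.models.tool_call import ToolCall

calls = [ToolCall(tool="search", arguments={"q": "chuk"})]

results = await executor.execute(calls)               # resolved default_timeout applies
results = await executor.execute(calls, timeout=2.0)  # explicit override wins
```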
@@ -118,9 +143,12 @@
        if not calls:
            return

-        # Use the provided timeout or fall back to default
+        # Use the same timeout precedence as execute()
        effective_timeout = timeout if timeout is not None else self.default_timeout

+        logger.debug(f"Stream executing {len(calls)} tool calls with timeout {effective_timeout}s "
+                     f"(explicit: {timeout is not None})")
+
        # There are two possible ways to handle streaming:
        # 1. Use the strategy's stream_run if available
        # 2. Use direct streaming for streaming tools
@@ -232,6 +260,8 @@
        machine = "direct-stream"
        pid = 0

+        logger.debug(f"Direct streaming {call.tool} with timeout {timeout}s")
+
        # Create streaming task with timeout
        async def stream_with_timeout():
            try:
@@ -265,11 +295,16 @@
        try:
            if timeout:
                await asyncio.wait_for(stream_with_timeout(), timeout)
+                logger.debug(f"Direct streaming {call.tool} completed within {timeout}s")
            else:
                await stream_with_timeout()
+                logger.debug(f"Direct streaming {call.tool} completed (no timeout)")
        except asyncio.TimeoutError:
            # Handle timeout
            end_time = datetime.now(timezone.utc)
+            actual_duration = (end_time - start_time).total_seconds()
+            logger.debug(f"Direct streaming {call.tool} timed out after {actual_duration:.3f}s (limit: {timeout}s)")
+
            timeout_result = ToolResult(
                tool=call.tool,
                result=None,
@@ -283,6 +318,8 @@
        except Exception as e:
            # Handle other errors
            end_time = datetime.now(timezone.utc)
+            logger.exception(f"Error in direct streaming {call.tool}: {e}")
+
            error_result = ToolResult(
                tool=call.tool,
                result=None,
@@ -300,5 +337,6 @@

        This should be called during application shutdown to ensure proper cleanup.
        """
+        logger.debug("Shutting down ToolExecutor")
        if hasattr(self.strategy, "shutdown") and callable(self.strategy.shutdown):
            await self.strategy.shutdown()
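Shutdown is duck-typed: the executor calls `shutdown()` only if the strategy actually exposes a callable by that name, so custom strategies opt in simply by defining one. A minimal sketch (the `MyStrategy` class is hypothetical):

```python
class MyStrategy:
    async def shutdown(self) -> None:
        ...  # close subprocesses, transports, caches


# Safe regardless of whether the strategy defines shutdown():
await executor.shutdown()
```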
--- a/chuk_tool_processor/execution/wrappers/caching.py
+++ b/chuk_tool_processor/execution/wrappers/caching.py
@@ -4,9 +4,9 @@ Async-native caching wrapper for tool execution.

This module provides:

-* **CacheInterface** abstract async cache contract for custom implementations
-* **InMemoryCache** simple, thread-safe in-memory cache with TTL support
-* **CachingToolExecutor** executor wrapper that transparently caches results
+* **CacheInterface** - abstract async cache contract for custom implementations
+* **InMemoryCache** - simple, thread-safe in-memory cache with TTL support
+* **CachingToolExecutor** - executor wrapper that transparently caches results

Results retrieved from cache are marked with `cached=True` and `machine="cache"`
for easy detection.
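Because cache hits carry the `cached=True` / `machine="cache"` markers, callers can separate them from fresh executions without extra bookkeeping. A sketch of the wiring (the exact constructor keywords of `CachingToolExecutor` and `InMemoryCache` are assumptions):

```python
from chuk_tool_processor.execution.wrappers.caching import CachingToolExecutor, InMemoryCache

caching_executor = CachingToolExecutor(executor, cache=InMemoryCache())

results = await caching_executor.execute(calls)
for r in results:
    if r.cached and r.machine == "cache":
        print(f"{r.tool}: served from cache")
```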
--- a/chuk_tool_processor/execution/wrappers/retry.py
+++ b/chuk_tool_processor/execution/wrappers/retry.py
@@ -2,36 +2,31 @@
 """
 Async-native retry wrapper for tool execution.

-This module provides a retry mechanism for tool calls that can automatically
-retry failed executions based on configurable criteria and backoff strategies.
+Adds exponential back-off retry logic and *deadline-aware* timeout handling so a
+`timeout=` passed by callers is treated as the **total wall-clock budget** for
+all attempts of a single tool call.
 """
 from __future__ import annotations

 import asyncio
-import logging
 import random
+import time
 from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional, Type, Union
+from typing import Any, Dict, List, Optional, Type

+from chuk_tool_processor.logging import get_logger
 from chuk_tool_processor.models.tool_call import ToolCall
 from chuk_tool_processor.models.tool_result import ToolResult
-from chuk_tool_processor.logging import get_logger

 logger = get_logger("chuk_tool_processor.execution.wrappers.retry")


+# --------------------------------------------------------------------------- #
+# Retry configuration
+# --------------------------------------------------------------------------- #
 class RetryConfig:
-    """
-    Configuration for retry behavior.
-
-    Attributes:
-        max_retries: Maximum number of retry attempts
-        base_delay: Base delay between retries in seconds
-        max_delay: Maximum delay between retries in seconds
-        jitter: Whether to add random jitter to delays
-        retry_on_exceptions: List of exception types to retry on
-        retry_on_error_substrings: List of error message substrings to retry on
-    """
+    """Configuration object that decides *whether* and *when* to retry."""
+
     def __init__(
         self,
         max_retries: int = 3,
@@ -39,248 +34,242 @@ class RetryConfig:
         max_delay: float = 60.0,
         jitter: bool = True,
         retry_on_exceptions: Optional[List[Type[Exception]]] = None,
-        retry_on_error_substrings: Optional[List[str]] = None
+        retry_on_error_substrings: Optional[List[str]] = None,
     ):
+        if max_retries < 0:
+            raise ValueError("max_retries cannot be negative")
         self.max_retries = max_retries
         self.base_delay = base_delay
         self.max_delay = max_delay
         self.jitter = jitter
         self.retry_on_exceptions = retry_on_exceptions or []
         self.retry_on_error_substrings = retry_on_error_substrings or []
-
-    def should_retry(self, attempt: int, error: Optional[Exception] = None, error_str: Optional[str] = None) -> bool:
-        """
-        Determine if a retry should be attempted.
-
-        Args:
-            attempt: Current attempt number (0-based)
-            error: Exception that caused the failure, if any
-            error_str: Error message string, if any
-
-        Returns:
-            True if a retry should be attempted, False otherwise
-        """
+
+    # --------------------------------------------------------------------- #
+    # Decision helpers
+    # --------------------------------------------------------------------- #
+    def should_retry(  # noqa: D401 (imperative mood is fine)
+        self,
+        attempt: int,
+        *,
+        error: Optional[Exception] = None,
+        error_str: Optional[str] = None,
+    ) -> bool:
+        """Return *True* iff another retry is allowed for this attempt."""
         if attempt >= self.max_retries:
             return False
+
+        # Nothing specified → always retry until max_retries reached
         if not self.retry_on_exceptions and not self.retry_on_error_substrings:
             return True
+
         if error is not None and any(isinstance(error, exc) for exc in self.retry_on_exceptions):
             return True
+
         if error_str and any(substr in error_str for substr in self.retry_on_error_substrings):
             return True
+
         return False
-
+
+    # --------------------------------------------------------------------- #
+    # Back-off
+    # --------------------------------------------------------------------- #
     def get_delay(self, attempt: int) -> float:
-        """
-        Calculate the delay for the current attempt with exponential backoff.
-
-        Args:
-            attempt: Current attempt number (0-based)
-
-        Returns:
-            Delay in seconds
-        """
+        """Exponential back-off delay for *attempt* (0-based)."""
         delay = min(self.base_delay * (2 ** attempt), self.max_delay)
         if self.jitter:
-            delay *= (0.5 + random.random())
+            delay *= 0.5 + random.random()  # jitter in [0.5, 1.5)
         return delay


+# --------------------------------------------------------------------------- #
+# Retryable executor
+# --------------------------------------------------------------------------- #
 class RetryableToolExecutor:
     """
-    Wrapper for a tool executor that applies retry logic.
-
-    This executor wraps another executor and automatically retries failed
-    tool calls based on configured retry policies.
+    Wraps another executor and re-invokes it according to a :class:`RetryConfig`.
     """
+
     def __init__(
         self,
         executor: Any,
+        *,
         default_config: Optional[RetryConfig] = None,
-        tool_configs: Optional[Dict[str, RetryConfig]] = None
+        tool_configs: Optional[Dict[str, RetryConfig]] = None,
     ):
-        """
-        Initialize the retryable executor.
-
-        Args:
-            executor: The underlying executor to wrap
-            default_config: Default retry configuration for all tools
-            tool_configs: Tool-specific retry configurations
-        """
         self.executor = executor
         self.default_config = default_config or RetryConfig()
         self.tool_configs = tool_configs or {}
-
-    def _get_config(self, tool: str) -> RetryConfig:
-        """Get the retry configuration for a specific tool."""
+
+    # --------------------------------------------------------------------- #
+    # Public helpers
+    # --------------------------------------------------------------------- #
+    def _config_for(self, tool: str) -> RetryConfig:
         return self.tool_configs.get(tool, self.default_config)
-
+
     async def execute(
         self,
         calls: List[ToolCall],
+        *,
         timeout: Optional[float] = None,
-        use_cache: bool = True
+        use_cache: bool = True,
     ) -> List[ToolResult]:
-        """
-        Execute tool calls with retry logic.
-
-        Args:
-            calls: List of tool calls to execute
-            timeout: Optional timeout for each execution
-            use_cache: Whether to use cached results (passed to underlying executor)
-
-        Returns:
-            List of tool results
-        """
-        # Handle empty calls list
         if not calls:
             return []
-
-        # Execute each call with retries
-        results: List[ToolResult] = []
+
+        out: List[ToolResult] = []
         for call in calls:
-            config = self._get_config(call.tool)
-            result = await self._execute_with_retry(call, config, timeout, use_cache)
-            results.append(result)
-        return results
-
-    async def _execute_with_retry(
+            cfg = self._config_for(call.tool)
+            out.append(await self._execute_single(call, cfg, timeout, use_cache))
+        return out
+
+    # --------------------------------------------------------------------- #
+    # Core retry loop (per call)
+    # --------------------------------------------------------------------- #
+    async def _execute_single(
         self,
         call: ToolCall,
-        config: RetryConfig,
+        cfg: RetryConfig,
         timeout: Optional[float],
-        use_cache: bool
+        use_cache: bool,
     ) -> ToolResult:
-        """
-        Execute a single tool call with retries.
-
-        Args:
-            call: Tool call to execute
-            config: Retry configuration to use
-            timeout: Optional timeout for execution
-            use_cache: Whether to use cached results
-
-        Returns:
-            Tool result after retries
-        """
         attempt = 0
         last_error: Optional[str] = None
         pid = 0
         machine = "unknown"
-
+
+        # ---------------------------------------------------------------- #
+        # Deadline budget (wall-clock)
+        # ---------------------------------------------------------------- #
+        deadline = None
+        if timeout is not None:
+            deadline = time.monotonic() + timeout
+
         while True:
+            # ---------------------------------------------------------------- #
+            # Check whether we have any time left *before* trying the call
+            # ---------------------------------------------------------------- #
+            if deadline is not None:
+                remaining = deadline - time.monotonic()
+                if remaining <= 0:
+                    return ToolResult(
+                        tool=call.tool,
+                        result=None,
+                        error=f"Timeout after {timeout}s",
+                        start_time=datetime.now(timezone.utc),
+                        end_time=datetime.now(timezone.utc),
+                        machine=machine,
+                        pid=pid,
+                        attempts=attempt,
+                    )
+            else:
+                remaining = None  # unlimited
+
+            # ---------------------------------------------------------------- #
+            # Execute one attempt
+            # ---------------------------------------------------------------- #
             start_time = datetime.now(timezone.utc)
-
             try:
-                # Pass the use_cache parameter if the executor supports it
-                executor_kwargs = {"timeout": timeout}
+                kwargs = {"timeout": remaining} if remaining is not None else {}
                 if hasattr(self.executor, "use_cache"):
-                    executor_kwargs["use_cache"] = use_cache
-
-                # Execute call
-                tool_results = await self.executor.execute([call], **executor_kwargs)
-                result = tool_results[0]
+                    kwargs["use_cache"] = use_cache
+
+                result = (await self.executor.execute([call], **kwargs))[0]
                 pid = result.pid
                 machine = result.machine
-
-                # Check for error in result
-                if result.error:
-                    last_error = result.error
-                    if config.should_retry(attempt, error_str=result.error):
-                        logger.debug(
-                            f"Retrying tool {call.tool} after error: {result.error} (attempt {attempt + 1}/{config.max_retries})"
-                        )
-                        await asyncio.sleep(config.get_delay(attempt))
-                        attempt += 1
-                        continue
-
-                    # No retry: if any retries happened, wrap final error
-                    if attempt > 0:
-                        end_time = datetime.now(timezone.utc)
-                        final = ToolResult(
-                            tool=call.tool,
-                            result=None,
-                            error=f"Max retries reached ({config.max_retries}): {last_error}",
-                            start_time=start_time,
-                            end_time=end_time,
-                            machine=machine,
-                            pid=pid
-                        )
-                        # Attach attempts
-                        final.attempts = attempt + 1  # Include the original attempt
-                        return final
-
-                    # No retries occurred, return the original failure
-                    result.attempts = 1
+
+                # Success?
+                if not result.error:
+                    result.attempts = attempt + 1
                     return result
-
-                # Success: attach attempts and return
-                result.attempts = attempt + 1  # Include the original attempt
+
+                # Error: decide on retry
+                last_error = result.error
+                if cfg.should_retry(attempt, error_str=result.error):
+                    delay = cfg.get_delay(attempt)
+                    # never overshoot the deadline
+                    if deadline is not None:
+                        delay = min(delay, max(deadline - time.monotonic(), 0))
+                    if delay:
+                        await asyncio.sleep(delay)
+                    attempt += 1
+                    continue
+
+                # No more retries wanted
+                result.error = self._wrap_error(last_error, attempt, cfg)
+                result.attempts = attempt + 1
                 return result
-
-            except Exception as e:
-                err_str = str(e)
+
+            # ---------------------------------------------------------------- #
+            # Exception path
+            # ---------------------------------------------------------------- #
+            except Exception as exc:  # noqa: BLE001
+                err_str = str(exc)
                 last_error = err_str
-
-                if config.should_retry(attempt, error=e):
-                    logger.info(
-                        f"Retrying tool {call.tool} after exception: {err_str} (attempt {attempt + 1}/{config.max_retries})"
-                    )
-                    await asyncio.sleep(config.get_delay(attempt))
+                if cfg.should_retry(attempt, error=exc):
+                    delay = cfg.get_delay(attempt)
+                    if deadline is not None:
+                        delay = min(delay, max(deadline - time.monotonic(), 0))
+                    if delay:
+                        await asyncio.sleep(delay)
                     attempt += 1
                     continue
-
-                # No more retries: return error result
+
                 end_time = datetime.now(timezone.utc)
-                final_exc = ToolResult(
+                return ToolResult(
                     tool=call.tool,
                     result=None,
-                    error=err_str,
+                    error=self._wrap_error(err_str, attempt, cfg),
                     start_time=start_time,
                     end_time=end_time,
                     machine=machine,
-                    pid=pid
+                    pid=pid,
+                    attempts=attempt + 1,
                 )
-                final_exc.attempts = attempt + 1  # Include the original attempt
-                return final_exc

+    # --------------------------------------------------------------------- #
+    # Helpers
+    # --------------------------------------------------------------------- #
+    @staticmethod
+    def _wrap_error(err: str, attempt: int, cfg: RetryConfig) -> str:
+        if attempt >= cfg.max_retries and attempt > 0:
+            return f"Max retries reached ({cfg.max_retries}): {err}"
+        return err

+
+# --------------------------------------------------------------------------- #
+# Decorator helper
+# --------------------------------------------------------------------------- #
 def retryable(
+    *,
     max_retries: int = 3,
     base_delay: float = 1.0,
     max_delay: float = 60.0,
     jitter: bool = True,
     retry_on_exceptions: Optional[List[Type[Exception]]] = None,
-    retry_on_error_substrings: Optional[List[str]] = None
+    retry_on_error_substrings: Optional[List[str]] = None,
 ):
     """
-    Decorator for tool classes to configure retry behavior.
-
-    Example:
-        @retryable(max_retries=5, base_delay=2.0)
-        class MyTool:
-            async def execute(self, x: int, y: int) -> int:
-                return x + y
-
-    Args:
-        max_retries: Maximum number of retry attempts
-        base_delay: Base delay between retries in seconds
-        max_delay: Maximum delay between retries in seconds
-        jitter: Whether to add random jitter to delays
-        retry_on_exceptions: List of exception types to retry on
-        retry_on_error_substrings: List of error message substrings to retry on
-
-    Returns:
-        Decorated class with retry configuration
+    Class decorator that attaches a :class:`RetryConfig` to a *tool* class.
+
+    Example
+    -------
+    ```python
+    @retryable(max_retries=5, base_delay=0.5)
+    class MyTool:
+        ...
+    ```
     """
-    def decorator(cls):
+
+    def _decorator(cls):
         cls._retry_config = RetryConfig(
             max_retries=max_retries,
             base_delay=base_delay,
             max_delay=max_delay,
             jitter=jitter,
             retry_on_exceptions=retry_on_exceptions,
-            retry_on_error_substrings=retry_on_error_substrings
+            retry_on_error_substrings=retry_on_error_substrings,
         )
         return cls
-    return decorator
+
+    return _decorator
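Wiring this up: with `base_delay=1.0` the un-jittered back-off schedule is 1 s, 2 s, 4 s, ... capped at `max_delay`, and both the attempts and the sleeps are clipped to the caller's budget. A hedged sketch, assuming the `executor` and `calls` from the earlier sketches:

```python
from chuk_tool_processor.execution.wrappers.retry import RetryConfig, RetryableToolExecutor

retrying_executor = RetryableToolExecutor(
    executor,
    default_config=RetryConfig(
        max_retries=3,
        base_delay=1.0,
        retry_on_error_substrings=["rate limit"],  # retry only matching failures
    ),
)

# timeout=10.0 is the *total* wall-clock budget: attempts plus back-off
# sleeps together may not exceed ten seconds.
results = await retrying_executor.execute(calls, timeout=10.0)
```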
--- a/chuk_tool_processor/logging/context.py
+++ b/chuk_tool_processor/logging/context.py
@@ -4,12 +4,12 @@ Async-safe context management for structured logging.

This module provides:

-* **LogContext** an `asyncio`-aware container that keeps a per-task dict of
+* **LogContext** - an `asyncio`-aware container that keeps a per-task dict of
  contextual data (request IDs, span IDs, arbitrary metadata, …).
-* **log_context** a global instance of `LogContext` for convenience.
-* **StructuredAdapter** a `logging.LoggerAdapter` that injects the current
+* **log_context** - a global instance of `LogContext` for convenience.
+* **StructuredAdapter** - a `logging.LoggerAdapter` that injects the current
  `log_context.context` into every log record.
-* **get_logger** helper that returns a configured `StructuredAdapter`.
+* **get_logger** - helper that returns a configured `StructuredAdapter`.
"""

from __future__ import annotations
@@ -75,7 +75,7 @@ class LogContext:
    Async-safe context container.

    Holds a mutable dict that is *local* to the current asyncio task, so
-    concurrent coroutines dont interfere with each other.
+    concurrent coroutines don't interfere with each other.
    """

    # ------------------------------------------------------------------ #
@@ -196,7 +196,7 @@ class StructuredAdapter(logging.LoggerAdapter):
    """

    # --------------------------- core hook -------------------------------- #
-    def process(self, msg, kwargs):  # noqa: D401 keep signature from base
+    def process(self, msg, kwargs):  # noqa: D401 - keep signature from base
        kwargs = kwargs or {}
        extra = kwargs.get("extra", {}).copy()
        ctx = log_context.context
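Since the context dict is task-local, two concurrent requests can carry different metadata through the same logger. A sketch of the intended flow (how values are written into `LogContext` is not shown in these hunks, so the dict-style assignment is an assumption):

```python
from chuk_tool_processor.logging import get_logger
from chuk_tool_processor.logging.context import log_context

logger = get_logger("my_app.handler")


async def handle(request_id: str) -> None:
    log_context.context["request_id"] = request_id  # assumed mutable mapping
    logger.info("processing")  # StructuredAdapter injects request_id via `extra`
```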