chuk-tool-processor 0.1.7__py3-none-any.whl → 0.3__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.

This is a potentially problematic release.


This version of chuk-tool-processor might be problematic. Click here for more details.

@@ -1,11 +1,13 @@
1
1
  #!/usr/bin/env python
2
2
  # chuk_tool_processor/execution/strategies/inprocess_strategy.py
3
3
  """
4
- In-process execution strategy for tools with true streaming support.
4
+ In-process execution strategy for tools with proper timeout handling.
5
5
 
6
6
  This strategy executes tools concurrently in the same process using asyncio.
7
7
  It has special support for streaming tools, accessing their stream_execute method
8
8
  directly to enable true item-by-item streaming.
9
+
10
+ FIXED: Ensures consistent timeout handling across all execution paths.
9
11
  """
10
12
  from __future__ import annotations
11
13
 
@@ -36,7 +38,7 @@ async def _noop_cm():
36
38
 
37
39
  # --------------------------------------------------------------------------- #
38
40
  class InProcessStrategy(ExecutionStrategy):
39
- """Execute tools in the local event-loop with optional concurrency cap."""
41
+ """Execute tools in the local event-loop with optional concurrency cap and consistent timeout handling."""
40
42
 
41
43
  def __init__(
42
44
  self,
@@ -53,7 +55,7 @@ class InProcessStrategy(ExecutionStrategy):
53
55
  max_concurrency: Maximum number of concurrent executions
54
56
  """
55
57
  self.registry = registry
56
- self.default_timeout = default_timeout
58
+ self.default_timeout = default_timeout or 30.0 # Always have a default
57
59
  self._sem = asyncio.Semaphore(max_concurrency) if max_concurrency else None
58
60
 
59
61
  # Task tracking for cleanup
@@ -64,6 +66,9 @@ class InProcessStrategy(ExecutionStrategy):
64
66
  # Tracking for which calls are being handled directly by the executor
65
67
  # to prevent duplicate streaming results
66
68
  self._direct_streaming_calls = set()
69
+
70
+ logger.debug("InProcessStrategy initialized with timeout: %ss, max_concurrency: %s",
71
+ self.default_timeout, max_concurrency)
67
72
 
68
73
  # ------------------------------------------------------------------ #
69
74
  def mark_direct_streaming(self, call_ids: Set[str]) -> None:
@@ -116,11 +121,15 @@ class InProcessStrategy(ExecutionStrategy):
116
121
  """
117
122
  if not calls:
118
123
  return []
124
+
125
+ # Use default_timeout if no timeout specified
126
+ effective_timeout = timeout if timeout is not None else self.default_timeout
127
+ logger.debug("Executing %d calls with %ss timeout each", len(calls), effective_timeout)
119
128
 
120
129
  tasks = []
121
130
  for call in calls:
122
131
  task = asyncio.create_task(
123
- self._execute_single_call(call, timeout or self.default_timeout)
132
+ self._execute_single_call(call, effective_timeout) # Always pass timeout
124
133
  )
125
134
  self._active_tasks.add(task)
126
135
  task.add_done_callback(self._active_tasks.discard)
@@ -142,10 +151,13 @@ class InProcessStrategy(ExecutionStrategy):
142
151
  if not calls:
143
152
  return
144
153
 
154
+ # Use default_timeout if no timeout specified
155
+ effective_timeout = timeout if timeout is not None else self.default_timeout
156
+
145
157
  queue: asyncio.Queue[ToolResult] = asyncio.Queue()
146
158
  tasks = {
147
159
  asyncio.create_task(
148
- self._stream_tool_call(call, queue, timeout or self.default_timeout)
160
+ self._stream_tool_call(call, queue, effective_timeout) # Always pass timeout
149
161
  )
150
162
  for call in calls
151
163
  if call.id not in self._direct_streaming_calls
@@ -170,7 +182,7 @@ class InProcessStrategy(ExecutionStrategy):
170
182
  self,
171
183
  call: ToolCall,
172
184
  queue: asyncio.Queue,
173
- timeout: Optional[float],
185
+ timeout: float, # Make timeout required
174
186
  ) -> None:
175
187
  """
176
188
  Execute a tool call with streaming support.
@@ -181,7 +193,7 @@ class InProcessStrategy(ExecutionStrategy):
181
193
  Args:
182
194
  call: The tool call to execute
183
195
  queue: Queue to put results into
184
- timeout: Optional timeout in seconds
196
+ timeout: Timeout in seconds (required)
185
197
  """
186
198
  # Skip if call is being handled directly by the executor
187
199
  if call.id in self._direct_streaming_calls:
@@ -269,7 +281,7 @@ class InProcessStrategy(ExecutionStrategy):
269
281
  tool: Any,
270
282
  call: ToolCall,
271
283
  queue: asyncio.Queue,
272
- timeout: Optional[float]
284
+ timeout: float, # Make timeout required
273
285
  ) -> None:
274
286
  """
275
287
  Stream results from a streaming tool with timeout support.
@@ -281,12 +293,14 @@ class InProcessStrategy(ExecutionStrategy):
281
293
  tool: The tool instance
282
294
  call: Tool call data
283
295
  queue: Queue to put results into
284
- timeout: Optional timeout in seconds
296
+ timeout: Timeout in seconds (required)
285
297
  """
286
298
  start_time = datetime.now(timezone.utc)
287
299
  machine = os.uname().nodename
288
300
  pid = os.getpid()
289
301
 
302
+ logger.debug("Streaming %s with %ss timeout", call.tool, timeout)
303
+
290
304
  # Define the streaming task
291
305
  async def streamer():
292
306
  try:
@@ -318,15 +332,17 @@ class InProcessStrategy(ExecutionStrategy):
318
332
  await queue.put(error_result)
319
333
 
320
334
  try:
321
- # Execute with timeout if specified
322
- if timeout:
323
- await asyncio.wait_for(streamer(), timeout)
324
- else:
325
- await streamer()
335
+ # Always execute with timeout
336
+ await asyncio.wait_for(streamer(), timeout)
337
+ logger.debug("%s streaming completed within %ss", call.tool, timeout)
326
338
 
327
339
  except asyncio.TimeoutError:
328
340
  # Handle timeout
329
341
  now = datetime.now(timezone.utc)
342
+ actual_duration = (now - start_time).total_seconds()
343
+ logger.debug("%s streaming timed out after %.3fs (limit: %ss)",
344
+ call.tool, actual_duration, timeout)
345
+
330
346
  timeout_result = ToolResult(
331
347
  tool=call.tool,
332
348
  result=None,
@@ -341,6 +357,8 @@ class InProcessStrategy(ExecutionStrategy):
341
357
  except Exception as e:
342
358
  # Handle other errors
343
359
  now = datetime.now(timezone.utc)
360
+ logger.debug("%s streaming failed: %s", call.tool, e)
361
+
344
362
  error_result = ToolResult(
345
363
  tool=call.tool,
346
364
  result=None,
@@ -356,7 +374,7 @@ class InProcessStrategy(ExecutionStrategy):
356
374
  self,
357
375
  call: ToolCall,
358
376
  queue: asyncio.Queue,
359
- timeout: Optional[float],
377
+ timeout: float, # Make timeout required
360
378
  ) -> None:
361
379
  """Execute a single call and put the result in the queue."""
362
380
  # Skip if call is being handled directly by the executor
@@ -370,17 +388,17 @@ class InProcessStrategy(ExecutionStrategy):
370
388
  async def _execute_single_call(
371
389
  self,
372
390
  call: ToolCall,
373
- timeout: Optional[float],
391
+ timeout: float, # Make timeout required, not optional
374
392
  ) -> ToolResult:
375
393
  """
376
- Execute a single tool call.
394
+ Execute a single tool call with guaranteed timeout.
377
395
 
378
396
  The entire invocation – including argument validation – is wrapped
379
397
  by the semaphore to honour *max_concurrency*.
380
398
 
381
399
  Args:
382
400
  call: Tool call to execute
383
- timeout: Optional timeout in seconds
401
+ timeout: Timeout in seconds (required)
384
402
 
385
403
  Returns:
386
404
  Tool execution result
@@ -389,6 +407,8 @@ class InProcessStrategy(ExecutionStrategy):
389
407
  machine = os.uname().nodename
390
408
  start = datetime.now(timezone.utc)
391
409
 
410
+ logger.debug("Executing %s with %ss timeout", call.tool, timeout)
411
+
392
412
  # Early exit if shutting down
393
413
  if self._shutting_down:
394
414
  return ToolResult(
@@ -464,19 +484,18 @@ class InProcessStrategy(ExecutionStrategy):
464
484
  self,
465
485
  tool: Any,
466
486
  call: ToolCall,
467
- timeout: float | None,
487
+ timeout: float, # Make timeout required, not optional
468
488
  start: datetime,
469
489
  machine: str,
470
490
  pid: int,
471
491
  ) -> ToolResult:
472
492
  """
473
- Resolve the correct async entry-point and invoke it with an optional
474
- timeout.
493
+ Resolve the correct async entry-point and invoke it with a guaranteed timeout.
475
494
 
476
495
  Args:
477
496
  tool: Tool instance
478
497
  call: Tool call data
479
- timeout: Optional timeout in seconds
498
+ timeout: Timeout in seconds (required)
480
499
  start: Start time for the execution
481
500
  machine: Machine name
482
501
  pid: Process ID
@@ -507,62 +526,46 @@ class InProcessStrategy(ExecutionStrategy):
507
526
  )
508
527
 
509
528
  try:
510
- if timeout:
511
- # Use a task with explicit cancellation
512
- task = asyncio.create_task(fn(**call.arguments))
529
+ # Always apply timeout
530
+ logger.debug("Applying %ss timeout to %s", timeout, call.tool)
531
+
532
+ try:
533
+ result_val = await asyncio.wait_for(fn(**call.arguments), timeout=timeout)
534
+
535
+ end_time = datetime.now(timezone.utc)
536
+ actual_duration = (end_time - start).total_seconds()
537
+ logger.debug("%s completed in %.3fs (limit: %ss)",
538
+ call.tool, actual_duration, timeout)
513
539
 
514
- try:
515
- # Wait for the task with timeout
516
- result_val = await asyncio.wait_for(task, timeout)
517
-
518
- return ToolResult(
519
- tool=call.tool,
520
- result=result_val,
521
- error=None,
522
- start_time=start,
523
- end_time=datetime.now(timezone.utc),
524
- machine=machine,
525
- pid=pid,
526
- )
527
- except asyncio.TimeoutError:
528
- # Cancel the task if it times out
529
- if not task.done():
530
- task.cancel()
531
-
532
- # Wait for cancellation to complete
533
- try:
534
- await task
535
- except asyncio.CancelledError:
536
- # Expected - we just cancelled it
537
- pass
538
- except Exception:
539
- # Ignore any other exceptions during cancellation
540
- pass
541
-
542
- # Return a timeout error
543
- return ToolResult(
544
- tool=call.tool,
545
- result=None,
546
- error=f"Timeout after {timeout}s",
547
- start_time=start,
548
- end_time=datetime.now(timezone.utc),
549
- machine=machine,
550
- pid=pid,
551
- )
552
- else:
553
- # No timeout
554
- result_val = await fn(**call.arguments)
555
540
  return ToolResult(
556
541
  tool=call.tool,
557
542
  result=result_val,
558
543
  error=None,
559
544
  start_time=start,
560
- end_time=datetime.now(timezone.utc),
545
+ end_time=end_time,
561
546
  machine=machine,
562
547
  pid=pid,
563
548
  )
549
+ except asyncio.TimeoutError:
550
+ # Handle timeout
551
+ end_time = datetime.now(timezone.utc)
552
+ actual_duration = (end_time - start).total_seconds()
553
+ logger.debug("%s timed out after %.3fs (limit: %ss)",
554
+ call.tool, actual_duration, timeout)
555
+
556
+ return ToolResult(
557
+ tool=call.tool,
558
+ result=None,
559
+ error=f"Timeout after {timeout}s",
560
+ start_time=start,
561
+ end_time=end_time,
562
+ machine=machine,
563
+ pid=pid,
564
+ )
565
+
564
566
  except asyncio.CancelledError:
565
567
  # Handle cancellation explicitly
568
+ logger.debug("%s was cancelled", call.tool)
566
569
  return ToolResult(
567
570
  tool=call.tool,
568
571
  result=None,
@@ -574,12 +577,16 @@ class InProcessStrategy(ExecutionStrategy):
574
577
  )
575
578
  except Exception as exc:
576
579
  logger.exception("Error executing %s: %s", call.tool, exc)
580
+ end_time = datetime.now(timezone.utc)
581
+ actual_duration = (end_time - start).total_seconds()
582
+ logger.debug("%s failed after %.3fs: %s", call.tool, actual_duration, exc)
583
+
577
584
  return ToolResult(
578
585
  tool=call.tool,
579
586
  result=None,
580
587
  error=str(exc),
581
588
  start_time=start,
582
- end_time=datetime.now(timezone.utc),
589
+ end_time=end_time,
583
590
  machine=machine,
584
591
  pid=pid,
585
592
  )
@@ -4,6 +4,8 @@ Subprocess execution strategy - truly runs tools in separate OS processes.
4
4
 
5
5
  This strategy executes tools in separate Python processes using a process pool,
6
6
  providing isolation and potentially better parallelism on multi-core systems.
7
+
8
+ FIXED: Ensures consistent timeout handling across all execution paths.
7
9
  """
8
10
  from __future__ import annotations
9
11
 
@@ -133,7 +135,7 @@ def _process_worker(
133
135
 
134
136
  try:
135
137
  # Execute the tool with timeout
136
- if timeout:
138
+ if timeout is not None and timeout > 0:
137
139
  result_value = loop.run_until_complete(
138
140
  asyncio.wait_for(execute_fn(**arguments), timeout)
139
141
  )
@@ -192,7 +194,7 @@ class SubprocessStrategy(ExecutionStrategy):
192
194
  """
193
195
  self.registry = registry
194
196
  self.max_workers = max_workers
195
- self.default_timeout = default_timeout
197
+ self.default_timeout = default_timeout or 30.0 # Always have a default
196
198
  self.worker_init_timeout = worker_init_timeout
197
199
 
198
200
  # Process pool (initialized lazily)
@@ -204,6 +206,9 @@ class SubprocessStrategy(ExecutionStrategy):
204
206
  self._shutdown_event = asyncio.Event()
205
207
  self._shutting_down = False
206
208
 
209
+ logger.debug("SubprocessStrategy initialized with timeout: %ss, max_workers: %d",
210
+ self.default_timeout, max_workers)
211
+
207
212
  # Register shutdown handler if in main thread
208
213
  try:
209
214
  loop = asyncio.get_running_loop()
@@ -238,12 +243,12 @@ class SubprocessStrategy(ExecutionStrategy):
238
243
  loop.run_in_executor(self._process_pool, _pool_test_func),
239
244
  timeout=self.worker_init_timeout
240
245
  )
241
- logger.info(f"Process pool initialized with {self.max_workers} workers")
246
+ logger.info("Process pool initialized with %d workers", self.max_workers)
242
247
  except Exception as e:
243
248
  # Clean up on initialization error
244
249
  self._process_pool.shutdown(wait=False)
245
250
  self._process_pool = None
246
- logger.error(f"Failed to initialize process pool: {e}")
251
+ logger.error("Failed to initialize process pool: %s", e)
247
252
  raise RuntimeError(f"Failed to initialize process pool: {e}") from e
248
253
 
249
254
  # ------------------------------------------------------------------ #
@@ -296,12 +301,16 @@ class SubprocessStrategy(ExecutionStrategy):
296
301
  )
297
302
  for call in calls
298
303
  ]
304
+
305
+ # Use default_timeout if no timeout specified
306
+ effective_timeout = timeout if timeout is not None else self.default_timeout
307
+ logger.debug("Executing %d calls in subprocesses with %ss timeout each", len(calls), effective_timeout)
299
308
 
300
309
  # Create tasks for each call
301
310
  tasks = []
302
311
  for call in calls:
303
312
  task = asyncio.create_task(self._execute_single_call(
304
- call, timeout or self.default_timeout
313
+ call, effective_timeout # Always pass concrete timeout
305
314
  ))
306
315
  self._active_tasks.add(task)
307
316
  task.add_done_callback(self._active_tasks.discard)
@@ -342,6 +351,9 @@ class SubprocessStrategy(ExecutionStrategy):
342
351
  pid=os.getpid(),
343
352
  )
344
353
  return
354
+
355
+ # Use default_timeout if no timeout specified
356
+ effective_timeout = timeout if timeout is not None else self.default_timeout
345
357
 
346
358
  # Create a queue for results
347
359
  queue = asyncio.Queue()
@@ -350,7 +362,7 @@ class SubprocessStrategy(ExecutionStrategy):
350
362
  pending = set()
351
363
  for call in calls:
352
364
  task = asyncio.create_task(self._execute_to_queue(
353
- call, queue, timeout or self.default_timeout
365
+ call, queue, effective_timeout # Always pass concrete timeout
354
366
  ))
355
367
  self._active_tasks.add(task)
356
368
  task.add_done_callback(self._active_tasks.discard)
@@ -372,13 +384,13 @@ class SubprocessStrategy(ExecutionStrategy):
372
384
  try:
373
385
  await task
374
386
  except Exception as e:
375
- logger.exception(f"Error in task: {e}")
387
+ logger.exception("Error in task: %s", e)
376
388
 
377
389
  async def _execute_to_queue(
378
390
  self,
379
391
  call: ToolCall,
380
392
  queue: asyncio.Queue,
381
- timeout: Optional[float],
393
+ timeout: float, # Make timeout required
382
394
  ) -> None:
383
395
  """Execute a single call and put the result in the queue."""
384
396
  result = await self._execute_single_call(call, timeout)
@@ -387,20 +399,22 @@ class SubprocessStrategy(ExecutionStrategy):
387
399
  async def _execute_single_call(
388
400
  self,
389
401
  call: ToolCall,
390
- timeout: Optional[float],
402
+ timeout: float, # Make timeout required
391
403
  ) -> ToolResult:
392
404
  """
393
405
  Execute a single tool call in a separate process.
394
406
 
395
407
  Args:
396
408
  call: Tool call to execute
397
- timeout: Optional timeout in seconds
409
+ timeout: Timeout in seconds (required)
398
410
 
399
411
  Returns:
400
412
  Tool execution result
401
413
  """
402
414
  start_time = datetime.now(timezone.utc)
403
415
 
416
+ logger.debug("Executing %s in subprocess with %ss timeout", call.tool, timeout)
417
+
404
418
  try:
405
419
  # Ensure pool is initialized
406
420
  await self._ensure_pool()
@@ -429,8 +443,8 @@ class SubprocessStrategy(ExecutionStrategy):
429
443
  # Execute in subprocess
430
444
  loop = asyncio.get_running_loop()
431
445
 
432
- # We need to add safety timeout here to handle process crashes
433
- safety_timeout = (timeout or self.default_timeout or 60.0) + 5.0
446
+ # Add safety timeout to handle process crashes (tool timeout + buffer)
447
+ safety_timeout = timeout + 5.0
434
448
 
435
449
  try:
436
450
  result_data = await asyncio.wait_for(
@@ -443,7 +457,7 @@ class SubprocessStrategy(ExecutionStrategy):
443
457
  module_name,
444
458
  class_name,
445
459
  call.arguments,
446
- timeout
460
+ timeout # Pass the actual timeout to worker
447
461
  )
448
462
  ),
449
463
  timeout=safety_timeout
@@ -458,25 +472,40 @@ class SubprocessStrategy(ExecutionStrategy):
458
472
  end_time_str = result_data["end_time"]
459
473
  result_data["end_time"] = datetime.fromisoformat(end_time_str)
460
474
 
475
+ end_time = datetime.now(timezone.utc)
476
+ actual_duration = (end_time - start_time).total_seconds()
477
+
478
+ if result_data.get("error"):
479
+ logger.debug("%s subprocess failed after %.3fs: %s",
480
+ call.tool, actual_duration, result_data["error"])
481
+ else:
482
+ logger.debug("%s subprocess completed in %.3fs (limit: %ss)",
483
+ call.tool, actual_duration, timeout)
484
+
461
485
  # Create ToolResult from worker data
462
486
  return ToolResult(
463
487
  tool=result_data.get("tool", call.tool),
464
488
  result=result_data.get("result"),
465
489
  error=result_data.get("error"),
466
490
  start_time=result_data.get("start_time", start_time),
467
- end_time=result_data.get("end_time", datetime.now(timezone.utc)),
491
+ end_time=result_data.get("end_time", end_time),
468
492
  machine=result_data.get("machine", os.uname().nodename),
469
493
  pid=result_data.get("pid", os.getpid()),
470
494
  )
471
495
 
472
496
  except asyncio.TimeoutError:
473
497
  # This happens if the worker process itself hangs
498
+ end_time = datetime.now(timezone.utc)
499
+ actual_duration = (end_time - start_time).total_seconds()
500
+ logger.debug("%s subprocess timed out after %.3fs (safety limit: %ss)",
501
+ call.tool, actual_duration, safety_timeout)
502
+
474
503
  return ToolResult(
475
504
  tool=call.tool,
476
505
  result=None,
477
506
  error=f"Worker process timed out after {safety_timeout}s",
478
507
  start_time=start_time,
479
- end_time=datetime.now(timezone.utc),
508
+ end_time=end_time,
480
509
  machine=os.uname().nodename,
481
510
  pid=os.getpid(),
482
511
  )
@@ -500,6 +529,7 @@ class SubprocessStrategy(ExecutionStrategy):
500
529
 
501
530
  except asyncio.CancelledError:
502
531
  # Handle cancellation
532
+ logger.debug("%s subprocess was cancelled", call.tool)
503
533
  return ToolResult(
504
534
  tool=call.tool,
505
535
  result=None,
@@ -512,13 +542,18 @@ class SubprocessStrategy(ExecutionStrategy):
512
542
 
513
543
  except Exception as e:
514
544
  # Handle any other errors
515
- logger.exception(f"Error executing {call.tool} in subprocess: {e}")
545
+ logger.exception("Error executing %s in subprocess: %s", call.tool, e)
546
+ end_time = datetime.now(timezone.utc)
547
+ actual_duration = (end_time - start_time).total_seconds()
548
+ logger.debug("%s subprocess setup failed after %.3fs: %s",
549
+ call.tool, actual_duration, e)
550
+
516
551
  return ToolResult(
517
552
  tool=call.tool,
518
553
  result=None,
519
554
  error=f"Error: {str(e)}",
520
555
  start_time=start_time,
521
- end_time=datetime.now(timezone.utc),
556
+ end_time=end_time,
522
557
  machine=os.uname().nodename,
523
558
  pid=os.getpid(),
524
559
  )
@@ -531,7 +566,7 @@ class SubprocessStrategy(ExecutionStrategy):
531
566
  async def _signal_handler(self, sig: int) -> None:
532
567
  """Handle termination signals."""
533
568
  signame = signal.Signals(sig).name
534
- logger.info(f"Received {signame}, shutting down process pool")
569
+ logger.info("Received %s, shutting down process pool", signame)
535
570
  await self.shutdown()
536
571
 
537
572
  async def shutdown(self) -> None:
@@ -549,7 +584,7 @@ class SubprocessStrategy(ExecutionStrategy):
549
584
  # Cancel all active tasks
550
585
  active_tasks = list(self._active_tasks)
551
586
  if active_tasks:
552
- logger.info(f"Cancelling {len(active_tasks)} active tool executions")
587
+ logger.info("Cancelling %d active tool executions", len(active_tasks))
553
588
  for task in active_tasks:
554
589
  task.cancel()
555
590
 
@@ -36,9 +36,11 @@ class MCPTool:
36
36
  servers: Optional[List[str]] = None,
37
37
  server_names: Optional[Dict[int, str]] = None,
38
38
  namespace: str = "stdio",
39
+ default_timeout: Optional[float] = None, # Add default timeout support
39
40
  ) -> None:
40
41
  self.tool_name = tool_name
41
42
  self._sm: Optional[StreamManager] = stream_manager
43
+ self.default_timeout = default_timeout or 30.0 # Default to 30s if not specified
42
44
 
43
45
  # Boot-strap parameters (only needed if _sm is None)
44
46
  self._cfg_file = cfg_file
@@ -78,21 +80,56 @@ class MCPTool:
78
80
  return self._sm # type: ignore[return-value]
79
81
 
80
82
  # ------------------------------------------------------------------ #
81
- async def execute(self, **kwargs: Any) -> Any:
83
+ async def execute(self, timeout: Optional[float] = None, **kwargs: Any) -> Any:
82
84
  """
83
- Forward the call to the remote MCP tool.
85
+ Forward the call to the remote MCP tool with timeout support.
86
+
87
+ Args:
88
+ timeout: Optional timeout for this specific call. If not provided,
89
+ uses the instance's default_timeout.
90
+ **kwargs: Arguments to pass to the MCP tool.
91
+
92
+ Returns:
93
+ The result from the MCP tool call.
84
94
 
85
95
  Raises
86
96
  ------
87
97
  RuntimeError
88
98
  If the server returns an error payload.
99
+ asyncio.TimeoutError
100
+ If the call times out.
89
101
  """
90
102
  sm = await self._ensure_stream_manager()
91
- result = await sm.call_tool(tool_name=self.tool_name, arguments=kwargs)
103
+
104
+ # Use provided timeout, fall back to instance default, then global default
105
+ effective_timeout = timeout if timeout is not None else self.default_timeout
106
+
107
+ logger.debug("Calling MCP tool '%s' with timeout: %ss", self.tool_name, effective_timeout)
108
+
109
+ try:
110
+ # Pass timeout directly to StreamManager instead of wrapping with wait_for
111
+ result = await sm.call_tool(
112
+ tool_name=self.tool_name,
113
+ arguments=kwargs,
114
+ timeout=effective_timeout
115
+ )
116
+
117
+ if result.get("isError"):
118
+ err = result.get("error", "Unknown error")
119
+ logger.error("Remote MCP error from '%s': %s", self.tool_name, err)
120
+ raise RuntimeError(err)
121
+
122
+ return result.get("content")
123
+
124
+ except asyncio.TimeoutError:
125
+ logger.warning("MCP tool '%s' timed out after %ss", self.tool_name, effective_timeout)
126
+ raise
127
+ except Exception as e:
128
+ logger.error("Error calling MCP tool '%s': %s", self.tool_name, e)
129
+ raise
92
130
 
93
- if result.get("isError"):
94
- err = result.get("error", "Unknown error")
95
- logger.error("Remote MCP error from '%s': %s", self.tool_name, err)
96
- raise RuntimeError(err)
97
-
98
- return result.get("content")
131
+ # ------------------------------------------------------------------ #
132
+ # Legacy method name support
133
+ async def _aexecute(self, timeout: Optional[float] = None, **kwargs: Any) -> Any:
134
+ """Legacy alias for execute() method."""
135
+ return await self.execute(timeout=timeout, **kwargs)
@@ -14,6 +14,7 @@ Utility that wires up:
14
14
 
15
15
  from __future__ import annotations
16
16
 
17
+ import os
17
18
  from typing import Dict, List, Optional, Tuple
18
19
 
19
20
  from chuk_tool_processor.core.processor import ToolProcessor
@@ -47,7 +48,26 @@ async def setup_mcp_sse( # noqa: C901 – long, but just a config wrapper
47
48
  and return a ready-to-go :class:`ToolProcessor`.
48
49
 
49
50
  Everything is **async-native** – call with ``await``.
51
+
52
+ NEW: Automatically detects and adds bearer token from MCP_BEARER_TOKEN
53
+ environment variable if not explicitly provided in server config.
50
54
  """
55
+
56
+ # NEW: Auto-detect and add bearer token to servers if available
57
+ bearer_token = os.getenv("MCP_BEARER_TOKEN")
58
+ if bearer_token:
59
+ logger.info("Found MCP_BEARER_TOKEN environment variable, adding to server configs")
60
+
61
+ # Add api_key to servers that don't already have it
62
+ enhanced_servers = []
63
+ for server in servers:
64
+ enhanced_server = dict(server) # Make a copy
65
+ if "api_key" not in enhanced_server and bearer_token:
66
+ enhanced_server["api_key"] = bearer_token
67
+ logger.info("Added bearer token to server: %s", enhanced_server.get("name", "unnamed"))
68
+ enhanced_servers.append(enhanced_server)
69
+ servers = enhanced_servers
70
+
51
71
  # 1️⃣ connect to the remote MCP servers
52
72
  stream_manager = await StreamManager.create_with_sse(
53
73
  servers=servers,
@@ -76,4 +96,4 @@ async def setup_mcp_sse( # noqa: C901 – long, but just a config wrapper
76
96
  "" if len(registered) == 1 else "s",
77
97
  namespace,
78
98
  )
79
- return processor, stream_manager
99
+ return processor, stream_manager