chuk-tool-processor 0.6.12__py3-none-any.whl → 0.6.14__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.

Potentially problematic release.


This version of chuk-tool-processor might be problematic.

Files changed (56)
  1. chuk_tool_processor/core/__init__.py +1 -1
  2. chuk_tool_processor/core/exceptions.py +10 -4
  3. chuk_tool_processor/core/processor.py +97 -97
  4. chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
  5. chuk_tool_processor/execution/strategies/subprocess_strategy.py +200 -205
  6. chuk_tool_processor/execution/tool_executor.py +82 -84
  7. chuk_tool_processor/execution/wrappers/caching.py +102 -103
  8. chuk_tool_processor/execution/wrappers/rate_limiting.py +45 -42
  9. chuk_tool_processor/execution/wrappers/retry.py +23 -25
  10. chuk_tool_processor/logging/__init__.py +23 -17
  11. chuk_tool_processor/logging/context.py +40 -45
  12. chuk_tool_processor/logging/formatter.py +22 -21
  13. chuk_tool_processor/logging/helpers.py +24 -38
  14. chuk_tool_processor/logging/metrics.py +11 -13
  15. chuk_tool_processor/mcp/__init__.py +8 -12
  16. chuk_tool_processor/mcp/mcp_tool.py +124 -112
  17. chuk_tool_processor/mcp/register_mcp_tools.py +17 -17
  18. chuk_tool_processor/mcp/setup_mcp_http_streamable.py +11 -13
  19. chuk_tool_processor/mcp/setup_mcp_sse.py +11 -13
  20. chuk_tool_processor/mcp/setup_mcp_stdio.py +7 -9
  21. chuk_tool_processor/mcp/stream_manager.py +168 -204
  22. chuk_tool_processor/mcp/transport/__init__.py +4 -4
  23. chuk_tool_processor/mcp/transport/base_transport.py +43 -58
  24. chuk_tool_processor/mcp/transport/http_streamable_transport.py +145 -163
  25. chuk_tool_processor/mcp/transport/sse_transport.py +217 -255
  26. chuk_tool_processor/mcp/transport/stdio_transport.py +188 -190
  27. chuk_tool_processor/models/__init__.py +1 -1
  28. chuk_tool_processor/models/execution_strategy.py +16 -21
  29. chuk_tool_processor/models/streaming_tool.py +28 -25
  30. chuk_tool_processor/models/tool_call.py +19 -34
  31. chuk_tool_processor/models/tool_export_mixin.py +22 -8
  32. chuk_tool_processor/models/tool_result.py +40 -77
  33. chuk_tool_processor/models/validated_tool.py +14 -16
  34. chuk_tool_processor/plugins/__init__.py +1 -1
  35. chuk_tool_processor/plugins/discovery.py +10 -10
  36. chuk_tool_processor/plugins/parsers/__init__.py +1 -1
  37. chuk_tool_processor/plugins/parsers/base.py +1 -2
  38. chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
  39. chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
  40. chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
  41. chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
  42. chuk_tool_processor/registry/__init__.py +12 -12
  43. chuk_tool_processor/registry/auto_register.py +22 -30
  44. chuk_tool_processor/registry/decorators.py +127 -129
  45. chuk_tool_processor/registry/interface.py +26 -23
  46. chuk_tool_processor/registry/metadata.py +27 -22
  47. chuk_tool_processor/registry/provider.py +17 -18
  48. chuk_tool_processor/registry/providers/__init__.py +16 -19
  49. chuk_tool_processor/registry/providers/memory.py +18 -25
  50. chuk_tool_processor/registry/tool_export.py +42 -51
  51. chuk_tool_processor/utils/validation.py +15 -16
  52. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.14.dist-info}/METADATA +1 -1
  53. chuk_tool_processor-0.6.14.dist-info/RECORD +60 -0
  54. chuk_tool_processor-0.6.12.dist-info/RECORD +0 -60
  55. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.14.dist-info}/WHEEL +0 -0
  56. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.14.dist-info}/top_level.txt +0 -0
--- a/chuk_tool_processor/mcp/transport/stdio_transport.py
+++ b/chuk_tool_processor/mcp/transport/stdio_transport.py
@@ -3,20 +3,26 @@ from __future__ import annotations
 
 import asyncio
 import json
+import logging
+import os
 import time
-import signal
+from typing import Any
+
 import psutil
-from typing import Dict, Any, List, Optional
-import logging
+from chuk_mcp.protocol.messages import ( # type: ignore[import-untyped]
+    send_initialize,
+    send_ping,
+    send_prompts_get,
+    send_prompts_list,
+    send_resources_list,
+    send_resources_read,
+    send_tools_call,
+    send_tools_list,
+)
+from chuk_mcp.transports.stdio import stdio_client # type: ignore[import-untyped]
+from chuk_mcp.transports.stdio.parameters import StdioParameters # type: ignore[import-untyped]
 
 from .base_transport import MCPBaseTransport
-from chuk_mcp.transports.stdio import stdio_client
-from chuk_mcp.transports.stdio.parameters import StdioParameters
-from chuk_mcp.protocol.messages import (
-    send_initialize, send_ping, send_tools_list, send_tools_call,
-    send_resources_list, send_resources_read,
-    send_prompts_list, send_prompts_get,
-)
 
 logger = logging.getLogger(__name__)
 
@@ -24,19 +30,22 @@ logger = logging.getLogger(__name__)
 class StdioTransport(MCPBaseTransport):
     """
     STDIO transport for MCP communication using process pipes.
-
-    ENHANCED: Now matches SSE transport robustness with improved process
+
+    ENHANCED: Now matches SSE transport robustness with improved process
     management, health monitoring, and comprehensive error handling.
     """
 
-    def __init__(self, server_params,
-                 connection_timeout: float = 30.0,
-                 default_timeout: float = 30.0,
-                 enable_metrics: bool = True,
-                 process_monitor: bool = True): # NEW
+    def __init__(
+        self,
+        server_params,
+        connection_timeout: float = 30.0,
+        default_timeout: float = 30.0,
+        enable_metrics: bool = True,
+        process_monitor: bool = True,
+    ): # NEW
         """
         Initialize STDIO transport with enhanced configuration.
-
+
         Args:
             server_params: Server parameters (dict or StdioParameters object)
            connection_timeout: Timeout for initial connection setup
@@ -46,31 +55,46 @@ class StdioTransport(MCPBaseTransport):
         """
         # Convert dict to StdioParameters if needed
         if isinstance(server_params, dict):
+            # Merge provided env with system environment to ensure PATH is available
+            merged_env = os.environ.copy()
+            if server_params.get("env"):
+                merged_env.update(server_params["env"])
+
             self.server_params = StdioParameters(
-                command=server_params.get('command', 'python'),
-                args=server_params.get('args', []),
-                env=server_params.get('env')
+                command=server_params.get("command", "python"),
+                args=server_params.get("args", []),
+                env=merged_env,
             )
         else:
-            self.server_params = server_params
-
+            # Also handle StdioParameters object - merge env if provided
+            # Create a new StdioParameters with merged env (Pydantic models are immutable)
+            merged_env = os.environ.copy()
+            if hasattr(server_params, "env") and server_params.env:
+                merged_env.update(server_params.env)
+
+            self.server_params = StdioParameters(
+                command=server_params.command,
+                args=server_params.args,
+                env=merged_env,
+            )
+
         self.connection_timeout = connection_timeout
         self.default_timeout = default_timeout
         self.enable_metrics = enable_metrics
         self.process_monitor = process_monitor # NEW
-
+
         # Connection state
         self._context = None
         self._streams = None
         self._initialized = False
-
+
         # Process monitoring (NEW - like SSE's health monitoring)
         self._process_id = None
         self._process_start_time = None
         self._last_successful_ping = None
         self._consecutive_failures = 0
         self._max_consecutive_failures = 3
-
+
         # Enhanced performance metrics (like SSE)
         self._metrics = {
             "total_calls": 0,
@@ -87,20 +111,19 @@ class StdioTransport(MCPBaseTransport):
             "memory_usage_mb": 0.0, # NEW
             "cpu_percent": 0.0, # NEW
         }
-
-        logger.debug("STDIO transport initialized for command: %s",
-                     getattr(self.server_params, 'command', 'unknown'))
 
-    async def _get_process_info(self) -> Optional[Dict[str, Any]]:
+        logger.debug("STDIO transport initialized for command: %s", getattr(self.server_params, "command", "unknown"))
+
+    async def _get_process_info(self) -> dict[str, Any] | None:
         """Get process information for monitoring (NEW)."""
         if not self._process_id or not self.process_monitor:
             return None
-
+
         try:
             # FIXED: Validate PID is a real integer before using psutil
             if not isinstance(self._process_id, int) or self._process_id <= 0:
                 return None
-
+
             process = psutil.Process(self._process_id)
             if process.is_running():
                 memory_info = process.memory_info()
@@ -110,7 +133,7 @@ class StdioTransport(MCPBaseTransport):
                     "memory_mb": memory_info.rss / 1024 / 1024,
                     "cpu_percent": process.cpu_percent(),
                     "create_time": process.create_time(),
-                    "uptime": time.time() - self._process_start_time if self._process_start_time else 0
+                    "uptime": time.time() - self._process_start_time if self._process_start_time else 0,
                 }
         except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError, TypeError, ValueError):
             # FIXED: Handle all possible errors including TypeError from mock objects
@@ -121,32 +144,32 @@ class StdioTransport(MCPBaseTransport):
         """Monitor subprocess health (NEW - like SSE's health monitoring)."""
         if not self.process_monitor:
             return True
-
+
         # FIXED: Check if process_id is valid before monitoring
         if not self._process_id or not isinstance(self._process_id, int) or self._process_id <= 0:
             return True # No monitoring if no valid PID
-
+
         process_info = await self._get_process_info()
         if not process_info:
             logger.debug("Process monitoring unavailable (may be in test environment)")
             return True # Don't fail in test environments
-
+
         # Update metrics with process info
         if self.enable_metrics:
             self._metrics["memory_usage_mb"] = process_info["memory_mb"]
             self._metrics["cpu_percent"] = process_info["cpu_percent"]
-
+
         # Check for concerning process states
         status = process_info.get("status", "unknown")
         if status in ["zombie", "dead"]:
             logger.error("Process is in %s state", status)
             return False
-
+
         # Check for excessive memory usage (warn at 1GB)
         memory_mb = process_info.get("memory_mb", 0)
         if memory_mb > 1024:
             logger.warning("Process using excessive memory: %.1f MB", memory_mb)
-
+
         return True
 
     async def initialize(self) -> bool:
@@ -154,53 +177,47 @@ class StdioTransport(MCPBaseTransport):
         if self._initialized:
             logger.warning("Transport already initialized")
             return True
-
+
         start_time = time.time()
-
+
         try:
             logger.debug("Initializing STDIO transport...")
-
+
             # Create context with timeout protection
             self._context = stdio_client(self.server_params)
-            self._streams = await asyncio.wait_for(
-                self._context.__aenter__(),
-                timeout=self.connection_timeout
-            )
-
+            self._streams = await asyncio.wait_for(self._context.__aenter__(), timeout=self.connection_timeout)
+
             # Capture process information for monitoring (NEW)
-            if self.process_monitor and hasattr(self._context, '_process'):
-                self._process_id = getattr(self._context._process, 'pid', None)
+            if self.process_monitor and hasattr(self._context, "_process"):
+                self._process_id = getattr(self._context._process, "pid", None)
                 self._process_start_time = time.time()
                 logger.debug("Subprocess PID: %s", self._process_id)
-
+
             # Send initialize message with timeout
-            init_result = await asyncio.wait_for(
-                send_initialize(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            init_result = await asyncio.wait_for(send_initialize(*self._streams), timeout=self.default_timeout)
+
             if init_result:
                 # Enhanced health verification (like SSE)
                 logger.debug("Verifying connection with ping...")
                 ping_start = time.time()
-                ping_success = await asyncio.wait_for(
-                    send_ping(*self._streams),
-                    timeout=10.0
-                )
+                ping_success = await asyncio.wait_for(send_ping(*self._streams), timeout=10.0)
                 ping_time = time.time() - ping_start
-
+
                 if ping_success:
                     self._initialized = True
                     self._last_successful_ping = time.time()
                     self._consecutive_failures = 0
-
+
                     if self.enable_metrics:
                         init_time = time.time() - start_time
                         self._metrics["initialization_time"] = init_time
                         self._metrics["last_ping_time"] = ping_time
-
-                    logger.debug("STDIO transport initialized successfully in %.3fs (ping: %.3fs)",
-                                 time.time() - start_time, ping_time)
+
+                    logger.debug(
+                        "STDIO transport initialized successfully in %.3fs (ping: %.3fs)",
+                        time.time() - start_time,
+                        ping_time,
+                    )
                     return True
                 else:
                     logger.warning("STDIO connection established but ping failed")
@@ -214,8 +231,8 @@ class StdioTransport(MCPBaseTransport):
                 logger.error("STDIO initialization failed")
                 await self._cleanup()
                 return False
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             logger.error("STDIO initialization timed out after %ss", self.connection_timeout)
             await self._cleanup()
             if self.enable_metrics:
@@ -233,16 +250,16 @@ class StdioTransport(MCPBaseTransport):
         if self.enable_metrics:
             self._metrics["recovery_attempts"] += 1
             self._metrics["process_restarts"] += 1
-
+
         logger.warning("Attempting STDIO process recovery...")
-
+
         try:
             # Force cleanup of existing process
             await self._cleanup()
-
+
             # Brief delay before restart
             await asyncio.sleep(1.0)
-
+
             # Re-initialize
             return await self.initialize()
         except Exception as e:
@@ -253,10 +270,10 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced close with process monitoring and metrics."""
         if not self._initialized:
             return
-
+
         # Enhanced metrics logging (like SSE)
         if self.enable_metrics and self._metrics["total_calls"] > 0:
-            success_rate = (self._metrics["successful_calls"] / self._metrics["total_calls"] * 100)
+            success_rate = self._metrics["successful_calls"] / self._metrics["total_calls"] * 100
             logger.debug(
                 "STDIO transport closing - Calls: %d, Success: %.1f%%, "
                 "Avg time: %.3fs, Restarts: %d, Crashes: %d, Memory: %.1f MB",
@@ -265,9 +282,9 @@ class StdioTransport(MCPBaseTransport):
                 self._metrics["avg_response_time"],
                 self._metrics["process_restarts"],
                 self._metrics["process_crashes"],
-                self._metrics["memory_usage_mb"]
+                self._metrics["memory_usage_mb"],
             )
-
+
         if self._context:
             try:
                 await self._context.__aexit__(None, None, None)
@@ -288,7 +305,7 @@ class StdioTransport(MCPBaseTransport):
                 if process.is_running():
                     logger.debug("Terminating subprocess %s", self._process_id)
                     process.terminate()
-
+
                     # Wait briefly for graceful termination
                     try:
                         process.wait(timeout=2.0)
@@ -298,7 +315,7 @@ class StdioTransport(MCPBaseTransport):
             except (psutil.NoSuchProcess, psutil.AccessDenied, TypeError, ValueError):
                 # FIXED: Handle all possible errors including TypeError from mock objects
                 logger.debug("Could not terminate process %s (may be mock or already dead)", self._process_id)
-
+
         self._context = None
         self._streams = None
         self._initialized = False
@@ -309,35 +326,36 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced ping with process health monitoring."""
         if not self._initialized:
             return False
-
+
         # Check process health first (NEW) - but only if we have a real process
-        if self.process_monitor and self._process_id and isinstance(self._process_id, int):
-            if not await self._monitor_process_health():
-                self._consecutive_failures += 1
-                return False
-
+        if (
+            self.process_monitor
+            and self._process_id
+            and isinstance(self._process_id, int)
+            and not await self._monitor_process_health()
+        ):
+            self._consecutive_failures += 1
+            return False
+
         start_time = time.time()
         try:
-            result = await asyncio.wait_for(
-                send_ping(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            result = await asyncio.wait_for(send_ping(*self._streams), timeout=self.default_timeout)
+
             success = bool(result)
-
+
             if success:
                 self._last_successful_ping = time.time()
                 self._consecutive_failures = 0
             else:
                 self._consecutive_failures += 1
-
+
             if self.enable_metrics:
                 ping_time = time.time() - start_time
                 self._metrics["last_ping_time"] = ping_time
                 logger.debug("STDIO ping completed in %.3fs: %s", ping_time, success)
-
+
             return success
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("STDIO ping timed out")
             self._consecutive_failures += 1
             return False
@@ -352,27 +370,24 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced connection status check (like SSE)."""
         if not self._initialized or not self._streams:
             return False
-
+
         # Check for too many consecutive failures (like SSE)
         if self._consecutive_failures >= self._max_consecutive_failures:
             logger.warning("Connection marked unhealthy after %d failures", self._consecutive_failures)
             return False
-
+
         return True
 
-    async def get_tools(self) -> List[Dict[str, Any]]:
+    async def get_tools(self) -> list[dict[str, Any]]:
         """Enhanced tools retrieval with recovery."""
         if not self._initialized:
             logger.error("Cannot get tools: transport not initialized")
             return []
-
+
         start_time = time.time()
         try:
-            response = await asyncio.wait_for(
-                send_tools_list(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            response = await asyncio.wait_for(send_tools_list(*self._streams), timeout=self.default_timeout)
+
             # Normalize response
             if isinstance(response, dict):
                 tools = response.get("tools", [])
@@ -381,17 +396,17 @@ class StdioTransport(MCPBaseTransport):
             else:
                 logger.warning("Unexpected tools response type: %s", type(response))
                 tools = []
-
+
             # Reset failure count on success
             self._consecutive_failures = 0
-
+
             if self.enable_metrics:
                 response_time = time.time() - start_time
                 logger.debug("Retrieved %d tools in %.3fs", len(tools), response_time)
-
+
             return tools
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             logger.error("Get tools timed out")
             self._consecutive_failures += 1
             return []
@@ -402,89 +417,80 @@ class StdioTransport(MCPBaseTransport):
             self._metrics["pipe_errors"] += 1
             return []
 
-    async def call_tool(self, tool_name: str, arguments: Dict[str, Any],
-                        timeout: Optional[float] = None) -> Dict[str, Any]:
+    async def call_tool(
+        self, tool_name: str, arguments: dict[str, Any], timeout: float | None = None
+    ) -> dict[str, Any]:
         """Enhanced tool calling with recovery and process monitoring."""
         if not self._initialized:
             return {"isError": True, "error": "Transport not initialized"}
 
         tool_timeout = timeout or self.default_timeout
         start_time = time.time()
-
+
         if self.enable_metrics:
             self._metrics["total_calls"] += 1
 
         try:
             logger.debug("Calling tool '%s' with timeout %ss", tool_name, tool_timeout)
-
+
             # Enhanced connection check with recovery attempt
             if not self.is_connected():
                 logger.warning("Connection unhealthy, attempting recovery...")
                 if not await self._attempt_recovery():
                     if self.enable_metrics:
                         self._update_metrics(time.time() - start_time, False)
-                    return {
-                        "isError": True,
-                        "error": "Failed to recover connection"
-                    }
-
+                    return {"isError": True, "error": "Failed to recover connection"}
+
             response = await asyncio.wait_for(
-                send_tools_call(*self._streams, tool_name, arguments),
-                timeout=tool_timeout
+                send_tools_call(*self._streams, tool_name, arguments), timeout=tool_timeout
             )
-
+
             response_time = time.time() - start_time
             result = self._normalize_mcp_response(response)
-
+
             # Reset failure count and update health on success
             self._consecutive_failures = 0
             self._last_successful_ping = time.time()
-
+
             if self.enable_metrics:
                 self._update_metrics(response_time, not result.get("isError", False))
-
+
             if not result.get("isError", False):
                 logger.debug("Tool '%s' completed successfully in %.3fs", tool_name, response_time)
             else:
-                logger.warning("Tool '%s' failed in %.3fs: %s", tool_name, response_time,
-                               result.get('error', 'Unknown error'))
-
+                logger.warning(
+                    "Tool '%s' failed in %.3fs: %s", tool_name, response_time, result.get("error", "Unknown error")
+                )
+
             return result
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             response_time = time.time() - start_time
             self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
-
+
             error_msg = f"Tool execution timed out after {tool_timeout}s"
             logger.error("Tool '%s' %s", tool_name, error_msg)
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            return {"isError": True, "error": error_msg}
         except Exception as e:
             response_time = time.time() - start_time
             self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
                 self._metrics["pipe_errors"] += 1
-
+
             # Enhanced process error detection
             error_str = str(e).lower()
-            if any(indicator in error_str for indicator in
-                   ["broken pipe", "process", "eof", "connection", "died"]):
+            if any(indicator in error_str for indicator in ["broken pipe", "process", "eof", "connection", "died"]):
                 logger.warning("Process error detected: %s", e)
                 self._initialized = False
                 if self.enable_metrics:
                     self._metrics["process_crashes"] += 1
-
+
             error_msg = f"Tool execution failed: {str(e)}"
             logger.error("Tool '%s' error: %s", tool_name, error_msg)
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            return {"isError": True, "error": error_msg}
 
     def _update_metrics(self, response_time: float, success: bool) -> None:
         """Enhanced metrics tracking (like SSE)."""
@@ -492,18 +498,16 @@ class StdioTransport(MCPBaseTransport):
             self._metrics["successful_calls"] += 1
         else:
             self._metrics["failed_calls"] += 1
-
+
         self._metrics["total_time"] += response_time
         if self._metrics["total_calls"] > 0:
-            self._metrics["avg_response_time"] = (
-                self._metrics["total_time"] / self._metrics["total_calls"]
-            )
+            self._metrics["avg_response_time"] = self._metrics["total_time"] / self._metrics["total_calls"]
 
-    def _normalize_mcp_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+    def _normalize_mcp_response(self, response: dict[str, Any]) -> dict[str, Any]:
         """
         Enhanced response normalization with STDIO-specific handling.
-
-        STDIO preserves string representations of numeric values for
+
+        STDIO preserves string representations of numeric values for
         backward compatibility with existing tests.
         """
         # Handle explicit error in response
@@ -511,63 +515,62 @@ class StdioTransport(MCPBaseTransport):
             error_info = response["error"]
             error_msg = error_info.get("message", str(error_info)) if isinstance(error_info, dict) else str(error_info)
             return {"isError": True, "error": error_msg}
-
+
         # Handle successful response with result
         if "result" in response:
             result = response["result"]
             if isinstance(result, dict) and "content" in result:
                 return {"isError": False, "content": self._extract_stdio_content(result["content"])}
             return {"isError": False, "content": result}
-
+
         # Handle direct content-based response
         if "content" in response:
             return {"isError": False, "content": self._extract_stdio_content(response["content"])}
-
+
         return {"isError": False, "content": response}
 
     def _extract_stdio_content(self, content_list: Any) -> Any:
         """
         Enhanced content extraction with STDIO-specific string preservation.
-
+
         STDIO transport preserves string representations of numeric values
         for backward compatibility with existing tests.
         """
         if not isinstance(content_list, list) or not content_list:
             return content_list
-
+
         if len(content_list) == 1:
             item = content_list[0]
             if isinstance(item, dict) and item.get("type") == "text":
                 text = item.get("text", "")
-
+
                 # STDIO-specific: preserve string format for numeric values
                 try:
                     parsed = json.loads(text)
                     # If the parsed result is a simple type and the original was a string,
                     # keep it as a string to maintain compatibility
-                    if isinstance(parsed, (int, float, bool)) and isinstance(text, str):
-                        # Check if this looks like a simple numeric string
-                        if text.strip().isdigit() or (text.strip().replace('.', '', 1).isdigit()):
-                            return text # Return as string for numeric values
+                    if (
+                        isinstance(parsed, int | float | bool)
+                        and isinstance(text, str)
+                        and (text.strip().isdigit() or text.strip().replace(".", "", 1).isdigit())
+                    ):
+                        return text # Return as string for numeric values
                     return parsed
                 except json.JSONDecodeError:
                     return text
             return item
-
+
         return content_list
 
-    async def list_resources(self) -> Dict[str, Any]:
+    async def list_resources(self) -> dict[str, Any]:
         """Enhanced resource listing with error handling."""
         if not self._initialized:
             return {}
         try:
-            response = await asyncio.wait_for(
-                send_resources_list(*self._streams),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_resources_list(*self._streams), timeout=self.default_timeout)
             self._consecutive_failures = 0 # Reset on success
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("List resources timed out")
             self._consecutive_failures += 1
             return {}
@@ -576,18 +579,15 @@ class StdioTransport(MCPBaseTransport):
             self._consecutive_failures += 1
             return {}
 
-    async def list_prompts(self) -> Dict[str, Any]:
+    async def list_prompts(self) -> dict[str, Any]:
         """Enhanced prompt listing with error handling."""
         if not self._initialized:
             return {}
         try:
-            response = await asyncio.wait_for(
-                send_prompts_list(*self._streams),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_prompts_list(*self._streams), timeout=self.default_timeout)
             self._consecutive_failures = 0 # Reset on success
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("List prompts timed out")
             self._consecutive_failures += 1
             return {}
@@ -596,18 +596,15 @@ class StdioTransport(MCPBaseTransport):
             self._consecutive_failures += 1
             return {}
 
-    async def read_resource(self, uri: str) -> Dict[str, Any]:
+    async def read_resource(self, uri: str) -> dict[str, Any]:
         """Read a specific resource."""
         if not self._initialized:
             return {}
         try:
-            response = await asyncio.wait_for(
-                send_resources_read(*self._streams, uri),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_resources_read(*self._streams, uri), timeout=self.default_timeout)
             self._consecutive_failures = 0 # Reset on success
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("Read resource timed out")
             self._consecutive_failures += 1
             return {}
@@ -616,18 +613,17 @@ class StdioTransport(MCPBaseTransport):
             self._consecutive_failures += 1
             return {}
 
-    async def get_prompt(self, name: str, arguments: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+    async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> dict[str, Any]:
         """Get a specific prompt."""
         if not self._initialized:
             return {}
         try:
             response = await asyncio.wait_for(
-                send_prompts_get(*self._streams, name, arguments or {}),
-                timeout=self.default_timeout
+                send_prompts_get(*self._streams, name, arguments or {}), timeout=self.default_timeout
             )
             self._consecutive_failures = 0 # Reset on success
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("Get prompt timed out")
             self._consecutive_failures += 1
             return {}
@@ -636,17 +632,19 @@ class StdioTransport(MCPBaseTransport):
             self._consecutive_failures += 1
             return {}
 
-    def get_metrics(self) -> Dict[str, Any]:
+    def get_metrics(self) -> dict[str, Any]:
         """Enhanced metrics with process and health information."""
         metrics = self._metrics.copy()
-        metrics.update({
-            "is_connected": self.is_connected(),
-            "consecutive_failures": self._consecutive_failures,
-            "last_successful_ping": self._last_successful_ping,
-            "max_consecutive_failures": self._max_consecutive_failures,
-            "process_id": self._process_id,
-            "process_uptime": (time.time() - self._process_start_time) if self._process_start_time else 0,
-        })
+        metrics.update(
+            {
+                "is_connected": self.is_connected(),
+                "consecutive_failures": self._consecutive_failures,
+                "last_successful_ping": self._last_successful_ping,
+                "max_consecutive_failures": self._max_consecutive_failures,
+                "process_id": self._process_id,
+                "process_uptime": (time.time() - self._process_start_time) if self._process_start_time else 0,
+            }
+        )
         return metrics
 
     def reset_metrics(self) -> None:
@@ -654,7 +652,7 @@ class StdioTransport(MCPBaseTransport):
         preserved_init_time = self._metrics.get("initialization_time")
         preserved_last_ping = self._metrics.get("last_ping_time")
         preserved_restarts = self._metrics.get("process_restarts", 0)
-
+
         self._metrics = {
             "total_calls": 0,
             "successful_calls": 0,
@@ -671,7 +669,7 @@ class StdioTransport(MCPBaseTransport):
             "cpu_percent": 0.0,
         }
 
-    def get_streams(self) -> List[tuple]:
+    def get_streams(self) -> list[tuple]:
         """Enhanced streams access with connection check."""
         return [self._streams] if self._streams else []
 
@@ -684,4 +682,4 @@ class StdioTransport(MCPBaseTransport):
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """Enhanced context manager cleanup."""
-        await self.close()
+        await self.close()
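
Note on the change above: in 0.6.14, StdioTransport.__init__ merges the caller-supplied env over a copy of os.environ (for both dict and StdioParameters inputs), so the spawned MCP server still sees PATH and the rest of the parent environment; in 0.6.12 only the explicitly passed env was forwarded. Below is a minimal usage sketch of that behavior, assuming chuk-tool-processor 0.6.14 is installed; the server command, module name, and env key are placeholders, not part of the package.

import asyncio

from chuk_tool_processor.mcp.transport.stdio_transport import StdioTransport


async def main() -> None:
    # Placeholder command/args; substitute a real MCP stdio server launcher.
    transport = StdioTransport(
        {
            "command": "python",
            "args": ["-m", "my_mcp_server"],  # hypothetical server module
            "env": {"MY_SERVER_FLAG": "1"},   # merged over os.environ in 0.6.14
        },
        connection_timeout=30.0,
        default_timeout=30.0,
    )

    # The merged environment is visible on the constructed StdioParameters:
    # PATH comes from the parent process, MY_SERVER_FLAG from the dict above.
    env = transport.server_params.env or {}
    print("PATH" in env, env.get("MY_SERVER_FLAG"))

    # Standard lifecycle from the transport shown in the diff.
    if await transport.initialize():
        try:
            tools = await transport.get_tools()
            print([t.get("name") for t in tools])
        finally:
            await transport.close()


if __name__ == "__main__":
    asyncio.run(main())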