chuk-tool-processor 0.6.12 → 0.6.13 (py3-none-any.whl)

This diff shows the contents of publicly available package versions as released to their public registries, and is provided for informational purposes only.

Potentially problematic release: this version of chuk-tool-processor might be problematic.

Files changed (56)
  1. chuk_tool_processor/core/__init__.py +1 -1
  2. chuk_tool_processor/core/exceptions.py +10 -4
  3. chuk_tool_processor/core/processor.py +97 -97
  4. chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
  5. chuk_tool_processor/execution/strategies/subprocess_strategy.py +200 -205
  6. chuk_tool_processor/execution/tool_executor.py +82 -84
  7. chuk_tool_processor/execution/wrappers/caching.py +102 -103
  8. chuk_tool_processor/execution/wrappers/rate_limiting.py +45 -42
  9. chuk_tool_processor/execution/wrappers/retry.py +23 -25
  10. chuk_tool_processor/logging/__init__.py +23 -17
  11. chuk_tool_processor/logging/context.py +40 -45
  12. chuk_tool_processor/logging/formatter.py +22 -21
  13. chuk_tool_processor/logging/helpers.py +24 -38
  14. chuk_tool_processor/logging/metrics.py +11 -13
  15. chuk_tool_processor/mcp/__init__.py +8 -12
  16. chuk_tool_processor/mcp/mcp_tool.py +124 -112
  17. chuk_tool_processor/mcp/register_mcp_tools.py +17 -17
  18. chuk_tool_processor/mcp/setup_mcp_http_streamable.py +11 -13
  19. chuk_tool_processor/mcp/setup_mcp_sse.py +11 -13
  20. chuk_tool_processor/mcp/setup_mcp_stdio.py +7 -9
  21. chuk_tool_processor/mcp/stream_manager.py +168 -204
  22. chuk_tool_processor/mcp/transport/__init__.py +4 -4
  23. chuk_tool_processor/mcp/transport/base_transport.py +43 -58
  24. chuk_tool_processor/mcp/transport/http_streamable_transport.py +145 -163
  25. chuk_tool_processor/mcp/transport/sse_transport.py +217 -255
  26. chuk_tool_processor/mcp/transport/stdio_transport.py +171 -189
  27. chuk_tool_processor/models/__init__.py +1 -1
  28. chuk_tool_processor/models/execution_strategy.py +16 -21
  29. chuk_tool_processor/models/streaming_tool.py +28 -25
  30. chuk_tool_processor/models/tool_call.py +19 -34
  31. chuk_tool_processor/models/tool_export_mixin.py +22 -8
  32. chuk_tool_processor/models/tool_result.py +40 -77
  33. chuk_tool_processor/models/validated_tool.py +14 -16
  34. chuk_tool_processor/plugins/__init__.py +1 -1
  35. chuk_tool_processor/plugins/discovery.py +10 -10
  36. chuk_tool_processor/plugins/parsers/__init__.py +1 -1
  37. chuk_tool_processor/plugins/parsers/base.py +1 -2
  38. chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
  39. chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
  40. chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
  41. chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
  42. chuk_tool_processor/registry/__init__.py +12 -12
  43. chuk_tool_processor/registry/auto_register.py +22 -30
  44. chuk_tool_processor/registry/decorators.py +127 -129
  45. chuk_tool_processor/registry/interface.py +26 -23
  46. chuk_tool_processor/registry/metadata.py +27 -22
  47. chuk_tool_processor/registry/provider.py +17 -18
  48. chuk_tool_processor/registry/providers/__init__.py +16 -19
  49. chuk_tool_processor/registry/providers/memory.py +18 -25
  50. chuk_tool_processor/registry/tool_export.py +42 -51
  51. chuk_tool_processor/utils/validation.py +15 -16
  52. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/METADATA +1 -1
  53. chuk_tool_processor-0.6.13.dist-info/RECORD +60 -0
  54. chuk_tool_processor-0.6.12.dist-info/RECORD +0 -60
  55. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/WHEEL +0 -0
  56. {chuk_tool_processor-0.6.12.dist-info → chuk_tool_processor-0.6.13.dist-info}/top_level.txt +0 -0
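Judging from the stdio_transport.py hunks shown below, the bulk of this release is mechanical modernization rather than behavioral change: typing.Dict/List/Optional annotations become built-in generics and PEP 604 unions, asyncio.TimeoutError handlers become the built-in TimeoutError (the two are the same class on Python 3.11+), string quoting and call-site layout are normalized to a formatter style, and the signal import is dropped. A minimal sketch of the target style, using a hypothetical helper that is not part of the package:

import asyncio
from typing import Any


async def _echo(payload: dict[str, Any]) -> dict[str, Any]:
    await asyncio.sleep(0)  # stand-in for real I/O
    return payload


# Hypothetical helper, shown only to illustrate the style the 0.6.13 code moves to:
# built-in generics (dict[str, Any]) and unions (float | None) instead of
# typing.Dict / typing.Optional, and the built-in TimeoutError, which is the same
# class as asyncio.TimeoutError on Python 3.11+.
async def call_with_timeout(payload: dict[str, Any], timeout: float | None = None) -> dict[str, Any] | None:
    try:
        return await asyncio.wait_for(_echo(payload), timeout=timeout)
    except TimeoutError:
        return None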
chuk_tool_processor/mcp/transport/stdio_transport.py
@@ -3,20 +3,25 @@ from __future__ import annotations
 
 import asyncio
 import json
+import logging
 import time
-import signal
+from typing import Any
+
 import psutil
-from typing import Dict, Any, List, Optional
-import logging
+from chuk_mcp.protocol.messages import (  # type: ignore[import-untyped]
+    send_initialize,
+    send_ping,
+    send_prompts_get,
+    send_prompts_list,
+    send_resources_list,
+    send_resources_read,
+    send_tools_call,
+    send_tools_list,
+)
+from chuk_mcp.transports.stdio import stdio_client  # type: ignore[import-untyped]
+from chuk_mcp.transports.stdio.parameters import StdioParameters  # type: ignore[import-untyped]
 
 from .base_transport import MCPBaseTransport
-from chuk_mcp.transports.stdio import stdio_client
-from chuk_mcp.transports.stdio.parameters import StdioParameters
-from chuk_mcp.protocol.messages import (
-    send_initialize, send_ping, send_tools_list, send_tools_call,
-    send_resources_list, send_resources_read,
-    send_prompts_list, send_prompts_get,
-)
 
 logger = logging.getLogger(__name__)
 
@@ -24,19 +29,22 @@ logger = logging.getLogger(__name__)
 class StdioTransport(MCPBaseTransport):
     """
     STDIO transport for MCP communication using process pipes.
-
-    ENHANCED: Now matches SSE transport robustness with improved process
+
+    ENHANCED: Now matches SSE transport robustness with improved process
     management, health monitoring, and comprehensive error handling.
     """
 
-    def __init__(self, server_params,
-                 connection_timeout: float = 30.0,
-                 default_timeout: float = 30.0,
-                 enable_metrics: bool = True,
-                 process_monitor: bool = True):  # NEW
+    def __init__(
+        self,
+        server_params,
+        connection_timeout: float = 30.0,
+        default_timeout: float = 30.0,
+        enable_metrics: bool = True,
+        process_monitor: bool = True,
+    ):  # NEW
         """
         Initialize STDIO transport with enhanced configuration.
-
+
         Args:
             server_params: Server parameters (dict or StdioParameters object)
             connection_timeout: Timeout for initial connection setup
@@ -47,30 +55,30 @@ class StdioTransport(MCPBaseTransport):
         # Convert dict to StdioParameters if needed
         if isinstance(server_params, dict):
             self.server_params = StdioParameters(
-                command=server_params.get('command', 'python'),
-                args=server_params.get('args', []),
-                env=server_params.get('env')
+                command=server_params.get("command", "python"),
+                args=server_params.get("args", []),
+                env=server_params.get("env"),
             )
         else:
             self.server_params = server_params
-
+
         self.connection_timeout = connection_timeout
         self.default_timeout = default_timeout
         self.enable_metrics = enable_metrics
         self.process_monitor = process_monitor  # NEW
-
+
         # Connection state
         self._context = None
         self._streams = None
         self._initialized = False
-
+
         # Process monitoring (NEW - like SSE's health monitoring)
         self._process_id = None
         self._process_start_time = None
         self._last_successful_ping = None
         self._consecutive_failures = 0
         self._max_consecutive_failures = 3
-
+
         # Enhanced performance metrics (like SSE)
         self._metrics = {
            "total_calls": 0,
@@ -87,20 +95,19 @@ class StdioTransport(MCPBaseTransport):
            "memory_usage_mb": 0.0,  # NEW
            "cpu_percent": 0.0,  # NEW
         }
-
-        logger.debug("STDIO transport initialized for command: %s",
-                     getattr(self.server_params, 'command', 'unknown'))
 
-    async def _get_process_info(self) -> Optional[Dict[str, Any]]:
+        logger.debug("STDIO transport initialized for command: %s", getattr(self.server_params, "command", "unknown"))
+
+    async def _get_process_info(self) -> dict[str, Any] | None:
         """Get process information for monitoring (NEW)."""
         if not self._process_id or not self.process_monitor:
             return None
-
+
         try:
             # FIXED: Validate PID is a real integer before using psutil
             if not isinstance(self._process_id, int) or self._process_id <= 0:
                 return None
-
+
             process = psutil.Process(self._process_id)
             if process.is_running():
                 memory_info = process.memory_info()
@@ -110,7 +117,7 @@ class StdioTransport(MCPBaseTransport):
                     "memory_mb": memory_info.rss / 1024 / 1024,
                     "cpu_percent": process.cpu_percent(),
                     "create_time": process.create_time(),
-                    "uptime": time.time() - self._process_start_time if self._process_start_time else 0
+                    "uptime": time.time() - self._process_start_time if self._process_start_time else 0,
                 }
         except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError, TypeError, ValueError):
             # FIXED: Handle all possible errors including TypeError from mock objects
@@ -121,32 +128,32 @@ class StdioTransport(MCPBaseTransport):
         """Monitor subprocess health (NEW - like SSE's health monitoring)."""
         if not self.process_monitor:
             return True
-
+
         # FIXED: Check if process_id is valid before monitoring
         if not self._process_id or not isinstance(self._process_id, int) or self._process_id <= 0:
             return True  # No monitoring if no valid PID
-
+
         process_info = await self._get_process_info()
         if not process_info:
             logger.debug("Process monitoring unavailable (may be in test environment)")
             return True  # Don't fail in test environments
-
+
         # Update metrics with process info
         if self.enable_metrics:
             self._metrics["memory_usage_mb"] = process_info["memory_mb"]
             self._metrics["cpu_percent"] = process_info["cpu_percent"]
-
+
         # Check for concerning process states
         status = process_info.get("status", "unknown")
         if status in ["zombie", "dead"]:
             logger.error("Process is in %s state", status)
             return False
-
+
         # Check for excessive memory usage (warn at 1GB)
         memory_mb = process_info.get("memory_mb", 0)
         if memory_mb > 1024:
             logger.warning("Process using excessive memory: %.1f MB", memory_mb)
-
+
         return True
 
     async def initialize(self) -> bool:
@@ -154,53 +161,47 @@ class StdioTransport(MCPBaseTransport):
         if self._initialized:
             logger.warning("Transport already initialized")
             return True
-
+
         start_time = time.time()
-
+
         try:
             logger.debug("Initializing STDIO transport...")
-
+
             # Create context with timeout protection
             self._context = stdio_client(self.server_params)
-            self._streams = await asyncio.wait_for(
-                self._context.__aenter__(),
-                timeout=self.connection_timeout
-            )
-
+            self._streams = await asyncio.wait_for(self._context.__aenter__(), timeout=self.connection_timeout)
+
             # Capture process information for monitoring (NEW)
-            if self.process_monitor and hasattr(self._context, '_process'):
-                self._process_id = getattr(self._context._process, 'pid', None)
+            if self.process_monitor and hasattr(self._context, "_process"):
+                self._process_id = getattr(self._context._process, "pid", None)
                 self._process_start_time = time.time()
                 logger.debug("Subprocess PID: %s", self._process_id)
-
+
             # Send initialize message with timeout
-            init_result = await asyncio.wait_for(
-                send_initialize(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            init_result = await asyncio.wait_for(send_initialize(*self._streams), timeout=self.default_timeout)
+
             if init_result:
                 # Enhanced health verification (like SSE)
                 logger.debug("Verifying connection with ping...")
                 ping_start = time.time()
-                ping_success = await asyncio.wait_for(
-                    send_ping(*self._streams),
-                    timeout=10.0
-                )
+                ping_success = await asyncio.wait_for(send_ping(*self._streams), timeout=10.0)
                 ping_time = time.time() - ping_start
-
+
                 if ping_success:
                     self._initialized = True
                     self._last_successful_ping = time.time()
                     self._consecutive_failures = 0
-
+
                     if self.enable_metrics:
                         init_time = time.time() - start_time
                         self._metrics["initialization_time"] = init_time
                         self._metrics["last_ping_time"] = ping_time
-
-                    logger.debug("STDIO transport initialized successfully in %.3fs (ping: %.3fs)",
-                                 time.time() - start_time, ping_time)
+
+                    logger.debug(
+                        "STDIO transport initialized successfully in %.3fs (ping: %.3fs)",
+                        time.time() - start_time,
+                        ping_time,
+                    )
                     return True
                 else:
                     logger.warning("STDIO connection established but ping failed")
@@ -214,8 +215,8 @@ class StdioTransport(MCPBaseTransport):
                 logger.error("STDIO initialization failed")
                 await self._cleanup()
                 return False
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             logger.error("STDIO initialization timed out after %ss", self.connection_timeout)
             await self._cleanup()
             if self.enable_metrics:
@@ -233,16 +234,16 @@ class StdioTransport(MCPBaseTransport):
         if self.enable_metrics:
             self._metrics["recovery_attempts"] += 1
             self._metrics["process_restarts"] += 1
-
+
         logger.warning("Attempting STDIO process recovery...")
-
+
         try:
             # Force cleanup of existing process
             await self._cleanup()
-
+
             # Brief delay before restart
             await asyncio.sleep(1.0)
-
+
             # Re-initialize
             return await self.initialize()
         except Exception as e:
@@ -253,10 +254,10 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced close with process monitoring and metrics."""
         if not self._initialized:
             return
-
+
         # Enhanced metrics logging (like SSE)
         if self.enable_metrics and self._metrics["total_calls"] > 0:
-            success_rate = (self._metrics["successful_calls"] / self._metrics["total_calls"] * 100)
+            success_rate = self._metrics["successful_calls"] / self._metrics["total_calls"] * 100
             logger.debug(
                 "STDIO transport closing - Calls: %d, Success: %.1f%%, "
                 "Avg time: %.3fs, Restarts: %d, Crashes: %d, Memory: %.1f MB",
@@ -265,9 +266,9 @@ class StdioTransport(MCPBaseTransport):
                 self._metrics["avg_response_time"],
                 self._metrics["process_restarts"],
                 self._metrics["process_crashes"],
-                self._metrics["memory_usage_mb"]
+                self._metrics["memory_usage_mb"],
             )
-
+
         if self._context:
             try:
                 await self._context.__aexit__(None, None, None)
@@ -288,7 +289,7 @@ class StdioTransport(MCPBaseTransport):
                 if process.is_running():
                     logger.debug("Terminating subprocess %s", self._process_id)
                     process.terminate()
-
+
                     # Wait briefly for graceful termination
                     try:
                         process.wait(timeout=2.0)
@@ -298,7 +299,7 @@ class StdioTransport(MCPBaseTransport):
             except (psutil.NoSuchProcess, psutil.AccessDenied, TypeError, ValueError):
                 # FIXED: Handle all possible errors including TypeError from mock objects
                 logger.debug("Could not terminate process %s (may be mock or already dead)", self._process_id)
-
+
         self._context = None
         self._streams = None
         self._initialized = False
@@ -309,35 +310,36 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced ping with process health monitoring."""
         if not self._initialized:
             return False
-
+
         # Check process health first (NEW) - but only if we have a real process
-        if self.process_monitor and self._process_id and isinstance(self._process_id, int):
-            if not await self._monitor_process_health():
-                self._consecutive_failures += 1
-                return False
-
+        if (
+            self.process_monitor
+            and self._process_id
+            and isinstance(self._process_id, int)
+            and not await self._monitor_process_health()
+        ):
+            self._consecutive_failures += 1
+            return False
+
         start_time = time.time()
         try:
-            result = await asyncio.wait_for(
-                send_ping(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            result = await asyncio.wait_for(send_ping(*self._streams), timeout=self.default_timeout)
+
             success = bool(result)
-
+
             if success:
                 self._last_successful_ping = time.time()
                 self._consecutive_failures = 0
             else:
                 self._consecutive_failures += 1
-
+
             if self.enable_metrics:
                 ping_time = time.time() - start_time
                 self._metrics["last_ping_time"] = ping_time
                 logger.debug("STDIO ping completed in %.3fs: %s", ping_time, success)
-
+
             return success
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("STDIO ping timed out")
             self._consecutive_failures += 1
             return False
@@ -352,27 +354,24 @@ class StdioTransport(MCPBaseTransport):
         """Enhanced connection status check (like SSE)."""
         if not self._initialized or not self._streams:
             return False
-
+
         # Check for too many consecutive failures (like SSE)
         if self._consecutive_failures >= self._max_consecutive_failures:
             logger.warning("Connection marked unhealthy after %d failures", self._consecutive_failures)
             return False
-
+
         return True
 
-    async def get_tools(self) -> List[Dict[str, Any]]:
+    async def get_tools(self) -> list[dict[str, Any]]:
         """Enhanced tools retrieval with recovery."""
         if not self._initialized:
             logger.error("Cannot get tools: transport not initialized")
             return []
-
+
         start_time = time.time()
         try:
-            response = await asyncio.wait_for(
-                send_tools_list(*self._streams),
-                timeout=self.default_timeout
-            )
-
+            response = await asyncio.wait_for(send_tools_list(*self._streams), timeout=self.default_timeout)
+
             # Normalize response
             if isinstance(response, dict):
                 tools = response.get("tools", [])
@@ -381,17 +380,17 @@ class StdioTransport(MCPBaseTransport):
             else:
                 logger.warning("Unexpected tools response type: %s", type(response))
                 tools = []
-
+
             # Reset failure count on success
             self._consecutive_failures = 0
-
+
             if self.enable_metrics:
                 response_time = time.time() - start_time
                 logger.debug("Retrieved %d tools in %.3fs", len(tools), response_time)
-
+
             return tools
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             logger.error("Get tools timed out")
             self._consecutive_failures += 1
             return []
@@ -402,89 +401,80 @@ class StdioTransport(MCPBaseTransport):
                 self._metrics["pipe_errors"] += 1
             return []
 
-    async def call_tool(self, tool_name: str, arguments: Dict[str, Any],
-                        timeout: Optional[float] = None) -> Dict[str, Any]:
+    async def call_tool(
+        self, tool_name: str, arguments: dict[str, Any], timeout: float | None = None
+    ) -> dict[str, Any]:
         """Enhanced tool calling with recovery and process monitoring."""
         if not self._initialized:
             return {"isError": True, "error": "Transport not initialized"}
 
         tool_timeout = timeout or self.default_timeout
         start_time = time.time()
-
+
         if self.enable_metrics:
             self._metrics["total_calls"] += 1
 
         try:
             logger.debug("Calling tool '%s' with timeout %ss", tool_name, tool_timeout)
-
+
             # Enhanced connection check with recovery attempt
             if not self.is_connected():
                 logger.warning("Connection unhealthy, attempting recovery...")
                 if not await self._attempt_recovery():
                     if self.enable_metrics:
                         self._update_metrics(time.time() - start_time, False)
-                    return {
-                        "isError": True,
-                        "error": "Failed to recover connection"
-                    }
-
+                    return {"isError": True, "error": "Failed to recover connection"}
+
             response = await asyncio.wait_for(
-                send_tools_call(*self._streams, tool_name, arguments),
-                timeout=tool_timeout
+                send_tools_call(*self._streams, tool_name, arguments), timeout=tool_timeout
             )
-
+
             response_time = time.time() - start_time
             result = self._normalize_mcp_response(response)
-
+
             # Reset failure count and update health on success
             self._consecutive_failures = 0
             self._last_successful_ping = time.time()
-
+
             if self.enable_metrics:
                 self._update_metrics(response_time, not result.get("isError", False))
-
+
             if not result.get("isError", False):
                 logger.debug("Tool '%s' completed successfully in %.3fs", tool_name, response_time)
             else:
-                logger.warning("Tool '%s' failed in %.3fs: %s", tool_name, response_time,
-                               result.get('error', 'Unknown error'))
-
+                logger.warning(
+                    "Tool '%s' failed in %.3fs: %s", tool_name, response_time, result.get("error", "Unknown error")
+                )
+
             return result
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             response_time = time.time() - start_time
             self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
-
+
             error_msg = f"Tool execution timed out after {tool_timeout}s"
             logger.error("Tool '%s' %s", tool_name, error_msg)
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            return {"isError": True, "error": error_msg}
         except Exception as e:
             response_time = time.time() - start_time
             self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
                 self._metrics["pipe_errors"] += 1
-
+
             # Enhanced process error detection
             error_str = str(e).lower()
-            if any(indicator in error_str for indicator in
-                   ["broken pipe", "process", "eof", "connection", "died"]):
+            if any(indicator in error_str for indicator in ["broken pipe", "process", "eof", "connection", "died"]):
                 logger.warning("Process error detected: %s", e)
                 self._initialized = False
                 if self.enable_metrics:
                     self._metrics["process_crashes"] += 1
-
+
             error_msg = f"Tool execution failed: {str(e)}"
             logger.error("Tool '%s' error: %s", tool_name, error_msg)
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            return {"isError": True, "error": error_msg}
 
     def _update_metrics(self, response_time: float, success: bool) -> None:
         """Enhanced metrics tracking (like SSE)."""
@@ -492,18 +482,16 @@ class StdioTransport(MCPBaseTransport):
             self._metrics["successful_calls"] += 1
         else:
             self._metrics["failed_calls"] += 1
-
+
         self._metrics["total_time"] += response_time
         if self._metrics["total_calls"] > 0:
-            self._metrics["avg_response_time"] = (
-                self._metrics["total_time"] / self._metrics["total_calls"]
-            )
+            self._metrics["avg_response_time"] = self._metrics["total_time"] / self._metrics["total_calls"]
 
-    def _normalize_mcp_response(self, response: Dict[str, Any]) -> Dict[str, Any]:
+    def _normalize_mcp_response(self, response: dict[str, Any]) -> dict[str, Any]:
         """
         Enhanced response normalization with STDIO-specific handling.
-
-        STDIO preserves string representations of numeric values for
+
+        STDIO preserves string representations of numeric values for
         backward compatibility with existing tests.
         """
         # Handle explicit error in response
@@ -511,63 +499,62 @@ class StdioTransport(MCPBaseTransport):
             error_info = response["error"]
             error_msg = error_info.get("message", str(error_info)) if isinstance(error_info, dict) else str(error_info)
             return {"isError": True, "error": error_msg}
-
+
         # Handle successful response with result
         if "result" in response:
             result = response["result"]
             if isinstance(result, dict) and "content" in result:
                 return {"isError": False, "content": self._extract_stdio_content(result["content"])}
             return {"isError": False, "content": result}
-
+
         # Handle direct content-based response
         if "content" in response:
             return {"isError": False, "content": self._extract_stdio_content(response["content"])}
-
+
         return {"isError": False, "content": response}
 
     def _extract_stdio_content(self, content_list: Any) -> Any:
         """
         Enhanced content extraction with STDIO-specific string preservation.
-
+
         STDIO transport preserves string representations of numeric values
         for backward compatibility with existing tests.
         """
         if not isinstance(content_list, list) or not content_list:
             return content_list
-
+
         if len(content_list) == 1:
             item = content_list[0]
             if isinstance(item, dict) and item.get("type") == "text":
                 text = item.get("text", "")
-
+
                 # STDIO-specific: preserve string format for numeric values
                 try:
                     parsed = json.loads(text)
                     # If the parsed result is a simple type and the original was a string,
                     # keep it as a string to maintain compatibility
-                    if isinstance(parsed, (int, float, bool)) and isinstance(text, str):
-                        # Check if this looks like a simple numeric string
-                        if text.strip().isdigit() or (text.strip().replace('.', '', 1).isdigit()):
-                            return text  # Return as string for numeric values
+                    if (
+                        isinstance(parsed, int | float | bool)
+                        and isinstance(text, str)
+                        and (text.strip().isdigit() or text.strip().replace(".", "", 1).isdigit())
+                    ):
+                        return text  # Return as string for numeric values
                     return parsed
                 except json.JSONDecodeError:
                     return text
            return item
-
+
         return content_list
 
-    async def list_resources(self) -> Dict[str, Any]:
+    async def list_resources(self) -> dict[str, Any]:
        """Enhanced resource listing with error handling."""
        if not self._initialized:
            return {}
        try:
-            response = await asyncio.wait_for(
-                send_resources_list(*self._streams),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_resources_list(*self._streams), timeout=self.default_timeout)
            self._consecutive_failures = 0  # Reset on success
            return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
            logger.error("List resources timed out")
            self._consecutive_failures += 1
            return {}
@@ -576,18 +563,15 @@ class StdioTransport(MCPBaseTransport):
            self._consecutive_failures += 1
            return {}
 
-    async def list_prompts(self) -> Dict[str, Any]:
+    async def list_prompts(self) -> dict[str, Any]:
        """Enhanced prompt listing with error handling."""
        if not self._initialized:
            return {}
        try:
-            response = await asyncio.wait_for(
-                send_prompts_list(*self._streams),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_prompts_list(*self._streams), timeout=self.default_timeout)
            self._consecutive_failures = 0  # Reset on success
            return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
            logger.error("List prompts timed out")
            self._consecutive_failures += 1
            return {}
@@ -596,18 +580,15 @@ class StdioTransport(MCPBaseTransport):
            self._consecutive_failures += 1
            return {}
 
-    async def read_resource(self, uri: str) -> Dict[str, Any]:
+    async def read_resource(self, uri: str) -> dict[str, Any]:
        """Read a specific resource."""
        if not self._initialized:
            return {}
        try:
-            response = await asyncio.wait_for(
-                send_resources_read(*self._streams, uri),
-                timeout=self.default_timeout
-            )
+            response = await asyncio.wait_for(send_resources_read(*self._streams, uri), timeout=self.default_timeout)
            self._consecutive_failures = 0  # Reset on success
            return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
            logger.error("Read resource timed out")
            self._consecutive_failures += 1
            return {}
@@ -616,18 +597,17 @@ class StdioTransport(MCPBaseTransport):
            self._consecutive_failures += 1
            return {}
 
-    async def get_prompt(self, name: str, arguments: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
+    async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> dict[str, Any]:
        """Get a specific prompt."""
        if not self._initialized:
            return {}
        try:
            response = await asyncio.wait_for(
-                send_prompts_get(*self._streams, name, arguments or {}),
-                timeout=self.default_timeout
+                send_prompts_get(*self._streams, name, arguments or {}), timeout=self.default_timeout
            )
            self._consecutive_failures = 0  # Reset on success
            return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
            logger.error("Get prompt timed out")
            self._consecutive_failures += 1
            return {}
@@ -636,17 +616,19 @@ class StdioTransport(MCPBaseTransport):
            self._consecutive_failures += 1
            return {}
 
-    def get_metrics(self) -> Dict[str, Any]:
+    def get_metrics(self) -> dict[str, Any]:
        """Enhanced metrics with process and health information."""
        metrics = self._metrics.copy()
-        metrics.update({
-            "is_connected": self.is_connected(),
-            "consecutive_failures": self._consecutive_failures,
-            "last_successful_ping": self._last_successful_ping,
-            "max_consecutive_failures": self._max_consecutive_failures,
-            "process_id": self._process_id,
-            "process_uptime": (time.time() - self._process_start_time) if self._process_start_time else 0,
-        })
+        metrics.update(
+            {
+                "is_connected": self.is_connected(),
+                "consecutive_failures": self._consecutive_failures,
+                "last_successful_ping": self._last_successful_ping,
+                "max_consecutive_failures": self._max_consecutive_failures,
+                "process_id": self._process_id,
+                "process_uptime": (time.time() - self._process_start_time) if self._process_start_time else 0,
+            }
+        )
        return metrics
 
    def reset_metrics(self) -> None:
@@ -654,7 +636,7 @@ class StdioTransport(MCPBaseTransport):
        preserved_init_time = self._metrics.get("initialization_time")
        preserved_last_ping = self._metrics.get("last_ping_time")
        preserved_restarts = self._metrics.get("process_restarts", 0)
-
+
        self._metrics = {
            "total_calls": 0,
            "successful_calls": 0,
@@ -671,7 +653,7 @@ class StdioTransport(MCPBaseTransport):
            "cpu_percent": 0.0,
        }
 
-    def get_streams(self) -> List[tuple]:
+    def get_streams(self) -> list[tuple]:
        """Enhanced streams access with connection check."""
        return [self._streams] if self._streams else []
 
@@ -684,4 +666,4 @@ class StdioTransport(MCPBaseTransport):
 
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Enhanced context manager cleanup."""
-        await self.close()
+        await self.close()
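For orientation, a minimal usage sketch of the transport whose diff appears above. It relies only on what the hunks show (the constructor accepting a dict of server parameters, initialize(), get_tools(), call_tool() returning a dict with "isError"/"content"/"error", and close()); the server command, module name, and tool name below are placeholders, not part of the package.

import asyncio

from chuk_tool_processor.mcp.transport.stdio_transport import StdioTransport


async def main() -> None:
    # A plain dict is accepted; __init__ converts it to StdioParameters
    # (see the constructor hunk above). Command/args are placeholders.
    transport = StdioTransport(
        {"command": "python", "args": ["-m", "my_mcp_server"]},
        connection_timeout=30.0,
        default_timeout=30.0,
    )

    if not await transport.initialize():
        print("failed to initialize STDIO transport")
        return

    try:
        tools = await transport.get_tools()
        print("tools:", [t.get("name") for t in tools if isinstance(t, dict)])

        # call_tool returns {"isError": False, "content": ...} on success
        # or {"isError": True, "error": ...} on failure.
        result = await transport.call_tool("echo", {"text": "hello"}, timeout=10.0)
        if result.get("isError"):
            print("tool failed:", result.get("error"))
        else:
            print("tool output:", result.get("content"))
    finally:
        await transport.close()


if __name__ == "__main__":
    asyncio.run(main())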