chuk-tool-processor 0.5.4__py3-none-any.whl → 0.6.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of chuk-tool-processor might be problematic.

@@ -1,11 +1,12 @@
 # chuk_tool_processor/mcp/stream_manager.py
 """
-StreamManager for CHUK Tool Processor - Updated with HTTP Streamable support
+StreamManager for CHUK Tool Processor - Enhanced with robust shutdown handling
 """
 from __future__ import annotations
 
 import asyncio
 from typing import Any, Dict, List, Optional, Tuple
+from contextlib import asynccontextmanager
 
 # --------------------------------------------------------------------------- #
 # CHUK imports #
@@ -26,6 +27,8 @@ class StreamManager:
     """
     Manager for MCP server streams with support for multiple transport types.
 
+    Enhanced with robust shutdown handling to prevent event loop closure issues.
+
     Updated to support the latest transports:
     - STDIO (process-based)
     - SSE (Server-Sent Events)
@@ -39,9 +42,11 @@ class StreamManager:
         self.server_names: Dict[int, str] = {}
         self.all_tools: List[Dict[str, Any]] = []
         self._lock = asyncio.Lock()
+        self._closed = False  # Track if we've been closed
+        self._shutdown_timeout = 2.0  # Maximum time to spend on shutdown
 
     # ------------------------------------------------------------------ #
-    # factory helpers #
+    # factory helpers with enhanced error handling #
     # ------------------------------------------------------------------ #
    @classmethod
    async def create(
@@ -51,16 +56,25 @@ class StreamManager:
        server_names: Optional[Dict[int, str]] = None,
        transport_type: str = "stdio",
        default_timeout: float = 30.0,
+        initialization_timeout: float = 60.0,  # NEW: Timeout for entire initialization
    ) -> "StreamManager":
-        inst = cls()
-        await inst.initialize(
-            config_file,
-            servers,
-            server_names,
-            transport_type,
-            default_timeout=default_timeout
-        )
-        return inst
+        """Create StreamManager with timeout protection."""
+        try:
+            inst = cls()
+            await asyncio.wait_for(
+                inst.initialize(
+                    config_file,
+                    servers,
+                    server_names,
+                    transport_type,
+                    default_timeout=default_timeout
+                ),
+                timeout=initialization_timeout
+            )
+            return inst
+        except asyncio.TimeoutError:
+            logger.error(f"StreamManager initialization timed out after {initialization_timeout}s")
+            raise RuntimeError(f"StreamManager initialization timed out after {initialization_timeout}s")
 
    @classmethod
    async def create_with_sse(
@@ -69,15 +83,24 @@ class StreamManager:
        server_names: Optional[Dict[int, str]] = None,
        connection_timeout: float = 10.0,
        default_timeout: float = 30.0,
+        initialization_timeout: float = 60.0,  # NEW
    ) -> "StreamManager":
-        inst = cls()
-        await inst.initialize_with_sse(
-            servers,
-            server_names,
-            connection_timeout=connection_timeout,
-            default_timeout=default_timeout
-        )
-        return inst
+        """Create StreamManager with SSE transport and timeout protection."""
+        try:
+            inst = cls()
+            await asyncio.wait_for(
+                inst.initialize_with_sse(
+                    servers,
+                    server_names,
+                    connection_timeout=connection_timeout,
+                    default_timeout=default_timeout
+                ),
+                timeout=initialization_timeout
+            )
+            return inst
+        except asyncio.TimeoutError:
+            logger.error(f"SSE StreamManager initialization timed out after {initialization_timeout}s")
+            raise RuntimeError(f"SSE StreamManager initialization timed out after {initialization_timeout}s")
 
    @classmethod
    async def create_with_http_streamable(
@@ -86,16 +109,60 @@ class StreamManager:
        server_names: Optional[Dict[int, str]] = None,
        connection_timeout: float = 30.0,
        default_timeout: float = 30.0,
+        initialization_timeout: float = 60.0,  # NEW
    ) -> "StreamManager":
-        """Create StreamManager with HTTP Streamable transport."""
-        inst = cls()
-        await inst.initialize_with_http_streamable(
-            servers,
-            server_names,
-            connection_timeout=connection_timeout,
-            default_timeout=default_timeout
-        )
-        return inst
+        """Create StreamManager with HTTP Streamable transport and timeout protection."""
+        try:
+            inst = cls()
+            await asyncio.wait_for(
+                inst.initialize_with_http_streamable(
+                    servers,
+                    server_names,
+                    connection_timeout=connection_timeout,
+                    default_timeout=default_timeout
+                ),
+                timeout=initialization_timeout
+            )
+            return inst
+        except asyncio.TimeoutError:
+            logger.error(f"HTTP Streamable StreamManager initialization timed out after {initialization_timeout}s")
+            raise RuntimeError(f"HTTP Streamable StreamManager initialization timed out after {initialization_timeout}s")
+
+    # ------------------------------------------------------------------ #
+    # NEW: Context manager support for automatic cleanup #
+    # ------------------------------------------------------------------ #
+    async def __aenter__(self):
+        """Context manager entry."""
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Context manager exit with automatic cleanup."""
+        await self.close()
+
+    @classmethod
+    @asynccontextmanager
+    async def create_managed(
+        cls,
+        config_file: str,
+        servers: List[str],
+        server_names: Optional[Dict[int, str]] = None,
+        transport_type: str = "stdio",
+        default_timeout: float = 30.0,
+    ):
+        """Context manager factory for automatic cleanup."""
+        stream_manager = None
+        try:
+            stream_manager = await cls.create(
+                config_file=config_file,
+                servers=servers,
+                server_names=server_names,
+                transport_type=transport_type,
+                default_timeout=default_timeout,
+            )
+            yield stream_manager
+        finally:
+            if stream_manager:
+                await stream_manager.close()
 
    # ------------------------------------------------------------------ #
    # initialisation - stdio / sse / http_streamable #
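
The new create_managed() factory pairs creation with guaranteed cleanup. A minimal usage sketch, not taken from the package docs; the config path "config.json" and the server name "echo" are hypothetical placeholders:

    import asyncio
    from chuk_tool_processor.mcp.stream_manager import StreamManager

    async def main() -> None:
        # create_managed() yields an initialized StreamManager and always
        # awaits close() on exit, even when the body raises.
        async with StreamManager.create_managed(
            config_file="config.json",   # hypothetical config path
            servers=["echo"],            # hypothetical server name
            transport_type="stdio",
        ) as sm:
            tools = await sm.list_tools("echo")
            print(f"echo exposes {len(tools)} tool(s)")

    asyncio.run(main())
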
@@ -108,6 +175,9 @@ class StreamManager:
        transport_type: str = "stdio",
        default_timeout: float = 30.0,
    ) -> None:
+        if self._closed:
+            raise RuntimeError("Cannot initialize a closed StreamManager")
+
        async with self._lock:
            self.server_names = server_names or {}
 
@@ -157,14 +227,16 @@ class StreamManager:
                        logger.error("Unsupported transport type: %s", transport_type)
                        continue
 
-                    if not await transport.initialize():
+                    # Initialize with timeout protection
+                    if not await asyncio.wait_for(transport.initialize(), timeout=default_timeout):
                        logger.error("Failed to init %s", server_name)
                        continue
 
                    self.transports[server_name] = transport
 
-                    status = "Up" if await transport.send_ping() else "Down"
-                    tools = await transport.get_tools()
+                    # Ping and get tools with timeout protection
+                    status = "Up" if await asyncio.wait_for(transport.send_ping(), timeout=5.0) else "Down"
+                    tools = await asyncio.wait_for(transport.get_tools(), timeout=10.0)
 
                    for t in tools:
                        name = t.get("name")
@@ -181,6 +253,8 @@ class StreamManager:
                        }
                    )
                    logger.info("Initialised %s - %d tool(s)", server_name, len(tools))
+                except asyncio.TimeoutError:
+                    logger.error("Timeout initialising %s", server_name)
                except Exception as exc:
                    logger.error("Error initialising %s: %s", server_name, exc)
 
@@ -197,6 +271,9 @@ class StreamManager:
        connection_timeout: float = 10.0,
        default_timeout: float = 30.0,
    ) -> None:
+        if self._closed:
+            raise RuntimeError("Cannot initialize a closed StreamManager")
+
        async with self._lock:
            self.server_names = server_names or {}
 
@@ -213,13 +290,13 @@ class StreamManager:
                        default_timeout=default_timeout
                    )
 
-                    if not await transport.initialize():
+                    if not await asyncio.wait_for(transport.initialize(), timeout=connection_timeout):
                        logger.error("Failed to init SSE %s", name)
                        continue
 
                    self.transports[name] = transport
-                    status = "Up" if await transport.send_ping() else "Down"
-                    tools = await transport.get_tools()
+                    status = "Up" if await asyncio.wait_for(transport.send_ping(), timeout=5.0) else "Down"
+                    tools = await asyncio.wait_for(transport.get_tools(), timeout=10.0)
 
                    for t in tools:
                        tname = t.get("name")
@@ -231,6 +308,8 @@ class StreamManager:
                        {"id": idx, "name": name, "tools": len(tools), "status": status}
                    )
                    logger.info("Initialised SSE %s - %d tool(s)", name, len(tools))
+                except asyncio.TimeoutError:
+                    logger.error("Timeout initialising SSE %s", name)
                except Exception as exc:
                    logger.error("Error initialising SSE %s: %s", name, exc)
 
@@ -248,6 +327,9 @@ class StreamManager:
        default_timeout: float = 30.0,
    ) -> None:
        """Initialize with HTTP Streamable transport (modern MCP spec 2025-03-26)."""
+        if self._closed:
+            raise RuntimeError("Cannot initialize a closed StreamManager")
+
        async with self._lock:
            self.server_names = server_names or {}
 
@@ -265,13 +347,13 @@ class StreamManager:
                        session_id=cfg.get("session_id")
                    )
 
-                    if not await transport.initialize():
+                    if not await asyncio.wait_for(transport.initialize(), timeout=connection_timeout):
                        logger.error("Failed to init HTTP Streamable %s", name)
                        continue
 
                    self.transports[name] = transport
-                    status = "Up" if await transport.send_ping() else "Down"
-                    tools = await transport.get_tools()
+                    status = "Up" if await asyncio.wait_for(transport.send_ping(), timeout=5.0) else "Down"
+                    tools = await asyncio.wait_for(transport.get_tools(), timeout=10.0)
 
                    for t in tools:
                        tname = t.get("name")
@@ -283,6 +365,8 @@ class StreamManager:
                        {"id": idx, "name": name, "tools": len(tools), "status": status}
                    )
                    logger.info("Initialised HTTP Streamable %s - %d tool(s)", name, len(tools))
+                except asyncio.TimeoutError:
+                    logger.error("Timeout initialising HTTP Streamable %s", name)
                except Exception as exc:
                    logger.error("Error initialising HTTP Streamable %s: %s", name, exc)
 
@@ -306,6 +390,10 @@ class StreamManager:
 
    async def list_tools(self, server_name: str) -> List[Dict[str, Any]]:
        """List all tools available from a specific server."""
+        if self._closed:
+            logger.warning("Cannot list tools: StreamManager is closed")
+            return []
+
        if server_name not in self.transports:
            logger.error(f"Server '{server_name}' not found in transports")
            return []
@@ -313,9 +401,12 @@ class StreamManager:
        transport = self.transports[server_name]
 
        try:
-            tools = await transport.get_tools()
+            tools = await asyncio.wait_for(transport.get_tools(), timeout=10.0)
            logger.debug(f"Found {len(tools)} tools for server {server_name}")
            return tools
+        except asyncio.TimeoutError:
+            logger.error(f"Timeout listing tools for server {server_name}")
+            return []
        except Exception as e:
            logger.error(f"Error listing tools for server {server_name}: {e}")
            return []
@@ -324,21 +415,27 @@ class StreamManager:
    # EXTRA HELPERS - ping / resources / prompts #
    # ------------------------------------------------------------------ #
    async def ping_servers(self) -> List[Dict[str, Any]]:
+        if self._closed:
+            return []
+
        async def _ping_one(name: str, tr: MCPBaseTransport):
            try:
-                ok = await tr.send_ping()
+                ok = await asyncio.wait_for(tr.send_ping(), timeout=5.0)
            except Exception:
                ok = False
            return {"server": name, "ok": ok}
 
-        return await asyncio.gather(*(_ping_one(n, t) for n, t in self.transports.items()))
+        return await asyncio.gather(*(_ping_one(n, t) for n, t in self.transports.items()), return_exceptions=True)
 
    async def list_resources(self) -> List[Dict[str, Any]]:
+        if self._closed:
+            return []
+
        out: List[Dict[str, Any]] = []
 
        async def _one(name: str, tr: MCPBaseTransport):
            try:
-                res = await tr.list_resources()
+                res = await asyncio.wait_for(tr.list_resources(), timeout=10.0)
                resources = (
                    res.get("resources", []) if isinstance(res, dict) else res
                )
@@ -349,15 +446,18 @@ class StreamManager:
            except Exception as exc:
                logger.debug("resources/list failed for %s: %s", name, exc)
 
-        await asyncio.gather(*(_one(n, t) for n, t in self.transports.items()))
+        await asyncio.gather(*(_one(n, t) for n, t in self.transports.items()), return_exceptions=True)
        return out
 
    async def list_prompts(self) -> List[Dict[str, Any]]:
+        if self._closed:
+            return []
+
        out: List[Dict[str, Any]] = []
 
        async def _one(name: str, tr: MCPBaseTransport):
            try:
-                res = await tr.list_prompts()
+                res = await asyncio.wait_for(tr.list_prompts(), timeout=10.0)
                prompts = res.get("prompts", []) if isinstance(res, dict) else res
                for item in prompts:
                    item = dict(item)
@@ -366,7 +466,7 @@ class StreamManager:
            except Exception as exc:
                logger.debug("prompts/list failed for %s: %s", name, exc)
 
-        await asyncio.gather(*(_one(n, t) for n, t in self.transports.items()))
+        await asyncio.gather(*(_one(n, t) for n, t in self.transports.items()), return_exceptions=True)
        return out
 
    # ------------------------------------------------------------------ #
@@ -380,6 +480,12 @@ class StreamManager:
        timeout: Optional[float] = None,
    ) -> Dict[str, Any]:
        """Call a tool on the appropriate server with timeout support."""
+        if self._closed:
+            return {
+                "isError": True,
+                "error": "StreamManager is closed",
+            }
+
        server_name = server_name or self.get_server_for_tool(tool_name)
        if not server_name or server_name not in self.transports:
            return {
@@ -417,44 +523,146 @@ class StreamManager:
        return await transport.call_tool(tool_name, arguments)
 
    # ------------------------------------------------------------------ #
-    # shutdown - FIXED VERSION to prevent cancel scope errors #
+    # ENHANCED shutdown with robust error handling #
    # ------------------------------------------------------------------ #
    async def close(self) -> None:
-        """Close all transports safely without cancel scope errors."""
+        """
+        Close all transports safely with enhanced error handling.
+
+        ENHANCED: Uses asyncio.shield() to protect critical cleanup and
+        provides multiple fallback strategies for different failure modes.
+        """
+        if self._closed:
+            logger.debug("StreamManager already closed")
+            return
+
        if not self.transports:
            logger.debug("No transports to close")
+            self._closed = True
            return
 
        logger.debug(f"Closing {len(self.transports)} transports...")
 
-        # Strategy: Close transports sequentially with short timeouts
+        try:
+            # Use shield to protect the cleanup operation from cancellation
+            await asyncio.shield(self._do_close_all_transports())
+        except asyncio.CancelledError:
+            # If shield fails (rare), fall back to synchronous cleanup
+            logger.debug("Close operation cancelled, performing synchronous cleanup")
+            self._sync_cleanup()
+        except Exception as e:
+            logger.debug(f"Error during close: {e}")
+            self._sync_cleanup()
+        finally:
+            self._closed = True
+
+    async def _do_close_all_transports(self) -> None:
+        """Protected cleanup implementation with multiple strategies."""
        close_results = []
        transport_items = list(self.transports.items())
 
+        # Strategy 1: Try concurrent close with timeout
+        try:
+            await self._concurrent_close(transport_items, close_results)
+        except Exception as e:
+            logger.debug(f"Concurrent close failed: {e}, falling back to sequential close")
+            # Strategy 2: Fall back to sequential close
+            await self._sequential_close(transport_items, close_results)
+
+        # Always clean up state
+        self._cleanup_state()
+
+        # Log summary
+        if close_results:
+            successful_closes = sum(1 for _, success, _ in close_results if success)
+            logger.debug(f"Transport cleanup: {successful_closes}/{len(close_results)} closed successfully")
+
+    async def _concurrent_close(self, transport_items: List[Tuple[str, MCPBaseTransport]], close_results: List) -> None:
+        """Try to close all transports concurrently."""
+        close_tasks = []
        for name, transport in transport_items:
+            task = asyncio.create_task(
+                self._close_single_transport(name, transport),
+                name=f"close_{name}"
+            )
+            close_tasks.append((name, task))
+
+        # Wait for all tasks with a reasonable timeout
+        if close_tasks:
            try:
+                results = await asyncio.wait_for(
+                    asyncio.gather(
+                        *[task for _, task in close_tasks],
+                        return_exceptions=True
+                    ),
+                    timeout=self._shutdown_timeout
+                )
+
+                # Process results
+                for i, (name, _) in enumerate(close_tasks):
+                    result = results[i] if i < len(results) else None
+                    if isinstance(result, Exception):
+                        logger.debug(f"Transport {name} close failed: {result}")
+                        close_results.append((name, False, str(result)))
+                    else:
+                        logger.debug(f"Transport {name} closed successfully")
+                        close_results.append((name, True, None))
+
+            except asyncio.TimeoutError:
+                # Cancel any remaining tasks
+                for name, task in close_tasks:
+                    if not task.done():
+                        task.cancel()
+                    close_results.append((name, False, "timeout"))
+
+                # Brief wait for cancellations to complete
                try:
-                    await asyncio.wait_for(transport.close(), timeout=0.2)
-                    logger.debug(f"Closed transport: {name}")
-                    close_results.append((name, True, None))
+                    await asyncio.wait_for(
+                        asyncio.gather(*[task for _, task in close_tasks], return_exceptions=True),
+                        timeout=0.5
+                    )
                except asyncio.TimeoutError:
-                    logger.debug(f"Transport {name} close timed out (normal during shutdown)")
-                    close_results.append((name, False, "timeout"))
-                except asyncio.CancelledError:
-                    logger.debug(f"Transport {name} close cancelled during event loop shutdown")
-                    close_results.append((name, False, "cancelled"))
-
+                    pass  # Some tasks may not cancel cleanly
+
+    async def _sequential_close(self, transport_items: List[Tuple[str, MCPBaseTransport]], close_results: List) -> None:
+        """Close transports one by one as fallback."""
+        for name, transport in transport_items:
+            try:
+                await asyncio.wait_for(
+                    self._close_single_transport(name, transport),
+                    timeout=0.5  # Short timeout per transport
+                )
+                logger.debug(f"Closed transport: {name}")
+                close_results.append((name, True, None))
+            except asyncio.TimeoutError:
+                logger.debug(f"Transport {name} close timed out (normal during shutdown)")
+                close_results.append((name, False, "timeout"))
+            except asyncio.CancelledError:
+                logger.debug(f"Transport {name} close cancelled during event loop shutdown")
+                close_results.append((name, False, "cancelled"))
            except Exception as e:
                logger.debug(f"Error closing transport {name}: {e}")
                close_results.append((name, False, str(e)))
-
-        # Clean up state
-        self._cleanup_state()
-
-        # Log summary
-        successful_closes = sum(1 for _, success, _ in close_results if success)
-        if close_results:
-            logger.debug(f"Transport cleanup: {successful_closes}/{len(close_results)} closed successfully")
+
+    async def _close_single_transport(self, name: str, transport: MCPBaseTransport) -> None:
+        """Close a single transport with error handling."""
+        try:
+            if hasattr(transport, 'close') and callable(transport.close):
+                await transport.close()
+            else:
+                logger.debug(f"Transport {name} has no close method")
+        except Exception as e:
+            logger.debug(f"Error closing transport {name}: {e}")
+            raise
+
+    def _sync_cleanup(self) -> None:
+        """Synchronous cleanup for use when async cleanup fails."""
+        try:
+            transport_count = len(self.transports)
+            self._cleanup_state()
+            logger.debug(f"Synchronous cleanup completed for {transport_count} transports")
+        except Exception as e:
+            logger.debug(f"Error during synchronous cleanup: {e}")
 
    def _cleanup_state(self) -> None:
        """Clean up internal state synchronously."""
@@ -472,6 +680,9 @@ class StreamManager:
    # ------------------------------------------------------------------ #
    def get_streams(self) -> List[Tuple[Any, Any]]:
        """Return a list of (read_stream, write_stream) tuples for all transports."""
+        if self._closed:
+            return []
+
        pairs: List[Tuple[Any, Any]] = []
 
        for tr in self.transports.values():
@@ -489,4 +700,47 @@ class StreamManager:
    @property
    def streams(self) -> List[Tuple[Any, Any]]:
        """Convenience alias for get_streams()."""
-        return self.get_streams()
+        return self.get_streams()
+
+    # ------------------------------------------------------------------ #
+    # NEW: Health check and diagnostic methods #
+    # ------------------------------------------------------------------ #
+    def is_closed(self) -> bool:
+        """Check if the StreamManager has been closed."""
+        return self._closed
+
+    def get_transport_count(self) -> int:
+        """Get the number of active transports."""
+        return len(self.transports)
+
+    async def health_check(self) -> Dict[str, Any]:
+        """Perform a health check on all transports."""
+        if self._closed:
+            return {"status": "closed", "transports": {}}
+
+        health_info = {
+            "status": "active",
+            "transport_count": len(self.transports),
+            "transports": {}
+        }
+
+        for name, transport in self.transports.items():
+            try:
+                ping_ok = await asyncio.wait_for(transport.send_ping(), timeout=5.0)
+                health_info["transports"][name] = {
+                    "status": "healthy" if ping_ok else "unhealthy",
+                    "ping_success": ping_ok
+                }
+            except asyncio.TimeoutError:
+                health_info["transports"][name] = {
+                    "status": "timeout",
+                    "ping_success": False
+                }
+            except Exception as e:
+                health_info["transports"][name] = {
+                    "status": "error",
+                    "ping_success": False,
+                    "error": str(e)
+                }
+
+        return health_info
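
The new diagnostics round out the lifecycle tracking. A hedged sketch of how a caller might use health_check() together with the closed-state helpers; as before, "config.json" and "echo" are hypothetical placeholders:

    import asyncio
    from chuk_tool_processor.mcp.stream_manager import StreamManager

    async def main() -> None:
        sm = await StreamManager.create(
            config_file="config.json",   # hypothetical config path
            servers=["echo"],            # hypothetical server name
        )
        try:
            # health_check() pings each transport (5 s cap per ping) and
            # returns a per-server status dictionary.
            report = await sm.health_check()
            print(report["status"], sm.get_transport_count())
            for server, info in report.get("transports", {}).items():
                print(f"{server}: {info['status']}")
        finally:
            await sm.close()
            assert sm.is_closed()  # close() is idempotent once _closed is set

    asyncio.run(main())
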
@@ -1,16 +1,39 @@
 # chuk_tool_processor/mcp/transport/__init__.py
 """
-MCP transport implementations.
+MCP Transport module providing multiple transport implementations.
 """
 
 from .base_transport import MCPBaseTransport
-from .stdio_transport import StdioTransport
-from .sse_transport import SSETransport
-from .http_streamable_transport import HTTPStreamableTransport
+
+# Always available transports
+try:
+    from .stdio_transport import StdioTransport
+    HAS_STDIO_TRANSPORT = True
+except ImportError:
+    StdioTransport = None
+    HAS_STDIO_TRANSPORT = False
+
+# Conditionally available transports
+try:
+    from .sse_transport import SSETransport
+    HAS_SSE_TRANSPORT = True
+except ImportError:
+    SSETransport = None
+    HAS_SSE_TRANSPORT = False
+
+try:
+    from .http_streamable_transport import HTTPStreamableTransport
+    HAS_HTTP_STREAMABLE_TRANSPORT = True
+except ImportError:
+    HTTPStreamableTransport = None
+    HAS_HTTP_STREAMABLE_TRANSPORT = False
 
 __all__ = [
     "MCPBaseTransport",
-    "StdioTransport",
-    "SSETransport",
-    "HTTPStreamableTransport"
+    "StdioTransport",
+    "SSETransport",
+    "HTTPStreamableTransport",
+    "HAS_STDIO_TRANSPORT",
+    "HAS_SSE_TRANSPORT",
+    "HAS_HTTP_STREAMABLE_TRANSPORT"
 ]
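
With the imports now guarded, downstream code can feature-detect transports instead of catching ImportError itself. A minimal sketch using only the names exported in __all__; pick_transport_type is a hypothetical helper, not part of the package:

    from chuk_tool_processor.mcp.transport import (
        HAS_SSE_TRANSPORT,
        HAS_STDIO_TRANSPORT,
        SSETransport,
        StdioTransport,
    )

    def pick_transport_type() -> str:
        # Prefer SSE when its optional dependencies imported cleanly,
        # otherwise fall back to the process-based stdio transport.
        if HAS_SSE_TRANSPORT and SSETransport is not None:
            return "sse"
        if HAS_STDIO_TRANSPORT and StdioTransport is not None:
            return "stdio"
        raise RuntimeError("No MCP transport is available in this environment")
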