chuk-tool-processor 0.6.4__py3-none-any.whl → 0.9.7__py3-none-any.whl

This diff shows the contents of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of chuk-tool-processor may warrant closer review.

Files changed (66):
  1. chuk_tool_processor/core/__init__.py +32 -1
  2. chuk_tool_processor/core/exceptions.py +225 -13
  3. chuk_tool_processor/core/processor.py +135 -104
  4. chuk_tool_processor/execution/strategies/__init__.py +6 -0
  5. chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
  6. chuk_tool_processor/execution/strategies/subprocess_strategy.py +202 -206
  7. chuk_tool_processor/execution/tool_executor.py +82 -84
  8. chuk_tool_processor/execution/wrappers/__init__.py +42 -0
  9. chuk_tool_processor/execution/wrappers/caching.py +150 -116
  10. chuk_tool_processor/execution/wrappers/circuit_breaker.py +370 -0
  11. chuk_tool_processor/execution/wrappers/rate_limiting.py +76 -43
  12. chuk_tool_processor/execution/wrappers/retry.py +116 -78
  13. chuk_tool_processor/logging/__init__.py +23 -17
  14. chuk_tool_processor/logging/context.py +40 -45
  15. chuk_tool_processor/logging/formatter.py +22 -21
  16. chuk_tool_processor/logging/helpers.py +28 -42
  17. chuk_tool_processor/logging/metrics.py +13 -15
  18. chuk_tool_processor/mcp/__init__.py +8 -12
  19. chuk_tool_processor/mcp/mcp_tool.py +158 -114
  20. chuk_tool_processor/mcp/register_mcp_tools.py +22 -22
  21. chuk_tool_processor/mcp/setup_mcp_http_streamable.py +57 -17
  22. chuk_tool_processor/mcp/setup_mcp_sse.py +57 -17
  23. chuk_tool_processor/mcp/setup_mcp_stdio.py +11 -11
  24. chuk_tool_processor/mcp/stream_manager.py +333 -276
  25. chuk_tool_processor/mcp/transport/__init__.py +22 -29
  26. chuk_tool_processor/mcp/transport/base_transport.py +180 -44
  27. chuk_tool_processor/mcp/transport/http_streamable_transport.py +505 -325
  28. chuk_tool_processor/mcp/transport/models.py +100 -0
  29. chuk_tool_processor/mcp/transport/sse_transport.py +607 -276
  30. chuk_tool_processor/mcp/transport/stdio_transport.py +597 -116
  31. chuk_tool_processor/models/__init__.py +21 -1
  32. chuk_tool_processor/models/execution_strategy.py +16 -21
  33. chuk_tool_processor/models/streaming_tool.py +28 -25
  34. chuk_tool_processor/models/tool_call.py +49 -31
  35. chuk_tool_processor/models/tool_export_mixin.py +22 -8
  36. chuk_tool_processor/models/tool_result.py +40 -77
  37. chuk_tool_processor/models/tool_spec.py +350 -0
  38. chuk_tool_processor/models/validated_tool.py +36 -18
  39. chuk_tool_processor/observability/__init__.py +30 -0
  40. chuk_tool_processor/observability/metrics.py +312 -0
  41. chuk_tool_processor/observability/setup.py +105 -0
  42. chuk_tool_processor/observability/tracing.py +345 -0
  43. chuk_tool_processor/plugins/__init__.py +1 -1
  44. chuk_tool_processor/plugins/discovery.py +11 -11
  45. chuk_tool_processor/plugins/parsers/__init__.py +1 -1
  46. chuk_tool_processor/plugins/parsers/base.py +1 -2
  47. chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
  48. chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
  49. chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
  50. chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
  51. chuk_tool_processor/registry/__init__.py +12 -12
  52. chuk_tool_processor/registry/auto_register.py +22 -30
  53. chuk_tool_processor/registry/decorators.py +127 -129
  54. chuk_tool_processor/registry/interface.py +26 -23
  55. chuk_tool_processor/registry/metadata.py +27 -22
  56. chuk_tool_processor/registry/provider.py +17 -18
  57. chuk_tool_processor/registry/providers/__init__.py +16 -19
  58. chuk_tool_processor/registry/providers/memory.py +18 -25
  59. chuk_tool_processor/registry/tool_export.py +42 -51
  60. chuk_tool_processor/utils/validation.py +15 -16
  61. chuk_tool_processor-0.9.7.dist-info/METADATA +1813 -0
  62. chuk_tool_processor-0.9.7.dist-info/RECORD +67 -0
  63. chuk_tool_processor-0.6.4.dist-info/METADATA +0 -697
  64. chuk_tool_processor-0.6.4.dist-info/RECORD +0 -60
  65. {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/WHEEL +0 -0
  66. {chuk_tool_processor-0.6.4.dist-info → chuk_tool_processor-0.9.7.dist-info}/top_level.txt +0 -0
@@ -1,39 +1,30 @@
-# chuk_tool_processor/mcp/transport/http_streamable_transport.py
+# chuk_tool_processor/mcp/transport/http_streamable_transport.py - ENHANCED
 from __future__ import annotations

 import asyncio
-import json
-import time
-from typing import Dict, Any, List, Optional
 import logging
-
-from .base_transport import MCPBaseTransport
+import time
+from typing import Any
+
+from chuk_mcp.protocol.messages import (  # type: ignore[import-untyped]
+    send_initialize,
+    send_ping,
+    send_prompts_get,
+    send_prompts_list,
+    send_resources_list,
+    send_resources_read,
+    send_tools_call,
+    send_tools_list,
+)
+from chuk_mcp.transports.http.parameters import StreamableHTTPParameters  # type: ignore[import-untyped]

 # Import chuk-mcp HTTP transport components
-try:
-    from chuk_mcp.transports.http import http_client
-    from chuk_mcp.transports.http.parameters import StreamableHTTPParameters
-    from chuk_mcp.protocol.messages import (
-        send_initialize,
-        send_ping,
-        send_tools_list,
-        send_tools_call,
-    )
-    HAS_HTTP_SUPPORT = True
-except ImportError:
-    HAS_HTTP_SUPPORT = False
-
-# Import optional resource and prompt support
-try:
-    from chuk_mcp.protocol.messages import (
-        send_resources_list,
-        send_resources_read,
-        send_prompts_list,
-        send_prompts_get,
-    )
-    HAS_RESOURCES_PROMPTS = True
-except ImportError:
-    HAS_RESOURCES_PROMPTS = False
+from chuk_mcp.transports.http.transport import (
+    StreamableHTTPTransport as ChukHTTPTransport,  # type: ignore[import-untyped]
+)
+
+from .base_transport import MCPBaseTransport
+from .models import TimeoutConfig, TransportMetrics

 logger = logging.getLogger(__name__)

@@ -41,457 +32,646 @@ logger = logging.getLogger(__name__)
 class HTTPStreamableTransport(MCPBaseTransport):
     """
     HTTP Streamable transport using chuk-mcp HTTP client.
-
-    This implements the modern MCP spec (2025-03-26) replacement for SSE transport.
-    Follows the same patterns as SSETransport but uses HTTP requests instead of SSE.
+
+    ENHANCED: Now matches SSE transport robustness with improved connection
+    management, health monitoring, and comprehensive error handling.
     """

-    def __init__(self, url: str, api_key: Optional[str] = None,
-                 connection_timeout: float = 30.0, default_timeout: float = 30.0,
-                 session_id: Optional[str] = None, enable_metrics: bool = True):
+    def __init__(
+        self,
+        url: str,
+        api_key: str | None = None,
+        headers: dict[str, str] | None = None,
+        connection_timeout: float = 30.0,
+        default_timeout: float = 30.0,
+        session_id: str | None = None,
+        enable_metrics: bool = True,
+        oauth_refresh_callback: Any | None = None,
+        timeout_config: TimeoutConfig | None = None,
+    ):
         """
-        Initialize HTTP Streamable transport with chuk-mcp.
-
+        Initialize HTTP Streamable transport with enhanced configuration.
+
         Args:
             url: HTTP server URL (should end with /mcp)
             api_key: Optional API key for authentication
-            connection_timeout: Timeout for initial connection
-            default_timeout: Default timeout for operations
+            headers: Optional custom headers
+            connection_timeout: Timeout for initial connection (overrides timeout_config.connect)
+            default_timeout: Default timeout for operations (overrides timeout_config.operation)
             session_id: Optional session ID for stateful connections
            enable_metrics: Whether to track performance metrics
+            oauth_refresh_callback: Optional async callback to refresh OAuth tokens
+            timeout_config: Optional timeout configuration model with connect/operation/quick/shutdown
         """
         # Ensure URL points to the /mcp endpoint
-        if not url.endswith('/mcp'):
+        if not url.endswith("/mcp"):
             self.url = f"{url.rstrip('/')}/mcp"
         else:
             self.url = url
-
+
         self.api_key = api_key
-        self.connection_timeout = connection_timeout
-        self.default_timeout = default_timeout
+        self.configured_headers = headers or {}
         self.session_id = session_id
         self.enable_metrics = enable_metrics
-
-        # State tracking (following SSE pattern)
-        self._http_context = None
+        self.oauth_refresh_callback = oauth_refresh_callback
+
+        # Use timeout config or create from individual parameters
+        if timeout_config is None:
+            timeout_config = TimeoutConfig(connect=connection_timeout, operation=default_timeout)
+
+        self.timeout_config = timeout_config
+        self.connection_timeout = timeout_config.connect
+        self.default_timeout = timeout_config.operation
+
+        logger.debug("HTTP Streamable transport initialized with URL: %s", self.url)
+        if self.api_key:
+            logger.debug("API key configured for authentication")
+        if self.configured_headers:
+            logger.debug("Custom headers configured: %s", list(self.configured_headers.keys()))
+        if self.session_id:
+            logger.debug("Session ID configured: %s", self.session_id)
+
+        # State tracking (enhanced like SSE)
+        self._http_transport = None
         self._read_stream = None
         self._write_stream = None
         self._initialized = False
-
-        # Performance metrics (enhanced from SSE version)
-        self._metrics = {
-            "total_calls": 0,
-            "successful_calls": 0,
-            "failed_calls": 0,
-            "total_time": 0.0,
-            "avg_response_time": 0.0,
-            "last_ping_time": None,
-            "initialization_time": None
+
+        # Health monitoring (NEW - like SSE)
+        self._last_successful_ping = None
+        self._consecutive_failures = 0
+        self._max_consecutive_failures = 3
+
+        # Performance metrics (enhanced like SSE) - use Pydantic model
+        self._metrics = TransportMetrics() if enable_metrics else None
+
+    def _get_headers(self) -> dict[str, str]:
+        """Get headers with authentication and custom headers (like SSE)."""
+        headers = {
+            "Content-Type": "application/json",
+            "Accept": "application/json, text/event-stream",
+            "User-Agent": "chuk-tool-processor/1.0.0",
         }
-
-        if not HAS_HTTP_SUPPORT:
-            logger.warning("HTTP Streamable transport not available - operations will fail")
-        if not HAS_RESOURCES_PROMPTS:
-            logger.debug("Resources/prompts not available in chuk-mcp")
+
+        # Add configured headers first
+        if self.configured_headers:
+            headers.update(self.configured_headers)
+
+        # Add API key as Bearer token if provided and no Authorization header exists
+        # This prevents clobbering OAuth tokens from configured_headers
+        if self.api_key and "Authorization" not in headers:
+            headers["Authorization"] = f"Bearer {self.api_key}"
+
+        # Add session ID if provided
+        if self.session_id:
+            headers["X-Session-ID"] = self.session_id
+
+        return headers
+
+    async def _test_connection_health(self) -> bool:
+        """Test basic HTTP connectivity (like SSE's connectivity test)."""
+        try:
+            import httpx
+
+            async with httpx.AsyncClient(timeout=self.timeout_config.quick) as client:
+                # Test basic connectivity to base URL
+                base_url = self.url.replace("/mcp", "")
+                response = await client.get(f"{base_url}/health", headers=self._get_headers())
+                logger.debug("Health check response: %s", response.status_code)
+                return response.status_code < 500  # Accept any non-server-error
+        except Exception as e:
+            logger.debug("Connection health test failed: %s", e)
+            return True  # Don't fail on health check errors

     async def initialize(self) -> bool:
-        """Initialize using chuk-mcp http_client (following SSE pattern)."""
-        if not HAS_HTTP_SUPPORT:
-            logger.error("HTTP Streamable transport not available in chuk-mcp")
-            return False
-
+        """Initialize with enhanced error handling and health monitoring."""
         if self._initialized:
             logger.warning("Transport already initialized")
             return True
-
+
         start_time = time.time()
-
+
         try:
-            logger.info(f"Initializing HTTP Streamable transport to {self.url}")
-
-            # Create HTTP parameters for chuk-mcp (following SSE pattern)
-            headers = {}
-            if self.api_key:
-                headers["Authorization"] = f"Bearer {self.api_key}"
-                logger.debug("API key configured for authentication")
-
-            if self.session_id:
-                headers["X-Session-ID"] = self.session_id
-                logger.debug(f"Using session ID: {self.session_id}")
-
+            logger.debug("Initializing HTTP Streamable transport to %s", self.url)
+
+            # Test basic connectivity first (like SSE)
+            if not await self._test_connection_health():
+                logger.warning("Connection health test failed, proceeding anyway")
+
+            # Build headers properly
+            headers = self._get_headers()
+            logger.debug("Using headers: %s", list(headers.keys()))
+
+            # Create StreamableHTTPParameters with minimal configuration
+            # NOTE: Keep params minimal - extra params can break message routing
             http_params = StreamableHTTPParameters(
                 url=self.url,
-                timeout=self.connection_timeout,
+                timeout=self.default_timeout,
                 headers=headers,
-                bearer_token=self.api_key,
-                session_id=self.session_id,
-                enable_streaming=True,  # Enable SSE streaming when available
-                max_concurrent_requests=10
+                enable_streaming=True,
             )
-
-            # Create and enter the HTTP context (same pattern as SSE)
-            self._http_context = http_client(http_params)
-
-            logger.debug("Establishing HTTP connection and MCP handshake...")
-            self._read_stream, self._write_stream = await asyncio.wait_for(
-                self._http_context.__aenter__(),
-                timeout=self.connection_timeout
+
+            # Create and store transport (will be managed via async with in parent scope)
+            self._http_transport = ChukHTTPTransport(http_params)
+
+            # IMPORTANT: Must use async with for proper stream setup
+            logger.debug("Establishing HTTP connection...")
+            self._http_context_entered = await asyncio.wait_for(
+                self._http_transport.__aenter__(), timeout=self.connection_timeout
             )
-
-            # At this point, chuk-mcp should have established the HTTP connection
-            # Verify the connection works with a simple ping (same as SSE)
+
+            # Get streams after context entered
+            self._read_stream, self._write_stream = await self._http_transport.get_streams()
+
+            # Give the transport's message handler task time to start
+            await asyncio.sleep(0.1)
+
+            # Enhanced MCP initialize sequence
+            logger.debug("Sending MCP initialize request...")
+            init_start = time.time()
+
+            await asyncio.wait_for(
+                send_initialize(self._read_stream, self._write_stream, timeout=self.default_timeout),
+                timeout=self.default_timeout,
+            )
+
+            init_time = time.time() - init_start
+            logger.debug("MCP initialize completed in %.3fs", init_time)
+
+            # Verify connection with ping (enhanced like SSE)
             logger.debug("Verifying connection with ping...")
             ping_start = time.time()
+            # Use connect timeout for initial ping - some servers (like Notion) are slow
+            ping_timeout = self.timeout_config.connect
             ping_success = await asyncio.wait_for(
-                send_ping(self._read_stream, self._write_stream),
-                timeout=5.0
+                send_ping(self._read_stream, self._write_stream, timeout=ping_timeout),
+                timeout=ping_timeout,
             )
             ping_time = time.time() - ping_start
-
+
             if ping_success:
                 self._initialized = True
-                init_time = time.time() - start_time
-                self._metrics["initialization_time"] = init_time
-                self._metrics["last_ping_time"] = ping_time
-
-                logger.info(f"HTTP Streamable transport initialized successfully in {init_time:.3f}s (ping: {ping_time:.3f}s)")
+                self._last_successful_ping = time.time()
+                self._consecutive_failures = 0
+
+                total_init_time = time.time() - start_time
+                if self.enable_metrics and self._metrics:
+                    self._metrics.initialization_time = total_init_time
+                    self._metrics.last_ping_time = ping_time
+
+                logger.debug(
+                    "HTTP Streamable transport initialized successfully in %.3fs (ping: %.3fs)",
+                    total_init_time,
+                    ping_time,
+                )
                 return True
             else:
-                logger.warning("HTTP connection established but ping failed")
-                # Still consider it initialized since connection was established (same as SSE)
+                logger.debug("HTTP connection established but ping failed")
+                # Still consider it initialized since connection was established
                 self._initialized = True
-                self._metrics["initialization_time"] = time.time() - start_time
+                self._consecutive_failures = 1  # Mark one failure
+                if self.enable_metrics and self._metrics:
+                    self._metrics.initialization_time = time.time() - start_time
                 return True

-        except asyncio.TimeoutError:
-            logger.error(f"HTTP Streamable initialization timed out after {self.connection_timeout}s")
-            logger.error("This may indicate the server is not responding to MCP initialization")
+        except TimeoutError:
+            logger.error("HTTP Streamable initialization timed out after %ss", self.connection_timeout)
             await self._cleanup()
-            return False
+            if self.enable_metrics and self._metrics:
+                self._metrics.connection_errors += 1
+            raise  # Re-raise for OAuth error detection in mcp-cli
         except Exception as e:
-            logger.error(f"Error initializing HTTP Streamable transport: {e}", exc_info=True)
+            logger.error("Error initializing HTTP Streamable transport: %s", e, exc_info=True)
             await self._cleanup()
+            if self.enable_metrics and self._metrics:
+                self._metrics.connection_errors += 1
+            raise  # Re-raise for OAuth error detection in mcp-cli
+
+    async def _attempt_recovery(self) -> bool:
+        """Attempt to recover from connection issues (NEW - like SSE resilience)."""
+        if self.enable_metrics and self._metrics:
+            self._metrics.recovery_attempts += 1
+
+        logger.debug("Attempting HTTP connection recovery...")
+
+        try:
+            # Clean up existing connection
+            await self._cleanup()
+
+            # Re-initialize
+            return await self.initialize()
+        except Exception as e:
+            logger.warning("Recovery attempt failed: %s", e)
             return False

     async def close(self) -> None:
-        """Close the HTTP Streamable transport properly (same pattern as SSE)."""
+        """Close with enhanced cleanup and metrics reporting."""
         if not self._initialized:
             return
-
-        # Log final metrics (enhanced from SSE)
-        if self.enable_metrics and self._metrics["total_calls"] > 0:
-            logger.info(
-                f"HTTP Streamable transport closing - Total calls: {self._metrics['total_calls']}, "
-                f"Success rate: {(self._metrics['successful_calls']/self._metrics['total_calls']*100):.1f}%, "
-                f"Avg response time: {self._metrics['avg_response_time']:.3f}s"
+
+        # Enhanced metrics logging (like SSE)
+        if self.enable_metrics and self._metrics and self._metrics.total_calls > 0:
+            success_rate = self._metrics.successful_calls / self._metrics.total_calls * 100
+            logger.debug(
+                "HTTP Streamable transport closing - Calls: %d, Success: %.1f%%, "
+                "Avg time: %.3fs, Recoveries: %d, Errors: %d",
+                self._metrics.total_calls,
+                success_rate,
+                self._metrics.avg_response_time,
+                self._metrics.recovery_attempts,
+                self._metrics.connection_errors,
             )
-
+
         try:
-            if self._http_context is not None:
-                await self._http_context.__aexit__(None, None, None)
+            if self._http_transport is not None:
+                await self._http_transport.__aexit__(None, None, None)
                 logger.debug("HTTP Streamable context closed")
-
+
         except Exception as e:
-            logger.debug(f"Error during transport close: {e}")
+            logger.debug("Error during transport close: %s", e)
         finally:
             await self._cleanup()

     async def _cleanup(self) -> None:
-        """Clean up internal state (same as SSE)."""
-        self._http_context = None
+        """Enhanced cleanup with state reset."""
+        self._http_transport = None
         self._read_stream = None
         self._write_stream = None
         self._initialized = False

     async def send_ping(self) -> bool:
-        """Send ping with performance tracking (enhanced from SSE)."""
+        """Enhanced ping with health monitoring (like SSE)."""
         if not self._initialized or not self._read_stream:
-            logger.error("Cannot send ping: transport not initialized")
+            logger.debug("Cannot send ping: transport not initialized")
             return False
-
+
         start_time = time.time()
         try:
             result = await asyncio.wait_for(
-                send_ping(self._read_stream, self._write_stream),
-                timeout=self.default_timeout
+                send_ping(self._read_stream, self._write_stream, timeout=self.default_timeout),
+                timeout=self.default_timeout,
             )
-
-            if self.enable_metrics:
+
+            success = bool(result)
+
+            if success:
+                self._last_successful_ping = time.time()
+                self._consecutive_failures = 0
+            else:
+                self._consecutive_failures += 1
+
+            if self.enable_metrics and self._metrics:
                 ping_time = time.time() - start_time
-                self._metrics["last_ping_time"] = ping_time
-                logger.debug(f"Ping completed in {ping_time:.3f}s: {result}")
-
-            return bool(result)
-        except asyncio.TimeoutError:
-            logger.error("Ping timed out")
+                self._metrics.last_ping_time = ping_time
+                logger.debug("HTTP Streamable ping completed in %.3fs: %s", ping_time, success)
+
+            return success
+        except TimeoutError:
+            logger.error("HTTP Streamable ping timed out")
+            self._consecutive_failures += 1
             return False
         except Exception as e:
-            logger.error(f"Ping failed: {e}")
+            logger.error("HTTP Streamable ping failed: %s", e)
+            self._consecutive_failures += 1
+            if self.enable_metrics and self._metrics:
+                self._metrics.stream_errors += 1
             return False

-    async def get_tools(self) -> List[Dict[str, Any]]:
-        """Get tools list with performance tracking (enhanced from SSE)."""
+    def is_connected(self) -> bool:
+        """Enhanced connection status check (like SSE)."""
+        if not self._initialized or not self._read_stream or not self._write_stream:
+            return False
+
+        # Check if we've had too many consecutive failures (like SSE)
+        if self._consecutive_failures >= self._max_consecutive_failures:
+            logger.warning("Connection marked unhealthy after %d failures", self._consecutive_failures)
+            return False
+
+        return True
+
+    async def get_tools(self) -> list[dict[str, Any]]:
+        """Enhanced tools retrieval with error handling."""
         if not self._initialized:
-            logger.error("Cannot get tools: transport not initialized")
+            logger.debug("Cannot get tools: transport not initialized")
             return []
-
+
         start_time = time.time()
         try:
             tools_response = await asyncio.wait_for(
-                send_tools_list(self._read_stream, self._write_stream),
-                timeout=self.default_timeout
+                send_tools_list(self._read_stream, self._write_stream, timeout=self.default_timeout),
+                timeout=self.default_timeout,
             )
-
-            # Normalize response (same as SSE)
-            if isinstance(tools_response, dict):
+
+            # Normalize response - handle multiple formats including Pydantic models
+            # 1. Check if it's a Pydantic model with tools attribute (e.g., ListToolsResult from chuk_mcp)
+            if hasattr(tools_response, "tools"):
+                tools = tools_response.tools
+                # Convert Pydantic Tool models to dicts if needed
+                if tools and len(tools) > 0 and hasattr(tools[0], "model_dump"):
+                    tools = [t.model_dump() for t in tools]
+                elif tools and len(tools) > 0 and hasattr(tools[0], "dict"):
+                    tools = [t.dict() for t in tools]
+            # 2. Check if it's a dict with "tools" key
+            elif isinstance(tools_response, dict):
                 tools = tools_response.get("tools", [])
+            # 3. Check if it's already a list
             elif isinstance(tools_response, list):
                 tools = tools_response
             else:
-                logger.warning(f"Unexpected tools response type: {type(tools_response)}")
+                logger.warning("Unexpected tools response type: %s", type(tools_response))
                 tools = []
-
+
+            # Reset failure count on success
+            self._consecutive_failures = 0
+
             if self.enable_metrics:
                 response_time = time.time() - start_time
-                logger.debug(f"Retrieved {len(tools)} tools in {response_time:.3f}s")
-
+                logger.debug("Retrieved %d tools in %.3fs", len(tools), response_time)
+
             return tools
-
-        except asyncio.TimeoutError:
+
+        except TimeoutError:
             logger.error("Get tools timed out")
+            self._consecutive_failures += 1
             return []
         except Exception as e:
-            logger.error(f"Error getting tools: {e}")
+            logger.error("Error getting tools: %s", e)
+            self._consecutive_failures += 1
+            if self.enable_metrics and self._metrics:
+                self._metrics.stream_errors += 1
             return []

-    async def call_tool(self, tool_name: str, arguments: Dict[str, Any],
-                        timeout: Optional[float] = None) -> Dict[str, Any]:
-        """Call tool with enhanced performance tracking and error handling."""
+    async def call_tool(
+        self, tool_name: str, arguments: dict[str, Any], timeout: float | None = None
+    ) -> dict[str, Any]:
+        """Enhanced tool calling with recovery and health monitoring."""
         if not self._initialized:
-            return {
-                "isError": True,
-                "error": "Transport not initialized"
-            }
+            return {"isError": True, "error": "Transport not initialized"}

         tool_timeout = timeout or self.default_timeout
         start_time = time.time()
-
-        if self.enable_metrics:
-            self._metrics["total_calls"] += 1
+
+        if self.enable_metrics and self._metrics:
+            self._metrics.total_calls += 1

         try:
-            logger.debug(f"Calling tool '{tool_name}' with timeout {tool_timeout}s")
-
+            logger.debug("Calling tool '%s' with timeout %ss", tool_name, tool_timeout)
+
+            # Enhanced connection check with recovery attempt
+            if not self.is_connected():
+                logger.warning("Connection unhealthy, attempting recovery...")
+                if not await self._attempt_recovery():
+                    if self.enable_metrics:
+                        self._update_metrics(time.time() - start_time, False)
+                    return {"isError": True, "error": "Failed to recover connection"}
+
             raw_response = await asyncio.wait_for(
-                send_tools_call(
-                    self._read_stream,
-                    self._write_stream,
-                    tool_name,
-                    arguments
-                ),
-                timeout=tool_timeout
+                send_tools_call(self._read_stream, self._write_stream, tool_name, arguments), timeout=tool_timeout
             )
-
+
             response_time = time.time() - start_time
-            result = self._normalize_tool_response(raw_response)
-
+            result = self._normalize_mcp_response(raw_response)
+
+            # NEW: Check for OAuth errors and attempt refresh if callback is available
+            if result.get("isError", False) and self._is_oauth_error(result.get("error", "")):
+                logger.warning("OAuth error detected: %s", result.get("error"))
+
+                if self.oauth_refresh_callback:
+                    logger.debug("Attempting OAuth token refresh...")
+                    try:
+                        # Call the refresh callback
+                        new_headers = await self.oauth_refresh_callback()
+
+                        if new_headers and "Authorization" in new_headers:
+                            # Update configured headers with new token
+                            self.configured_headers.update(new_headers)
+                            logger.debug("OAuth token refreshed, reconnecting...")
+
+                            # Reconnect with new token
+                            if await self._attempt_recovery():
+                                logger.debug("Retrying tool call after token refresh...")
+                                # Retry the tool call once with new token
+                                raw_response = await asyncio.wait_for(
+                                    send_tools_call(self._read_stream, self._write_stream, tool_name, arguments),
+                                    timeout=tool_timeout,
+                                )
+                                result = self._normalize_mcp_response(raw_response)
+                                logger.debug("Tool call retry completed")
+                            else:
+                                logger.error("Failed to reconnect after token refresh")
+                        else:
+                            logger.warning("Token refresh did not return valid Authorization header")
+                    except Exception as refresh_error:
+                        logger.error("OAuth token refresh failed: %s", refresh_error)
+                else:
+                    logger.warning("OAuth error detected but no refresh callback configured")
+
+            # Reset failure count on success
+            if not result.get("isError", False):
+                self._consecutive_failures = 0
+                self._last_successful_ping = time.time()  # Update health timestamp
+
             if self.enable_metrics:
                 self._update_metrics(response_time, not result.get("isError", False))
-
+
             if not result.get("isError", False):
-                logger.debug(f"Tool '{tool_name}' completed successfully in {response_time:.3f}s")
+                logger.debug("Tool '%s' completed successfully in %.3fs", tool_name, response_time)
             else:
-                logger.warning(f"Tool '{tool_name}' failed in {response_time:.3f}s: {result.get('error', 'Unknown error')}")
-
+                logger.warning(
+                    "Tool '%s' failed in %.3fs: %s", tool_name, response_time, result.get("error", "Unknown error")
+                )
+
             return result

-        except asyncio.TimeoutError:
+        except TimeoutError:
             response_time = time.time() - start_time
+            self._consecutive_failures += 1
             if self.enable_metrics:
                 self._update_metrics(response_time, False)
-
+
             error_msg = f"Tool execution timed out after {tool_timeout}s"
-            logger.error(f"Tool '{tool_name}' {error_msg}")
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            logger.error("Tool '%s' %s", tool_name, error_msg)
+            return {"isError": True, "error": error_msg}
         except Exception as e:
             response_time = time.time() - start_time
-            if self.enable_metrics:
+            self._consecutive_failures += 1
+            if self.enable_metrics and self._metrics:
                 self._update_metrics(response_time, False)
-
+                self._metrics.stream_errors += 1
+
+            # Enhanced connection error detection
+            error_str = str(e).lower()
+            if any(indicator in error_str for indicator in ["connection", "disconnected", "broken pipe", "eof"]):
+                logger.warning("Connection error detected: %s", e)
+                self._initialized = False
+                if self.enable_metrics and self._metrics:
+                    self._metrics.connection_errors += 1
+
             error_msg = f"Tool execution failed: {str(e)}"
-            logger.error(f"Tool '{tool_name}' error: {error_msg}")
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+            logger.error("Tool '%s' error: %s", tool_name, error_msg)
+            return {"isError": True, "error": error_msg}

     def _update_metrics(self, response_time: float, success: bool) -> None:
-        """Update performance metrics (new feature)."""
-        if success:
-            self._metrics["successful_calls"] += 1
-        else:
-            self._metrics["failed_calls"] += 1
-
-        self._metrics["total_time"] += response_time
-        self._metrics["avg_response_time"] = (
-            self._metrics["total_time"] / self._metrics["total_calls"]
-        )
+        """Enhanced metrics tracking (like SSE)."""
+        if not self._metrics:
+            return

-    async def list_resources(self) -> Dict[str, Any]:
-        """List resources using chuk-mcp (same as SSE)."""
-        if not HAS_RESOURCES_PROMPTS:
-            logger.debug("Resources/prompts not available in chuk-mcp")
-            return {}
-
+        self._metrics.update_call_metrics(response_time, success)
+
+    def _is_oauth_error(self, error_msg: str) -> bool:
+        """
+        Detect if error is OAuth-related per RFC 6750 and MCP OAuth spec.
+
+        Checks for:
+        - RFC 6750 Section 3.1 Bearer token errors (invalid_token, insufficient_scope)
+        - OAuth 2.1 token refresh errors (invalid_grant)
+        - MCP spec OAuth validation failures (401/403 responses)
+        """
+        if not error_msg:
+            return False
+
+        error_lower = error_msg.lower()
+        oauth_indicators = [
+            # RFC 6750 Section 3.1 - Standard Bearer token errors
+            "invalid_token",  # Token expired, revoked, malformed, or invalid
+            "insufficient_scope",  # Request requires higher privileges (403 Forbidden)
+            # OAuth 2.1 token refresh errors
+            "invalid_grant",  # Refresh token errors
+            # MCP spec - OAuth validation failures (401 Unauthorized)
+            "oauth validation",
+            "unauthorized",
+            # Common OAuth error descriptions
+            "expired token",
+            "token expired",
+            "authentication failed",
+            "invalid access token",
+        ]
+
+        return any(indicator in error_lower for indicator in oauth_indicators)
+
+    async def list_resources(self) -> dict[str, Any]:
+        """Enhanced resource listing with error handling."""
         if not self._initialized:
             return {}
-
+
         try:
             response = await asyncio.wait_for(
-                send_resources_list(self._read_stream, self._write_stream),
-                timeout=self.default_timeout
+                send_resources_list(self._read_stream, self._write_stream), timeout=self.default_timeout
             )
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("List resources timed out")
+            self._consecutive_failures += 1
             return {}
         except Exception as e:
-            logger.debug(f"Error listing resources: {e}")
+            logger.debug("Error listing resources: %s", e)
+            self._consecutive_failures += 1
             return {}

-    async def list_prompts(self) -> Dict[str, Any]:
-        """List prompts using chuk-mcp (same as SSE)."""
-        if not HAS_RESOURCES_PROMPTS:
-            logger.debug("Resources/prompts not available in chuk-mcp")
-            return {}
-
+    async def list_prompts(self) -> dict[str, Any]:
+        """Enhanced prompt listing with error handling."""
         if not self._initialized:
             return {}
-
+
         try:
             response = await asyncio.wait_for(
-                send_prompts_list(self._read_stream, self._write_stream),
-                timeout=self.default_timeout
+                send_prompts_list(self._read_stream, self._write_stream), timeout=self.default_timeout
             )
             return response if isinstance(response, dict) else {}
-        except asyncio.TimeoutError:
+        except TimeoutError:
             logger.error("List prompts timed out")
+            self._consecutive_failures += 1
             return {}
         except Exception as e:
-            logger.debug(f"Error listing prompts: {e}")
+            logger.debug("Error listing prompts: %s", e)
+            self._consecutive_failures += 1
             return {}

-    def _normalize_tool_response(self, raw_response: Dict[str, Any]) -> Dict[str, Any]:
-        """Normalize response for backward compatibility (same as SSE)."""
-        # Handle explicit error in response
-        if "error" in raw_response:
-            error_info = raw_response["error"]
-            if isinstance(error_info, dict):
-                error_msg = error_info.get("message", "Unknown error")
-            else:
-                error_msg = str(error_info)
-
-            return {
-                "isError": True,
-                "error": error_msg
-            }
+    async def read_resource(self, uri: str) -> dict[str, Any]:
+        """Read a specific resource."""
+        if not self._initialized:
+            return {}

-        # Handle successful response with result
-        if "result" in raw_response:
-            result = raw_response["result"]
-
-            if isinstance(result, dict) and "content" in result:
-                return {
-                    "isError": False,
-                    "content": self._extract_content(result["content"])
-                }
-            else:
-                return {
-                    "isError": False,
-                    "content": result
-                }
-
-        # Handle direct content-based response
-        if "content" in raw_response:
-            return {
-                "isError": False,
-                "content": self._extract_content(raw_response["content"])
+        try:
+            response = await asyncio.wait_for(
+                send_resources_read(self._read_stream, self._write_stream, uri), timeout=self.default_timeout
+            )
+            return response if isinstance(response, dict) else {}
+        except TimeoutError:
+            logger.error("Read resource timed out")
+            self._consecutive_failures += 1
+            return {}
+        except Exception as e:
+            logger.debug("Error reading resource: %s", e)
+            self._consecutive_failures += 1
+            return {}
+
+    async def get_prompt(self, name: str, arguments: dict[str, Any] | None = None) -> dict[str, Any]:
+        """Get a specific prompt."""
+        if not self._initialized:
+            return {}
+
+        try:
+            response = await asyncio.wait_for(
+                send_prompts_get(self._read_stream, self._write_stream, name, arguments or {}),
+                timeout=self.default_timeout,
+            )
+            return response if isinstance(response, dict) else {}
+        except TimeoutError:
+            logger.error("Get prompt timed out")
+            self._consecutive_failures += 1
+            return {}
+        except Exception as e:
+            logger.debug("Error getting prompt: %s", e)
+            self._consecutive_failures += 1
+            return {}
+
+    def get_metrics(self) -> dict[str, Any]:
+        """Enhanced metrics with health information."""
+        if not self._metrics:
+            return {}
+
+        metrics = self._metrics.to_dict()
+        metrics.update(
+            {
+                "is_connected": self.is_connected(),
+                "consecutive_failures": self._consecutive_failures,
+                "last_successful_ping": self._last_successful_ping,
+                "max_consecutive_failures": self._max_consecutive_failures,
             }
+        )
+        return metrics

-        # Fallback
-        return {
-            "isError": False,
-            "content": raw_response
-        }
+    def reset_metrics(self) -> None:
+        """Enhanced metrics reset preserving health state."""
+        if not self._metrics:
+            return

-    def _extract_content(self, content_list: Any) -> Any:
-        """Extract content from MCP content format (same as SSE)."""
-        if not isinstance(content_list, list) or not content_list:
-            return content_list
-
-        # Handle single content item
-        if len(content_list) == 1:
-            content_item = content_list[0]
-            if isinstance(content_item, dict):
-                if content_item.get("type") == "text":
-                    text_content = content_item.get("text", "")
-                    # Try to parse JSON, fall back to plain text
-                    try:
-                        return json.loads(text_content)
-                    except json.JSONDecodeError:
-                        return text_content
-                else:
-                    return content_item
-
-        # Multiple content items
-        return content_list
+        # Preserve important historical values
+        preserved_init_time = self._metrics.initialization_time
+        preserved_last_ping = self._metrics.last_ping_time
+        preserved_resets = self._metrics.connection_resets

-    def get_streams(self) -> List[tuple]:
-        """Provide streams for backward compatibility (same as SSE)."""
+        # Create new metrics instance with preserved values
+        self._metrics = TransportMetrics(
+            initialization_time=preserved_init_time,
+            last_ping_time=preserved_last_ping,
+            connection_resets=preserved_resets,
+        )
+
+    def get_streams(self) -> list[tuple]:
+        """Enhanced streams access with connection check."""
         if self._initialized and self._read_stream and self._write_stream:
             return [(self._read_stream, self._write_stream)]
         return []

-    def is_connected(self) -> bool:
-        """Check connection status (same as SSE)."""
-        return self._initialized and self._read_stream is not None and self._write_stream is not None
-
-    def get_metrics(self) -> Dict[str, Any]:
-        """Get performance metrics (new feature)."""
-        return self._metrics.copy()
-
-    def reset_metrics(self) -> None:
-        """Reset performance metrics (new feature)."""
-        self._metrics = {
-            "total_calls": 0,
-            "successful_calls": 0,
-            "failed_calls": 0,
-            "total_time": 0.0,
-            "avg_response_time": 0.0,
-            "last_ping_time": self._metrics.get("last_ping_time"),
-            "initialization_time": self._metrics.get("initialization_time")
-        }
-
     async def __aenter__(self):
-        """Context manager support (same as SSE)."""
+        """Enhanced context manager entry."""
         success = await self.initialize()
         if not success:
-            raise RuntimeError("Failed to initialize HTTP Streamable transport")
+            raise RuntimeError("Failed to initialize HTTPStreamableTransport")
         return self

     async def __aexit__(self, exc_type, exc_val, exc_tb):
-        """Context manager cleanup (same as SSE)."""
+        """Enhanced context manager cleanup."""
         await self.close()
-
-    def __repr__(self) -> str:
-        """Enhanced string representation for debugging."""
-        status = "initialized" if self._initialized else "not initialized"
-        metrics_info = ""
-        if self.enable_metrics and self._metrics["total_calls"] > 0:
-            success_rate = (self._metrics["successful_calls"] / self._metrics["total_calls"]) * 100
-            metrics_info = f", calls: {self._metrics['total_calls']}, success: {success_rate:.1f}%"
-
-        return f"HTTPStreamableTransport(status={status}, url={self.url}{metrics_info})"