chuk-tool-processor 0.6.11__py3-none-any.whl → 0.6.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of chuk-tool-processor might be problematic.
- chuk_tool_processor/core/__init__.py +1 -1
- chuk_tool_processor/core/exceptions.py +10 -4
- chuk_tool_processor/core/processor.py +97 -97
- chuk_tool_processor/execution/strategies/inprocess_strategy.py +142 -150
- chuk_tool_processor/execution/strategies/subprocess_strategy.py +200 -205
- chuk_tool_processor/execution/tool_executor.py +82 -84
- chuk_tool_processor/execution/wrappers/caching.py +102 -103
- chuk_tool_processor/execution/wrappers/rate_limiting.py +45 -42
- chuk_tool_processor/execution/wrappers/retry.py +23 -25
- chuk_tool_processor/logging/__init__.py +23 -17
- chuk_tool_processor/logging/context.py +40 -45
- chuk_tool_processor/logging/formatter.py +22 -21
- chuk_tool_processor/logging/helpers.py +24 -38
- chuk_tool_processor/logging/metrics.py +11 -13
- chuk_tool_processor/mcp/__init__.py +8 -12
- chuk_tool_processor/mcp/mcp_tool.py +153 -109
- chuk_tool_processor/mcp/register_mcp_tools.py +17 -17
- chuk_tool_processor/mcp/setup_mcp_http_streamable.py +11 -13
- chuk_tool_processor/mcp/setup_mcp_sse.py +11 -13
- chuk_tool_processor/mcp/setup_mcp_stdio.py +7 -9
- chuk_tool_processor/mcp/stream_manager.py +168 -204
- chuk_tool_processor/mcp/transport/__init__.py +4 -4
- chuk_tool_processor/mcp/transport/base_transport.py +43 -58
- chuk_tool_processor/mcp/transport/http_streamable_transport.py +145 -163
- chuk_tool_processor/mcp/transport/sse_transport.py +266 -252
- chuk_tool_processor/mcp/transport/stdio_transport.py +171 -189
- chuk_tool_processor/models/__init__.py +1 -1
- chuk_tool_processor/models/execution_strategy.py +16 -21
- chuk_tool_processor/models/streaming_tool.py +28 -25
- chuk_tool_processor/models/tool_call.py +19 -34
- chuk_tool_processor/models/tool_export_mixin.py +22 -8
- chuk_tool_processor/models/tool_result.py +40 -77
- chuk_tool_processor/models/validated_tool.py +14 -16
- chuk_tool_processor/plugins/__init__.py +1 -1
- chuk_tool_processor/plugins/discovery.py +10 -10
- chuk_tool_processor/plugins/parsers/__init__.py +1 -1
- chuk_tool_processor/plugins/parsers/base.py +1 -2
- chuk_tool_processor/plugins/parsers/function_call_tool.py +13 -8
- chuk_tool_processor/plugins/parsers/json_tool.py +4 -3
- chuk_tool_processor/plugins/parsers/openai_tool.py +12 -7
- chuk_tool_processor/plugins/parsers/xml_tool.py +4 -4
- chuk_tool_processor/registry/__init__.py +12 -12
- chuk_tool_processor/registry/auto_register.py +22 -30
- chuk_tool_processor/registry/decorators.py +127 -129
- chuk_tool_processor/registry/interface.py +26 -23
- chuk_tool_processor/registry/metadata.py +27 -22
- chuk_tool_processor/registry/provider.py +17 -18
- chuk_tool_processor/registry/providers/__init__.py +16 -19
- chuk_tool_processor/registry/providers/memory.py +18 -25
- chuk_tool_processor/registry/tool_export.py +42 -51
- chuk_tool_processor/utils/validation.py +15 -16
- {chuk_tool_processor-0.6.11.dist-info → chuk_tool_processor-0.6.13.dist-info}/METADATA +1 -1
- chuk_tool_processor-0.6.13.dist-info/RECORD +60 -0
- chuk_tool_processor-0.6.11.dist-info/RECORD +0 -60
- {chuk_tool_processor-0.6.11.dist-info → chuk_tool_processor-0.6.13.dist-info}/WHEEL +0 -0
- {chuk_tool_processor-0.6.11.dist-info → chuk_tool_processor-0.6.13.dist-info}/top_level.txt +0 -0
chuk_tool_processor/mcp/transport/sse_transport.py

@@ -2,17 +2,19 @@
 """
 SSE transport for MCP communication.
 
-FIXED:
-The SSE endpoint works perfectly, so we
+FIXED: Improved health monitoring to avoid false unhealthy states.
+The SSE endpoint works perfectly, so we need more lenient health checks.
 """
+
 from __future__ import annotations
 
 import asyncio
+import contextlib
 import json
+import logging
 import time
 import uuid
-from typing import
-import logging
+from typing import Any
 
 import httpx
 
@@ -24,47 +26,53 @@ logger = logging.getLogger(__name__)
 class SSETransport(MCPBaseTransport):
     """
     SSE transport implementing the MCP protocol over Server-Sent Events.
-
-    FIXED:
+
+    FIXED: More lenient health monitoring to avoid false unhealthy states.
     """
 
-    def __init__(
-
-
-
-
+    def __init__(
+        self,
+        url: str,
+        api_key: str | None = None,
+        headers: dict[str, str] | None = None,
+        connection_timeout: float = 30.0,
+        default_timeout: float = 60.0,
+        enable_metrics: bool = True,
+    ):
         """
         Initialize SSE transport.
         """
-        self.url = url.rstrip(
+        self.url = url.rstrip("/")
         self.api_key = api_key
         self.configured_headers = headers or {}
         self.connection_timeout = connection_timeout
         self.default_timeout = default_timeout
         self.enable_metrics = enable_metrics
-
+
         logger.debug("SSE Transport initialized with URL: %s", self.url)
-
+
         # Connection state
         self.session_id = None
         self.message_url = None
-        self.pending_requests:
+        self.pending_requests: dict[str, asyncio.Future] = {}
        self._initialized = False
-
+
         # HTTP clients
         self.stream_client = None
         self.send_client = None
-
+
         # SSE stream management
         self.sse_task = None
         self.sse_response = None
         self.sse_stream_context = None
-
-        #
+
+        # FIXED: More lenient health monitoring
         self._last_successful_ping = None
         self._consecutive_failures = 0
-        self._max_consecutive_failures = 3
-
+        self._max_consecutive_failures = 5  # INCREASED: was 3, now 5
+        self._connection_grace_period = 30.0  # NEW: Grace period after initialization
+        self._initialization_time = None  # NEW: Track when we initialized
+
         # Performance metrics
         self._metrics = {
             "total_calls": 0,
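
For orientation, a minimal construction sketch against the new __init__ signature above; the import path is inferred from the wheel's file layout, and the URL and API key are placeholders rather than anything shipped in this release.

import asyncio

from chuk_tool_processor.mcp.transport.sse_transport import SSETransport  # assumed import path


async def main() -> None:
    transport = SSETransport(
        url="https://mcp.example.com",   # placeholder server URL
        api_key="sk-example",            # sent as "Authorization: Bearer ..."
        connection_timeout=30.0,         # defaults mirror the new signature
        default_timeout=60.0,
        enable_metrics=True,
    )
    if await transport.initialize():
        tools = await transport.get_tools()
        print(f"{len(tools)} tools available")
        await transport.close()


asyncio.run(main())
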
@@ -75,43 +83,43 @@ class SSETransport(MCPBaseTransport):
             "last_ping_time": None,
             "initialization_time": None,
             "session_discoveries": 0,
-            "stream_errors": 0
+            "stream_errors": 0,
         }
 
     def _construct_sse_url(self, base_url: str) -> str:
         """Construct the SSE endpoint URL from the base URL."""
-        base_url = base_url.rstrip(
-
-        if base_url.endswith(
+        base_url = base_url.rstrip("/")
+
+        if base_url.endswith("/sse"):
             logger.debug("URL already contains /sse endpoint: %s", base_url)
             return base_url
-
+
         sse_url = f"{base_url}/sse"
         logger.debug("Constructed SSE URL: %s -> %s", base_url, sse_url)
         return sse_url
 
-    def _get_headers(self) ->
+    def _get_headers(self) -> dict[str, str]:
         """Get headers with authentication and custom headers."""
         headers = {
-
-
-
+            "User-Agent": "chuk-tool-processor/1.0.0",
+            "Accept": "text/event-stream",
+            "Cache-Control": "no-cache",
         }
-
+
         # Add configured headers first
         if self.configured_headers:
             headers.update(self.configured_headers)
-
+
         # Add API key as Bearer token if provided
         if self.api_key:
-            headers[
-
+            headers["Authorization"] = f"Bearer {self.api_key}"
+
         return headers
 
     async def _test_gateway_connectivity(self) -> bool:
         """
         Skip connectivity test - we know the SSE endpoint works.
-
+
         FIXED: The diagnostic proves SSE endpoint works perfectly.
         No need to test base URL that causes 401 errors.
         """
@@ -119,63 +127,58 @@
         return True
 
     async def initialize(self) -> bool:
-        """Initialize SSE connection."""
+        """Initialize SSE connection with improved health tracking."""
         if self._initialized:
             logger.warning("Transport already initialized")
             return True
-
+
         start_time = time.time()
-
+
         try:
             logger.debug("Initializing SSE transport...")
-
+
             # FIXED: Skip problematic connectivity test
             if not await self._test_gateway_connectivity():
                 logger.error("Gateway connectivity test failed")
                 return False
-
+
             # Create HTTP clients
             self.stream_client = httpx.AsyncClient(
                 timeout=httpx.Timeout(self.connection_timeout),
                 follow_redirects=True,
-                limits=httpx.Limits(max_connections=10, max_keepalive_connections=5)
+                limits=httpx.Limits(max_connections=10, max_keepalive_connections=5),
             )
             self.send_client = httpx.AsyncClient(
                 timeout=httpx.Timeout(self.default_timeout),
                 follow_redirects=True,
-                limits=httpx.Limits(max_connections=10, max_keepalive_connections=5)
+                limits=httpx.Limits(max_connections=10, max_keepalive_connections=5),
             )
-
+
             # Connect to SSE stream
             sse_url = self._construct_sse_url(self.url)
             logger.debug("Connecting to SSE endpoint: %s", sse_url)
-
-            self.sse_stream_context = self.stream_client.stream(
-                'GET', sse_url, headers=self._get_headers()
-            )
+
+            self.sse_stream_context = self.stream_client.stream("GET", sse_url, headers=self._get_headers())
             self.sse_response = await self.sse_stream_context.__aenter__()
-
+
             if self.sse_response.status_code != 200:
                 logger.error("SSE connection failed with status: %s", self.sse_response.status_code)
                 await self._cleanup()
                 return False
-
+
             logger.debug("SSE streaming connection established")
-
+
             # Start SSE processing task
-            self.sse_task = asyncio.create_task(
-
-                name="sse_stream_processor"
-            )
-
+            self.sse_task = asyncio.create_task(self._process_sse_stream(), name="sse_stream_processor")
+
             # Wait for session discovery
             logger.debug("Waiting for session discovery...")
             session_timeout = 10.0
             session_start = time.time()
-
+
             while not self.message_url and (time.time() - session_start) < session_timeout:
                 await asyncio.sleep(0.1)
-
+
             # Check if SSE task died
             if self.sse_task.done():
                 exception = self.sse_task.exception()
@@ -183,51 +186,55 @@
                     logger.error(f"SSE task died during session discovery: {exception}")
                     await self._cleanup()
                     return False
-
+
             if not self.message_url:
                 logger.error("Failed to discover session endpoint within %.1fs", session_timeout)
                 await self._cleanup()
                 return False
-
+
             if self.enable_metrics:
                 self._metrics["session_discoveries"] += 1
-
+
             logger.debug("Session endpoint discovered: %s", self.message_url)
-
+
             # Perform MCP initialization handshake
             try:
-                init_response = await self._send_request(
-                    "
-
-
-                    "
-                    "version": "1.0.0"
-                    }
-
-
-
-
+                init_response = await self._send_request(
+                    "initialize",
+                    {
+                        "protocolVersion": "2024-11-05",
+                        "capabilities": {},
+                        "clientInfo": {"name": "chuk-tool-processor", "version": "1.0.0"},
+                    },
+                    timeout=self.default_timeout,
+                )
+
+                if "error" in init_response:
+                    logger.error("MCP initialize failed: %s", init_response["error"])
                     await self._cleanup()
                     return False
-
+
                 # Send initialized notification
                 await self._send_notification("notifications/initialized")
-
+
+                # FIXED: Set health tracking state
                 self._initialized = True
+                self._initialization_time = time.time()
                 self._last_successful_ping = time.time()
-
+                self._consecutive_failures = 0  # Reset failure count
+
                 if self.enable_metrics:
                     init_time = time.time() - start_time
                     self._metrics["initialization_time"] = init_time
-
+
                 logger.debug("SSE transport initialized successfully in %.3fs", time.time() - start_time)
                 return True
-
+
             except Exception as e:
                 logger.error("MCP handshake failed: %s", e)
                 await self._cleanup()
                 return False
-
+
         except Exception as e:
             logger.error("Error initializing SSE transport: %s", e, exc_info=True)
             await self._cleanup()
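
The handshake above issues a standard MCP initialize request and then a notifications/initialized notification over the discovered message endpoint; given the request builder shown later in _send_request, the payloads look roughly like this (a sketch; the request id is a fresh UUID).

initialize_request = {
    "jsonrpc": "2.0",
    "id": "<uuid4>",
    "method": "initialize",
    "params": {
        "protocolVersion": "2024-11-05",
        "capabilities": {},
        "clientInfo": {"name": "chuk-tool-processor", "version": "1.0.0"},
    },
}

initialized_notification = {
    "jsonrpc": "2.0",
    "method": "notifications/initialized",
    "params": {},
}
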
@@ -237,243 +244,248 @@
         """Process the SSE stream for responses and session discovery."""
         try:
             logger.debug("Starting SSE stream processing...")
-
+
             current_event = None
-
+
             async for line in self.sse_response.aiter_lines():
                 line = line.strip()
                 if not line:
                     continue
-
+
                 # Handle event type declarations
-                if line.startswith(
-                    current_event = line.split(
+                if line.startswith("event:"):
+                    current_event = line.split(":", 1)[1].strip()
                     logger.debug("SSE event type: %s", current_event)
                     continue
-
+
                 # Handle session endpoint discovery
-                if not self.message_url and line.startswith(
-                    data_part = line.split(
-
+                if not self.message_url and line.startswith("data:"):
+                    data_part = line.split(":", 1)[1].strip()
+
                     # NEW FORMAT: event: endpoint + data: https://...
-                    if current_event == "endpoint" and data_part.startswith(
+                    if current_event == "endpoint" and data_part.startswith("http"):
                         self.message_url = data_part
-
+
                         # Extract session ID from URL if present
-                        if
-                            self.session_id = data_part.split(
+                        if "session_id=" in data_part:
+                            self.session_id = data_part.split("session_id=")[1].split("&")[0]
                         else:
                             self.session_id = str(uuid.uuid4())
-
+
                         logger.debug("Session endpoint discovered via event format: %s", self.message_url)
                         continue
-
+
                     # OLD FORMAT: data: /messages/... (backwards compatibility)
-                    elif
+                    elif "/messages/" in data_part:
                         endpoint_path = data_part
                         self.message_url = f"{self.url}{endpoint_path}"
-
+
                         # Extract session ID if present
-                        if
-                            self.session_id = endpoint_path.split(
+                        if "session_id=" in endpoint_path:
+                            self.session_id = endpoint_path.split("session_id=")[1].split("&")[0]
                         else:
                             self.session_id = str(uuid.uuid4())
-
+
                         logger.debug("Session endpoint discovered via old format: %s", self.message_url)
                         continue
-
+
                 # Handle JSON-RPC responses
-                if line.startswith(
-                    data_part = line.split(
-
+                if line.startswith("data:"):
+                    data_part = line.split(":", 1)[1].strip()
+
                     # Skip keepalive pings and empty data
-                    if not data_part or data_part.startswith(
+                    if not data_part or data_part.startswith("ping") or data_part in ("{}", "[]"):
                         continue
-
+
                     try:
                         response_data = json.loads(data_part)
-
+
                         # Handle JSON-RPC responses with request IDs
-                        if
-                            request_id = str(response_data[
-
+                        if "jsonrpc" in response_data and "id" in response_data:
+                            request_id = str(response_data["id"])
+
                             # Resolve pending request if found
                             if request_id in self.pending_requests:
                                 future = self.pending_requests.pop(request_id)
                                 if not future.done():
                                     future.set_result(response_data)
                                     logger.debug("Resolved request ID: %s", request_id)
-
+
                     except json.JSONDecodeError as e:
                         logger.debug("Non-JSON data in SSE stream (ignoring): %s", e)
-
+
         except Exception as e:
             if self.enable_metrics:
                 self._metrics["stream_errors"] += 1
             logger.error("SSE stream processing error: %s", e)
-
+            # FIXED: Don't increment consecutive failures for stream processing errors
+            # These are often temporary and don't indicate connection health
 
-    async def _send_request(
-
+    async def _send_request(
+        self, method: str, params: dict[str, Any] = None, timeout: float | None = None
+    ) -> dict[str, Any]:
         """Send JSON-RPC request and wait for async response via SSE."""
         if not self.message_url:
             raise RuntimeError("SSE transport not connected - no message URL")
-
+
         request_id = str(uuid.uuid4())
-        message = {
-
-            "id": request_id,
-            "method": method,
-            "params": params or {}
-        }
-
+        message = {"jsonrpc": "2.0", "id": request_id, "method": method, "params": params or {}}
+
         # Create future for async response
         future = asyncio.Future()
         self.pending_requests[request_id] = future
-
+
         try:
             # Send HTTP POST request
-            headers = {
-
-
-
-
-            response = await self.send_client.post(
-                self.message_url,
-                headers=headers,
-                json=message
-            )
-
+            headers = {"Content-Type": "application/json", **self._get_headers()}
+
+            response = await self.send_client.post(self.message_url, headers=headers, json=message)
+
             if response.status_code == 202:
                 # Async response - wait for result via SSE
                 request_timeout = timeout or self.default_timeout
                 result = await asyncio.wait_for(future, timeout=request_timeout)
-
+                # FIXED: Only reset failures on successful tool calls, not all requests
+                if method.startswith("tools/"):
+                    self._consecutive_failures = 0
+                    self._last_successful_ping = time.time()
                 return result
             elif response.status_code == 200:
                 # Immediate response
                 self.pending_requests.pop(request_id, None)
-
+                # FIXED: Only reset failures on successful tool calls
+                if method.startswith("tools/"):
+                    self._consecutive_failures = 0
+                    self._last_successful_ping = time.time()
                 return response.json()
             else:
                 self.pending_requests.pop(request_id, None)
-
+                # FIXED: Only increment failures for tool calls, not initialization
+                if method.startswith("tools/"):
+                    self._consecutive_failures += 1
                 raise RuntimeError(f"HTTP request failed with status: {response.status_code}")
-
-        except
+
+        except TimeoutError:
             self.pending_requests.pop(request_id, None)
-
+            # FIXED: Only increment failures for tool calls
+            if method.startswith("tools/"):
+                self._consecutive_failures += 1
             raise
         except Exception:
             self.pending_requests.pop(request_id, None)
-
+            # FIXED: Only increment failures for tool calls
+            if method.startswith("tools/"):
+                self._consecutive_failures += 1
             raise
 
-    async def _send_notification(self, method: str, params:
+    async def _send_notification(self, method: str, params: dict[str, Any] = None):
         """Send JSON-RPC notification (no response expected)."""
         if not self.message_url:
             raise RuntimeError("SSE transport not connected - no message URL")
-
-        message = {
-
-
-
-
-
-        headers = {
-            'Content-Type': 'application/json',
-            **self._get_headers()
-        }
-
-        response = await self.send_client.post(
-            self.message_url,
-            headers=headers,
-            json=message
-        )
-
+
+        message = {"jsonrpc": "2.0", "method": method, "params": params or {}}
+
+        headers = {"Content-Type": "application/json", **self._get_headers()}
+
+        response = await self.send_client.post(self.message_url, headers=headers, json=message)
+
         if response.status_code not in (200, 202):
             logger.warning("Notification failed with status: %s", response.status_code)
 
     async def send_ping(self) -> bool:
-        """Send ping to check connection health."""
+        """Send ping to check connection health with improved logic."""
         if not self._initialized:
             return False
-
+
         start_time = time.time()
         try:
             # Use tools/list as a lightweight ping since not all servers support ping
             response = await self._send_request("tools/list", {}, timeout=10.0)
-
-            success =
-
+
+            success = "error" not in response
+
             if success:
                 self._last_successful_ping = time.time()
-
-
+                # FIXED: Don't reset consecutive failures here - let tool calls do that
+
             if self.enable_metrics:
                 ping_time = time.time() - start_time
                 self._metrics["last_ping_time"] = ping_time
                 logger.debug("SSE ping completed in %.3fs: %s", ping_time, success)
-
+
             return success
         except Exception as e:
             logger.debug("SSE ping failed: %s", e)
-
+            # FIXED: Don't increment consecutive failures for ping failures
             return False
 
     def is_connected(self) -> bool:
-        """
+        """
+        FIXED: More lenient connection health check.
+
+        The diagnostic shows the connection works fine, so we need to be less aggressive
+        about marking it as unhealthy.
+        """
         if not self._initialized or not self.session_id:
             return False
-
-        #
+
+        # FIXED: Grace period after initialization - always return True for a while
+        if self._initialization_time and time.time() - self._initialization_time < self._connection_grace_period:
+            logger.debug("Within grace period - connection considered healthy")
+            return True
+
+        # FIXED: More lenient failure threshold
         if self._consecutive_failures >= self._max_consecutive_failures:
-            logger.warning(f"Connection marked unhealthy after {self._consecutive_failures} failures")
+            logger.warning(f"Connection marked unhealthy after {self._consecutive_failures} consecutive failures")
             return False
-
+
         # Check if SSE task is still running
         if self.sse_task and self.sse_task.done():
            exception = self.sse_task.exception()
            if exception:
                logger.warning(f"SSE task died: {exception}")
                return False
-
+
+        # FIXED: If we have a recent successful ping/tool call, we're healthy
+        if self._last_successful_ping and time.time() - self._last_successful_ping < 60.0:  # Success within last minute
+            return True
+
+        # FIXED: Default to healthy if no clear indicators of problems
+        logger.debug("No clear health indicators - defaulting to healthy")
         return True
 
-    async def get_tools(self) ->
+    async def get_tools(self) -> list[dict[str, Any]]:
         """Get list of available tools from the server."""
         if not self._initialized:
             logger.error("Cannot get tools: transport not initialized")
             return []
-
+
         start_time = time.time()
         try:
             response = await self._send_request("tools/list", {})
-
-            if
-                logger.error("Error getting tools: %s", response[
+
+            if "error" in response:
+                logger.error("Error getting tools: %s", response["error"])
                 return []
-
-            tools = response.get(
-
+
+            tools = response.get("result", {}).get("tools", [])
+
             if self.enable_metrics:
                 response_time = time.time() - start_time
                 logger.debug("Retrieved %d tools in %.3fs", len(tools), response_time)
-
+
             return tools
-
+
         except Exception as e:
             logger.error("Error getting tools: %s", e)
             return []
 
-    async def call_tool(
-
+    async def call_tool(
+        self, tool_name: str, arguments: dict[str, Any], timeout: float | None = None
+    ) -> dict[str, Any]:
         """Execute a tool with the given arguments."""
         if not self._initialized:
-            return {
-                "isError": True,
-                "error": "Transport not initialized"
-            }
+            return {"isError": True, "error": "Transport not initialized"}
 
         start_time = time.time()
         if self.enable_metrics:
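
The net effect of the health-tracking changes in this hunk is a much more lenient is_connected(); as a standalone paraphrase of its decision order (a sketch, not the shipped method):

import time


def is_healthy(
    initialized: bool,
    session_id: str | None,
    initialization_time: float | None,
    last_success: float | None,
    consecutive_failures: int,
    sse_task_failed: bool,
    grace_period: float = 30.0,
    max_failures: int = 5,
) -> bool:
    """Paraphrase of the new SSETransport.is_connected() decision order."""
    if not initialized or not session_id:
        return False
    # Grace period: shortly after initialization the connection is always treated as healthy.
    if initialization_time and time.time() - initialization_time < grace_period:
        return True
    # Only repeated tool-call failures (threshold raised from 3 to 5) mark it unhealthy.
    if consecutive_failures >= max_failures:
        return False
    # A dead SSE reader task is a hard failure.
    if sse_task_failed:
        return False
    # Any ping or tool-call success within the last minute keeps it healthy.
    if last_success and time.time() - last_success < 60.0:
        return True
    # Otherwise default to healthy rather than flapping to unhealthy.
    return True
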
@@ -481,51 +493,37 @@ class SSETransport(MCPBaseTransport):
 
         try:
             logger.debug("Calling tool '%s' with arguments: %s", tool_name, arguments)
-
+
             response = await self._send_request(
-                "tools/call",
-                {
-                    "name": tool_name,
-                    "arguments": arguments
-                },
-                timeout=timeout
+                "tools/call", {"name": tool_name, "arguments": arguments}, timeout=timeout
             )
-
-            if
+
+            if "error" in response:
                 if self.enable_metrics:
                     self._update_metrics(time.time() - start_time, False)
-
-                return {
-
-                    "error": response['error'].get('message', 'Unknown error')
-                }
-
+
+                return {"isError": True, "error": response["error"].get("message", "Unknown error")}
+
             # Extract and normalize result using base class method
-            result = response.get(
+            result = response.get("result", {})
             normalized_result = self._normalize_mcp_response({"result": result})
-
+
             if self.enable_metrics:
                 self._update_metrics(time.time() - start_time, True)
-
+
             return normalized_result
-
-        except
+
+        except TimeoutError:
             if self.enable_metrics:
                 self._update_metrics(time.time() - start_time, False)
-
-            return {
-                "isError": True,
-                "error": "Tool execution timed out"
-            }
+
+            return {"isError": True, "error": "Tool execution timed out"}
         except Exception as e:
             if self.enable_metrics:
                 self._update_metrics(time.time() - start_time, False)
-
+
             logger.error("Error calling tool '%s': %s", tool_name, e)
-            return {
-                "isError": True,
-                "error": str(e)
-            }
+            return {"isError": True, "error": str(e)}
 
     def _update_metrics(self, response_time: float, success: bool) -> None:
         """Update performance metrics."""
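
Callers see the same {"isError": ..., "error": ...} shape on every failure path above; a small consumption sketch (the "echo" tool and its arguments are hypothetical):

async def run_echo(transport) -> None:
    result = await transport.call_tool("echo", {"text": "hi"}, timeout=30.0)
    if result.get("isError"):
        print("tool failed:", result.get("error"))
    else:
        print("tool result:", result)
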
@@ -533,39 +531,37 @@ class SSETransport(MCPBaseTransport):
             self._metrics["successful_calls"] += 1
         else:
             self._metrics["failed_calls"] += 1
-
+
         self._metrics["total_time"] += response_time
         if self._metrics["total_calls"] > 0:
-            self._metrics["avg_response_time"] =
-                self._metrics["total_time"] / self._metrics["total_calls"]
-            )
+            self._metrics["avg_response_time"] = self._metrics["total_time"] / self._metrics["total_calls"]
 
-    async def list_resources(self) ->
+    async def list_resources(self) -> dict[str, Any]:
         """List available resources from the server."""
         if not self._initialized:
             return {}
-
+
         try:
             response = await self._send_request("resources/list", {}, timeout=10.0)
-            if
-                logger.debug("Resources not supported: %s", response[
+            if "error" in response:
+                logger.debug("Resources not supported: %s", response["error"])
                 return {}
-            return response.get(
+            return response.get("result", {})
         except Exception as e:
             logger.debug("Error listing resources: %s", e)
             return {}
 
-    async def list_prompts(self) ->
+    async def list_prompts(self) -> dict[str, Any]:
         """List available prompts from the server."""
         if not self._initialized:
             return {}
-
+
         try:
             response = await self._send_request("prompts/list", {}, timeout=10.0)
-            if
-                logger.debug("Prompts not supported: %s", response[
+            if "error" in response:
+                logger.debug("Prompts not supported: %s", response["error"])
                 return {}
-            return response.get(
+            return response.get("result", {})
         except Exception as e:
             logger.debug("Error listing prompts: %s", e)
             return {}
@@ -574,16 +570,16 @@ class SSETransport(MCPBaseTransport):
         """Close the transport and clean up resources."""
         if not self._initialized:
             return
-
+
         # Log final metrics
         if self.enable_metrics and self._metrics["total_calls"] > 0:
             logger.debug(
                 "SSE transport closing - Total calls: %d, Success rate: %.1f%%, Avg response time: %.3fs",
                 self._metrics["total_calls"],
                 (self._metrics["successful_calls"] / self._metrics["total_calls"] * 100),
-                self._metrics["avg_response_time"]
+                self._metrics["avg_response_time"],
             )
-
+
         await self._cleanup()
 
     async def _cleanup(self) -> None:
@@ -591,30 +587,28 @@ class SSETransport(MCPBaseTransport):
         # Cancel SSE processing task
         if self.sse_task and not self.sse_task.done():
             self.sse_task.cancel()
-
+            with contextlib.suppress(asyncio.CancelledError):
                 await self.sse_task
-
-                pass
-
+
         # Close SSE stream context
         if self.sse_stream_context:
             try:
                 await self.sse_stream_context.__aexit__(None, None, None)
             except Exception as e:
                 logger.debug("Error closing SSE stream: %s", e)
-
+
         # Close HTTP clients
         if self.stream_client:
             await self.stream_client.aclose()
-
+
         if self.send_client:
             await self.send_client.aclose()
-
+
         # Cancel any pending requests
-        for
+        for _request_id, future in self.pending_requests.items():
             if not future.done():
                 future.cancel()
-
+
         # Reset state
         self._initialized = False
         self.session_id = None
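
The cleanup change above replaces a try/except/pass around task cancellation with contextlib.suppress; the two spellings are equivalent (sketch):

import asyncio
import contextlib


async def cancel_task(task: asyncio.Task) -> None:
    # Same behaviour as: try: await task / except asyncio.CancelledError: pass
    task.cancel()
    with contextlib.suppress(asyncio.CancelledError):
        await task
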
@@ -625,10 +619,30 @@ class SSETransport(MCPBaseTransport):
         self.sse_stream_context = None
         self.stream_client = None
         self.send_client = None
+        # FIXED: Reset health tracking
+        self._consecutive_failures = 0
+        self._last_successful_ping = None
+        self._initialization_time = None
 
-    def get_metrics(self) ->
-        """Get performance and connection metrics."""
-
+    def get_metrics(self) -> dict[str, Any]:
+        """Get performance and connection metrics with health info."""
+        metrics = self._metrics.copy()
+        metrics.update(
+            {
+                "is_connected": self.is_connected(),
+                "consecutive_failures": self._consecutive_failures,
+                "max_consecutive_failures": self._max_consecutive_failures,
+                "last_successful_ping": self._last_successful_ping,
+                "initialization_time_timestamp": self._initialization_time,
+                "grace_period_active": (
+                    self._initialization_time
+                    and time.time() - self._initialization_time < self._connection_grace_period
+                )
+                if self._initialization_time
+                else False,
+            }
+        )
+        return metrics
 
     def reset_metrics(self) -> None:
         """Reset performance metrics."""
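
get_metrics() now folds the health state into the returned dict alongside the existing counters; continuing the construction sketch earlier (key names taken from the hunk above):

async def report(transport) -> None:
    metrics = transport.get_metrics()
    print(metrics["is_connected"])              # result of the lenient health check
    print(metrics["consecutive_failures"])      # tool-call failures since the last success
    print(metrics["max_consecutive_failures"])  # raised from 3 to 5 in this release
    print(metrics["grace_period_active"])       # True within ~30s of initialization
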
@@ -641,10 +655,10 @@ class SSETransport(MCPBaseTransport):
             "last_ping_time": self._metrics.get("last_ping_time"),
             "initialization_time": self._metrics.get("initialization_time"),
             "session_discoveries": self._metrics.get("session_discoveries", 0),
-            "stream_errors": 0
+            "stream_errors": 0,
         }
 
-    def get_streams(self) ->
+    def get_streams(self) -> list[tuple]:
         """SSE transport doesn't expose raw streams."""
         return []
 
@@ -657,4 +671,4 @@ class SSETransport(MCPBaseTransport):
 
     async def __aexit__(self, exc_type, exc_val, exc_tb):
         """Context manager cleanup."""
-        await self.close()
+        await self.close()