flock-core 0.4.2__py3-none-any.whl → 0.4.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of flock-core might be problematic. See the registry advisory for more details.
- flock/core/__init__.py +11 -0
- flock/core/flock.py +144 -42
- flock/core/flock_agent.py +117 -4
- flock/core/flock_evaluator.py +1 -1
- flock/core/flock_factory.py +290 -2
- flock/core/flock_module.py +101 -0
- flock/core/flock_registry.py +39 -2
- flock/core/flock_server_manager.py +136 -0
- flock/core/logging/telemetry.py +1 -1
- flock/core/mcp/__init__.py +1 -0
- flock/core/mcp/flock_mcp_server.py +614 -0
- flock/core/mcp/flock_mcp_tool_base.py +201 -0
- flock/core/mcp/mcp_client.py +658 -0
- flock/core/mcp/mcp_client_manager.py +201 -0
- flock/core/mcp/mcp_config.py +237 -0
- flock/core/mcp/types/__init__.py +1 -0
- flock/core/mcp/types/callbacks.py +86 -0
- flock/core/mcp/types/factories.py +111 -0
- flock/core/mcp/types/handlers.py +240 -0
- flock/core/mcp/types/types.py +157 -0
- flock/core/mcp/util/__init__.py +0 -0
- flock/core/mcp/util/helpers.py +23 -0
- flock/core/mixin/dspy_integration.py +45 -12
- flock/core/serialization/flock_serializer.py +52 -1
- flock/core/util/spliter.py +4 -0
- flock/evaluators/declarative/declarative_evaluator.py +4 -3
- flock/mcp/servers/sse/__init__.py +1 -0
- flock/mcp/servers/sse/flock_sse_server.py +139 -0
- flock/mcp/servers/stdio/__init__.py +1 -0
- flock/mcp/servers/stdio/flock_stdio_server.py +138 -0
- flock/mcp/servers/websockets/__init__.py +1 -0
- flock/mcp/servers/websockets/flock_websocket_server.py +119 -0
- flock/modules/performance/metrics_module.py +159 -1
- {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/METADATA +278 -64
- {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/RECORD +38 -18
- {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/WHEEL +0 -0
- {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/entry_points.txt +0 -0
- {flock_core-0.4.2.dist-info → flock_core-0.4.5.dist-info}/licenses/LICENSE +0 -0
|
@@ -0,0 +1,658 @@
|
|
|
1
|
+
"""Wrapper Class for a mcp ClientSession Object."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import random
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from asyncio import Lock
|
|
7
|
+
from contextlib import (
|
|
8
|
+
AbstractAsyncContextManager,
|
|
9
|
+
AsyncExitStack,
|
|
10
|
+
)
|
|
11
|
+
from datetime import timedelta
|
|
12
|
+
from typing import (
|
|
13
|
+
Any,
|
|
14
|
+
)
|
|
15
|
+
|
|
16
|
+
import httpx
|
|
17
|
+
from anyio import ClosedResourceError
|
|
18
|
+
from anyio.streams.memory import (
|
|
19
|
+
MemoryObjectReceiveStream,
|
|
20
|
+
MemoryObjectSendStream,
|
|
21
|
+
)
|
|
22
|
+
from cachetools import TTLCache, cached
|
|
23
|
+
from mcp import (
|
|
24
|
+
ClientSession,
|
|
25
|
+
InitializeResult,
|
|
26
|
+
ListToolsResult,
|
|
27
|
+
McpError,
|
|
28
|
+
ServerCapabilities,
|
|
29
|
+
)
|
|
30
|
+
from mcp.types import CallToolResult, JSONRPCMessage
|
|
31
|
+
from opentelemetry import trace
|
|
32
|
+
from pydantic import (
|
|
33
|
+
BaseModel,
|
|
34
|
+
ConfigDict,
|
|
35
|
+
Field,
|
|
36
|
+
)
|
|
37
|
+
|
|
38
|
+
from flock.core.logging.logging import get_logger
|
|
39
|
+
from flock.core.mcp.flock_mcp_tool_base import FlockMCPToolBase
|
|
40
|
+
from flock.core.mcp.mcp_config import FlockMCPConfigurationBase
|
|
41
|
+
from flock.core.mcp.types.factories import (
|
|
42
|
+
default_flock_mcp_list_roots_callback_factory,
|
|
43
|
+
default_flock_mcp_logging_callback_factory,
|
|
44
|
+
default_flock_mcp_message_handler_callback_factory,
|
|
45
|
+
default_flock_mcp_sampling_callback_factory,
|
|
46
|
+
)
|
|
47
|
+
from flock.core.mcp.types.types import (
|
|
48
|
+
FlockListRootsMCPCallback,
|
|
49
|
+
FlockLoggingMCPCallback,
|
|
50
|
+
FlockMessageHandlerMCPCallback,
|
|
51
|
+
FlockSamplingMCPCallback,
|
|
52
|
+
MCPRoot,
|
|
53
|
+
ServerParameters,
|
|
54
|
+
)
|
|
55
|
+
from flock.core.mcp.util.helpers import cache_key_generator
|
|
56
|
+
|
|
57
|
+
# Module-level logger and OpenTelemetry tracer shared by everything in this module.
logger = get_logger("core.mcp.client_base")
tracer = trace.get_tracer(__name__)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class FlockMCPClientBase(BaseModel, ABC):
    """Wrapper for an mcp ClientSession.

    The class attempts to re-establish the connection when a call through
    :pyattr:`session` fails with a transport-level error.  After
    ``max_retries`` unsuccessful attempts the failing call gives up and
    returns ``None``.

    NOTE(review): an earlier docstring referenced ``has_error`` /
    ``error_message`` attributes; they are not defined on this class —
    confirm against callers before relying on them.
    """

    # --- Properties ---
    config: FlockMCPConfigurationBase = Field(
        ..., description="The config for this client instance."
    )

    tool_cache: TTLCache | None = Field(
        default=None,
        exclude=True,
        description="Cache for tools. Excluded from Serialization.",
    )

    tool_result_cache: TTLCache | None = Field(
        default=None,
        exclude=True,
        description="Cache for the result of tool call. Excluded from Serialization.",
    )

    resource_contents_cache: TTLCache | None = Field(
        default=None,
        exclude=True,
        description="Cache for resource contents. Excluded from Serialization.",
    )

    resource_list_cache: TTLCache | None = Field(
        default=None,
        exclude=True,
        description="Cache for Resource Lists. Excluded from Serialization.",
    )

    client_session: ClientSession | None = Field(
        default=None, exclude=True, description="ClientSession Reference."
    )

    connected_server_capabilities: ServerCapabilities | None = Field(
        default=None,
        exclude=True,
        description="Capabilities of the connected server.",
    )

    current_roots: list[MCPRoot] | None = Field(
        default=None, description="Currently used roots of the client."
    )

    lock: Lock = Field(
        default_factory=Lock,
        exclude=True,
        description="Global lock for the client.",
    )

    # Optional because disconnect() clears it to None until the next connect.
    session_stack: AsyncExitStack | None = Field(
        default_factory=AsyncExitStack,
        exclude=True,
        description="Internal AsyncExitStack for session.",
    )

    sampling_callback: FlockSamplingMCPCallback | None = Field(
        default=None, description="Sampling Callback."
    )

    list_roots_callback: FlockListRootsMCPCallback | None = Field(
        default=None, description="List Roots Callback."
    )

    logging_callback: FlockLoggingMCPCallback | None = Field(
        default=None, description="Logging Callback."
    )

    message_handler: FlockMessageHandlerMCPCallback | None = Field(
        default=None, description="MessageHandler Callback."
    )

    additional_params: dict[str, Any] | None = Field(
        default=None,
        description="Additional Parameters for connection. Can be modified using server modules.",
    )

    # Auto-reconnect proxy
    class _SessionProxy:
        """Delegates attribute access to ``client.client_session``, wrapping
        every call with ensure-connected / retry / backoff logic."""

        def __init__(self, client: Any):
            self._client = client

        def __getattr__(self, name: str):
            # return an async function that auto-reconnects, then calls through.
            async def _method(*args, **kwargs):
                with tracer.start_as_current_span(
                    "session_proxy.__getattr__"
                ) as span:
                    client = self._client
                    cfg = client.config
                    max_tries = cfg.connection_config.max_retries or 1
                    base_delay = 0.1
                    span.set_attribute("client.name", client.config.name)
                    span.set_attribute("max_tries", max_tries)
                    span.set_attribute("base_delay", base_delay)

                    for attempt in range(1, max_tries + 2):
                        span.set_attribute("attempt", attempt)
                        await client._ensure_connected()
                        try:
                            # delegate the real session
                            return await getattr(client.client_session, name)(
                                *args, **kwargs
                            )
                        except McpError as e:
                            # only retry on a transport timeout
                            if e.error.code == httpx.codes.REQUEST_TIMEOUT:
                                kind = "timeout"
                            else:
                                # application-level MCP error -> give up immediately
                                logger.error(
                                    f"MCP error in session.{name}: {e.error}"
                                )
                                return None
                        except (BrokenPipeError, ClosedResourceError) as e:
                            kind = type(e).__name__
                            span.record_exception(e)
                        except Exception as e:
                            # anything else is treated as transport failure
                            span.record_exception(e)
                            kind = type(e).__name__

                        # no more retries
                        if attempt > max_tries:
                            logger.error(
                                f"Session.{name} failed after {max_tries} retries ({kind}); giving up."
                            )
                            try:
                                await client.disconnect()
                            except Exception as e:
                                logger.warning(
                                    f"Error tearing down stale session: {e}"
                                )
                                span.record_exception(e)
                            return None

                        # otherwise log + tear down + back off
                        logger.warning(
                            f"Session.{name} attempt {attempt}/{max_tries} failed. ({kind}). Reconnecting."
                        )
                        try:
                            await client.disconnect()
                            await client._connect()
                        except Exception as e:
                            logger.error(f"Reconnect failed: {e}")
                            span.record_exception(e)

                        # Exponential backoff + 10% jitter.
                        # FIX: was `base_delay ** (2 ** (attempt - 1))`, which
                        # *shrinks* the delay (0.1, 0.01, 1e-4, ...) instead of
                        # doubling it each attempt.
                        delay = base_delay * (2 ** (attempt - 1))
                        delay += random.uniform(0, delay * 0.1)
                        await asyncio.sleep(delay)

            return _method

    def __init__(
        self,
        config: FlockMCPConfigurationBase,
        lock: Lock | None = None,
        tool_cache: TTLCache | None = None,
        tool_result_cache: TTLCache | None = None,
        resource_contents_cache: TTLCache | None = None,
        resource_list_cache: TTLCache | None = None,
        client_session: ClientSession | None = None,
        connected_server_capabilities: ServerCapabilities | None = None,
        session_stack: AsyncExitStack | None = None,
        sampling_callback: FlockSamplingMCPCallback | None = None,
        list_roots_callback: FlockListRootsMCPCallback | None = None,
        logging_callback: FlockLoggingMCPCallback | None = None,
        message_handler: FlockMessageHandlerMCPCallback | None = None,
        current_roots: list[MCPRoot] | None = None,
        **kwargs,
    ):
        """Init function.

        Caches and callbacks that are not supplied are created from the
        values in ``config``.
        """
        lock = lock or Lock()
        # FIX: the default used to be the mutable `AsyncExitStack()` default
        # argument, which is evaluated once and shared by every instance.
        session_stack = session_stack if session_stack is not None else AsyncExitStack()
        super().__init__(
            config=config,
            lock=lock,
            tool_cache=tool_cache,
            tool_result_cache=tool_result_cache,
            resource_contents_cache=resource_contents_cache,
            resource_list_cache=resource_list_cache,
            client_session=client_session,
            connected_server_capabilities=connected_server_capabilities,
            session_stack=session_stack,
            sampling_callback=sampling_callback,
            list_roots_callback=list_roots_callback,
            logging_callback=logging_callback,
            message_handler=message_handler,
            current_roots=current_roots,
            **kwargs,
        )

        # Check if roots are specified in the config:
        if (
            not self.current_roots
            and self.config.connection_config.mount_points
        ):
            # That means that the roots are set in the config
            self.current_roots = self.config.connection_config.mount_points

        # set up the caches
        if not self.tool_cache:
            self.tool_cache = TTLCache(
                maxsize=self.config.caching_config.tool_cache_max_size,
                ttl=self.config.caching_config.tool_cache_max_ttl,
            )

        if not self.tool_result_cache:
            self.tool_result_cache = TTLCache(
                maxsize=self.config.caching_config.tool_result_cache_max_size,
                ttl=self.config.caching_config.tool_result_cache_max_ttl,
            )

        if not self.resource_contents_cache:
            self.resource_contents_cache = TTLCache(
                maxsize=self.config.caching_config.resource_contents_cache_max_size,
                ttl=self.config.caching_config.resource_contents_cache_max_ttl,
            )
        if not self.resource_list_cache:
            self.resource_list_cache = TTLCache(
                maxsize=self.config.caching_config.resource_list_cache_max_size,
                ttl=self.config.caching_config.resource_list_cache_max_ttl,
            )

        # set up callbacks: explicit argument wins, then the config's
        # callback, then the module's default factory.  (`or` short-circuits,
        # so the factory only runs when the config provides no callback.)
        if not self.logging_callback:
            self.logging_callback = (
                self.config.callback_config.logging_callback
                or default_flock_mcp_logging_callback_factory(
                    associated_client=self,
                    logger=logger,
                )
            )

        if not self.message_handler:
            self.message_handler = (
                self.config.callback_config.message_handler
                or default_flock_mcp_message_handler_callback_factory(
                    associated_client=self,
                    logger=logger,
                )
            )

        if not self.list_roots_callback:
            self.list_roots_callback = (
                self.config.callback_config.list_roots_callback
                or default_flock_mcp_list_roots_callback_factory(
                    associated_client=self,
                    logger=logger,
                )
            )

        if not self.sampling_callback:
            self.sampling_callback = (
                self.config.callback_config.sampling_callback
                or default_flock_mcp_sampling_callback_factory(
                    associated_client=self,
                    logger=logger,
                )
            )

    @property
    def session(self) -> _SessionProxy:
        """Always-connected proxy for client_session methods.

        Usage: ``await self.session.call_tool(...)``,
        ``await self.session.list_tools(...)``
        """
        return self._SessionProxy(self)

    model_config = ConfigDict(
        arbitrary_types_allowed=True,
        extra="allow",
    )

    # --- Abstract methods / class methods ---
    @abstractmethod
    async def create_transport(
        self,
        params: ServerParameters,
        additional_params: dict[str, Any] | None = None,
    ) -> AbstractAsyncContextManager[
        tuple[
            MemoryObjectReceiveStream[JSONRPCMessage | Exception],
            MemoryObjectSendStream[JSONRPCMessage],
        ]
    ]:
        """Given your custom ServerParameters, return an async-contextmgr whose __aenter yields (read_stream, write_stream)."""
        ...

    # --- Public methods ---
    async def get_tools(
        self,
        agent_id: str,
        run_id: str,
    ) -> list[FlockMCPToolBase]:
        """Gets a list of available tools from the server.

        Results are cached in ``tool_cache`` keyed by
        ``cache_key_generator(agent_id=..., run_id=...)``.
        """
        if not self.config.feature_config.tools_enabled:
            return []

        # FIX: `cachetools.cached` on an `async def` caches the coroutine
        # object itself, so a cache hit re-awaits an exhausted coroutine and
        # raises RuntimeError.  Cache the awaited *result* manually instead,
        # using the same key function as before.
        key = cache_key_generator(agent_id=agent_id, run_id=run_id)
        if self.tool_cache is not None:
            try:
                return self.tool_cache[key]
            except KeyError:
                pass

        response: ListToolsResult = await self.session.list_tools()
        flock_tools: list[FlockMCPToolBase] = []
        for tool in response.tools:
            converted_tool = FlockMCPToolBase.from_mcp_tool(
                tool,
                agent_id=agent_id,
                run_id=run_id,
            )
            if converted_tool:
                flock_tools.append(converted_tool)

        if self.tool_cache is not None:
            self.tool_cache[key] = flock_tools
        return flock_tools

    async def call_tool(
        self, agent_id: str, run_id: str, name: str, arguments: dict[str, Any]
    ) -> CallToolResult:
        """Call a tool via the MCP Protocol on the client's server.

        Results are cached in ``tool_result_cache`` keyed by
        ``cache_key_generator(agent_id=..., run_id=..., name=..., arguments=...)``.
        """
        # FIX: same async-coroutine caching bug as in get_tools — cache the
        # awaited result, not the coroutine.
        key = cache_key_generator(
            agent_id=agent_id, run_id=run_id, name=name, arguments=arguments
        )
        if self.tool_result_cache is not None:
            try:
                return self.tool_result_cache[key]
            except KeyError:
                pass

        logger.debug(f"Calling tool '{name}' with arguments {arguments}")
        result = await self.session.call_tool(
            name=name,
            arguments=arguments,
        )

        if self.tool_result_cache is not None:
            self.tool_result_cache[key] = result
        return result

    async def get_server_name(self) -> str:
        """Return the server_name.

        Uses a lock under the hood.
        """
        async with self.lock:
            return self.config.name

    async def get_roots(self) -> list[MCPRoot] | None:
        """Get the currently set roots of the client.

        Locks under the hood.
        """
        async with self.lock:
            return self.current_roots

    async def set_roots(self, new_roots: list[MCPRoot]) -> None:
        """Set the current roots of the client.

        Locks under the hood.  Notifies a connected server that the roots
        list changed.
        """
        async with self.lock:
            self.current_roots = new_roots
            # FIX: previously guarded with `if self.session:`, which is always
            # truthy (the property builds a fresh proxy), and then dereferenced
            # `self.client_session`, which may be None when disconnected.
            if self.client_session:
                try:
                    await self.client_session.send_roots_list_changed()
                except McpError as e:
                    logger.warning(f"Send roots list changed: {e}")

    async def invalidate_tool_cache(self) -> None:
        """Invalidate the entries in the tool cache."""
        logger.debug(f"Invalidating tool_cache for server '{self.config.name}'")
        async with self.lock:
            if self.tool_cache:
                self.tool_cache.clear()
                logger.debug(
                    f"Invalidated tool_cache for server '{self.config.name}'"
                )

    async def invalidate_resource_list_cache(self) -> None:
        """Invalidate the entries in the resource list cache."""
        logger.debug(
            f"Invalidating resource_list_cache for server '{self.config.name}'"
        )
        async with self.lock:
            if self.resource_list_cache:
                self.resource_list_cache.clear()
                logger.debug(
                    f"Invalidated resource_list_cache for server '{self.config.name}'"
                )

    async def invalidate_resource_contents_cache(self) -> None:
        """Invalidate the entries in the resource contents cache."""
        logger.debug(
            f"Invalidating resource_contents_cache for server '{self.config.name}'."
        )
        async with self.lock:
            if self.resource_contents_cache:
                self.resource_contents_cache.clear()
                logger.debug(
                    f"Invalidated resource_contents_cache for server '{self.config.name}'"
                )

    async def invalidate_resource_contents_cache_entry(self, key: str) -> None:
        """Invalidate a single entry in the resource contents cache."""
        logger.debug(
            f"Attempting to clear entry with key: {key} from resource_contents_cache for server '{self.config.name}'"
        )
        async with self.lock:
            if self.resource_contents_cache:
                try:
                    self.resource_contents_cache.pop(key, None)
                    logger.debug(
                        f"Cleared entry with key {key} from resource_contents_cache for server '{self.config.name}'"
                    )
                except Exception as e:
                    logger.debug(
                        f"No entry for key {key} found in resource_contents_cache for server '{self.config.name}'. Ignoring. (Exception was: {e})"
                    )
                    return  # do nothing

    async def disconnect(self) -> None:
        """If previously connected via `self._connect()`, tear it down."""
        async with self.lock:
            if self.session_stack:
                # manually __aexit__
                await self.session_stack.aclose()
            self.session_stack = None
            self.client_session = None

    # --- Private Methods ---
    async def _create_session(self) -> None:
        """Create and hold onto a single ClientSession + ExitStack."""
        logger.debug(f"Creating Client Session for server '{self.config.name}'")
        stack = AsyncExitStack()
        await stack.__aenter__()

        server_params = self.config.connection_config.connection_parameters

        # Single Hook
        transport_ctx = await self.create_transport(
            server_params, self.additional_params
        )
        read, write = await stack.enter_async_context(transport_ctx)
        read_timeout = self.config.connection_config.read_timeout_seconds

        # Server modules may override the read timeout per connection.
        if (
            self.additional_params
            and "read_timeout_seconds" in self.additional_params
        ):
            read_timeout = self.additional_params.get(
                "read_timeout_seconds", read_timeout
            )

        # ClientSession expects a timedelta; coerce a plain number.
        timeout_seconds = (
            read_timeout
            if isinstance(read_timeout, timedelta)
            else timedelta(seconds=float(read_timeout))
        )

        session = await stack.enter_async_context(
            ClientSession(
                read_stream=read,
                write_stream=write,
                read_timeout_seconds=timeout_seconds,
                list_roots_callback=self.list_roots_callback,
                message_handler=self.message_handler,
                sampling_callback=self.sampling_callback,
                logging_callback=self.logging_callback,
            )
        )
        logger.debug(f"Created Client Session for server '{self.config.name}'")
        # store for reuse
        self.session_stack = stack
        self.client_session = session

    async def _connect(self, retries: int | None = None) -> ClientSession:
        """Connect to an MCP Server and set self.client_session to ClientSession.

        Establish the transport and keep it open.
        """
        async with self.lock:
            # if already connected, return it
            if self.client_session:
                logger.debug(
                    f"Client Session for Server '{self.config.name}' exists and is healthy."
                )
                return self.client_session

            logger.debug(
                f"Client Session for Server '{self.config.name}' does not exist yet. Connecting..."
            )
            await self._create_session()

            if not self.connected_server_capabilities:
                # This means we never asked the server to initialize the connection.
                await self._perform_initial_handshake()
            return self.client_session

    async def _perform_initial_handshake(self) -> None:
        """Tell the server who we are, what capabilities we have, and what roots we're interested in."""
        # 1) do the LSP-style initialize handshake
        logger.debug(
            f"Performing initialize handshake with server '{self.config.name}'"
        )
        init: InitializeResult = await self.client_session.initialize()

        # FIX: store only the capabilities — the field is typed
        # `ServerCapabilities | None`; the full InitializeResult was stored
        # before, which mismatched the declared type.
        self.connected_server_capabilities = init.capabilities

        init_report = f"""
        Server Init Handshake completed Server '{self.config.name}'
        Lists the following Capabilities:

        - Protocol Version: {init.protocolVersion}
        - Instructions: {init.instructions or "No specific Instructions"}
        - MCP Implementation:
          - Name: {init.serverInfo.name}
          - Version: {init.serverInfo.version}
        - Capabilities:
          {init.capabilities}
        """

        logger.debug(init_report)

        # 2) if we already know our current roots, notify the server
        # so that it will follow up with a ListRootsRequest
        if self.current_roots and self.config.feature_config.roots_enabled:
            await self.client_session.send_roots_list_changed()

        # 3) Tell the server, what logging level we would like to use
        try:
            await self.client_session.set_logging_level(
                level=self.config.connection_config.server_logging_level
            )
        except McpError as e:
            logger.warning(
                f"Trying to set logging level for server '{self.config.name}' resulted in Exception: {e}"
            )

    async def _ensure_connected(self) -> None:
        """Connect if never connected; otherwise ping and reconnect on error."""
        # if we've never connected, then connect.
        if not self.client_session:
            await self._connect()
            return

        # otherwise, ping and reconnect on error
        try:
            await self.client_session.send_ping()
        except Exception as e:
            logger.warning(
                f"Session to '{self.config.name}' died, reconnecting. Exception was: {e}"
            )
            await self.disconnect()
            await self._connect()

    async def _get_client_session(self) -> ClientSession | None:
        """Lazily start one session and reuse it forever (until closed)."""
        async with self.lock:
            if self.client_session is None:
                await self._create_session()

            return self.client_session