lollms-client 0.19.9__py3-none-any.whl → 0.20.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic. Click here for more details.
- examples/external_mcp.py +267 -0
- examples/openai_mcp.py +203 -0
- examples/run_standard_mcp_example.py +204 -0
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +27 -12
- lollms_client/mcp_bindings/remote_mcp/__init__.py +241 -0
- lollms_client/mcp_bindings/standard_mcp/__init__.py +535 -0
- {lollms_client-0.19.9.dist-info → lollms_client-0.20.1.dist-info}/METADATA +1 -1
- {lollms_client-0.19.9.dist-info → lollms_client-0.20.1.dist-info}/RECORD +12 -7
- {lollms_client-0.19.9.dist-info → lollms_client-0.20.1.dist-info}/WHEEL +0 -0
- {lollms_client-0.19.9.dist-info → lollms_client-0.20.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.19.9.dist-info → lollms_client-0.20.1.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
|
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
|
|
|
7
7
|
from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
|
|
8
8
|
|
|
9
9
|
|
|
10
|
-
__version__ = "0.
|
|
10
|
+
__version__ = "0.20.1" # Updated version
|
|
11
11
|
|
|
12
12
|
# Optionally, you could define __all__ if you want to be explicit about exports
|
|
13
13
|
__all__ = [
|
lollms_client/lollms_core.py
CHANGED
|
@@ -587,7 +587,7 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
|
|
|
587
587
|
# 1. Discover tools if not provided
|
|
588
588
|
if tools is None:
|
|
589
589
|
try:
|
|
590
|
-
tools = self.mcp.discover_tools()
|
|
590
|
+
tools = self.mcp.discover_tools(force_refresh=True)
|
|
591
591
|
if not tools:
|
|
592
592
|
ASCIIColors.warning("No MCP tools discovered by the binding.")
|
|
593
593
|
except Exception as e_disc:
|
|
@@ -898,7 +898,7 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
898
898
|
# 0. Optional Objectives Extraction Step
|
|
899
899
|
if extract_objectives:
|
|
900
900
|
if streaming_callback:
|
|
901
|
-
streaming_callback("Extracting and structuring objectives...", MSG_TYPE.
|
|
901
|
+
streaming_callback("Extracting and structuring objectives...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
|
|
902
902
|
obj_prompt = (
|
|
903
903
|
"You are an expert analyst. "
|
|
904
904
|
"Your task is to extract and structure the key objectives from the user's request below. "
|
|
@@ -914,23 +914,33 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
914
914
|
)
|
|
915
915
|
objectives_text = self.remove_thinking_blocks(obj_gen).strip()
|
|
916
916
|
if streaming_callback:
|
|
917
|
-
streaming_callback(f"Objectives
|
|
917
|
+
streaming_callback(f"Objectives: {objectives_text}", MSG_TYPE.MSG_TYPE_STEP, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
|
|
918
|
+
|
|
919
|
+
if streaming_callback:
|
|
920
|
+
streaming_callback(f"Objectives extracted:\n{objectives_text}", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "objectives_extraction"}, turn_rag_history_for_callback)
|
|
918
921
|
|
|
919
922
|
current_query_for_rag = rag_query_text or None
|
|
920
923
|
previous_queries=[]
|
|
921
924
|
# 1. RAG Hops
|
|
922
925
|
for hop_count in range(max_rag_hops + 1):
|
|
923
926
|
if streaming_callback:
|
|
924
|
-
streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.
|
|
925
|
-
|
|
927
|
+
streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP_START, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
|
|
926
928
|
txt_previous_queries = f"Previous queries:\n"+'\n'.join(previous_queries)+"\n\n" if len(previous_queries)>0 else ""
|
|
927
929
|
txt_informations = f"Information:\n"+'\n'.join([f"(from {chunk['document']}):{chunk['content']}" for _, chunk in all_unique_retrieved_chunks_map.items()]) if len(all_unique_retrieved_chunks_map)>0 else "This is the first request. No data received yet. Build a new query."
|
|
928
|
-
txt_sp =
|
|
929
|
-
|
|
930
|
+
txt_sp = (
|
|
931
|
+
"Your objective is to analyze the provided chunks of information to determine "
|
|
932
|
+
"whether they are sufficient to reach the objective. If not, formulate a refined and focused query "
|
|
933
|
+
"that can retrieve more relevant information from a vector database. Ensure the query captures the semantic essence "
|
|
934
|
+
"of what is missing, is contextually independent, and is optimized for vector-based similarity search. "
|
|
935
|
+
"Do not repeat or rephrase earlier queries—always generate a new, meaningful atomic query targeting the current gap in knowledge."
|
|
936
|
+
)
|
|
937
|
+
|
|
938
|
+
txt_formatting = """The output format must be in form of JSON placed inside a JSON markdown tag. Use the following schema:
|
|
930
939
|
```json
|
|
931
940
|
{
|
|
932
|
-
"decision": A boolean
|
|
933
|
-
"query": (optional, only if decision is true). A new query
|
|
941
|
+
"decision": A boolean indicating your decision (true: more data is needed, false: the current data is sufficient),
|
|
942
|
+
"query": (str, optional, only if decision is true). A new, atomic query suitable for semantic search in a vector database.
|
|
943
|
+
It should capture the missing concept or insight in concise, context-rich language, avoiding reuse of earlier queries.
|
|
934
944
|
}
|
|
935
945
|
```
|
|
936
946
|
"""
|
|
@@ -942,10 +952,13 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
942
952
|
if not decision:
|
|
943
953
|
break
|
|
944
954
|
else:
|
|
945
|
-
current_query_for_rag = answer["query"]
|
|
955
|
+
current_query_for_rag = str(answer["query"])
|
|
946
956
|
except Exception as ex:
|
|
947
957
|
trace_exception(ex)
|
|
948
958
|
|
|
959
|
+
if streaming_callback:
|
|
960
|
+
streaming_callback(f"Query: {current_query_for_rag}", MSG_TYPE.MSG_TYPE_STEP, {"id": f"query for hop {hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
|
|
961
|
+
|
|
949
962
|
# Retrieve chunks
|
|
950
963
|
try:
|
|
951
964
|
retrieved = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
|
|
@@ -972,6 +985,8 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
972
985
|
hop_details["status"] = "No *new* unique chunks retrieved"
|
|
973
986
|
rag_hops_details_list.append(hop_details)
|
|
974
987
|
|
|
988
|
+
if streaming_callback:
|
|
989
|
+
streaming_callback(f"RAG Hop {hop_count + 1} done", MSG_TYPE.MSG_TYPE_STEP_END, {"id": f"rag_hop_{hop_count + 1}", "hop": hop_count + 1}, turn_rag_history_for_callback)
|
|
975
990
|
|
|
976
991
|
|
|
977
992
|
# 2. Prepare & Summarize Context
|
|
@@ -995,7 +1010,7 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
995
1010
|
# If context exceeds our effective limit, summarize it
|
|
996
1011
|
if self.count_tokens(accumulated_context) > effective_ctx_size:
|
|
997
1012
|
if streaming_callback:
|
|
998
|
-
streaming_callback("Context too large, performing intermediate summary...", MSG_TYPE.
|
|
1013
|
+
streaming_callback("Context too large, performing intermediate summary...", MSG_TYPE.MSG_TYPE_STEP_START, {"id": "intermediate_summary"}, turn_rag_history_for_callback)
|
|
999
1014
|
summary_prompt = (
|
|
1000
1015
|
"Summarize the following gathered context into a concise form "
|
|
1001
1016
|
"that preserves all key facts and sources needed to answer the user's request:\n\n"
|
|
@@ -1010,7 +1025,7 @@ Respond with a JSON object containing ONE of the following structures:
|
|
|
1010
1025
|
)
|
|
1011
1026
|
accumulated_context = self.remove_thinking_blocks(summary).strip()
|
|
1012
1027
|
if streaming_callback:
|
|
1013
|
-
streaming_callback("Intermediate summary complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"
|
|
1028
|
+
streaming_callback("Intermediate summary complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"id": "intermediate_summary"}, turn_rag_history_for_callback)
|
|
1014
1029
|
|
|
1015
1030
|
# 3. Final Answer Generation
|
|
1016
1031
|
final_prompt = [
|
|
@@ -0,0 +1,241 @@
|
|
|
1
|
+
# Conceptual: lollms_client/mcp_bindings/remote_mcp/__init__.py
|
|
2
|
+
|
|
3
|
+
import asyncio
import json
import threading
from contextlib import AsyncExitStack
from typing import Optional, List, Dict, Any, Tuple

from lollms_client.lollms_mcp_binding import LollmsMCPBinding
from ascii_colors import ASCIIColors, trace_exception

try:
    from mcp import ClientSession, types
    # Import the specific network client from MCP SDK
    from mcp.client.streamable_http import streamablehttp_client
    # If supporting OAuth, you'd import auth components:
    # from mcp.client.auth import OAuthClientProvider, TokenStorage
    # from mcp.shared.auth import OAuthClientMetadata, OAuthToken
    MCP_LIBRARY_AVAILABLE = True
except ImportError:
    # ... (error handling as in StandardMCPBinding) ...
    MCP_LIBRARY_AVAILABLE = False
    ClientSession = None # etc.
    streamablehttp_client = None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
# Name the binding manager uses to locate this binding class in the module.
BindingName = "RemoteMCPBinding"
# No TOOL_NAME_SEPARATOR needed if connecting to one remote server per instance,
# or if server aliases are handled differently (e.g. part of URL or config).
# When non-empty, tool names exposed to the client are "<alias><separator><tool>".
TOOL_NAME_SEPARATOR = "::"
|
|
28
|
+
|
|
29
|
+
class RemoteMCPBinding(LollmsMCPBinding):
    """MCP binding that talks to a single remote MCP server over streamable HTTP.

    The MCP SDK is asyncio-based, so this binding owns a private event loop
    running in a daemon thread and bridges synchronous callers onto it via
    ``asyncio.run_coroutine_threadsafe``. Tool names exposed to the client are
    prefixed with ``<alias><TOOL_NAME_SEPARATOR>`` so several binding instances
    can coexist without name collisions.
    """

    def __init__(self,
                 server_url: str, # e.g., "http://localhost:8000/mcp"
                 alias: str = "remote_server", # An alias for this connection
                 auth_config: Optional[Dict[str, Any]] = None, # For API keys, OAuth, etc.
                 **other_config_params: Any):
        """Create the binding and start its background event loop.

        Args:
            server_url: Base URL of the remote MCP endpoint.
            alias: Prefix used when exposing the server's tool names.
            auth_config: Optional auth settings. Currently recorded but not yet
                applied to the HTTP transport (see _initialize_connection_async).
            **other_config_params: Extra entries merged into self.config.
        """
        super().__init__(binding_name="remote_mcp")

        # Initialize every attribute up-front so close(), discover_tools() and
        # execute_tool() fail gracefully (instead of raising AttributeError)
        # even when validation below aborts construction early.
        self.server_url = server_url
        self.alias = alias # Could be used to prefix tool names if managing multiple remotes
        self.auth_config = auth_config or {}
        self.config = {
            "server_url": server_url,
            "alias": alias,
            "auth_config": self.auth_config
        }
        self.config.update(other_config_params)

        self._mcp_session: Optional[ClientSession] = None
        self._exit_stack: Optional[AsyncExitStack] = None
        self._discovered_tools_cache: List[Dict[str, Any]] = []
        self._is_initialized = False
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._thread: Optional[threading.Thread] = None

        if not MCP_LIBRARY_AVAILABLE:
            ASCIIColors.error(f"{self.binding_name}: MCP library not available.")
            return

        if not server_url:
            ASCIIColors.error(f"{self.binding_name}: server_url is required.")
            # Or raise ValueError
            return

        self._start_event_loop_thread() # Similar to StandardMCPBinding

    def _start_event_loop_thread(self):
        """Start the private asyncio loop in a daemon thread (idempotent)."""
        if self._loop and self._loop.is_running(): return
        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._run_loop_forever, daemon=True)
        self._thread.start()

    def _run_loop_forever(self):
        """Thread target: run the loop until stop() is requested, then close it."""
        if not self._loop: return
        asyncio.set_event_loop(self._loop)
        try: self._loop.run_forever()
        finally:
            # Loop was stopped (see close()); release its resources.
            if not self._loop.is_closed(): self._loop.close()

    def _run_async(self, coro, timeout=None):
        """Run `coro` on the background loop from sync code and block for its result.

        Raises:
            RuntimeError: if the background loop is not running.
            TimeoutError: if the coroutine does not finish within `timeout` seconds.
        """
        if not self._loop or not self._loop.is_running(): raise RuntimeError("Event loop not running.")
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        return future.result(timeout)

    async def _initialize_connection_async(self) -> bool:
        """Open the streamable-HTTP transport and MCP session; prime the tool cache.

        Returns True on success, False on any failure (after best-effort cleanup).
        """
        if self._is_initialized: return True
        ASCIIColors.info(f"{self.binding_name}: Initializing connection to {self.server_url}...")
        try:
            self._exit_stack = AsyncExitStack()

            # --- Authentication Setup (Conceptual) ---
            # streamablehttp_client's `auth` parameter accepts an OAuthClientProvider;
            # plain API-key headers would have to be passed through to the underlying
            # httpx client if/when the SDK supports it.
            # NOTE(review): self.auth_config is currently NOT applied here — confirm
            # whether target servers require auth before relying on this binding.
            client_streams = await self._exit_stack.enter_async_context(
                streamablehttp_client(self.server_url) # Add auth=... or headers=... if supported
            )
            read_stream, write_stream, _http_client_instance = client_streams # http_client_instance might be useful

            self._mcp_session = await self._exit_stack.enter_async_context(
                ClientSession(read_stream, write_stream)
            )
            await self._mcp_session.initialize()
            self._is_initialized = True
            ASCIIColors.green(f"{self.binding_name}: Connected to {self.server_url}")
            await self._refresh_tools_cache_async()
            return True
        except Exception as e:
            trace_exception(e)
            ASCIIColors.error(f"{self.binding_name}: Failed to connect to {self.server_url}: {e}")
            if self._exit_stack:
                try:
                    await self._exit_stack.aclose() # Cleanup on failure
                except Exception as close_err:
                    # Best-effort teardown: never mask the original connection error.
                    ASCIIColors.warning(f"{self.binding_name}: Cleanup after failed connect also failed: {close_err}")
            self._exit_stack = None
            self._mcp_session = None
            self._is_initialized = False
            return False

    def _ensure_initialized_sync(self, timeout=30.0):
        """Synchronously ensure the connection is up; raise ConnectionError if not."""
        if not self._is_initialized:
            success = self._run_async(self._initialize_connection_async(), timeout=timeout)
            if not success: raise ConnectionError(f"Failed to initialize remote MCP connection to {self.server_url}")
        if not self._mcp_session: # Double check
            raise ConnectionError(f"MCP Session not valid after init attempt for {self.server_url}")

    async def _refresh_tools_cache_async(self):
        """Fetch the server's tool list and rebuild the local discovery cache."""
        if not self._is_initialized or not self._mcp_session: return
        ASCIIColors.info(f"{self.binding_name}: Refreshing tools from {self.server_url}...")
        try:
            list_tools_result = await self._mcp_session.list_tools()
            current_tools = []
            for tool_obj in list_tools_result.tools:
                input_schema_dict = {}
                # SDK versions differ on the attribute name (inputSchema vs input_schema).
                tool_input_schema = getattr(tool_obj, 'inputSchema', getattr(tool_obj, 'input_schema', None))
                if tool_input_schema:
                    if hasattr(tool_input_schema, 'model_dump'):
                        input_schema_dict = tool_input_schema.model_dump(mode='json', exclude_none=True)
                    elif isinstance(tool_input_schema, dict):
                        input_schema_dict = tool_input_schema

                tool_name_for_client = f"{self.alias}{TOOL_NAME_SEPARATOR}{tool_obj.name}" if TOOL_NAME_SEPARATOR else tool_obj.name

                current_tools.append({
                    "name": tool_name_for_client, # Use self.alias to prefix
                    "description": tool_obj.description or "",
                    "input_schema": input_schema_dict
                })
            self._discovered_tools_cache = current_tools
            ASCIIColors.green(f"{self.binding_name}: Tools refreshed for {self.server_url}. Found {len(current_tools)} tools.")
        except Exception as e:
            trace_exception(e)
            ASCIIColors.error(f"{self.binding_name}: Error refreshing tools from {self.server_url}: {e}")

    def discover_tools(self, force_refresh: bool = False, timeout_per_server: float = 10.0, **kwargs) -> List[Dict[str, Any]]:
        """Return the cached tool list, optionally forcing a refresh from the server.

        Returns an empty list on any connection/discovery problem.
        """
        # This binding instance connects to ONE server, so timeout_per_server is just 'timeout'
        try:
            self._ensure_initialized_sync(timeout=timeout_per_server)
            if force_refresh or not self._discovered_tools_cache:
                self._run_async(self._refresh_tools_cache_async(), timeout=timeout_per_server)
            return self._discovered_tools_cache
        except Exception as e:
            ASCIIColors.error(f"{self.binding_name}: Problem during tool discovery for {self.server_url}: {e}")
            return []

    async def _execute_tool_async(self, actual_tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
        """Invoke a tool on the remote server and normalize its textual result.

        Returns a dict with "output" or "error" plus an HTTP-like "status_code".
        """
        if not self._is_initialized or not self._mcp_session:
            return {"error": f"Not connected to {self.server_url}", "status_code": 503}

        ASCIIColors.info(f"{self.binding_name}: Executing remote tool '{actual_tool_name}' on {self.server_url} with params: {json.dumps(params)}")
        try:
            mcp_call_result = await self._mcp_session.call_tool(name=actual_tool_name, arguments=params)
            # Collect every text part of the response; non-text content is ignored.
            output_parts = [p.text for p in mcp_call_result.content if isinstance(p, types.TextContent) and p.text is not None] if mcp_call_result.content else []
            if not output_parts: return {"output": {"message": "Tool executed but returned no textual content."}, "status_code": 200}
            combined_output_str = "\n".join(output_parts)
            # Prefer structured output when the server sent JSON; fall back to raw text.
            try: return {"output": json.loads(combined_output_str), "status_code": 200}
            except json.JSONDecodeError: return {"output": combined_output_str, "status_code": 200}
        except Exception as e:
            trace_exception(e)
            return {"error": f"Error executing remote tool '{actual_tool_name}': {str(e)}", "status_code": 500}

    def execute_tool(self, tool_name_with_alias: str, params: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        """Synchronously execute an alias-prefixed tool name with the given params.

        Accepts an optional 'timeout' kwarg (seconds, default 60). Returns a dict
        with "output" or "error" plus an HTTP-like "status_code".
        """
        timeout = float(kwargs.get('timeout', 60.0))

        # Strip the "<alias><separator>" prefix to recover the server-side tool name.
        expected_prefix = f"{self.alias}{TOOL_NAME_SEPARATOR}"
        if TOOL_NAME_SEPARATOR and tool_name_with_alias.startswith(expected_prefix):
            actual_tool_name = tool_name_with_alias[len(expected_prefix):]
        elif not TOOL_NAME_SEPARATOR and tool_name_with_alias: # No prefixing, tool_name is actual_tool_name
            actual_tool_name = tool_name_with_alias
        else:
            return {"error": f"Tool name '{tool_name_with_alias}' does not match expected alias '{self.alias}'.", "status_code": 400}

        try:
            self._ensure_initialized_sync(timeout=min(timeout, 30.0))
            return self._run_async(self._execute_tool_async(actual_tool_name, params), timeout=timeout)
        except ConnectionError as e: return {"error": f"{self.binding_name}: Connection issue for '{self.server_url}': {e}", "status_code": 503}
        except TimeoutError: return {"error": f"{self.binding_name}: Remote tool '{actual_tool_name}' on '{self.server_url}' timed out.", "status_code": 504}
        except Exception as e:
            trace_exception(e)
            return {"error": f"{self.binding_name}: Failed to run remote MCP tool '{actual_tool_name}': {e}", "status_code": 500}

    def close(self):
        """Tear down the MCP session/transport and stop the background loop."""
        ASCIIColors.info(f"{self.binding_name}: Closing connection to {self.server_url}...")
        if self._exit_stack:
            try:
                # The anyio task error might also occur here if not careful
                self._run_async(self._exit_stack.aclose(), timeout=10.0)
            except Exception as e:
                ASCIIColors.error(f"{self.binding_name}: Error during async close for {self.server_url}: {e}")
        # Always reset connection state, even if the async close failed.
        self._exit_stack = None
        self._mcp_session = None
        self._is_initialized = False

        # Stop event loop thread
        if self._loop and self._loop.is_running(): self._loop.call_soon_threadsafe(self._loop.stop)
        if self._thread and self._thread.is_alive(): self._thread.join(timeout=5.0)
        ASCIIColors.info(f"{self.binding_name}: Remote connection binding closed.")

    def get_binding_config(self) -> Dict[str, Any]: # LollmsMCPBinding might expect this
        """Return the configuration dict this binding was constructed with."""
        return self.config
|