lollms-client 0.19.8__py3-none-any.whl → 0.20.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- examples/external_mcp.py +267 -0
- examples/run_standard_mcp_example.py +204 -0
- lollms_client/__init__.py +1 -1
- lollms_client/lollms_core.py +24 -41
- lollms_client/mcp_bindings/standard_mcp/__init__.py +519 -0
- {lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/METADATA +1 -1
- {lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/RECORD +10 -7
- {lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/top_level.txt +0 -0
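The headline change in 0.20.0 is the new `standard_mcp` MCP binding and the two example scripts that exercise it. As a quick orientation before the per-file diffs, here is a minimal sketch of how the pieces fit together; it is modeled on examples/run_standard_mcp_example.py from this release, and the Ollama binding, model name, and server script path are illustrative values taken from the examples, not requirements of the API:

    import sys
    from lollms_client import LollmsClient

    client = LollmsClient(
        binding_name="ollama",                    # LLM binding used by the bundled examples
        model_name="mistral-nemo:latest",
        mcp_binding_name="standard_mcp",          # new MCP binding added in this release
        mcp_binding_config={
            "initial_servers": {
                # alias -> stdio launch command; tools are exposed as "time_machine::<tool>"
                "time_machine": {"command": [sys.executable, "time_server.py"]},
            }
        },
    )

    response = client.generate_with_mcp(prompt="What time is it right now?")
    print(response.get("final_answer"))
    client.close()

The per-file changes follow.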
examples/external_mcp.py
ADDED
@@ -0,0 +1,267 @@
# File: run_lollms_client_with_mcp_example.py

import sys
import os
import shutil
from pathlib import Path
import json
import subprocess
from dotenv import load_dotenv # Import the function

# --- Load environment variables from .env file ---
# Load from .env in the current script's directory, or from project root if specified
# You can specify a path: load_dotenv(dotenv_path=Path('.') / '.env')
# By default, it looks for .env in the current working directory or parent directories.
# For simplicity, let's assume .env is next to this script or in a discoverable location.
load_dotenv()

# --- Python Path Adjustment (same as before) ---
current_script_dir = Path(__file__).resolve().parent
project_root_for_lollms_client = current_script_dir.parent
if str(project_root_for_lollms_client) not in sys.path:
    sys.path.insert(0, str(project_root_for_lollms_client))
    print(f"Added to sys.path: {project_root_for_lollms_client}")

# --- Pipmaster and LollmsClient Core Imports (same as before) ---
try:
    import pipmaster as pm
except ImportError:
    print("ERROR: pipmaster is not installed or not in PYTHONPATH.")
    sys.exit(1)

try:
    from lollms_client import LollmsClient
    from lollms_client.lollms_llm_binding import LollmsLLMBinding
    from ascii_colors import ASCIIColors, trace_exception
    from lollms_client.lollms_types import MSG_TYPE
except ImportError as e:
    print(f"ERROR: Could not import LollmsClient components: {e}")
    trace_exception(e)
    sys.exit(1)


# --- Dummy Server Scripts (Time and Calculator - same as before) ---
TIME_SERVER_PY = """
import asyncio
from datetime import datetime
from mcp.server.fastmcp import FastMCP

mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.")

@mcp_server.tool(description="Returns the current server time and echoes received parameters.")
def get_current_time(user_id: str = "unknown_user") -> dict:
    return {"time": datetime.now().isoformat(), "params_received": {"user_id": user_id}, "server_name": "TimeServer"}

if __name__ == "__main__":
    mcp_server.run(transport="stdio")
"""

CALCULATOR_SERVER_PY = """
import asyncio
from typing import List, Union
from mcp.server.fastmcp import FastMCP

mcp_server = FastMCP("CalculatorMCP", description="A server that performs addition.")

@mcp_server.tool(description="Adds a list of numbers provided in the 'numbers' parameter.")
def add_numbers(numbers: List[Union[int, float]]) -> dict:
    if not isinstance(numbers, list) or not all(isinstance(x, (int, float)) for x in numbers):
        return {"error": "'numbers' must be a list of numbers."}
    return {"sum": sum(numbers), "server_name": "CalculatorServer"}

if __name__ == "__main__":
    mcp_server.run(transport="stdio")
"""

# --- Main Function ---
def main():
    ASCIIColors.red("--- Example: Using LollmsClient with StandardMCPBinding (including external ElevenLabs MCP) ---")

    # --- 1. Setup Temporary Directory for Dummy MCP Servers ---
    example_base_dir = Path(__file__).parent / "temp_mcp_example_servers"
    if example_base_dir.exists():
        shutil.rmtree(example_base_dir)
    example_base_dir.mkdir(exist_ok=True)

    time_server_script_path = example_base_dir / "time_server.py"
    with open(time_server_script_path, "w") as f: f.write(TIME_SERVER_PY)

    calculator_server_script_path = example_base_dir / "calculator_server.py"
    with open(calculator_server_script_path, "w") as f: f.write(CALCULATOR_SERVER_PY)

    # --- 2. MCP Configuration ---
    initial_mcp_servers = {
        "time_machine": {
            "command": [sys.executable, str(time_server_script_path.resolve())],
        },
        "calc_unit": {
            "command": [sys.executable, str(calculator_server_script_path.resolve())]
        }
    }

    # --- Configuration for ElevenLabs MCP Server (Optional) ---
    # Variables are now loaded from .env by load_dotenv() at the start of the script
    elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
    elevenlabs_voice_id_from_env = os.getenv("ELEVENLABS_VOICE_ID", "Rachel") # Default if not in .env
    elevenlabs_model_id_from_env = os.getenv("ELEVENLABS_MODEL_ID", "eleven_multilingual_v2") # Default

    uvx_available = False
    try:
        subprocess.run(["uvx", "--version"], capture_output=True, check=True, text=True, timeout=5)
        uvx_available = True
        ASCIIColors.green("uvx command is available.")
    except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
        ASCIIColors.yellow("uvx command not found, not working, or timed out. ElevenLabs MCP server (via uvx) will not be configured.")

    if elevenlabs_api_key and uvx_available:
        ASCIIColors.green("ELEVENLABS_API_KEY found (from .env) and uvx available. Configuring ElevenLabs MCP server.")
        initial_mcp_servers["elevenlabs"] = {
            "command": ["uvx"],
            "args": ["elevenlabs-mcp-server"],
            "env": {
                "ELEVENLABS_API_KEY": elevenlabs_api_key,
                "ELEVENLABS_VOICE_ID": elevenlabs_voice_id_from_env,
                "ELEVENLABS_MODEL_ID": elevenlabs_model_id_from_env,
                "ELEVENLABS_OUTPUT_DIR": str(example_base_dir / "elevenlabs_output")
                # Add other ELEVENLABS_ env vars from os.getenv() if needed
            }
        }
        (example_base_dir / "elevenlabs_output").mkdir(exist_ok=True)
    elif not elevenlabs_api_key:
        ASCIIColors.yellow("ELEVENLABS_API_KEY not found in .env file or environment variables. Skipping ElevenLabs MCP server configuration.")

    mcp_config = {"initial_servers": initial_mcp_servers}

    # --- 3. Initialize LollmsClient ---
    ASCIIColors.magenta("\n1. Initializing LollmsClient...")
    try:
        client = LollmsClient(
            binding_name="ollama",
            model_name="mistral-nemo:latest",
            mcp_binding_name="standard_mcp",
            mcp_binding_config=mcp_config,
        )
    except Exception as e:
        ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
        trace_exception(e)
        shutil.rmtree(example_base_dir, ignore_errors=True)
        sys.exit(1)

    if not client.binding:
        ASCIIColors.error("LollmsClient's LLM binding (ollama) failed to load.")
        shutil.rmtree(example_base_dir, ignore_errors=True)
        sys.exit(1)
    if not client.mcp:
        ASCIIColors.error("LollmsClient's MCP binding (standard_mcp) failed to load.")
        if hasattr(client, 'close'): client.close()
        shutil.rmtree(example_base_dir, ignore_errors=True)
        sys.exit(1)
    ASCIIColors.green("LollmsClient initialized successfully.")


    # --- 4. Define Streaming Callback (same as before) ---
    def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
        if metadata:
            type_info = metadata.get('type', 'unknown_type')
            if msg_type == MSG_TYPE.MSG_TYPE_STEP_START: ASCIIColors.cyan(f"MCP Step Start ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: ASCIIColors.cyan(f"MCP Step End ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_INFO: ASCIIColors.yellow(f"MCP Info ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
            else: ASCIIColors.green(f"MCP Output ({str(msg_type)}, {type_info}): {chunk}")
        else:
            if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
            else: ASCIIColors.green(f"MCP Output ({str(msg_type)}): {chunk}")
        sys.stdout.flush()
        return True

    # --- 5. Use generate_with_mcp with local dummy servers ---
    ASCIIColors.magenta("\n2. Calling generate_with_mcp to get current time (local dummy server)...")
    time_prompt = "Hey assistant, what time is it right now?"
    time_response = client.generate_with_mcp(
        prompt=time_prompt,
        streaming_callback=mcp_streaming_callback
    )
    print()
    ASCIIColors.blue(f"Final response for time prompt: {json.dumps(time_response, indent=2)}")
    assert time_response.get("error") is None, f"Time prompt error: {time_response.get('error')}"
    assert time_response.get("final_answer"), "Time prompt no final answer."
    assert len(time_response.get("tool_calls", [])) > 0, "Time prompt should call tool."
    if time_response.get("tool_calls"):
        assert time_response["tool_calls"][0]["name"] == "time_machine::get_current_time", "Incorrect tool for time."
        assert "time" in time_response["tool_calls"][0].get("result", {}).get("output", {}), "Time tool result missing time."

    ASCIIColors.magenta("\n3. Calling generate_with_mcp for calculation (local dummy server)...")
    calc_prompt = "Can you sum 50, 25, and 7.5 for me?"
    calc_response = client.generate_with_mcp(
        prompt=calc_prompt,
        streaming_callback=mcp_streaming_callback
    )
    print()
    ASCIIColors.blue(f"Final response for calc prompt: {json.dumps(calc_response, indent=2)}")
    assert calc_response.get("error") is None, f"Calc prompt error: {calc_response.get('error')}"
    assert calc_response.get("final_answer"), "Calc prompt no final answer."
    assert len(calc_response.get("tool_calls", [])) > 0, "Calc prompt should call tool."
    if calc_response.get("tool_calls"):
        assert calc_response["tool_calls"][0]["name"] == "calc_unit::add_numbers", "Incorrect tool for calc."
        assert "sum" in calc_response["tool_calls"][0].get("result", {}).get("output", {}), "Calculator tool result missing sum."


    # --- 6. Interact with ElevenLabs MCP Server (if configured) ---
    if "elevenlabs" in client.mcp.get_binding_config().get("initial_servers", {}):
        ASCIIColors.magenta("\n4. Interacting with ElevenLabs MCP server...")

        ASCIIColors.info("Discovering all available tools (including ElevenLabs)...")
        all_mcp_tools = client.mcp.discover_tools(force_refresh=True, timeout_per_server=45) # Longer timeout for external server
        ASCIIColors.green(f"Discovered {len(all_mcp_tools)} tools in total:")
        for tool in all_mcp_tools:
            # Try to get properties keys from input_schema for a more informative print
            props_keys = "N/A"
            if isinstance(tool.get('input_schema'), dict) and isinstance(tool['input_schema'].get('properties'), dict):
                props_keys = list(tool['input_schema']['properties'].keys())
            print(f" - Name: {tool.get('name')}, Desc: {tool.get('description')}, Schema Props: {props_keys}")


        elevenlabs_list_voices_tool_name = "elevenlabs::list_voices"
        if any(t['name'] == elevenlabs_list_voices_tool_name for t in all_mcp_tools):
            ASCIIColors.magenta(f"\n4a. Calling '{elevenlabs_list_voices_tool_name}' via LLM prompt...")

            list_voices_prompt = "Please list all the available voices from the elevenlabs tool."
            voices_response = client.generate_with_mcp(
                prompt=list_voices_prompt,
                streaming_callback=mcp_streaming_callback,
                max_tool_calls=1
            )
            print()
            ASCIIColors.blue(f"Final response for ElevenLabs list_voices prompt: {json.dumps(voices_response, indent=2)}")

            assert voices_response.get("error") is None, f"ElevenLabs list_voices error: {voices_response.get('error')}"
            assert voices_response.get("final_answer"), "ElevenLabs list_voices no final answer."
            tool_calls = voices_response.get("tool_calls", [])
            assert len(tool_calls) > 0, "ElevenLabs list_voices should call tool."
            if tool_calls:
                assert tool_calls[0]["name"] == elevenlabs_list_voices_tool_name, "Incorrect tool for ElevenLabs list_voices."
                tool_output = tool_calls[0].get("result", {}).get("output")
                assert isinstance(tool_output, list), f"ElevenLabs list_voices output not a list, got: {type(tool_output)}"
                if tool_output:
                    ASCIIColors.green(f"First voice from ElevenLabs: {tool_output[0].get('name')} (ID: {tool_output[0].get('voice_id')})")
        else:
            ASCIIColors.yellow(f"Tool '{elevenlabs_list_voices_tool_name}' not found. Skipping ElevenLabs tool execution test.")
    else:
        ASCIIColors.yellow("ElevenLabs MCP server not configured in this run (check .env for API key and uvx availability). Skipping ElevenLabs tests.")

    # --- 7. Cleanup ---
    ASCIIColors.magenta("\n5. Closing LollmsClient and cleaning up...")
    if client and hasattr(client, 'close'):
        try:
            client.close()
        except Exception as e:
            ASCIIColors.error(f"Error closing LollmsClient: {e}")
            trace_exception(e)

    ASCIIColors.info("Cleaning up temporary server scripts directory...")
    shutil.rmtree(example_base_dir, ignore_errors=True)

    ASCIIColors.red("\n--- LollmsClient with MCP Example (including external) Finished ---")

if __name__ == "__main__":
    main()

examples/run_standard_mcp_example.py
ADDED
@@ -0,0 +1,204 @@
# File: run_lollms_client_with_mcp_example.py

import sys
import os
import shutil
from pathlib import Path
import json
from lollms_client import LollmsClient
# --- Dynamically adjust Python path to find lollms_client ---
# This assumes the example script is in a directory, and 'lollms_client' is
# in a sibling directory or a known relative path. Adjust as needed.
# For example, if script is in 'lollms_client/examples/' and lollms_client code is in 'lollms_client/'
# then the parent of the script's parent is the project root.

# Get the directory of the current script
current_script_dir = Path(__file__).resolve().parent

# Option 1: If lollms_client is in the parent directory of this script's directory
# (e.g. script is in 'project_root/examples' and lollms_client is in 'project_root/lollms_client')
# project_root = current_script_dir.parent
# lollms_client_path = project_root / "lollms_client" # Assuming this is where lollms_client.py and bindings are

# Option 2: If lollms_client package is directly one level up
# (e.g. script is in 'lollms_client/examples' and lollms_client package is 'lollms_client')
project_root_for_lollms_client = current_script_dir.parent
if str(project_root_for_lollms_client) not in sys.path:
    sys.path.insert(0, str(project_root_for_lollms_client))
    print(f"Added to sys.path: {project_root_for_lollms_client}")


# --- Ensure pipmaster is available (core LoLLMs dependency) ---
try:
    import pipmaster as pm
except ImportError:
    print("ERROR: pipmaster is not installed or not in PYTHONPATH.")
    sys.exit(1)

# --- Import LollmsClient and supporting components ---
try:

    from lollms_client.lollms_llm_binding import LollmsLLMBinding # Base for LLM
    from ascii_colors import ASCIIColors, trace_exception
    from lollms_client.lollms_types import MSG_TYPE # Assuming MSG_TYPE is here
except ImportError as e:
    print(f"ERROR: Could not import LollmsClient components: {e}")
    print("Ensure 'lollms_client' package structure is correct and accessible via PYTHONPATH.")
    print(f"Current sys.path: {sys.path}")
    trace_exception(e)
    sys.exit(1)


# --- Dummy Server Scripts using FastMCP (as per previous successful iteration) ---
TIME_SERVER_PY = """
import asyncio
from datetime import datetime
from mcp.server.fastmcp import FastMCP

mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.")

@mcp_server.tool(description="Returns the current server time and echoes received parameters.")
def get_current_time(user_id: str = "unknown_user") -> dict:
    return {"time": datetime.now().isoformat(), "params_received": {"user_id": user_id}, "server_name": "TimeServer"}

if __name__ == "__main__":
    mcp_server.run(transport="stdio")
"""

CALCULATOR_SERVER_PY = """
import asyncio
from typing import List, Union
from mcp.server.fastmcp import FastMCP

mcp_server = FastMCP("CalculatorMCP", description="A server that performs addition.")

@mcp_server.tool(description="Adds a list of numbers provided in the 'numbers' parameter.")
def add_numbers(numbers: List[Union[int, float]]) -> dict:
    if not isinstance(numbers, list) or not all(isinstance(x, (int, float)) for x in numbers):
        return {"error": "'numbers' must be a list of numbers."}
    return {"sum": sum(numbers), "server_name": "CalculatorServer"}

if __name__ == "__main__":
    mcp_server.run(transport="stdio")
"""


def main():
    ASCIIColors.red("--- Example: Using LollmsClient with StandardMCPBinding ---")

    # --- 1. Setup Temporary Directory for Dummy MCP Servers ---
    example_base_dir = Path(__file__).parent / "temp_mcp_example_servers"
    if example_base_dir.exists():
        shutil.rmtree(example_base_dir)
    example_base_dir.mkdir(exist_ok=True)

    time_server_script_path = example_base_dir / "time_server.py"
    with open(time_server_script_path, "w") as f: f.write(TIME_SERVER_PY)

    calculator_server_script_path = example_base_dir / "calculator_server.py"
    with open(calculator_server_script_path, "w") as f: f.write(CALCULATOR_SERVER_PY)

    # MCP Binding Configuration (for StandardMCPBinding)
    mcp_config = {
        "initial_servers": {
            "time_machine": {
                "command": [sys.executable, str(time_server_script_path.resolve())],
            },
            "calc_unit": {
                "command": [sys.executable, str(calculator_server_script_path.resolve())]
            }
        }
    }
    ASCIIColors.magenta("\n1. Initializing LollmsClient...")
    try:
        client = LollmsClient(
            binding_name="ollama", # Use the dummy LLM binding
            model_name="mistral-nemo:latest",
            mcp_binding_name="standard_mcp",
            mcp_binding_config=mcp_config,
        )
    except Exception as e:
        ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
        trace_exception(e)
        shutil.rmtree(example_base_dir)
        sys.exit(1)

    if not client.binding:
        ASCIIColors.error("LollmsClient's LLM binding (dummy_llm) failed to load.")
        shutil.rmtree(example_base_dir)
        sys.exit(1)
    if not client.mcp:
        ASCIIColors.error("LollmsClient's MCP binding (standard_mcp) failed to load.")
        client.close() # Close LLM binding if it loaded
        shutil.rmtree(example_base_dir)
        sys.exit(1)

    ASCIIColors.green("LollmsClient initialized successfully with DummyLLM and StandardMCP bindings.")

    # --- 3. Define a streaming callback for generate_with_mcp ---
    def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
        if metadata:
            type_info = metadata.get('type', 'unknown_type')
            if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
                ASCIIColors.cyan(f"MCP Step Start ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
                ASCIIColors.cyan(f"MCP Step End ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
                ASCIIColors.yellow(f"MCP Info ({type_info}): {chunk}")
            elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Part of final answer typically
                ASCIIColors.green(chunk, end="") # type: ignore
            else: # FULL, default, etc.
                ASCIIColors.green(f"MCP Output ({str(msg_type)}, {type_info}): {chunk}")
        else:
            if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
                ASCIIColors.green(chunk, end="") # type: ignore
            else:
                ASCIIColors.green(f"MCP Output ({str(msg_type)}): {chunk}")
        sys.stdout.flush()
        return True # Continue streaming

    # --- 4. Use generate_with_mcp ---
    ASCIIColors.magenta("\n2. Calling generate_with_mcp to get current time...")
    time_prompt = "Hey assistant, what time is it right now?"
    time_response = client.generate_with_mcp(
        prompt=time_prompt,
        streaming_callback=mcp_streaming_callback,
        interactive_tool_execution=False # Set to True to test interactive mode
    )
    print() # Newline after streaming
    ASCIIColors.blue(f"Final response for time prompt: {json.dumps(time_response, indent=2)}")

    assert time_response.get("error") is None, f"Time prompt resulted in an error: {time_response.get('error')}"
    assert time_response.get("final_answer"), "Time prompt did not produce a final answer."
    assert len(time_response.get("tool_calls", [])) > 0, "Time prompt should have called a tool."
    assert time_response["tool_calls"][0]["name"] == "time_machine::get_current_time", "Incorrect tool called for time."
    assert "time" in time_response["tool_calls"][0].get("result", {}).get("output", {}), "Time tool result missing time."


    ASCIIColors.magenta("\n3. Calling generate_with_mcp for calculation...")
    calc_prompt = "Can you please calculate the sum of 50, 25, and 7.5 for me?"
    calc_response = client.generate_with_mcp(
        prompt=calc_prompt,
        streaming_callback=mcp_streaming_callback
    )
    print() # Newline
    ASCIIColors.blue(f"Final response for calc prompt: {json.dumps(calc_response, indent=2)}")

    assert calc_response.get("error") is None, f"Calc prompt resulted in an error: {calc_response.get('error')}"
    assert calc_response.get("final_answer"), "Calc prompt did not produce a final answer."
    assert len(calc_response.get("tool_calls", [])) > 0, "Calc prompt should have called a tool."
    assert calc_response["tool_calls"][0]["name"] == "calc_unit::add_numbers", "Incorrect tool called for calculation."
    # The dummy LLM uses hardcoded params [1,2,3] for calc, so result will be 6.
    # A real LLM would extract 50, 25, 7.5.
    # For this dummy test, we check against the dummy's behavior.
    assert calc_response["tool_calls"][0].get("result", {}).get("output", {}).get("sum") == 82.5, "Calculator tool result mismatch for dummy params."


    # --- 5. Cleanup ---
    ASCIIColors.info("Cleaning up temporary server scripts and dummy binding directory...")
    shutil.rmtree(example_base_dir, ignore_errors=True)

    ASCIIColors.red("\n--- LollmsClient with StandardMCPBinding Example Finished Successfully! ---")

if __name__ == "__main__":
    main()

lollms_client/__init__.py
CHANGED
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
 
-__version__ = "0.19.8"
+__version__ = "0.20.0" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

lollms_client/lollms_core.py
CHANGED
@@ -922,28 +922,34 @@ Respond with a JSON object containing ONE of the following structures:
         for hop_count in range(max_rag_hops + 1):
             if streaming_callback:
                 streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP, {"type": "rag_hop_start", "hop": hop_count + 1}, turn_rag_history_for_callback)
-   [17 removed lines (old 925-941) are not rendered in this diff view]
+            txt_previous_queries = f"Previous queries:\n"+'\n'.join(previous_queries)+"\n\n" if len(previous_queries)>0 else ""
+            txt_informations = f"Information:\n"+'\n'.join([f"(from {chunk['document']}):{chunk['content']}" for _, chunk in all_unique_retrieved_chunks_map.items()]) if len(all_unique_retrieved_chunks_map)>0 else "This is the first request. No data received yet. Build a new query."
+            txt_sp = "Your objective is to analyze the provided chunks of information, then decise if they are sufficient to reach the objective. If you need more information, formulate a new query to extract more data."
+            txt_formatting = """The output format must be in form of json placed inside a json markdown tag. Here is the schema to use:
+```json
+{
+    "decision": A boolean depicting your decision (true: more data is needed, false: there is enough data to reach objective),
+    "query": (str, optional, only if decision is true). A new query to recover more information from the data source (do not use previous queries as they have already been used)
+}
+```
+"""
+            p = f"Objective:\n{objectives_text}\n\n{txt_previous_queries}\n\n{txt_informations}\n\n{txt_formatting}\n\n"
+            response = self.generate_code(p,system_prompt=txt_sp)
+            try:
+                answer = json.loads(response)
+                decision = answer["decision"]
+                if not decision:
+                    break
+                else:
+                    current_query_for_rag = str(answer["query"])
+            except Exception as ex:
+                trace_exception(ex)
 
             # Retrieve chunks
             try:
                 retrieved = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
             except Exception as e:
+                trace_exception(e)
                 return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": str(e)}
 
             hop_details = {"query": current_query_for_rag, "retrieved_chunks_details": [], "status": ""}
@@ -965,30 +971,7 @@ Respond with a JSON object containing ONE of the following structures:
                 hop_details["status"] = "No *new* unique chunks retrieved"
             rag_hops_details_list.append(hop_details)
 
-
-            if hop_count < max_rag_hops:
-                txt_previous_queries = f"Previous queries:\n"+'\n'.join(previous_queries)+"\n\n" if len(previous_queries)>0 else ""
-                txt_informations = f"Information:\n"+'\n'.join([f"(from {chunk['document']}):{chunk['content']}" for _, chunk in all_unique_retrieved_chunks_map.items()])
-                txt_sp = "Your objective is to analyze the provided chunks of information, then decise if they are sufficient to reach the objective. If you need more information, formulate a new query to extract more data."
-                txt_formatting = """The output format must be in form of json placed inside a json markdown tag. Here is the schema to use:
-```json
-{
-    "decision": A boolean depicting your decision (true: more data is needed, false: there is enough data to reach objective),
-    "query": (optional, only if decision is true). A new query to recover more information from the data source (do not use previous queries as they have already been used)
-}
-```
-"""
-                p = f"Objective:\n{objectives_text}\n\n{txt_previous_queries}\n\n{txt_informations}\n\n{txt_formatting}\n\n"
-                response = self.generate_code(p,system_prompt=txt_sp)
-                try:
-                    answer = json.loads(response)
-                    decision = answer["decision"]
-                    if not decision:
-                        break
-                    else:
-                        current_query_for_rag = answer["query"]
-                except Exception as ex:
-                    trace_exception(ex)
+
 
             # 2. Prepare & Summarize Context
             sorted_chunks = sorted(all_unique_retrieved_chunks_map.values(),

lollms_client/mcp_bindings/standard_mcp/__init__.py
ADDED
@@ -0,0 +1,519 @@
# File: lollms_client/mcp_bindings/standard_mcp/__init__.py

import pipmaster as pm

# Ensure critical dependencies for this binding are present.
# If pipmaster itself is missing, lollms_client is not correctly installed.
pm.ensure_packages(["mcp", "ascii-colors"])

import asyncio
import json
import threading
import sys
from contextlib import AsyncExitStack
from pathlib import Path
from typing import Optional, List, Dict, Any, Tuple

# These imports should now succeed if pipmaster did its job.
from lollms_client.lollms_mcp_binding import LollmsMCPBinding # Assuming this base class exists
from ascii_colors import ASCIIColors, trace_exception

# Attempt to import MCP library components.
try:
    from mcp import ClientSession, StdioServerParameters
    from mcp.client.stdio import stdio_client
    from mcp import types # Use mcp.types for data structures
    MCP_LIBRARY_AVAILABLE = True
    ASCIIColors.green("Successfully imported MCP library components for StandardMCPBinding.")
except ImportError as e:
    ASCIIColors.error(f"StandardMCPBinding: Critical MCP library components could not be imported even after pipmaster attempt: {e}")
    ASCIIColors.error("Please check your Python environment, internet connection, and pip installation.")
    ASCIIColors.error("StandardMCPBinding will be non-functional.")
    ClientSession = None
    StdioServerParameters = None
    stdio_client = None
    types = None # MCP types module unavailable
    MCP_LIBRARY_AVAILABLE = False

# This variable is used by LollmsMCPBindingManager to identify the binding class.
BindingName = "StandardMCPBinding" # Must match the class name below
TOOL_NAME_SEPARATOR = "::"

class StandardMCPBinding(LollmsMCPBinding):
    """
    A LollmsMCPBinding to connect to multiple standard Model Context Protocol (MCP) servers.
    This binding acts as an MCP client to these servers.
    Each server is launched via a command, communicates over stdio, and is identified by a unique alias.
    Tool names are prefixed with 'server_alias::' for disambiguation.
    """

    def __init__(self,
                 initial_servers: Optional[Dict[str, Dict[str, Any]]] = None,
                 **other_config_params: Any):
        super().__init__(binding_name="standard_mcp")

        self.config = {"initial_servers": initial_servers if initial_servers else {}}
        self.config.update(other_config_params)

        self._server_configs: Dict[str, Dict[str, Any]] = {}
        # Type hint with ClientSession, actual obj if MCP_LIBRARY_AVAILABLE
        self._mcp_sessions: Dict[str, ClientSession] = {} # type: ignore
        self._exit_stacks: Dict[str, AsyncExitStack] = {}
        self._discovered_tools_cache: Dict[str, List[Dict[str, Any]]] = {}
        self._server_locks: Dict[str, threading.Lock] = {}
        self._initialization_status: Dict[str, bool] = {}
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._thread: Optional[threading.Thread] = None

        if not MCP_LIBRARY_AVAILABLE:
            ASCIIColors.error(f"{self.binding_name}: Cannot initialize; MCP library components are missing.")
            return # Binding remains in a non-functional state

        self._loop = asyncio.new_event_loop()
        self._thread = threading.Thread(target=self._start_event_loop, daemon=True,
                                        name=f"{self.binding_name}EventLoopThread")
        self._thread.start()
        ASCIIColors.info(f"{self.binding_name}: Event loop thread started.")

        if initial_servers:
            for alias, config_data in initial_servers.items():
                if isinstance(config_data, dict):
                    # Ensure command is a list
                    command = config_data.get("command")
                    if isinstance(command, str): # if command is a single string, convert to list
                        command = command.split()

                    self.add_server(
                        alias=alias,
                        command=command, # type: ignore
                        cwd=config_data.get("cwd"),
                        env=config_data.get("env")
                    )
                else:
                    ASCIIColors.warning(f"{self.binding_name}: Invalid configuration for server alias '{alias}' in 'initial_servers'. Expected a dictionary.")

    def _start_event_loop(self):
        if not self._loop: return
        asyncio.set_event_loop(self._loop)
        try:
            self._loop.run_forever()
        finally:
            # Cleanup tasks before closing the loop
            if hasattr(asyncio, 'all_tasks'): # Python 3.7+
                pending = asyncio.all_tasks(self._loop)
            else: # Python 3.6
                pending = asyncio.Task.all_tasks(self._loop) # type: ignore

            if pending:
                self._loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))

            if self._loop.is_running():
                self._loop.stop()

            if not self._loop.is_closed():
                if sys.platform == "win32" and isinstance(self._loop, asyncio.ProactorEventLoop): # type: ignore
                    self._loop.call_soon(self._loop.stop)
                    try:
                        # This run_until_complete might be problematic if called from non-loop thread after stop
                        # but often necessary for proactor loop cleanup on Windows
                        self._loop.run_until_complete(asyncio.sleep(0.1))
                    except RuntimeError as e:
                        if "cannot be called from a different thread" not in str(e):
                            ASCIIColors.warning(f"{self.binding_name}: Minor issue during proactor loop sleep: {e}")
                self._loop.close()
            ASCIIColors.info(f"{self.binding_name}: Asyncio event loop has stopped and closed.")


    def _run_async_task(self, coro, timeout: Optional[float] = None) -> Any:
        if not MCP_LIBRARY_AVAILABLE or not self._loop or not self._loop.is_running() or not self._thread or not self._thread.is_alive():
            raise RuntimeError(f"{self.binding_name}'s event loop is not operational or MCP library is missing.")
        future = asyncio.run_coroutine_threadsafe(coro, self._loop)
        try:
            return future.result(timeout=timeout)
        except TimeoutError:
            future.cancel() # Attempt to cancel the coroutine
            raise
        except Exception:
            raise

    def add_server(self, alias: str, command: List[str], cwd: Optional[str] = None, env: Optional[Dict[str, str]] = None) -> bool:
        if not MCP_LIBRARY_AVAILABLE:
            ASCIIColors.error(f"{self.binding_name}: Cannot add server '{alias}', MCP library is not available.")
            return False

        if not alias or not isinstance(alias, str):
            ASCIIColors.error(f"{self.binding_name}: Server alias must be a non-empty string.")
            return False
        if not command or not isinstance(command, list) or not all(isinstance(c, str) for c in command) or not command[0]:
            ASCIIColors.error(f"{self.binding_name}: Server command for '{alias}' must be a non-empty list of strings (e.g., ['python', 'server.py']).")
            return False

        if alias in self._server_configs:
            ASCIIColors.warning(f"{self.binding_name}: Reconfiguring server '{alias}'. Existing connection (if any) will be closed.")
            self.remove_server(alias, silent=True)

        self._server_configs[alias] = {"command": command, "cwd": cwd, "env": env}
        self._server_locks[alias] = threading.Lock()
        self._initialization_status[alias] = False
        self._discovered_tools_cache[alias] = [] # Initialize cache for the new server
        ASCIIColors.info(f"{self.binding_name}: Server '{alias}' configured with command: {command}")

        if "initial_servers" not in self.config:
            self.config["initial_servers"] = {}
        if isinstance(self.config["initial_servers"], dict): # Ensure it's a dict
            self.config["initial_servers"][alias] = self._server_configs[alias]
        return True

    async def _close_server_connection_async(self, alias: str):
        exit_stack_to_close = self._exit_stacks.pop(alias, None)
        # Pop session and status immediately to reflect desired state
        self._mcp_sessions.pop(alias, None)
        self._initialization_status[alias] = False

        if exit_stack_to_close:
            ASCIIColors.info(f"{self.binding_name}: Attempting to close MCP connection for server '{alias}'...")
            try:
                await exit_stack_to_close.aclose()
                ASCIIColors.info(f"{self.binding_name}: MCP connection for '{alias}' resources released via aclose.")
            except RuntimeError as e:
                if "Attempted to exit cancel scope in a different task" in str(e):
                    ASCIIColors.warning(f"{self.binding_name}: Known anyio task ownership issue during close for '{alias}': {e}.")
                    ASCIIColors.warning(f"{self.binding_name}: Underlying MCP client resources for '{alias}' may not have been fully cleaned up due to this anyio constraint.")
                    # At this point, the stdio process might still be running.
                    # Further action (like trying to kill the process) is outside the scope of AsyncExitStack.
                else:
                    # Reraise other RuntimeErrors or handle them
                    trace_exception(e)
                    ASCIIColors.error(f"{self.binding_name}: Unexpected RuntimeError closing MCP connection for '{alias}': {e}")
            except Exception as e:
                trace_exception(e)
                ASCIIColors.error(f"{self.binding_name}: General error closing MCP connection for '{alias}': {e}")
        # else:
        #     ASCIIColors.debug(f"{self.binding_name}: No active exit stack found for server '{alias}' to close (already closed or never fully initialized).")

    def remove_server(self, alias: str, silent: bool = False):
        if not MCP_LIBRARY_AVAILABLE:
            if not silent: ASCIIColors.error(f"{self.binding_name}: Cannot remove server '{alias}', MCP library issues persist."); return

        if alias not in self._server_configs:
            if not silent: ASCIIColors.warning(f"{self.binding_name}: Server '{alias}' not found for removal.")
            return

        if not silent: ASCIIColors.info(f"{self.binding_name}: Removing server '{alias}'.")

        if self._initialization_status.get(alias) or alias in self._exit_stacks or alias in self._mcp_sessions:
            try:
                self._run_async_task(self._close_server_connection_async(alias), timeout=10.0)
            except RuntimeError as e:
                if not silent: ASCIIColors.warning(f"{self.binding_name}: Could not run async close for '{alias}' (event loop issue?): {e}")
            except Exception as e:
                if not silent: ASCIIColors.error(f"{self.binding_name}: Exception during async close for '{alias}': {e}")

        self._server_configs.pop(alias, None)
        self._server_locks.pop(alias, None)
        self._initialization_status.pop(alias, None)
        self._discovered_tools_cache.pop(alias, None)
        if "initial_servers" in self.config and isinstance(self.config["initial_servers"], dict) and alias in self.config["initial_servers"]:
            self.config["initial_servers"].pop(alias)
        if not silent: ASCIIColors.info(f"{self.binding_name}: Server '{alias}' removed.")

    async def _initialize_connection_async(self, alias: str) -> bool:
        if not MCP_LIBRARY_AVAILABLE or not types or not ClientSession or not StdioServerParameters or not stdio_client:
            ASCIIColors.error(f"{self.binding_name}: MCP library components (types, ClientSession, etc.) not available. Cannot initialize '{alias}'.")
            return False
        if self._initialization_status.get(alias): return True
        if alias not in self._server_configs:
            ASCIIColors.error(f"{self.binding_name}: No configuration for server alias '{alias}'. Cannot initialize.")
            return False

        config = self._server_configs[alias]
        ASCIIColors.info(f"{self.binding_name}: Initializing MCP connection for server '{alias}'...")
        try:
            if alias in self._exit_stacks: # Should ideally be cleaned up if a previous attempt failed
                old_stack = self._exit_stacks.pop(alias)
                await old_stack.aclose()

            exit_stack = AsyncExitStack()
            self._exit_stacks[alias] = exit_stack

            server_params = StdioServerParameters(
                command=config["command"][0],
                args=config["command"][1:],
                cwd=Path(config["cwd"]) if config["cwd"] else None,
                env=config["env"]
            )
            read_stream, write_stream = await exit_stack.enter_async_context(stdio_client(server_params))

            # CORRECTED: Removed client_name from ClientSession constructor
            session = await exit_stack.enter_async_context(ClientSession(read_stream, write_stream))

            await session.initialize() # This is where client capabilities/info might be exchanged
            self._mcp_sessions[alias] = session
            self._initialization_status[alias] = True
            ASCIIColors.green(f"{self.binding_name}: Successfully initialized MCP session for server '{alias}'.")
            await self._refresh_tools_cache_async(alias)
            return True
        except Exception as e:
            trace_exception(e)
            ASCIIColors.error(f"{self.binding_name}: Failed to initialize MCP connection for '{alias}': {e}")
            if alias in self._exit_stacks:
                current_stack = self._exit_stacks.pop(alias)
                try:
                    await current_stack.aclose()
                except Exception as e_close:
                    ASCIIColors.error(f"{self.binding_name}: Error during cleanup after failed init for '{alias}': {e_close}")
            self._initialization_status[alias] = False
            self._mcp_sessions.pop(alias, None)
            return False

    def _ensure_server_initialized_sync(self, alias: str, timeout: float = 30.0):
        if not MCP_LIBRARY_AVAILABLE or not self._loop or not types:
            raise ConnectionError(f"{self.binding_name}: MCP library/event loop/types module not available. Cannot initialize server '{alias}'.")

        if alias not in self._server_configs:
            raise ValueError(f"{self.binding_name}: Server alias '{alias}' is not configured.")

        lock = self._server_locks.get(alias)
        if not lock:
            ASCIIColors.error(f"{self.binding_name}: Internal error - No lock for server '{alias}'. Creating one now.")
            self._server_locks[alias] = threading.Lock()
            lock = self._server_locks[alias]


        with lock:
            if not self._initialization_status.get(alias):
                ASCIIColors.info(f"{self.binding_name}: Connection for '{alias}' not initialized. Attempting initialization...")
                try:
                    success = self._run_async_task(self._initialize_connection_async(alias), timeout=timeout)
                    if not success:
                        raise ConnectionError(f"Failed to initialize MCP connection for server '{alias}'. Check previous logs for details.")
                except TimeoutError:
                    raise ConnectionError(f"MCP initialization for server '{alias}' timed out after {timeout} seconds.")
                except Exception as e:
                    raise ConnectionError(f"MCP initialization for server '{alias}' failed: {str(e)}")

            if not self._initialization_status.get(alias) or alias not in self._mcp_sessions:
                raise ConnectionError(f"MCP Session for server '{alias}' is not valid after initialization attempt.")

    async def _refresh_tools_cache_async(self, alias: str):
        if not MCP_LIBRARY_AVAILABLE or not types:
            ASCIIColors.error(f"{self.binding_name}: MCP library or types module not available. Cannot refresh tools for '{alias}'.")
            return
        if not self._initialization_status.get(alias) or alias not in self._mcp_sessions:
            ASCIIColors.warning(f"{self.binding_name}: Server '{alias}' not initialized or no session. Cannot refresh tools.")
            return

        session = self._mcp_sessions[alias]
        ASCIIColors.info(f"{self.binding_name}: Refreshing tools cache for server '{alias}'...")
        try:
            list_tools_result = await session.list_tools() # Expected to be types.ListToolsResult
            current_server_tools = []
            if list_tools_result and list_tools_result.tools:
                for tool_obj in list_tools_result.tools: # tool_obj is expected to be types.Tool
                    # --- DEBUGGING ---
                    # print(f"DEBUG: tool_obj type: {type(tool_obj)}")
                    # print(f"DEBUG: tool_obj dir: {dir(tool_obj)}")
                    # if hasattr(tool_obj, 'model_fields'): print(f"DEBUG: tool_obj fields: {tool_obj.model_fields.keys()}") # Pydantic v2
                    # elif hasattr(tool_obj, '__fields__'): print(f"DEBUG: tool_obj fields: {tool_obj.__fields__.keys()}") # Pydantic v1
                    # if hasattr(tool_obj, 'model_dump_json'): print(f"DEBUG: tool_obj JSON: {tool_obj.model_dump_json(indent=2)}")
                    # elif hasattr(tool_obj, 'json'): print(f"DEBUG: tool_obj JSON: {tool_obj.json(indent=2)}")
                    # --- END DEBUGGING ---

                    input_schema_dict = {}
                    # Try accessing with 'inputSchema' (camelCase) or check other potential names based on debug output
                    tool_input_schema = None
                    if hasattr(tool_obj, 'inputSchema'): # Common JSON convention
                        tool_input_schema = tool_obj.inputSchema
                    elif hasattr(tool_obj, 'input_schema'): # Python convention
                        tool_input_schema = tool_obj.input_schema
                    # Add more elif for other possibilities if revealed by debugging

                    if tool_input_schema: # Check if the schema object itself exists and is not None
                        # tool_input_schema is expected to be types.InputSchema | None
                        # or a Pydantic model that has model_dump
                        if hasattr(tool_input_schema, 'model_dump'):
                            input_schema_dict = tool_input_schema.model_dump(mode='json', exclude_none=True)
                        else:
                            # If it's not a Pydantic model but some other dict-like structure
                            # This part might need adjustment based on what tool_input_schema actually is
                            ASCIIColors.warning(f"{self.binding_name}: input schema for tool '{tool_obj.name}' on '{alias}' is not a Pydantic model with model_dump. Type: {type(tool_input_schema)}")
                            if isinstance(tool_input_schema, dict):
                                input_schema_dict = tool_input_schema
                            # else: leave it as empty dict

                    tool_dict = {
                        "name": tool_obj.name,
                        "description": tool_obj.description or "",
                        "input_schema": input_schema_dict
                    }
                    current_server_tools.append(tool_dict)
            self._discovered_tools_cache[alias] = current_server_tools
            ASCIIColors.green(f"{self.binding_name}: Tools cache for '{alias}' refreshed. Found {len(current_server_tools)} tools.")
        except Exception as e:
            trace_exception(e)
            ASCIIColors.error(f"{self.binding_name}: Error refreshing tools cache for '{alias}': {e}")
            self._discovered_tools_cache[alias] = [] # Clear cache on error

    def discover_tools(self, specific_tool_names: Optional[List[str]]=None, force_refresh: bool=False, timeout_per_server: float=10.0, **kwargs) -> List[Dict[str, Any]]:
        if not MCP_LIBRARY_AVAILABLE or not self._loop or not types:
            ASCIIColors.warning(f"{self.binding_name}: Cannot discover tools, MCP library, event loop, or types module not available.")
            return []

        stn = kwargs.get('specific_tool_names', specific_tool_names)
        fr = kwargs.get('force_refresh', force_refresh)
        tps = kwargs.get('timeout_per_server', timeout_per_server)

        all_tools: List[Dict[str, Any]] = []
        active_aliases = list(self._server_configs.keys())

        for alias in active_aliases:
            try:
                self._ensure_server_initialized_sync(alias, timeout=tps)

                if fr or (self._initialization_status.get(alias) and not self._discovered_tools_cache.get(alias)):
                    ASCIIColors.info(f"{self.binding_name}: Force refreshing tools for '{alias}' or cache is empty.")
                    self._run_async_task(self._refresh_tools_cache_async(alias), timeout=tps)

                for tool_data in self._discovered_tools_cache.get(alias, []):
                    prefixed_tool_data = tool_data.copy()
                    prefixed_tool_data["name"] = f"{alias}{TOOL_NAME_SEPARATOR}{tool_data['name']}"
                    all_tools.append(prefixed_tool_data)
            except ConnectionError as e:
                ASCIIColors.error(f"{self.binding_name}: Connection problem with server '{alias}' during tool discovery: {e}")
            except Exception as e:
                trace_exception(e)
                ASCIIColors.error(f"{self.binding_name}: Unexpected problem with server '{alias}' during tool discovery: {e}")

        if stn:
            return [t for t in all_tools if t.get("name") in stn]
        return all_tools

    def _parse_tool_name(self, prefixed_tool_name: str) -> Optional[Tuple[str, str]]:
        parts = prefixed_tool_name.split(TOOL_NAME_SEPARATOR, 1)
        if len(parts) == 2:
            return parts[0], parts[1]
        ASCIIColors.warning(f"{self.binding_name}: Tool name '{prefixed_tool_name}' is not in the expected 'alias{TOOL_NAME_SEPARATOR}tool' format.")
        return None

    async def _execute_tool_async(self, server_alias: str, actual_tool_name: str, params: Dict[str, Any]) -> Dict[str, Any]:
        if not MCP_LIBRARY_AVAILABLE or not types:
            error_msg = f"{self.binding_name}: MCP library or types module not available. Cannot execute tool '{actual_tool_name}' on '{server_alias}'."
            ASCIIColors.error(error_msg)
            return {"error": error_msg, "status_code": 503}

        if not self._initialization_status.get(server_alias) or server_alias not in self._mcp_sessions:
            error_msg = f"Server '{server_alias}' not initialized or session lost. Cannot execute tool '{actual_tool_name}'."
            ASCIIColors.error(f"{self.binding_name}: {error_msg}")
            return {"error": error_msg, "status_code": 503}

        session = self._mcp_sessions[server_alias]
        # Use a more careful way to log params if they can be very large or sensitive
        params_log = {k: (v[:100] + '...' if isinstance(v, str) and len(v) > 100 else v) for k,v in params.items()}
        ASCIIColors.info(f"{self.binding_name}: Executing MCP tool '{actual_tool_name}' on server '{server_alias}' with params: {json.dumps(params_log)}")
        try:
            # call_tool returns types.CallToolResult
            mcp_call_result = await session.call_tool(name=actual_tool_name, arguments=params)

            output_parts = []
            if mcp_call_result and mcp_call_result.content: # content is List[types.ContentPart]
                for content_part in mcp_call_result.content:
                    if isinstance(content_part, types.TextContent) and hasattr(content_part, 'text') and content_part.text is not None:
                        output_parts.append(content_part.text)

            if not output_parts:
                ASCIIColors.info(f"{self.binding_name}: Tool '{actual_tool_name}' on '{server_alias}' executed but returned no textual content.")
                return {"output": {"message": "Tool executed successfully but returned no textual content."}, "status_code": 200}

            combined_output_str = "\n".join(output_parts)
            ASCIIColors.success(f"{self.binding_name}: Tool '{actual_tool_name}' on '{server_alias}' executed. Raw output (first 200 chars): '{combined_output_str[:200]}'")

            try:
                parsed_output = json.loads(combined_output_str)
                return {"output": parsed_output, "status_code": 200}
            except json.JSONDecodeError:
                return {"output": combined_output_str, "status_code": 200}

        except Exception as e:
            trace_exception(e)
            error_msg = f"Error executing tool '{actual_tool_name}' on server '{server_alias}': {str(e)}"
            ASCIIColors.error(f"{self.binding_name}: {error_msg}")
            return {"error": error_msg, "status_code": 500}

    def execute_tool(self, tool_name: str, params: Dict[str, Any], **kwargs) -> Dict[str, Any]:
        if not MCP_LIBRARY_AVAILABLE or not self._loop or not types:
            error_msg = f"{self.binding_name}: MCP support (library, event loop, or types module) not available. Cannot execute tool '{tool_name}'."
            ASCIIColors.warning(error_msg)
            return {"error": error_msg, "status_code": 503}

        timeout = float(kwargs.get('timeout', 60.0))

        parsed_name = self._parse_tool_name(tool_name)
        if not parsed_name:
            return {"error": f"Invalid tool name format for {self.binding_name}: '{tool_name}'. Expected 'alias{TOOL_NAME_SEPARATOR}toolname'.", "status_code": 400}

        server_alias, actual_tool_name = parsed_name

        if server_alias not in self._server_configs:
            return {"error": f"Server alias '{server_alias}' (from tool name '{tool_name}') is not configured.", "status_code": 404}

        try:
            init_timeout = min(timeout, 30.0)
            self._ensure_server_initialized_sync(server_alias, timeout=init_timeout)
        except ConnectionError as e:
            return {"error": f"{self.binding_name}: Connection or configuration issue for server '{server_alias}': {e}", "status_code": 503}
        except Exception as e:
            trace_exception(e)
            return {"error": f"{self.binding_name}: Failed to ensure server '{server_alias}' is initialized: {e}", "status_code": 500}

        try:
            return self._run_async_task(self._execute_tool_async(server_alias, actual_tool_name, params), timeout=timeout)
        except TimeoutError:
            return {"error": f"{self.binding_name}: Tool '{actual_tool_name}' on server '{server_alias}' timed out after {timeout} seconds.", "status_code": 504}
        except RuntimeError as e:
            return {"error": f"{self.binding_name}: Runtime error executing tool '{actual_tool_name}' on '{server_alias}': {e}", "status_code": 500}
        except Exception as e:
            trace_exception(e)
            return {"error": f"{self.binding_name}: An unexpected error occurred while running MCP tool '{actual_tool_name}' on server '{server_alias}': {e}", "status_code": 500}

    def close(self):
        ASCIIColors.info(f"{self.binding_name}: Initiating shutdown process...")

        if hasattr(self, '_server_configs') and self._server_configs:
            active_aliases = list(self._server_configs.keys())
            if active_aliases:
                ASCIIColors.info(f"{self.binding_name}: Closing connections for servers: {active_aliases}")

[The diff view is truncated here; lines 485-519 of the new file are not shown.]
|
|
485
|
+
for alias in active_aliases:
|
|
486
|
+
self.remove_server(alias, silent=True)
|
|
487
|
+
|
|
488
|
+
if hasattr(self, '_loop') and self._loop:
|
|
489
|
+
if self._loop.is_running():
|
|
490
|
+
ASCIIColors.info(f"{self.binding_name}: Requesting event loop to stop.")
|
|
491
|
+
self._loop.call_soon_threadsafe(self._loop.stop)
|
|
492
|
+
|
|
493
|
+
if hasattr(self, '_thread') and self._thread and self._thread.is_alive():
|
|
494
|
+
ASCIIColors.info(f"{self.binding_name}: Waiting for event loop thread to join...")
|
|
495
|
+
self._thread.join(timeout=10.0)
|
|
496
|
+
if self._thread.is_alive():
|
|
497
|
+
ASCIIColors.warning(f"{self.binding_name}: Event loop thread did not terminate cleanly after 10 seconds.")
|
|
498
|
+
else:
|
|
499
|
+
ASCIIColors.info(f"{self.binding_name}: Event loop thread joined successfully.")
|
|
500
|
+
|
|
501
|
+
ASCIIColors.info(f"{self.binding_name}: Binding closed.")
|
|
502
|
+
|
|
503
|
+
def __del__(self):
|
|
504
|
+
# Check if attributes relevant to closing exist to prevent errors if __init__ failed early
|
|
505
|
+
needs_close = False
|
|
506
|
+
if hasattr(self, '_loop') and self._loop and (self._loop.is_running() or not self._loop.is_closed()):
|
|
507
|
+
needs_close = True
|
|
508
|
+
if hasattr(self, '_thread') and self._thread and self._thread.is_alive():
|
|
509
|
+
needs_close = True
|
|
510
|
+
if hasattr(self, '_server_configs') and self._server_configs: # Check if there are any servers to close
|
|
511
|
+
needs_close = True
|
|
512
|
+
|
|
513
|
+
if needs_close:
|
|
514
|
+
ASCIIColors.warning(f"{self.binding_name}: __del__ called; attempting to close resources. Explicit .close() is recommended for reliability.")
|
|
515
|
+
try:
|
|
516
|
+
self.close()
|
|
517
|
+
except Exception as e:
|
|
518
|
+
# __del__ should not raise exceptions
|
|
519
|
+
ASCIIColors.error(f"{self.binding_name}: Error during __del__ cleanup: {e}")
|
|
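For orientation, here is a minimal consumer-side sketch of how the execute_tool entry point added above might be used. It relies only on what the diff shows: tool names must be prefixed as f"{alias}{TOOL_NAME_SEPARATOR}{tool}", a timeout can be passed through **kwargs (the binding defaults to 60.0 seconds), and the call returns either {"output": ..., "status_code": 200} on success or {"error": ..., "status_code": ...} on failure. The `binding` instance, the "time_server" alias, the "get_current_time" tool, the "::" separator, and the parameter dict are illustrative assumptions, not part of the package.

from typing import Any, Dict

def call_time_tool(binding: Any) -> Dict[str, Any]:
    # Hypothetical helper: `binding` is assumed to be an already configured instance of
    # the standard_mcp binding, with a server registered under the alias "time_server".
    result = binding.execute_tool(
        "time_server::get_current_time",  # assumed separator; real format is f"{alias}{TOOL_NAME_SEPARATOR}{tool}"
        params={"format": "iso"},         # hypothetical tool parameters
        timeout=30.0,                     # forwarded via **kwargs; the binding defaults to 60.0 seconds
    )
    if "error" in result:
        # Failure paths in the diff use HTTP-like codes: 400, 404, 500, 503 and 504
        print(f"Tool call failed ({result['status_code']}): {result['error']}")
    else:
        # On success the output is parsed JSON when the tool emitted JSON, otherwise raw text
        print(f"Tool output: {result['output']}")
    return result

When the binding is no longer needed, an explicit call to its close() method follows the shutdown path defined above, which the __del__ fallback only approximates.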
{lollms_client-0.19.8.dist-info → lollms_client-0.20.0.dist-info}/RECORD
@@ -1,8 +1,10 @@
+examples/external_mcp.py,sha256=swx1KCOz6jk8jGTAycq-xu7GXPAhRMDe1x--SKocugE,13371
 examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
 examples/generate_a_benchmark_for_safe_store.py,sha256=bkSt0mrpNsN0krZAUShm0jgVM1ukrPpjI7VwSgcNdSA,3974
 examples/generate_text_with_multihop_rag_example.py,sha256=riEyVYo97r6ZYdySL-NJkRhE4MnpwbZku1sN8RNvbvs,11519
 examples/internet_search_with_rag.py,sha256=cbUoGgY3rxZpQ5INoaA0Nhm0cutii-2AQ9WCz71Ch3o,12369
 examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
+examples/run_standard_mcp_example.py,sha256=GSZpaACPf3mDPsjA8esBQVUsIi7owI39ca5avsmvCxA,9419
 examples/simple_text_gen_test.py,sha256=RoX9ZKJjGMujeep60wh5WT_GoBn0O9YKJY6WOy-ZmOc,8710
 examples/simple_text_gen_with_image_test.py,sha256=rR1O5Prcb52UHtJ3c6bv7VuTd1cvbkr5aNZU-v-Rs3Y,9263
 examples/text_2_audio.py,sha256=MfL4AH_NNwl6m0I0ywl4BXRZJ0b9Y_9fRqDIe6O-Sbw,3523
@@ -20,9 +22,9 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
 examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
 examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
 examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=6mEOGmlBcMfm69OMeheDvrv_uQhGXf8s6A-u45k0KEs,910
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=
+lollms_client/lollms_core.py,sha256=w-wy0YO6Yojido4p2f-0zykpLEzLRI2XGUAYlfVsUCA,112870
 lollms_client/lollms_discussion.py,sha256=EV90dIgw8a-f-82vB2GspR60RniYz7WnBmAWSIg5mW0,2158
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=bdElz_IBx0zZ-85YTT1fyY_mSoHo46tKIMiHYJlKCkM,9809
@@ -50,6 +52,7 @@ lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py,sh
 lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py,sha256=THtZsMxNnXZiBdkwoBlfbWY2C5hhDdmPtnM-8cSKN6s,9488
 lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py,sha256=PLC31-D04QKTOTb1uuCHnrAlpysQjsk89yIJngK0VGc,4586
 lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py,sha256=McDCBVoVrMDYgU7EYtyOY7mCk1uEeTea0PSD69QqDsQ,6228
+lollms_client/mcp_bindings/standard_mcp/__init__.py,sha256=xhXlBFHA7njNKnshrGF-WTw0DbxNKDchYt43-I56-2U,30230
 lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lollms_client/stt_bindings/lollms/__init__.py,sha256=jBz3285atdPRqQe9ZRrb-AvjqKRB4f8tjLXjma0DLfE,6082
 lollms_client/stt_bindings/whisper/__init__.py,sha256=vrua7fLwDId9_WiH4y2gXOE0hy3Gr2Ig-z5ScIT2bHI,15447
@@ -70,8 +73,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
-lollms_client-0.
+lollms_client-0.20.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.20.0.dist-info/METADATA,sha256=GB6TGuMpSqhgobl3ig9zTdSoyc_OzVk29cit_wht0HA,13374
+lollms_client-0.20.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-0.20.0.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+lollms_client-0.20.0.dist-info/RECORD,,
File without changes
File without changes
File without changes