lollms-client 0.33.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
- lollms_client/llm_bindings/claude/__init__.py +4 -7
- lollms_client/llm_bindings/gemini/__init__.py +3 -7
- lollms_client/llm_bindings/grok/__init__.py +3 -7
- lollms_client/llm_bindings/groq/__init__.py +4 -6
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
- lollms_client/llm_bindings/litellm/__init__.py +15 -6
- lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
- lollms_client/llm_bindings/lollms/__init__.py +24 -14
- lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
- lollms_client/llm_bindings/mistral/__init__.py +3 -5
- lollms_client/llm_bindings/ollama/__init__.py +6 -11
- lollms_client/llm_bindings/open_router/__init__.py +4 -6
- lollms_client/llm_bindings/openai/__init__.py +7 -14
- lollms_client/llm_bindings/openllm/__init__.py +12 -12
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
- lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
- lollms_client/llm_bindings/transformers/__init__.py +14 -6
- lollms_client/llm_bindings/vllm/__init__.py +16 -12
- lollms_client/lollms_core.py +303 -490
- lollms_client/lollms_discussion.py +431 -78
- lollms_client/lollms_llm_binding.py +192 -381
- lollms_client/lollms_mcp_binding.py +33 -2
- lollms_client/lollms_tti_binding.py +107 -2
- lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
- lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
- lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
- lollms_client/stt_bindings/lollms/__init__.py +6 -8
- lollms_client/stt_bindings/whisper/__init__.py +2 -4
- lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
- lollms_client/tti_bindings/dalle/__init__.py +50 -29
- lollms_client/tti_bindings/diffusers/__init__.py +227 -439
- lollms_client/tti_bindings/gemini/__init__.py +320 -0
- lollms_client/tti_bindings/lollms/__init__.py +8 -9
- lollms_client-1.1.0.dist-info/METADATA +1214 -0
- lollms_client-1.1.0.dist-info/RECORD +69 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/top_level.txt +0 -2
- examples/article_summary/article_summary.py +0 -58
- examples/console_discussion/console_app.py +0 -266
- examples/console_discussion.py +0 -448
- examples/deep_analyze/deep_analyse.py +0 -30
- examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
- examples/function_calling_with_local_custom_mcp.py +0 -250
- examples/generate_a_benchmark_for_safe_store.py +0 -89
- examples/generate_and_speak/generate_and_speak.py +0 -251
- examples/generate_game_sfx/generate_game_fx.py +0 -240
- examples/generate_text_with_multihop_rag_example.py +0 -210
- examples/gradio_chat_app.py +0 -228
- examples/gradio_lollms_chat.py +0 -259
- examples/internet_search_with_rag.py +0 -226
- examples/lollms_chat/calculator.py +0 -59
- examples/lollms_chat/derivative.py +0 -48
- examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
- examples/lollms_discussions_test.py +0 -155
- examples/mcp_examples/external_mcp.py +0 -267
- examples/mcp_examples/local_mcp.py +0 -171
- examples/mcp_examples/openai_mcp.py +0 -203
- examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
- examples/mcp_examples/run_standard_mcp_example.py +0 -204
- examples/simple_text_gen_test.py +0 -173
- examples/simple_text_gen_with_image_test.py +0 -178
- examples/test_local_models/local_chat.py +0 -9
- examples/text_2_audio.py +0 -77
- examples/text_2_image.py +0 -144
- examples/text_2_image_diffusers.py +0 -274
- examples/text_and_image_2_audio.py +0 -59
- examples/text_gen.py +0 -30
- examples/text_gen_system_prompt.py +0 -29
- lollms_client-0.33.0.dist-info/METADATA +0 -854
- lollms_client-0.33.0.dist-info/RECORD +0 -101
- test/test_lollms_discussion.py +0 -368
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/licenses/LICENSE +0 -0
--- examples/mcp_examples/external_mcp.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# File: run_lollms_client_with_mcp_example.py
-
-import sys
-import os
-import shutil
-from pathlib import Path
-import json
-import subprocess
-from dotenv import load_dotenv # Import the function
-
-# --- Load environment variables from .env file ---
-# Load from .env in the current script's directory, or from project root if specified
-# You can specify a path: load_dotenv(dotenv_path=Path('.') / '.env')
-# By default, it looks for .env in the current working directory or parent directories.
-# For simplicity, let's assume .env is next to this script or in a discoverable location.
-load_dotenv()
-
-# --- Python Path Adjustment (same as before) ---
-current_script_dir = Path(__file__).resolve().parent
-project_root_for_lollms_client = current_script_dir.parent
-if str(project_root_for_lollms_client) not in sys.path:
-    sys.path.insert(0, str(project_root_for_lollms_client))
-    print(f"Added to sys.path: {project_root_for_lollms_client}")
-
-# --- Pipmaster and LollmsClient Core Imports (same as before) ---
-try:
-    import pipmaster as pm
-except ImportError:
-    print("ERROR: pipmaster is not installed or not in PYTHONPATH.")
-    sys.exit(1)
-
-try:
-    from lollms_client import LollmsClient
-    from lollms_client.lollms_llm_binding import LollmsLLMBinding
-    from ascii_colors import ASCIIColors, trace_exception
-    from lollms_client.lollms_types import MSG_TYPE
-except ImportError as e:
-    print(f"ERROR: Could not import LollmsClient components: {e}")
-    trace_exception(e)
-    sys.exit(1)
-
-
-# --- Dummy Server Scripts (Time and Calculator - same as before) ---
-TIME_SERVER_PY = """
-import asyncio
-from datetime import datetime
-from mcp.server.fastmcp import FastMCP
-
-mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.")
-
-@mcp_server.tool(description="Returns the current server time and echoes received parameters.")
-def get_current_time(user_id: str = "unknown_user") -> dict:
-    return {"time": datetime.now().isoformat(), "params_received": {"user_id": user_id}, "server_name": "TimeServer"}
-
-if __name__ == "__main__":
-    mcp_server.run(transport="stdio")
-"""
-
-CALCULATOR_SERVER_PY = """
-import asyncio
-from typing import List, Union
-from mcp.server.fastmcp import FastMCP
-
-mcp_server = FastMCP("CalculatorMCP", description="A server that performs addition.")
-
-@mcp_server.tool(description="Adds a list of numbers provided in the 'numbers' parameter.")
-def add_numbers(numbers: List[Union[int, float]]) -> dict:
-    if not isinstance(numbers, list) or not all(isinstance(x, (int, float)) for x in numbers):
-        return {"error": "'numbers' must be a list of numbers."}
-    return {"sum": sum(numbers), "server_name": "CalculatorServer"}
-
-if __name__ == "__main__":
-    mcp_server.run(transport="stdio")
-"""
-
-# --- Main Function ---
-def main():
-    ASCIIColors.red("--- Example: Using LollmsClient with StandardMCPBinding (including external ElevenLabs MCP) ---")
-
-    # --- 1. Setup Temporary Directory for Dummy MCP Servers ---
-    example_base_dir = Path(__file__).parent / "temp_mcp_example_servers"
-    if example_base_dir.exists():
-        shutil.rmtree(example_base_dir)
-    example_base_dir.mkdir(exist_ok=True)
-
-    time_server_script_path = example_base_dir / "time_server.py"
-    with open(time_server_script_path, "w") as f: f.write(TIME_SERVER_PY)
-
-    calculator_server_script_path = example_base_dir / "calculator_server.py"
-    with open(calculator_server_script_path, "w") as f: f.write(CALCULATOR_SERVER_PY)
-
-    # --- 2. MCP Configuration ---
-    initial_mcp_servers = {
-        "time_machine": {
-            "command": [sys.executable, str(time_server_script_path.resolve())],
-        },
-        "calc_unit": {
-            "command": [sys.executable, str(calculator_server_script_path.resolve())]
-        }
-    }
-
-    # --- Configuration for ElevenLabs MCP Server (Optional) ---
-    # Variables are now loaded from .env by load_dotenv() at the start of the script
-    elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
-    elevenlabs_voice_id_from_env = os.getenv("ELEVENLABS_VOICE_ID", "Rachel") # Default if not in .env
-    elevenlabs_model_id_from_env = os.getenv("ELEVENLABS_MODEL_ID", "eleven_multilingual_v2") # Default
-
-    uvx_available = False
-    try:
-        subprocess.run(["uvx", "--version"], capture_output=True, check=True, text=True, timeout=5)
-        uvx_available = True
-        ASCIIColors.green("uvx command is available.")
-    except (FileNotFoundError, subprocess.CalledProcessError, subprocess.TimeoutExpired):
-        ASCIIColors.yellow("uvx command not found, not working, or timed out. ElevenLabs MCP server (via uvx) will not be configured.")
-
-    if elevenlabs_api_key and uvx_available:
-        ASCIIColors.green("ELEVENLABS_API_KEY found (from .env) and uvx available. Configuring ElevenLabs MCP server.")
-        initial_mcp_servers["elevenlabs"] = {
-            "command": ["uvx"],
-            "args": ["elevenlabs-mcp-server"],
-            "env": {
-                "ELEVENLABS_API_KEY": elevenlabs_api_key,
-                "ELEVENLABS_VOICE_ID": elevenlabs_voice_id_from_env,
-                "ELEVENLABS_MODEL_ID": elevenlabs_model_id_from_env,
-                "ELEVENLABS_OUTPUT_DIR": str(example_base_dir / "elevenlabs_output")
-                # Add other ELEVENLABS_ env vars from os.getenv() if needed
-            }
-        }
-        (example_base_dir / "elevenlabs_output").mkdir(exist_ok=True)
-    elif not elevenlabs_api_key:
-        ASCIIColors.yellow("ELEVENLABS_API_KEY not found in .env file or environment variables. Skipping ElevenLabs MCP server configuration.")
-
-    mcp_config = {"initial_servers": initial_mcp_servers}
-
-    # --- 3. Initialize LollmsClient ---
-    ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-    try:
-        client = LollmsClient(
-            binding_name="ollama",
-            model_name="mistral-nemo:latest",
-            mcp_binding_name="standard_mcp",
-            mcp_binding_config=mcp_config,
-        )
-    except Exception as e:
-        ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
-        trace_exception(e)
-        shutil.rmtree(example_base_dir, ignore_errors=True)
-        sys.exit(1)
-
-    if not client.binding:
-        ASCIIColors.error("LollmsClient's LLM binding (ollama) failed to load.")
-        shutil.rmtree(example_base_dir, ignore_errors=True)
-        sys.exit(1)
-    if not client.mcp:
-        ASCIIColors.error("LollmsClient's MCP binding (standard_mcp) failed to load.")
-        if hasattr(client, 'close'): client.close()
-        shutil.rmtree(example_base_dir, ignore_errors=True)
-        sys.exit(1)
-    ASCIIColors.green("LollmsClient initialized successfully.")
-
-
-    # --- 4. Define Streaming Callback (same as before) ---
-    def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
-        if metadata:
-            type_info = metadata.get('type', 'unknown_type')
-            if msg_type == MSG_TYPE.MSG_TYPE_STEP_START: ASCIIColors.cyan(f"MCP Step Start ({type_info}): {chunk}")
-            elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: ASCIIColors.cyan(f"MCP Step End ({type_info}): {chunk}")
-            elif msg_type == MSG_TYPE.MSG_TYPE_INFO: ASCIIColors.yellow(f"MCP Info ({type_info}): {chunk}")
-            elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
-            else: ASCIIColors.green(f"MCP Output ({str(msg_type)}, {type_info}): {chunk}")
-        else:
-            if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
-            else: ASCIIColors.green(f"MCP Output ({str(msg_type)}): {chunk}")
-        sys.stdout.flush()
-        return True
-
-    # --- 5. Use generate_with_mcp with local dummy servers ---
-    ASCIIColors.magenta("\n2. Calling generate_with_mcp to get current time (local dummy server)...")
-    time_prompt = "Hey assistant, what time is it right now?"
-    time_response = client.generate_with_mcp(
-        prompt=time_prompt,
-        streaming_callback=mcp_streaming_callback
-    )
-    print()
-    ASCIIColors.blue(f"Final response for time prompt: {json.dumps(time_response, indent=2)}")
-    assert time_response.get("error") is None, f"Time prompt error: {time_response.get('error')}"
-    assert time_response.get("final_answer"), "Time prompt no final answer."
-    assert len(time_response.get("tool_calls", [])) > 0, "Time prompt should call tool."
-    if time_response.get("tool_calls"):
-        assert time_response["tool_calls"][0]["name"] == "time_machine::get_current_time", "Incorrect tool for time."
-        assert "time" in time_response["tool_calls"][0].get("result", {}).get("output", {}), "Time tool result missing time."
-
-    ASCIIColors.magenta("\n3. Calling generate_with_mcp for calculation (local dummy server)...")
-    calc_prompt = "Can you sum 50, 25, and 7.5 for me?"
-    calc_response = client.generate_with_mcp(
-        prompt=calc_prompt,
-        streaming_callback=mcp_streaming_callback
-    )
-    print()
-    ASCIIColors.blue(f"Final response for calc prompt: {json.dumps(calc_response, indent=2)}")
-    assert calc_response.get("error") is None, f"Calc prompt error: {calc_response.get('error')}"
-    assert calc_response.get("final_answer"), "Calc prompt no final answer."
-    assert len(calc_response.get("tool_calls", [])) > 0, "Calc prompt should call tool."
-    if calc_response.get("tool_calls"):
-        assert calc_response["tool_calls"][0]["name"] == "calc_unit::add_numbers", "Incorrect tool for calc."
-        assert "sum" in calc_response["tool_calls"][0].get("result", {}).get("output", {}), "Calculator tool result missing sum."
-
-
-    # --- 6. Interact with ElevenLabs MCP Server (if configured) ---
-    if "elevenlabs" in client.mcp.get_binding_config().get("initial_servers", {}):
-        ASCIIColors.magenta("\n4. Interacting with ElevenLabs MCP server...")
-
-        ASCIIColors.info("Discovering all available tools (including ElevenLabs)...")
-        all_mcp_tools = client.mcp.discover_tools(force_refresh=True, timeout_per_server=45) # Longer timeout for external server
-        ASCIIColors.green(f"Discovered {len(all_mcp_tools)} tools in total:")
-        for tool in all_mcp_tools:
-            # Try to get properties keys from input_schema for a more informative print
-            props_keys = "N/A"
-            if isinstance(tool.get('input_schema'), dict) and isinstance(tool['input_schema'].get('properties'), dict):
-                props_keys = list(tool['input_schema']['properties'].keys())
-            print(f" - Name: {tool.get('name')}, Desc: {tool.get('description')}, Schema Props: {props_keys}")
-
-
-        elevenlabs_list_voices_tool_name = "elevenlabs::list_voices"
-        if any(t['name'] == elevenlabs_list_voices_tool_name for t in all_mcp_tools):
-            ASCIIColors.magenta(f"\n4a. Calling '{elevenlabs_list_voices_tool_name}' via LLM prompt...")
-
-            list_voices_prompt = "Please list all the available voices from the elevenlabs tool."
-            voices_response = client.generate_with_mcp(
-                prompt=list_voices_prompt,
-                streaming_callback=mcp_streaming_callback,
-                max_tool_calls=1
-            )
-            print()
-            ASCIIColors.blue(f"Final response for ElevenLabs list_voices prompt: {json.dumps(voices_response, indent=2)}")
-
-            assert voices_response.get("error") is None, f"ElevenLabs list_voices error: {voices_response.get('error')}"
-            assert voices_response.get("final_answer"), "ElevenLabs list_voices no final answer."
-            tool_calls = voices_response.get("tool_calls", [])
-            assert len(tool_calls) > 0, "ElevenLabs list_voices should call tool."
-            if tool_calls:
-                assert tool_calls[0]["name"] == elevenlabs_list_voices_tool_name, "Incorrect tool for ElevenLabs list_voices."
-                tool_output = tool_calls[0].get("result", {}).get("output")
-                assert isinstance(tool_output, list), f"ElevenLabs list_voices output not a list, got: {type(tool_output)}"
-                if tool_output:
-                    ASCIIColors.green(f"First voice from ElevenLabs: {tool_output[0].get('name')} (ID: {tool_output[0].get('voice_id')})")
-        else:
-            ASCIIColors.yellow(f"Tool '{elevenlabs_list_voices_tool_name}' not found. Skipping ElevenLabs tool execution test.")
-    else:
-        ASCIIColors.yellow("ElevenLabs MCP server not configured in this run (check .env for API key and uvx availability). Skipping ElevenLabs tests.")
-
-    # --- 7. Cleanup ---
-    ASCIIColors.magenta("\n5. Closing LollmsClient and cleaning up...")
-    if client and hasattr(client, 'close'):
-        try:
-            client.close()
-        except Exception as e:
-            ASCIIColors.error(f"Error closing LollmsClient: {e}")
-            trace_exception(e)
-
-    ASCIIColors.info("Cleaning up temporary server scripts directory...")
-    shutil.rmtree(example_base_dir, ignore_errors=True)
-
-    ASCIIColors.red("\n--- LollmsClient with MCP Example (including external) Finished ---")
-
-if __name__ == "__main__":
-    main()
--- examples/mcp_examples/local_mcp.py
+++ /dev/null
@@ -1,171 +0,0 @@
-from lollms_client import LollmsClient, MSG_TYPE
-from ascii_colors import ASCIIColors, trace_exception
-from pathlib import Path
-import json # For pretty printing results
-import os # For OPENAI_API_KEY
-
-# --- Configuration ---
-# LLM Configuration
-LLM_BINDING_NAME = "ollama" # Or "openai", "lollms", etc.
-OLLAMA_HOST_ADDRESS = "http://localhost:11434"
-OLLAMA_MODEL_NAME = "mistral:latest" # Ensure this model is capable of JSON and tool use decisions
-
-# TTI Configuration (for the generate_image_from_prompt MCP tool)
-TTI_BINDING_NAME = "dalle" # To use DALL-E via LollmsClient's TTI
-# OPENAI_API_KEY should be set as an environment variable for DALL-E
-
-# MCP Configuration
-# We will NOT provide mcp_binding_config.tools_folder_path,
-# so LocalMCPBinding should use its packaged default_tools.
-MCP_BINDING_NAME = "local_mcp"
-
-# Function Calling Parameters
-MAX_LLM_ITERATIONS_FOR_TOOL_CALLS = 4
-MAX_TOOL_CALLS_PER_TURN = 3
-
-# --- Callback for streaming ---
-def function_calling_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
-    """
-    Callback to handle streamed output during function calling.
-    """
-    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
-        ASCIIColors.success(chunk, end="", flush=True)
-
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-        step_type = metadata.get("type", "step") if metadata else "step"
-        step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
-        ASCIIColors.info(f"\n>> Starting {step_type}: {step_info}", flush=True)
-
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-        step_type = metadata.get("type", "step") if metadata else "step"
-        step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
-        if metadata and "result" in metadata:
-            ASCIIColors.success(f"\n<< Finished {step_type}: {step_info} -> Result: {json.dumps(metadata['result'])}", flush=True)
-        else:
-            ASCIIColors.success(f"\n<< Finished {step_type}: {step_info}", flush=True)
-
-    elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
-        if metadata and metadata.get("type") == "tool_call_request":
-            ASCIIColors.info(f"\nAI requests to call tool: {metadata.get('name')} with params: {metadata.get('params')}", flush=True)
-        else:
-            ASCIIColors.info(f"\nINFO: {chunk}", flush=True)
-
-    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-        ASCIIColors.error(f"\nERROR in stream: {chunk}", flush=True)
-
-    return True
-
-
-
-def run_default_tools_example():
-    ASCIIColors.red("--- LoLLMs Client with Default Local MCP Tools Example ---")
-
-    # Check for OpenAI API Key if DALL-E is used
-    if TTI_BINDING_NAME.lower() == "dalle" and not os.getenv("OPENAI_API_KEY"):
-        ASCIIColors.error("OPENAI_API_KEY environment variable is not set. DALL-E TTI will fail.")
-        ASCIIColors.error("Please set it or choose a different TTI_BINDING_NAME.")
-        # return # Optionally exit if key is critical for the test
-
-    try:
-        ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-
-        lc = LollmsClient(
-            binding_name=LLM_BINDING_NAME,
-            host_address=OLLAMA_HOST_ADDRESS, # For Ollama LLM
-            model_name=OLLAMA_MODEL_NAME, # For Ollama LLM
-
-            mcp_binding_name=MCP_BINDING_NAME,
-            # No mcp_binding_config, so LocalMCPBinding should use its 'default_tools'
-
-            tti_binding_name=TTI_BINDING_NAME, # For the 'generate_image_from_prompt' tool
-            # tti_binding_config would be needed here if DALL-E or other TTI bindings
-            # require specific init params beyond API key (which DALL-E binding gets from env).
-            # e.g. tti_binding_config={"api_key": "your_key_here"} if not using env for DALL-E.
-
-            temperature=0.1,
-            n_predict=1500 # Allow more tokens for complex reasoning and tool outputs
-        )
-        ASCIIColors.green("LollmsClient initialized successfully.")
-        if lc.mcp:
-            ASCIIColors.info(f"MCP Binding '{lc.mcp.binding_name}' loaded.")
-            discovered_tools_on_init = lc.mcp.discover_tools() # Should pick up default_tools
-            ASCIIColors.info(f"Tools initially discovered by MCP binding: {[t['name'] for t in discovered_tools_on_init]}")
-            assert any(t['name'] == 'internet_search' for t in discovered_tools_on_init), "Default 'internet_search' tool not found."
-            assert any(t['name'] == 'file_writer' for t in discovered_tools_on_init), "Default 'file_writer' tool not found."
-            assert any(t['name'] == 'python_interpreter' for t in discovered_tools_on_init), "Default 'python_interpreter' tool not found."
-            assert any(t['name'] == 'generate_image_from_prompt' for t in discovered_tools_on_init), "Default 'generate_image_from_prompt' tool not found."
-        else:
-            ASCIIColors.error("MCP binding was not loaded correctly. Aborting.")
-            return
-
-        if TTI_BINDING_NAME and not lc.tti:
-            ASCIIColors.warning(f"TTI binding '{TTI_BINDING_NAME}' was specified but not loaded in LollmsClient. The 'generate_image_from_prompt' tool may fail.")
-
-
-        # --- Example Interaction 1: Internet Search ---
-        ASCIIColors.magenta("\n2. Example: Asking for information requiring internet search")
-        user_prompt_search = "What were the main headlines on AI ethics in the last month?"
-        ASCIIColors.blue(f"User: {user_prompt_search}")
-        ASCIIColors.yellow(f"AI processing (streaming output):")
-
-        search_result_data = lc.generate_with_mcp(
-            prompt=user_prompt_search,
-            max_tool_calls=1, # Limit to one search for this
-            max_llm_iterations=2,
-            streaming_callback=function_calling_stream_callback,
-        )
-        print("\n--- End of AI Response (Search) ---")
-        if search_result_data["error"]:
-            ASCIIColors.error(f"Error in search example: {search_result_data['error']}")
-        else:
-            ASCIIColors.cyan(f"\nFinal Answer (Search): {search_result_data['final_answer']}")
-            ASCIIColors.info("\nTool Calls Made (Search Example):")
-            for tc in search_result_data["tool_calls"]:
-                # Truncate long snippets for display
-                if tc['name'] == 'internet_search' and 'output' in tc['result'] and 'search_results' in tc['result']['output']:
-                    for res_item in tc['result']['output']['search_results']:
-                        if 'snippet' in res_item and len(res_item['snippet']) > 100:
-                            res_item['snippet'] = res_item['snippet'][:100] + "..."
-                print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'], indent=2)}")
-
-
-        # --- Example Interaction 2: Image Generation ---
-        ASCIIColors.magenta("\n3. Example: Requesting an image generation")
-        user_prompt_image = "Please generate an image of a futuristic robot holding a glowing orb."
-        ASCIIColors.blue(f"User: {user_prompt_image}")
-        ASCIIColors.yellow(f"AI processing (streaming output):")
-
-        image_gen_result_data = lc.generate_with_mcp(
-            prompt=user_prompt_image,
-            max_tool_calls=1,
-            max_llm_iterations=2,
-            streaming_callback=function_calling_stream_callback,
-        )
-        print("\n--- End of AI Response (Image Gen) ---")
-        if image_gen_result_data["error"]:
-            ASCIIColors.error(f"Error in image gen example: {image_gen_result_data['error']}")
-        else:
-            ASCIIColors.cyan(f"\nFinal Answer (Image Gen): {image_gen_result_data['final_answer']}")
-            ASCIIColors.info("\nTool Calls Made (Image Gen Example):")
-            for tc in image_gen_result_data["tool_calls"]:
-                print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'], indent=2)}")
-                if tc['name'] == 'generate_image_from_prompt' and tc['result'].get('output', {}).get('status') == 'success':
-                    img_path = tc['result']['output'].get('image_path')
-                    img_url = tc['result']['output'].get('image_url')
-                    ASCIIColors.green(f"Image was reportedly saved. Path hint: {img_path}, URL: {img_url}")
-                    ASCIIColors.info("Check your LollmsClient outputs/mcp_generated_images/ directory (or similar based on tool's save logic).")
-
-
-    except ValueError as ve:
-        ASCIIColors.error(f"Initialization Error: {ve}")
-        trace_exception(ve)
-    except ConnectionRefusedError:
-        ASCIIColors.error(f"Connection refused. Is the Ollama server running at {OLLAMA_HOST_ADDRESS}?")
-    except Exception as e:
-        ASCIIColors.error(f"An unexpected error occurred: {e}")
-        trace_exception(e)
-
-    ASCIIColors.red("\n--- Default Tools Example Finished ---")
-
-if __name__ == "__main__":
-    run_default_tools_example()
--- examples/mcp_examples/openai_mcp.py
+++ /dev/null
@@ -1,203 +0,0 @@
-# File: run_openai_mcp_example.py
-# (Keep imports, path adjustments, helper functions, and initial setup as before)
-
-import sys
-import os
-import shutil
-from pathlib import Path
-import json
-import base64
-from dotenv import load_dotenv
-
-load_dotenv() # For this script's own potential .env
-
-try:
-    from lollms_client import LollmsClient
-    from ascii_colors import ASCIIColors, trace_exception
-    from lollms_client.lollms_types import MSG_TYPE
-except ImportError as e:
-    print(f"ERROR: Could not import LollmsClient components: {e}")
-    trace_exception(e)
-    sys.exit(1)
-
-PATH_TO_OPENAI_MCP_SERVER_PROJECT = Path(__file__).resolve().parent # Standard if script is in PArisNeoMCPServers root
-if not PATH_TO_OPENAI_MCP_SERVER_PROJECT.is_dir():
-    print(f"ERROR: openai-mcp-server project not found at {PATH_TO_OPENAI_MCP_SERVER_PROJECT}")
-    sys.exit(1)
-
-OUTPUT_DIRECTORY = Path(__file__).resolve().parent / "mcp_example_outputs"
-OUTPUT_DIRECTORY.mkdir(parents=True, exist_ok=True)
-
-def save_base64_audio(base64_str: str, filename_stem: str, audio_format: str) -> Path:
-    audio_bytes = base64.b64decode(base64_str)
-    file_path = OUTPUT_DIRECTORY / f"{filename_stem}.{audio_format}"
-    with open(file_path, "wb") as f: f.write(audio_bytes)
-    ASCIIColors.green(f"Audio saved to: {file_path}")
-    return file_path
-
-def save_base64_image(base64_str: str, filename_stem: str) -> Path:
-    image_bytes = base64.b64decode(base64_str)
-    file_path = OUTPUT_DIRECTORY / f"{filename_stem}.png"
-    with open(file_path, "wb") as f: f.write(image_bytes)
-    ASCIIColors.green(f"Image saved to: {file_path}")
-    return file_path
-
-def main():
-    ASCIIColors.red(f"--- Example: Using LollmsClient with OpenAI MCP Server (TTS & DALL-E) ---")
-    ASCIIColors.red(f"--- Make sure OPENAI_API_KEY is set in: {PATH_TO_OPENAI_MCP_SERVER_PROJECT / '.env'} ---")
-    ASCIIColors.red(f"--- And that 'uv pip install -e .' has been run in: {PATH_TO_OPENAI_MCP_SERVER_PROJECT} ---")
-
-    # Determine the Python executable within the server's .venv IF IT EXISTS
-    # This is the most robust way to ensure the server runs with its own isolated dependencies.
-    path_to_openai_server_venv_python = PATH_TO_OPENAI_MCP_SERVER_PROJECT / ".venv" / ("Scripts" if os.name == "nt" else "bin") / "python"
-
-    python_exe_to_use = None
-    if path_to_openai_server_venv_python.exists():
-        python_exe_to_use = str(path_to_openai_server_venv_python.resolve())
-        ASCIIColors.cyan(f"Attempting to use Python from server's .venv: {python_exe_to_use}")
-    else:
-        python_exe_to_use = sys.executable # Fallback to current script's Python
-        ASCIIColors.yellow(f"Server's .venv Python not found at {path_to_openai_server_venv_python}. Using current environment's Python: {python_exe_to_use}")
-        ASCIIColors.yellow("Ensure openai-mcp-server dependencies are met in the current environment if its .venv is not used.")
-
-    mcp_config = {
-        "initial_servers": {
-            "my_openai_server": {
-                "command": [
-                    "uv", # Use uv to manage the environment for the python execution
-                    "run",
-                    "--quiet", # Optional: reduce uv's own output unless there's an error
-                    "--", # Separator: arguments after this are for the command being run by `uv run`
-                    python_exe_to_use, # Explicitly specify the Python interpreter
-                    str((PATH_TO_OPENAI_MCP_SERVER_PROJECT / "openai_mcp_server" / "server.py").resolve()) # Full path to your server script
-                ],
-                "args": [], # No *additional* arguments for server.py itself here
-                "cwd": str(PATH_TO_OPENAI_MCP_SERVER_PROJECT.resolve()), # CRUCIAL
-            }
-        }
-    }
-
-
-    ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-    try:
-        client = LollmsClient(
-            binding_name="ollama",
-            model_name="mistral-nemo:latest",
-            mcp_binding_name="standard_mcp",
-            mcp_binding_config=mcp_config,
-        )
-    except Exception as e:
-        ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
-        trace_exception(e)
-        sys.exit(1)
-
-    if not client.binding or not client.mcp:
-        ASCIIColors.error("LollmsClient LLM or MCP binding failed to load.")
-        if hasattr(client, 'close'): client.close()
-        sys.exit(1)
-    ASCIIColors.green("LollmsClient initialized successfully.")
-
-    def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
-        prefix = ""
-        color_func = ASCIIColors.green
-        if metadata:
-            type_info = metadata.get('type', 'unknown_type')
-            tool_name_info = metadata.get('tool_name', '')
-            prefix = f"MCP ({type_info}{f' - {tool_name_info}' if tool_name_info else ''})"
-            if msg_type == MSG_TYPE.MSG_TYPE_STEP_START: color_func = ASCIIColors.cyan; prefix += " Step Start"
-            elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: color_func = ASCIIColors.cyan; prefix += " Step End"
-            elif msg_type == MSG_TYPE.MSG_TYPE_INFO: color_func = ASCIIColors.yellow; prefix += " Info"
-            elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION: color_func = ASCIIColors.red; prefix += " Exception"
-        else:
-            prefix = f"MCP (Type: {str(msg_type).split('.')[-1]})"
-        if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.green(chunk, end="")
-        else: color_func(f"{prefix}: {chunk}")
-        sys.stdout.flush()
-        return True
-
-    # --- Test 1: General Text Query (handled by Ollama, no MCP tool expected) ---
-    ASCIIColors.magenta("\n2. Test: General Text Query (should be handled by Ollama)")
-    general_query = "What is the capital of France?"
-    general_response = client.generate_with_mcp( # generate_with_mcp will discover no suitable text tool
-        prompt=general_query,
-        streaming_callback=mcp_streaming_callback,
-        # tools=[] # Optionally explicitly pass an empty list of tools if you want to be sure
-        # generate_with_mcp will discover tools from the binding if not passed
-    )
-    print()
-    ASCIIColors.blue(f"Final response for general query: {json.dumps(general_response, indent=2)}")
-    assert general_response.get("error") is None, f"General query error: {general_response.get('error')}"
-    assert general_response.get("final_answer"), "General query: no final answer."
-    tool_calls_general = general_response.get("tool_calls", [])
-    assert len(tool_calls_general) == 0, "General query should NOT have called an MCP tool from my_openai_server."
-    ASCIIColors.green(f"General query handled by LLM directly, as expected. Answer: {general_response.get('final_answer')[:100]}...")
-
-
-    # --- Test 2: Text-to-Speech (TTS) ---
-    ASCIIColors.magenta("\n3. Test: OpenAI TTS via MCP")
-    tts_text = "This audio was generated by the OpenAI MCP server through Lollms Client."
-    tts_prompt_for_llm = f"Please use the OpenAI tool to say the following using tts: '{tts_text}'."
-
-    tts_response = client.generate_with_mcp(
-        prompt=tts_prompt_for_llm,
-        streaming_callback=mcp_streaming_callback,
-        max_tool_calls=1
-    )
-    print()
-    ASCIIColors.blue(f"Final response for TTS prompt: {json.dumps(tts_response, indent=2)}")
-
-    assert tts_response.get("error") is None, f"TTS error: {tts_response.get('error')}"
-    assert tts_response.get("final_answer"), "TTS: no final answer (LLM should confirm action)."
-    tool_calls_tts = tts_response.get("tool_calls", [])
-    assert len(tool_calls_tts) > 0, "TTS should have called a tool."
-    if tool_calls_tts:
-        assert tool_calls_tts[0]["name"] == "my_openai_server::generate_tts", "Incorrect tool for TTS."
-        tts_result_output = tool_calls_tts[0].get("result", {}).get("output", {})
-        assert "audio_base64" in tts_result_output, "TTS tool result missing 'audio_base64'."
-        assert "format" in tts_result_output, "TTS tool result missing 'format'."
-        if tts_result_output.get("audio_base64"):
-            save_base64_audio(tts_result_output["audio_base64"], "openai_tts_example_output", tts_result_output["format"])
-
-    # --- Test 3: DALL-E Image Generation ---
-    ASCIIColors.magenta("\n4. Test: OpenAI DALL-E Image Generation via MCP")
-    dalle_image_prompt = "A vibrant illustration of a friendly AI robot helping a human plant a tree on a futuristic Earth."
-    dalle_prompt_for_llm = f"I need an image for a presentation. Can you use DALL-E to create this: {dalle_image_prompt}. Please use URL format for the image."
-
-    dalle_response = client.generate_with_mcp(
-        prompt=dalle_prompt_for_llm,
-        streaming_callback=mcp_streaming_callback,
-        max_tool_calls=1,
-        # You could also try to force params for the tool if LLM struggles:
-        # Example: if LLM isn't picking response_format="url"
-        # This requires knowing the exact tool name and schema, usually let LLM handle it.
-    )
-    print()
-    ASCIIColors.blue(f"Final response for DALL-E prompt: {json.dumps(dalle_response, indent=2)}")
-
-    assert dalle_response.get("error") is None, f"DALL-E error: {dalle_response.get('error')}"
-    assert dalle_response.get("final_answer"), "DALL-E: no final answer (LLM should confirm action)."
-    tool_calls_dalle = dalle_response.get("tool_calls", [])
-    assert len(tool_calls_dalle) > 0, "DALL-E should have called a tool."
-    if tool_calls_dalle:
-        assert tool_calls_dalle[0]["name"] == "my_openai_server::generate_image_dalle", "Incorrect tool for DALL-E."
-        dalle_result_output = tool_calls_dalle[0].get("result", {}).get("output", {})
-        assert "images" in dalle_result_output and isinstance(dalle_result_output["images"], list), "DALL-E result missing 'images' list."
-        if dalle_result_output.get("images"):
-            image_data = dalle_result_output["images"][0]
-            if image_data.get("url"):
-                ASCIIColors.green(f"DALL-E image URL: {image_data['url']}")
-                ASCIIColors.info(f"Revised prompt by DALL-E: {image_data.get('revised_prompt')}")
-            elif image_data.get("b64_json"):
-                save_base64_image(image_data["b64_json"], "openai_dalle_example_output")
-                ASCIIColors.info(f"Revised prompt by DALL-E: {image_data.get('revised_prompt')}")
-
-    ASCIIColors.magenta("\n5. Closing LollmsClient...")
-    if client and hasattr(client, 'close'):
-        try: client.close()
-        except Exception as e: ASCIIColors.error(f"Error closing LollmsClient: {e}"); trace_exception(e)
-
-    ASCIIColors.info(f"Example finished. Check {OUTPUT_DIRECTORY} for any generated files.")
-    ASCIIColors.red("\n--- LollmsClient with OpenAI MCP Server (TTS & DALL-E) Example Finished ---")
-
-if __name__ == "__main__":
-    main()