lollms-client 0.32.1__py3-none-any.whl → 1.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

Files changed (73)
  1. lollms_client/__init__.py +1 -1
  2. lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
  3. lollms_client/llm_bindings/claude/__init__.py +4 -7
  4. lollms_client/llm_bindings/gemini/__init__.py +3 -7
  5. lollms_client/llm_bindings/grok/__init__.py +3 -7
  6. lollms_client/llm_bindings/groq/__init__.py +4 -7
  7. lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
  8. lollms_client/llm_bindings/litellm/__init__.py +15 -6
  9. lollms_client/llm_bindings/llamacpp/__init__.py +214 -388
  10. lollms_client/llm_bindings/lollms/__init__.py +24 -14
  11. lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
  12. lollms_client/llm_bindings/mistral/__init__.py +58 -29
  13. lollms_client/llm_bindings/ollama/__init__.py +6 -11
  14. lollms_client/llm_bindings/open_router/__init__.py +45 -14
  15. lollms_client/llm_bindings/openai/__init__.py +7 -14
  16. lollms_client/llm_bindings/openllm/__init__.py +12 -12
  17. lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
  18. lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
  19. lollms_client/llm_bindings/transformers/__init__.py +14 -6
  20. lollms_client/llm_bindings/vllm/__init__.py +16 -12
  21. lollms_client/lollms_core.py +296 -487
  22. lollms_client/lollms_discussion.py +436 -78
  23. lollms_client/lollms_llm_binding.py +223 -11
  24. lollms_client/lollms_mcp_binding.py +33 -2
  25. lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
  26. lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
  27. lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
  28. lollms_client/stt_bindings/lollms/__init__.py +6 -8
  29. lollms_client/stt_bindings/whisper/__init__.py +2 -4
  30. lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
  31. lollms_client/tti_bindings/dalle/__init__.py +29 -28
  32. lollms_client/tti_bindings/diffusers/__init__.py +25 -21
  33. lollms_client/tti_bindings/gemini/__init__.py +215 -0
  34. lollms_client/tti_bindings/lollms/__init__.py +8 -9
  35. lollms_client-1.0.0.dist-info/METADATA +1214 -0
  36. lollms_client-1.0.0.dist-info/RECORD +69 -0
  37. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/top_level.txt +0 -2
  38. examples/article_summary/article_summary.py +0 -58
  39. examples/console_discussion/console_app.py +0 -266
  40. examples/console_discussion.py +0 -448
  41. examples/deep_analyze/deep_analyse.py +0 -30
  42. examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
  43. examples/function_calling_with_local_custom_mcp.py +0 -250
  44. examples/generate_a_benchmark_for_safe_store.py +0 -89
  45. examples/generate_and_speak/generate_and_speak.py +0 -251
  46. examples/generate_game_sfx/generate_game_fx.py +0 -240
  47. examples/generate_text_with_multihop_rag_example.py +0 -210
  48. examples/gradio_chat_app.py +0 -228
  49. examples/gradio_lollms_chat.py +0 -259
  50. examples/internet_search_with_rag.py +0 -226
  51. examples/lollms_chat/calculator.py +0 -59
  52. examples/lollms_chat/derivative.py +0 -48
  53. examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
  54. examples/lollms_discussions_test.py +0 -155
  55. examples/mcp_examples/external_mcp.py +0 -267
  56. examples/mcp_examples/local_mcp.py +0 -171
  57. examples/mcp_examples/openai_mcp.py +0 -203
  58. examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
  59. examples/mcp_examples/run_standard_mcp_example.py +0 -204
  60. examples/simple_text_gen_test.py +0 -173
  61. examples/simple_text_gen_with_image_test.py +0 -178
  62. examples/test_local_models/local_chat.py +0 -9
  63. examples/text_2_audio.py +0 -77
  64. examples/text_2_image.py +0 -144
  65. examples/text_2_image_diffusers.py +0 -274
  66. examples/text_and_image_2_audio.py +0 -59
  67. examples/text_gen.py +0 -30
  68. examples/text_gen_system_prompt.py +0 -29
  69. lollms_client-0.32.1.dist-info/METADATA +0 -854
  70. lollms_client-0.32.1.dist-info/RECORD +0 -101
  71. test/test_lollms_discussion.py +0 -368
  72. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/WHEEL +0 -0
  73. {lollms_client-0.32.1.dist-info → lollms_client-1.0.0.dist-info}/licenses/LICENSE +0 -0
examples/mcp_examples/run_remote_mcp_example_v2.py (deleted)
@@ -1,290 +0,0 @@
- # File: run_lollms_client_with_mcp_example.py
-
- import sys
- import os
- import shutil
- from pathlib import Path
- import json
- from lollms_client import LollmsClient
- import subprocess
- from typing import Optional, List, Dict, Any
-
-
- MOCK_KNOWLEDGE_BASE = {
-     "python_basics.md": [
-         {"chunk_id": 1, "text": "Python is a high-level, interpreted programming language known for its readability and versatility. It was created by Guido van Rossum and first released in 1991."},
-         {"chunk_id": 2, "text": "Key features of Python include dynamic typing, automatic memory management (garbage collection), and a large standard library. It supports multiple programming paradigms, such as procedural, object-oriented, and functional programming."},
-         {"chunk_id": 3, "text": "Common applications of Python include web development (e.g., Django, Flask), data science (e.g., Pandas, NumPy, Scikit-learn), machine learning, artificial intelligence, automation, and scripting."},
-     ],
-     "javascript_info.js": [
-         {"chunk_id": 1, "text": "JavaScript is a scripting language primarily used for front-end web development to create interactive effects within web browsers. It is also used in back-end development (Node.js), mobile app development, and game development."},
-         {"chunk_id": 2, "text": "JavaScript is dynamically typed, prototype-based, and multi-paradigm. Along with HTML and CSS, it is one of the core technologies of the World Wide Web."},
-         {"chunk_id": 3, "text": "Popular JavaScript frameworks and libraries include React, Angular, Vue.js for front-end, and Express.js for Node.js back-end applications."},
-     ],
-     "ai_concepts.txt": [
-         {"chunk_id": 1, "text": "Artificial Intelligence (AI) refers to the simulation of human intelligence in machines that are programmed to think like humans and mimic their actions. The term may also be applied to any machine that exhibits traits associated with a human mind such as learning and problem-solving."},
-         {"chunk_id": 2, "text": "Machine Learning (ML) is a subset of AI that provides systems the ability to automatically learn and improve from experience without being explicitly programmed. Deep Learning (DL) is a further subset of ML based on artificial neural networks with representation learning."},
-         {"chunk_id": 3, "text": "Retrieval Augmented Generation (RAG) is an AI framework for improving the quality of LLM-generated responses by grounding the model on external sources of knowledge to supplement the LLM’s internal representation of information."},
-     ]
- }
- # --- Dynamically adjust Python path to find lollms_client ---
- # This assumes the example script is in a directory, and 'lollms_client' is
- # in a sibling directory or a known relative path. Adjust as needed.
- # For example, if script is in 'lollms_client/examples/' and lollms_client code is in 'lollms_client/'
- # then the parent of the script's parent is the project root.
-
- # Get the directory of the current script
- current_script_dir = Path(__file__).resolve().parent
-
- # Option 1: If lollms_client is in the parent directory of this script's directory
- # (e.g. script is in 'project_root/examples' and lollms_client is in 'project_root/lollms_client')
- # project_root = current_script_dir.parent
- # lollms_client_path = project_root / "lollms_client" # Assuming this is where lollms_client.py and bindings are
-
- # Option 2: If lollms_client package is directly one level up
- # (e.g. script is in 'lollms_client/examples' and lollms_client package is 'lollms_client')
- project_root_for_lollms_client = current_script_dir.parent
- if str(project_root_for_lollms_client) not in sys.path:
-     sys.path.insert(0, str(project_root_for_lollms_client))
-     print(f"Added to sys.path: {project_root_for_lollms_client}")
-
-
- # --- Ensure pipmaster is available (core LoLLMs dependency) ---
- try:
-     import pipmaster as pm
- except ImportError:
-     print("ERROR: pipmaster is not installed or not in PYTHONPATH.")
-     sys.exit(1)
-
- # --- Import LollmsClient and supporting components ---
- try:
-
-     from lollms_client.lollms_llm_binding import LollmsLLMBinding # Base for LLM
-     from ascii_colors import ASCIIColors, trace_exception
-     from lollms_client.lollms_types import MSG_TYPE # Assuming MSG_TYPE is here
- except ImportError as e:
-     print(f"ERROR: Could not import LollmsClient components: {e}")
-     print("Ensure 'lollms_client' package structure is correct and accessible via PYTHONPATH.")
-     print(f"Current sys.path: {sys.path}")
-     trace_exception(e)
-     sys.exit(1)
-
-
- # --- Dummy Server Scripts using FastMCP (as per previous successful iteration) ---
- TIME_SERVER_PY = """
- import asyncio
- from datetime import datetime
- from mcp.server.fastmcp import FastMCP
-
- mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.", host="localhost",
-                      port=9624,
-                      log_level="DEBUG")
-
- @mcp_server.tool(description="Returns the current server time and echoes received parameters.")
- def get_current_time(user_id: str = "unknown_user") -> dict:
-     return {"time": datetime.now().isoformat(), "params_received": {"user_id": user_id}, "server_name": "TimeServer"}
-
- if __name__ == "__main__":
-     mcp_server.run(transport="streamable-http")
- """
-
- CALCULATOR_SERVER_PY = """
- import asyncio
- from typing import List, Union
- from mcp.server.fastmcp import FastMCP
-
- mcp_server = FastMCP("CalculatorMCP", description="A server that performs addition.", host="localhost",
-                      port=9625,
-                      log_level="DEBUG")
-
- @mcp_server.tool(description="Adds a list of numbers provided in the 'numbers' parameter.")
- def add_numbers(numbers: List[Union[int, float]]) -> dict:
-     if not isinstance(numbers, list) or not all(isinstance(x, (int, float)) for x in numbers):
-         return {"error": "'numbers' must be a list of numbers."}
-     return {"sum": sum(numbers), "server_name": "CalculatorServer"}
-
- if __name__ == "__main__":
-     mcp_server.run(transport="streamable-http")
- """
-
-
- def main():
-     ASCIIColors.red("--- Example: Using LollmsClient with StandardMCPBinding ---")
-
-     # --- 1. Setup Temporary Directory for Dummy MCP Servers ---
-     example_base_dir = Path(__file__).parent / "temp_mcp_example_servers"
-     if example_base_dir.exists():
-         shutil.rmtree(example_base_dir)
-     example_base_dir.mkdir(exist_ok=True)
-
-     time_server_script_path = example_base_dir / "time_server.py"
-     with open(time_server_script_path, "w") as f: f.write(TIME_SERVER_PY)
-
-     calculator_server_script_path = example_base_dir / "calculator_server.py"
-     with open(calculator_server_script_path, "w") as f: f.write(CALCULATOR_SERVER_PY)
-
-     subprocess.Popen(
-         [sys.executable, str(time_server_script_path.resolve())],
-         stdin=subprocess.DEVNULL,
-         stdout=subprocess.DEVNULL,
-         stderr=subprocess.DEVNULL,
-         start_new_session=True
-     )
-
-     subprocess.Popen(
-         [sys.executable, str(calculator_server_script_path.resolve())],
-         stdin=subprocess.DEVNULL,
-         stdout=subprocess.DEVNULL,
-         stderr=subprocess.DEVNULL,
-         start_new_session=True
-     )
-     # MCP Binding Configuration (for RemoteMCPBinding with multiple servers)
-     mcp_config = {
-         "servers_infos":{
-             "time_machine":{
-                 "server_url": "http://localhost:9624/mcp",
-             },
-
-             "calc_unit":{
-                 "server_url": "http://localhost:9625/mcp",
-             },
-         }
-     }
-     ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-     try:
-         client = LollmsClient(
-             binding_name="ollama", # Use the dummy LLM binding
-             model_name="mistral-nemo:latest",
-             mcp_binding_name="remote_mcp",
-             mcp_binding_config=mcp_config,
-
-         )
-     except Exception as e:
-         ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
-         trace_exception(e)
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-
-     if not client.binding:
-         ASCIIColors.error("LollmsClient's LLM binding (dummy_llm) failed to load.")
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-     if not client.mcp:
-         ASCIIColors.error("LollmsClient's MCP binding (standard_mcp) failed to load.")
-         client.close() # Close LLM binding if it loaded
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-
-     ASCIIColors.green("LollmsClient initialized successfully with DummyLLM and StandardMCP bindings.")
-
-     # --- 3. Define a streaming callback for generate_with_mcp ---
-     def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
-         if metadata:
-             type_info = metadata.get('type', 'unknown_type')
-             if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-                 ASCIIColors.cyan(f"MCP Step Start ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-                 ASCIIColors.cyan(f"MCP Step End ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
-                 ASCIIColors.yellow(f"MCP Info ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Part of final answer typically
-                 ASCIIColors.green(chunk, end="") # type: ignore
-             else: # FULL, default, etc.
-                 ASCIIColors.green(f"MCP Output ({str(msg_type)}, {type_info}): {chunk}")
-         else:
-             if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
-                 ASCIIColors.green(chunk, end="") # type: ignore
-             else:
-                 ASCIIColors.green(f"MCP Output ({str(msg_type)}): {chunk}")
-         sys.stdout.flush()
-         return True # Continue streaming
-
-     # --- 4. Use generate_with_mcp ---
-
-     def mock_rag_query_function(
-         query_text: str,
-         vectorizer_name: Optional[str] = None, # Ignored in mock
-         top_k: int = 3,
-         min_similarity_percent: float = 0.0 # Ignored in mock, simple keyword match
-     ) -> List[Dict[str, Any]]:
-         """
-         A mock RAG query function.
-         Performs a simple keyword search in the MOCK_KNOWLEDGE_BASE.
-         """
-         ASCIIColors.magenta(f" [MOCK RAG] Querying with: '{query_text}', top_k={top_k}")
-         results = []
-         query_lower = query_text.lower()
-
-         all_chunks = []
-         for file_path, chunks_in_file in MOCK_KNOWLEDGE_BASE.items():
-             for chunk_data in chunks_in_file:
-                 all_chunks.append({"file_path": file_path, **chunk_data})
-
-         # Simple keyword matching and scoring (very basic)
-         scored_chunks = []
-         for chunk_info in all_chunks:
-             score = 0
-             for keyword in query_lower.split():
-                 if keyword in chunk_info["text"].lower() and len(keyword)>2: # Basic relevance
-                     score += 1
-             if "python" in query_lower and "python" in chunk_info["file_path"].lower(): score+=5
-             if "javascript" in query_lower and "javascript" in chunk_info["file_path"].lower(): score+=5
-             if "ai" in query_lower and "ai" in chunk_info["file_path"].lower(): score+=3
-
-
-             if score > 0 : # Only include if some keywords match
-                 # Simulate similarity percentage (higher score = higher similarity)
-                 similarity = min(100.0, score * 20.0 + 40.0) # Arbitrary scaling
-                 if similarity >= min_similarity_percent:
-                     scored_chunks.append({
-                         "file_path": chunk_info["file_path"],
-                         "chunk_text": chunk_info["text"],
-                         "similarity_percent": similarity,
-                         "_score_for_ranking": score # Internal score for sorting
-                     })
-     ASCIIColors.magenta("\n2. Calling generate_with_mcp to get current time...")
-     time_prompt = "Hey assistant, what time is it right now?"
-     time_response = client.generate_with_mcp_rag(
-         prompt=time_prompt,
-         use_mcps=True,
-         use_data_store={"coding_store":mock_rag_query_function},
-         streaming_callback=mcp_streaming_callback,
-         interactive_tool_execution=False # Set to True to test interactive mode
-     )
-     print() # Newline after streaming
-     ASCIIColors.blue(f"Final response for time prompt: {json.dumps(time_response, indent=2)}")
-
-     assert time_response.get("error") is None, f"Time prompt resulted in an error: {time_response.get('error')}"
-     assert time_response.get("final_answer"), "Time prompt did not produce a final answer."
-     assert len(time_response.get("tool_calls", [])) > 0, "Time prompt should have called a tool."
-     assert time_response["tool_calls"][0]["name"] == "time_machine::get_current_time", "Incorrect tool called for time."
-     assert "time" in time_response["tool_calls"][0].get("result", {}).get("output", {}), "Time tool result missing time."
-
-
-     ASCIIColors.magenta("\n3. Calling generate_with_mcp for calculation...")
-     calc_prompt = "Can you please calculate the sum of 50, 25, and 7.5 for me?"
-     calc_response = client.generate_with_mcp(
-         prompt=calc_prompt,
-         streaming_callback=mcp_streaming_callback
-     )
-     print() # Newline
-     ASCIIColors.blue(f"Final response for calc prompt: {json.dumps(calc_response, indent=2)}")
-
-     assert calc_response.get("error") is None, f"Calc prompt resulted in an error: {calc_response.get('error')}"
-     assert calc_response.get("final_answer"), "Calc prompt did not produce a final answer."
-     assert len(calc_response.get("tool_calls", [])) > 0, "Calc prompt should have called a tool."
-     assert calc_response["tool_calls"][0]["name"] == "calc_unit::add_numbers", "Incorrect tool called for calculation."
-     # The dummy LLM uses hardcoded params [1,2,3] for calc, so result will be 6.
-     # A real LLM would extract 50, 25, 7.5.
-     # For this dummy test, we check against the dummy's behavior.
-     assert calc_response["tool_calls"][0].get("result", {}).get("output", {}).get("sum") == 82.5, "Calculator tool result mismatch for dummy params."
-
-
-     # --- 5. Cleanup ---
-     ASCIIColors.info("Cleaning up temporary server scripts and dummy binding directory...")
-     shutil.rmtree(example_base_dir, ignore_errors=True)
-
-     ASCIIColors.red("\n--- LollmsClient with StandardMCPBinding Example Finished Successfully! ---")
-
- if __name__ == "__main__":
-     main()
examples/mcp_examples/run_standard_mcp_example.py (deleted)
@@ -1,204 +0,0 @@
- # File: run_lollms_client_with_mcp_example.py
-
- import sys
- import os
- import shutil
- from pathlib import Path
- import json
- from lollms_client import LollmsClient
- # --- Dynamically adjust Python path to find lollms_client ---
- # This assumes the example script is in a directory, and 'lollms_client' is
- # in a sibling directory or a known relative path. Adjust as needed.
- # For example, if script is in 'lollms_client/examples/' and lollms_client code is in 'lollms_client/'
- # then the parent of the script's parent is the project root.
-
- # Get the directory of the current script
- current_script_dir = Path(__file__).resolve().parent
-
- # Option 1: If lollms_client is in the parent directory of this script's directory
- # (e.g. script is in 'project_root/examples' and lollms_client is in 'project_root/lollms_client')
- # project_root = current_script_dir.parent
- # lollms_client_path = project_root / "lollms_client" # Assuming this is where lollms_client.py and bindings are
-
- # Option 2: If lollms_client package is directly one level up
- # (e.g. script is in 'lollms_client/examples' and lollms_client package is 'lollms_client')
- project_root_for_lollms_client = current_script_dir.parent
- if str(project_root_for_lollms_client) not in sys.path:
-     sys.path.insert(0, str(project_root_for_lollms_client))
-     print(f"Added to sys.path: {project_root_for_lollms_client}")
-
-
- # --- Ensure pipmaster is available (core LoLLMs dependency) ---
- try:
-     import pipmaster as pm
- except ImportError:
-     print("ERROR: pipmaster is not installed or not in PYTHONPATH.")
-     sys.exit(1)
-
- # --- Import LollmsClient and supporting components ---
- try:
-
-     from lollms_client.lollms_llm_binding import LollmsLLMBinding # Base for LLM
-     from ascii_colors import ASCIIColors, trace_exception
-     from lollms_client.lollms_types import MSG_TYPE # Assuming MSG_TYPE is here
- except ImportError as e:
-     print(f"ERROR: Could not import LollmsClient components: {e}")
-     print("Ensure 'lollms_client' package structure is correct and accessible via PYTHONPATH.")
-     print(f"Current sys.path: {sys.path}")
-     trace_exception(e)
-     sys.exit(1)
-
-
- # --- Dummy Server Scripts using FastMCP (as per previous successful iteration) ---
- TIME_SERVER_PY = """
- import asyncio
- from datetime import datetime
- from mcp.server.fastmcp import FastMCP
-
- mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.")
-
- @mcp_server.tool(description="Returns the current server time and echoes received parameters.")
- def get_current_time(user_id: str = "unknown_user") -> dict:
-     return {"time": datetime.now().isoformat(), "params_received": {"user_id": user_id}, "server_name": "TimeServer"}
-
- if __name__ == "__main__":
-     mcp_server.run(transport="stdio")
- """
-
- CALCULATOR_SERVER_PY = """
- import asyncio
- from typing import List, Union
- from mcp.server.fastmcp import FastMCP
-
- mcp_server = FastMCP("CalculatorMCP", description="A server that performs addition.")
-
- @mcp_server.tool(description="Adds a list of numbers provided in the 'numbers' parameter.")
- def add_numbers(numbers: List[Union[int, float]]) -> dict:
-     if not isinstance(numbers, list) or not all(isinstance(x, (int, float)) for x in numbers):
-         return {"error": "'numbers' must be a list of numbers."}
-     return {"sum": sum(numbers), "server_name": "CalculatorServer"}
-
- if __name__ == "__main__":
-     mcp_server.run(transport="stdio")
- """
-
-
- def main():
-     ASCIIColors.red("--- Example: Using LollmsClient with StandardMCPBinding ---")
-
-     # --- 1. Setup Temporary Directory for Dummy MCP Servers ---
-     example_base_dir = Path(__file__).parent / "temp_mcp_example_servers"
-     if example_base_dir.exists():
-         shutil.rmtree(example_base_dir)
-     example_base_dir.mkdir(exist_ok=True)
-
-     time_server_script_path = example_base_dir / "time_server.py"
-     with open(time_server_script_path, "w") as f: f.write(TIME_SERVER_PY)
-
-     calculator_server_script_path = example_base_dir / "calculator_server.py"
-     with open(calculator_server_script_path, "w") as f: f.write(CALCULATOR_SERVER_PY)
-
-     # MCP Binding Configuration (for StandardMCPBinding)
-     mcp_config = {
-         "initial_servers": {
-             "time_machine": {
-                 "command": [sys.executable, str(time_server_script_path.resolve())],
-             },
-             "calc_unit": {
-                 "command": [sys.executable, str(calculator_server_script_path.resolve())]
-             }
-         }
-     }
-     ASCIIColors.magenta("\n1. Initializing LollmsClient...")
-     try:
-         client = LollmsClient(
-             binding_name="ollama", # Use the dummy LLM binding
-             model_name="mistral-nemo:latest",
-             mcp_binding_name="standard_mcp",
-             mcp_binding_config=mcp_config,
-         )
-     except Exception as e:
-         ASCIIColors.error(f"Failed to initialize LollmsClient: {e}")
-         trace_exception(e)
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-
-     if not client.binding:
-         ASCIIColors.error("LollmsClient's LLM binding (dummy_llm) failed to load.")
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-     if not client.mcp:
-         ASCIIColors.error("LollmsClient's MCP binding (standard_mcp) failed to load.")
-         client.close() # Close LLM binding if it loaded
-         shutil.rmtree(example_base_dir)
-         sys.exit(1)
-
-     ASCIIColors.green("LollmsClient initialized successfully with DummyLLM and StandardMCP bindings.")
-
-     # --- 3. Define a streaming callback for generate_with_mcp ---
-     def mcp_streaming_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, history: list = None) -> bool:
-         if metadata:
-             type_info = metadata.get('type', 'unknown_type')
-             if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-                 ASCIIColors.cyan(f"MCP Step Start ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-                 ASCIIColors.cyan(f"MCP Step End ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
-                 ASCIIColors.yellow(f"MCP Info ({type_info}): {chunk}")
-             elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Part of final answer typically
-                 ASCIIColors.green(chunk, end="") # type: ignore
-             else: # FULL, default, etc.
-                 ASCIIColors.green(f"MCP Output ({str(msg_type)}, {type_info}): {chunk}")
-         else:
-             if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
-                 ASCIIColors.green(chunk, end="") # type: ignore
-             else:
-                 ASCIIColors.green(f"MCP Output ({str(msg_type)}): {chunk}")
-         sys.stdout.flush()
-         return True # Continue streaming
-
-     # --- 4. Use generate_with_mcp ---
-     ASCIIColors.magenta("\n2. Calling generate_with_mcp to get current time...")
-     time_prompt = "Hey assistant, what time is it right now?"
-     time_response = client.generate_with_mcp(
-         prompt=time_prompt,
-         streaming_callback=mcp_streaming_callback,
-         interactive_tool_execution=False # Set to True to test interactive mode
-     )
-     print() # Newline after streaming
-     ASCIIColors.blue(f"Final response for time prompt: {json.dumps(time_response, indent=2)}")
-
-     assert time_response.get("error") is None, f"Time prompt resulted in an error: {time_response.get('error')}"
-     assert time_response.get("final_answer"), "Time prompt did not produce a final answer."
-     assert len(time_response.get("tool_calls", [])) > 0, "Time prompt should have called a tool."
-     assert time_response["tool_calls"][0]["name"] == "time_machine::get_current_time", "Incorrect tool called for time."
-     assert "time" in time_response["tool_calls"][0].get("result", {}).get("output", {}), "Time tool result missing time."
-
-
-     ASCIIColors.magenta("\n3. Calling generate_with_mcp for calculation...")
-     calc_prompt = "Can you please calculate the sum of 50, 25, and 7.5 for me?"
-     calc_response = client.generate_with_mcp(
-         prompt=calc_prompt,
-         streaming_callback=mcp_streaming_callback
-     )
-     print() # Newline
-     ASCIIColors.blue(f"Final response for calc prompt: {json.dumps(calc_response, indent=2)}")
-
-     assert calc_response.get("error") is None, f"Calc prompt resulted in an error: {calc_response.get('error')}"
-     assert calc_response.get("final_answer"), "Calc prompt did not produce a final answer."
-     assert len(calc_response.get("tool_calls", [])) > 0, "Calc prompt should have called a tool."
-     assert calc_response["tool_calls"][0]["name"] == "calc_unit::add_numbers", "Incorrect tool called for calculation."
-     # The dummy LLM uses hardcoded params [1,2,3] for calc, so result will be 6.
-     # A real LLM would extract 50, 25, 7.5.
-     # For this dummy test, we check against the dummy's behavior.
-     assert calc_response["tool_calls"][0].get("result", {}).get("output", {}).get("sum") == 82.5, "Calculator tool result mismatch for dummy params."
-
-
-     # --- 5. Cleanup ---
-     ASCIIColors.info("Cleaning up temporary server scripts and dummy binding directory...")
-     shutil.rmtree(example_base_dir, ignore_errors=True)
-
-     ASCIIColors.red("\n--- LollmsClient with StandardMCPBinding Example Finished Successfully! ---")
-
- if __name__ == "__main__":
-     main()
examples/simple_text_gen_test.py (deleted)
@@ -1,173 +0,0 @@
- from lollms_client import LollmsClient
- from lollms_client.lollms_types import MSG_TYPE # For callback signature
- from ascii_colors import ASCIIColors, trace_exception
-
- # --- Configuration ---
- # Choose your LLM binding and parameters here
- # Option 1: Default LOLLMS server binding
- BINDING_NAME = "lollms"
- HOST_ADDRESS = "http://localhost:9600"
- MODEL_NAME = None # Server will use its default or last loaded model
-
- # Option 2: Ollama binding
- #ensure you have the right models
- #BINDING_NAME = "ollama"
- #HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
- #MODEL_NAME = "mistral:latest" # Or "llama3:latest", "phi3:latest", etc. - ensure it's pulled in Ollama
-
- # Option 3: OpenAI binding (requires OPENAI_API_KEY environment variable or service_key)
- # BINDING_NAME = "openai"
- # HOST_ADDRESS = None # Defaults to OpenAI API
- # MODEL_NAME = "gpt-3.5-turbo"
- # SERVICE_KEY = "" # Optional, can use env var
-
- # --- Callback for streaming ---
- def simple_streaming_callback(chunk: str, msg_type: MSG_TYPE, params=None, metadata=None) -> bool:
-     """
-     Simple callback function to print streamed text chunks.
-     """
-     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
-         print(chunk, end="", flush=True)
-     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-         ASCIIColors.error(f"\nStreaming Error: {chunk}")
-     # Return True to continue streaming, False to stop
-     return True
-
- def test_text_generation():
-     ASCIIColors.cyan(f"\n--- Testing Text Generation with '{BINDING_NAME}' binding ---")
-     ASCIIColors.cyan(f"Host: {HOST_ADDRESS or 'Default'}, Model: {MODEL_NAME or 'Default'}")
-
-     try:
-         # Initialize LollmsClient
-         lc_params = {
-             "binding_name": BINDING_NAME,
-             "host_address": HOST_ADDRESS,
-             "model_name": MODEL_NAME,
-             #"service_key": SERVICE_KEY, # Uncomment for OpenAI if needed
-         }
-         # Remove None host_address for bindings that have internal defaults (like OpenAI)
-         if lc_params["host_address"] is None and BINDING_NAME in ["openai"]:
-             del lc_params["host_address"]
-
-
-         lc = LollmsClient(**lc_params)
-
-         # 1. Test basic non-streaming generation
-         ASCIIColors.magenta("\n1. Basic Non-Streaming Generation:")
-         prompt_non_stream = "Tell me a short joke about a programmer."
-         ASCIIColors.yellow(f"Prompt: {prompt_non_stream}")
-         response_non_stream = lc.generate_text(
-             prompt=prompt_non_stream,
-             stream=False,
-             temperature=0.7,
-             n_predict=100 # Max tokens for the joke
-         )
-
-         if isinstance(response_non_stream, str):
-             ASCIIColors.green("Response:")
-             print(response_non_stream)
-         elif isinstance(response_non_stream, dict) and "error" in response_non_stream:
-             ASCIIColors.error(f"Error in non-streaming generation: {response_non_stream['error']}")
-         else:
-             ASCIIColors.warning(f"Unexpected response format: {response_non_stream}")
-
-         # 2. Test streaming generation
-         ASCIIColors.magenta("\n\n2. Streaming Generation:")
-         prompt_stream = "Explain the concept of recursion in one sentence."
-         ASCIIColors.yellow(f"Prompt: {prompt_stream}")
-         ASCIIColors.green("Response (streaming):")
-         response_stream = lc.generate_text(
-             prompt=prompt_stream,
-             stream=True,
-             streaming_callback=simple_streaming_callback,
-             temperature=0.5,
-             n_predict=150
-         )
-         print() # Newline after streaming
-
-         # The 'response_stream' variable will contain the full concatenated text if streaming_callback returns True throughout
-         # or an error dictionary if generation failed.
-         if isinstance(response_stream, str):
-             ASCIIColors.cyan(f"\n(Full streamed text was: {response_stream[:100]}...)") # Show a snippet of full text
-         elif isinstance(response_stream, dict) and "error" in response_stream:
-             ASCIIColors.error(f"Error in streaming generation: {response_stream['error']}")
-
-         print("Testing embedding")
-         emb = lc.embed("hello")
-         print(emb)
-
-         # else: if callback returns False early, response_stream might be partial.
-         nb_tokens = lc.count_tokens("")
-         ASCIIColors.yellow("Number of tokens of : Testing count of tokens\n"+f"{nb_tokens}")
-
-         # 3. Test generation with a specific model (if applicable and different from default)
-         # This tests the switch_model or model loading mechanism of the binding.
-         # For 'lollms' binding, this would set the model on the server.
-         # For 'ollama' or 'openai', it means the next generate_text will use this model.
-         ASCIIColors.magenta("\n\n3. List Available Models & Generate with Specific Model:")
-         available_models = lc.listModels()
-         if isinstance(available_models, list) and available_models:
-             ASCIIColors.green("Available models:")
-             for i, model_info in enumerate(available_models[:5]): # Print first 5
-                 model_id = model_info.get('model_name', model_info.get('id', str(model_info)))
-                 print(f" - {model_id}")
-
-             # Try to use the first available model (or a known one if list is too generic)
-             target_model = None
-             if BINDING_NAME == "ollama":
-                 # For Ollama, try using a different small model if available, or the same one
-                 if "phi3:latest" in [m.get('name') for m in available_models if isinstance(m, dict)]:
-                     target_model = "phi3:latest"
-                 elif available_models: # Fallback to first model in list if phi3 not present
-                     first_model_entry = available_models[0]
-                     target_model = first_model_entry.get('name', first_model_entry.get('model_name'))
-
-
-             elif BINDING_NAME == "lollms":
-                 # For lollms, this would typically be a path or server-recognized name
-                 # This part is harder to make generic without knowing server's models
-                 ASCIIColors.yellow("For 'lollms' binding, ensure the target model is known to the server.")
-                 if available_models and isinstance(available_models[0], str): # e.g. gptq model paths
-                     target_model = available_models[0]
-
-
-             if target_model and target_model != lc.binding.model_name: # Only if different and valid
-                 ASCIIColors.info(f"\nSwitching to model (or using for next gen): {target_model}")
-                 # For bindings like ollama/openai, setting model_name on binding directly works.
-                 # For 'lollms' server binding, LollmsClient doesn't have a direct 'switch_model_on_server'
-                 # but setting lc.binding.model_name will make the next generate_text request it.
-                 lc.binding.model_name = target_model # Update the binding's current model_name
-
-                 prompt_specific_model = f"What is the main capability of the {target_model.split(':')[0]} language model?"
-                 ASCIIColors.yellow(f"Prompt (for {target_model}): {prompt_specific_model}")
-                 ASCIIColors.green("Response:")
-                 response_specific = lc.generate_text(
-                     prompt=prompt_specific_model,
-                     stream=True, # Keep it streaming for responsiveness
-                     streaming_callback=simple_streaming_callback,
-                     n_predict=200
-                 )
-                 print()
-             elif target_model == lc.binding.model_name:
-                 ASCIIColors.yellow(f"Target model '{target_model}' is already the current model. Skipping specific model test.")
-             else:
-                 ASCIIColors.yellow("Could not determine a different target model from the list to test specific model generation.")
-
-         elif isinstance(available_models, dict) and "error" in available_models:
-             ASCIIColors.error(f"Error listing models: {available_models['error']}")
-         else:
-             ASCIIColors.yellow("No models listed by the binding or format not recognized.")
-
-
-     except ValueError as ve:
-         ASCIIColors.error(f"Initialization Error: {ve}")
-         trace_exception(ve)
-     except RuntimeError as re:
-         ASCIIColors.error(f"Runtime Error (binding likely not initialized): {re}")
-         trace_exception(re)
-     except Exception as e:
-         ASCIIColors.error(f"An unexpected error occurred: {e}")
-         trace_exception(e)
-
- if __name__ == "__main__":
-     test_text_generation()