lollms-client 0.17.2__tar.gz → 0.19.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (82)
  1. {lollms_client-0.17.2/lollms_client.egg-info → lollms_client-0.19.0}/PKG-INFO +1 -1
  2. lollms_client-0.19.0/examples/function_calling_with_local_custom_mcp.py +250 -0
  3. lollms_client-0.19.0/examples/local_mcp.py +171 -0
  4. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/__init__.py +7 -6
  5. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_core.py +345 -10
  6. lollms_client-0.19.0/lollms_client/lollms_mcp_binding.py +198 -0
  7. lollms_client-0.19.0/lollms_client/mcp_bindings/local_mcp/__init__.py +311 -0
  8. lollms_client-0.19.0/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +74 -0
  9. lollms_client-0.19.0/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +195 -0
  10. lollms_client-0.19.0/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +107 -0
  11. lollms_client-0.19.0/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +141 -0
  12. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tti_bindings/dalle/__init__.py +2 -1
  13. {lollms_client-0.17.2 → lollms_client-0.19.0/lollms_client.egg-info}/PKG-INFO +1 -1
  14. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client.egg-info/SOURCES.txt +8 -3
  15. lollms_client-0.17.2/examples/function_call/functions_call_with images.py +0 -52
  16. lollms_client-0.17.2/lollms_client/lollms_functions.py +0 -72
  17. lollms_client-0.17.2/lollms_client/lollms_tasks.py +0 -691
  18. {lollms_client-0.17.2 → lollms_client-0.19.0}/LICENSE +0 -0
  19. {lollms_client-0.17.2 → lollms_client-0.19.0}/README.md +0 -0
  20. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/article_summary/article_summary.py +0 -0
  21. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/deep_analyze/deep_analyse.py +0 -0
  22. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  23. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/generate_and_speak/generate_and_speak.py +0 -0
  24. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  25. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/personality_test/chat_test.py +0 -0
  26. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/personality_test/chat_with_aristotle.py +0 -0
  27. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/personality_test/tesks_test.py +0 -0
  28. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/simple_text_gen_test.py +0 -0
  29. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/simple_text_gen_with_image_test.py +0 -0
  30. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/test_local_models/local_chat.py +0 -0
  31. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_2_audio.py +0 -0
  32. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_2_image.py +0 -0
  33. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_2_image_diffusers.py +0 -0
  34. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_and_image_2_audio.py +0 -0
  35. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_gen.py +0 -0
  36. {lollms_client-0.17.2 → lollms_client-0.19.0}/examples/text_gen_system_prompt.py +0 -0
  37. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/__init__.py +0 -0
  38. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  39. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  40. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  41. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  42. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  43. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  44. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  45. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  46. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  47. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_config.py +0 -0
  48. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_discussion.py +0 -0
  49. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_js_analyzer.py +0 -0
  50. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_llm_binding.py +0 -0
  51. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_python_analyzer.py +0 -0
  52. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_stt_binding.py +0 -0
  53. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_tti_binding.py +0 -0
  54. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_ttm_binding.py +0 -0
  55. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_tts_binding.py +0 -0
  56. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_ttv_binding.py +0 -0
  57. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_types.py +0 -0
  58. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/lollms_utilities.py +0 -0
  59. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/stt_bindings/__init__.py +0 -0
  60. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  61. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  62. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  63. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tti_bindings/__init__.py +0 -0
  64. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  65. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  66. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  67. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttm_bindings/__init__.py +0 -0
  68. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  69. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  70. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  71. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tts_bindings/__init__.py +0 -0
  72. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  73. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  74. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  75. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  76. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttv_bindings/__init__.py +0 -0
  77. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  78. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client.egg-info/dependency_links.txt +0 -0
  79. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client.egg-info/requires.txt +0 -0
  80. {lollms_client-0.17.2 → lollms_client-0.19.0}/lollms_client.egg-info/top_level.txt +0 -0
  81. {lollms_client-0.17.2 → lollms_client-0.19.0}/pyproject.toml +0 -0
  82. {lollms_client-0.17.2 → lollms_client-0.19.0}/setup.cfg +0 -0
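
The headline change in this release is the removal of the old TasksLibrary and FunctionCalling_Library helpers in favor of an MCP binding layer: a new lollms_mcp_binding.py abstraction, a local_mcp binding, and a set of packaged default tools (file_writer, generate_image_from_prompt, internet_search, python_interpreter). Distilled from the two example scripts added in this release (shown in full in the diffs below), a minimal usage sketch looks like the following; the Ollama host and model name are placeholders to adapt to your setup:

# Minimal sketch based on the example scripts in this release.
# Assumption: a local Ollama server with a pulled model; adapt host/model.
from lollms_client import LollmsClient

lc = LollmsClient(
    binding_name="ollama",
    host_address="http://localhost:11434",
    model_name="mistral-nemo:latest",
    mcp_binding_name="local_mcp",  # the MCP binding added in 0.19.0
    # Omit mcp_binding_config to fall back to the packaged default_tools;
    # pass {"tools_folder_path": "/path/to/tools"} to use your own tools.
)

result = lc.generate_with_mcp(
    prompt="What's the weather like in Paris?",
    streaming_callback=None,  # or a callback like the ones in the examples below
)
print(result["final_answer"])
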
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.17.2
+ Version: 0.19.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -0,0 +1,250 @@
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+ from pathlib import Path
+ import json # For pretty printing results
+
+ # --- Configuration ---
+ # LLM Configuration
+ LLM_BINDING_NAME = "ollama"
+ OLLAMA_HOST_ADDRESS = "http://localhost:11434" # Default Ollama host
+ # Ensure you have a model capable of following instructions and generating JSON.
+ # Mistral, Llama 3, or Phi-3 variants often work well.
+ OLLAMA_MODEL_NAME = "mistral-nemo:latest" # Or "llama3:latest", "phi3:latest" - ensure it's pulled
+
+ # Local MCP Binding Configuration
+ # This path should point to the directory containing your tool subdirectories
+ # (e.g., 'get_weather/', 'sum_numbers/')
+ # For this example, we assume 'temp_mcp_tools_for_test' is in the parent directory
+ # of this examples folder.
+ TOOLS_FOLDER = Path(__file__).parent.parent / "temp_mcp_tools_for_test"
+
+ # Function Calling Parameters
+ MAX_LLM_ITERATIONS_FOR_TOOL_CALLS = 3 # How many times the LLM can decide to call a tool in a sequence
+ MAX_TOOL_CALLS_PER_TURN = 2 # Max distinct tools executed per user prompt
+
+ # --- Helper to Create Dummy Tools (if they don't exist) ---
+ def ensure_dummy_tools_exist(base_tools_dir: Path):
+     if not base_tools_dir.exists():
+         ASCIIColors.info(f"Creating dummy tools directory: {base_tools_dir}")
+         base_tools_dir.mkdir(parents=True, exist_ok=True)
+
+     tool_defs = {
+         "get_weather": {
+             "mcp": {
+                 "name": "get_weather",
+                 "description": "Fetches the current weather for a given city.",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "city": {"type": "string", "description": "The city name."},
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "default": "celsius"}
+                     },
+                     "required": ["city"]
+                 },
+                 "output_schema": {
+                     "type": "object",
+                     "properties": {
+                         "temperature": {"type": "number"}, "condition": {"type": "string"}, "unit": {"type": "string"}
+                     }
+                 }
+             },
+             "py": """
+ import random
+ def execute(params: dict) -> dict:
+     city = params.get("city")
+     unit = params.get("unit", "celsius")
+     if not city: return {"error": "City not provided"}
+     conditions = ["sunny", "cloudy", "rainy", "snowy", "windy"]
+     temp = random.randint(-5 if unit == "celsius" else 23, 30 if unit == "celsius" else 86)
+     return {"temperature": temp, "condition": random.choice(conditions), "unit": unit}
+ """
+         },
+         "sum_numbers": {
+             "mcp": {
+                 "name": "sum_numbers",
+                 "description": "Calculates the sum of a list of numbers.",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"numbers": {"type": "array", "items": {"type": "number"}}},
+                     "required": ["numbers"]
+                 },
+                 "output_schema": {"type": "object", "properties": {"sum": {"type": "number"}}}
+             },
+             "py": """
+ def execute(params: dict) -> dict:
+     numbers = params.get("numbers", [])
+     if not isinstance(numbers, list) or not all(isinstance(n, (int, float)) for n in numbers):
+         return {"error": "Invalid input: 'numbers' must be a list of numbers."}
+     return {"sum": sum(numbers)}
+ """
+         }
+     }
+
+     for tool_name, files_content in tool_defs.items():
+         tool_dir = base_tools_dir / tool_name
+         tool_dir.mkdir(exist_ok=True)
+
+         mcp_file = tool_dir / f"{tool_name}.mcp.json"
+         py_file = tool_dir / f"{tool_name}.py"
+
+         if not mcp_file.exists():
+             with open(mcp_file, "w") as f:
+                 json.dump(files_content["mcp"], f, indent=2)
+             ASCIIColors.info(f"Created MCP definition for {tool_name}")
+
+         if not py_file.exists():
+             with open(py_file, "w") as f:
+                 f.write(files_content["py"])
+             ASCIIColors.info(f"Created Python code for {tool_name}")
+
+ # --- Callback for streaming ---
+ def function_calling_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
+     """
+     Callback to handle streamed output during function calling.
+     """
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # LLM generating text
+         ASCIIColors.success(chunk, end="", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         ASCIIColors.info(f">> Starting {step_type}: {step_info}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         if metadata and "result" in metadata:
+             ASCIIColors.success(f"<< Finished {step_type}: {step_info} -> Result: {json.dumps(metadata['result'])}", flush=True)
+         else:
+             ASCIIColors.success(f"<< Finished {step_type}: {step_info}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
+         if metadata and metadata.get("type") == "tool_call_request":
+             ASCIIColors.info(f"AI requests to call tool: {metadata.get('name')} with params: {metadata.get('params')}", flush=True)
+         else:
+             ASCIIColors.info(f"INFO: {chunk}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
+         ASCIIColors.error(f"ERROR in stream: {chunk}", flush=True)
+
+     # Optional debug info:
+     # ASCIIColors.info(f"DEBUG Turn History (so far): {turn_history}")
+     return True
+
+
+
+ def run_function_calling_example():
+     ASCIIColors.red("--- LoLLMs Client with Local MCP Function Calling Example ---")
+
+     ensure_dummy_tools_exist(TOOLS_FOLDER) # Make sure our example tools are present
+
+     try:
+         ASCIIColors.magenta("\n1. Initializing LollmsClient...")
+         # MCP binding config is passed directly to the binding constructor
+         mcp_binding_configuration = {"tools_folder_path": str(TOOLS_FOLDER)}
+
+         lc = LollmsClient(
+             binding_name=LLM_BINDING_NAME,
+             host_address=OLLAMA_HOST_ADDRESS,
+             model_name=OLLAMA_MODEL_NAME,
+             mcp_binding_name="local_mcp", # Activate the LocalMCP binding
+             mcp_binding_config=mcp_binding_configuration, # Pass its specific config
+             # Optional: Configure default LLM generation params if needed
+             temperature=0.2, # Lower temp for more focused tool decisions / final answer
+             n_predict=1024
+         )
+         ASCIIColors.green("LollmsClient initialized successfully.")
+         if lc.mcp:
+             ASCIIColors.info(f"MCP Binding '{lc.mcp.binding_name}' loaded.")
+             discovered_tools_on_init = lc.mcp.discover_tools()
+             ASCIIColors.info(f"Tools discovered by MCP binding on init: {[t['name'] for t in discovered_tools_on_init]}")
+         else:
+             ASCIIColors.error("MCP binding was not loaded correctly. Aborting.")
+             return
+
+         # --- Example Interaction 1: Weather Request ---
+         ASCIIColors.magenta("\n2. Example 1: Asking for weather")
+         user_prompt_weather = "What's the weather like in Paris today, and can you tell me in Fahrenheit?"
+         ASCIIColors.blue(f"User: {user_prompt_weather}")
+         ASCIIColors.yellow(f"AI thinking and interacting with tools (streaming output):")
+
+         weather_result = lc.generate_with_mcp(
+             prompt=user_prompt_weather,
+             # tools=None, # Let it discover from the binding
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
+             streaming_callback=function_calling_stream_callback,
+             # interactive_tool_execution=True # Uncomment to confirm tool calls
+         )
+         print("\n--- End of AI Response for Weather ---")
+         if weather_result["error"]:
+             ASCIIColors.error(f"Error in weather example: {weather_result['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Weather): {weather_result['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Weather Example):")
+             for tc in weather_result["tool_calls"]:
+                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+
+         # --- Example Interaction 2: Summation Request ---
+         ASCIIColors.magenta("\n3. Example 2: Asking to sum numbers")
+         user_prompt_sum = "Hey, can you please calculate the sum of 15.5, 25, and -5.5 for me?"
+         ASCIIColors.blue(f"User: {user_prompt_sum}")
+         ASCIIColors.yellow(f"AI thinking and interacting with tools (streaming output):")
+
+         sum_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_sum,
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
+             streaming_callback=function_calling_stream_callback
+         )
+         print("\n--- End of AI Response for Sum ---")
+         if sum_result_data["error"]:
+             ASCIIColors.error(f"Error in sum example: {sum_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Sum): {sum_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Sum Example):")
+             for tc in sum_result_data["tool_calls"]:
+                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+
+         # --- Example Interaction 3: Multi-step (hypothetical, weather then sum) ---
+         ASCIIColors.magenta("\n4. Example 3: Multi-step (Weather, then maybe sum if AI decides)")
+         user_prompt_multi = "What's the weather in Berlin? And also, what's 100 + 200 + 300?"
+         ASCIIColors.blue(f"User: {user_prompt_multi}")
+         ASCIIColors.yellow(f"AI thinking and interacting with tools (streaming output):")
+
+         multi_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_multi,
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN, # Allow up to 2 different tools
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS + 1, # Allow a bit more LLM thinking
+             streaming_callback=function_calling_stream_callback
+         )
+         print("\n--- End of AI Response for Multi-step ---")
+         if multi_result_data["error"]:
+             ASCIIColors.error(f"Error in multi-step example: {multi_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Multi-step): {multi_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Multi-step Example):")
+             for tc in multi_result_data["tool_calls"]:
+                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+
+     except ValueError as ve: # Catch init errors
+         ASCIIColors.error(f"Initialization Error: {ve}")
+         trace_exception(ve)
+     except ConnectionRefusedError:
+         ASCIIColors.error(f"Connection refused. Is the Ollama server running at {OLLAMA_HOST_ADDRESS}?")
+     except Exception as e:
+         ASCIIColors.error(f"An unexpected error occurred: {e}")
+         trace_exception(e)
+     finally:
+         ASCIIColors.info(f"If dummy tools were created, they are in: {TOOLS_FOLDER.resolve()}")
+         # Consider cleaning up TOOLS_FOLDER if it was created purely for this test run
+         # For this example, we'll leave them.
+         # import shutil
+         # if "ensure_dummy_tools_exist" in globals() and TOOLS_FOLDER.exists() and "temp_mcp_tools_for_test" in str(TOOLS_FOLDER):
+         #     if input(f"Clean up dummy tools at {TOOLS_FOLDER}? (y/n): ").lower() == 'y':
+         #         shutil.rmtree(TOOLS_FOLDER)
+         #         ASCIIColors.info("Cleaned up dummy tools folder.")
+
+     ASCIIColors.red("\n--- Example Finished ---")
+
+ if __name__ == "__main__":
+     run_function_calling_example()
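
As the ensure_dummy_tools_exist helper above shows, a local MCP tool is just a folder holding a <tool_name>.mcp.json definition plus a <tool_name>.py module exposing execute(params) -> dict. A hypothetical third tool following the same pattern would look like this (the echo name and schema are invented for illustration):

# echo/echo.mcp.json, written here as the equivalent Python dict:
ECHO_MCP = {
    "name": "echo",
    "description": "Returns the text it receives.",
    "input_schema": {
        "type": "object",
        "properties": {"text": {"type": "string"}},
        "required": ["text"],
    },
    "output_schema": {"type": "object", "properties": {"echo": {"type": "string"}}},
}

# echo/echo.py -- the local_mcp binding calls execute() with the parameters
# chosen by the LLM and expects a JSON-serializable dict back.
def execute(params: dict) -> dict:
    text = params.get("text")
    if text is None:
        return {"error": "No 'text' provided"}
    return {"echo": text}
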
@@ -0,0 +1,171 @@
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+ from pathlib import Path
+ import json # For pretty printing results
+ import os # For OPENAI_API_KEY
+
+ # --- Configuration ---
+ # LLM Configuration
+ LLM_BINDING_NAME = "ollama" # Or "openai", "lollms", etc.
+ OLLAMA_HOST_ADDRESS = "http://localhost:11434"
+ OLLAMA_MODEL_NAME = "mistral:latest" # Ensure this model is capable of JSON and tool use decisions
+
+ # TTI Configuration (for the generate_image_from_prompt MCP tool)
+ TTI_BINDING_NAME = "dalle" # To use DALL-E via LollmsClient's TTI
+ # OPENAI_API_KEY should be set as an environment variable for DALL-E
+
+ # MCP Configuration
+ # We will NOT provide mcp_binding_config.tools_folder_path,
+ # so LocalMCPBinding should use its packaged default_tools.
+ MCP_BINDING_NAME = "local_mcp"
+
+ # Function Calling Parameters
+ MAX_LLM_ITERATIONS_FOR_TOOL_CALLS = 4
+ MAX_TOOL_CALLS_PER_TURN = 3
+
+ # --- Callback for streaming ---
+ def function_calling_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
+     """
+     Callback to handle streamed output during function calling.
+     """
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         ASCIIColors.success(chunk, end="", flush=True)
+
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         ASCIIColors.info(f"\n>> Starting {step_type}: {step_info}", flush=True)
+
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         if metadata and "result" in metadata:
+             ASCIIColors.success(f"\n<< Finished {step_type}: {step_info} -> Result: {json.dumps(metadata['result'])}", flush=True)
+         else:
+             ASCIIColors.success(f"\n<< Finished {step_type}: {step_info}", flush=True)
+
+     elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
+         if metadata and metadata.get("type") == "tool_call_request":
+             ASCIIColors.info(f"\nAI requests to call tool: {metadata.get('name')} with params: {metadata.get('params')}", flush=True)
+         else:
+             ASCIIColors.info(f"\nINFO: {chunk}", flush=True)
+
+     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
+         ASCIIColors.error(f"\nERROR in stream: {chunk}", flush=True)
+
+     return True
+
+
+
+ def run_default_tools_example():
+     ASCIIColors.red("--- LoLLMs Client with Default Local MCP Tools Example ---")
+
+     # Check for OpenAI API Key if DALL-E is used
+     if TTI_BINDING_NAME.lower() == "dalle" and not os.getenv("OPENAI_API_KEY"):
+         ASCIIColors.error("OPENAI_API_KEY environment variable is not set. DALL-E TTI will fail.")
+         ASCIIColors.error("Please set it or choose a different TTI_BINDING_NAME.")
+         # return # Optionally exit if key is critical for the test
+
+     try:
+         ASCIIColors.magenta("\n1. Initializing LollmsClient...")
+
+         lc = LollmsClient(
+             binding_name=LLM_BINDING_NAME,
+             host_address=OLLAMA_HOST_ADDRESS, # For Ollama LLM
+             model_name=OLLAMA_MODEL_NAME, # For Ollama LLM
+
+             mcp_binding_name=MCP_BINDING_NAME,
+             # No mcp_binding_config, so LocalMCPBinding should use its 'default_tools'
+
+             tti_binding_name=TTI_BINDING_NAME, # For the 'generate_image_from_prompt' tool
+             # tti_binding_config would be needed here if DALL-E or other TTI bindings
+             # require specific init params beyond API key (which DALL-E binding gets from env).
+             # e.g. tti_binding_config={"api_key": "your_key_here"} if not using env for DALL-E.
+
+             temperature=0.1,
+             n_predict=1500 # Allow more tokens for complex reasoning and tool outputs
+         )
+         ASCIIColors.green("LollmsClient initialized successfully.")
+         if lc.mcp:
+             ASCIIColors.info(f"MCP Binding '{lc.mcp.binding_name}' loaded.")
+             discovered_tools_on_init = lc.mcp.discover_tools() # Should pick up default_tools
+             ASCIIColors.info(f"Tools initially discovered by MCP binding: {[t['name'] for t in discovered_tools_on_init]}")
+             assert any(t['name'] == 'internet_search' for t in discovered_tools_on_init), "Default 'internet_search' tool not found."
+             assert any(t['name'] == 'file_writer' for t in discovered_tools_on_init), "Default 'file_writer' tool not found."
+             assert any(t['name'] == 'python_interpreter' for t in discovered_tools_on_init), "Default 'python_interpreter' tool not found."
+             assert any(t['name'] == 'generate_image_from_prompt' for t in discovered_tools_on_init), "Default 'generate_image_from_prompt' tool not found."
+         else:
+             ASCIIColors.error("MCP binding was not loaded correctly. Aborting.")
+             return
+
+         if TTI_BINDING_NAME and not lc.tti:
+             ASCIIColors.warning(f"TTI binding '{TTI_BINDING_NAME}' was specified but not loaded in LollmsClient. The 'generate_image_from_prompt' tool may fail.")
+
+
+         # --- Example Interaction 1: Internet Search ---
+         ASCIIColors.magenta("\n2. Example: Asking for information requiring internet search")
+         user_prompt_search = "What were the main headlines on AI ethics in the last month?"
+         ASCIIColors.blue(f"User: {user_prompt_search}")
+         ASCIIColors.yellow(f"AI processing (streaming output):")
+
+         search_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_search,
+             max_tool_calls=1, # Limit to one search for this
+             max_llm_iterations=2,
+             streaming_callback=function_calling_stream_callback,
+         )
+         print("\n--- End of AI Response (Search) ---")
+         if search_result_data["error"]:
+             ASCIIColors.error(f"Error in search example: {search_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Search): {search_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Search Example):")
+             for tc in search_result_data["tool_calls"]:
+                 # Truncate long snippets for display
+                 if tc['name'] == 'internet_search' and 'output' in tc['result'] and 'search_results' in tc['result']['output']:
+                     for res_item in tc['result']['output']['search_results']:
+                         if 'snippet' in res_item and len(res_item['snippet']) > 100:
+                             res_item['snippet'] = res_item['snippet'][:100] + "..."
+                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'], indent=2)}")
+
+
+         # --- Example Interaction 2: Image Generation ---
+         ASCIIColors.magenta("\n3. Example: Requesting an image generation")
+         user_prompt_image = "Please generate an image of a futuristic robot holding a glowing orb."
+         ASCIIColors.blue(f"User: {user_prompt_image}")
+         ASCIIColors.yellow(f"AI processing (streaming output):")
+
+         image_gen_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_image,
+             max_tool_calls=1,
+             max_llm_iterations=2,
+             streaming_callback=function_calling_stream_callback,
+         )
+         print("\n--- End of AI Response (Image Gen) ---")
+         if image_gen_result_data["error"]:
+             ASCIIColors.error(f"Error in image gen example: {image_gen_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Image Gen): {image_gen_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Image Gen Example):")
+             for tc in image_gen_result_data["tool_calls"]:
+                 print(f" - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'], indent=2)}")
+                 if tc['name'] == 'generate_image_from_prompt' and tc['result'].get('output', {}).get('status') == 'success':
+                     img_path = tc['result']['output'].get('image_path')
+                     img_url = tc['result']['output'].get('image_url')
+                     ASCIIColors.green(f"Image was reportedly saved. Path hint: {img_path}, URL: {img_url}")
+                     ASCIIColors.info("Check your LollmsClient outputs/mcp_generated_images/ directory (or similar based on tool's save logic).")
+
+
+     except ValueError as ve:
+         ASCIIColors.error(f"Initialization Error: {ve}")
+         trace_exception(ve)
+     except ConnectionRefusedError:
+         ASCIIColors.error(f"Connection refused. Is the Ollama server running at {OLLAMA_HOST_ADDRESS}?")
+     except Exception as e:
+         ASCIIColors.error(f"An unexpected error occurred: {e}")
+         trace_exception(e)
+
+     ASCIIColors.red("\n--- Default Tools Example Finished ---")
+
+ if __name__ == "__main__":
+     run_default_tools_example()
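
Both example scripts consume the same structure from generate_with_mcp. A sketch of that return value, with the keys inferred from the example code above and the concrete values invented for illustration:

# Sketch of the dict returned by generate_with_mcp (keys per the examples above;
# the default tools nest their payload under a "result" -> "output" key).
result = {
    "final_answer": "It is 21 degrees Celsius and sunny in Paris.",  # invented value
    "error": None,                 # truthy error message on failure
    "tool_calls": [                # one entry per executed tool
        {
            "name": "get_weather",
            "params": {"city": "Paris", "unit": "celsius"},
            "result": {"output": {"temperature": 21, "condition": "sunny", "unit": "celsius"}},
        },
    ],
}
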
@@ -1,21 +1,22 @@
  # lollms_client/__init__.py
  from lollms_client.lollms_core import LollmsClient, ELF_COMPLETION_FORMAT
- from lollms_client.lollms_tasks import TasksLibrary
  from lollms_client.lollms_types import MSG_TYPE # Assuming ELF_GENERATION_FORMAT is not directly used by users from here
  from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
  from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
- from lollms_client.lollms_functions import FunctionCalling_Library
+ # Import new MCP binding classes
+ from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
- __version__ = "0.17.2"
+
+ __version__ = "0.19.0" # Updated version
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
      "LollmsClient",
      "ELF_COMPLETION_FORMAT",
-     "TasksLibrary",
      "MSG_TYPE",
      "LollmsDiscussion",
      "LollmsMessage",
      "PromptReshaper",
-     "FunctionCalling_Library"
- ]
+     "LollmsMCPBinding", # Export LollmsMCPBinding ABC
+     "LollmsMCPBindingManager", # Export LollmsMCPBindingManager
+ ]
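
Downstream code that imported TasksLibrary or FunctionCalling_Library breaks on upgrade. A defensive import for code that must tolerate both 0.17.x and 0.19.0, using only the names shown in the __init__.py diff above:

# Sketch: detect which public API this lollms_client exposes.
try:
    # 0.19.0+: MCP binding classes replace the old helper libraries.
    from lollms_client import LollmsMCPBinding, LollmsMCPBindingManager
    HAS_MCP_API = True
except ImportError:
    HAS_MCP_API = False

try:
    # 0.17.x only: removed in 0.19.0 (see the diff above).
    from lollms_client import FunctionCalling_Library
    HAS_LEGACY_FC = True
except ImportError:
    HAS_LEGACY_FC = False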