lollms-client 0.17.2__tar.gz → 0.19.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (82)
  1. {lollms_client-0.17.2/lollms_client.egg-info → lollms_client-0.19.1}/PKG-INFO +71 -16
  2. {lollms_client-0.17.2 → lollms_client-0.19.1}/README.md +70 -15
  3. lollms_client-0.19.1/examples/function_calling_with_local_custom_mcp.py +250 -0
  4. lollms_client-0.19.1/examples/local_mcp.py +171 -0
  5. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/__init__.py +7 -6
  6. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_core.py +345 -10
  7. lollms_client-0.19.1/lollms_client/lollms_mcp_binding.py +198 -0
  8. lollms_client-0.19.1/lollms_client/mcp_bindings/local_mcp/__init__.py +311 -0
  9. lollms_client-0.19.1/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +74 -0
  10. lollms_client-0.19.1/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +195 -0
  11. lollms_client-0.19.1/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +107 -0
  12. lollms_client-0.19.1/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +141 -0
  13. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tti_bindings/dalle/__init__.py +2 -1
  14. {lollms_client-0.17.2 → lollms_client-0.19.1/lollms_client.egg-info}/PKG-INFO +71 -16
  15. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client.egg-info/SOURCES.txt +8 -3
  16. lollms_client-0.17.2/examples/function_call/functions_call_with images.py +0 -52
  17. lollms_client-0.17.2/lollms_client/lollms_functions.py +0 -72
  18. lollms_client-0.17.2/lollms_client/lollms_tasks.py +0 -691
  19. {lollms_client-0.17.2 → lollms_client-0.19.1}/LICENSE +0 -0
  20. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/article_summary/article_summary.py +0 -0
  21. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/deep_analyze/deep_analyse.py +0 -0
  22. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  23. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/generate_and_speak/generate_and_speak.py +0 -0
  24. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  25. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/personality_test/chat_test.py +0 -0
  26. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/personality_test/chat_with_aristotle.py +0 -0
  27. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/personality_test/tesks_test.py +0 -0
  28. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/simple_text_gen_test.py +0 -0
  29. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/simple_text_gen_with_image_test.py +0 -0
  30. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/test_local_models/local_chat.py +0 -0
  31. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_2_audio.py +0 -0
  32. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_2_image.py +0 -0
  33. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_2_image_diffusers.py +0 -0
  34. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_and_image_2_audio.py +0 -0
  35. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_gen.py +0 -0
  36. {lollms_client-0.17.2 → lollms_client-0.19.1}/examples/text_gen_system_prompt.py +0 -0
  37. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/__init__.py +0 -0
  38. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  39. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  40. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  41. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  42. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  43. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  44. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  45. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  46. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  47. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_config.py +0 -0
  48. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_discussion.py +0 -0
  49. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_js_analyzer.py +0 -0
  50. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_llm_binding.py +0 -0
  51. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_python_analyzer.py +0 -0
  52. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_stt_binding.py +0 -0
  53. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_tti_binding.py +0 -0
  54. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_ttm_binding.py +0 -0
  55. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_tts_binding.py +0 -0
  56. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_ttv_binding.py +0 -0
  57. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_types.py +0 -0
  58. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/lollms_utilities.py +0 -0
  59. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/stt_bindings/__init__.py +0 -0
  60. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  61. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  62. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  63. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tti_bindings/__init__.py +0 -0
  64. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  65. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  66. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  67. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttm_bindings/__init__.py +0 -0
  68. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  69. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  70. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  71. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tts_bindings/__init__.py +0 -0
  72. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  73. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  74. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  75. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  76. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttv_bindings/__init__.py +0 -0
  77. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  78. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client.egg-info/dependency_links.txt +0 -0
  79. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client.egg-info/requires.txt +0 -0
  80. {lollms_client-0.17.2 → lollms_client-0.19.1}/lollms_client.egg-info/top_level.txt +0 -0
  81. {lollms_client-0.17.2 → lollms_client-0.19.1}/pyproject.toml +0 -0
  82. {lollms_client-0.17.2 → lollms_client-0.19.1}/setup.cfg +0 -0
{lollms_client-0.17.2/lollms_client.egg-info → lollms_client-0.19.1}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.17.2
+ Version: 0.19.1
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -39,7 +39,7 @@ Dynamic: license-file
  [![GitHub stars](https://img.shields.io/github/stars/ParisNeo/lollms_client.svg?style=social&label=Star&maxAge=2592000)](https://github.com/ParisNeo/lollms_client/stargazers/)
  [![GitHub issues](https://img.shields.io/github/issues/ParisNeo/lollms_client.svg)](https://github.com/ParisNeo/lollms_client/issues)

- **`lollms_client`** is a powerful and flexible Python library designed to simplify interactions with the **LoLLMs (Lord of Large Language Models)** ecosystem and various other Large Language Model (LLM) backends. It provides a unified API for text generation, multimodal operations (text-to-image, text-to-speech, etc.), function calling, and advanced AI-driven tasks.
+ **`lollms_client`** is a powerful and flexible Python library designed to simplify interactions with the **LoLLMs (Lord of Large Language Models)** ecosystem and various other Large Language Model (LLM) backends. It provides a unified API for text generation, multimodal operations (text-to-image, text-to-speech, etc.), and robust function calling through the Model Context Protocol (MCP).

  Whether you're connecting to a remote LoLLMs server, an Ollama instance, the OpenAI API, or running models locally using GGUF (via `llama-cpp-python` or a managed `llama.cpp` server), Hugging Face Transformers, or vLLM, `lollms-client` offers a consistent and developer-friendly experience.

@@ -47,12 +47,12 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope

  * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM) without major code changes.
  * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generating various outputs like speech (TTS) and images (TTI).
- * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions.
- * 🛠️ **Task-Oriented Library:** High-level `TasksLibrary` for common operations like summarization, Q&A, code generation, and structured data extraction.
- * 📞 **Function Calling:** Enable LLMs to invoke your custom Python functions, bridging the gap between language models and external tools or data sources.
+ * 🤖 **Function Calling with MCP:** Empowers LLMs to use external tools and functions through the Model Context Protocol (MCP), with built-in support for local Python tool execution via the `local_mcp` binding and its default tools (file I/O, internet search, Python interpreter, image generation).
+ * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions, including during MCP interactions.
  * 💬 **Discussion Management:** Utilities to easily manage and format conversation histories for chat applications.
  * ⚙️ **Configuration Management:** Flexible ways to configure bindings and generation parameters.
- * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services.
+ * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services, including custom MCP toolsets.
+ * 📝 **High-Level Operations:** Includes convenience methods for complex tasks like sequential summarization and deep text analysis directly within `LollmsClient`.

  ## Installation

@@ -119,12 +119,61 @@ except Exception as e:

  ```

+ ### Function Calling with MCP
+
+ `lollms-client` supports robust function calling via the Model Context Protocol (MCP), allowing LLMs to interact with your custom Python tools or pre-defined utilities.
+
+ ```python
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+ import json  # For pretty printing results
+
+ # Example callback for MCP streaming
+ def mcp_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.success(chunk, end="", flush=True)  # LLM's final answer or thought process
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START: ASCIIColors.info(f"\n>> MCP Step Start: {metadata.get('tool_name', chunk)}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: ASCIIColors.success(f"\n<< MCP Step End: {metadata.get('tool_name', chunk)} -> Result: {json.dumps(metadata.get('result', ''))}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_INFO and metadata and metadata.get("type") == "tool_call_request": ASCIIColors.info(f"\nAI requests: {metadata.get('name')}({metadata.get('params')})", flush=True)
+     return True
+
+ try:
+     # Initialize LollmsClient with an LLM binding and the local_mcp binding
+     lc = LollmsClient(
+         binding_name="ollama", model_name="mistral",  # Example LLM
+         mcp_binding_name="local_mcp"  # Enables default tools (file_writer, internet_search, etc.)
+                                       # or custom tools if mcp_binding_config.tools_folder_path is set.
+     )
+
+     user_query = "What were the main AI headlines last week and write a summary to 'ai_news.txt'?"
+     ASCIIColors.blue(f"User Query: {user_query}")
+     ASCIIColors.yellow("AI Processing with MCP (streaming):")
+
+     mcp_result = lc.generate_with_mcp(
+         prompt=user_query,
+         streaming_callback=mcp_stream_callback
+     )
+     print("\n--- End of MCP Interaction ---")
+
+     if mcp_result.get("error"):
+         ASCIIColors.error(f"MCP Error: {mcp_result['error']}")
+     else:
+         ASCIIColors.cyan(f"\nFinal Answer from AI: {mcp_result.get('final_answer', 'N/A')}")
+         ASCIIColors.magenta("\nTool Calls Made:")
+         for tc in mcp_result.get("tool_calls", []):
+             print(f"  - Tool: {tc.get('name')}, Params: {tc.get('params')}, Result (first 50 chars): {str(tc.get('result'))[:50]}...")
+
+ except Exception as e:
+     ASCIIColors.error(f"An error occurred in MCP example: {e}")
+     trace_exception(e)
+ ```
+ For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).
+
  ## Documentation

  For more in-depth information, please refer to:

- * **[Usage Guide (DOC_USE.md)](DOC_USE.md):** Learn how to use `LollmsClient`, different bindings, modality features, `TasksLibrary`, and `FunctionCalling_Library` with comprehensive examples.
- * **[Developer Guide (DOC_DEV.md)](DOC_DEV.md):** Understand the architecture, how to create new bindings, and contribute to the library.
+ * **[Usage Guide (DOC_USE.md)](DOC_USE.md):** Learn how to use `LollmsClient`, different bindings, modality features, function calling with MCP, and high-level operations.
+ * **[Developer Guide (DOC_DEV.md)](DOC_DEV.md):** Understand the architecture, how to create new bindings (LLM, modality, MCP), and contribute to the library.

  ## Core Concepts

@@ -134,8 +183,9 @@ graph LR

  subgraph LollmsClient_Core
      LC -- Manages --> LLB[LLM Binding];
-     LC -- Provides Access To --> TL[TasksLibrary];
-     LC -- Provides Access To --> FCL[FunctionCalling_Library];
+     LC -- Manages --> MCPB[MCP Binding];
+     LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
+     LC -- Provides --> HighLevelOps[High-Level Ops<br>(summarize, deep_analyze etc.)];
      LC -- Provides Access To --> DM[DiscussionManager];
      LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
  end
@@ -148,14 +198,19 @@ graph LR
      LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
  end

- ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI)];
+ MCP_Interaction --> MCPB;
+ MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
+ MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+
+
+ ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
  ```

- * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding and provides access to modality bindings and helper libraries.
+ * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
  * **LLM Bindings**: These are plugins that allow `LollmsClient` to communicate with different LLM backends. You choose a binding (e.g., `"ollama"`, `"lollms"`, `"pythonllamacpp"`) when you initialize `LollmsClient`.
+ * **🔧 MCP Bindings**: Enable tool use and function calling. `lollms-client` includes `local_mcp` for executing Python tools. It discovers tools from a specified folder (or uses its default set), each defined by a `.py` script and a `.mcp.json` metadata file.
  * **Modality Bindings**: Similar to LLM bindings, but for services like Text-to-Speech (`tts`), Text-to-Image (`tti`), etc.
- * **`TasksLibrary`**: Offers high-level functions for common AI tasks (summarization, Q&A) built on top of `LollmsClient`.
- * **`FunctionCalling_Library`**: Enables you to define Python functions that the LLM can request to execute, allowing for tool usage.
+ * **High-Level Operations**: Methods directly on `LollmsClient` (e.g., `sequential_summarize`, `deep_analyze`, `generate_code`, `yes_no`) for performing complex, multi-step AI tasks.
  * **`LollmsDiscussion`**: Helps manage and format conversation histories for chat applications.

  ## Examples
@@ -164,8 +219,8 @@ The `examples/` directory in this repository contains a rich set of scripts demo
  * Basic text generation with different bindings.
  * Streaming and non-streaming examples.
  * Multimodal generation (text with images).
- * Using `TasksLibrary` for summarization and Q&A.
- * Implementing and using function calls.
+ * Using built-in methods for summarization and Q&A.
+ * Implementing and using function calls with **`generate_with_mcp`** and the `local_mcp` binding (see `examples/function_calling_with_local_custom_mcp.py` and `examples/local_mcp.py`).
  * Text-to-Speech and Text-to-Image generation.

  Explore these examples to see `lollms-client` in action!
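
A note on the High-Level Operations bullet above: these helpers are invoked directly on `LollmsClient`. The method names (`sequential_summarize`, `deep_analyze`, `yes_no`) come from the README text itself, but their signatures are not shown anywhere in this diff, so the sketch below is purely illustrative and its argument lists are assumptions.

```python
from lollms_client import LollmsClient

lc = LollmsClient(binding_name="ollama", model_name="mistral")

with open("report.txt", encoding="utf-8") as f:
    long_text = f.read()

# Method names appear in the README above; the arguments used here
# are assumptions, not the library's documented signatures.
summary = lc.sequential_summarize(long_text)                       # chunk-by-chunk summary
answer = lc.deep_analyze("What are the main risks discussed?", long_text)
is_about_mcp = lc.yes_no("Does the text mention MCP?", long_text)  # presumably returns a bool
print(summary, answer, is_about_mcp, sep="\n")
```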
{lollms_client-0.17.2 → lollms_client-0.19.1}/README.md

@@ -9,7 +9,7 @@
  [![GitHub stars](https://img.shields.io/github/stars/ParisNeo/lollms_client.svg?style=social&label=Star&maxAge=2592000)](https://github.com/ParisNeo/lollms_client/stargazers/)
  [![GitHub issues](https://img.shields.io/github/issues/ParisNeo/lollms_client.svg)](https://github.com/ParisNeo/lollms_client/issues)

- **`lollms_client`** is a powerful and flexible Python library designed to simplify interactions with the **LoLLMs (Lord of Large Language Models)** ecosystem and various other Large Language Model (LLM) backends. It provides a unified API for text generation, multimodal operations (text-to-image, text-to-speech, etc.), function calling, and advanced AI-driven tasks.
+ **`lollms_client`** is a powerful and flexible Python library designed to simplify interactions with the **LoLLMs (Lord of Large Language Models)** ecosystem and various other Large Language Model (LLM) backends. It provides a unified API for text generation, multimodal operations (text-to-image, text-to-speech, etc.), and robust function calling through the Model Context Protocol (MCP).

  Whether you're connecting to a remote LoLLMs server, an Ollama instance, the OpenAI API, or running models locally using GGUF (via `llama-cpp-python` or a managed `llama.cpp` server), Hugging Face Transformers, or vLLM, `lollms-client` offers a consistent and developer-friendly experience.

@@ -17,12 +17,12 @@ Whether you're connecting to a remote LoLLMs server, an Ollama instance, the Ope

  * 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM) without major code changes.
  * 🗣️ **Multimodal Support:** Interact with models capable of processing images and generating various outputs like speech (TTS) and images (TTI).
- * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions.
- * 🛠️ **Task-Oriented Library:** High-level `TasksLibrary` for common operations like summarization, Q&A, code generation, and structured data extraction.
- * 📞 **Function Calling:** Enable LLMs to invoke your custom Python functions, bridging the gap between language models and external tools or data sources.
+ * 🤖 **Function Calling with MCP:** Empowers LLMs to use external tools and functions through the Model Context Protocol (MCP), with built-in support for local Python tool execution via the `local_mcp` binding and its default tools (file I/O, internet search, Python interpreter, image generation).
+ * 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions, including during MCP interactions.
  * 💬 **Discussion Management:** Utilities to easily manage and format conversation histories for chat applications.
  * ⚙️ **Configuration Management:** Flexible ways to configure bindings and generation parameters.
- * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services.
+ * 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services, including custom MCP toolsets.
+ * 📝 **High-Level Operations:** Includes convenience methods for complex tasks like sequential summarization and deep text analysis directly within `LollmsClient`.

  ## Installation

@@ -89,12 +89,61 @@ except Exception as e:

  ```

+ ### Function Calling with MCP
+
+ `lollms-client` supports robust function calling via the Model Context Protocol (MCP), allowing LLMs to interact with your custom Python tools or pre-defined utilities.
+
+ ```python
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+ import json  # For pretty printing results
+
+ # Example callback for MCP streaming
+ def mcp_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: ASCIIColors.success(chunk, end="", flush=True)  # LLM's final answer or thought process
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START: ASCIIColors.info(f"\n>> MCP Step Start: {metadata.get('tool_name', chunk)}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END: ASCIIColors.success(f"\n<< MCP Step End: {metadata.get('tool_name', chunk)} -> Result: {json.dumps(metadata.get('result', ''))}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_INFO and metadata and metadata.get("type") == "tool_call_request": ASCIIColors.info(f"\nAI requests: {metadata.get('name')}({metadata.get('params')})", flush=True)
+     return True
+
+ try:
+     # Initialize LollmsClient with an LLM binding and the local_mcp binding
+     lc = LollmsClient(
+         binding_name="ollama", model_name="mistral",  # Example LLM
+         mcp_binding_name="local_mcp"  # Enables default tools (file_writer, internet_search, etc.)
+                                       # or custom tools if mcp_binding_config.tools_folder_path is set.
+     )
+
+     user_query = "What were the main AI headlines last week and write a summary to 'ai_news.txt'?"
+     ASCIIColors.blue(f"User Query: {user_query}")
+     ASCIIColors.yellow("AI Processing with MCP (streaming):")
+
+     mcp_result = lc.generate_with_mcp(
+         prompt=user_query,
+         streaming_callback=mcp_stream_callback
+     )
+     print("\n--- End of MCP Interaction ---")
+
+     if mcp_result.get("error"):
+         ASCIIColors.error(f"MCP Error: {mcp_result['error']}")
+     else:
+         ASCIIColors.cyan(f"\nFinal Answer from AI: {mcp_result.get('final_answer', 'N/A')}")
+         ASCIIColors.magenta("\nTool Calls Made:")
+         for tc in mcp_result.get("tool_calls", []):
+             print(f"  - Tool: {tc.get('name')}, Params: {tc.get('params')}, Result (first 50 chars): {str(tc.get('result'))[:50]}...")
+
+ except Exception as e:
+     ASCIIColors.error(f"An error occurred in MCP example: {e}")
+     trace_exception(e)
+ ```
+ For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).
+
  ## Documentation

  For more in-depth information, please refer to:

- * **[Usage Guide (DOC_USE.md)](DOC_USE.md):** Learn how to use `LollmsClient`, different bindings, modality features, `TasksLibrary`, and `FunctionCalling_Library` with comprehensive examples.
- * **[Developer Guide (DOC_DEV.md)](DOC_DEV.md):** Understand the architecture, how to create new bindings, and contribute to the library.
+ * **[Usage Guide (DOC_USE.md)](DOC_USE.md):** Learn how to use `LollmsClient`, different bindings, modality features, function calling with MCP, and high-level operations.
+ * **[Developer Guide (DOC_DEV.md)](DOC_DEV.md):** Understand the architecture, how to create new bindings (LLM, modality, MCP), and contribute to the library.

  ## Core Concepts

@@ -104,8 +153,9 @@ graph LR

  subgraph LollmsClient_Core
      LC -- Manages --> LLB[LLM Binding];
-     LC -- Provides Access To --> TL[TasksLibrary];
-     LC -- Provides Access To --> FCL[FunctionCalling_Library];
+     LC -- Manages --> MCPB[MCP Binding];
+     LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
+     LC -- Provides --> HighLevelOps[High-Level Ops<br>(summarize, deep_analyze etc.)];
      LC -- Provides Access To --> DM[DiscussionManager];
      LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
  end
@@ -118,14 +168,19 @@ graph LR
      LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
  end

- ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI)];
+ MCP_Interaction --> MCPB;
+ MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
+ MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+
+
+ ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
  ```

- * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding and provides access to modality bindings and helper libraries.
+ * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
  * **LLM Bindings**: These are plugins that allow `LollmsClient` to communicate with different LLM backends. You choose a binding (e.g., `"ollama"`, `"lollms"`, `"pythonllamacpp"`) when you initialize `LollmsClient`.
+ * **🔧 MCP Bindings**: Enable tool use and function calling. `lollms-client` includes `local_mcp` for executing Python tools. It discovers tools from a specified folder (or uses its default set), each defined by a `.py` script and a `.mcp.json` metadata file.
  * **Modality Bindings**: Similar to LLM bindings, but for services like Text-to-Speech (`tts`), Text-to-Image (`tti`), etc.
- * **`TasksLibrary`**: Offers high-level functions for common AI tasks (summarization, Q&A) built on top of `LollmsClient`.
- * **`FunctionCalling_Library`**: Enables you to define Python functions that the LLM can request to execute, allowing for tool usage.
+ * **High-Level Operations**: Methods directly on `LollmsClient` (e.g., `sequential_summarize`, `deep_analyze`, `generate_code`, `yes_no`) for performing complex, multi-step AI tasks.
  * **`LollmsDiscussion`**: Helps manage and format conversation histories for chat applications.

  ## Examples
@@ -134,8 +189,8 @@ The `examples/` directory in this repository contains a rich set of scripts demo
  * Basic text generation with different bindings.
  * Streaming and non-streaming examples.
  * Multimodal generation (text with images).
- * Using `TasksLibrary` for summarization and Q&A.
- * Implementing and using function calls.
+ * Using built-in methods for summarization and Q&A.
+ * Implementing and using function calls with **`generate_with_mcp`** and the `local_mcp` binding (see `examples/function_calling_with_local_custom_mcp.py` and `examples/local_mcp.py`).
  * Text-to-Speech and Text-to-Image generation.

  Explore these examples to see `lollms-client` in action!
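
The MCP Bindings bullet above states that `local_mcp` discovers tools from a folder, each tool defined by a `.py` script plus a `.mcp.json` metadata file. Mirroring the dummy-tool layout that the new example file below creates, a minimal custom tool could be scaffolded like this (the `to_upper` tool and the `my_tools` folder are hypothetical, not part of the package):

```python
# Scaffold a hypothetical local_mcp tool, following the <name>.mcp.json +
# <name>.py layout used by ensure_dummy_tools_exist() in the example below.
import json
from pathlib import Path

tool_dir = Path("my_tools") / "to_upper"   # hypothetical tools folder
tool_dir.mkdir(parents=True, exist_ok=True)

# Metadata file: name, description, and a JSON-schema style input contract.
(tool_dir / "to_upper.mcp.json").write_text(json.dumps({
    "name": "to_upper",
    "description": "Uppercases a string.",
    "input_schema": {
        "type": "object",
        "properties": {"text": {"type": "string", "description": "Text to uppercase."}},
        "required": ["text"],
    },
    "output_schema": {"type": "object", "properties": {"result": {"type": "string"}}},
}, indent=2))

# Code file: the binding loads this script and calls execute(params) -> dict,
# the same entry point the bundled dummy tools implement.
(tool_dir / "to_upper.py").write_text(
    "def execute(params: dict) -> dict:\n"
    "    text = params.get('text')\n"
    "    if text is None:\n"
    "        return {'error': 'Missing text parameter'}\n"
    "    return {'result': text.upper()}\n"
)
```

Passing `mcp_binding_config={"tools_folder_path": "my_tools"}` when constructing `LollmsClient`, as the example below does with its own folder, should then make `to_upper` discoverable.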
lollms_client-0.19.1/examples/function_calling_with_local_custom_mcp.py

@@ -0,0 +1,250 @@
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors, trace_exception
+ from pathlib import Path
+ import json  # For pretty printing results
+
+ # --- Configuration ---
+ # LLM Configuration
+ LLM_BINDING_NAME = "ollama"
+ OLLAMA_HOST_ADDRESS = "http://localhost:11434"  # Default Ollama host
+ # Ensure you have a model capable of following instructions and generating JSON.
+ # Mistral, Llama 3, or Phi-3 variants often work well.
+ OLLAMA_MODEL_NAME = "mistral-nemo:latest"  # Or "llama3:latest", "phi3:latest" - ensure it's pulled
+
+ # Local MCP Binding Configuration
+ # This path should point to the directory containing your tool subdirectories
+ # (e.g., 'get_weather/', 'sum_numbers/').
+ # For this example, we assume 'temp_mcp_tools_for_test' is in the parent directory
+ # of this examples folder.
+ TOOLS_FOLDER = Path(__file__).parent.parent / "temp_mcp_tools_for_test"
+
+ # Function Calling Parameters
+ MAX_LLM_ITERATIONS_FOR_TOOL_CALLS = 3  # How many times the LLM can decide to call a tool in a sequence
+ MAX_TOOL_CALLS_PER_TURN = 2  # Max distinct tools executed per user prompt
+
+ # --- Helper to Create Dummy Tools (if they don't exist) ---
+ def ensure_dummy_tools_exist(base_tools_dir: Path):
+     if not base_tools_dir.exists():
+         ASCIIColors.info(f"Creating dummy tools directory: {base_tools_dir}")
+         base_tools_dir.mkdir(parents=True, exist_ok=True)
+
+     tool_defs = {
+         "get_weather": {
+             "mcp": {
+                 "name": "get_weather",
+                 "description": "Fetches the current weather for a given city.",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {
+                         "city": {"type": "string", "description": "The city name."},
+                         "unit": {"type": "string", "enum": ["celsius", "fahrenheit"], "default": "celsius"}
+                     },
+                     "required": ["city"]
+                 },
+                 "output_schema": {
+                     "type": "object",
+                     "properties": {
+                         "temperature": {"type": "number"}, "condition": {"type": "string"}, "unit": {"type": "string"}
+                     }
+                 }
+             },
+             "py": """
+ import random
+ def execute(params: dict) -> dict:
+     city = params.get("city")
+     unit = params.get("unit", "celsius")
+     if not city: return {"error": "City not provided"}
+     conditions = ["sunny", "cloudy", "rainy", "snowy", "windy"]
+     temp = random.randint(-5 if unit == "celsius" else 23, 30 if unit == "celsius" else 86)
+     return {"temperature": temp, "condition": random.choice(conditions), "unit": unit}
+ """
+         },
+         "sum_numbers": {
+             "mcp": {
+                 "name": "sum_numbers",
+                 "description": "Calculates the sum of a list of numbers.",
+                 "input_schema": {
+                     "type": "object",
+                     "properties": {"numbers": {"type": "array", "items": {"type": "number"}}},
+                     "required": ["numbers"]
+                 },
+                 "output_schema": {"type": "object", "properties": {"sum": {"type": "number"}}}
+             },
+             "py": """
+ def execute(params: dict) -> dict:
+     numbers = params.get("numbers", [])
+     if not isinstance(numbers, list) or not all(isinstance(n, (int, float)) for n in numbers):
+         return {"error": "Invalid input: 'numbers' must be a list of numbers."}
+     return {"sum": sum(numbers)}
+ """
+         }
+     }
+
+     for tool_name, files_content in tool_defs.items():
+         tool_dir = base_tools_dir / tool_name
+         tool_dir.mkdir(exist_ok=True)
+
+         mcp_file = tool_dir / f"{tool_name}.mcp.json"
+         py_file = tool_dir / f"{tool_name}.py"
+
+         if not mcp_file.exists():
+             with open(mcp_file, "w") as f:
+                 json.dump(files_content["mcp"], f, indent=2)
+             ASCIIColors.info(f"Created MCP definition for {tool_name}")
+
+         if not py_file.exists():
+             with open(py_file, "w") as f:
+                 f.write(files_content["py"])
+             ASCIIColors.info(f"Created Python code for {tool_name}")
+
+ # --- Callback for streaming ---
+ def function_calling_stream_callback(chunk: str, msg_type: MSG_TYPE, metadata: dict = None, turn_history: list = None) -> bool:
+     """
+     Callback to handle streamed output during function calling.
+     """
+     if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:  # LLM generating text
+         ASCIIColors.success(chunk, end="", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         ASCIIColors.info(f">> Starting {step_type}: {step_info}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         step_type = metadata.get("type", "step") if metadata else "step"
+         step_info = metadata.get("tool_name", "") if metadata and "tool_name" in metadata else chunk
+         if metadata and "result" in metadata:
+             ASCIIColors.success(f"<< Finished {step_type}: {step_info} -> Result: {json.dumps(metadata['result'])}", flush=True)
+         else:
+             ASCIIColors.success(f"<< Finished {step_type}: {step_info}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_INFO:
+         if metadata and metadata.get("type") == "tool_call_request":
+             ASCIIColors.info(f"AI requests to call tool: {metadata.get('name')} with params: {metadata.get('params')}", flush=True)
+         else:
+             ASCIIColors.info(f"INFO: {chunk}", flush=True)
+     elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
+         ASCIIColors.error(f"ERROR in stream: {chunk}", flush=True)
+
+     # Optional debug info:
+     # ASCIIColors.info(f"DEBUG Turn History (so far): {turn_history}")
+     return True
+
+
+ def run_function_calling_example():
+     ASCIIColors.red("--- LoLLMs Client with Local MCP Function Calling Example ---")
+
+     ensure_dummy_tools_exist(TOOLS_FOLDER)  # Make sure our example tools are present
+
+     try:
+         ASCIIColors.magenta("\n1. Initializing LollmsClient...")
+         # MCP binding config is passed directly to the binding constructor
+         mcp_binding_configuration = {"tools_folder_path": str(TOOLS_FOLDER)}
+
+         lc = LollmsClient(
+             binding_name=LLM_BINDING_NAME,
+             host_address=OLLAMA_HOST_ADDRESS,
+             model_name=OLLAMA_MODEL_NAME,
+             mcp_binding_name="local_mcp",  # Activate the LocalMCP binding
+             mcp_binding_config=mcp_binding_configuration,  # Pass its specific config
+             # Optional: Configure default LLM generation params if needed
+             temperature=0.2,  # Lower temp for more focused tool decisions / final answer
+             n_predict=1024
+         )
+         ASCIIColors.green("LollmsClient initialized successfully.")
+         if lc.mcp:
+             ASCIIColors.info(f"MCP Binding '{lc.mcp.binding_name}' loaded.")
+             discovered_tools_on_init = lc.mcp.discover_tools()
+             ASCIIColors.info(f"Tools discovered by MCP binding on init: {[t['name'] for t in discovered_tools_on_init]}")
+         else:
+             ASCIIColors.error("MCP binding was not loaded correctly. Aborting.")
+             return
+
+         # --- Example Interaction 1: Weather Request ---
+         ASCIIColors.magenta("\n2. Example 1: Asking for weather")
+         user_prompt_weather = "What's the weather like in Paris today, and can you tell me in Fahrenheit?"
+         ASCIIColors.blue(f"User: {user_prompt_weather}")
+         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
+
+         weather_result = lc.generate_with_mcp(
+             prompt=user_prompt_weather,
+             # tools=None,  # Let it discover from the binding
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
+             streaming_callback=function_calling_stream_callback,
+             # interactive_tool_execution=True  # Uncomment to confirm tool calls
+         )
+         print("\n--- End of AI Response for Weather ---")
+         if weather_result["error"]:
+             ASCIIColors.error(f"Error in weather example: {weather_result['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Weather): {weather_result['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Weather Example):")
+             for tc in weather_result["tool_calls"]:
+                 print(f"  - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+         # --- Example Interaction 2: Summation Request ---
+         ASCIIColors.magenta("\n3. Example 2: Asking to sum numbers")
+         user_prompt_sum = "Hey, can you please calculate the sum of 15.5, 25, and -5.5 for me?"
+         ASCIIColors.blue(f"User: {user_prompt_sum}")
+         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
+
+         sum_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_sum,
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS,
+             streaming_callback=function_calling_stream_callback
+         )
+         print("\n--- End of AI Response for Sum ---")
+         if sum_result_data["error"]:
+             ASCIIColors.error(f"Error in sum example: {sum_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Sum): {sum_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Sum Example):")
+             for tc in sum_result_data["tool_calls"]:
+                 print(f"  - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+         # --- Example Interaction 3: Multi-step (hypothetical, weather then sum) ---
+         ASCIIColors.magenta("\n4. Example 3: Multi-step (Weather, then maybe sum if AI decides)")
+         user_prompt_multi = "What's the weather in Berlin? And also, what's 100 + 200 + 300?"
+         ASCIIColors.blue(f"User: {user_prompt_multi}")
+         ASCIIColors.yellow("AI thinking and interacting with tools (streaming output):")
+
+         multi_result_data = lc.generate_with_mcp(
+             prompt=user_prompt_multi,
+             max_tool_calls=MAX_TOOL_CALLS_PER_TURN,  # Allow up to 2 different tools
+             max_llm_iterations=MAX_LLM_ITERATIONS_FOR_TOOL_CALLS + 1,  # Allow a bit more LLM thinking
+             streaming_callback=function_calling_stream_callback
+         )
+         print("\n--- End of AI Response for Multi-step ---")
+         if multi_result_data["error"]:
+             ASCIIColors.error(f"Error in multi-step example: {multi_result_data['error']}")
+         else:
+             ASCIIColors.cyan(f"\nFinal Answer (Multi-step): {multi_result_data['final_answer']}")
+             ASCIIColors.info("\nTool Calls Made (Multi-step Example):")
+             for tc in multi_result_data["tool_calls"]:
+                 print(f"  - Tool: {tc['name']}, Params: {tc['params']}, Result: {json.dumps(tc['result'])}")
+
+     except ValueError as ve:  # Catch init errors
+         ASCIIColors.error(f"Initialization Error: {ve}")
+         trace_exception(ve)
+     except ConnectionRefusedError:
+         ASCIIColors.error(f"Connection refused. Is the Ollama server running at {OLLAMA_HOST_ADDRESS}?")
+     except Exception as e:
+         ASCIIColors.error(f"An unexpected error occurred: {e}")
+         trace_exception(e)
+     finally:
+         ASCIIColors.info(f"If dummy tools were created, they are in: {TOOLS_FOLDER.resolve()}")
+         # Consider cleaning up TOOLS_FOLDER if it was created purely for this test run.
+         # For this example, we'll leave them.
+         # import shutil
+         # if "ensure_dummy_tools_exist" in globals() and TOOLS_FOLDER.exists() and "temp_mcp_tools_for_test" in str(TOOLS_FOLDER):
+         #     if input(f"Clean up dummy tools at {TOOLS_FOLDER}? (y/n): ").lower() == 'y':
+         #         shutil.rmtree(TOOLS_FOLDER)
+         #         ASCIIColors.info("Cleaned up dummy tools folder.")
+
+     ASCIIColors.red("\n--- Example Finished ---")
+
+ if __name__ == "__main__":
+     run_function_calling_example()
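
Judging from how both the README snippet and this example index into the return value, `generate_with_mcp` appears to return a dictionary shaped roughly as follows; the field names are taken from the code above, while the values are invented for illustration.

```python
# Shape inferred from the examples above; the values shown are made up.
mcp_result = {
    "final_answer": "It is currently 77°F and sunny in Paris.",
    "tool_calls": [
        {
            "name": "get_weather",
            "params": {"city": "Paris", "unit": "fahrenheit"},
            "result": {"temperature": 77, "condition": "sunny", "unit": "fahrenheit"},
        }
    ],
    "error": None,  # truthy (e.g., a message string) when orchestration fails
}
```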