lollms-client 0.33.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
- lollms_client/llm_bindings/claude/__init__.py +4 -7
- lollms_client/llm_bindings/gemini/__init__.py +3 -7
- lollms_client/llm_bindings/grok/__init__.py +3 -7
- lollms_client/llm_bindings/groq/__init__.py +4 -6
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
- lollms_client/llm_bindings/litellm/__init__.py +15 -6
- lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
- lollms_client/llm_bindings/lollms/__init__.py +24 -14
- lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
- lollms_client/llm_bindings/mistral/__init__.py +3 -5
- lollms_client/llm_bindings/ollama/__init__.py +6 -11
- lollms_client/llm_bindings/open_router/__init__.py +4 -6
- lollms_client/llm_bindings/openai/__init__.py +7 -14
- lollms_client/llm_bindings/openllm/__init__.py +12 -12
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
- lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
- lollms_client/llm_bindings/transformers/__init__.py +14 -6
- lollms_client/llm_bindings/vllm/__init__.py +16 -12
- lollms_client/lollms_core.py +303 -490
- lollms_client/lollms_discussion.py +431 -78
- lollms_client/lollms_llm_binding.py +192 -381
- lollms_client/lollms_mcp_binding.py +33 -2
- lollms_client/lollms_tti_binding.py +107 -2
- lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
- lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
- lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
- lollms_client/stt_bindings/lollms/__init__.py +6 -8
- lollms_client/stt_bindings/whisper/__init__.py +2 -4
- lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
- lollms_client/tti_bindings/dalle/__init__.py +50 -29
- lollms_client/tti_bindings/diffusers/__init__.py +227 -439
- lollms_client/tti_bindings/gemini/__init__.py +320 -0
- lollms_client/tti_bindings/lollms/__init__.py +8 -9
- lollms_client-1.1.0.dist-info/METADATA +1214 -0
- lollms_client-1.1.0.dist-info/RECORD +69 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/top_level.txt +0 -2
- examples/article_summary/article_summary.py +0 -58
- examples/console_discussion/console_app.py +0 -266
- examples/console_discussion.py +0 -448
- examples/deep_analyze/deep_analyse.py +0 -30
- examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
- examples/function_calling_with_local_custom_mcp.py +0 -250
- examples/generate_a_benchmark_for_safe_store.py +0 -89
- examples/generate_and_speak/generate_and_speak.py +0 -251
- examples/generate_game_sfx/generate_game_fx.py +0 -240
- examples/generate_text_with_multihop_rag_example.py +0 -210
- examples/gradio_chat_app.py +0 -228
- examples/gradio_lollms_chat.py +0 -259
- examples/internet_search_with_rag.py +0 -226
- examples/lollms_chat/calculator.py +0 -59
- examples/lollms_chat/derivative.py +0 -48
- examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
- examples/lollms_discussions_test.py +0 -155
- examples/mcp_examples/external_mcp.py +0 -267
- examples/mcp_examples/local_mcp.py +0 -171
- examples/mcp_examples/openai_mcp.py +0 -203
- examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
- examples/mcp_examples/run_standard_mcp_example.py +0 -204
- examples/simple_text_gen_test.py +0 -173
- examples/simple_text_gen_with_image_test.py +0 -178
- examples/test_local_models/local_chat.py +0 -9
- examples/text_2_audio.py +0 -77
- examples/text_2_image.py +0 -144
- examples/text_2_image_diffusers.py +0 -274
- examples/text_and_image_2_audio.py +0 -59
- examples/text_gen.py +0 -30
- examples/text_gen_system_prompt.py +0 -29
- lollms_client-0.33.0.dist-info/METADATA +0 -854
- lollms_client-0.33.0.dist-info/RECORD +0 -101
- test/test_lollms_discussion.py +0 -368
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1214 @@

Metadata-Version: 2.4
Name: lollms_client
Version: 1.1.0
Summary: A client library for LoLLMs generate endpoint
Author-email: ParisNeo <parisneoai@gmail.com>
License: Apache Software License
Project-URL: Homepage, https://github.com/ParisNeo/lollms_client
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: OS Independent
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Science/Research
Requires-Python: >=3.7
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: requests
Requires-Dist: ascii-colors
Requires-Dist: pipmaster
Requires-Dist: pyyaml
Requires-Dist: tiktoken
Requires-Dist: pydantic
Requires-Dist: numpy
Requires-Dist: pillow
Requires-Dist: sqlalchemy
Dynamic: license-file

# LoLLMs Client Library

[Apache 2.0 License](https://opensource.org/licenses/Apache-2.0)
[PyPI version](https://badge.fury.io/py/lollms_client)
[Python versions](https://pypi.org/project/lollms-client/)
[Downloads](https://pepy.tech/project/lollms-client)
[Usage documentation](DOC_USE.md)
[Developer documentation](DOC_DEV.md)
[GitHub stars](https://github.com/ParisNeo/lollms_client/stargazers/)
[GitHub issues](https://github.com/ParisNeo/lollms_client/issues)

**`lollms_client`** is a powerful and flexible Python library designed to simplify interactions with the **LoLLMs (Lord of Large Language Models)** ecosystem and various other Large Language Model (LLM) backends. It provides a unified API for text generation, multimodal operations (text-to-image, text-to-speech, etc.), and robust function calling through the Model Context Protocol (MCP).

Whether you're connecting to a remote LoLLMs server, an Ollama instance, the OpenAI API, or running models locally using GGUF (via `llama-cpp-python` or a managed `llama.cpp` server), Hugging Face Transformers, or vLLM, `lollms-client` offers a consistent and developer-friendly experience.

## Key Features

* 🔌 **Versatile Binding System:** Seamlessly switch between different LLM backends (LoLLMs, Ollama, OpenAI, Llama.cpp, Transformers, vLLM, OpenLLM, Gemini, Claude, Groq, OpenRouter, Hugging Face Inference API) using a unified `llm_binding_config` dictionary for all parameters.
* 🗣️ **Multimodal Support:** Interact with models capable of processing images and generate various outputs like speech (TTS), images (TTI), video (TTV), and music (TTM).
* 🖼️ **Selective Image Activation:** Control which images in a message are active and sent to the model, allowing for fine-grained multimodal context management without deleting the original data.
* 🤖 **Agentic Workflows with MCP:** Empower LLMs to act as sophisticated agents, breaking down complex tasks, selecting and executing external tools (e.g., internet search, code interpreter, file I/O, image generation) through the Model Context Protocol (MCP) using a robust "observe-think-act" loop.
* 🎭 **Personalities as Agents:** Personalities can now define their own set of required tools (MCPs) and have access to static or dynamic knowledge bases (`data_source`), turning them into self-contained, ready-to-use agents.
* 🚀 **Streaming & Callbacks:** Efficiently handle real-time text generation with customizable callback functions across all generation methods, including during agentic (MCP) interactions.
* 📑 **Long Context Processing:** The `long_context_processing` method (formerly `sequential_summarize`) intelligently chunks and synthesizes texts that exceed the model's context window, suitable for summarization or deep analysis.
* 📝 **Advanced Structured Content Generation:** Reliably generate structured JSON output from natural language prompts using the `generate_structured_content` helper method, enforcing a specific schema.
* 💬 **Advanced Discussion Management:** Robustly manage conversation histories with `LollmsDiscussion`, featuring branching, context exporting, and automatic pruning.
* 🧠 **Persistent Memory & Data Zones:** `LollmsDiscussion` now supports multiple, distinct data zones (`user_data_zone`, `discussion_data_zone`, `personality_data_zone`) and a long-term `memory` field. This allows for sophisticated context layering and state management, enabling agents to learn and remember over time.
* ✍️ **Automatic Memorization:** A new `memorize()` method allows the AI to analyze a conversation and extract key facts, appending them to the long-term `memory` for recall in future sessions.
* 📊 **Detailed Context Analysis:** The `get_context_status()` method provides a rich, detailed breakdown of the prompt context, showing the content and token count for each individual component (system prompt, data zones, message history).
* ⚙️ **Standardized Configuration Management:** A unified dictionary-based system (`llm_binding_config`) to configure any binding in a consistent manner.
* 🧩 **Extensible:** Designed to easily incorporate new LLM backends and modality services, including custom MCP toolsets.
* 📝 **High-Level Operations:** Includes convenience methods for complex tasks like sequential summarization and deep text analysis directly within `LollmsClient`.

## Installation

You can install `lollms_client` directly from PyPI:

```bash
pip install lollms-client
```

This will install the core library. Some bindings may require additional dependencies (e.g., `llama-cpp-python`, `torch`, `transformers`, `ollama`, `vllm`, `Pillow` for image utilities, `docling` for document parsing). The library attempts to manage these using `pipmaster`, but for complex dependencies (especially those requiring compilation like `llama-cpp-python` with GPU support), manual installation might be preferred.
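
If you prefer to install the optional dependencies yourself rather than letting `pipmaster` fetch them at runtime, the commands below are a reasonable starting point; install only what the bindings you plan to use actually need, and see the `llama-cpp-python` documentation for GPU-enabled builds.

```bash
# Optional extras, depending on the bindings you intend to use:
pip install pillow                   # image utilities (multimodal examples)
pip install ollama                   # Ollama binding client
pip install llama-cpp-python         # local GGUF inference (GPU builds need extra compile flags)
pip install torch transformers       # Hugging Face Transformers binding
pip install docling                  # document parsing
```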

## Core Generation Methods

The `LollmsClient` provides several methods for generating text, catering to different use cases.

### Basic Text Generation (`generate_text`)

This is the most straightforward method for generating a response based on a simple prompt.

```python
from lollms_client import LollmsClient, MSG_TYPE
from ascii_colors import ASCIIColors
import os

# Callback for streaming output
def simple_streaming_callback(chunk: str, msg_type: MSG_TYPE, params=None, metadata=None) -> bool:
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
        ASCIIColors.error(f"\nStreaming Error: {chunk}")
    return True  # True to continue streaming

try:
    # Initialize client to connect to a LoLLMs server.
    # All binding-specific parameters now go into the 'llm_binding_config' dictionary.
    lc = LollmsClient(
        llm_binding_name="lollms",  # This is the default binding
        llm_binding_config={
            "host_address": "http://localhost:9642",  # Default port for LoLLMs server
            # "service_key": "your_lollms_api_key_here"  # Get key from LoLLMs UI -> User Settings if security is enabled
        }
    )

    prompt = "Tell me a fun fact about space."
    ASCIIColors.yellow(f"Prompt: {prompt}")

    # Generate text with streaming
    ASCIIColors.green("Streaming Response:")
    response_text = lc.generate_text(
        prompt,
        n_predict=100,
        stream=True,
        streaming_callback=simple_streaming_callback
    )
    print("\n--- End of Stream ---")

    # The 'response_text' variable will contain the full concatenated text
    # if streaming_callback returns True throughout.
    if isinstance(response_text, str):
        ASCIIColors.cyan(f"\nFull streamed text collected: {response_text[:100]}...")
    elif isinstance(response_text, dict) and "error" in response_text:
        ASCIIColors.error(f"Error during generation: {response_text['error']}")

except ValueError as ve:
    ASCIIColors.error(f"Initialization Error: {ve}")
    ASCIIColors.info("Ensure a LoLLMs server is running or configure another binding.")
except ConnectionRefusedError:
    ASCIIColors.error("Connection refused. Is the LoLLMs server running at http://localhost:9642?")
except Exception as e:
    ASCIIColors.error(f"An unexpected error occurred: {e}")
```

### Generating from Message Lists (`generate_from_messages`)

For more complex conversational interactions, you can provide the LLM with a list of messages, similar to the OpenAI Chat Completion API. This allows you to define roles (system, user, assistant) and build multi-turn conversations programmatically.

```python
from lollms_client import LollmsClient, MSG_TYPE
from ascii_colors import ASCIIColors
import os

def streaming_callback_for_messages(chunk: str, msg_type: MSG_TYPE, params=None, metadata=None) -> bool:
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)
    return True

try:
    # Example for an Ollama binding
    # Ensure you have Ollama installed and model 'llama3' pulled (e.g., ollama pull llama3)
    lc = LollmsClient(
        llm_binding_name="ollama",
        llm_binding_config={
            "model_name": "llama3",
            "host_address": "http://localhost:11434"  # Default Ollama address
        }
    )

    # Define the conversation history as a list of messages
    messages = [
        {"role": "system", "content": "You are a helpful assistant that specializes in programming."},
        {"role": "user", "content": "Hello, what's your name?"},
        {"role": "assistant", "content": "I am an AI assistant created by Google."},
        {"role": "user", "content": "Can you explain recursion in Python?"}
    ]

    ASCIIColors.yellow("\nGenerating response from messages:")
    response_text = lc.generate_from_messages(
        messages=messages,
        n_predict=200,
        stream=True,
        streaming_callback=streaming_callback_for_messages
    )
    print("\n--- End of Message Stream ---")
    ASCIIColors.cyan(f"\nFull collected response: {response_text[:150]}...")

except Exception as e:
    ASCIIColors.error(f"Error during message generation: {e}")
```

### Advanced Structured Content Generation (`generate_structured_content`)

The `generate_structured_content` method is a powerful utility for forcing an LLM's output into a specific JSON format. It's ideal for extracting information, getting consistent tool parameters, or any task requiring reliable, machine-readable output.

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import json
import os

try:
    # Using Ollama as an example binding
    lc = LollmsClient(llm_binding_name="ollama", llm_binding_config={"model_name": "llama3"})

    text_block = "John Doe is a 34-year-old software engineer from New York. He loves hiking and Python programming."

    # Define the exact JSON structure you want
    output_template = {
        "full_name": "string",
        "age": "integer",
        "profession": "string",
        "city": "string",
        "hobbies": ["list", "of", "strings"]  # Example of a list in schema
    }

    ASCIIColors.yellow(f"\nExtracting structured data from: '{text_block}'")
    ASCIIColors.yellow(f"Using schema: {json.dumps(output_template)}")

    # Generate the structured data
    extracted_data = lc.generate_structured_content(
        prompt=f"Extract the relevant information from the following text:\n\n{text_block}",
        schema=output_template,  # Note: parameter is 'schema'
        temperature=0.0  # Use low temperature for deterministic structured output
    )

    if extracted_data:
        ASCIIColors.green("\nExtracted Data (JSON):")
        print(json.dumps(extracted_data, indent=2))
    else:
        ASCIIColors.error("\nFailed to extract structured data.")

except Exception as e:
    ASCIIColors.error(f"An error occurred during structured content generation: {e}")
```
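
### Long Context Processing (`long_context_processing`)

The Key Features list above also mentions `long_context_processing` (formerly `sequential_summarize`), which chunks and synthesizes texts that exceed the model's context window. It is not demonstrated elsewhere in this README, so the following is only a minimal, hypothetical sketch: the positional text argument and the `contextual_prompt` keyword are assumptions made for illustration, not confirmed API. Check the `LollmsClient` source (`lollms_core.py`) for the actual signature.

```python
# Hypothetical sketch only: argument names below are assumptions, not confirmed API.
from lollms_client import LollmsClient

lc = LollmsClient(llm_binding_name="ollama", llm_binding_config={"model_name": "llama3"})

with open("big_report.txt", "r", encoding="utf-8") as f:
    very_long_text = f.read()  # a document larger than the model's context window

# long_context_processing chunks the text, processes each chunk, then synthesizes a result.
summary = lc.long_context_processing(
    very_long_text,
    contextual_prompt="Summarize the key findings of this report.",  # assumed parameter name
)
print(summary)
```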

## Advanced Discussion Management

The `LollmsDiscussion` class is a core component for managing conversational state, including message history, long-term memory, and various context zones.

### Basic Chat with `LollmsDiscussion`

For general conversational agents that need to maintain context across turns, `LollmsDiscussion` simplifies the process. It automatically handles message formatting, history management, and context window limitations.

```python
from lollms_client import LollmsClient, LollmsDiscussion, MSG_TYPE, LollmsDataManager
from ascii_colors import ASCIIColors
from pathlib import Path  # needed to build the database path below
import os
import tempfile

# Initialize LollmsClient
try:
    lc = LollmsClient(
        llm_binding_name="ollama",
        llm_binding_config={
            "model_name": "llama3",
            "host_address": "http://localhost:11434"
        }
    )
except Exception as e:
    ASCIIColors.error(f"Failed to initialize LollmsClient for discussion: {e}")
    exit()

# Create a new discussion. For persistent discussions, pass a db_manager.
# Using a temporary directory for the database for this example's simplicity
with tempfile.TemporaryDirectory() as tmpdir:
    db_path = Path(tmpdir) / "discussion_db.sqlite"
    db_manager = LollmsDataManager(f"sqlite:///{db_path}")

    discussion_id = "basic_chat_example"
    discussion = db_manager.get_discussion(lc, discussion_id)
    if not discussion:
        ASCIIColors.yellow(f"\nCreating new discussion '{discussion_id}'...")
        discussion = LollmsDiscussion.create_new(
            lollms_client=lc,
            db_manager=db_manager,
            id=discussion_id,
            autosave=True  # Important for persistence
        )
        discussion.system_prompt = "You are a friendly and helpful AI."
        discussion.commit()
    else:
        ASCIIColors.green(f"\nLoaded existing discussion '{discussion_id}'.")

    # Define a simple callback for streaming
    def chat_callback(chunk: str, msg_type: MSG_TYPE, **kwargs) -> bool:
        if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
            print(chunk, end="", flush=True)
        return True

    try:
        ASCIIColors.cyan("> User: Hello, how are you today?")
        response = discussion.chat(
            user_message="Hello, how are you today?",
            streaming_callback=chat_callback
        )
        print("\n")  # Newline after stream finishes

        ai_message = response['ai_message']
        user_message = response['user_message']

        ASCIIColors.green(f"< Assistant (Full): {ai_message.content[:100]}...")

        # Now, continue the conversation
        ASCIIColors.cyan("\n> User: Can you recommend a good book?")
        response = discussion.chat(
            user_message="Can you recommend a good book?",
            streaming_callback=chat_callback
        )
        print("\n")

        # You can inspect the full message history
        ASCIIColors.magenta("\n--- Discussion History (last 3 messages) ---")
        for msg in discussion.get_messages()[-3:]:
            print(f"[{msg.sender.capitalize()}]: {msg.content[:50]}...")

    except Exception as e:
        ASCIIColors.error(f"An error occurred during discussion chat: {e}")
```

### Building Stateful Agents with Memory and Data Zones

The `LollmsDiscussion` class provides a sophisticated system for creating stateful agents that can remember information across conversations. This is achieved through a layered system of "context zones" that are automatically combined into the AI's system prompt.

#### Understanding the Context Zones

The AI's context is more than just chat history. It's built from several distinct components, each with a specific purpose:

* **`system_prompt`**: The foundational layer defining the AI's core identity, persona, and primary instructions.
* **`memory`**: The AI's long-term, persistent memory. It stores key facts about the user or topics, built up over time using the `memorize()` method.
* **`user_data_zone`**: Holds session-specific information about the user's current state or goals (e.g., "User is currently working on 'file.py'").
* **`discussion_data_zone`**: Contains state or meta-information about the current conversational task (e.g., "Step 1 of the plan is complete").
* **`personality_data_zone`**: A knowledge base or set of rules automatically injected from a `LollmsPersonality`'s `data_source`.
* **`pruning_summary`**: An automatic, AI-generated summary of the oldest messages in a very long chat, used to conserve tokens without losing the gist of the early conversation.

The `get_context_status()` method is your window into this system, showing you exactly how these zones are combined and how many tokens they consume.

Let's see this in action with a "Personal Assistant" agent that learns about the user over time.

```python
from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion, MSG_TYPE
from ascii_colors import ASCIIColors
from pathlib import Path  # needed to build the database path below
import json
import tempfile
import os

# --- 1. Setup a persistent database for our discussion ---
with tempfile.TemporaryDirectory() as tmpdir:
    db_path = Path(tmpdir) / "my_assistant.db"
    db_manager = LollmsDataManager(f"sqlite:///{db_path}")

    try:
        lc = LollmsClient(llm_binding_name="ollama", llm_binding_config={"model_name": "llama3"})
    except Exception as e:
        ASCIIColors.error(f"Failed to initialize LollmsClient for stateful agent: {e}")
        exit()

    # Try to load an existing discussion or create a new one
    discussion_id = "user_assistant_chat_1"
    discussion = db_manager.get_discussion(lc, discussion_id)
    if not discussion:
        ASCIIColors.yellow("Creating a new discussion for stateful agent...")
        discussion = LollmsDiscussion.create_new(
            lollms_client=lc,
            db_manager=db_manager,
            id=discussion_id,
            autosave=True  # Important for persistence
        )
        # Let's preset some data in different zones
        discussion.system_prompt = "You are a helpful Personal Assistant."
        discussion.user_data_zone = "User's Name: Alex\nUser's Goal: Learn about AI development."
        discussion.commit()
    else:
        ASCIIColors.green("Loaded existing discussion for stateful agent.")

    def run_chat_turn(prompt: str):
        """Helper function to run a single chat turn and print details."""
        ASCIIColors.cyan(f"\n> User: {prompt}")

        # --- A. Check context status BEFORE the turn using get_context_status() ---
        ASCIIColors.magenta("\n--- Context Status (Before Generation) ---")
        status = discussion.get_context_status()
        print(f"Max Tokens: {status.get('max_tokens')}, Current Tokens: {status.get('current_tokens')}")

        # Print the system context details
        if 'system_context' in status['zones']:
            sys_ctx = status['zones']['system_context']
            print(f"  - System Context Tokens: {sys_ctx['tokens']}")
            # The 'breakdown' shows the individual zones that were combined
            for name, content in sys_ctx.get('breakdown', {}).items():
                # For brevity, show only first line of content
                print(f"    -> Contains '{name}': {content.split(os.linesep)[0]}...")

        # Print the message history details
        if 'message_history' in status['zones']:
            msg_hist = status['zones']['message_history']
            print(f"  - Message History Tokens: {msg_hist['tokens']} ({msg_hist['message_count']} messages)")

        print("------------------------------------------")

        # --- B. Run the chat ---
        ASCIIColors.green("\n< Assistant:")
        response = discussion.chat(
            user_message=prompt,
            streaming_callback=lambda chunk, type, **k: print(chunk, end="", flush=True) if type==MSG_TYPE.MSG_TYPE_CHUNK else None
        )
        print()  # Newline after stream

        # --- C. Trigger memorization to update the 'memory' zone ---
        ASCIIColors.yellow("\nTriggering memorization process...")
        discussion.memorize()
        discussion.commit()  # Save the new memory to the DB
        ASCIIColors.yellow("Memorization complete.")

    # --- Run a few turns ---
    run_chat_turn("Hi there! Can you recommend a good Python library for building web APIs?")
    run_chat_turn("That sounds great. By the way, my favorite programming language is Rust, I find its safety features amazing.")
    run_chat_turn("What was my favorite programming language again?")

    # --- Final Inspection of Memory ---
    ASCIIColors.magenta("\n--- Final Context Status ---")
    status = discussion.get_context_status()
    print(f"Max Tokens: {status.get('max_tokens')}, Current Tokens: {status.get('current_tokens')}")
    if 'system_context' in status['zones']:
        sys_ctx = status['zones']['system_context']
        print(f"  - System Context Tokens: {sys_ctx['tokens']}")
        for name, content in sys_ctx.get('breakdown', {}).items():
            # Print the full content of the memory zone to verify it was updated
            if name == 'memory':
                ASCIIColors.yellow(f"    -> Full '{name}' content:\n{content}")
            else:
                print(f"    -> Contains '{name}': {content.split(os.linesep)[0]}...")
    print("------------------------------------------")
```

#### How it Works:

1. **Persistence & Initialization:** The `LollmsDataManager` saves and loads the discussion. We initialize the `system_prompt` and `user_data_zone` to provide initial context.
2. **`get_context_status()`:** Before each generation, we call this method. The output shows a `system_context` block with a token count for all combined zones and a `breakdown` field that lets us see the content of each individual zone that contributed to it.
3. **`memorize()`:** After the user mentions their favorite language, `memorize()` is called. The LLM analyzes the last turn, identifies this new, important fact, and appends it to the `discussion.memory` zone.
4. **Recall:** In the final turn, when asked to recall the favorite language, the AI has access to the updated `memory` content within its system context and can correctly answer "Rust". This demonstrates true long-term, stateful memory.
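
The list above describes `memorize()` appending facts to the discussion's long-term memory. If you want to seed or inspect that memory directly, the following minimal sketch assumes the zone is exposed as a plain `discussion.memory` attribute, as the zone names in this section suggest; verify this against the `LollmsDiscussion` implementation before relying on it.

```python
# Sketch: seed and inspect the long-term memory zone directly.
# Assumes 'discussion' is the LollmsDiscussion created in the example above, and
# that 'memory' is a plain string attribute (an assumption, not confirmed API).
discussion.memory = "User's favorite programming language: Rust"
discussion.commit()  # persist the change, as in the stateful agent example

# ... after more turns and a memorize() call ...
print(discussion.memory)  # inspect what the AI has accumulated so far
```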

### Managing Multimodal Context: Activating and Deactivating Images

When working with multimodal models, you can now control which images in a message are active and sent to the model. This is useful for focusing the AI's attention, saving tokens on expensive vision models, or allowing a user to correct which images are relevant.

This is managed at the `LollmsMessage` level using the `toggle_image_activation()` method.

```python
from lollms_client import LollmsClient, LollmsDiscussion, LollmsDataManager, MSG_TYPE
from ascii_colors import ASCIIColors
import base64
from pathlib import Path
import os
import tempfile

# Helper to create a dummy image b64 string
def create_dummy_image(text, output_dir):
    try:
        from PIL import Image, ImageDraw, ImageFont
    except ImportError:
        ASCIIColors.warning("Pillow not installed. Skipping image example.")
        return None

    # Try to find a common font, otherwise use default
    font_path = Path("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf")  # Common Linux path
    if not font_path.exists():
        font_path = Path("/Library/Fonts/Arial.ttf")  # Common macOS path
    if not font_path.exists():
        font_path = Path("C:/Windows/Fonts/arial.ttf")  # Common Windows path

    try:
        font = ImageFont.truetype(str(font_path), 15)
    except (IOError, OSError):
        font = ImageFont.load_default()  # Fallback to default if font not found

    img = Image.new('RGB', (200, 50), color=(73, 109, 137))
    d = ImageDraw.Draw(img)
    d.text((10, 10), text, fill=(255, 255, 0), font=font)

    temp_file = Path(output_dir) / f"temp_img_{text.replace(' ', '_')}.png"
    img.save(temp_file, "PNG")
    b64 = base64.b64encode(temp_file.read_bytes()).decode('utf-8')
    temp_file.unlink()  # Clean up temporary file
    return b64

# --- 1. Setup ---
try:
    # Llava is a good multi-modal model for Ollama
    # Ensure Ollama is running and 'llava' model is pulled (e.g., ollama pull llava)
    lc = LollmsClient(llm_binding_name="ollama", llm_binding_config={"model_name": "llava"})
except Exception as e:
    ASCIIColors.warning(f"Failed to initialize LollmsClient for image example: {e}")
    ASCIIColors.warning("Skipping image activation example. Ensure Ollama is running and 'llava' model is pulled.")
    exit()

with tempfile.TemporaryDirectory() as tmpdir:
    db_path = Path(tmpdir) / "image_discussion_db.sqlite"
    db_manager = LollmsDataManager(f"sqlite:///{db_path}")
    discussion = LollmsDiscussion.create_new(lollms_client=lc, db_manager=db_manager)

    # --- 2. Add a message with multiple images ---
    # Ensure Pillow is installed: pip install Pillow
    img1_b64 = create_dummy_image("Image 1: Apple", tmpdir)
    img2_b64 = create_dummy_image("Image 2: Cat", tmpdir)
    img3_b64 = create_dummy_image("Image 3: Dog", tmpdir)

    if not img1_b64 or not img2_b64 or not img3_b64:
        ASCIIColors.warning("Skipping image activation example due to image creation failure (likely missing Pillow or font).")
        exit()

    discussion.add_message(
        sender="user",
        content="What is in the second image?",
        images=[img1_b64, img2_b64, img3_b64]
    )
    user_message = discussion.get_messages()[-1]

    # --- 3. Check the initial state ---
    ASCIIColors.magenta("--- Initial State (All 3 Images Active) ---")
    status_before = discussion.get_context_status()
    # The 'content' field for message history will indicate the number of images if present
    print(f"Message History Text (showing active images):\n{status_before['zones']['message_history']['content']}")

    # --- 4. Deactivate irrelevant images ---
    ASCIIColors.magenta("\n--- Deactivating images 1 and 3 ---")
    user_message.toggle_image_activation(index=0, active=False)  # Deactivate first image (Apple)
    user_message.toggle_image_activation(index=2, active=False)  # Deactivate third image (Dog)
    discussion.commit()  # Save changes to the message

    # --- 5. Check the new state ---
    ASCIIColors.magenta("\n--- New State (Only Image 2 is Active) ---")
    status_after = discussion.get_context_status()
    print(f"Message History Text (showing active images):\n{status_after['zones']['message_history']['content']}")

    ASCIIColors.green("\nNotice the message now says '(1 image(s) attached)' instead of 3, and only the active image will be sent to the multimodal LLM.")
    ASCIIColors.green("To confirm, let's ask the model what it sees:")

    # This will send only the activated image
    response = discussion.chat(
        user_message="What do you see in the image(s) attached to my last message?",
        # Use a streaming callback to see the response
        streaming_callback=lambda chunk, type, **k: print(chunk, end="", flush=True) if type==MSG_TYPE.MSG_TYPE_CHUNK else None
    )
    print("\n")
    ASCIIColors.green(f"Assistant's response after toggling images: {response['ai_message'].content}")
```

**Note:** The image generation helper in the example requires `Pillow` (`pip install Pillow`). It also attempts to find common system fonts; if issues persist, you might need to install `matplotlib` for better font handling or provide a specific font path.

### Putting It All Together: An Advanced Agentic Example

Let's create a **Python Coder Agent**. This agent will use a set of coding rules from a local file as its knowledge base and will be equipped with a tool to execute the code it writes. This demonstrates the synergy between `LollmsPersonality` (with `data_source` and `active_mcps`), `LollmsDiscussion`, and the MCP system.

#### Step 1: Create the Knowledge Base (`coding_rules.txt`)

Create a simple text file with the rules our agent must follow.

```text
# File: coding_rules.txt

1. All Python functions must include a Google-style docstring.
2. Use type hints for all function parameters and return values.
3. The main execution block should be protected by `if __name__ == "__main__":`.
4. After defining a function, add a simple example of its usage inside the main block.
5. Print the output of the example usage to the console.
```

#### Step 2: The Main Script (`agent_example.py`)

This script will define the personality, initialize the client, and run the agent.

```python
from pathlib import Path
from lollms_client import LollmsClient, LollmsPersonality, LollmsDiscussion, LollmsDataManager, MSG_TYPE
from ascii_colors import ASCIIColors, trace_exception
import json
import tempfile
import os

# A detailed callback to visualize the agent's process
def agent_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, **kwargs) -> bool:
    if not params: params = {}

    if msg_type == MSG_TYPE.MSG_TYPE_STEP:
        ASCIIColors.yellow(f"\n>> Agent Step: {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        ASCIIColors.yellow(f"\n>> Agent Step Start: {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
        result = params.get('result', '')
        # Only print a snippet of result to avoid overwhelming console for large outputs
        if isinstance(result, dict):
            result_str = json.dumps(result)[:150] + ("..." if len(json.dumps(result)) > 150 else "")
        else:
            result_str = str(result)[:150] + ("..." if len(str(result)) > 150 else "")
        ASCIIColors.green(f"<< Agent Step End: {chunk} -> Result: {result_str}")
    elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
        ASCIIColors.magenta(f"🤔 Agent Thought: {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
        tool_name = params.get('name', 'unknown_tool')
        tool_params = params.get('parameters', {})
        ASCIIColors.blue(f"🛠️ Agent Action: Called '{tool_name}' with {tool_params}")
    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_OUTPUT:
        ASCIIColors.cyan(f"👀 Agent Observation (Tool Output): {params.get('result', 'No result')}")
    elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)  # Final answer stream
    return True

# Create a temporary directory for the discussion DB and coding rules file
with tempfile.TemporaryDirectory() as tmpdir:
    db_path = Path(tmpdir) / "agent_discussion.db"

    # Create the coding rules file
    rules_path = Path(tmpdir) / "coding_rules.txt"
    rules_content = """
1. All Python functions must include a Google-style docstring.
2. Use type hints for all function parameters and return values.
3. The main execution block should be protected by `if __name__ == "__main__":`.
4. After defining a function, add a simple example of its usage inside the main block.
5. Print the output of the example usage to the console.
"""
    rules_path.write_text(rules_content.strip())
    ASCIIColors.yellow(f"Created temporary coding rules file at: {rules_path}")

    try:
        # --- 1. Load the knowledge base from the file ---
        coding_rules = rules_path.read_text()

        # --- 2. Define the Coder Agent Personality ---
        coder_personality = LollmsPersonality(
            name="Python Coder Agent",
            author="lollms-client",
            category="Coding",
            description="An agent that writes and executes Python code according to specific rules.",
            system_prompt=(
                "You are an expert Python programmer. Your task is to write clean, executable Python code based on the user's request. "
                "You MUST strictly follow all rules provided in the 'Personality Static Data' section. "
                "First, think about the plan. Then, use the `python_code_interpreter` tool to write and execute the code. "
                "Finally, present the code and its output to the user."
            ),
            # A) Attach the static knowledge base
            data_source=coding_rules,
            # B) Equip the agent with a code execution tool
            active_mcps=["python_code_interpreter"]
        )

        # --- 3. Initialize the Client and Discussion ---
        # A code-specialized model is recommended (e.g., codellama, deepseek-coder)
        # Ensure Ollama is running and 'codellama' model is pulled (e.g., ollama pull codellama)
        lc = LollmsClient(
            llm_binding_name="ollama",
            llm_binding_config={
                "model_name": "codellama",
                "host_address": "http://localhost:11434"
            },
            mcp_binding_name="local_mcp"  # Enable the local tool execution engine
        )
        # For agentic workflows, it's often good to have a persistent discussion
        db_manager = LollmsDataManager(f"sqlite:///{db_path}")
        discussion = LollmsDiscussion.create_new(lollms_client=lc, db_manager=db_manager)

        # --- 4. The User's Request ---
        user_prompt = "Write a Python function that takes two numbers and returns their sum."

        ASCIIColors.yellow(f"User Prompt: {user_prompt}")
        print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)

        # --- 5. Run the Agentic Chat Turn ---
        response = discussion.chat(
            user_message=user_prompt,
            personality=coder_personality,
            streaming_callback=agent_callback,
            max_llm_iterations=5,  # Limit iterations for faster demo
            tool_call_decision_temperature=0.0  # Make decision more deterministic
        )

        print("\n\n" + "="*50 + "\nAgent finished.\n" + "="*50)

        # --- 6. Inspect the results ---
        ai_message = response['ai_message']
        ASCIIColors.green("\n--- Final Answer from Agent ---")
        print(ai_message.content)

        ASCIIColors.magenta("\n--- Tool Calls Made (from metadata) ---")
        if "tool_calls" in ai_message.metadata:
            print(json.dumps(ai_message.metadata["tool_calls"], indent=2))
        else:
            print("No tool calls recorded in message metadata.")

    except Exception as e:
        ASCIIColors.error(f"An error occurred during agent execution: {e}")
        ASCIIColors.warning("Please ensure Ollama is running, 'codellama' model is pulled, and 'local_mcp' binding is available.")
        trace_exception(e)  # Provide detailed traceback
```

#### Step 3: What Happens Under the Hood

When you run `agent_example.py`, a sophisticated process unfolds:

1. **Initialization:** The `LollmsDiscussion.chat()` method is called with the `coder_personality`.
2. **Knowledge Injection:** The `chat` method sees that `personality.data_source` is a string. It automatically takes the content of `coding_rules.txt` and injects it into the discussion's data zones.
3. **Tool Activation:** The method also sees `personality.active_mcps`. It enables the `python_code_interpreter` tool for this turn.
4. **Context Assembly:** The `LollmsClient` assembles a rich prompt for the LLM that includes:
    * The personality's `system_prompt`.
    * The content of `coding_rules.txt` (from the data zones).
    * The list of available tools (including `python_code_interpreter`).
    * The user's request ("Write a function...").
5. **Reason and Act:** The LLM, now fully briefed, reasons that it needs to use the `python_code_interpreter` tool. It formulates the Python code *according to the rules it was given*.
6. **Tool Execution:** The `local_mcp` binding receives the code and executes it in a secure local environment. It captures any output (`stdout`, `stderr`) and results.
7. **Observation:** The execution results are sent back to the LLM as an "observation."
8. **Final Synthesis:** The LLM now has the user's request, the rules, the code it wrote, and the code's output. It synthesizes all of this into a final, comprehensive answer for the user.

This example showcases how `lollms-client` allows you to build powerful, knowledgeable, and capable agents by simply composing personalities with data and tools.

## Using LoLLMs Client with Different Bindings

`lollms-client` supports a wide range of LLM backends through its binding system. This section provides practical examples of how to initialize `LollmsClient` for each of the major supported bindings.

### A New Configuration Model

Configuration for all bindings has been unified. Instead of passing parameters like `host_address` or `model_name` directly to the `LollmsClient` constructor, you now pass them inside a single dictionary: `llm_binding_config`.

This approach provides a clean, consistent, and extensible way to manage settings for any backend. Each binding defines its own set of required and optional parameters (e.g., `host_address`, `model_name`, `service_key`, `n_gpu_layers`).

```python
# General configuration pattern
from lollms_client import LollmsClient
# ... other imports as needed

# lc = LollmsClient(
#     llm_binding_name="your_binding_name",
#     llm_binding_config={
#         "parameter_1_for_this_binding": "value_1",
#         "parameter_2_for_this_binding": "value_2",
#         # ... and so on
#     }
# )
```

---

### 1. Core and Local Server Bindings

These bindings connect to servers running on your local network, including the core LoLLMs server itself.

#### **LoLLMs (Default Binding)**

This connects to a running LoLLMs service, which acts as a powerful backend providing access to models, personalities, and tools. This is the default and most feature-rich way to use `lollms-client`.

**Prerequisites:**
* A LoLLMs server instance installed and running (e.g., `lollms-webui`).
* An API key can be generated from the LoLLMs web UI (under User Settings -> Security) if security is enabled.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

try:
    # The default port for a LoLLMs server is 9642 (a nod to The Hitchhiker's Guide to the Galaxy).
    # The API key can also be set via the LOLLMS_API_KEY environment variable.
    config = {
        "host_address": "http://localhost:9642",
        # "service_key": "your_lollms_api_key_here"  # Uncomment and replace if security is enabled
    }

    lc = LollmsClient(
        llm_binding_name="lollms",  # This is the default, so specifying it is optional
        llm_binding_config=config
    )

    response = lc.generate_text("What is the answer to life, the universe, and everything?")
    ASCIIColors.green(f"\nResponse from LoLLMs: {response}")

except ConnectionRefusedError:
    ASCIIColors.error("Connection refused. Is the LoLLMs server running at http://localhost:9642?")
except ValueError as ve:
    ASCIIColors.error(f"Initialization Error: {ve}")
except Exception as e:
    ASCIIColors.error(f"An unexpected error occurred: {e}")
```

#### **Ollama**

The `ollama` binding connects to a running Ollama server instance on your machine or network.

**Prerequisites:**
* [Ollama installed and running](https://ollama.com/).
* Models pulled, e.g., `ollama pull llama3`.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

try:
    # Configuration for a local Ollama server
    lc = LollmsClient(
        llm_binding_name="ollama",
        llm_binding_config={
            "model_name": "llama3",  # Or any other model you have pulled
            "host_address": "http://localhost:11434"  # Default Ollama address
        }
    )

    # Now you can use lc.generate_text(), lc.chat(), etc.
    response = lc.generate_text("Why is the sky blue?")
    ASCIIColors.green(f"\nResponse from Ollama: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing Ollama binding: {e}")
    ASCIIColors.info("Please ensure Ollama is installed, running, and the specified model is pulled.")
```

#### **PythonLlamaCpp (Local GGUF Models)**

The `pythonllamacpp` binding loads and runs GGUF model files directly using the powerful `llama-cpp-python` library. This is ideal for high-performance, local inference on CPU or GPU.

**Prerequisites:**
* A GGUF model file downloaded to your machine.
* `llama-cpp-python` installed. For GPU support, it must be compiled with the correct flags (e.g., `CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python`).

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os
from pathlib import Path

# Path to your GGUF model file
# IMPORTANT: Replace this with the actual path to your model file
# Example: MODEL_PATH = Path.home() / "models" / "your_model_name.gguf"
MODEL_PATH = Path("./path/to/your/model.gguf")

# Binding-specific configuration
config = {
    "model_path": str(MODEL_PATH),  # The path to the GGUF file
    "n_gpu_layers": -1,             # -1 for all layers to GPU, 0 for CPU
    "n_ctx": 4096,                  # Context size
    "seed": -1,                     # -1 for random seed
    "chat_format": "chatml"         # Or another format like 'llama-2' or 'mistral'
}

if not MODEL_PATH.exists():
    ASCIIColors.warning(f"Model file not found at: {MODEL_PATH}")
    ASCIIColors.warning("Skipping PythonLlamaCpp example. Please download a GGUF model and update MODEL_PATH.")
else:
    try:
        lc = LollmsClient(
            llm_binding_name="pythonllamacpp",
            llm_binding_config=config
        )

        response = lc.generate_text("Write a recipe for a great day.")
        ASCIIColors.green(f"\nResponse from PythonLlamaCpp: {response}")

    except ImportError:
        ASCIIColors.error("`llama-cpp-python` not installed. Please install it (`pip install llama-cpp-python`) to run this example.")
    except Exception as e:
        ASCIIColors.error(f"Error initializing PythonLlamaCpp binding: {e}")
        ASCIIColors.info("Please ensure the model path is correct and `llama-cpp-python` is correctly installed (with GPU support if desired).")
```

---

### 2. Cloud Service Bindings

These bindings connect to hosted LLM APIs from major providers.

#### **OpenAI**

Connects to the official OpenAI API to use models like GPT-4o, GPT-4, and GPT-3.5.

**Prerequisites:**
* An OpenAI API key (starts with `sk-...`). It's recommended to set this as an environment variable `OPENAI_API_KEY`.
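
One common way to provide the key is to export it in your shell before launching Python; the same pattern applies to the other cloud bindings below, with `GEMINI_API_KEY`, `ANTHROPIC_API_KEY`, and `OPENROUTER_API_KEY` respectively.

```bash
# Linux/macOS: set the key for the current shell session (replace with your real key)
export OPENAI_API_KEY="sk-..."

# Windows PowerShell equivalent
$Env:OPENAI_API_KEY = "sk-..."
```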

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["OPENAI_API_KEY"] = "your_openai_api_key_here"

try:
    if "OPENAI_API_KEY" not in os.environ:
        ASCIIColors.warning("OPENAI_API_KEY not set in the environment. Skipping OpenAI example.")
    else:
        lc = LollmsClient(
            llm_binding_name="openai",
            llm_binding_config={
                "model_name": "gpt-4o",  # Or "gpt-3.5-turbo"
                # "service_key": os.environ.get("OPENAI_API_KEY", "your_openai_api_key_here")
                # ^ service_key is optional if OPENAI_API_KEY env var is set
            }
        )

        response = lc.generate_text("What is the difference between AI and machine learning?")
        ASCIIColors.green(f"\nResponse from OpenAI: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing OpenAI binding: {e}")
    ASCIIColors.info("Please ensure your OpenAI API key is correctly set and you have access to the specified model.")
```

#### **Google Gemini**

Connects to Google's Gemini family of models via the Google AI Studio API.

**Prerequisites:**
* A Google AI Studio API key. It's recommended to set this as an environment variable `GEMINI_API_KEY`.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["GEMINI_API_KEY"] = "your_google_api_key_here"

try:
    if "GEMINI_API_KEY" not in os.environ:
        ASCIIColors.warning("GEMINI_API_KEY not set in the environment. Skipping Gemini example.")
    else:
        lc = LollmsClient(
            llm_binding_name="gemini",
            llm_binding_config={
                "model_name": "gemini-1.5-pro-latest",
                # "service_key": os.environ.get("GEMINI_API_KEY", "your_google_api_key_here")
            }
        )

        response = lc.generate_text("Summarize the plot of 'Dune' in three sentences.")
        ASCIIColors.green(f"\nResponse from Gemini: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing Gemini binding: {e}")
    ASCIIColors.info("Please ensure your Google AI Studio API key is correctly set and you have access to the specified model.")
```

#### **Anthropic Claude**

Connects to Anthropic's API to use the Claude family of models, including Claude 3.5 Sonnet, Opus, and Haiku.

**Prerequisites:**
* An Anthropic API key. It's recommended to set this as an environment variable `ANTHROPIC_API_KEY`.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["ANTHROPIC_API_KEY"] = "your_anthropic_api_key_here"

try:
    if "ANTHROPIC_API_KEY" not in os.environ:
        ASCIIColors.warning("ANTHROPIC_API_KEY not set in the environment. Skipping Claude example.")
    else:
        lc = LollmsClient(
            llm_binding_name="claude",
            llm_binding_config={
                "model_name": "claude-3-5-sonnet-20240620",
                # "service_key": os.environ.get("ANTHROPIC_API_KEY", "your_anthropic_api_key_here")
            }
        )

        response = lc.generate_text("What are the core principles of constitutional AI?")
        ASCIIColors.green(f"\nResponse from Claude: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing Claude binding: {e}")
    ASCIIColors.info("Please ensure your Anthropic API key is correctly set and you have access to the specified model.")
```
|
|
983
|
+
|
|
984
|
+
---
|
|
985
|
+
|
|
986
|
+
### 3. API Aggregator Bindings

These bindings connect to services that provide access to many different models through a single API.

#### **OpenRouter**

OpenRouter provides a unified, OpenAI-compatible interface to access models from dozens of providers (Google, Anthropic, Mistral, Groq, etc.) with one API key.

**Prerequisites:**
* An OpenRouter API key (starts with `sk-or-...`). It's recommended to set this as an environment variable `OPENROUTER_API_KEY`.

**Usage:**
Model names must be specified in the format `provider/model-name`.

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["OPENROUTER_API_KEY"] = "your_openrouter_api_key_here"

try:
    if "OPENROUTER_API_KEY" not in os.environ:
        ASCIIColors.warning("OPENROUTER_API_KEY not set in the environment. Skipping OpenRouter example.")
    else:
        lc = LollmsClient(
            llm_binding_name="open_router",
            llm_binding_config={
                "model_name": "anthropic/claude-3-haiku-20240307",
                # "open_router_api_key": os.environ.get("OPENROUTER_API_KEY")
            }
        )

        response = lc.generate_text("Explain what an API aggregator is, as if to a beginner.")
        ASCIIColors.green(f"\nResponse from OpenRouter: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing OpenRouter binding: {e}")
    ASCIIColors.info("Please ensure your OpenRouter API key is correctly set and you have access to the specified model.")
```

#### **Groq**

While Groq is a direct provider rather than an aggregator, it is listed here because it fills a similar role: it serves a range of popular open-source models through a single API, running them on custom LPU hardware for exceptionally fast inference.

**Prerequisites:**
* A Groq API key. It's recommended to set this as an environment variable `GROQ_API_KEY`.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["GROQ_API_KEY"] = "your_groq_api_key_here"

try:
    if "GROQ_API_KEY" not in os.environ:
        ASCIIColors.warning("GROQ_API_KEY not set in the environment. Skipping Groq example.")
    else:
        lc = LollmsClient(
            llm_binding_name="groq",
            llm_binding_config={
                "model_name": "llama3-8b-8192",  # Or "mixtral-8x7b-32768"
                # "groq_api_key": os.environ.get("GROQ_API_KEY")
            }
        )

        response = lc.generate_text("Write a 3-line poem about incredible speed.")
        ASCIIColors.green(f"\nResponse from Groq: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing Groq binding: {e}")
    ASCIIColors.info("Please ensure your Groq API key is correctly set and you have access to the specified model.")
```

#### **Hugging Face Inference API**

This connects to the serverless Hugging Face Inference API, allowing experimentation with thousands of open-source models without local hardware.

**Note:** This API can have "cold starts," so the first request might be slow (a simple retry sketch is shown after the example below).

**Prerequisites:**
* A Hugging Face User Access Token (starts with `hf_...`). It's recommended to set this as an environment variable `HF_API_KEY`.

**Usage:**

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors
import os

# Set your API key as an environment variable or directly in the config
# os.environ["HF_API_KEY"] = "your_hugging_face_token_here"

try:
    if "HF_API_KEY" not in os.environ:
        ASCIIColors.warning("HF_API_KEY not set in the environment. Skipping Hugging Face Inference API example.")
    else:
        lc = LollmsClient(
            llm_binding_name="hugging_face_inference_api",
            llm_binding_config={
                "model_name": "google/gemma-1.1-7b-it",  # Or other suitable models from HF
                # "hf_api_key": os.environ.get("HF_API_KEY")
            }
        )

        response = lc.generate_text("Write a short story about a robot who discovers music.")
        ASCIIColors.green(f"\nResponse from Hugging Face: {response}")

except Exception as e:
    ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
    ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")
```
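
Because of the cold-start behavior noted above, the first request to a serverless endpoint can fail or time out while the model is being loaded. The snippet below is a minimal retry sketch around `generate_text`; the helper name, retry count, and delay are illustrative choices, not part of the lollms-client API.

```python
import time
from ascii_colors import ASCIIColors

def generate_with_retries(lc, prompt, max_retries=3, delay_s=15):
    """Illustrative retry wrapper for endpoints that may need time to warm up."""
    last_error = None
    for attempt in range(1, max_retries + 1):
        try:
            return lc.generate_text(prompt)
        except Exception as e:  # a cold endpoint typically surfaces a timeout or availability error
            last_error = e
            ASCIIColors.warning(f"Attempt {attempt}/{max_retries} failed: {e}. Retrying in {delay_s}s...")
            time.sleep(delay_s)
    raise RuntimeError(f"All {max_retries} attempts failed") from last_error

# Usage, reusing the `lc` client built in the example above:
# story = generate_with_retries(lc, "Write a short story about a robot who discovers music.")
```
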
### Listing Available Models

You can query the active LLM binding to get a list of the models it supports or has available. The exact information returned depends on the binding (e.g., Ollama lists local models, OpenAI lists all of its API models).

```python
from lollms_client import LollmsClient
from ascii_colors import ASCIIColors

try:
    # Initialize a client for Ollama (or any other binding)
    lc = LollmsClient(
        llm_binding_name="ollama",
        llm_binding_config={
            "host_address": "http://localhost:11434"
            # model_name is not needed just to list models
        }
    )

    ASCIIColors.yellow("\nListing available models for the current binding:")
    available_models = lc.listModels()

    if isinstance(available_models, list):
        for model in available_models:
            # The model structure varies by binding; 'name' is a common field
            model_name = model.get('name', 'N/A')
            model_size = model.get('size', 'N/A')  # Common for Ollama
            print(f"- {model_name} (Size: {model_size})")
    elif isinstance(available_models, dict) and "error" in available_models:
        ASCIIColors.error(f"Error listing models: {available_models['error']}")
    else:
        print("Could not retrieve the model list, or it came back in an unexpected format.")

except Exception as e:
    ASCIIColors.error(f"An error occurred: {e}")
```
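
Once you have the list, you can feed a chosen model back into a new client by passing its name as `model_name` in the binding config. A minimal sketch, assuming the Ollama list-of-dicts shape shown above and that `available_models` was obtained as in the previous example:

```python
from lollms_client import LollmsClient

# Pick the first listed model and build a client that uses it
if isinstance(available_models, list) and available_models:
    chosen = available_models[0].get("name")
    if chosen:
        lc = LollmsClient(
            llm_binding_name="ollama",
            llm_binding_config={
                "host_address": "http://localhost:11434",
                "model_name": chosen,
            }
        )
        print(lc.generate_text(f"Say hello from {chosen}."))
```
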
### Long Context Processing (`long_context_processing`)

When a document, article, or transcript is too large to fit into a model's context window, the `long_context_processing` method is the solution. It intelligently chunks the text, summarizes or processes each piece, and then synthesizes those pieces into a final, coherent output.

```python
from lollms_client import LollmsClient, MSG_TYPE
from ascii_colors import ASCIIColors

# --- A very long text (imagine this is 10,000+ tokens) ---
long_text = """
The history of computing is a fascinating journey from mechanical contraptions to the powerful devices we use today.
It began with devices like the abacus, used for arithmetic tasks. In the 19th century, Charles Babbage conceived
the Analytical Engine, a mechanical computer that was never fully built but laid the groundwork for modern computing.
Ada Lovelace, daughter of Lord Byron, is often credited as the first computer programmer for her work on Babbage's Engine.
The 20th century saw the rise of electronic computers, starting with vacuum tubes and progressing to transistors and integrated circuits.
Early computers like ENIAC were massive machines, but technological advancements rapidly led to smaller, more powerful, and more accessible devices.
The invention of the microprocessor in 1971 by Intel's Ted Hoff was a pivotal moment, leading to the personal computer revolution.
Companies like Apple and Microsoft brought computing to the masses. The internet, initially ARPANET, transformed communication and information access globally.
In recent decades, cloud computing, big data, and artificial intelligence have become dominant themes. AI, particularly machine learning and deep learning,
has enabled breakthroughs in areas like image recognition, natural language processing, and autonomous systems.
Today, a new revolution is on the horizon with quantum computing, which promises to solve problems that are currently intractable
for even the most powerful supercomputers. Researchers are exploring qubits and quantum entanglement to create
machines that will redefine what is computationally possible, impacting fields from medicine to materials science.
This continuous evolution demonstrates humanity's relentless pursuit of greater computational power and intelligence.
""" * 10  # Simulate a very long text (repeated 10 times)

# --- Callback to see the process in action ---
def lcp_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, **kwargs):
    if msg_type in [MSG_TYPE.MSG_TYPE_STEP_START, MSG_TYPE.MSG_TYPE_STEP_END]:
        ASCIIColors.yellow(f">> {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP:
        ASCIIColors.cyan(f"  {chunk}")
    elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        # Suppress intermediate generation chunks; the final summary is printed below
        pass
    return True

try:
    lc = LollmsClient(llm_binding_name="ollama", llm_binding_config={"model_name": "llama3"})

    # The contextual prompt guides the focus of the processing
    context_prompt = "Summarize the text, focusing on the key technological milestones, notable figures, and future directions in computing history."

    ASCIIColors.blue("--- Starting Long Context Processing (Summarization) ---")

    final_summary = lc.long_context_processing(
        text_to_process=long_text,
        contextual_prompt=context_prompt,
        chunk_size_tokens=1000,  # Adjust based on your model's context size
        overlap_tokens=200,
        streaming_callback=lcp_callback,
        temperature=0.1  # Good for factual summarization
    )

    ASCIIColors.blue("\n--- Final Comprehensive Summary ---")
    ASCIIColors.green(final_summary)

except Exception as e:
    ASCIIColors.error(f"An error occurred during long context processing: {e}")
```
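
Conceptually, `long_context_processing` follows a map-reduce pattern: split the text into overlapping chunks, process each chunk against the contextual prompt, then synthesize the partial results into one answer. The sketch below illustrates that idea with plain `generate_text` calls; it is a simplified approximation for understanding, not the library's actual implementation (the real method handles token counting, overlap, and step reporting for you, and the function name and character-based chunking here are illustrative).

```python
def naive_long_context_processing(lc, text, contextual_prompt, chunk_chars=4000, overlap_chars=500):
    """Toy map-reduce summarizer that mimics the idea behind long_context_processing."""
    # Map: split into overlapping character-based chunks (the real method works in tokens)
    chunks, start = [], 0
    while start < len(text):
        chunks.append(text[start:start + chunk_chars])
        start += chunk_chars - overlap_chars

    # Process each chunk against the contextual prompt
    partial_results = []
    for i, chunk in enumerate(chunks, 1):
        prompt = f"{contextual_prompt}\n\nChunk {i}/{len(chunks)}:\n{chunk}"
        partial_results.append(lc.generate_text(prompt))

    # Reduce: synthesize the per-chunk outputs into a single coherent answer
    synthesis_prompt = (
        f"{contextual_prompt}\n\nCombine these partial results into one coherent answer:\n\n"
        + "\n\n".join(partial_results)
    )
    return lc.generate_text(synthesis_prompt)
```
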
## Contributing

Contributions are welcome! Whether it's bug reports, feature suggestions, documentation improvements, or new bindings, please feel free to open an issue or submit a pull request on our [GitHub repository](https://github.com/ParisNeo/lollms_client).

## License

This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LICENSE) file for details.

## Changelog

For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.