lollms-client 0.25.1__tar.gz → 0.25.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client might be problematic.
- {lollms_client-0.25.1/lollms_client.egg-info → lollms_client-0.25.6}/PKG-INFO +108 -7
- {lollms_client-0.25.1 → lollms_client-0.25.6}/README.md +107 -6
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/__init__.py +1 -1
- lollms_client-0.25.6/lollms_client/llm_bindings/gemini/__init__.py +501 -0
- lollms_client-0.25.6/lollms_client/llm_bindings/litellm/__init__.py +201 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_core.py +30 -16
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_discussion.py +28 -3
- {lollms_client-0.25.1 → lollms_client-0.25.6/lollms_client.egg-info}/PKG-INFO +108 -7
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client.egg-info/SOURCES.txt +2 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/LICENSE +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/article_summary/article_summary.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/console_discussion/console_app.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/console_discussion.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/deep_analyze/deep_analyse.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/function_calling_with_local_custom_mcp.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/generate_a_benchmark_for_safe_store.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/generate_and_speak/generate_and_speak.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/generate_game_sfx/generate_game_fx.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/generate_text_with_multihop_rag_example.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/gradio_chat_app.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/gradio_lollms_chat.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/internet_search_with_rag.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/lollms_discussions_test.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/mcp_examples/external_mcp.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/mcp_examples/local_mcp.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/mcp_examples/openai_mcp.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/simple_text_gen_test.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/simple_text_gen_with_image_test.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/test_local_models/local_chat.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_2_audio.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_2_image.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_2_image_diffusers.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_and_image_2_audio.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_gen.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/examples/text_gen_system_prompt.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_config.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_llm_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_personality.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_types.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/pyproject.toml +0 -0
- {lollms_client-0.25.1 → lollms_client-0.25.6}/setup.cfg +0 -0
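The headline additions in this release are two new LLM bindings: a Gemini binding (`llm_bindings/gemini/__init__.py`, +501 lines) and a LiteLLM binding (`llm_bindings/litellm/__init__.py`, +201 lines). Their diffs are not shown on this page, so the following is only a minimal sketch of how they would presumably be selected, assuming the binding names match the new directory names ("gemini", "litellm"); the constructor arguments mirror the `LollmsClient(...)` call in the README diff below, and the model name is a placeholder.

```python
# Hedged sketch only: binding name inferred from the new llm_bindings/gemini
# directory; "gemini-1.5-flash" is a placeholder model identifier.
from lollms_client import LollmsClient

lc = LollmsClient(
    binding_name="gemini",          # new in 0.25.6 (llm_bindings/gemini)
    model_name="gemini-1.5-flash",  # placeholder; use any model the binding exposes
)

# generate_text is the basic generation entry point used by the package's
# simple_text_gen examples listed above.
print(lc.generate_text("Say hello from the new Gemini binding."))
```

Swapping `binding_name="litellm"` (with a LiteLLM-style model string) should exercise the other new binding the same way.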
{lollms_client-0.25.1/lollms_client.egg-info → lollms_client-0.25.6}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.25.1
+Version: 0.25.6
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -169,6 +169,107 @@ except Exception as e:
 ```
 For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).
 
+### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+**Key Agent Capabilities:**
+
+* **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+* **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+* **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+* **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+* **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
+* **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+
+Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+```python
+import json
+from lollms_client import LollmsClient, MSG_TYPE
+from ascii_colors import ASCIIColors
+
+# 1. Define a mock RAG data store and retrieval function
+project_notes = {
+    "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+}
+
+def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+    """A simple keyword-based retriever for our mock data store."""
+    results = []
+    for key, text in project_notes.items():
+        if query.lower() in text.lower():
+            results.append({"source": key, "content": text})
+    return results[:top_k]
+
+# 2. Define a detailed streaming callback to visualize the agent's process
+def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+    if not params: params = {}
+    msg_id = params.get("id", "")
+
+    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+        ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+        ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+        if params.get('result'):
+            ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+        ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+        ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+        ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+        print(chunk, end="", flush=True)  # Final answer stream
+    return True
+
+try:
+    # 3. Initialize LollmsClient with an LLM and local tools enabled
+    lc = LollmsClient(
+        binding_name="ollama",        # Use Ollama
+        model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+        mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+    )
+
+    # 4. Define the user prompt and the RAG data store
+    prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+    rag_data_store = {
+        "project_notes": {"callable": retrieve_from_notes}
+    }
+
+    ASCIIColors.yellow(f"User Prompt: {prompt}")
+    print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+    # 5. Run the agent
+    agent_output = lc.generate_with_mcp_rag(
+        prompt=prompt,
+        use_data_store=rag_data_store,
+        use_mcps=["python_code_interpreter"],  # Make specific tools available
+        streaming_callback=agent_streaming_callback,
+        max_reasoning_steps=5
+    )
+
+    print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+    # 6. Print the final results
+    if agent_output.get("error"):
+        ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+    else:
+        ASCIIColors.green("\n--- Final Answer ---")
+        print(agent_output.get("final_answer"))
+
+        ASCIIColors.magenta("\n--- Tool Calls ---")
+        print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+        ASCIIColors.cyan("\n--- RAG Sources ---")
+        print(json.dumps(agent_output.get("sources", []), indent=2))
+
+except Exception as e:
+    ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+```
+
 ## Documentation
 
 For more in-depth information, please refer to:
@@ -186,7 +287,7 @@ graph LR
         LC -- Manages --> LLB[LLM Binding];
         LC -- Manages --> MCPB[MCP Binding];
         LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
-        LC -- Provides --> HighLevelOps[High-Level Ops
+        LC -- Provides --> HighLevelOps["High-Level Ops(summarize, deep_analyze etc.)"];
         LC -- Provides Access To --> DM[DiscussionManager];
         LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
     end
@@ -195,16 +296,16 @@ graph LR
         LLB --> LollmsServer[LoLLMs Server];
         LLB --> OllamaServer[Ollama];
        LLB --> OpenAPIServer[OpenAI API];
-        LLB --> LocalGGUF[Local GGUF<br>(pythonllamacpp / llamacpp server)];
-        LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
+        LLB --> LocalGGUF["Local GGUF<br>(pythonllamacpp / llamacpp server)"];
+        LLB --> LocalHF["Local HuggingFace<br>(transformers / vLLM)"];
     end
 
     MCP_Interaction --> MCPB;
-    MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
-    MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+    MCPB --> LocalTools["Local Python Tools<br>(via local_mcp)"];
+    MCPB --> RemoteTools["Remote MCP Tool Servers<br>(Future Potential)"];
 
 
-    ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
+    ModalityBindings --> ModalityServices["Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)"];
 ```
 
 * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
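The "Stateful Progress Tracking" bullet in the diff above is worth unpacking: because each `step_start`/`step_end` event carries a unique `id` in `params`, a caller can correlate the two events and time long-running tool calls. Below is a minimal sketch of that idea; only the `MSG_TYPE` values, the `params["id"]` field, and the callback signature shown in the diff are assumed, and the timing bookkeeping is illustrative.

```python
import time
from lollms_client import MSG_TYPE

active_steps = {}  # step id -> (description, start time)

def ui_callback(chunk, msg_type, params=None, metadata=None) -> bool:
    params = params or {}
    step_id = params.get("id", "")
    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        # Remember when this step began so the matching step_end can be timed
        active_steps[step_id] = (chunk, time.time())
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
        desc, started = active_steps.pop(step_id, (chunk, time.time()))
        print(f"Step {step_id!r} ({desc}) finished in {time.time() - started:.2f}s")
    return True  # the README example always returns True to keep streaming
```

Passing `ui_callback` as `streaming_callback` in place of the README's print-oriented callback would give a progress model a UI can render directly.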
{lollms_client-0.25.1 → lollms_client-0.25.6}/README.md

@@ -138,6 +138,107 @@ except Exception as e:
 ```
 For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).
 
+### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+**Key Agent Capabilities:**
+
+* **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+* **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+* **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+* **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+* **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
+* **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+
+Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+```python
+import json
+from lollms_client import LollmsClient, MSG_TYPE
+from ascii_colors import ASCIIColors
+
+# 1. Define a mock RAG data store and retrieval function
+project_notes = {
+    "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+}
+
+def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+    """A simple keyword-based retriever for our mock data store."""
+    results = []
+    for key, text in project_notes.items():
+        if query.lower() in text.lower():
+            results.append({"source": key, "content": text})
+    return results[:top_k]
+
+# 2. Define a detailed streaming callback to visualize the agent's process
+def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+    if not params: params = {}
+    msg_id = params.get("id", "")
+
+    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+        ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+        ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+        if params.get('result'):
+            ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+        ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+        ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+        ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+    elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+        print(chunk, end="", flush=True)  # Final answer stream
+    return True
+
+try:
+    # 3. Initialize LollmsClient with an LLM and local tools enabled
+    lc = LollmsClient(
+        binding_name="ollama",        # Use Ollama
+        model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+        mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+    )
+
+    # 4. Define the user prompt and the RAG data store
+    prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+    rag_data_store = {
+        "project_notes": {"callable": retrieve_from_notes}
+    }
+
+    ASCIIColors.yellow(f"User Prompt: {prompt}")
+    print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+    # 5. Run the agent
+    agent_output = lc.generate_with_mcp_rag(
+        prompt=prompt,
+        use_data_store=rag_data_store,
+        use_mcps=["python_code_interpreter"],  # Make specific tools available
+        streaming_callback=agent_streaming_callback,
+        max_reasoning_steps=5
+    )
+
+    print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+    # 6. Print the final results
+    if agent_output.get("error"):
+        ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+    else:
+        ASCIIColors.green("\n--- Final Answer ---")
+        print(agent_output.get("final_answer"))
+
+        ASCIIColors.magenta("\n--- Tool Calls ---")
+        print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+        ASCIIColors.cyan("\n--- RAG Sources ---")
+        print(json.dumps(agent_output.get("sources", []), indent=2))
+
+except Exception as e:
+    ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+```
+
 ## Documentation
 
 For more in-depth information, please refer to:
@@ -155,7 +256,7 @@ graph LR
         LC -- Manages --> LLB[LLM Binding];
         LC -- Manages --> MCPB[MCP Binding];
         LC -- Orchestrates --> MCP_Interaction[generate_with_mcp];
-        LC -- Provides --> HighLevelOps[High-Level Ops
+        LC -- Provides --> HighLevelOps["High-Level Ops(summarize, deep_analyze etc.)"];
         LC -- Provides Access To --> DM[DiscussionManager];
         LC -- Provides Access To --> ModalityBindings[TTS, TTI, STT etc.];
     end
@@ -164,16 +265,16 @@ graph LR
         LLB --> LollmsServer[LoLLMs Server];
         LLB --> OllamaServer[Ollama];
         LLB --> OpenAPIServer[OpenAI API];
-        LLB --> LocalGGUF[Local GGUF<br>(pythonllamacpp / llamacpp server)];
-        LLB --> LocalHF[Local HuggingFace<br>(transformers / vLLM)];
+        LLB --> LocalGGUF["Local GGUF<br>(pythonllamacpp / llamacpp server)"];
+        LLB --> LocalHF["Local HuggingFace<br>(transformers / vLLM)"];
     end
 
     MCP_Interaction --> MCPB;
-    MCPB --> LocalTools[Local Python Tools<br>(via local_mcp)];
-    MCPB --> RemoteTools[Remote MCP Tool Servers<br>(Future Potential)];
+    MCPB --> LocalTools["Local Python Tools<br>(via local_mcp)"];
+    MCPB --> RemoteTools["Remote MCP Tool Servers<br>(Future Potential)"];
 
 
-    ModalityBindings --> ModalityServices[Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)];
+    ModalityBindings --> ModalityServices["Modality Services<br>(e.g., LoLLMs Server TTS/TTI, local Bark/XTTS)"];
 ```
 
 * **`LollmsClient`**: The central class for all interactions. It holds the currently active LLM binding, an optional MCP binding, and provides access to modality bindings and high-level operations.
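Both copies of this change above (the README body is embedded verbatim in PKG-INFO) pin down the retriever contract for `use_data_store`: each store maps a name to `{"callable": fn}`, where `fn(query, top_k, min_similarity)` returns a list of `{"source": ..., "content": ...}` dicts. A small adapter makes it easy to plug any existing search function into that shape; `search_fn` here is a hypothetical stand-in for your own index.

```python
# Sketch: adapting an arbitrary search function to the use_data_store
# contract shown in the README diff above. search_fn is hypothetical.
def make_retriever(search_fn):
    def retrieve(query: str, top_k: int = 5, min_similarity: float = 0.5):
        hits = search_fn(query)  # expected: iterable of {"id": ..., "text": ...}
        return [{"source": h["id"], "content": h["text"]} for h in hits[:top_k]]
    return retrieve

# Usable anywhere the README builds rag_data_store:
rag_data_store = {"my_notes": {"callable": make_retriever(lambda q: [])}}
```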
{lollms_client-0.25.1 → lollms_client-0.25.6}/lollms_client/__init__.py

@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
 
-__version__ = "0.25.1"
+__version__ = "0.25.6" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [