lollms-client 0.25.5__tar.gz → 0.25.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.
Files changed (93)
  1. {lollms_client-0.25.5/lollms_client.egg-info → lollms_client-0.25.6}/PKG-INFO +102 -1
  2. {lollms_client-0.25.5 → lollms_client-0.25.6}/README.md +102 -1
  3. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/__init__.py +1 -1
  4. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_core.py +2 -2
  5. {lollms_client-0.25.5 → lollms_client-0.25.6/lollms_client.egg-info}/PKG-INFO +102 -1
  6. {lollms_client-0.25.5 → lollms_client-0.25.6}/LICENSE +0 -0
  7. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/article_summary/article_summary.py +0 -0
  8. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/console_discussion/console_app.py +0 -0
  9. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/console_discussion.py +0 -0
  10. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/deep_analyze/deep_analyse.py +0 -0
  11. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  12. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/function_calling_with_local_custom_mcp.py +0 -0
  13. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/generate_a_benchmark_for_safe_store.py +0 -0
  14. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/generate_and_speak/generate_and_speak.py +0 -0
  15. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/generate_game_sfx/generate_game_fx.py +0 -0
  16. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/generate_text_with_multihop_rag_example.py +0 -0
  17. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/gradio_chat_app.py +0 -0
  18. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/gradio_lollms_chat.py +0 -0
  19. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/internet_search_with_rag.py +0 -0
  20. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/lollms_discussions_test.py +0 -0
  21. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/mcp_examples/external_mcp.py +0 -0
  22. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/mcp_examples/local_mcp.py +0 -0
  23. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/mcp_examples/openai_mcp.py +0 -0
  24. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/mcp_examples/run_remote_mcp_example_v2.py +0 -0
  25. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/mcp_examples/run_standard_mcp_example.py +0 -0
  26. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/simple_text_gen_test.py +0 -0
  27. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/simple_text_gen_with_image_test.py +0 -0
  28. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/test_local_models/local_chat.py +0 -0
  29. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_2_audio.py +0 -0
  30. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_2_image.py +0 -0
  31. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_2_image_diffusers.py +0 -0
  32. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_and_image_2_audio.py +0 -0
  33. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_gen.py +0 -0
  34. {lollms_client-0.25.5 → lollms_client-0.25.6}/examples/text_gen_system_prompt.py +0 -0
  35. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/__init__.py +0 -0
  36. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  37. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  38. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  39. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  40. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  41. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  42. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  43. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  44. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  45. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  46. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  47. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_config.py +0 -0
  48. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_discussion.py +0 -0
  49. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_js_analyzer.py +0 -0
  50. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_llm_binding.py +0 -0
  51. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_mcp_binding.py +0 -0
  52. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_personality.py +0 -0
  53. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_python_analyzer.py +0 -0
  54. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_stt_binding.py +0 -0
  55. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_tti_binding.py +0 -0
  56. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_ttm_binding.py +0 -0
  57. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_tts_binding.py +0 -0
  58. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_ttv_binding.py +0 -0
  59. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_types.py +0 -0
  60. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/lollms_utilities.py +0 -0
  61. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  62. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  63. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  64. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  65. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  66. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  67. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  68. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/stt_bindings/__init__.py +0 -0
  69. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  70. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  71. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  72. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tti_bindings/__init__.py +0 -0
  73. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
  74. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
  75. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
  76. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  77. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttm_bindings/__init__.py +0 -0
  78. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  79. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
  80. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  81. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tts_bindings/__init__.py +0 -0
  82. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tts_bindings/bark/__init__.py +0 -0
  83. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  84. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  85. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  86. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttv_bindings/__init__.py +0 -0
  87. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  88. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client.egg-info/SOURCES.txt +0 -0
  89. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client.egg-info/dependency_links.txt +0 -0
  90. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client.egg-info/requires.txt +0 -0
  91. {lollms_client-0.25.5 → lollms_client-0.25.6}/lollms_client.egg-info/top_level.txt +0 -0
  92. {lollms_client-0.25.5 → lollms_client-0.25.6}/pyproject.toml +0 -0
  93. {lollms_client-0.25.5 → lollms_client-0.25.6}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.25.5
+ Version: 0.25.6
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -169,6 +169,107 @@ except Exception as e:
  ```
  For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).

+ ### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+ For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+ **Key Agent Capabilities:**
+
+ * **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+ * **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+ * **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+ * **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+ * **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
+ * **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+
+ Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+ ```python
+ import json
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors
+
+ # 1. Define a mock RAG data store and retrieval function
+ project_notes = {
+     "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+ }
+
+ def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+     """A simple keyword-based retriever for our mock data store."""
+     results = []
+     for key, text in project_notes.items():
+         if query.lower() in text.lower():
+             results.append({"source": key, "content": text})
+     return results[:top_k]
+
+ # 2. Define a detailed streaming callback to visualize the agent's process
+ def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+     if not params: params = {}
+     msg_id = params.get("id", "")
+
+     if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+         if params.get('result'):
+             ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+         ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+         ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+         ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         print(chunk, end="", flush=True) # Final answer stream
+     return True
+
+ try:
+     # 3. Initialize LollmsClient with an LLM and local tools enabled
+     lc = LollmsClient(
+         binding_name="ollama",        # Use Ollama
+         model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+         mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+     )
+
+     # 4. Define the user prompt and the RAG data store
+     prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+     rag_data_store = {
+         "project_notes": {"callable": retrieve_from_notes}
+     }
+
+     ASCIIColors.yellow(f"User Prompt: {prompt}")
+     print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+     # 5. Run the agent
+     agent_output = lc.generate_with_mcp_rag(
+         prompt=prompt,
+         use_data_store=rag_data_store,
+         use_mcps=["python_code_interpreter"], # Make specific tools available
+         streaming_callback=agent_streaming_callback,
+         max_reasoning_steps=5
+     )
+
+     print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+     # 6. Print the final results
+     if agent_output.get("error"):
+         ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+     else:
+         ASCIIColors.green("\n--- Final Answer ---")
+         print(agent_output.get("final_answer"))
+
+         ASCIIColors.magenta("\n--- Tool Calls ---")
+         print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+         ASCIIColors.cyan("\n--- RAG Sources ---")
+         print(json.dumps(agent_output.get("sources", []), indent=2))
+
+ except Exception as e:
+     ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+
+ ```
+
  ## Documentation

  For more in-depth information, please refer to:
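
A note on the `use_data_store` argument in the example above: each store entry is a plain Python callable, so the mock keyword matcher can be swapped for any retriever that honors the same signature and returns `{"source", "content"}` dictionaries. Below is a minimal sketch of such a drop-in replacement; the `documents` corpus and `lexical_score` helper are hypothetical, and only the `query`/`top_k`/`min_similarity` parameters from the example's signature are assumed.

```python
from difflib import SequenceMatcher

# Hypothetical corpus standing in for a real vector store
documents = {
    "budget_memo": "Project Phoenix has a current budget of $500,000.",
    "growth_memo": "Expected quarterly growth rate is 15%.",
}

def lexical_score(query: str, text: str) -> float:
    # Crude lexical similarity as a stand-in for embedding similarity
    return SequenceMatcher(None, query.lower(), text.lower()).ratio()

def retrieve_ranked(query: str, top_k: int = 3, min_similarity: float = 0.5):
    """Same contract as retrieve_from_notes: a list of {"source", "content"} dicts."""
    scored = sorted(
        ((lexical_score(query, text), key, text) for key, text in documents.items()),
        reverse=True,
    )
    return [
        {"source": key, "content": text}
        for score, key, text in scored
        if score >= min_similarity
    ][:top_k]

# Drop-in replacement for the mock store in the example above:
# rag_data_store = {"project_notes": {"callable": retrieve_ranked}}
```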
@@ -138,6 +138,107 @@ except Exception as e:
  ```
  For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).

+ ### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+ For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+ **Key Agent Capabilities:**
+
+ * **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+ * **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+ * **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+ * **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+ * **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
+ * **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+
+ Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+ ```python
+ import json
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors
+
+ # 1. Define a mock RAG data store and retrieval function
+ project_notes = {
+     "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+ }
+
+ def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+     """A simple keyword-based retriever for our mock data store."""
+     results = []
+     for key, text in project_notes.items():
+         if query.lower() in text.lower():
+             results.append({"source": key, "content": text})
+     return results[:top_k]
+
+ # 2. Define a detailed streaming callback to visualize the agent's process
+ def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+     if not params: params = {}
+     msg_id = params.get("id", "")
+
+     if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+         if params.get('result'):
+             ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+         ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+         ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+         ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         print(chunk, end="", flush=True) # Final answer stream
+     return True
+
+ try:
+     # 3. Initialize LollmsClient with an LLM and local tools enabled
+     lc = LollmsClient(
+         binding_name="ollama",        # Use Ollama
+         model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+         mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+     )
+
+     # 4. Define the user prompt and the RAG data store
+     prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+     rag_data_store = {
+         "project_notes": {"callable": retrieve_from_notes}
+     }
+
+     ASCIIColors.yellow(f"User Prompt: {prompt}")
+     print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+     # 5. Run the agent
+     agent_output = lc.generate_with_mcp_rag(
+         prompt=prompt,
+         use_data_store=rag_data_store,
+         use_mcps=["python_code_interpreter"], # Make specific tools available
+         streaming_callback=agent_streaming_callback,
+         max_reasoning_steps=5
+     )
+
+     print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+     # 6. Print the final results
+     if agent_output.get("error"):
+         ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+     else:
+         ASCIIColors.green("\n--- Final Answer ---")
+         print(agent_output.get("final_answer"))
+
+         ASCIIColors.magenta("\n--- Tool Calls ---")
+         print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+         ASCIIColors.cyan("\n--- RAG Sources ---")
+         print(json.dumps(agent_output.get("sources", []), indent=2))
+
+ except Exception as e:
+     ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+
+ ```
+
  ## Documentation

  For more in-depth information, please refer to:
@@ -205,4 +306,4 @@ This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LIC

  ## Changelog

- For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
+ For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager


- __version__ = "0.25.5" # Updated version
+ __version__ = "0.25.6" # Updated version

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
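
Since `__version__` is defined at the package's top level (per the hunk above), the installed release can be checked at runtime:

```python
import lollms_client

# Should print 0.25.6 after upgrading from 0.25.5
print(lollms_client.__version__)
```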
@@ -1595,7 +1595,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
  formatted_tools_list += "\n**request_clarification**:\nUse if the user's request is ambiguous and you can not infer a clear idea of his intent. this tool has no parameters."
  formatted_tools_list += "\n**final_answer**:\nUse when you are ready to respond to the user. this tool has no parameters."

- if discovery_step_id: log_event("Discovering tools",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)
+ if discovery_step_id: log_event("**Discovering tools**",MSG_TYPE.MSG_TYPE_STEP_END, event_id=discovery_step_id)

  # --- 2. Dynamic Reasoning Loop ---
  for i in range(max_reasoning_steps):
@@ -1755,7 +1755,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th

  tool_calls_this_turn.append({"name": tool_name, "params": tool_params, "result": tool_result})
  current_scratchpad += f"\n\n### Step {i+1}: Observation\n- **Action:** Called `{tool_name}`\n- **Result:**\n{observation_text}"
- log_event(f"Observation: Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)
+ log_event(f"**Observation**: Result from `{tool_name}`:\n{dict_to_markdown(sanitized_result)}", MSG_TYPE.MSG_TYPE_OBSERVATION)

  if reasoning_step_id: log_event(f"**Reasoning Step {i+1}/{max_reasoning_steps}**", MSG_TYPE.MSG_TYPE_STEP_END, event_id = reasoning_step_id)
  except Exception as ex:
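
The two `log_event` changes above only bold the markdown text of events the agent already streams; applications receive them through the same callback protocol used in the README example. As a minimal sketch of consuming those events (assuming only the `MSG_TYPE` values and the `params["id"]` field that the README example relies on), a UI could pair step-start and step-end events by ID to time long-running tool calls:

```python
import time
from lollms_client import MSG_TYPE

open_steps = {}  # event id -> start timestamp

def timing_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
    params = params or {}
    msg_id = params.get("id", "")
    if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
        open_steps[msg_id] = time.monotonic()  # remember when this step began
    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
        started = open_steps.pop(msg_id, None)
        if started is not None:
            print(f"step {msg_id!r} ({chunk}) took {time.monotonic() - started:.2f}s")
    return True  # keep streaming, as in the README example
```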
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.25.5
+ Version: 0.25.6
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -169,6 +169,107 @@ except Exception as e:
  ```
  For a comprehensive guide on function calling and setting up tools, please refer to the [Usage Guide (DOC_USE.md)](DOC_USE.md).

+ ### 🤖 Advanced Agentic Generation with RAG: `generate_with_mcp_rag`
+
+ For more complex tasks, `generate_with_mcp_rag` provides a powerful, built-in agent that uses a ReAct-style (Reason, Act) loop. This agent can reason about a user's request, use tools (MCP), retrieve information from knowledge bases (RAG), and adapt its plan based on the results of its actions.
+
+ **Key Agent Capabilities:**
+
+ * **Observe-Think-Act Loop:** The agent iteratively reviews its progress, thinks about the next logical step, and takes an action (like calling a tool).
+ * **Tool Integration (MCP):** Can use any available MCP tools, such as searching the web or executing code.
+ * **Retrieval-Augmented Generation (RAG):** You can provide one or more "data stores" (knowledge bases). The agent gains a `research::{store_name}` tool to query these stores for relevant information.
+ * **In-Memory Code Generation:** The agent has a special `generate_code` tool. This allows it to first write a piece of code (e.g., a complex Python script) and then pass that code to another tool (e.g., `python_code_interpreter`) in a subsequent step.
+ * **Stateful Progress Tracking:** Designed for rich UI experiences, it emits `step_start` and `step_end` events with unique IDs via the streaming callback. This allows an application to track the agent's individual thoughts and long-running tool calls in real-time.
+ * **Self-Correction:** Includes a `refactor_scratchpad` tool for the agent to clean up its own thought process if it becomes cluttered.
+
+ Here is an example of using the agent to answer a question by first performing RAG on a custom knowledge base and then using the retrieved information to generate and execute code.
+
+ ```python
+ import json
+ from lollms_client import LollmsClient, MSG_TYPE
+ from ascii_colors import ASCIIColors
+
+ # 1. Define a mock RAG data store and retrieval function
+ project_notes = {
+     "project_phoenix_details": "Project Phoenix has a current budget of $500,000 and an expected quarterly growth rate of 15%."
+ }
+
+ def retrieve_from_notes(query: str, top_k: int = 1, min_similarity: float = 0.5):
+     """A simple keyword-based retriever for our mock data store."""
+     results = []
+     for key, text in project_notes.items():
+         if query.lower() in text.lower():
+             results.append({"source": key, "content": text})
+     return results[:top_k]
+
+ # 2. Define a detailed streaming callback to visualize the agent's process
+ def agent_streaming_callback(chunk: str, msg_type: MSG_TYPE, params: dict = None, metadata: list = None) -> bool:
+     if not params: params = {}
+     msg_id = params.get("id", "")
+
+     if msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+         ASCIIColors.yellow(f"\n>> Agent Step Start [ID: {msg_id}]: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+         ASCIIColors.green(f"<< Agent Step End [ID: {msg_id}]: {chunk}")
+         if params.get('result'):
+             ASCIIColors.cyan(f"   Result: {json.dumps(params['result'], indent=2)}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_THOUGHT_CONTENT:
+         ASCIIColors.magenta(f"\n🤔 Agent Thought: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_TOOL_CALL:
+         ASCIIColors.blue(f"\n🛠️ Agent Action: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_OBSERVATION:
+         ASCIIColors.cyan(f"\n👀 Agent Observation: {chunk}")
+     elif msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
+         print(chunk, end="", flush=True) # Final answer stream
+     return True
+
+ try:
+     # 3. Initialize LollmsClient with an LLM and local tools enabled
+     lc = LollmsClient(
+         binding_name="ollama",        # Use Ollama
+         model_name="llama3",          # Or any capable model like mistral, gemma, etc.
+         mcp_binding_name="local_mcp"  # Enable local tools like python_code_interpreter
+     )
+
+     # 4. Define the user prompt and the RAG data store
+     prompt = "Based on my notes about Project Phoenix, write and run a Python script to calculate its projected budget after two quarters."
+
+     rag_data_store = {
+         "project_notes": {"callable": retrieve_from_notes}
+     }
+
+     ASCIIColors.yellow(f"User Prompt: {prompt}")
+     print("\n" + "="*50 + "\nAgent is now running...\n" + "="*50)
+
+     # 5. Run the agent
+     agent_output = lc.generate_with_mcp_rag(
+         prompt=prompt,
+         use_data_store=rag_data_store,
+         use_mcps=["python_code_interpreter"], # Make specific tools available
+         streaming_callback=agent_streaming_callback,
+         max_reasoning_steps=5
+     )
+
+     print("\n" + "="*50 + "\nAgent finished.\n" + "="*50)
+
+     # 6. Print the final results
+     if agent_output.get("error"):
+         ASCIIColors.error(f"\nAgent Error: {agent_output['error']}")
+     else:
+         ASCIIColors.green("\n--- Final Answer ---")
+         print(agent_output.get("final_answer"))
+
+         ASCIIColors.magenta("\n--- Tool Calls ---")
+         print(json.dumps(agent_output.get("tool_calls", []), indent=2))
+
+         ASCIIColors.cyan("\n--- RAG Sources ---")
+         print(json.dumps(agent_output.get("sources", []), indent=2))
+
+ except Exception as e:
+     ASCIIColors.red(f"\nAn unexpected error occurred: {e}")
+
+ ```
+
  ## Documentation

  For more in-depth information, please refer to: