lollms-client 0.23.0__py3-none-any.whl → 0.24.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client might be problematic.

@@ -0,0 +1,266 @@
+ import os
+ import shutil
+ import subprocess
+ import sys
+ import json
+ from pathlib import Path
+ from typing import Optional, List, Dict, Any
+
+ # Correctly import all necessary classes from the lollms_client package
+ from lollms_client import LollmsClient, LollmsDataManager, LollmsDiscussion, LollmsPersonality
+ from ascii_colors import ASCIIColors, trace_exception
+
+ # --- Configuration ---
+ MAX_CONTEXT_SIZE_FOR_TEST = 2048 # Increased for agentic turns
+
+ # Database and workspace configuration
+ WORKSPACE_DIR = Path("./test_workspace_agentic")
+ DATABASE_PATH = f"sqlite:///{WORKSPACE_DIR / 'test_discussion_agentic.db'}"
+ DISCUSSION_ID = "console-agentic-test-1" # Use a fixed ID for easy resumption
+
+ # --- MOCK KNOWLEDGE BASE for RAG ---
+ MOCK_KNOWLEDGE_BASE = {
+     "python_basics.md": [
+         {"chunk_id": 1, "text": "Python is a high-level, interpreted programming language known for its readability. It was created by Guido van Rossum and released in 1991."},
+         {"chunk_id": 2, "text": "Key features of Python include dynamic typing, garbage collection, and a large standard library. It supports procedural, object-oriented, and functional programming."},
+     ],
+     "javascript_info.js": [
+         {"chunk_id": 1, "text": "JavaScript is a scripting language for front-end web development. It is also used in back-end development (Node.js)."},
+         {"chunk_id": 2, "text": "Popular JavaScript frameworks include React, Angular, and Vue.js."},
+     ],
+     "ai_concepts.txt": [
+         {"chunk_id": 1, "text": "Retrieval Augmented Generation (RAG) is an AI framework for improving LLM responses by grounding the model on external knowledge sources."},
+     ]
+ }
+
+ # --- Dummy MCP Server Scripts ---
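+ # Each script below is written into the workspace by start_mcp_servers() and launched as a
+ # separate process; both expose a single tool over FastMCP's streamable-http transport.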
+ TIME_SERVER_PY = """
+ import asyncio
+ from datetime import datetime
+ from mcp.server.fastmcp import FastMCP
+ mcp_server = FastMCP("TimeMCP", description="A server that provides the current time.", host="localhost",
+                      port=9624,
+                      log_level="DEBUG")
+ @mcp_server.tool()
+ def get_current_time(user_id: str = "unknown"):
+     return {"time": datetime.now().isoformat(), "user_id": user_id}
+ if __name__ == "__main__": mcp_server.run(transport="streamable-http")
+ """
+ CALCULATOR_SERVER_PY = """
+ import asyncio
+ from typing import List, Union
+ from mcp.server.fastmcp import FastMCP
+ mcp_server = FastMCP("CalculatorMCP", description="A server that adds lists of numbers.", host="localhost",
+                      port=9625,
+                      log_level="DEBUG")
+ @mcp_server.tool()
+ def add_numbers(numbers: List[Union[int, float]]):
+     if not isinstance(numbers, list): return {"error": "Input must be a list"}
+     return {"sum": sum(numbers)}
+ if __name__ == "__main__": mcp_server.run(transport="streamable-http")
+ """
+
+ # --- RAG Mock Function ---
+ def mock_rag_query_function(query_text: str, top_k: int = 3, **kwargs) -> List[Dict[str, Any]]:
+     ASCIIColors.magenta(f"\n [MOCK RAG] Querying knowledge base for: '{query_text}'")
+     results = []
+     query_lower = query_text.lower()
+     for file_path, chunks in MOCK_KNOWLEDGE_BASE.items():
+         for chunk in chunks:
+             if any(word in chunk["text"].lower() for word in query_lower.split() if len(word) > 2):
+                 results.append({"file_path": file_path, "chunk_text": chunk["text"]})
+     ASCIIColors.magenta(f" [MOCK RAG] Found {len(results[:top_k])} relevant chunks.")
+     return results[:top_k]
+
+ def start_mcp_servers():
+     """Starts the dummy MCP servers in the background."""
+     ASCIIColors.yellow("--- Starting background MCP servers ---")
+     server_dir = WORKSPACE_DIR / "mcp_servers"
+     server_dir.mkdir(exist_ok=True, parents=True)
+
+     (server_dir / "time_server.py").write_text(TIME_SERVER_PY)
+     (server_dir / "calculator_server.py").write_text(CALCULATOR_SERVER_PY)
+
+     procs = []
+     procs.append(subprocess.Popen([sys.executable, str(server_dir / "time_server.py")], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
+     procs.append(subprocess.Popen([sys.executable, str(server_dir / "calculator_server.py")], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL))
+     ASCIIColors.yellow("--- MCP servers launched ---")
+     return procs
+
+ def setup_client_and_discussion() -> LollmsDiscussion:
+     """Sets up the LollmsClient with MCP, the DB manager, and the discussion."""
+     print("--- Setting up Lollms Environment ---")
+     WORKSPACE_DIR.mkdir(exist_ok=True)
+
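+     # The server URLs below must match the dummy servers launched by start_mcp_servers()
+     # (port 9624 for the time server, 9625 for the calculator).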
+     mcp_config = {
+         "servers_infos": {
+             "time_machine": {"server_url": "http://localhost:9624/mcp"},
+             "calc_unit": {"server_url": "http://localhost:9625/mcp"},
+         }
+     }
+
+     try:
+         client = LollmsClient(
+             "ollama",
+             model_name="mistral-nemo:latest",
+             mcp_binding_name="remote_mcp",
+             mcp_binding_config=mcp_config
+         )
+     except Exception as e:
+         trace_exception(e)
+         print("\n---FATAL ERROR---")
+         print("Could not initialize LollmsClient. Ensure Ollama is running and mcp is installed.")
+         sys.exit(1)
+
+     print(f"-> Using model: {client.binding.model_name}")
+     print(f"-> Using MCP binding: {client.mcp.binding_name}")
+
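+     # Open (or create) the discussion database, then resume the fixed-ID discussion if it exists.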
+     db_manager = LollmsDataManager(db_path=DATABASE_PATH)
+     discussion = db_manager.get_discussion(client, DISCUSSION_ID)
+
+     if discussion:
+         print(f"-> Resuming discussion (ID: {DISCUSSION_ID})")
+         discussion.max_context_size = MAX_CONTEXT_SIZE_FOR_TEST
+     else:
+         print(f"-> Creating new discussion (ID: {DISCUSSION_ID})")
+         discussion = LollmsDiscussion.create_new(
+             lollms_client=client,
+             db_manager=db_manager,
+             id=DISCUSSION_ID,
+             title="Console Agentic Test",
+             max_context_size=MAX_CONTEXT_SIZE_FOR_TEST
+         )
+
+     print("--- Setup Complete. Ready to chat! ---\n")
+     return discussion
+
+ def print_help():
+     print("\n--- Commands ---")
+     print("!agent <prompt>  - Run a prompt using all available tools (MCP).")
+     print("!rag <prompt>    - Run a prompt using the mock knowledge base (RAG).")
+     print("!both <prompt>   - Run a prompt using both MCP tools and RAG.")
+     print("!status          - Show current discussion state (pruning, message count).")
+     print("!regen           - Regenerate the last AI response.")
+     print("!help            - Show this help message.")
+     print("!exit            - Exit the application.")
+     print("----------------\n")
+
+ def print_agentic_results(response_dict):
+     """Renders a beautiful report of the agent's turn."""
+     ai_message = response_dict.get('ai_message')
+     if not ai_message:
+         return
+
+     ASCIIColors.cyan("\n" + "="*22 + " Agentic Turn Report " + "="*22)
+
+     # --- Final Answer ---
+     ASCIIColors.blue("\nFinal Answer:")
+     ASCIIColors.green(f" {ai_message.content}")
+
+     # --- Agent's Internal Monologue (The Scratchpad) ---
+     if ai_message.scratchpad:
+         ASCIIColors.blue("\nAgent's Reasoning Log (Scratchpad):")
+         # Print scratchpad line by line for better color coding
+         for line in ai_message.scratchpad.split('\n'):
+             if line.startswith("### Step"):
+                 ASCIIColors.yellow(line)
+             elif line.startswith("- **Action**:") or line.startswith("- **Result**:") or line.startswith("- **Error**:"):
+                 ASCIIColors.magenta(line)
+             else:
+                 print(line)
+
+     # --- Sources Used (from metadata) ---
+     if ai_message.metadata and "sources" in ai_message.metadata:
+         sources = ai_message.metadata.get("sources", [])
+         if sources:
+             ASCIIColors.blue("\nSources Consulted (RAG):")
+             for i, source in enumerate(sources):
+                 print(f" [{i+1}] Path: {source.get('file_path', 'N/A')}")
+                 # Indent the content for readability
+                 content = source.get('chunk_text', 'N/A').replace('\n', '\n ')
+                 print(f" Content: \"{content}\"")
+
+     ASCIIColors.cyan("\n" + "="*61 + "\n")
+
+ def run_chat_console(discussion: LollmsDiscussion):
+     print_help()
+     while True:
+         user_input = input("You: ")
+         if not user_input:
+             continue
+
+         use_mcps_flag = False
+         use_data_store_flag = False
+         prompt = user_input
+
+         # --- Command Handling ---
+         if user_input.lower().startswith("!exit"):
+             break
+         elif user_input.lower().startswith("!help"):
+             print_help()
+             continue
+         elif user_input.lower().startswith("!status"):
+             # Assuming a print_status function exists
+             # print_status(discussion)
+             continue
+         elif user_input.lower().startswith("!regen"):
+             # Assuming a regenerate_branch method exists
+             # discussion.regenerate_branch(...)
+             continue
+         elif user_input.lower().startswith("!agent "):
+             use_mcps_flag = True
+             prompt = user_input[7:].strip()
+             ASCIIColors.yellow(f"Agentic MCP turn initiated for: '{prompt}'")
+         elif user_input.lower().startswith("!rag "):
+             use_data_store_flag = True
+             prompt = user_input[5:].strip()
+             ASCIIColors.yellow(f"Agentic RAG turn initiated for: '{prompt}'")
+         elif user_input.lower().startswith("!both "):
+             use_mcps_flag = True
+             use_data_store_flag = True
+             prompt = user_input[6:].strip()
+             ASCIIColors.yellow(f"Agentic MCP+RAG turn initiated for: '{prompt}'")
+
+         # --- Streaming Callback ---
+         def stream_callback(chunk, msg_type, metadata={}, **kwargs):
+             # Render steps and thoughts in real-time
+             if msg_type == 12: # MSG_TYPE.MSG_TYPE_STEP_START
+                 ASCIIColors.cyan(f"\n> Starting: {chunk}")
+             elif msg_type == 13: # MSG_TYPE.MSG_TYPE_STEP_END
+                 ASCIIColors.cyan(f"> Finished: {chunk}")
+             elif msg_type == 2: # MSG_TYPE.MSG_TYPE_INFO (for thoughts)
+                 ASCIIColors.yellow(f"\n (Thought): {chunk}")
+             else: # Final answer chunks are printed by the main loop
+                 pass # The final answer is printed after the full report
+             return True
+
+         # --- Main Chat Logic ---
+         try:
+             #print("\nAI: ", end="", flush=True)
+
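+             # A single discussion.chat() call drives the whole turn: use_mcps toggles MCP tool use,
+             # and use_data_store maps a store name to a retrieval callable (the mock RAG function here).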
+             response_dict = discussion.chat(
+                 user_message=prompt,
+                 use_mcps=use_mcps_flag,
+                 use_data_store={"coding_store": mock_rag_query_function} if use_data_store_flag else None,
+                 streaming_callback=stream_callback
+             )
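+             # Agentic turns (MCP and/or RAG) also get the detailed report; every turn prints the final answer.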
+             if use_mcps_flag or use_data_store_flag:
+                 print_agentic_results(response_dict)
+             print("\nAI: ", end="")
+             ASCIIColors.green(response_dict['ai_message'].content)
+
+         except Exception as e:
+             trace_exception(e)
+             print("\nAn error occurred during generation.")
+
+ if __name__ == "__main__":
+     mcp_procs = start_mcp_servers()
+     try:
+         discussion_session = setup_client_and_discussion()
+         run_chat_console(discussion_session)
+     finally:
+         ASCIIColors.red("\n--- Shutting down MCP servers ---")
+         for proc in mcp_procs:
+             proc.terminate()
+             proc.wait()
+         shutil.rmtree(WORKSPACE_DIR, ignore_errors=True)
+         print("Cleanup complete. Goodbye!")
lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  
  
- __version__ = "0.23.0" # Updated version
+ __version__ = "0.24.1" # Updated version
  
  
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [