lollms-client 0.33.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
- lollms_client/llm_bindings/claude/__init__.py +4 -7
- lollms_client/llm_bindings/gemini/__init__.py +3 -7
- lollms_client/llm_bindings/grok/__init__.py +3 -7
- lollms_client/llm_bindings/groq/__init__.py +4 -6
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
- lollms_client/llm_bindings/litellm/__init__.py +15 -6
- lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
- lollms_client/llm_bindings/lollms/__init__.py +24 -14
- lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
- lollms_client/llm_bindings/mistral/__init__.py +3 -5
- lollms_client/llm_bindings/ollama/__init__.py +6 -11
- lollms_client/llm_bindings/open_router/__init__.py +4 -6
- lollms_client/llm_bindings/openai/__init__.py +7 -14
- lollms_client/llm_bindings/openllm/__init__.py +12 -12
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
- lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
- lollms_client/llm_bindings/transformers/__init__.py +14 -6
- lollms_client/llm_bindings/vllm/__init__.py +16 -12
- lollms_client/lollms_core.py +303 -490
- lollms_client/lollms_discussion.py +431 -78
- lollms_client/lollms_llm_binding.py +192 -381
- lollms_client/lollms_mcp_binding.py +33 -2
- lollms_client/lollms_tti_binding.py +107 -2
- lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
- lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
- lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
- lollms_client/stt_bindings/lollms/__init__.py +6 -8
- lollms_client/stt_bindings/whisper/__init__.py +2 -4
- lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
- lollms_client/tti_bindings/dalle/__init__.py +50 -29
- lollms_client/tti_bindings/diffusers/__init__.py +227 -439
- lollms_client/tti_bindings/gemini/__init__.py +320 -0
- lollms_client/tti_bindings/lollms/__init__.py +8 -9
- lollms_client-1.1.0.dist-info/METADATA +1214 -0
- lollms_client-1.1.0.dist-info/RECORD +69 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/top_level.txt +0 -2
- examples/article_summary/article_summary.py +0 -58
- examples/console_discussion/console_app.py +0 -266
- examples/console_discussion.py +0 -448
- examples/deep_analyze/deep_analyse.py +0 -30
- examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
- examples/function_calling_with_local_custom_mcp.py +0 -250
- examples/generate_a_benchmark_for_safe_store.py +0 -89
- examples/generate_and_speak/generate_and_speak.py +0 -251
- examples/generate_game_sfx/generate_game_fx.py +0 -240
- examples/generate_text_with_multihop_rag_example.py +0 -210
- examples/gradio_chat_app.py +0 -228
- examples/gradio_lollms_chat.py +0 -259
- examples/internet_search_with_rag.py +0 -226
- examples/lollms_chat/calculator.py +0 -59
- examples/lollms_chat/derivative.py +0 -48
- examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
- examples/lollms_discussions_test.py +0 -155
- examples/mcp_examples/external_mcp.py +0 -267
- examples/mcp_examples/local_mcp.py +0 -171
- examples/mcp_examples/openai_mcp.py +0 -203
- examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
- examples/mcp_examples/run_standard_mcp_example.py +0 -204
- examples/simple_text_gen_test.py +0 -173
- examples/simple_text_gen_with_image_test.py +0 -178
- examples/test_local_models/local_chat.py +0 -9
- examples/text_2_audio.py +0 -77
- examples/text_2_image.py +0 -144
- examples/text_2_image_diffusers.py +0 -274
- examples/text_and_image_2_audio.py +0 -59
- examples/text_gen.py +0 -30
- examples/text_gen_system_prompt.py +0 -29
- lollms_client-0.33.0.dist-info/METADATA +0 -854
- lollms_client-0.33.0.dist-info/RECORD +0 -101
- test/test_lollms_discussion.py +0 -368
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/licenses/LICENSE +0 -0
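The removed example and test files are shown in full in the diff hunks below. Because 1.1.0 drops the bundled examples/ and test/ trees from the wheel and reworks core modules such as lollms_core.py and lollms_llm_binding.py, code written against the 0.33.0 layout can break after an upgrade. A minimal sketch for checking the installed release at runtime (standard library only; the major-version gate is an assumption about how you might want to handle the jump, not part of the package):

# Sketch: report which lollms-client release is installed so callers can
# decide whether 0.33.x-era example code still applies.
from importlib import metadata

def lollms_client_version() -> str:
    """Return the installed lollms-client version, or 'not installed'."""
    try:
        return metadata.version("lollms-client")
    except metadata.PackageNotFoundError:
        return "not installed"

if __name__ == "__main__":
    version = lollms_client_version()
    print(f"lollms-client: {version}")
    # Assumption: treat any 1.x release as post-rework.
    if version.split(".")[0] == "1":
        print("1.x installed: the bundled examples/ and test/ trees are no longer shipped.")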
examples/internet_search_with_rag.py (removed)
@@ -1,226 +0,0 @@
-from lollms_client import LollmsClient, MSG_TYPE
-from ascii_colors import ASCIIColors, trace_exception
-from typing import List, Dict, Any, Optional, Callable
-import json
-from pathlib import Path
-
-# --- Internet Search RAG Implementation ---
-_duckduckgo_search_installed = False
-_search_installation_error_message = ""
-try:
-    import pipmaster as pm
-    # ensure_packages should be called by the binding init ideally,
-    # but we call it here for the example's standalone execution.
-    pm.ensure_packages(["duckduckgo_search"])
-    from duckduckgo_search import DDGS
-    _duckduckgo_search_installed = True
-except Exception as e:
-    _search_installation_error_message = str(e)
-    DDGS = None
-    ASCIIColors.error(f"Failed to import duckduckgo_search: {_search_installation_error_message}")
-    ASCIIColors.info("Please install it: pip install duckduckgo-search")
-
-
-def perform_internet_search_rag(
-    query_text: str,
-    vectorizer_name: Optional[str] = None, # Not used for search
-    top_k: int = 5,
-    min_similarity_percent: float = 0.0 # Not used directly for search filter, but can influence result quality/rank
-) -> List[Dict[str, Any]]:
-    """
-    Performs an internet search using DuckDuckGo and formats results for RAG.
-    Similarity is simulated based on rank.
-    """
-    if not _duckduckgo_search_installed or DDGS is None:
-        ASCIIColors.error("duckduckgo_search is not available. Cannot perform internet search.")
-        return []
-
-    ASCIIColors.magenta(f" [INTERNET SEARCH] Querying DuckDuckGo for: '{query_text}', max_results={top_k}")
-    search_results_raw = []
-    try:
-        # DDGS().text returns a generator, max_results limits it.
-        # Note: The DDGS library might sometimes return fewer results than max_results.
-        with DDGS() as ddgs:
-            search_results_raw = list(ddgs.text(keywords=query_text, max_results=top_k))
-
-    except Exception as e:
-        ASCIIColors.error(f" [INTERNET SEARCH] Search failed: {e}")
-        trace_exception(e)
-        return []
-
-    formatted_results: List[Dict[str, Any]] = []
-    if search_results_raw:
-        for i, r in enumerate(search_results_raw):
-            # Simulate similarity based on rank (rank 1 is highest sim)
-            # Max similarity is 100% for rank 1, decreases linearly or non-linearly.
-            # Simple linear decrease: 100 - (rank * (100 / top_k+1))
-            # Let's use rank-based score: 100% for rank 1, 90% for rank 2, ... 50% for rank 5 etc.
-            # Ensure similarity is above min_similarity_percent if that param was intended as a filter here
-
-            simulated_similarity = max(0.0, 100.0 - i * (100.0 / (top_k + 1))) # Higher rank = lower sim
-            simulated_similarity = round(simulated_similarity, 2)
-
-            if simulated_similarity >= min_similarity_percent:
-                formatted_results.append({
-                    "file_path": r.get("href", "# Unknown URL"), # Use URL as document identifier
-                    "chunk_text": f"Title: {r.get('title', 'N/A')}\nSnippet: {r.get('body', 'N/A')}", # Combine title and snippet
-                    "similarity_percent": simulated_similarity,
-                })
-            else:
-                ASCIIColors.debug(f" [INTERNET SEARCH] Skipping result {i+1} due to low simulated similarity ({simulated_similarity}%)")
-
-    ASCIIColors.magenta(f" [INTERNET SEARCH] Formatted {len(formatted_results)} results for RAG.")
-    if not formatted_results: ASCIIColors.yellow(f" [INTERNET SEARCH] No results found for query: '{query_text}' or none met min_similarity_percent.")
-    return formatted_results
-
-# --- Streaming Callback for RAG and LLM ---
-def rag_streaming_callback(
-    chunk: str,
-    msg_type: MSG_TYPE,
-    metadata: Optional[Dict] = None,
-    turn_history: Optional[List] = None
-) -> bool:
-    metadata = metadata or {}
-    hop = metadata.get("hop", "")
-    type_info = metadata.get("type", "N/A")
-
-    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Final answer chunks
-        ASCIIColors.success(chunk, end="", flush=True)
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
-        info = metadata.get("query", chunk) if type_info in ["rag_query_generation", "rag_retrieval"] else chunk
-        ASCIIColors.yellow(f"\n>> RAG Hop {hop} | START | {type_info.upper()} | Info: {str(info)[:100]}...", flush=True)
-    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
-        num_chunks = metadata.get("num_chunks")
-        query = metadata.get("query")
-        decision = metadata.get("decision")
-
-        end_info = []
-        if query: end_info.append(f"Query: '{str(query)[:50]}...'")
-        if num_chunks is not None: end_info.append(f"Results: {num_chunks}")
-        if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:50]}...'")
-
-        ASCIIColors.green(f"\n<< RAG Hop {hop} | END | {type_info.upper()} | {' | '.join(end_info) if end_info else chunk}", flush=True)
-    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
-        ASCIIColors.error(f"\nError in RAG stream: {chunk}", flush=True)
-
-    return True
-
-# --- Main Example ---
-if __name__ == "__main__":
-    ASCIIColors.red("--- Multi-Hop Internet Search Example with LollmsClient ---")
-
-    # LLM Configuration (use a model good at instruction following and JSON)
-    # Ensure your Ollama server is running and has this model pulled.
-    LLM_BINDING_NAME = "ollama"
-    LLM_MODEL_NAME = "mistral:latest" # or llama3, phi3 etc.
-
-    # You could also enable the internet_search tool via MCP,
-    # but this example specifically uses it directly via generate_text_with_rag.
-    # For MCP example, see examples/local_mcp.py
-
-    try:
-        lc = LollmsClient(
-            binding_name=LLM_BINDING_NAME,
-            model_name=LLM_MODEL_NAME,
-            temperature=0.1,
-            ctx_size=4096
-        )
-        ASCIIColors.green(f"LollmsClient initialized with LLM: {LLM_BINDING_NAME}/{LLM_MODEL_NAME}")
-
-        if not _duckduckgo_search_installed or DDGS is None:
-            ASCIIColors.error("duckduckgo_search is not installed. Cannot run search examples.")
-            exit()
-
-
-        # --- Test Case 1: Classic Search RAG (max_rag_hops = 0) ---
-        ASCIIColors.cyan("\n\n--- Test Case 1: Classic Internet Search RAG (max_rag_hops = 0) ---")
-        classic_search_prompt = "What is the current population of Japan?"
-        ASCIIColors.blue(f"User Prompt: {classic_search_prompt}")
-
-        classic_rag_result = lc.generate_text_with_rag(
-            prompt=classic_search_prompt,
-            rag_query_function=perform_internet_search_rag, # Use the search function
-            max_rag_hops=0,
-            rag_top_k=3, # Get 3 search results
-            rag_min_similarity_percent=50.0, # Only use results with simulated sim >= 50%
-            streaming_callback=rag_streaming_callback,
-            n_predict=250
-        )
-        print("\n--- End of Classic Search RAG ---")
-        ASCIIColors.magenta("\nClassic Search RAG Final Output Structure:")
-        print(f" Final Answer (first 100 chars): {classic_rag_result.get('final_answer', '')}...")
-        print(f" Error: {classic_rag_result.get('error')}")
-        print(f" Number of Hops: {len(classic_rag_result.get('rag_hops_history', []))}")
-        print(f" Total Unique Sources Retrieved: {len(classic_rag_result.get('all_retrieved_sources', []))}")
-        if classic_rag_result.get('all_retrieved_sources'):
-            print(" Example Retrieved Source:")
-            source_ex = classic_rag_result['all_retrieved_sources'][0]
-            print(f" Document (URL): {source_ex.get('document')}")
-            print(f" Similarity: {source_ex.get('similarity')}%")
-            print(f" Content (Snippet, first 50 chars): {source_ex.get('content', '')}...")
-
-
-        # --- Test Case 2: Multi-Hop Search RAG (max_rag_hops = 1) ---
-        ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop Internet Search RAG (max_rag_hops = 1) ---")
-        multihop_search_prompt_1 = "Tell me about the latest developments in fusion energy, including any recent news."
-        ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_1}")
-
-        multihop_rag_result_1 = lc.generate_text_with_rag(
-            prompt=multihop_search_prompt_1,
-            rag_query_function=perform_internet_search_rag,
-            rag_query_text=None, # LLM will generate first query
-            max_rag_hops=1, # Allow one refinement hop
-            rag_top_k=2, # Get 2 search results per query
-            rag_min_similarity_percent=50.0,
-            streaming_callback=rag_streaming_callback,
-            n_predict=400
-        )
-        print("\n--- End of Multi-Hop Search RAG (1 hop max) ---")
-        ASCIIColors.magenta("\nMulti-Hop Search RAG (1 hop max) Final Output Structure:")
-        print(f" Final Answer (first 100 chars): {multihop_rag_result_1.get('final_answer', '')}...")
-        print(f" Error: {multihop_rag_result_1.get('error')}")
-        print(f" Number of Hops Made: {len(multihop_rag_result_1.get('rag_hops_history', []))}")
-        for i, hop_info in enumerate(multihop_rag_result_1.get('rag_hops_history', [])):
-            print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
-            print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
-            print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
-            print(f" Hop {i+1} LLM Decision: NeedMoreData={hop_info.get('llm_decision_json',{}).get('need_more_data')}")
-        print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_1.get('all_retrieved_sources', []))}")
-
-
-        # --- Test Case 3: More complex multi-hop (max_rag_hops = 2) ---
-        ASCIIColors.cyan("\n\n--- Test Case 3: More Complex Multi-Hop Internet Search RAG (max_rag_hops = 2) ---")
-        multihop_search_prompt_2 = "What are the requirements and steps to install the lollms_client python library, and what are some of its key features?"
-        ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_2}")
-
-        multihop_rag_result_2 = lc.generate_text_with_rag(
-            prompt=multihop_search_prompt_2,
-            rag_query_function=perform_internet_search_rag,
-            max_rag_hops=2, # Allow up to two refinement hops
-            rag_top_k=2, # Get 2 results per query
-            rag_min_similarity_percent=40.0, # Lower similarity to maybe get broader initial results
-            streaming_callback=rag_streaming_callback,
-            n_predict=500 # Allow more for the installation steps and features
-        )
-        print("\n--- End of More Complex Multi-Hop Search RAG (up to 2 hops) ---")
-        ASCIIColors.magenta("\nMore Complex Multi-Hop Search RAG (up to 2 hops) Final Output Structure:")
-        print(f" Final Answer (first 100 chars): {multihop_rag_result_2.get('final_answer', '')[:100]}...")
-        print(f" Error: {multihop_rag_result_2.get('error')}")
-        print(f" Number of Hops Made: {len(multihop_rag_result_2.get('rag_hops_history', []))}")
-        for i, hop_info in enumerate(multihop_rag_result_2.get('rag_hops_history', [])):
-            print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
-            print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
-            print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
-        print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_2.get('all_retrieved_sources', []))}")
-
-
-    except ValueError as ve:
-        ASCIIColors.error(f"Initialization or RAG parameter error: {ve}")
-        trace_exception(ve)
-    except ConnectionRefusedError:
-        ASCIIColors.error(f"Connection refused. Is the Ollama server ({LLM_BINDING_NAME}) running?")
-    except Exception as e:
-        ASCIIColors.error(f"An unexpected error occurred: {e}")
-        trace_exception(e)
-
-    ASCIIColors.red("\n--- Multi-Hop Internet Search Example Finished ---")
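For quick reference, the call pattern used throughout the removed example above condenses to the following sketch. It assumes the generate_text_with_rag signature and the retrieval-function contract (a list of dicts with file_path, chunk_text and similarity_percent keys) exactly as they appear in the deleted 0.33.0 file; lollms_core.py was heavily reworked in 1.1.0, so treat this as a record of the old API rather than a guide to the new one.

# Condensed from the deleted examples/internet_search_with_rag.py (0.33.0 API).
# Assumption: generate_text_with_rag and its parameters are unchanged from the
# removed example; verify against the 1.1.0 sources before relying on this.
from typing import Any, Dict, List, Optional
from lollms_client import LollmsClient

def my_retriever(query_text: str,
                 vectorizer_name: Optional[str] = None,
                 top_k: int = 3,
                 min_similarity_percent: float = 0.0) -> List[Dict[str, Any]]:
    # Any callable returning chunks in this shape can back the RAG loop.
    return [{
        "file_path": "https://example.com/source",  # document identifier
        "chunk_text": "Title: ...\nSnippet: ...",   # retrieved text
        "similarity_percent": 100.0,                 # score used for filtering
    }][:top_k]

if __name__ == "__main__":
    lc = LollmsClient(binding_name="ollama", model_name="mistral:latest")
    result = lc.generate_text_with_rag(
        prompt="What is the current population of Japan?",
        rag_query_function=my_retriever,
        max_rag_hops=0,  # 0 = classic single-pass RAG
        rag_top_k=3,
        rag_min_similarity_percent=50.0,
        n_predict=250,
    )
    print(result.get("final_answer", ""))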
examples/lollms_chat/calculator.py (removed)
@@ -1,59 +0,0 @@
-# This is a simple example of a calculator using lollms_chat. don't expect this to be perfect as LLMs are very bad in computing
-# this can be used for very simple calculations. don't expect it to be accurate to compute sqrt(35) or something
-# it is just for the purpose of experimentation as even the best models in the world are not capable of doing accurate calculations yet without a calculator
-
-from lollms_client import LollmsClient
-import json
-import math # Import the math module for calculations
-
-# Make sure you use your key
-lc = LollmsClient(
-    "openai",
-    "http://localhost:9642/v1/",
-    service_key="lollms_y-uyV-p2_AQGo5Ut6uHDmfIoRk6rKfmf0Rz6xQx-Zkl8cNyVUSFM"# make sure you generate your own key
-)
-
-# if you want to see what binding/model does the server support, use this:
-models = lc.listModels()
-print(f"Found models:\n{models}")
-
-lc.set_model_name("ollama/gemma3:27b") # Or your preferred binding/model
-
-expression = input("Give an expression to evaluate: ")
-
-# Construct a detailed prompt
-system_prompt = (
-    "You are a highly accurate calculator. You receive a mathematical expression "
-    "as input and return the result as a JSON object. "
-    "The expression can include numbers, basic arithmetic operators (+, -, *, /), "
-    "parentheses, and common mathematical functions like sin, cos, tan, pi, sqrt, and log. "
-    "Always evaluate the expression and return the final numeric result. If the expression is invalid, return 'Error'."
-)
-
-template = '{"result": the numeric result of the evaluated expression}'
-
-# Include the expression in the user prompt. This is important!
-user_prompt = f"Evaluate the following expression: {expression}"
-
-# Generate the code
-generation_output = lc.generate_code(
-    user_prompt,
-    system_prompt=system_prompt,
-    template=template
-)
-
-try:
-    # Attempt to parse the JSON response
-    generation_output = json.loads(generation_output)
-    result = generation_output["result"]
-
-    # Attempt to convert the result to a float
-    try:
-        result = float(result)
-        print(f"Result: {result}")
-    except ValueError:
-        print(f"Result: {result} (Could not convert to a number)") #Handles cases where the LLM returns non-numeric output
-except json.JSONDecodeError:
-    print(f"Error: Could not decode JSON response: {generation_output}")
-except KeyError:
-    print(f"Error: 'result' key not found in JSON response: {generation_output}")
examples/lollms_chat/derivative.py (removed)
@@ -1,48 +0,0 @@
-from lollms_client import LollmsClient
-import json
-
-# Make sure you use your key
-lc = LollmsClient(
-    "openai",
-    "http://localhost:9642/v1/",
-    service_key="lollms_y-uyV-p2_AQGo5Ut6uHDmfIoRk6rKfmf0Rz6xQx-Zkl8cNyVUSFM" # make sure you generate your own key
-)
-
-# if you want to see what binding/model does the server support, use this:
-models = lc.listModels()
-print(f"Found models:\n{models}")
-
-lc.set_model_name("ollama/gemma3:27b") # Or your preferred binding/model
-
-function = input("Enter the function (e.g., x^2 + 2*x): ")
-parameter = input("Enter the parameter to differentiate with respect to (e.g., x): ")
-
-# Construct a detailed prompt
-system_prompt = (
-    "You are a symbolic differentiation engine. You receive a mathematical function "
-    "and a parameter as input, and you return the derivative of the function with respect to that parameter. "
-    "The function can include variables, numbers, and common mathematical operations. "
-    "Return the derivative as a string. If the function or parameter is invalid, return 'Error'."
-)
-
-template = '"{derivative}": the derivative of the function with respect to the parameter'
-
-# Include the function and parameter in the user prompt. This is important!
-user_prompt = f"Find the derivative of the function '{function}' with respect to '{parameter}'."
-
-# Generate the code
-generation_output = lc.generate_code(
-    user_prompt,
-    system_prompt=system_prompt,
-    template=template
-)
-
-try:
-    # Attempt to parse the JSON response
-    generation_output = json.loads(generation_output)
-    derivative = generation_output["derivative"]
-    print(f"Derivative: {derivative}")
-except json.JSONDecodeError:
-    print(f"Error: Could not decode JSON response: {generation_output}")
-except KeyError:
-    print(f"Error: 'derivative' key not found in JSON response: {generation_output}")
examples/lollms_chat/test_openai_compatible_with_lollms_chat.py (removed)
@@ -1,12 +0,0 @@
-from lollms_client import LollmsClient
-#make sure you use your key
-lc = LollmsClient("openai","http://localhost:9642/v1/", service_key="lollms_zXQdyvrP_ecMXm3UZ0D004x979aHpyF8iq4ki_b52q0WdFuiEfMo")
-models = lc.listModels()
-print(f"Found models:\n{models}")
-
-lc.set_model_name("ollama/gemma3:27b")
-
-res = lc.generate_text("Describe this image",images=[
-    r"C:\Users\parisneo\Pictures\me.jpg"
-])
-print(res)
examples/lollms_discussions_test.py (removed)
@@ -1,155 +0,0 @@
-# research_app_final.py
-
-import os
-import json
-import shutil
-from pathlib import Path
-# Use the correct, specified import style
-from lollms_client import LollmsClient
-from lollms_client.lollms_discussion import LollmsDataManager, LollmsDiscussion
-from lollms_client.lollms_types import MSG_TYPE
-from sqlalchemy import Column, String
-
-# --- 1. Define Application-Specific Schema ---
-# The developer can define their own fields for the database tables.
-# This allows applications to store and query their own metadata.
-class ResearchDiscussionMixin:
-    # We want each discussion to have a 'project_name' that we can search for.
-    project_name = Column(String(100), index=True, nullable=False)
-
-class ResearchMessageMixin:
-    # This mixin is empty for this example.
-    pass
-
-def setup_migration_dummies(folder: Path):
-    """Creates a dummy JSON file to simulate an old, file-based project structure."""
-    if not folder.exists():
-        folder.mkdir(parents=True, exist_ok=True)
-
-    # This data structure mimics what the old `to_dict` would have produced.
-    discussion_data = {
-        "id": "old_project_alpha",
-        "metadata": {"project_name": "Project Alpha"},
-        "system_prompt": "This is the system prompt for Alpha.",
-        "created_at": "2023-01-01T12:00:00",
-        "updated_at": "2023-01-01T12:05:00",
-        "messages": [
-            {"id": "msg1", "sender": "user", "sender_type":"user", "content": "What was the first finding?", "created_at": "2023-01-01T12:00:00"},
-            {"id": "msg2", "sender": "assistant", "sender_type":"assistant", "content": "It was about quantum states.", "parent_id": "msg1", "created_at": "2023-01-01T12:05:00"}
-        ]
-    }
-    with open(folder / "project_alpha.json", "w") as f:
-        json.dump(discussion_data, f, indent=2)
-    print(f"Created dummy migration file in '{folder}'.")
-
-def main():
-    # --- 2. Setup: Lollms Client is always needed ---
-    print("--- LOLLMS Research Assistant (Final Version) ---")
-    try:
-        # Instantiate the real LollmsClient to connect to a running model service.
-        # Ensure Ollama is running and has pulled the specified model.
-        lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
-        print("LollmsClient connected successfully to Ollama.")
-    except Exception as e:
-        print(f"\nFATAL: Could not connect to LLM binding. Is Ollama running?\nError: {e}")
-        return
-
-    # --- DEMO 1: In-Memory Mode (Backward Compatibility) ---
-    print("\n--- DEMO 1: In-Memory Discussion ---")
-
-    # Create an in-memory discussion by NOT passing a db_manager.
-    in_memory_discussion = LollmsDiscussion.create_new(lollms_client=lc)
-    in_memory_discussion.system_prompt = "You are a helpful assistant."
-    print("Created an in-memory discussion.")
-
-    # Interact with it. The state is held entirely in the object.
-    user_input_mem = "Can you remember that my favorite color is blue?"
-    print(f"You > {user_input_mem}")
-    print("AI > ", end="", flush=True)
-    def stream_to_console(token, msg_type=MSG_TYPE.MSG_TYPE_CHUNK):
-        print(token, end="", flush=True)
-        return True
-    in_memory_discussion.chat(user_input_mem, streaming_callback=stream_to_console)
-    print()
-
-    # Save its state to a JSON file. This now works correctly.
-    file_path = Path("./in_memory_save.json")
-    with open(file_path, "w") as f:
-        json.dump(in_memory_discussion.to_dict(), f, indent=2)
-    print(f"\nIn-memory discussion saved to '{file_path}'.")
-    os.remove(file_path)
-
-    # --- DEMO 2: Database-Backed Mode with Migration ---
-    print("\n--- DEMO 2: Database-Backed Mode ---")
-    DB_PATH = "sqlite:///research_projects_final.db"
-    ENCRYPTION_KEY = "a-secure-password-for-the-database"
-    MIGRATION_FOLDER = Path("./old_discussions")
-
-    try:
-        # Initialize the LollmsDataManager with our schema and encryption key.
-        db_manager = LollmsDataManager(
-            db_path=DB_PATH,
-            discussion_mixin=ResearchDiscussionMixin,
-            message_mixin=ResearchMessageMixin,
-            encryption_key=ENCRYPTION_KEY
-        )
-        print(f"Database setup complete. Encryption is ENABLED.")
-    except Exception as e:
-        print(f"\nFATAL: Could not initialize database. Error: {e}")
-        return
-
-    # Demonstrate the one-time migration from a folder of JSON files.
-    setup_migration_dummies(MIGRATION_FOLDER)
-    input("\nDummy migration files created. Press Enter to run the migration...")
-    LollmsDiscussion.migrate(lollms_client=lc, db_manager=db_manager, folder_path=MIGRATION_FOLDER)
-
-    session = db_manager.get_session()
-    migrated_count = session.query(db_manager.DiscussionModel).count()
-    print(f"Verification: Found {migrated_count} discussions in the database after migration.")
-    session.close()
-
-    # --- DEMO 3: Live Chat with a DB-Backed Discussion ---
-    input("\nMigration complete. Press Enter to start a new, database-backed chat session...")
-
-    # Create a new, database-backed discussion with our custom 'project_name'.
-    discussion = LollmsDiscussion.create_new(
-        lollms_client=lc,
-        db_manager=db_manager,
-        max_context_size=lc.default_ctx_size // 2,
-        autosave=True,
-        project_name="Project Gamma (Live)"
-    )
-    discussion.system_prompt = "You are a helpful assistant for Project Gamma."
-
-    print(f"\n--- Live Chat for '{discussion.db_discussion.project_name}' ---")
-    print("Type your message, or '/exit', '/export_openai', '/export_ollama' to quit.")
-
-    while True:
-        user_input = input("\nYou > ")
-        if user_input.lower() == '/exit': break
-
-        if user_input.lower().startswith('/export'):
-            try:
-                format_type = user_input.split('_')[1] + "_chat"
-                exported_data = discussion.export(format_type)
-                print(f"\n--- Exported for {format_type.split('_')[0].upper()} ---")
-                print(json.dumps(exported_data, indent=2))
-                print("-----------------------------------")
-            except IndexError:
-                print("Invalid export command. Use /export_openai or /export_ollama")
-            continue
-
-        print("AI > ", end="", flush=True)
-        # The same streaming callback works seamlessly.
-        discussion.chat(user_input, streaming_callback=stream_to_console)
-        print()
-
-    # --- Cleanup ---
-    print("\n--- Demo complete. Cleaning up. ---")
-    if os.path.exists(DB_PATH):
-        os.remove(DB_PATH)
-    if MIGRATION_FOLDER.exists():
-        shutil.rmtree(MIGRATION_FOLDER)
-
-if __name__ == "__main__":
-    main()