lollms-client 0.19.1__tar.gz → 0.19.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client might be problematic.
- {lollms_client-0.19.1 → lollms_client-0.19.6}/PKG-INFO +1 -1
- lollms_client-0.19.6/examples/generate_text_with_multihop_rag_example.py +211 -0
- lollms_client-0.19.6/examples/internet_search_with_rag.py +228 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/__init__.py +1 -1
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_core.py +238 -1
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/PKG-INFO +1 -1
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/SOURCES.txt +2 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/LICENSE +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/README.md +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/article_summary/article_summary.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/deep_analyze/deep_analyse.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/function_calling_with_local_custom_mcp.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/generate_and_speak/generate_and_speak.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/generate_game_sfx/generate_game_fx.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/local_mcp.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/personality_test/chat_test.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/personality_test/chat_with_aristotle.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/personality_test/tesks_test.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/simple_text_gen_test.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/simple_text_gen_with_image_test.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/test_local_models/local_chat.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_2_audio.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_2_image.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_2_image_diffusers.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_and_image_2_audio.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_gen.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/examples/text_gen_system_prompt.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_config.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_llm_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_types.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tti_bindings/dalle/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttm_bindings/bark/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/tts_bindings/xtts/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/pyproject.toml +0 -0
- {lollms_client-0.19.1 → lollms_client-0.19.6}/setup.cfg +0 -0
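The headline change in this release is a new generate_text_with_rag method on LollmsClient (added in lollms_core.py below), together with two example scripts that exercise it. As a quick orientation before the full diff, here is a minimal sketch of the call shape condensed from those examples; the retriever stub and the chunk it returns are illustrative placeholders, while the parameter names, binding, and model come from the diff itself:

# Condensed sketch of the 0.19.6 API shown in this diff (illustrative, not part of the release).
from typing import Any, Dict, List, Optional
from lollms_client import LollmsClient

def my_retriever(query_text: str, vectorizer_name: Optional[str],
                 top_k: int, min_similarity_percent: float) -> List[Dict[str, Any]]:
    # A real implementation would query a vector store; this stub returns one fake chunk.
    return [{"file_path": "docs/example.md", "chunk_text": "Example chunk.", "similarity_percent": 90.0}]

lc = LollmsClient(binding_name="ollama", model_name="qwen3:4b")  # values taken from the example script below
result = lc.generate_text_with_rag(
    prompt="What are the key features of Python?",
    rag_query_function=my_retriever,
    max_rag_hops=1,                   # 0 keeps the classic single-retrieval behaviour
    rag_top_k=3,
    rag_min_similarity_percent=50.0,
)
print(result["final_answer"] if not result["error"] else result["error"])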
lollms_client-0.19.6/examples/generate_text_with_multihop_rag_example.py
@@ -0,0 +1,211 @@
+from lollms_client import LollmsClient, MSG_TYPE
+from ascii_colors import ASCIIColors, trace_exception
+from typing import List, Dict, Any, Optional, Callable
+import json
+from pathlib import Path
+
+# --- Mock RAG Implementation ---
+# In a real application, this would interact with your vector database (Pinecone, ChromaDB, FAISS, etc.)
+# and use a real sentence transformer for vectorization.
+
+MOCK_KNOWLEDGE_BASE = {
+    "python_basics.md": [
+        {"chunk_id": 1, "text": "Python is a high-level, interpreted programming language known for its readability and versatility. It was created by Guido van Rossum and first released in 1991."},
+        {"chunk_id": 2, "text": "Key features of Python include dynamic typing, automatic memory management (garbage collection), and a large standard library. It supports multiple programming paradigms, such as procedural, object-oriented, and functional programming."},
+        {"chunk_id": 3, "text": "Common applications of Python include web development (e.g., Django, Flask), data science (e.g., Pandas, NumPy, Scikit-learn), machine learning, artificial intelligence, automation, and scripting."},
+    ],
+    "javascript_info.js": [
+        {"chunk_id": 1, "text": "JavaScript is a scripting language primarily used for front-end web development to create interactive effects within web browsers. It is also used in back-end development (Node.js), mobile app development, and game development."},
+        {"chunk_id": 2, "text": "JavaScript is dynamically typed, prototype-based, and multi-paradigm. Along with HTML and CSS, it is one of the core technologies of the World Wide Web."},
+        {"chunk_id": 3, "text": "Popular JavaScript frameworks and libraries include React, Angular, Vue.js for front-end, and Express.js for Node.js back-end applications."},
+    ],
+    "ai_concepts.txt": [
+        {"chunk_id": 1, "text": "Artificial Intelligence (AI) refers to the simulation of human intelligence in machines that are programmed to think like humans and mimic their actions. The term may also be applied to any machine that exhibits traits associated with a human mind such as learning and problem-solving."},
+        {"chunk_id": 2, "text": "Machine Learning (ML) is a subset of AI that provides systems the ability to automatically learn and improve from experience without being explicitly programmed. Deep Learning (DL) is a further subset of ML based on artificial neural networks with representation learning."},
+        {"chunk_id": 3, "text": "Retrieval Augmented Generation (RAG) is an AI framework for improving the quality of LLM-generated responses by grounding the model on external sources of knowledge to supplement the LLM’s internal representation of information."},
+    ]
+}
+
+def mock_rag_query_function(
+    query_text: str,
+    vectorizer_name: Optional[str] = None, # Ignored in mock
+    top_k: int = 3,
+    min_similarity_percent: float = 0.0 # Ignored in mock, simple keyword match
+) -> List[Dict[str, Any]]:
+    """
+    A mock RAG query function.
+    Performs a simple keyword search in the MOCK_KNOWLEDGE_BASE.
+    """
+    ASCIIColors.magenta(f" [MOCK RAG] Querying with: '{query_text}', top_k={top_k}")
+    results = []
+    query_lower = query_text.lower()
+
+    all_chunks = []
+    for file_path, chunks_in_file in MOCK_KNOWLEDGE_BASE.items():
+        for chunk_data in chunks_in_file:
+            all_chunks.append({"file_path": file_path, **chunk_data})
+
+    # Simple keyword matching and scoring (very basic)
+    scored_chunks = []
+    for chunk_info in all_chunks:
+        score = 0
+        for keyword in query_lower.split():
+            if keyword in chunk_info["text"].lower() and len(keyword)>2: # Basic relevance
+                score += 1
+        if "python" in query_lower and "python" in chunk_info["file_path"].lower(): score+=5
+        if "javascript" in query_lower and "javascript" in chunk_info["file_path"].lower(): score+=5
+        if "ai" in query_lower and "ai" in chunk_info["file_path"].lower(): score+=3
+
+
+        if score > 0 : # Only include if some keywords match
+            # Simulate similarity percentage (higher score = higher similarity)
+            similarity = min(100.0, score * 20.0 + 40.0) # Arbitrary scaling
+            if similarity >= min_similarity_percent:
+                scored_chunks.append({
+                    "file_path": chunk_info["file_path"],
+                    "chunk_text": chunk_info["text"],
+                    "similarity_percent": similarity,
+                    "_score_for_ranking": score # Internal score for sorting
+                })
+
+    # Sort by internal score (descending) and take top_k
+    scored_chunks.sort(key=lambda x: x["_score_for_ranking"], reverse=True)
+    results = [
+        {"file_path": c["file_path"], "chunk_text": c["chunk_text"], "similarity_percent": c["similarity_percent"]}
+        for c in scored_chunks[:top_k]
+    ]
+    ASCIIColors.magenta(f" [MOCK RAG] Found {len(results)} relevant chunks.")
+    return results
+
+# --- Streaming Callback for RAG and LLM ---
+def rag_streaming_callback(
+    chunk: str,
+    msg_type: MSG_TYPE,
+    metadata: Optional[Dict] = None,
+    turn_history: Optional[List] = None # history of this specific RAG turn
+) -> bool:
+    """
+    Handles various stages of RAG and final LLM generation.
+    """
+    metadata = metadata or {}
+    turn_history = turn_history or [] # Should be populated by LollmsClient
+
+    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Final answer chunks
+        ASCIIColors.success(chunk, end="", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+        step_type = metadata.get("type", "step")
+        hop = metadata.get("hop", "")
+        info = metadata.get("query", chunk) if step_type == "rag_query_generation" or step_type == "rag_retrieval" else chunk
+        ASCIIColors.yellow(f"\n>> RAG Step Start (Hop {hop}): {step_type} - Info: {str(info)[:100]}...", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+        step_type = metadata.get("type", "step")
+        hop = metadata.get("hop", "")
+        num_chunks = metadata.get("num_chunks", "")
+        query = metadata.get("query", "")
+        decision = metadata.get("decision", "")
+
+        info_str = ""
+        if step_type == "rag_query_generation" and query: info_str = f"Generated Query: {query}"
+        elif step_type == "rag_retrieval": info_str = f"Retrieved {num_chunks} chunks"
+        elif step_type == "rag_llm_decision": info_str = f"LLM Decision: {json.dumps(decision)}"
+        elif step_type == "final_answer_generation": info_str = "Final answer generation complete."
+        else: info_str = chunk
+
+        ASCIIColors.green(f"\n<< RAG Step End (Hop {hop}): {step_type} - {info_str}", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
+        ASCIIColors.error(f"\nError in RAG stream: {chunk}", flush=True)
+
+    # You can inspect turn_history here if needed:
+    # ASCIIColors.debug(f"Current RAG Turn History: {turn_history}")
+    return True
+
+# --- Main Example ---
+if __name__ == "__main__":
+    ASCIIColors.red("--- Multi-Hop RAG Example with LollmsClient ---")
+
+    # LLM Configuration (use a model good at instruction following and JSON)
+    # Ensure your Ollama server is running and has this model pulled.
+    LLM_BINDING_NAME = "ollama"
+    LLM_MODEL_NAME = "qwen3:4b" # or llama3, phi3 etc.
+    # LLM_MODEL_NAME = "qwen2:1.5b" # Smaller model for quicker tests, but might struggle with complex JSON
+
+    try:
+        lc = LollmsClient(
+            binding_name=LLM_BINDING_NAME,
+            model_name=LLM_MODEL_NAME,
+            temperature=0.1, # Default temp for final answer if not overridden
+            # Other LollmsClient params as needed
+        )
+        ASCIIColors.green(f"LollmsClient initialized with LLM: {LLM_BINDING_NAME}/{LLM_MODEL_NAME}")
+
+        # --- Test Case 1: Classic RAG (max_rag_hops = 0) ---
+        ASCIIColors.cyan("\n\n--- Test Case 1: Classic RAG (max_rag_hops = 0) ---")
+        classic_rag_prompt = "What are the key features of Python?"
+        ASCIIColors.blue(f"User Prompt: {classic_rag_prompt}")
+
+        classic_rag_result = lc.generate_text_with_rag(
+            prompt=classic_rag_prompt,
+            rag_query_function=mock_rag_query_function,
+            # rag_query_text=None, # Will use `prompt` for query
+            max_rag_hops=0,
+            rag_top_k=2, # Get 2 best chunks
+            rag_min_similarity_percent=50.0,
+            streaming_callback=rag_streaming_callback,
+            n_predict=1024 # Max tokens for final answer
+        )
+        print("\n--- End of Classic RAG ---")
+        ASCIIColors.magenta("\nClassic RAG Final Output:")
+        print(json.dumps(classic_rag_result, indent=2))
+
+
+        # --- Test Case 2: Multi-Hop RAG (max_rag_hops = 1) ---
+        ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop RAG (max_rag_hops = 1) ---")
+        multihop_prompt_1 = "Compare Python and JavaScript for web development based on their common applications and core technologies."
+        ASCIIColors.blue(f"User Prompt: {multihop_prompt_1}")
+
+        multihop_rag_result_1 = lc.generate_text_with_rag(
+            prompt=multihop_prompt_1,
+            rag_query_function=mock_rag_query_function,
+            # rag_query_text="Python web development applications", # Optional: provide an initial query
+            max_rag_hops=1, # Allow one hop for LLM to refine search or decide
+            rag_top_k=2,
+            rag_min_similarity_percent=60.0,
+            streaming_callback=rag_streaming_callback,
+            n_predict=1024,
+            rag_hop_query_generation_temperature=0.1, # Focused query gen
+            rag_hop_summary_temperature=0.2 # Focused summary
+        )
+        print("\n--- End of Multi-Hop RAG (1 hop) ---")
+        ASCIIColors.magenta("\nMulti-Hop RAG (1 hop) Final Output:")
+        print(json.dumps(multihop_rag_result_1, indent=2))
+
+
+        # --- Test Case 3: Multi-Hop RAG (max_rag_hops = 2) - LLM might decide it has enough earlier ---
+        ASCIIColors.cyan("\n\n--- Test Case 3: Multi-Hop RAG (max_rag_hops = 2) ---")
+        multihop_prompt_2 = "Explain Retrieval Augmented Generation (RAG) and its relation to Machine Learning."
+        ASCIIColors.blue(f"User Prompt: {multihop_prompt_2}")
+
+        multihop_rag_result_2 = lc.generate_text_with_rag(
+            prompt=multihop_prompt_2,
+            rag_query_function=mock_rag_query_function,
+            max_rag_hops=2, # Allow up to two refinement hops
+            rag_top_k=1, # Get only the best chunk per hop to force more specific queries
+            rag_min_similarity_percent=50.0,
+            streaming_callback=rag_streaming_callback,
+            n_predict=300
+        )
+        print("\n--- End of Multi-Hop RAG (up to 2 hops) ---")
+        ASCIIColors.magenta("\nMulti-Hop RAG (up to 2 hops) Final Output:")
+        print(json.dumps(multihop_rag_result_2, indent=2))
+
+
+    except ValueError as ve:
+        ASCIIColors.error(f"Initialization or RAG parameter error: {ve}")
+        trace_exception(ve)
+    except ConnectionRefusedError:
+        ASCIIColors.error(f"Connection refused. Is the Ollama server ({LLM_BINDING_NAME}) running?")
+    except Exception as e:
+        ASCIIColors.error(f"An unexpected error occurred: {e}")
+        trace_exception(e)
+
+    ASCIIColors.red("\n--- Multi-Hop RAG Example Finished ---")
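For readers plugging their own store into this API, the mock above also documents the contract generate_text_with_rag expects from rag_query_function: a callable taking (query_text, vectorizer_name, top_k, min_similarity_percent) and returning a list of dicts with file_path, chunk_text, and similarity_percent. A bare-bones stub for illustration; the store lookup itself is left out and is an assumption:

from typing import Any, Dict, List, Optional

def my_vector_db_query(query_text: str,
                       vectorizer_name: Optional[str] = None,
                       top_k: int = 5,
                       min_similarity_percent: float = 0.0) -> List[Dict[str, Any]]:
    # hits would come from your own vector store (ChromaDB, FAISS, ...); not shown here.
    hits: List[Dict[str, Any]] = []
    return [
        {"file_path": h["source"], "chunk_text": h["text"], "similarity_percent": h["score"] * 100.0}
        for h in hits[:top_k]
        if h["score"] * 100.0 >= min_similarity_percent
    ]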
lollms_client-0.19.6/examples/internet_search_with_rag.py
@@ -0,0 +1,228 @@
+from lollms_client import LollmsClient, MSG_TYPE
+from ascii_colors import ASCIIColors, trace_exception
+from typing import List, Dict, Any, Optional, Callable
+import json
+from pathlib import Path
+
+# --- Internet Search RAG Implementation ---
+_duckduckgo_search_installed = False
+_search_installation_error_message = ""
+try:
+    import pipmaster as pm
+    # ensure_packages should be called by the binding init ideally,
+    # but we call it here for the example's standalone execution.
+    pm.ensure_packages(["duckduckgo_search"])
+    from duckduckgo_search import DDGS
+    _duckduckgo_search_installed = True
+except Exception as e:
+    _search_installation_error_message = str(e)
+    DDGS = None
+    ASCIIColors.error(f"Failed to import duckduckgo_search: {_search_installation_error_message}")
+    ASCIIColors.info("Please install it: pip install duckduckgo-search")
+
+
+def perform_internet_search_rag(
+    query_text: str,
+    vectorizer_name: Optional[str] = None, # Not used for search
+    top_k: int = 5,
+    min_similarity_percent: float = 0.0 # Not used directly for search filter, but can influence result quality/rank
+) -> List[Dict[str, Any]]:
+    """
+    Performs an internet search using DuckDuckGo and formats results for RAG.
+    Similarity is simulated based on rank.
+    """
+    if not _duckduckgo_search_installed or DDGS is None:
+        ASCIIColors.error("duckduckgo_search is not available. Cannot perform internet search.")
+        return []
+
+    ASCIIColors.magenta(f" [INTERNET SEARCH] Querying DuckDuckGo for: '{query_text}', max_results={top_k}")
+    search_results_raw = []
+    try:
+        # DDGS().text returns a generator, max_results limits it.
+        # Note: The DDGS library might sometimes return fewer results than max_results.
+        with DDGS() as ddgs:
+            search_results_raw = list(ddgs.text(keywords=query_text, max_results=top_k))
+
+    except Exception as e:
+        ASCIIColors.error(f" [INTERNET SEARCH] Search failed: {e}")
+        trace_exception(e)
+        return []
+
+    formatted_results: List[Dict[str, Any]] = []
+    if search_results_raw:
+        for i, r in enumerate(search_results_raw):
+            # Simulate similarity based on rank (rank 1 is highest sim)
+            # Max similarity is 100% for rank 1, decreases linearly or non-linearly.
+            # Simple linear decrease: 100 - (rank * (100 / top_k+1))
+            # Let's use rank-based score: 100% for rank 1, 90% for rank 2, ... 50% for rank 5 etc.
+            # Ensure similarity is above min_similarity_percent if that param was intended as a filter here
+
+            simulated_similarity = max(0.0, 100.0 - i * (100.0 / (top_k + 1))) # Higher rank = lower sim
+            simulated_similarity = round(simulated_similarity, 2)
+
+            if simulated_similarity >= min_similarity_percent:
+                formatted_results.append({
+                    "file_path": r.get("href", "# Unknown URL"), # Use URL as document identifier
+                    "chunk_text": f"Title: {r.get('title', 'N/A')}\nSnippet: {r.get('body', 'N/A')}", # Combine title and snippet
+                    "similarity_percent": simulated_similarity,
+                })
+            else:
+                ASCIIColors.debug(f" [INTERNET SEARCH] Skipping result {i+1} due to low simulated similarity ({simulated_similarity}%)")
+
+    ASCIIColors.magenta(f" [INTERNET SEARCH] Formatted {len(formatted_results)} results for RAG.")
+    if not formatted_results: ASCIIColors.yellow(f" [INTERNET SEARCH] No results found for query: '{query_text}' or none met min_similarity_percent.")
+    return formatted_results
+
+# --- Streaming Callback for RAG and LLM ---
+def rag_streaming_callback(
+    chunk: str,
+    msg_type: MSG_TYPE,
+    metadata: Optional[Dict] = None,
+    turn_history: Optional[List] = None
+) -> bool:
+    metadata = metadata or {}
+    hop = metadata.get("hop", "")
+    type_info = metadata.get("type", "N/A")
+
+    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Final answer chunks
+        ASCIIColors.success(chunk, end="", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
+        info = metadata.get("query", chunk) if type_info in ["rag_query_generation", "rag_retrieval"] else chunk
+        ASCIIColors.yellow(f"\n>> RAG Hop {hop} | START | {type_info.upper()} | Info: {str(info)[:100]}...", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_STEP_END:
+        num_chunks = metadata.get("num_chunks")
+        query = metadata.get("query")
+        decision = metadata.get("decision")
+
+        end_info = []
+        if query: end_info.append(f"Query: '{str(query)[:50]}...'")
+        if num_chunks is not None: end_info.append(f"Results: {num_chunks}")
+        if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:50]}...'")
+
+        ASCIIColors.green(f"\n<< RAG Hop {hop} | END | {type_info.upper()} | {' | '.join(end_info) if end_info else chunk}", flush=True)
+    elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
+        ASCIIColors.error(f"\nError in RAG stream: {chunk}", flush=True)
+
+    return True
+
+# --- Main Example ---
+if __name__ == "__main__":
+    ASCIIColors.red("--- Multi-Hop Internet Search Example with LollmsClient ---")
+
+    # LLM Configuration (use a model good at instruction following and JSON)
+    # Ensure your Ollama server is running and has this model pulled.
+    LLM_BINDING_NAME = "ollama"
+    LLM_MODEL_NAME = "mistral:latest" # or llama3, phi3 etc.
+
+    # You could also enable the internet_search tool via MCP,
+    # but this example specifically uses it directly via generate_text_with_rag.
+    # For MCP example, see examples/local_mcp.py
+
+    try:
+        lc = LollmsClient(
+            binding_name=LLM_BINDING_NAME,
+            model_name=LLM_MODEL_NAME,
+            temperature=0.1,
+            ctx_size=4096
+        )
+        ASCIIColors.green(f"LollmsClient initialized with LLM: {LLM_BINDING_NAME}/{LLM_MODEL_NAME}")
+
+        if not _duckduckgo_search_installed or DDGS is None:
+            ASCIIColors.error("duckduckgo_search is not installed. Cannot run search examples.")
+            exit()
+
+
+        # --- Test Case 1: Classic Search RAG (max_rag_hops = 0) ---
+        ASCIIColors.cyan("\n\n--- Test Case 1: Classic Internet Search RAG (max_rag_hops = 0) ---")
+        classic_search_prompt = "What is the current population of Japan?"
+        ASCIIColors.blue(f"User Prompt: {classic_search_prompt}")
+
+        classic_rag_result = lc.generate_text_with_rag(
+            prompt=classic_search_prompt,
+            rag_query_function=perform_internet_search_rag, # Use the search function
+            max_rag_hops=0,
+            rag_top_k=3, # Get 3 search results
+            rag_min_similarity_percent=50.0, # Only use results with simulated sim >= 50%
+            streaming_callback=rag_streaming_callback,
+            n_predict=250
+        )
+        print("\n--- End of Classic Search RAG ---")
+        ASCIIColors.magenta("\nClassic Search RAG Final Output Structure:")
+        print(f" Final Answer (first 100 chars): {classic_rag_result.get('final_answer', '')[:100]}...")
+        print(f" Error: {classic_rag_result.get('error')}")
+        print(f" Number of Hops: {len(classic_rag_result.get('rag_hops_history', []))}")
+        print(f" Total Unique Sources Retrieved: {len(classic_rag_result.get('all_retrieved_sources', []))}")
+        if classic_rag_result.get('all_retrieved_sources'):
+            print(" Example Retrieved Source:")
+            source_ex = classic_rag_result['all_retrieved_sources'][0]
+            print(f" Document (URL): {source_ex.get('document')}")
+            print(f" Similarity: {source_ex.get('similarity')}%")
+            print(f" Content (Snippet, first 50 chars): {source_ex.get('content', '')[:50]}...")
+
+
+        # --- Test Case 2: Multi-Hop Search RAG (max_rag_hops = 1) ---
+        ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop Internet Search RAG (max_rag_hops = 1) ---")
+        multihop_search_prompt_1 = "Tell me about the latest developments in fusion energy, including any recent news."
+        ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_1}")
+
+        multihop_rag_result_1 = lc.generate_text_with_rag(
+            prompt=multihop_search_prompt_1,
+            rag_query_function=perform_internet_search_rag,
+            rag_query_text=None, # LLM will generate first query
+            max_rag_hops=1, # Allow one refinement hop
+            rag_top_k=2, # Get 2 search results per query
+            rag_min_similarity_percent=50.0,
+            streaming_callback=rag_streaming_callback,
+            n_predict=400,
+            rag_hop_query_generation_temperature=0.1,
+            rag_hop_summary_temperature=0.2
+        )
+        print("\n--- End of Multi-Hop Search RAG (1 hop max) ---")
+        ASCIIColors.magenta("\nMulti-Hop Search RAG (1 hop max) Final Output Structure:")
+        print(f" Final Answer (first 100 chars): {multihop_rag_result_1.get('final_answer', '')[:100]}...")
+        print(f" Error: {multihop_rag_result_1.get('error')}")
+        print(f" Number of Hops Made: {len(multihop_rag_result_1.get('rag_hops_history', []))}")
+        for i, hop_info in enumerate(multihop_rag_result_1.get('rag_hops_history', [])):
+            print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
+            print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
+            print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
+            print(f" Hop {i+1} LLM Decision: NeedMoreData={hop_info.get('llm_decision_json',{}).get('need_more_data')}")
+        print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_1.get('all_retrieved_sources', []))}")
+
+
+        # --- Test Case 3: More complex multi-hop (max_rag_hops = 2) ---
+        ASCIIColors.cyan("\n\n--- Test Case 3: More Complex Multi-Hop Internet Search RAG (max_rag_hops = 2) ---")
+        multihop_search_prompt_2 = "What are the requirements and steps to install the lollms_client python library, and what are some of its key features?"
+        ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_2}")
+
+        multihop_rag_result_2 = lc.generate_text_with_rag(
+            prompt=multihop_search_prompt_2,
+            rag_query_function=perform_internet_search_rag,
+            max_rag_hops=2, # Allow up to two refinement hops
+            rag_top_k=2, # Get 2 results per query
+            rag_min_similarity_percent=40.0, # Lower similarity to maybe get broader initial results
+            streaming_callback=rag_streaming_callback,
+            n_predict=500 # Allow more for the installation steps and features
+        )
+        print("\n--- End of More Complex Multi-Hop Search RAG (up to 2 hops) ---")
+        ASCIIColors.magenta("\nMore Complex Multi-Hop Search RAG (up to 2 hops) Final Output Structure:")
+        print(f" Final Answer (first 100 chars): {multihop_rag_result_2.get('final_answer', '')[:100]}...")
+        print(f" Error: {multihop_rag_result_2.get('error')}")
+        print(f" Number of Hops Made: {len(multihop_rag_result_2.get('rag_hops_history', []))}")
+        for i, hop_info in enumerate(multihop_rag_result_2.get('rag_hops_history', [])):
+            print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
+            print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
+            print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
+        print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_2.get('all_retrieved_sources', []))}")
+
+
+    except ValueError as ve:
+        ASCIIColors.error(f"Initialization or RAG parameter error: {ve}")
+        trace_exception(ve)
+    except ConnectionRefusedError:
+        ASCIIColors.error(f"Connection refused. Is the Ollama server ({LLM_BINDING_NAME}) running?")
+    except Exception as e:
+        ASCIIColors.error(f"An unexpected error occurred: {e}")
+        trace_exception(e)
+
+    ASCIIColors.red("\n--- Multi-Hop Internet Search Example Finished ---")
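As a quick sanity check of the rank-to-similarity formula the search wrapper above uses, 100 - i * (100 / (top_k + 1)), here is what it yields for top_k=5; purely illustrative arithmetic:

top_k = 5
for i in range(top_k):
    print(f"rank {i + 1}: {round(max(0.0, 100.0 - i * (100.0 / (top_k + 1))), 2)}%")
# rank 1: 100.0%, rank 2: 83.33%, rank 3: 66.67%, rank 4: 50.0%, rank 5: 33.33%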
{lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/__init__.py
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
 
-__version__ = "0.19.1"
+__version__ = "0.19.6" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
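A quick way to confirm an upgrade picked up this release (assuming a standard pip install -U lollms-client):

import lollms_client
print(lollms_client.__version__)  # expected: 0.19.6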
{lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client/lollms_core.py
@@ -12,7 +12,7 @@ from lollms_client.lollms_ttv_binding import LollmsTTVBinding, LollmsTTVBindingM
 from lollms_client.lollms_ttm_binding import LollmsTTMBinding, LollmsTTMBindingManager
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 
-import json
+import json, re
 from enum import Enum
 import base64
 import requests
@@ -853,6 +853,243 @@ Respond with a JSON object containing ONE of the following structures:
         turn_history.append({"type":"final_answer_generated", "content":final_answer_text})
         return {"final_answer": final_answer_text, "tool_calls": tool_calls_made_this_turn, "error": None}
 
+    # --- RAG ---
+
+    def generate_text_with_rag(
+        self,
+        prompt: str,
+        rag_query_function: Callable[[str, Optional[str], int, float], List[Dict[str, Any]]],
+        rag_query_text: Optional[str] = None,
+        rag_vectorizer_name: Optional[str] = None,
+        rag_top_k: int = 5,
+        rag_min_similarity_percent: float = 70.0,
+        max_rag_hops: int = 0,
+        images: Optional[List[str]] = None,
+        system_prompt: str = "",
+        n_predict: Optional[int] = None,
+        stream: Optional[bool] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        repeat_penalty: Optional[float] = None,
+        repeat_last_n: Optional[int] = None,
+        seed: Optional[int] = None,
+        n_threads: Optional[int] = None,
+        ctx_size: int | None = None,
+        streaming_callback: Optional[Callable[[str, MSG_TYPE, Optional[Dict], Optional[List]], bool]] = None,
+        rag_hop_query_generation_temperature: float = 0.2,
+        rag_hop_summary_temperature: float = 0.3,
+        **llm_generation_kwargs
+    ) -> Dict[str, Any]:
+        if not self.binding:
+            return {"final_answer": "", "rag_hops_history": [], "all_retrieved_sources": [], "error": "LLM binding not initialized."}
+
+        turn_rag_history_for_callback: List[Dict[str, Any]] = []
+        accumulated_rag_context_str = ""
+        rag_hops_details_list: List[Dict[str, Any]] = []
+        all_unique_retrieved_chunks_map: Dict[str, Dict[str, Any]] = {} # To store unique chunks by content hash or path+text
+        current_query_for_rag = rag_query_text
+        original_user_prompt = prompt
+
+        for hop_count in range(max_rag_hops + 1):
+            if streaming_callback:
+                streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP, {"type": "rag_hop_start", "hop": hop_count + 1}, turn_rag_history_for_callback)
+
+            # 1. Determine/Generate RAG Query Text
+            if hop_count > 0 or (current_query_for_rag is None and max_rag_hops > 0):
+                if streaming_callback:
+                    streaming_callback("LLM generating refined RAG query...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_query_generation", "hop": hop_count + 1}, turn_rag_history_for_callback)
+
+                query_gen_prompt_parts = [
+                    f"{self.system_full_header}You are an expert research assistant. Your task is to formulate the best possible search query to find information relevant to the user's original request, considering the information already gathered.",
+                    f"{self.user_full_header}Original user request: '{original_user_prompt}'"
+                ]
+                if accumulated_rag_context_str:
+                    query_gen_prompt_parts.append(f"Information gathered so far (summaries):\n{accumulated_rag_context_str}")
+                if rag_hops_details_list:
+                    query_gen_prompt_parts.append("Previous search attempts and their summarized findings:")
+                    for prev_hop in rag_hops_details_list:
+                        query_gen_prompt_parts.append(f" - Queried for: '{prev_hop['query']}', Summary: '{prev_hop.get('new_information_summary', 'N/A')}'")
+
+                query_gen_prompt_parts.append("Based on this, what is the most effective and specific search query to perform next to get closer to answering the user's request? Output only the search query text, nothing else.")
+                query_gen_prompt_parts.append(self.ai_full_header)
+
+                new_query_text_raw = self.remove_thinking_blocks(self.generate_text(prompt="".join(query_gen_prompt_parts), temperature=rag_hop_query_generation_temperature, n_predict=100, stream=False))
+                if isinstance(new_query_text_raw, dict) and "error" in new_query_text_raw:
+                    return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Failed to generate RAG query: {new_query_text_raw['error']}"}
+
+                current_query_for_rag = new_query_text_raw.strip().replace("Search query:", "").replace("Query:", "").strip("\"'")
+
+                if streaming_callback:
+                    streaming_callback(f"Generated RAG query: {current_query_for_rag}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_query_generation", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
+
+            elif current_query_for_rag is None and max_rag_hops == 0:
+                current_query_for_rag = original_user_prompt
+
+            if not current_query_for_rag:
+                if max_rag_hops > 0 and hop_count < max_rag_hops:
+                    ASCIIColors.warning(f"RAG Hop {hop_count + 1}: Generated query was empty. Skipping hop.")
+                    rag_hops_details_list.append({"query": "EMPTY_QUERY_SKIPPED", "retrieved_chunks_details": [], "new_information_summary": "Skipped due to empty query.", "llm_decision_json": {"need_more_data": True if hop_count < max_rag_hops -1 else False}})
+                    turn_rag_history_for_callback.append({"type":"rag_hop_info", "hop": hop_count + 1, "query": "EMPTY_QUERY_SKIPPED", "summary":"Skipped."})
+                    continue
+                else:
+                    ASCIIColors.warning("RAG query is empty. Proceeding without RAG context.")
+                    break
+
+            # 2. Perform RAG Query
+            if streaming_callback:
+                streaming_callback(f"Querying knowledge base for: '{current_query_for_rag}'...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_retrieval", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
+
+            try:
+                retrieved_chunks_raw = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
+            except Exception as e_rag_query:
+                trace_exception(e_rag_query)
+                return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"RAG query function failed: {e_rag_query}"}
+
+            if streaming_callback:
+                streaming_callback(f"Retrieved {len(retrieved_chunks_raw)} chunks.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_retrieval", "hop": hop_count + 1, "num_chunks": len(retrieved_chunks_raw)}, turn_rag_history_for_callback)
+
+            current_hop_details = {"query": current_query_for_rag, "retrieved_chunks_details": []}
+
+            formatted_new_chunks_for_llm_summary = ""
+            if retrieved_chunks_raw:
+                for i, chunk in enumerate(retrieved_chunks_raw):
+                    doc_path = chunk.get('file_path', 'Unknown Document')
+                    similarity = chunk.get('similarity_percent', 'N/A')
+                    content = chunk.get('chunk_text', '')
+
+                    chunk_detail_for_history = {"document": doc_path, "similarity": similarity, "content": content}
+                    current_hop_details["retrieved_chunks_details"].append(chunk_detail_for_history)
+
+                    # Add to unique list for final output
+                    # Use a combination of path and content to uniquely identify a chunk to avoid duplicates if same content appears from different queries.
+                    # A more robust unique key might involve hashing content if it's very large.
+                    unique_key = f"{doc_path}::{content[:100]}" # Simple key
+                    if unique_key not in all_unique_retrieved_chunks_map:
+                        all_unique_retrieved_chunks_map[unique_key] = chunk_detail_for_history
+
+                    # Format for LLM processing (summary or direct use)
+                    formatted_new_chunks_for_llm_summary += f"Document: {doc_path} (Similarity: {similarity}%)\nContent:\n{content}\n---\n"
+
+            if not retrieved_chunks_raw:
+                current_hop_details["new_information_summary"] = "No relevant information found for this query."
+                current_hop_details["llm_decision_json"] = {"need_more_data": True if max_rag_hops > 0 and hop_count < max_rag_hops -1 else False, "reasoning_for_decision":"No new information retrieved."}
+                rag_hops_details_list.append(current_hop_details)
+                turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
+                if max_rag_hops == 0 or hop_count >= max_rag_hops -1 :
+                    break
+                else:
+                    accumulated_rag_context_str += f"\n\n---\nAttempted query: '{current_query_for_rag}' - No new information found.\n---"
+                    continue
+
+            if max_rag_hops == 0: # Classic RAG
+                accumulated_rag_context_str += formatted_new_chunks_for_llm_summary
+                current_hop_details["new_information_summary"] = "Directly used in context (classic RAG)."
+                current_hop_details["llm_decision_json"] = {"need_more_data": False}
+                rag_hops_details_list.append(current_hop_details)
+                turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
+                break
+
+            # Multi-hop: LLM summarizes and decides
+            if streaming_callback:
+                streaming_callback("LLM processing retrieved data and deciding next step...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_llm_decision", "hop": hop_count + 1}, turn_rag_history_for_callback)
+
+            decision_prompt_llm_parts = [
+                f"{self.system_full_header}You are an AI research assistant. Analyze newly retrieved information against the user's request and prior knowledge, then decide if more searching is needed.",
+                f"{self.user_full_header}Original user request: '{original_user_prompt}'",
+            ]
+            if accumulated_rag_context_str:
+                decision_prompt_llm_parts.append(f"Current accumulated knowledge summary:\n{accumulated_rag_context_str}")
+            decision_prompt_llm_parts.append(f"You just searched for: '{current_query_for_rag}'")
+            decision_prompt_llm_parts.append(f"And found this new information:\n--- New Information Start ---\n{formatted_new_chunks_for_llm_summary}--- New Information End ---")
+            decision_prompt_llm_parts.append(
+                "Task: Provide a concise summary of ONLY the new information relevant to the original request. "
+                "Then, assess if you now have sufficient information to comprehensively answer the user's original request or if another, more targeted search is necessary. "
+                "Respond STRICTLY in the following JSON format, with no other text before or after the JSON block:"
+            )
+            json_template_for_decision = """
+{
+"new_information_summary": "<Your concise summary of ONLY the new_information relevant to the original_user_request. Focus on what's new and useful. If nothing new is relevant, state that.>",
+"need_more_data": <true_or_false>,
+"reasoning_for_decision": "<Briefly explain why you need more data or why you have enough. If needing more, suggest what kind of information is still missing.>"
+}
+"""
+            decision_prompt_llm_parts.append(f"```json\n{json_template_for_decision}\n```")
+            decision_prompt_llm_parts.append(self.ai_full_header)
+
+            llm_decision_json_str = self.generate_code(prompt="".join(decision_prompt_llm_parts), language="json", template=json_template_for_decision, temperature=rag_hop_summary_temperature, max_size=1024)
+
+            if isinstance(llm_decision_json_str, dict) and "error" in llm_decision_json_str:
+                return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"LLM failed to make RAG decision: {llm_decision_json_str['error']}"}
+            if not llm_decision_json_str:
+                return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": "LLM provided empty decision for RAG hop."}
+
+            try:
+                llm_decision = json.loads(llm_decision_json_str)
+            except json.JSONDecodeError:
+                try:
+                    match = re.search(r"```json\s*(\{.*?\})\s*```", llm_decision_json_str, re.DOTALL)
+                    if match: llm_decision = json.loads(match.group(1))
+                    else: llm_decision = json.loads(self.extract_code_blocks(llm_decision_json_str, format="markdown")[0]["content"])
+                except Exception as e_json_parse:
+                    trace_exception(e_json_parse)
+                    return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Failed to parse LLM RAG decision JSON: {llm_decision_json_str}. Error: {e_json_parse}"}
+
+            new_summary = llm_decision.get("new_information_summary", "Summary not provided by LLM.")
+            need_more_data = llm_decision.get("need_more_data", True)
+
+            current_hop_details["new_information_summary"] = new_summary
+            current_hop_details["llm_decision_json"] = llm_decision
+            rag_hops_details_list.append(current_hop_details)
+            turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
+
+            if streaming_callback:
+                streaming_callback(f"LLM decision: Summary='{new_summary[:100]}...', NeedMoreData={need_more_data}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_llm_decision", "hop": hop_count + 1, "decision": llm_decision}, turn_rag_history_for_callback)
+
+            accumulated_rag_context_str += f"\n\n--- Summary of findings from query '{current_query_for_rag}' (Hop {hop_count + 1}) ---\n{new_summary}\n---"
+
+            if not need_more_data or hop_count >= max_rag_hops -1 : # Subtract 1 because current hop is finishing
+                break
+
+        # 4. Final Answer Generation
+        if streaming_callback:
+            streaming_callback("LLM generating final answer using all gathered information...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
+
+        final_answer_prompt_parts = []
+        if system_prompt:
+            final_answer_prompt_parts.append(f"{self.system_full_header}{system_prompt}")
+
+        final_answer_prompt_parts.append(f"{self.user_full_header}Original request: {original_user_prompt}")
+        if accumulated_rag_context_str:
+            final_answer_prompt_parts.append(f"\nBased on the information I have gathered:\n--- Gathered Context Start ---\n{accumulated_rag_context_str.strip()}\n--- Gathered Context End ---")
+        else:
+            final_answer_prompt_parts.append("\n(No specific information was retrieved from the knowledge base for this request.)")
+
+        final_answer_prompt_parts.append("\nPlease provide a comprehensive answer to the original request using ONLY the provided gathered context. If the context is insufficient, clearly state that.")
+        final_answer_prompt_parts.append(self.ai_full_header)
+
+        final_answer_llm_prompt = "\n".join(final_answer_prompt_parts)
+
+        final_answer_streaming_callback = None
+        if streaming_callback:
+            def final_answer_cb_adapter(chunk, msg_type):
+                return streaming_callback(chunk, msg_type, {"type": "final_answer_chunk"}, turn_rag_history_for_callback)
+            final_answer_streaming_callback = final_answer_cb_adapter
+
+        final_answer_text = self.remove_thinking_blocks(self.generate_text(
+            prompt=final_answer_llm_prompt, images=images,
+            n_predict=n_predict, stream=stream, temperature=temperature, top_k=top_k, top_p=top_p,
+            repeat_penalty=repeat_penalty, repeat_last_n=repeat_last_n, seed=seed, n_threads=n_threads,
+            ctx_size=ctx_size, streaming_callback=final_answer_streaming_callback, **llm_generation_kwargs
+        ))
+
+        if streaming_callback:
+            streaming_callback("Final answer generation complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
+
+        if isinstance(final_answer_text, dict) and "error" in final_answer_text:
+            return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Final answer generation failed: {final_answer_text['error']}"}
+
+        return {"final_answer": final_answer_text, "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": None}
 
     def generate_code(
         self,
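Whatever path it takes, the new method above always returns the same four-key dictionary (final_answer, rag_hops_history, all_retrieved_sources, error), so callers can handle it uniformly. A small helper illustrating the shape; the printing choices are illustrative:

from typing import Any, Dict

def handle_rag_result(result: Dict[str, Any]) -> str:
    # Keys and per-hop/per-source fields as produced by generate_text_with_rag above.
    if result["error"]:
        raise RuntimeError(result["error"])
    for hop in result["rag_hops_history"]:
        print("hop query:", hop["query"], "| chunks:", len(hop["retrieved_chunks_details"]))
    for src in result["all_retrieved_sources"]:
        print("source:", src["document"], f"({src['similarity']}%)")
    return result["final_answer"]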
{lollms_client-0.19.1 → lollms_client-0.19.6}/lollms_client.egg-info/SOURCES.txt
@@ -2,6 +2,8 @@ LICENSE
 README.md
 pyproject.toml
 examples/function_calling_with_local_custom_mcp.py
+examples/generate_text_with_multihop_rag_example.py
+examples/internet_search_with_rag.py
 examples/local_mcp.py
 examples/simple_text_gen_test.py
 examples/simple_text_gen_with_image_test.py
All remaining files listed above are renamed from the 0.19.1 path prefix to the 0.19.6 path prefix with no content changes.