lollms-client 0.19.5__py3-none-any.whl → 0.19.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

examples/internet_search_with_rag.py CHANGED
@@ -4,75 +4,76 @@ from typing import List, Dict, Any, Optional, Callable
4
4
  import json
5
5
  from pathlib import Path
6
6
 
7
- # --- Dependency Management for the Search Tool ---
8
- # Ensure the duckduckgo_search library is installed for our RAG query function.
7
+ # --- Internet Search RAG Implementation ---
8
+ _duckduckgo_search_installed = False
9
+ _search_installation_error_message = ""
9
10
  try:
10
11
  import pipmaster as pm
11
- pm.ensure_packages(["duckduckgo_search"])
12
+ # ensure_packages should be called by the binding init ideally,
13
+ # but we call it here for the example's standalone execution.
14
+ pm.ensure_packages(["duckduckgo_search"])
12
15
  from duckduckgo_search import DDGS
13
- _ddgs_installed = True
14
- except Exception as e_dep:
15
- _ddgs_installed = False
16
- ASCIIColors.error(f"Could not ensure/import duckduckgo_search: {e_dep}")
17
- ASCIIColors.warning("The RAG function in this example will not work.")
16
+ _duckduckgo_search_installed = True
17
+ except Exception as e:
18
+ _search_installation_error_message = str(e)
18
19
  DDGS = None
19
- # --- End Dependency Management ---
20
+ ASCIIColors.error(f"Failed to import duckduckgo_search: {_search_installation_error_message}")
21
+ ASCIIColors.info("Please install it: pip install duckduckgo-search")
20
22
 
21
23
 
22
- def internet_rag_query_function(
24
+ def perform_internet_search_rag(
23
25
  query_text: str,
24
- vectorizer_name: Optional[str] = None, # Not used for this keyword-based search
26
+ vectorizer_name: Optional[str] = None, # Not used for search
25
27
  top_k: int = 5,
26
- min_similarity_percent: float = 0.0 # Not used for this keyword-based search
28
+ min_similarity_percent: float = 0.0 # Not used directly for search filter, but can influence result quality/rank
27
29
  ) -> List[Dict[str, Any]]:
28
30
  """
29
- A RAG-compatible query function that performs a live internet search using DuckDuckGo.
30
-
31
- Args:
32
- query_text: The search query.
33
- vectorizer_name: Ignored by this function.
34
- top_k: The maximum number of search results to return.
35
- min_similarity_percent: Ignored by this function.
36
-
37
- Returns:
38
- A list of dictionaries, each formatted for RAG with 'document', 'content', and 'similarity'.
31
+ Performs an internet search using DuckDuckGo and formats results for RAG.
32
+ Similarity is simulated based on rank.
39
33
  """
40
- if not _ddgs_installed:
41
- ASCIIColors.error("duckduckgo_search library is not available. Cannot perform internet search.")
42
- return []
34
+ if not _duckduckgo_search_installed or DDGS is None:
35
+ ASCIIColors.error("duckduckgo_search is not available. Cannot perform internet search.")
36
+ return []
43
37
 
44
- ASCIIColors.magenta(f" [INTERNET RAG] Searching web for: '{query_text}', max_results={top_k}")
45
- formatted_results = []
38
+ ASCIIColors.magenta(f" [INTERNET SEARCH] Querying DuckDuckGo for: '{query_text}', max_results={top_k}")
39
+ search_results_raw = []
46
40
  try:
41
+ # DDGS().text returns a generator, max_results limits it.
42
+ # Note: The DDGS library might sometimes return fewer results than max_results.
47
43
  with DDGS() as ddgs:
48
- # Fetch search results from DuckDuckGo
49
- search_results = ddgs.text(keywords=query_text, max_results=top_k)
44
+ search_results_raw = list(ddgs.text(keywords=query_text, max_results=top_k))
50
45
 
51
- if not search_results:
52
- ASCIIColors.yellow(" [INTERNET RAG] DuckDuckGo returned no results for this query.")
53
- return []
54
-
55
- for i, result in enumerate(search_results):
56
- # Format the search result into the structure expected by generate_text_with_rag
57
- # 'document' will be the URL.
58
- # 'content' will be a combination of title and snippet.
59
- # 'similarity' is emulated based on rank, as DDG doesn't provide a score.
60
- formatted_results.append({
61
- "document": result.get("href", "#"),
62
- "similarity": round(100.0 - (i * (10.0 / top_k)), 2), # Create a descending score
63
- "content": f"Title: {result.get('title', 'N/A')}\nSnippet: {result.get('body', 'N/A')}"
64
- })
65
-
66
- ASCIIColors.magenta(f" [INTERNET RAG] Found {len(formatted_results)} results.")
67
- return formatted_results
68
-
69
46
  except Exception as e:
47
+ ASCIIColors.error(f" [INTERNET SEARCH] Search failed: {e}")
70
48
  trace_exception(e)
71
- ASCIIColors.error(f" [INTERNET RAG] An error occurred during search: {e}")
72
49
  return []
73
50
 
51
+ formatted_results: List[Dict[str, Any]] = []
52
+ if search_results_raw:
53
+ for i, r in enumerate(search_results_raw):
54
+ # Simulate similarity based on rank (rank 1 is highest sim)
55
+ # Max similarity is 100% for rank 1, decreases linearly or non-linearly.
56
+ # Simple linear decrease: 100 - (rank * (100 / top_k+1))
57
+ # Let's use rank-based score: 100% for rank 1, 90% for rank 2, ... 50% for rank 5 etc.
58
+ # Ensure similarity is above min_similarity_percent if that param was intended as a filter here
59
+
60
+ simulated_similarity = max(0.0, 100.0 - i * (100.0 / (top_k + 1))) # Higher rank = lower sim
61
+ simulated_similarity = round(simulated_similarity, 2)
62
+
63
+ if simulated_similarity >= min_similarity_percent:
64
+ formatted_results.append({
65
+ "file_path": r.get("href", "# Unknown URL"), # Use URL as document identifier
66
+ "chunk_text": f"Title: {r.get('title', 'N/A')}\nSnippet: {r.get('body', 'N/A')}", # Combine title and snippet
67
+ "similarity_percent": simulated_similarity,
68
+ })
69
+ else:
70
+ ASCIIColors.debug(f" [INTERNET SEARCH] Skipping result {i+1} due to low simulated similarity ({simulated_similarity}%)")
71
+
72
+ ASCIIColors.magenta(f" [INTERNET SEARCH] Formatted {len(formatted_results)} results for RAG.")
73
+ if not formatted_results: ASCIIColors.yellow(f" [INTERNET SEARCH] No results found for query: '{query_text}' or none met min_similarity_percent.")
74
+ return formatted_results
75
+
74
76
  # --- Streaming Callback for RAG and LLM ---
75
- # (This is the same useful callback from the previous example)
76
77
  def rag_streaming_callback(
77
78
  chunk: str,
78
79
  msg_type: MSG_TYPE,
@@ -83,7 +84,7 @@ def rag_streaming_callback(
83
84
  hop = metadata.get("hop", "")
84
85
  type_info = metadata.get("type", "N/A")
85
86
 
86
- if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
87
+ if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Final answer chunks
87
88
  ASCIIColors.success(chunk, end="", flush=True)
88
89
  elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
89
90
  info = metadata.get("query", chunk) if type_info in ["rag_query_generation", "rag_retrieval"] else chunk
@@ -95,8 +96,8 @@ def rag_streaming_callback(
95
96
 
96
97
  end_info = []
97
98
  if query: end_info.append(f"Query: '{str(query)[:50]}...'")
98
- if num_chunks is not None: end_info.append(f"Retrieved: {num_chunks} sources")
99
- if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:40]}...'")
99
+ if num_chunks is not None: end_info.append(f"Results: {num_chunks}")
100
+ if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:50]}...'")
100
101
 
101
102
  ASCIIColors.green(f"\n<< RAG Hop {hop} | END | {type_info.upper()} | {' | '.join(end_info) if end_info else chunk}", flush=True)
102
103
  elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
@@ -106,75 +107,113 @@ def rag_streaming_callback(
106
107
 
107
108
  # --- Main Example ---
108
109
  if __name__ == "__main__":
109
- ASCIIColors.red("--- Internet Search with Multi-Hop RAG Example ---")
110
+ ASCIIColors.red("--- Multi-Hop Internet Search Example with LollmsClient ---")
110
111
 
112
+ # LLM Configuration (use a model good at instruction following and JSON)
113
+ # Ensure your Ollama server is running and has this model pulled.
111
114
  LLM_BINDING_NAME = "ollama"
112
- LLM_MODEL_NAME = "mistral-nemo:latest" # Nemo is good with JSON and reasoning
115
+ LLM_MODEL_NAME = "mistral:latest" # or llama3, phi3 etc.
113
116
 
114
- if not _ddgs_installed:
115
- ASCIIColors.error("Cannot run this example because the 'duckduckgo-search' library is not installed.")
116
- exit(1)
117
+ # You could also enable the internet_search tool via MCP,
118
+ # but this example specifically uses it directly via generate_text_with_rag.
119
+ # For MCP example, see examples/local_mcp.py
117
120
 
118
121
  try:
119
122
  lc = LollmsClient(
120
123
  binding_name=LLM_BINDING_NAME,
121
124
  model_name=LLM_MODEL_NAME,
122
125
  temperature=0.1,
123
- ctx_size=4096
126
+ ctx_size=4096
124
127
  )
125
128
  ASCIIColors.green(f"LollmsClient initialized with LLM: {LLM_BINDING_NAME}/{LLM_MODEL_NAME}")
126
129
 
127
- # --- Test Case 1: Classic RAG with Internet Search ---
128
- ASCIIColors.cyan("\n\n--- Test Case 1: Classic RAG (max_rag_hops = 0) using Internet Search ---")
129
- classic_rag_prompt = "What is the James Webb Space Telescope and what was its launch date?"
130
- ASCIIColors.blue(f"User Prompt: {classic_rag_prompt}")
130
+ if not _duckduckgo_search_installed or DDGS is None:
131
+ ASCIIColors.error("duckduckgo_search is not installed. Cannot run search examples.")
132
+ exit()
133
+
134
+
135
+ # --- Test Case 1: Classic Search RAG (max_rag_hops = 0) ---
136
+ ASCIIColors.cyan("\n\n--- Test Case 1: Classic Internet Search RAG (max_rag_hops = 0) ---")
137
+ classic_search_prompt = "What is the current population of Japan?"
138
+ ASCIIColors.blue(f"User Prompt: {classic_search_prompt}")
131
139
 
132
140
  classic_rag_result = lc.generate_text_with_rag(
133
- prompt=classic_rag_prompt,
134
- rag_query_function=internet_rag_query_function,
141
+ prompt=classic_search_prompt,
142
+ rag_query_function=perform_internet_search_rag, # Use the search function
135
143
  max_rag_hops=0,
136
- rag_top_k=3,
144
+ rag_top_k=3, # Get 3 search results
145
+ rag_min_similarity_percent=50.0, # Only use results with simulated sim >= 50%
137
146
  streaming_callback=rag_streaming_callback,
138
- n_predict=300
147
+ n_predict=250
139
148
  )
140
- print("\n--- End of Classic RAG ---")
141
- ASCIIColors.magenta("\nClassic RAG Final Output Details:")
142
- print(f" Final Answer (first 150 chars): {classic_rag_result.get('final_answer', '')[:150]}...")
149
+ print("\n--- End of Classic Search RAG ---")
150
+ ASCIIColors.magenta("\nClassic Search RAG Final Output Structure:")
151
+ print(f" Final Answer (first 100 chars): {classic_rag_result.get('final_answer', '')[:100]}...")
143
152
  print(f" Error: {classic_rag_result.get('error')}")
153
+ print(f" Number of Hops: {len(classic_rag_result.get('rag_hops_history', []))}")
144
154
  print(f" Total Unique Sources Retrieved: {len(classic_rag_result.get('all_retrieved_sources', []))}")
145
155
  if classic_rag_result.get('all_retrieved_sources'):
146
- print(" Retrieved Sources (URLs):")
147
- for source in classic_rag_result['all_retrieved_sources']:
148
- print(f" - {source.get('document')}")
149
-
150
- # --- Test Case 2: Multi-Hop RAG with Internet Search ---
151
- ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop RAG (max_rag_hops = 2) using Internet Search ---")
152
- multihop_prompt = "First, find out what the TRAPPIST-1 system is. Then, search for recent news about its planets from the James Webb Space Telescope."
153
- ASCIIColors.blue(f"User Prompt: {multihop_prompt}")
154
-
155
- multihop_rag_result = lc.generate_text_with_rag(
156
- prompt=multihop_prompt,
157
- rag_query_function=internet_rag_query_function,
158
- rag_query_text=None, # Let the LLM generate the first query
159
- max_rag_hops=2, # Allow up to two separate search queries
160
- rag_top_k=2,
156
+ print(" Example Retrieved Source:")
157
+ source_ex = classic_rag_result['all_retrieved_sources'][0]
158
+ print(f" Document (URL): {source_ex.get('document')}")
159
+ print(f" Similarity: {source_ex.get('similarity')}%")
160
+ print(f" Content (Snippet, first 50 chars): {source_ex.get('content', '')[:50]}...")
161
+
162
+
163
+ # --- Test Case 2: Multi-Hop Search RAG (max_rag_hops = 1) ---
164
+ ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop Internet Search RAG (max_rag_hops = 1) ---")
165
+ multihop_search_prompt_1 = "Tell me about the latest developments in fusion energy, including any recent news."
166
+ ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_1}")
167
+
168
+ multihop_rag_result_1 = lc.generate_text_with_rag(
169
+ prompt=multihop_search_prompt_1,
170
+ rag_query_function=perform_internet_search_rag,
171
+ rag_query_text=None, # LLM will generate first query
172
+ max_rag_hops=1, # Allow one refinement hop
173
+ rag_top_k=2, # Get 2 search results per query
174
+ rag_min_similarity_percent=50.0,
161
175
  streaming_callback=rag_streaming_callback,
162
176
  n_predict=400,
177
+ rag_hop_query_generation_temperature=0.1,
178
+ rag_hop_summary_temperature=0.2
163
179
  )
164
- print("\n--- End of Multi-Hop RAG ---")
165
- ASCIIColors.magenta("\nMulti-Hop RAG Final Output Details:")
166
- print(f" Final Answer (first 150 chars): {multihop_rag_result.get('final_answer', '')[:150]}...")
167
- print(f" Error: {multihop_rag_result.get('error')}")
168
- print(f" Number of Hops Made: {len(multihop_rag_result.get('rag_hops_history', []))}")
169
- for i, hop_info in enumerate(multihop_rag_result.get('rag_hops_history', [])):
180
+ print("\n--- End of Multi-Hop Search RAG (1 hop max) ---")
181
+ ASCIIColors.magenta("\nMulti-Hop Search RAG (1 hop max) Final Output Structure:")
182
+ print(f" Final Answer (first 100 chars): {multihop_rag_result_1.get('final_answer', '')[:100]}...")
183
+ print(f" Error: {multihop_rag_result_1.get('error')}")
184
+ print(f" Number of Hops Made: {len(multihop_rag_result_1.get('rag_hops_history', []))}")
185
+ for i, hop_info in enumerate(multihop_rag_result_1.get('rag_hops_history', [])):
170
186
  print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
171
- print(f" Hop {i+1} Retrieved Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
187
+ print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
188
+ print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
172
189
  print(f" Hop {i+1} LLM Decision: NeedMoreData={hop_info.get('llm_decision_json',{}).get('need_more_data')}")
173
- print(f" Total Unique Sources Retrieved: {len(multihop_rag_result.get('all_retrieved_sources', []))}")
174
- if multihop_rag_result.get('all_retrieved_sources'):
175
- print(" All Retrieved Sources (URLs):")
176
- for source in multihop_rag_result['all_retrieved_sources']:
177
- print(f" - {source.get('document')}")
190
+ print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_1.get('all_retrieved_sources', []))}")
191
+
192
+
193
+ # --- Test Case 3: More complex multi-hop (max_rag_hops = 2) ---
194
+ ASCIIColors.cyan("\n\n--- Test Case 3: More Complex Multi-Hop Internet Search RAG (max_rag_hops = 2) ---")
195
+ multihop_search_prompt_2 = "What are the requirements and steps to install the lollms_client python library, and what are some of its key features?"
196
+ ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_2}")
197
+
198
+ multihop_rag_result_2 = lc.generate_text_with_rag(
199
+ prompt=multihop_search_prompt_2,
200
+ rag_query_function=perform_internet_search_rag,
201
+ max_rag_hops=2, # Allow up to two refinement hops
202
+ rag_top_k=2, # Get 2 results per query
203
+ rag_min_similarity_percent=40.0, # Lower similarity to maybe get broader initial results
204
+ streaming_callback=rag_streaming_callback,
205
+ n_predict=500 # Allow more for the installation steps and features
206
+ )
207
+ print("\n--- End of More Complex Multi-Hop Search RAG (up to 2 hops) ---")
208
+ ASCIIColors.magenta("\nMore Complex Multi-Hop Search RAG (up to 2 hops) Final Output Structure:")
209
+ print(f" Final Answer (first 100 chars): {multihop_rag_result_2.get('final_answer', '')[:100]}...")
210
+ print(f" Error: {multihop_rag_result_2.get('error')}")
211
+ print(f" Number of Hops Made: {len(multihop_rag_result_2.get('rag_hops_history', []))}")
212
+ for i, hop_info in enumerate(multihop_rag_result_2.get('rag_hops_history', [])):
213
+ print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
214
+ print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
215
+ print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
216
+ print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_2.get('all_retrieved_sources', []))}")
178
217
 
179
218
 
180
219
  except ValueError as ve:
@@ -186,4 +225,4 @@ if __name__ == "__main__":
186
225
  ASCIIColors.error(f"An unexpected error occurred: {e}")
187
226
  trace_exception(e)
188
227
 
189
- ASCIIColors.red("\n--- Internet Search RAG Example Finished ---")
228
+ ASCIIColors.red("\n--- Multi-Hop Internet Search Example Finished ---")
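For orientation, the rank-based scoring that the rewritten example relies on can be reproduced in isolation. The sketch below is not part of the package; it only re-implements the max(0.0, 100.0 - i * (100.0 / (top_k + 1))) formula from the diff above and prints which ranks would survive the rag_min_similarity_percent=50.0 filter used in Test Case 1 (the helper name simulated_similarity is invented for illustration).

# Standalone sketch of the rank-based score used by perform_internet_search_rag.
# DuckDuckGo returns no relevance score, so the example derives one from the
# result rank and then filters against min_similarity_percent.
def simulated_similarity(rank: int, top_k: int) -> float:
    # rank is 0-based: the first result gets the highest score
    return round(max(0.0, 100.0 - rank * (100.0 / (top_k + 1))), 2)

if __name__ == "__main__":
    top_k = 5
    min_similarity_percent = 50.0
    for rank in range(top_k):
        score = simulated_similarity(rank, top_k)
        kept = "kept" if score >= min_similarity_percent else "skipped"
        print(f"rank {rank + 1}: {score:6.2f}% -> {kept}")
    # With top_k=5 this yields 100.00, 83.33, 66.67, 50.00 and 33.33 percent,
    # so a 50% threshold keeps the first four results and drops the fifth.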
lollms_client/__init__.py CHANGED
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
7
7
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
8
8
 
9
9
 
10
- __version__ = "0.19.5" # Updated version
10
+ __version__ = "0.19.7" # Updated version
11
11
 
12
12
  # Optionally, you could define __all__ if you want to be explicit about exports
13
13
  __all__ = [
lollms_client/lollms_core.py CHANGED
@@ -853,8 +853,6 @@ Respond with a JSON object containing ONE of the following structures:
853
853
  turn_history.append({"type":"final_answer_generated", "content":final_answer_text})
854
854
  return {"final_answer": final_answer_text, "tool_calls": tool_calls_made_this_turn, "error": None}
855
855
 
856
- # --- RAG ---
857
-
858
856
  def generate_text_with_rag(
859
857
  self,
860
858
  prompt: str,
@@ -878,16 +876,17 @@ Respond with a JSON object containing ONE of the following structures:
878
876
  ctx_size: int | None = None,
879
877
  streaming_callback: Optional[Callable[[str, MSG_TYPE, Optional[Dict], Optional[List]], bool]] = None,
880
878
  rag_hop_query_generation_temperature: float = 0.2,
881
- rag_hop_summary_temperature: float = 0.3,
879
+ # rag_hop_summary_temperature is no longer needed
880
+ max_rag_context_characters: int = 32000,
882
881
  **llm_generation_kwargs
883
882
  ) -> Dict[str, Any]:
884
883
  if not self.binding:
885
884
  return {"final_answer": "", "rag_hops_history": [], "all_retrieved_sources": [], "error": "LLM binding not initialized."}
886
885
 
887
886
  turn_rag_history_for_callback: List[Dict[str, Any]] = []
888
- accumulated_rag_context_str = ""
889
887
  rag_hops_details_list: List[Dict[str, Any]] = []
890
- all_unique_retrieved_chunks_map: Dict[str, Dict[str, Any]] = {} # To store unique chunks by content hash or path+text
888
+ # Stores all unique chunks with their full details, keyed by a unique identifier (e.g., path + content hash snippet)
889
+ all_unique_retrieved_chunks_map: Dict[str, Dict[str, Any]] = {}
891
890
  current_query_for_rag = rag_query_text
892
891
  original_user_prompt = prompt
893
892
 
@@ -896,205 +895,204 @@ Respond with a JSON object containing ONE of the following structures:
896
895
  streaming_callback(f"Starting RAG Hop {hop_count + 1}", MSG_TYPE.MSG_TYPE_STEP, {"type": "rag_hop_start", "hop": hop_count + 1}, turn_rag_history_for_callback)
897
896
 
898
897
  # 1. Determine/Generate RAG Query Text
899
- if hop_count > 0 or (current_query_for_rag is None and max_rag_hops > 0):
898
+ if hop_count > 0: # Query generation for multi-hop (hop 2 onwards)
900
899
  if streaming_callback:
901
900
  streaming_callback("LLM generating refined RAG query...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_query_generation", "hop": hop_count + 1}, turn_rag_history_for_callback)
902
901
 
902
+ system_prompt_q_gen = "You are an expert research assistant. Your task is to formulate the best possible *new* search query to find additional information relevant to the user's original request, considering previous search attempts."
903
903
  query_gen_prompt_parts = [
904
- f"{self.system_full_header}You are an expert research assistant. Your task is to formulate the best possible search query to find information relevant to the user's original request, considering the information already gathered.",
905
- f"{self.user_full_header}Original user request: '{original_user_prompt}'"
904
+ f"Original user request:\n'{original_user_prompt}'"
906
905
  ]
907
- if accumulated_rag_context_str:
908
- query_gen_prompt_parts.append(f"Information gathered so far (summaries):\n{accumulated_rag_context_str}")
909
906
  if rag_hops_details_list:
910
- query_gen_prompt_parts.append("Previous search attempts and their summarized findings:")
911
- for prev_hop in rag_hops_details_list:
912
- query_gen_prompt_parts.append(f" - Queried for: '{prev_hop['query']}', Summary: '{prev_hop.get('new_information_summary', 'N/A')}'")
907
+ query_gen_prompt_parts.append("\nPrevious search queries and number of chunks found:")
908
+ for i, prev_hop in enumerate(rag_hops_details_list):
909
+ num_chunks_found_in_hop = len(prev_hop.get("retrieved_chunks_details", []))
910
+ query_gen_prompt_parts.append(f" - Query {i+1}: '{prev_hop['query']}' (Found {num_chunks_found_in_hop} chunks)")
913
911
 
914
- query_gen_prompt_parts.append("Based on this, what is the most effective and specific search query to perform next to get closer to answering the user's request? Output only the search query text, nothing else.")
912
+ query_gen_prompt_parts.append("\nBased on the original request and the queries already attempted, what is the most effective and specific *new* search query to perform next to get closer to answering the user's request? The query should aim to find information not likely covered by previous queries. Output only the search query text, nothing else.")
915
913
  query_gen_prompt_parts.append(self.ai_full_header)
916
914
 
917
- new_query_text_raw = self.remove_thinking_blocks(self.generate_text(prompt="".join(query_gen_prompt_parts), temperature=rag_hop_query_generation_temperature, n_predict=100, stream=False))
915
+ new_query_text_raw = self.generate_text(
916
+ prompt="".join(query_gen_prompt_parts),
917
+ system_prompt=system_prompt_q_gen,
918
+ temperature=rag_hop_query_generation_temperature,
919
+ n_predict=100,
920
+ stream=False
921
+ )
922
+
918
923
  if isinstance(new_query_text_raw, dict) and "error" in new_query_text_raw:
919
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Failed to generate RAG query: {new_query_text_raw['error']}"}
924
+ return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Failed to generate RAG query for hop {hop_count + 1}: {new_query_text_raw['error']}"}
920
925
 
921
- current_query_for_rag = new_query_text_raw.strip().replace("Search query:", "").replace("Query:", "").strip("\"'")
926
+ current_query_for_rag = self.remove_thinking_blocks(new_query_text_raw).strip().replace("Search query:", "").replace("Query:", "").strip("\"'")
922
927
 
923
928
  if streaming_callback:
924
- streaming_callback(f"Generated RAG query: {current_query_for_rag}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_query_generation", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
925
-
926
- elif current_query_for_rag is None and max_rag_hops == 0:
929
+ streaming_callback(f"Generated RAG query for hop {hop_count + 1}: {current_query_for_rag}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_query_generation", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
930
+
931
+ elif current_query_for_rag is None: # First hop, and no rag_query_text provided
927
932
  current_query_for_rag = original_user_prompt
933
+
934
+ # If current_query_for_rag was provided as an argument, it's used for the first hop.
928
935
 
929
936
  if not current_query_for_rag:
930
- if max_rag_hops > 0 and hop_count < max_rag_hops:
931
- ASCIIColors.warning(f"RAG Hop {hop_count + 1}: Generated query was empty. Skipping hop.")
932
- rag_hops_details_list.append({"query": "EMPTY_QUERY_SKIPPED", "retrieved_chunks_details": [], "new_information_summary": "Skipped due to empty query.", "llm_decision_json": {"need_more_data": True if hop_count < max_rag_hops -1 else False}})
933
- turn_rag_history_for_callback.append({"type":"rag_hop_info", "hop": hop_count + 1, "query": "EMPTY_QUERY_SKIPPED", "summary":"Skipped."})
934
- continue
935
- else:
936
- ASCIIColors.warning("RAG query is empty. Proceeding without RAG context.")
937
- break
937
+ ASCIIColors.warning(f"RAG Hop {hop_count + 1}: Query is empty. Stopping RAG process.")
938
+ # Add a detail for this aborted hop
939
+ rag_hops_details_list.append({
940
+ "query": "EMPTY_QUERY_STOPPED_HOPS",
941
+ "retrieved_chunks_details": [],
942
+ "status": "Query became empty, RAG stopped."
943
+ })
944
+ turn_rag_history_for_callback.append({"type":"rag_hop_info", "hop": hop_count + 1, "query": "EMPTY_QUERY_STOPPED_HOPS", "status":"Stopped."})
945
+ break # Stop if query is empty
938
946
 
939
947
  # 2. Perform RAG Query
940
948
  if streaming_callback:
941
- streaming_callback(f"Querying knowledge base for: '{current_query_for_rag}'...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_retrieval", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
949
+ streaming_callback(f"Querying knowledge base for (Hop {hop_count + 1}): '{current_query_for_rag}'...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_retrieval", "hop": hop_count + 1, "query": current_query_for_rag}, turn_rag_history_for_callback)
942
950
 
943
951
  try:
944
- retrieved_chunks_raw = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
952
+ retrieved_chunks_raw_this_hop = rag_query_function(current_query_for_rag, rag_vectorizer_name, rag_top_k, rag_min_similarity_percent)
945
953
  except Exception as e_rag_query:
946
954
  trace_exception(e_rag_query)
947
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"RAG query function failed: {e_rag_query}"}
955
+ return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"RAG query function failed on hop {hop_count + 1}: {e_rag_query}"}
948
956
 
949
957
  if streaming_callback:
950
- streaming_callback(f"Retrieved {len(retrieved_chunks_raw)} chunks.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_retrieval", "hop": hop_count + 1, "num_chunks": len(retrieved_chunks_raw)}, turn_rag_history_for_callback)
958
+ streaming_callback(f"Retrieved {len(retrieved_chunks_raw_this_hop)} chunks for hop {hop_count + 1}.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_retrieval", "hop": hop_count + 1, "num_chunks": len(retrieved_chunks_raw_this_hop)}, turn_rag_history_for_callback)
951
959
 
952
- current_hop_details = {"query": current_query_for_rag, "retrieved_chunks_details": []}
953
-
954
- formatted_new_chunks_for_llm_summary = ""
955
- if retrieved_chunks_raw:
956
- for i, chunk in enumerate(retrieved_chunks_raw):
960
+ current_hop_chunk_details_for_history = []
961
+ new_chunks_added_this_hop = 0
962
+ if retrieved_chunks_raw_this_hop:
963
+ for chunk in retrieved_chunks_raw_this_hop:
957
964
  doc_path = chunk.get('file_path', 'Unknown Document')
958
- similarity = chunk.get('similarity_percent', 'N/A')
959
965
  content = chunk.get('chunk_text', '')
966
+ similarity = chunk.get('similarity_percent', 0.0) # Default to 0.0 if not present
967
+
968
+ # Ensure content is string and similarity is float for sorting later
969
+ if not isinstance(content, str): content = str(content)
970
+ try:
971
+ similarity = float(similarity)
972
+ except (ValueError, TypeError):
973
+ similarity = 0.0 # Default if conversion fails
974
+
975
+ chunk_detail_for_map_and_history = {
976
+ "document": doc_path,
977
+ "similarity": similarity,
978
+ "content": content,
979
+ "retrieved_in_hop": hop_count + 1,
980
+ "query_used": current_query_for_rag
981
+ }
982
+ current_hop_chunk_details_for_history.append(chunk_detail_for_map_and_history)
960
983
 
961
- chunk_detail_for_history = {"document": doc_path, "similarity": similarity, "content": content}
962
- current_hop_details["retrieved_chunks_details"].append(chunk_detail_for_history)
963
-
964
- # Add to unique list for final output
965
- # Use a combination of path and content to uniquely identify a chunk to avoid duplicates if same content appears from different queries.
966
- # A more robust unique key might involve hashing content if it's very large.
967
- unique_key = f"{doc_path}::{content[:100]}" # Simple key
984
+ unique_key = f"{doc_path}::{content[:100]}" # Simple key for uniqueness
968
985
  if unique_key not in all_unique_retrieved_chunks_map:
969
- all_unique_retrieved_chunks_map[unique_key] = chunk_detail_for_history
970
-
971
- # Format for LLM processing (summary or direct use)
972
- formatted_new_chunks_for_llm_summary += f"Document: {doc_path} (Similarity: {similarity}%)\nContent:\n{content}\n---\n"
986
+ all_unique_retrieved_chunks_map[unique_key] = chunk_detail_for_map_and_history
987
+ new_chunks_added_this_hop +=1
973
988
 
974
- if not retrieved_chunks_raw:
975
- current_hop_details["new_information_summary"] = "No relevant information found for this query."
976
- current_hop_details["llm_decision_json"] = {"need_more_data": True if max_rag_hops > 0 and hop_count < max_rag_hops -1 else False, "reasoning_for_decision":"No new information retrieved."}
977
- rag_hops_details_list.append(current_hop_details)
978
- turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
979
- if max_rag_hops == 0 or hop_count >= max_rag_hops -1 :
980
- break
981
- else:
982
- accumulated_rag_context_str += f"\n\n---\nAttempted query: '{current_query_for_rag}' - No new information found.\n---"
983
- continue
984
-
985
- if max_rag_hops == 0: # Classic RAG
986
- accumulated_rag_context_str += formatted_new_chunks_for_llm_summary
987
- current_hop_details["new_information_summary"] = "Directly used in context (classic RAG)."
988
- current_hop_details["llm_decision_json"] = {"need_more_data": False}
989
- rag_hops_details_list.append(current_hop_details)
990
- turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
991
- break
992
-
993
- # Multi-hop: LLM summarizes and decides
994
- if streaming_callback:
995
- streaming_callback("LLM processing retrieved data and deciding next step...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "rag_llm_decision", "hop": hop_count + 1}, turn_rag_history_for_callback)
996
-
997
- decision_prompt_llm_parts = [
998
- f"{self.system_full_header}You are an AI research assistant. Analyze newly retrieved information against the user's request and prior knowledge, then decide if more searching is needed.",
999
- f"{self.user_full_header}Original user request: '{original_user_prompt}'",
1000
- ]
1001
- if accumulated_rag_context_str:
1002
- decision_prompt_llm_parts.append(f"Current accumulated knowledge summary:\n{accumulated_rag_context_str}")
1003
- decision_prompt_llm_parts.append(f"You just searched for: '{current_query_for_rag}'")
1004
- decision_prompt_llm_parts.append(f"And found this new information:\n--- New Information Start ---\n{formatted_new_chunks_for_llm_summary}--- New Information End ---")
1005
- decision_prompt_llm_parts.append(
1006
- "Task: Provide a concise summary of ONLY the new information relevant to the original request. "
1007
- "Then, assess if you now have sufficient information to comprehensively answer the user's original request or if another, more targeted search is necessary. "
1008
- "Respond STRICTLY in the following JSON format, with no other text before or after the JSON block:"
1009
- )
1010
- json_template_for_decision = """
1011
- {
1012
- "new_information_summary": "<Your concise summary of ONLY the new_information relevant to the original_user_request. Focus on what's new and useful. If nothing new is relevant, state that.>",
1013
- "need_more_data": <true_or_false>,
1014
- "reasoning_for_decision": "<Briefly explain why you need more data or why you have enough. If needing more, suggest what kind of information is still missing.>"
1015
- }
1016
- """
1017
- decision_prompt_llm_parts.append(f"```json\n{json_template_for_decision}\n```")
1018
- decision_prompt_llm_parts.append(self.ai_full_header)
1019
-
1020
- llm_decision_json_str = self.generate_code(prompt="".join(decision_prompt_llm_parts), language="json", template=json_template_for_decision, temperature=rag_hop_summary_temperature, max_size=1024)
1021
-
1022
- if isinstance(llm_decision_json_str, dict) and "error" in llm_decision_json_str:
1023
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"LLM failed to make RAG decision: {llm_decision_json_str['error']}"}
1024
- if not llm_decision_json_str:
1025
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": "LLM provided empty decision for RAG hop."}
1026
-
1027
- try:
1028
- llm_decision = json.loads(llm_decision_json_str)
1029
- except json.JSONDecodeError:
1030
- try:
1031
- match = re.search(r"```json\s*(\{.*?\})\s*```", llm_decision_json_str, re.DOTALL)
1032
- if match: llm_decision = json.loads(match.group(1))
1033
- else: llm_decision = json.loads(self.extract_code_blocks(llm_decision_json_str, format="markdown")[0]["content"])
1034
- except Exception as e_json_parse:
1035
- trace_exception(e_json_parse)
1036
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Failed to parse LLM RAG decision JSON: {llm_decision_json_str}. Error: {e_json_parse}"}
1037
-
1038
- new_summary = llm_decision.get("new_information_summary", "Summary not provided by LLM.")
1039
- need_more_data = llm_decision.get("need_more_data", True)
1040
-
1041
- current_hop_details["new_information_summary"] = new_summary
1042
- current_hop_details["llm_decision_json"] = llm_decision
989
+ hop_status = "Completed"
990
+ if not retrieved_chunks_raw_this_hop:
991
+ hop_status = "No chunks retrieved for this query."
992
+ elif new_chunks_added_this_hop == 0 and hop_count > 0: # Only consider "no new unique chunks" for subsequent hops
993
+ hop_status = "No *new* unique chunks retrieved."
994
+ # Optionally, could break here if no new unique chunks are found in a multi-hop scenario
995
+ # ASCIIColors.warning(f"RAG Hop {hop_count + 1}: No new unique chunks found. Consider stopping if this persists.")
996
+
997
+
998
+ current_hop_details = {
999
+ "query": current_query_for_rag,
1000
+ "retrieved_chunks_details": current_hop_chunk_details_for_history, # Chunks from THIS hop
1001
+ "status": hop_status
1002
+ }
1043
1003
  rag_hops_details_list.append(current_hop_details)
1044
1004
  turn_rag_history_for_callback.append({"type":"rag_hop_info", **current_hop_details})
1045
1005
 
1006
+ # Reset for next potential query generation if it's not the last planned hop
1007
+ if hop_count < max_rag_hops:
1008
+ current_query_for_rag = None
1009
+ else: # This was the last hop
1010
+ break
1011
+
1012
+
1013
+ # 3. Prepare Final Context from All Unique Retrieved Chunks
1014
+ accumulated_rag_context_str = ""
1015
+ if all_unique_retrieved_chunks_map:
1046
1016
  if streaming_callback:
1047
- streaming_callback(f"LLM decision: Summary='{new_summary[:100]}...', NeedMoreData={need_more_data}", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "rag_llm_decision", "hop": hop_count + 1, "decision": llm_decision}, turn_rag_history_for_callback)
1017
+ streaming_callback("Preparing final RAG context from all retrieved chunks...", MSG_TYPE.MSG_TYPE_STEP, {"type": "context_preparation"}, turn_rag_history_for_callback)
1048
1018
 
1049
- accumulated_rag_context_str += f"\n\n--- Summary of findings from query '{current_query_for_rag}' (Hop {hop_count + 1}) ---\n{new_summary}\n---"
1019
+ # Sort all unique chunks by similarity (highest first)
1020
+ sorted_unique_chunks = sorted(
1021
+ list(all_unique_retrieved_chunks_map.values()),
1022
+ key=lambda c: c.get('similarity', 0.0),
1023
+ reverse=True
1024
+ )
1050
1025
 
1051
- if not need_more_data or hop_count >= max_rag_hops -1 : # Subtract 1 because current hop is finishing
1052
- break
1026
+ current_context_chars = 0
1027
+ chunks_used_in_final_context = 0
1028
+ context_lines = []
1029
+ for chunk in sorted_unique_chunks:
1030
+ chunk_text_to_add = f"Source: {chunk['document']} (Similarity: {chunk['similarity']:.2f}%, Hop: {chunk['retrieved_in_hop']}, Query: '{chunk['query_used']}')\nContent:\n{chunk['content']}\n---\n"
1031
+ if current_context_chars + len(chunk_text_to_add) <= max_rag_context_characters:
1032
+ context_lines.append(chunk_text_to_add)
1033
+ current_context_chars += len(chunk_text_to_add)
1034
+ chunks_used_in_final_context +=1
1035
+ else:
1036
+ ASCIIColors.warning(f"Reached max RAG context character limit ({max_rag_context_characters}). Used {chunks_used_in_final_context} of {len(sorted_unique_chunks)} unique chunks.")
1037
+ break
1038
+ accumulated_rag_context_str = "".join(context_lines)
1039
+
1040
+ if streaming_callback:
1041
+ streaming_callback(f"Final RAG context prepared using {chunks_used_in_final_context} chunks ({current_context_chars} chars).", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "context_preparation", "num_chunks_in_context": chunks_used_in_final_context, "chars_in_context": current_context_chars}, turn_rag_history_for_callback)
1042
+
1053
1043
 
1054
1044
  # 4. Final Answer Generation
1055
1045
  if streaming_callback:
1056
- streaming_callback("LLM generating final answer using all gathered information...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
1046
+ streaming_callback("LLM generating final answer...", MSG_TYPE.MSG_TYPE_STEP_START, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
1057
1047
 
1058
- final_answer_prompt_parts = []
1059
- if system_prompt:
1060
- final_answer_prompt_parts.append(f"{self.system_full_header}{system_prompt}")
1061
-
1062
- final_answer_prompt_parts.append(f"{self.user_full_header}Original request: {original_user_prompt}")
1048
+ final_answer_prompt_parts = [f"Original request: {original_user_prompt}"]
1063
1049
  if accumulated_rag_context_str:
1064
- final_answer_prompt_parts.append(f"\nBased on the information I have gathered:\n--- Gathered Context Start ---\n{accumulated_rag_context_str.strip()}\n--- Gathered Context End ---")
1050
+ final_answer_prompt_parts.append(f"\nBased on the following information I have gathered from a knowledge base:\n--- Gathered Context Start ---\n{accumulated_rag_context_str.strip()}\n--- Gathered Context End ---")
1065
1051
  else:
1066
1052
  final_answer_prompt_parts.append("\n(No specific information was retrieved from the knowledge base for this request.)")
1067
1053
 
1068
- final_answer_prompt_parts.append("\nPlease provide a comprehensive answer to the original request using ONLY the provided gathered context. If the context is insufficient, clearly state that.")
1054
+ final_answer_prompt_parts.append("\nPlease provide a comprehensive answer to the original request using ONLY the provided gathered context. If the context is insufficient, clearly state that. If the context contains code examples, ensure they are accurately reproduced.")
1069
1055
  final_answer_prompt_parts.append(self.ai_full_header)
1070
1056
 
1071
1057
  final_answer_llm_prompt = "\n".join(final_answer_prompt_parts)
1072
1058
 
1073
- final_answer_streaming_callback = None
1074
- if streaming_callback:
1075
- def final_answer_cb_adapter(chunk, msg_type):
1076
- return streaming_callback(chunk, msg_type, {"type": "final_answer_chunk"}, turn_rag_history_for_callback)
1077
- final_answer_streaming_callback = final_answer_cb_adapter
1059
+ final_answer_streaming_callback_adapted = None
1060
+ if streaming_callback and stream:
1061
+ def final_answer_cb_adapter(chunk_text, msg_type_llm):
1062
+ return streaming_callback(chunk_text, msg_type_llm, {"type": "final_answer_chunk"}, turn_rag_history_for_callback)
1063
+ final_answer_streaming_callback_adapted = final_answer_cb_adapter
1064
+
1065
+ actual_streaming_cb_for_generate = final_answer_streaming_callback_adapted if stream else None
1078
1066
 
1079
- final_answer_text = self.remove_thinking_blocks(self.generate_text(
1080
- prompt=final_answer_llm_prompt, images=images,
1067
+ final_answer_raw = self.generate_text(
1068
+ prompt=final_answer_llm_prompt, images=images, system_prompt=system_prompt,
1081
1069
  n_predict=n_predict, stream=stream, temperature=temperature, top_k=top_k, top_p=top_p,
1082
1070
  repeat_penalty=repeat_penalty, repeat_last_n=repeat_last_n, seed=seed, n_threads=n_threads,
1083
- ctx_size=ctx_size, streaming_callback=final_answer_streaming_callback, **llm_generation_kwargs
1084
- ))
1085
-
1086
- if streaming_callback:
1087
- streaming_callback("Final answer generation complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
1071
+ ctx_size=ctx_size, streaming_callback=actual_streaming_cb_for_generate, **llm_generation_kwargs
1072
+ )
1088
1073
 
1089
- if isinstance(final_answer_text, dict) and "error" in final_answer_text:
1090
- return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Final answer generation failed: {final_answer_text['error']}"}
1074
+ if isinstance(final_answer_raw, dict) and "error" in final_answer_raw:
1075
+ return {"final_answer": "", "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": f"Final answer generation failed: {final_answer_raw['error']}"}
1091
1076
 
1092
- return {"final_answer": final_answer_text, "rag_hops_history": rag_hops_details_list, "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), "error": None}
1077
+ final_answer_text = self.remove_thinking_blocks(final_answer_raw)
1093
1078
 
1079
+ if streaming_callback:
1080
+ streaming_callback("Final answer generation complete.", MSG_TYPE.MSG_TYPE_STEP_END, {"type": "final_answer_generation"}, turn_rag_history_for_callback)
1081
+ if not stream and final_answer_text:
1082
+ streaming_callback(final_answer_text, MSG_TYPE.MSG_TYPE_CHUNK, {"type": "final_answer_full"}, turn_rag_history_for_callback)
1083
+
1084
+ return {
1085
+ "final_answer": final_answer_text,
1086
+ "rag_hops_history": rag_hops_details_list,
1087
+ "all_retrieved_sources": list(all_unique_retrieved_chunks_map.values()), # All unique chunks found
1088
+ "error": None
1089
+ }
1090
+
1094
1091
  def generate_code(
1095
1092
  self,
1096
1093
  prompt,
1097
1094
  images=[],
1095
+ system_prompt=None,
1098
1096
  template=None,
1099
1097
  language="json",
1100
1098
  code_tag_format="markdown", # or "html"
@@ -1111,8 +1109,8 @@ Respond with a JSON object containing ONE of the following structures:
1111
1109
  Uses the underlying LLM binding via `generate_text`.
1112
1110
  Handles potential continuation if the code block is incomplete.
1113
1111
  """
1114
-
1115
- system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""
1112
+ if not system_prompt:
1113
+ system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""
1116
1114
 
1117
1115
  if template:
1118
1116
  system_prompt += "Here is a template of the answer:\n"
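Before the packaging metadata, it is worth noting how the reworked generate_text_with_rag signature is meant to be driven. The following is a hedged usage sketch, not code from the wheel: the retriever my_rag_query and its stub content are invented for illustration, while the parameter names (including the new max_rag_context_characters and the removed rag_hop_summary_temperature) and the returned dictionary keys are taken from the diff above.

from typing import Any, Dict, List, Optional
from lollms_client import LollmsClient

def my_rag_query(query_text: str,
                 vectorizer_name: Optional[str] = None,
                 top_k: int = 5,
                 min_similarity_percent: float = 0.0) -> List[Dict[str, Any]]:
    # Stand-in retriever returning the keys the 0.19.7 code reads:
    # file_path, chunk_text and similarity_percent.
    return [{
        "file_path": "memory://example-source",
        "chunk_text": f"Stub content answering: {query_text}",
        "similarity_percent": 90.0,
    }][:top_k]

lc = LollmsClient(binding_name="ollama", model_name="mistral:latest")
result = lc.generate_text_with_rag(
    prompt="Summarize what the knowledge base says about fusion energy.",
    rag_query_function=my_rag_query,
    max_rag_hops=1,                     # allow one LLM-generated follow-up query
    rag_top_k=3,
    rag_min_similarity_percent=50.0,
    max_rag_context_characters=8000,    # new in 0.19.7: character budget for the final context
    n_predict=300,
)
print(result["error"] or result["final_answer"])
print(f"unique sources: {len(result['all_retrieved_sources'])}")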
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.19.5
3
+ Version: 0.19.7
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
@@ -1,6 +1,6 @@
1
1
  examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
2
2
  examples/generate_text_with_multihop_rag_example.py,sha256=Z6TbVqThOCHNK6kzIqRnEi76JDxIFlg1-IIMWehZars,11582
3
- examples/internet_search_with_rag.py,sha256=sw0B4Nea6D5TeGtVdh17imm2E3IassGhUTTL4LkqfuY,9150
3
+ examples/internet_search_with_rag.py,sha256=WBoYFBEjGIFKyKTzezm7lI0bGPNuHFMyoq_8iY32qLY,12434
4
4
  examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
5
5
  examples/simple_text_gen_test.py,sha256=RoX9ZKJjGMujeep60wh5WT_GoBn0O9YKJY6WOy-ZmOc,8710
6
6
  examples/simple_text_gen_with_image_test.py,sha256=rR1O5Prcb52UHtJ3c6bv7VuTd1cvbkr5aNZU-v-Rs3Y,9263
@@ -19,9 +19,9 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
19
19
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
20
20
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
21
21
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
22
- lollms_client/__init__.py,sha256=pR9LDMi5tPNW-WpvXcAiKcOdKjfuNvqSrKApzIFAn8o,910
22
+ lollms_client/__init__.py,sha256=86XdQomPuN11zPz9F0RAO1ikkwIwxOuay4D48Lr0imE,910
23
23
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
24
- lollms_client/lollms_core.py,sha256=56wntZAimRfx0qBR_96_1h9_ZKuvY1Uq_kMVs9xg-dE,119768
24
+ lollms_client/lollms_core.py,sha256=CRHOihPB3Euzu2lMUyvXWcd27GT6NHDbJvdlLO6cB1M,117743
25
25
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
26
26
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
27
27
  lollms_client/lollms_llm_binding.py,sha256=bdElz_IBx0zZ-85YTT1fyY_mSoHo46tKIMiHYJlKCkM,9809
@@ -69,8 +69,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
69
69
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
70
70
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
71
71
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
72
- lollms_client-0.19.5.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
73
- lollms_client-0.19.5.dist-info/METADATA,sha256=c7UkzPm39_qA9TpFQSTqoZSfGC0SomSnIoKiHJWbSdc,13374
74
- lollms_client-0.19.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
- lollms_client-0.19.5.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
76
- lollms_client-0.19.5.dist-info/RECORD,,
72
+ lollms_client-0.19.7.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
73
+ lollms_client-0.19.7.dist-info/METADATA,sha256=mBO2JtWYs-IAKhUX2GMIvBHzIXw-MiQ2sMZkbazoqos,13374
74
+ lollms_client-0.19.7.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
+ lollms_client-0.19.7.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
76
+ lollms_client-0.19.7.dist-info/RECORD,,