lollms-client 0.19.5__py3-none-any.whl → 0.19.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lollms-client has been flagged as potentially problematic by the registry's automated checks; consult the registry's advisory page for this release for further details.

@@ -4,75 +4,76 @@ from typing import List, Dict, Any, Optional, Callable
4
4
  import json
5
5
  from pathlib import Path
6
6
 
7
- # --- Dependency Management for the Search Tool ---
8
- # Ensure the duckduckgo_search library is installed for our RAG query function.
7
+ # --- Internet Search RAG Implementation ---
8
+ _duckduckgo_search_installed = False
9
+ _search_installation_error_message = ""
9
10
  try:
10
11
  import pipmaster as pm
11
- pm.ensure_packages(["duckduckgo_search"])
12
+ # ensure_packages should be called by the binding init ideally,
13
+ # but we call it here for the example's standalone execution.
14
+ pm.ensure_packages(["duckduckgo_search"])
12
15
  from duckduckgo_search import DDGS
13
- _ddgs_installed = True
14
- except Exception as e_dep:
15
- _ddgs_installed = False
16
- ASCIIColors.error(f"Could not ensure/import duckduckgo_search: {e_dep}")
17
- ASCIIColors.warning("The RAG function in this example will not work.")
16
+ _duckduckgo_search_installed = True
17
+ except Exception as e:
18
+ _search_installation_error_message = str(e)
18
19
  DDGS = None
19
- # --- End Dependency Management ---
20
+ ASCIIColors.error(f"Failed to import duckduckgo_search: {_search_installation_error_message}")
21
+ ASCIIColors.info("Please install it: pip install duckduckgo-search")
20
22
 
21
23
 
22
- def internet_rag_query_function(
24
+ def perform_internet_search_rag(
23
25
  query_text: str,
24
- vectorizer_name: Optional[str] = None, # Not used for this keyword-based search
26
+ vectorizer_name: Optional[str] = None, # Not used for search
25
27
  top_k: int = 5,
26
- min_similarity_percent: float = 0.0 # Not used for this keyword-based search
28
+ min_similarity_percent: float = 0.0 # Not used directly for search filter, but can influence result quality/rank
27
29
  ) -> List[Dict[str, Any]]:
28
30
  """
29
- A RAG-compatible query function that performs a live internet search using DuckDuckGo.
30
-
31
- Args:
32
- query_text: The search query.
33
- vectorizer_name: Ignored by this function.
34
- top_k: The maximum number of search results to return.
35
- min_similarity_percent: Ignored by this function.
36
-
37
- Returns:
38
- A list of dictionaries, each formatted for RAG with 'document', 'content', and 'similarity'.
31
+ Performs an internet search using DuckDuckGo and formats results for RAG.
32
+ Similarity is simulated based on rank.
39
33
  """
40
- if not _ddgs_installed:
41
- ASCIIColors.error("duckduckgo_search library is not available. Cannot perform internet search.")
42
- return []
34
+ if not _duckduckgo_search_installed or DDGS is None:
35
+ ASCIIColors.error("duckduckgo_search is not available. Cannot perform internet search.")
36
+ return []
43
37
 
44
- ASCIIColors.magenta(f" [INTERNET RAG] Searching web for: '{query_text}', max_results={top_k}")
45
- formatted_results = []
38
+ ASCIIColors.magenta(f" [INTERNET SEARCH] Querying DuckDuckGo for: '{query_text}', max_results={top_k}")
39
+ search_results_raw = []
46
40
  try:
41
+ # DDGS().text returns a generator, max_results limits it.
42
+ # Note: The DDGS library might sometimes return fewer results than max_results.
47
43
  with DDGS() as ddgs:
48
- # Fetch search results from DuckDuckGo
49
- search_results = ddgs.text(keywords=query_text, max_results=top_k)
44
+ search_results_raw = list(ddgs.text(keywords=query_text, max_results=top_k))
50
45
 
51
- if not search_results:
52
- ASCIIColors.yellow(" [INTERNET RAG] DuckDuckGo returned no results for this query.")
53
- return []
54
-
55
- for i, result in enumerate(search_results):
56
- # Format the search result into the structure expected by generate_text_with_rag
57
- # 'document' will be the URL.
58
- # 'content' will be a combination of title and snippet.
59
- # 'similarity' is emulated based on rank, as DDG doesn't provide a score.
60
- formatted_results.append({
61
- "document": result.get("href", "#"),
62
- "similarity": round(100.0 - (i * (10.0 / top_k)), 2), # Create a descending score
63
- "content": f"Title: {result.get('title', 'N/A')}\nSnippet: {result.get('body', 'N/A')}"
64
- })
65
-
66
- ASCIIColors.magenta(f" [INTERNET RAG] Found {len(formatted_results)} results.")
67
- return formatted_results
68
-
69
46
  except Exception as e:
47
+ ASCIIColors.error(f" [INTERNET SEARCH] Search failed: {e}")
70
48
  trace_exception(e)
71
- ASCIIColors.error(f" [INTERNET RAG] An error occurred during search: {e}")
72
49
  return []
73
50
 
51
+ formatted_results: List[Dict[str, Any]] = []
52
+ if search_results_raw:
53
+ for i, r in enumerate(search_results_raw):
54
+ # Simulate similarity based on rank (rank 1 is highest sim)
55
+ # Max similarity is 100% for rank 1, decreases linearly or non-linearly.
56
+ # Simple linear decrease: 100 - (rank * (100 / top_k+1))
57
+ # Let's use rank-based score: 100% for rank 1, 90% for rank 2, ... 50% for rank 5 etc.
58
+ # Ensure similarity is above min_similarity_percent if that param was intended as a filter here
59
+
60
+ simulated_similarity = max(0.0, 100.0 - i * (100.0 / (top_k + 1))) # Higher rank = lower sim
61
+ simulated_similarity = round(simulated_similarity, 2)
62
+
63
+ if simulated_similarity >= min_similarity_percent:
64
+ formatted_results.append({
65
+ "file_path": r.get("href", "# Unknown URL"), # Use URL as document identifier
66
+ "chunk_text": f"Title: {r.get('title', 'N/A')}\nSnippet: {r.get('body', 'N/A')}", # Combine title and snippet
67
+ "similarity_percent": simulated_similarity,
68
+ })
69
+ else:
70
+ ASCIIColors.debug(f" [INTERNET SEARCH] Skipping result {i+1} due to low simulated similarity ({simulated_similarity}%)")
71
+
72
+ ASCIIColors.magenta(f" [INTERNET SEARCH] Formatted {len(formatted_results)} results for RAG.")
73
+ if not formatted_results: ASCIIColors.yellow(f" [INTERNET SEARCH] No results found for query: '{query_text}' or none met min_similarity_percent.")
74
+ return formatted_results
75
+
74
76
  # --- Streaming Callback for RAG and LLM ---
75
- # (This is the same useful callback from the previous example)
76
77
  def rag_streaming_callback(
77
78
  chunk: str,
78
79
  msg_type: MSG_TYPE,
@@ -83,7 +84,7 @@ def rag_streaming_callback(
83
84
  hop = metadata.get("hop", "")
84
85
  type_info = metadata.get("type", "N/A")
85
86
 
86
- if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
87
+ if msg_type == MSG_TYPE.MSG_TYPE_CHUNK: # Final answer chunks
87
88
  ASCIIColors.success(chunk, end="", flush=True)
88
89
  elif msg_type == MSG_TYPE.MSG_TYPE_STEP_START:
89
90
  info = metadata.get("query", chunk) if type_info in ["rag_query_generation", "rag_retrieval"] else chunk
@@ -95,8 +96,8 @@ def rag_streaming_callback(
95
96
 
96
97
  end_info = []
97
98
  if query: end_info.append(f"Query: '{str(query)[:50]}...'")
98
- if num_chunks is not None: end_info.append(f"Retrieved: {num_chunks} sources")
99
- if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:40]}...'")
99
+ if num_chunks is not None: end_info.append(f"Results: {num_chunks}")
100
+ if decision: end_info.append(f"LLM Decision: NeedMore={decision.get('need_more_data')}, Summary: '{str(decision.get('new_information_summary'))[:50]}...'")
100
101
 
101
102
  ASCIIColors.green(f"\n<< RAG Hop {hop} | END | {type_info.upper()} | {' | '.join(end_info) if end_info else chunk}", flush=True)
102
103
  elif msg_type == MSG_TYPE.MSG_TYPE_EXCEPTION:
@@ -106,75 +107,113 @@ def rag_streaming_callback(
106
107
 
107
108
  # --- Main Example ---
108
109
  if __name__ == "__main__":
109
- ASCIIColors.red("--- Internet Search with Multi-Hop RAG Example ---")
110
+ ASCIIColors.red("--- Multi-Hop Internet Search Example with LollmsClient ---")
110
111
 
112
+ # LLM Configuration (use a model good at instruction following and JSON)
113
+ # Ensure your Ollama server is running and has this model pulled.
111
114
  LLM_BINDING_NAME = "ollama"
112
- LLM_MODEL_NAME = "mistral-nemo:latest" # Nemo is good with JSON and reasoning
115
+ LLM_MODEL_NAME = "mistral:latest" # or llama3, phi3 etc.
113
116
 
114
- if not _ddgs_installed:
115
- ASCIIColors.error("Cannot run this example because the 'duckduckgo-search' library is not installed.")
116
- exit(1)
117
+ # You could also enable the internet_search tool via MCP,
118
+ # but this example specifically uses it directly via generate_text_with_rag.
119
+ # For MCP example, see examples/local_mcp.py
117
120
 
118
121
  try:
119
122
  lc = LollmsClient(
120
123
  binding_name=LLM_BINDING_NAME,
121
124
  model_name=LLM_MODEL_NAME,
122
125
  temperature=0.1,
123
- ctx_size=4096
126
+ ctx_size=4096
124
127
  )
125
128
  ASCIIColors.green(f"LollmsClient initialized with LLM: {LLM_BINDING_NAME}/{LLM_MODEL_NAME}")
126
129
 
127
- # --- Test Case 1: Classic RAG with Internet Search ---
128
- ASCIIColors.cyan("\n\n--- Test Case 1: Classic RAG (max_rag_hops = 0) using Internet Search ---")
129
- classic_rag_prompt = "What is the James Webb Space Telescope and what was its launch date?"
130
- ASCIIColors.blue(f"User Prompt: {classic_rag_prompt}")
130
+ if not _duckduckgo_search_installed or DDGS is None:
131
+ ASCIIColors.error("duckduckgo_search is not installed. Cannot run search examples.")
132
+ exit()
133
+
134
+
135
+ # --- Test Case 1: Classic Search RAG (max_rag_hops = 0) ---
136
+ ASCIIColors.cyan("\n\n--- Test Case 1: Classic Internet Search RAG (max_rag_hops = 0) ---")
137
+ classic_search_prompt = "What is the current population of Japan?"
138
+ ASCIIColors.blue(f"User Prompt: {classic_search_prompt}")
131
139
 
132
140
  classic_rag_result = lc.generate_text_with_rag(
133
- prompt=classic_rag_prompt,
134
- rag_query_function=internet_rag_query_function,
141
+ prompt=classic_search_prompt,
142
+ rag_query_function=perform_internet_search_rag, # Use the search function
135
143
  max_rag_hops=0,
136
- rag_top_k=3,
144
+ rag_top_k=3, # Get 3 search results
145
+ rag_min_similarity_percent=50.0, # Only use results with simulated sim >= 50%
137
146
  streaming_callback=rag_streaming_callback,
138
- n_predict=300
147
+ n_predict=250
139
148
  )
140
- print("\n--- End of Classic RAG ---")
141
- ASCIIColors.magenta("\nClassic RAG Final Output Details:")
142
- print(f" Final Answer (first 150 chars): {classic_rag_result.get('final_answer', '')[:150]}...")
149
+ print("\n--- End of Classic Search RAG ---")
150
+ ASCIIColors.magenta("\nClassic Search RAG Final Output Structure:")
151
+ print(f" Final Answer (first 100 chars): {classic_rag_result.get('final_answer', '')[:100]}...")
143
152
  print(f" Error: {classic_rag_result.get('error')}")
153
+ print(f" Number of Hops: {len(classic_rag_result.get('rag_hops_history', []))}")
144
154
  print(f" Total Unique Sources Retrieved: {len(classic_rag_result.get('all_retrieved_sources', []))}")
145
155
  if classic_rag_result.get('all_retrieved_sources'):
146
- print(" Retrieved Sources (URLs):")
147
- for source in classic_rag_result['all_retrieved_sources']:
148
- print(f" - {source.get('document')}")
149
-
150
- # --- Test Case 2: Multi-Hop RAG with Internet Search ---
151
- ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop RAG (max_rag_hops = 2) using Internet Search ---")
152
- multihop_prompt = "First, find out what the TRAPPIST-1 system is. Then, search for recent news about its planets from the James Webb Space Telescope."
153
- ASCIIColors.blue(f"User Prompt: {multihop_prompt}")
154
-
155
- multihop_rag_result = lc.generate_text_with_rag(
156
- prompt=multihop_prompt,
157
- rag_query_function=internet_rag_query_function,
158
- rag_query_text=None, # Let the LLM generate the first query
159
- max_rag_hops=2, # Allow up to two separate search queries
160
- rag_top_k=2,
156
+ print(" Example Retrieved Source:")
157
+ source_ex = classic_rag_result['all_retrieved_sources'][0]
158
+ print(f" Document (URL): {source_ex.get('document')}")
159
+ print(f" Similarity: {source_ex.get('similarity')}%")
160
+ print(f" Content (Snippet, first 50 chars): {source_ex.get('content', '')[:50]}...")
161
+
162
+
163
+ # --- Test Case 2: Multi-Hop Search RAG (max_rag_hops = 1) ---
164
+ ASCIIColors.cyan("\n\n--- Test Case 2: Multi-Hop Internet Search RAG (max_rag_hops = 1) ---")
165
+ multihop_search_prompt_1 = "Tell me about the latest developments in fusion energy, including any recent news."
166
+ ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_1}")
167
+
168
+ multihop_rag_result_1 = lc.generate_text_with_rag(
169
+ prompt=multihop_search_prompt_1,
170
+ rag_query_function=perform_internet_search_rag,
171
+ rag_query_text=None, # LLM will generate first query
172
+ max_rag_hops=1, # Allow one refinement hop
173
+ rag_top_k=2, # Get 2 search results per query
174
+ rag_min_similarity_percent=50.0,
161
175
  streaming_callback=rag_streaming_callback,
162
176
  n_predict=400,
177
+ rag_hop_query_generation_temperature=0.1,
178
+ rag_hop_summary_temperature=0.2
163
179
  )
164
- print("\n--- End of Multi-Hop RAG ---")
165
- ASCIIColors.magenta("\nMulti-Hop RAG Final Output Details:")
166
- print(f" Final Answer (first 150 chars): {multihop_rag_result.get('final_answer', '')[:150]}...")
167
- print(f" Error: {multihop_rag_result.get('error')}")
168
- print(f" Number of Hops Made: {len(multihop_rag_result.get('rag_hops_history', []))}")
169
- for i, hop_info in enumerate(multihop_rag_result.get('rag_hops_history', [])):
180
+ print("\n--- End of Multi-Hop Search RAG (1 hop max) ---")
181
+ ASCIIColors.magenta("\nMulti-Hop Search RAG (1 hop max) Final Output Structure:")
182
+ print(f" Final Answer (first 100 chars): {multihop_rag_result_1.get('final_answer', '')[:100]}...")
183
+ print(f" Error: {multihop_rag_result_1.get('error')}")
184
+ print(f" Number of Hops Made: {len(multihop_rag_result_1.get('rag_hops_history', []))}")
185
+ for i, hop_info in enumerate(multihop_rag_result_1.get('rag_hops_history', [])):
170
186
  print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
171
- print(f" Hop {i+1} Retrieved Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
187
+ print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
188
+ print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
172
189
  print(f" Hop {i+1} LLM Decision: NeedMoreData={hop_info.get('llm_decision_json',{}).get('need_more_data')}")
173
- print(f" Total Unique Sources Retrieved: {len(multihop_rag_result.get('all_retrieved_sources', []))}")
174
- if multihop_rag_result.get('all_retrieved_sources'):
175
- print(" All Retrieved Sources (URLs):")
176
- for source in multihop_rag_result['all_retrieved_sources']:
177
- print(f" - {source.get('document')}")
190
+ print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_1.get('all_retrieved_sources', []))}")
191
+
192
+
193
+ # --- Test Case 3: More complex multi-hop (max_rag_hops = 2) ---
194
+ ASCIIColors.cyan("\n\n--- Test Case 3: More Complex Multi-Hop Internet Search RAG (max_rag_hops = 2) ---")
195
+ multihop_search_prompt_2 = "What are the requirements and steps to install the lollms_client python library, and what are some of its key features?"
196
+ ASCIIColors.blue(f"User Prompt: {multihop_search_prompt_2}")
197
+
198
+ multihop_rag_result_2 = lc.generate_text_with_rag(
199
+ prompt=multihop_search_prompt_2,
200
+ rag_query_function=perform_internet_search_rag,
201
+ max_rag_hops=2, # Allow up to two refinement hops
202
+ rag_top_k=2, # Get 2 results per query
203
+ rag_min_similarity_percent=40.0, # Lower similarity to maybe get broader initial results
204
+ streaming_callback=rag_streaming_callback,
205
+ n_predict=500 # Allow more for the installation steps and features
206
+ )
207
+ print("\n--- End of More Complex Multi-Hop Search RAG (up to 2 hops) ---")
208
+ ASCIIColors.magenta("\nMore Complex Multi-Hop Search RAG (up to 2 hops) Final Output Structure:")
209
+ print(f" Final Answer (first 100 chars): {multihop_rag_result_2.get('final_answer', '')[:100]}...")
210
+ print(f" Error: {multihop_rag_result_2.get('error')}")
211
+ print(f" Number of Hops Made: {len(multihop_rag_result_2.get('rag_hops_history', []))}")
212
+ for i, hop_info in enumerate(multihop_rag_result_2.get('rag_hops_history', [])):
213
+ print(f" Hop {i+1} Query: '{hop_info.get('query')}'")
214
+ print(f" Hop {i+1} Results Count: {len(hop_info.get('retrieved_chunks_details',[]))}")
215
+ print(f" Hop {i+1} Summary (first 50): '{str(hop_info.get('new_information_summary'))[:50]}...'")
216
+ print(f" Total Unique Sources Retrieved: {len(multihop_rag_result_2.get('all_retrieved_sources', []))}")
178
217
 
179
218
 
180
219
  except ValueError as ve:
@@ -186,4 +225,4 @@ if __name__ == "__main__":
186
225
  ASCIIColors.error(f"An unexpected error occurred: {e}")
187
226
  trace_exception(e)
188
227
 
189
- ASCIIColors.red("\n--- Internet Search RAG Example Finished ---")
228
+ ASCIIColors.red("\n--- Multi-Hop Internet Search Example Finished ---")
lollms_client/__init__.py CHANGED
@@ -7,7 +7,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
7
7
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
8
8
 
9
9
 
10
- __version__ = "0.19.5" # Updated version
10
+ __version__ = "0.19.6" # Updated version
11
11
 
12
12
  # Optionally, you could define __all__ if you want to be explicit about exports
13
13
  __all__ = [
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 0.19.5
3
+ Version: 0.19.6
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache Software License
@@ -1,6 +1,6 @@
1
1
  examples/function_calling_with_local_custom_mcp.py,sha256=g6wOFRB8-p9Cv7hKmQaGzPvtMX3H77gas01QVNEOduM,12407
2
2
  examples/generate_text_with_multihop_rag_example.py,sha256=Z6TbVqThOCHNK6kzIqRnEi76JDxIFlg1-IIMWehZars,11582
3
- examples/internet_search_with_rag.py,sha256=sw0B4Nea6D5TeGtVdh17imm2E3IassGhUTTL4LkqfuY,9150
3
+ examples/internet_search_with_rag.py,sha256=WBoYFBEjGIFKyKTzezm7lI0bGPNuHFMyoq_8iY32qLY,12434
4
4
  examples/local_mcp.py,sha256=w40dgayvHYe01yvekEE0LjcbkpwKjWwJ-9v4_wGYsUk,9113
5
5
  examples/simple_text_gen_test.py,sha256=RoX9ZKJjGMujeep60wh5WT_GoBn0O9YKJY6WOy-ZmOc,8710
6
6
  examples/simple_text_gen_with_image_test.py,sha256=rR1O5Prcb52UHtJ3c6bv7VuTd1cvbkr5aNZU-v-Rs3Y,9263
@@ -19,7 +19,7 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
19
19
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
20
20
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
21
21
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
22
- lollms_client/__init__.py,sha256=pR9LDMi5tPNW-WpvXcAiKcOdKjfuNvqSrKApzIFAn8o,910
22
+ lollms_client/__init__.py,sha256=6_HMLYZ2YejnZPhg5Tomyq6wYfndu9otuy-o7YAinWQ,910
23
23
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
24
24
  lollms_client/lollms_core.py,sha256=56wntZAimRfx0qBR_96_1h9_ZKuvY1Uq_kMVs9xg-dE,119768
25
25
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
@@ -69,8 +69,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
69
69
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
70
70
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
71
71
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
72
- lollms_client-0.19.5.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
73
- lollms_client-0.19.5.dist-info/METADATA,sha256=c7UkzPm39_qA9TpFQSTqoZSfGC0SomSnIoKiHJWbSdc,13374
74
- lollms_client-0.19.5.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
- lollms_client-0.19.5.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
76
- lollms_client-0.19.5.dist-info/RECORD,,
72
+ lollms_client-0.19.6.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
73
+ lollms_client-0.19.6.dist-info/METADATA,sha256=kI_H4LhMJWMmt1rTJUMP7CdZVlzA-N7RhnyDSHEYJvA,13374
74
+ lollms_client-0.19.6.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
75
+ lollms_client-0.19.6.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
76
+ lollms_client-0.19.6.dist-info/RECORD,,