lollms-client 0.13.2__py3-none-any.whl → 0.14.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

examples/text_gen.py CHANGED
@@ -1,7 +1,7 @@
  from lollms_client import LollmsClient

  # Initialize the LollmsClient instance
- lc = LollmsClient("http://localhost:9600")
+ lc = LollmsClient("lollms")
  # Generate Text
  # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
  # print(response)
examples/text_gen_system_prompt.py ADDED
@@ -0,0 +1,28 @@
+ from lollms_client import LollmsClient
+
+ # Initialize the LollmsClient instance
+ lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+ # Generate Text
+ # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+ # print(response)
+
+ # # Generate Completion
+ # response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
+ # print(response)
+
+ def cb(chunk, type):
+     print(chunk,end="",flush=True)
+
+ response = lc.generate_text(prompt="One plus one equals ", system_prompt="You are a playful dude who never really answers questions correctly. always answer with quirky style.", stream=False, temperature=0.5, streaming_callback=cb)
+ print()
+ print(response)
+ print()
+
+
+ # List Mounted Personalities
+ response = lc.listMountedPersonalities()
+ print(response)
+
+ # List Models
+ response = lc.listModels()
+ print(response)
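
For orientation, the constructor no longer takes a server URL as its first argument but a binding name. A minimal sketch of the two styles shown in the examples above (any constructor parameters beyond the binding name and model_name are assumptions not covered by this diff):

from lollms_client import LollmsClient

# Route requests through a running LoLLMs server using the "lollms" binding
# (previously this was written as LollmsClient("http://localhost:9600")).
lc_lollms = LollmsClient("lollms")

# Route requests to a local Ollama daemon instead, picking an explicit model.
lc_ollama = LollmsClient("ollama", model_name="mistral-nemo:latest")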
lollms_client/__init__.py CHANGED
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
  from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
  from lollms_client.lollms_functions import FunctionCalling_Library

- __version__ = "0.13.2"
+ __version__ = "0.14.1"

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
@@ -49,6 +49,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -106,7 +107,7 @@ class LollmsLLMBinding(LollmsLLMBinding):

  # Prepare request data
  data = {
- "prompt": prompt,
+ "prompt":"!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt,
  "model_name": self.model_name,
  "personality": self.personality,
  "n_predict": n_predict,
@@ -113,9 +113,9 @@ class OllamaBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None, # List of image file paths
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
- system_prompt = '',
  temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
  top_k: int = 40, # Ollama default is 40
  top_p: float = 0.9, # Ollama default is 0.9
@@ -201,15 +201,16 @@ class OllamaBinding(LollmsLLMBinding):
  )
  return response_dict.get('message', {}).get('content', '')
  else: # Text-only
+ messages = [{'role': 'system', 'content':system_prompt},{'role': 'user', 'content': prompt}]
  if stream:
- response_stream = self.ollama_client.generate(
+ response_stream = self.ollama_client.chat(
  model=self.model_name,
- prompt=prompt,
+ messages=messages,
  stream=True,
  options=options if options else None
  )
  for chunk_dict in response_stream:
- chunk_content = chunk_dict.get('response', '')
+ chunk_content = chunk_dict.message.content
  if chunk_content:
  full_response_text += chunk_content
  if streaming_callback:
@@ -217,13 +218,13 @@ class OllamaBinding(LollmsLLMBinding):
  break
  return full_response_text
  else: # Not streaming
- response_dict = self.ollama_client.generate(
+ response_dict = self.ollama_client.chat(
  model=self.model_name,
- prompt=prompt,
+ messages=messages,
  stream=False,
  options=options if options else None
  )
- return response_dict.get('response', '')
+ return response_dict.message.content
  except ollama.ResponseError as e:
  error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
  ASCIIColors.error(error_message)
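
The move from generate() to chat() means the Ollama binding now sends a role-tagged message list and reads the reply from message.content. A standalone sketch of the same call pattern, assuming the ollama Python package and a locally running daemon (host and model name are illustrative):

import ollama

client = ollama.Client(host="http://localhost:11434")
messages = [
    {"role": "system", "content": "You are a terse assistant."},
    {"role": "user", "content": "One plus one equals "},
]

# Non-streaming call: the text lives under message.content, as read in the hunk above.
reply = client.chat(model="mistral-nemo:latest", messages=messages, stream=False)
print(reply.message.content)

# Streaming call: each chunk exposes the same message.content field.
for chunk in client.chat(model="mistral-nemo:latest", messages=messages, stream=True):
    print(chunk.message.content, end="", flush=True)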
@@ -58,6 +58,7 @@ class OpenAIBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -98,6 +99,11 @@ class OpenAIBinding(LollmsLLMBinding):
  if images:
  messages = [
  {
+ "role": "system",
+ "content": system_prompt,
+ },
+
+ {
  "role": "user",
  "content": [
  {
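
With this change the OpenAI binding prepends a system message before the user entry. A rough sketch of the resulting messages payload for the image path; the per-item keys inside the user content list are not visible in this hunk, so the text/image_url form below is an assumption based on the common OpenAI vision format:

system_prompt = "You are a helpful assistant."
prompt = "Describe this image."
image_b64 = "<base64-encoded image>"  # placeholder, not real data

messages = [
    {"role": "system", "content": system_prompt},
    {
        "role": "user",
        "content": [
            {"type": "text", "text": prompt},
            {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{image_b64}"}},
        ],
    },
]
# messages would then be passed to the chat completions call.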
@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = 1024,
  stream: bool = False, # vLLM's generate is blocking, stream is pseudo
  temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
  # If providing multi_modal_data, usually prompt_token_ids are also needed.
  # This can get complex as it depends on how the model expects images to be interleaved.
  # For a simple case where image comes first:
- encoded_prompt_ids = self.tokenizer.encode(prompt)
+ encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
  gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
  gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
  gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
  except Exception as e_mm:
  return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
  else:
- gen_kwargs["prompts"] = [prompt]
+ gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]

  try:
  outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
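
In this vLLM-style binding the system prompt is simply joined ahead of the user prompt with a newline, in both the multimodal and text-only branches. A minimal reconstruction of that composition (standalone, not a helper from the package):

def compose_prompt(prompt: str, system_prompt: str = "") -> str:
    # Same expression as the diff: newline-joined only when a system prompt exists.
    return system_prompt + "\n" + prompt if system_prompt else prompt

full_prompt = compose_prompt("Summarize this paragraph.", "You are concise.")
# Text-only path:   gen_kwargs["prompts"] = [full_prompt]
# Multimodal path:  gen_kwargs["prompt_token_ids"] = [tokenizer.encode(full_prompt)]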
@@ -112,6 +112,7 @@ class TransformersBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -123,8 +124,7 @@ class TransformersBinding(LollmsLLMBinding):
  n_threads: int = 8,
  ctx_size: int | None = None,
  streaming_callback: Optional[Callable[[str, str], None]] = None,
- return_legacy_cache: bool = False,
- system_prompt: str = "You are a helpful assistant.") -> Union[str, dict]:
+ return_legacy_cache: bool = False) -> Union[str, dict]:
  """
  Generate text using the Transformers model, with optional image support.

@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = 1024,
  stream: bool = False, # vLLM's generate is blocking, stream is pseudo
  temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
  # If providing multi_modal_data, usually prompt_token_ids are also needed.
  # This can get complex as it depends on how the model expects images to be interleaved.
  # For a simple case where image comes first:
- encoded_prompt_ids = self.tokenizer.encode(prompt)
+ encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
  gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
  gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
  gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
  except Exception as e_mm:
  return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
  else:
- gen_kwargs["prompts"] = [prompt]
+ gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]

  try:
  outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
@@ -329,6 +329,7 @@ class LollmsClient():
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: Optional[bool] = None,
  temperature: Optional[float] = None,
@@ -365,6 +366,7 @@ class LollmsClient():
  return self.binding.generate_text(
  prompt=prompt,
  images=images,
+ system_prompt=system_prompt,
  n_predict=n_predict if n_predict is not None else self.default_n_predict,
  stream=stream if stream is not None else self.default_stream,
  temperature=temperature if temperature is not None else self.default_temperature,
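
At the client level the new argument is forwarded untouched to the active binding, while unset generation parameters fall back to the client defaults. A hedged usage sketch (binding and model names are illustrative):

from lollms_client import LollmsClient

lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
text = lc.generate_text(
    prompt="One plus one equals ",
    system_prompt="Answer in a playful tone.",  # new in 0.14.x, passed through to the binding
    n_predict=128,       # omitted parameters fall back to the client defaults
    temperature=0.5,
)
print(text)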
@@ -438,14 +440,12 @@
  Uses the underlying LLM binding via `generate_text`.
  """
  response_full = ""
- full_prompt = f"""{self.system_full_header}Act as a code generation assistant that generates code from user prompt.
- {self.user_full_header}
- {prompt}
- """
+ system_prompt = f"""Act as a code generation assistant that generates code from user prompt."""
+
  if template:
- full_prompt += "Here is a template of the answer:\n"
+ system_prompt += "Here is a template of the answer:\n"
  if code_tag_format=="markdown":
- full_prompt += f"""You must answer with the code placed inside the markdown code tag like this:
+ system_prompt += f"""You must answer with the code placed inside the markdown code tag like this:
  ```{language}
  {template}
  ```
@@ -454,7 +454,7 @@ The code tag is mandatory.
  Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
  """
  elif code_tag_format=="html":
- full_prompt +=f"""You must answer with the code placed inside the html code tag like this:
+ system_prompt +=f"""You must answer with the code placed inside the html code tag like this:
  <code language="{language}">
  {template}
  </code>
@@ -462,13 +462,13 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
  The code tag is mandatory.
  Don't forget encapsulate the code inside a html code tag. This is mandatory.
  """
- full_prompt += f"""Do not split the code in multiple tags.
- {self.ai_full_header}"""
+ system_prompt += f"""Do not split the code in multiple tags."""

  # Use generate_text which handles images internally
  response = self.generate_text(
- full_prompt,
+ prompt,
  images=images,
+ system_prompt=system_prompt,
  n_predict=max_size,
  temperature=temperature,
  top_k=top_k,
@@ -507,14 +507,12 @@ Don't forget encapsulate the code inside a html code tag. This is mandatory.
  Handles potential continuation if the code block is incomplete.
  """

- full_prompt = f"""{self.system_full_header}Act as a code generation assistant that generates code from user prompt.
- {self.user_full_header}
- {prompt}
- """
+ system_prompt = f"""{self.system_full_header}Act as a code generation assistant that generates code from user prompt."""
+
  if template:
- full_prompt += "Here is a template of the answer:\n"
+ system_prompt += "Here is a template of the answer:\n"
  if code_tag_format=="markdown":
- full_prompt += f"""You must answer with the code placed inside the markdown code tag like this:
+ system_prompt += f"""You must answer with the code placed inside the markdown code tag like this:
  ```{language}
  {template}
  ```
@@ -523,7 +521,7 @@ The code tag is mandatory.
  Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
  """
  elif code_tag_format=="html":
- full_prompt +=f"""You must answer with the code placed inside the html code tag like this:
+ system_prompt +=f"""You must answer with the code placed inside the html code tag like this:
  <code language="{language}">
  {template}
  </code>
@@ -531,13 +529,14 @@ Don't forget encapsulate the code inside a markdown code tag. This is mandatory.
  The code tag is mandatory.
  Don't forget encapsulate the code inside a html code tag. This is mandatory.
  """
- full_prompt += f"""You must return a single code tag.
+ system_prompt += f"""You must return a single code tag.
  Do not split the code in multiple tags.
  {self.ai_full_header}"""

  response = self.generate_text(
- full_prompt,
+ prompt,
  images=images,
+ system_prompt=system_prompt,
  n_predict=max_size,
  temperature=temperature,
  top_k=top_k,
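
After this refactor generate_code and generate_codes no longer fold the instruction headers into one long prompt: the instructions travel as system_prompt and the raw user prompt is passed through. A hedged usage sketch; parameter defaults not visible in the diff are assumptions:

from lollms_client import LollmsClient

lc = LollmsClient("ollama", model_name="mistral-nemo:latest")  # illustrative binding/model
code = lc.generate_code(
    prompt="Write a function that returns the nth Fibonacci number.",
    language="python",
    template="def fib(n):\n    ...",
    code_tag_format="markdown",  # or "html", matching the branches above
    max_size=1024,
    temperature=0.1,
)
print(code)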
@@ -32,6 +32,7 @@ class LollmsLLMBinding(ABC):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
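
Since the abstract generate_text signature now includes system_prompt, custom bindings should accept it as well. A minimal illustrative subclass (not part of the package; any other abstract members of the base class are omitted here, and the toy callback label is an assumption):

from typing import Callable, List, Optional, Union

from lollms_client.lollms_llm_binding import LollmsLLMBinding

class EchoBinding(LollmsLLMBinding):
    """Toy binding that just echoes the composed prompt back."""

    def generate_text(self,
                      prompt: str,
                      images: Optional[List[str]] = None,
                      system_prompt: str = "",
                      n_predict: Optional[int] = None,
                      stream: bool = False,
                      temperature: float = 0.1,
                      streaming_callback: Optional[Callable[[str, str], None]] = None,
                      **kwargs) -> Union[str, dict]:
        # Compose the prompt the same way the simple bindings above do.
        text = (system_prompt + "\n" + prompt) if system_prompt else prompt
        if streaming_callback:
            streaming_callback(text, "chunk")
        return text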
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.13.2
+ Version: 0.14.1
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -3,7 +3,8 @@ examples/simple_text_gen_with_image_test.py,sha256=Euv53jbKTVJDvs854lgJvA5F-iRnA
  examples/text_2_audio.py,sha256=MfL4AH_NNwl6m0I0ywl4BXRZJ0b9Y_9fRqDIe6O-Sbw,3523
  examples/text_2_image.py,sha256=Ri7lQ-GW54YWQh2eofcaN6LpwFoorbpJsJffrcXl3cg,6415
  examples/text_and_image_2_audio.py,sha256=QLvSsLff8VZZa7k7K1EFGlPpQWZy07zM4Fnli5btAl0,2074
- examples/text_gen.py,sha256=S1UIXi3Aj8gTAmwHXtg-qUfgSkyCJFDLVKAbiOfsYGs,773
+ examples/text_gen.py,sha256=IejpNmIlsfz3WpJg8IRm5X6F06JKd7h_GuonUxTITx8,758
+ examples/text_gen_system_prompt.py,sha256=wumwZ09WZkaK0tQ74KaZmfsYXcmjZIlsdim_P1aJmeA,910
  examples/article_summary/article_summary.py,sha256=CR8mCBNcZEVCR-q34uOmrJyMlG-xk4HkMbsV-TOZEnk,1978
  examples/deep_analyze/deep_analyse.py,sha256=fZNmDrfEAuxEAfdbjAgJYIh1k6wbiuZ4RvwHRvtyUs8,971
  examples/deep_analyze/deep_analyze_multiple_files.py,sha256=fOryShA33P4IFxcxUDe-nJ2kW0v9w9yW8KsToS3ETl8,1032
@@ -12,13 +13,13 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=y-N8Dw10pI9pHtP_zlVzsj7bVjsu873EdOlbqxaxZRU,823
+ lollms_client/__init__.py,sha256=Nu5NJl6GDDW4W-hoRwxXQZdnHj2-fgCeaRW3Dt_zuCQ,823
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=ZTbEVn1M_gHAL3mL5mf3wGYAXidAtnSI3qEjwz2HlwY,77980
+ lollms_client/lollms_core.py,sha256=aFW8KDnBJRSL8a7ibhKAV67O95qvPT6PneNHQL1y2vc,78049
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
  lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=vAFmGtdIB97nQwQzNauFowopH4qT2i-CS_-ckekY3V0,7361
+ lollms_client/lollms_llm_binding.py,sha256=7xvtLsFQYqFKS7m0BQQMvVq0XXZWZeGlGuv30mi1dF8,7408
  lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
  lollms_client/lollms_stt_binding.py,sha256=ovmpFF0fnmPC9VNi1-rxAJA8xI4JZDUBh_YwdtoTx28,5818
  lollms_client/lollms_tasks.py,sha256=Tgqces03gPTHFJCcPaeN9vBCsil3SSJX7nQAjCQ2-yg,34393
@@ -29,12 +30,12 @@ lollms_client/lollms_ttv_binding.py,sha256=u-gLIe22tbu4YsKA5RTyUT7iBlKxPXDmoQzcc
  lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
  lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
- lollms_client/llm_bindings/lollms/__init__.py,sha256=l1q2KnMQALz9QpLa3OUQ8e29KU4RCwkrmrdBvd7Z_kc,12236
- lollms_client/llm_bindings/ollama/__init__.py,sha256=DyueED1cJmmJFg5evYmu-lrkwsN9pAxaVcwgUkcAZHU,26467
- lollms_client/llm_bindings/openai/__init__.py,sha256=SWBgnOcOWmFRSKTN1S9ATownHNBJ9f6FEtI3L4xNJNM,11861
- lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=ZpeSKAbN8rh6zkysYl95sXG9Ci702NuPAhXC6zb1zT4,31840
- lollms_client/llm_bindings/transformers/__init__.py,sha256=8JbX3B-obLt5NNtcNOGD_E0f8OQTma2pNYtVt2urTOM,12572
- lollms_client/llm_bindings/vllm/__init__.py,sha256=54TxuHJhlujVxPN03BgfeUecIexTjvU6g1dVmmaDgtM,31824
+ lollms_client/llm_bindings/lollms/__init__.py,sha256=a36AMPFEf3xK4zx1M_L9PC-3-b0iiDf7eyLkknPjgaY,12356
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=MemSA20Zivn-kfP11JPA9FHigC1U2CGsJ1FaDtUFUUM,26574
+ lollms_client/llm_bindings/openai/__init__.py,sha256=NDZIdzW0pnHy9gPXSKfFyS6SPIOOxj9ZEzEE7gZT2NQ,12054
+ lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=IY4CrHVpHY77R1rzsl3iwcoarDjYD24n7bFKk_69PD8,31983
+ lollms_client/llm_bindings/transformers/__init__.py,sha256=IWfAmBGqZEelt5Z_jYTqpz7LzzKMVsKWx5nv4zBgKCQ,12544
+ lollms_client/llm_bindings/vllm/__init__.py,sha256=ZRCR7g3A2kHQ_07viNrNnVHoIGj5TNA4Q41rQWeTlxw,31967
  lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/stt_bindings/lollms/__init__.py,sha256=7-IZkrsn15Vaz0oqkqCxMeNQfMkeilbgScLlrrywES4,6098
  lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,8 +46,8 @@ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
  lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.13.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.13.2.dist-info/METADATA,sha256=IsZiVKLRi7NaMhITn6pFz3CX5zpIJAvb8vsddYIBJQA,7276
- lollms_client-0.13.2.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- lollms_client-0.13.2.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.13.2.dist-info/RECORD,,
+ lollms_client-0.14.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.14.1.dist-info/METADATA,sha256=Z6n3yPUXNYMjZa0IVrhBQDyvXHRbvi7yQwaqaa1gptY,7276
+ lollms_client-0.14.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-0.14.1.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+ lollms_client-0.14.1.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.7.1)
+ Generator: setuptools (80.9.0)
  Root-Is-Purelib: true
  Tag: py3-none-any
