lollms-client 0.13.1__py3-none-any.whl → 0.14.0__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.



examples/simple_text_gen_test.py CHANGED
@@ -1,4 +1,4 @@
- from lollms_client import LollmsClient, ELF_COMPLETION_FORMAT
+ from lollms_client import LollmsClient
  from lollms_client.lollms_types import MSG_TYPE # For callback signature
  from ascii_colors import ASCIIColors, trace_exception

examples/text_gen.py CHANGED
@@ -1,7 +1,7 @@
  from lollms_client import LollmsClient

  # Initialize the LollmsClient instance
- lc = LollmsClient("http://localhost:9600")
+ lc = LollmsClient("lollms")
  # Generate Text
  # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
  # print(response)
examples/text_gen_system_prompt.py ADDED
@@ -0,0 +1,28 @@
+ from lollms_client import LollmsClient
+
+ # Initialize the LollmsClient instance
+ lc = LollmsClient("ollama",model_name="mistral-nemo:latest")
+ # Generate Text
+ # response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+ # print(response)
+
+ # # Generate Completion
+ # response = lc.generate_completion(prompt="What is the capital of France", stream=False, temperature=0.5)
+ # print(response)
+
+ def cb(chunk, type):
+     print(chunk,end="",flush=True)
+
+ response = lc.generate_text(prompt="One plus one equals ", system_prompt="You are a playful dude who never really answers questions correctly. always answer with quirky style.", stream=False, temperature=0.5, streaming_callback=cb)
+ print()
+ print(response)
+ print()
+
+
+ # List Mounted Personalities
+ response = lc.listMountedPersonalities()
+ print(response)
+
+ # List Models
+ response = lc.listModels()
+ print(response)
lollms_client/__init__.py CHANGED
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
  from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
  from lollms_client.lollms_functions import FunctionCalling_Library

- __version__ = "0.13.1"
+ __version__ = "0.14.0"

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
lollms_client/llm_bindings/lollms/__init__.py CHANGED
@@ -49,6 +49,7 @@ class LollmsLLMBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -106,7 +107,7 @@ class LollmsLLMBinding(LollmsLLMBinding):

  # Prepare request data
  data = {
- "prompt": prompt,
+ "prompt":"!@>system: "+system_prompt+"\n"+"!@>user: "+prompt if system_prompt else prompt,
  "model_name": self.model_name,
  "personality": self.personality,
  "n_predict": n_predict,
lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -10,10 +10,11 @@ from typing import Optional, Callable, List, Union, Dict

  from ascii_colors import ASCIIColors, trace_exception
  import pipmaster as pm
- pm.ensure_packages(["ollama","pillow"])
+ pm.ensure_packages(["ollama","pillow","tiktoken"])


  import ollama
+ import tiktoken
  BindingName = "OllamaBinding"


@@ -112,9 +113,9 @@ class OllamaBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None, # List of image file paths
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
- system_prompt = '',
  temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
  top_k: int = 40, # Ollama default is 40
  top_p: float = 0.9, # Ollama default is 0.9
@@ -200,15 +201,16 @@ class OllamaBinding(LollmsLLMBinding):
  )
  return response_dict.get('message', {}).get('content', '')
  else: # Text-only
+ messages = [{'role': 'system', 'content':system_prompt},{'role': 'user', 'content': prompt}]
  if stream:
- response_stream = self.ollama_client.generate(
+ response_stream = self.ollama_client.chat(
  model=self.model_name,
- prompt=prompt,
+ messages=messages,
  stream=True,
  options=options if options else None
  )
  for chunk_dict in response_stream:
- chunk_content = chunk_dict.get('response', '')
+ chunk_content = chunk_dict.message.content
  if chunk_content:
  full_response_text += chunk_content
  if streaming_callback:
@@ -216,13 +218,13 @@ class OllamaBinding(LollmsLLMBinding):
  break
  return full_response_text
  else: # Not streaming
- response_dict = self.ollama_client.generate(
+ response_dict = self.ollama_client.chat(
  model=self.model_name,
- prompt=prompt,
+ messages=messages,
  stream=False,
  options=options if options else None
  )
- return response_dict.get('response', '')
+ return response_dict.message.content
  except ollama.ResponseError as e:
  error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
  ASCIIColors.error(error_message)
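For context, a minimal standalone sketch of the ollama client call pattern the binding now uses for text-only generation (host and model name below are illustrative):

    import ollama

    client = ollama.Client(host="http://localhost:11434")  # illustrative host
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "One plus one equals "},
    ]

    # Non-streaming: the reply text is on response.message.content
    response = client.chat(model="mistral-nemo:latest", messages=messages, stream=False)
    print(response.message.content)

    # Streaming: each chunk exposes .message.content as well
    for chunk in client.chat(model="mistral-nemo:latest", messages=messages, stream=True):
        print(chunk.message.content, end="", flush=True)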
@@ -236,53 +238,31 @@ class OllamaBinding(LollmsLLMBinding):
  trace_exception(ex)
  return {"status": False, "error": error_message}

- def tokenize(self, text: str) -> List[Union[int, str]]:
+ def tokenize(self, text: str) -> list:
  """
- Tokenize the input text. For Ollama, this is complex as tokenization is model-specific
- and best done by the server. This method provides a basic character-level tokenization
- as a fallback or placeholder, or one could attempt to call /api/tokenize if desired.
- The `count_tokens` method is more accurate for Ollama.
+ Tokenize the input text into a list of characters.

  Args:
  text (str): The text to tokenize.

  Returns:
- list: List of tokens (characters or token IDs if /api/tokenize is used).
+ list: List of individual characters.
  """
- # Basic character-level tokenization
- # return list(text)
-
- # For actual token IDs (slower, makes a network request):
- api_url = f"{self.host_address.rstrip('/')}/api/tokenize"
- payload = {"model": self.model_name, "prompt": text}
- try:
- response = requests.post(api_url, json=payload, timeout=10, verify=self.verify_ssl_certificate, headers=self.ollama_client_headers)
- response.raise_for_status()
- return response.json().get("tokens", [])
- except Exception as e:
- ASCIIColors.warning(f"Failed to tokenize text with Ollama server, falling back to char tokens: {e}")
- return list(text)
-
- def detokenize(self, tokens: List[Union[int,str]]) -> str:
+ ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
+ return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text)
+
+ def detokenize(self, tokens: list) -> str:
  """
- Convert a list of tokens back to text. If tokens are characters, joins them.
- If tokens are IDs, this is non-trivial without the model's tokenizer.
+ Convert a list of tokens back to text.

  Args:
- tokens (list): List of tokens to detokenize.
+ tokens (list): List of tokens (characters) to detokenize.

  Returns:
  str: Detokenized text.
  """
- if not tokens:
- return ""
- if isinstance(tokens[0], str): # Assuming character tokens
- return "".join(tokens)
- else:
- # Detokenizing IDs from Ollama is not straightforward client-side without specific tokenizer.
- # This is a placeholder. For Ollama, detokenization usually happens server-side.
- ASCIIColors.warning("Detokenizing integer tokens is not accurately supported by this Ollama client binding. Returning joined string of token IDs.")
- return "".join(map(str, tokens))
+ ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
+ return tiktoken.model.encoding_for_model("gpt-3.5-turbo").decode(tokens)

  def count_tokens(self, text: str) -> int:
  """
@@ -297,8 +277,8 @@ class OllamaBinding(LollmsLLMBinding):
  if not self.model_name:
  ASCIIColors.warning("Cannot count tokens, model_name is not set.")
  return -1
- return count_tokens_ollama(text, self.model_name, self.ollama_client)
-
+ #return count_tokens_ollama(text, self.model_name, self.ollama_client)
+ return len(self.tokenize(text))
  def embed(self, text: str, **kwargs) -> List[float]:
  """
  Get embeddings for the input text using Ollama API.
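A quick standalone sketch of the tiktoken round-trip that tokenize, detokenize and count_tokens now rely on; note this is only a rough GPT-3.5-style estimate, not the served Ollama model's own tokenizer:

    import tiktoken

    # Proxy encoding used for rough counts (not the served model's real tokenizer)
    enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

    text = "One plus one equals two."
    tokens = enc.encode(text)          # list of integer token ids
    print(len(tokens))                 # rough token count, as count_tokens() now computes
    print(enc.decode(tokens) == text)  # True: decode reverses encode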
lollms_client/llm_bindings/openai/__init__.py CHANGED
@@ -58,6 +58,7 @@ class OpenAIBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -98,6 +99,11 @@ class OpenAIBinding(LollmsLLMBinding):
  if images:
  messages = [
  {
+ "role": "system",
+ "content": system_prompt,
+ },
+
+ {
  "role": "user",
  "content": [
  {
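A sketch of the messages structure this builds for the image path, with illustrative values; the user content list is truncated where the hunk ends:

    messages = [
        {
            "role": "system",
            "content": "You are a helpful assistant.",  # system_prompt
        },
        {
            "role": "user",
            "content": [
                # ... text and image entries built by the rest of the binding ...
            ],
        },
    ]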
lollms_client/llm_bindings/vllm/__init__.py CHANGED
@@ -331,6 +331,7 @@ class VLLMBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = 1024,
  stream: bool = False, # vLLM's generate is blocking, stream is pseudo
  temperature: float = 0.7,
@@ -381,7 +382,7 @@ class VLLMBinding(LollmsLLMBinding):
  # If providing multi_modal_data, usually prompt_token_ids are also needed.
  # This can get complex as it depends on how the model expects images to be interleaved.
  # For a simple case where image comes first:
- encoded_prompt_ids = self.tokenizer.encode(prompt)
+ encoded_prompt_ids = self.tokenizer.encode(system_prompt+"\n"+prompt if system_prompt else prompt)
  gen_kwargs["prompt_token_ids"] = [encoded_prompt_ids] # List of lists
  gen_kwargs["multi_modal_data"] = [{"image": mm_data_content}] # List of dicts
  gen_kwargs["prompts"] = None # Don't use prompts if prompt_token_ids is used
@@ -389,7 +390,7 @@ class VLLMBinding(LollmsLLMBinding):
  except Exception as e_mm:
  return {"status": False, "error": f"Multimodal prep error: {e_mm}"}
  else:
- gen_kwargs["prompts"] = [prompt]
+ gen_kwargs["prompts"] = [system_prompt+"\n"+prompt if system_prompt else prompt]

  try:
  outputs = self.llm_engine.generate(**gen_kwargs, sampling_params=sampling_params)
lollms_client/llm_bindings/transformers/__init__.py CHANGED
@@ -112,6 +112,7 @@ class TransformersBinding(LollmsLLMBinding):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
@@ -123,8 +124,7 @@ class TransformersBinding(LollmsLLMBinding):
  n_threads: int = 8,
  ctx_size: int | None = None,
  streaming_callback: Optional[Callable[[str, str], None]] = None,
- return_legacy_cache: bool = False,
- system_prompt: str = "You are a helpful assistant.") -> Union[str, dict]:
+ return_legacy_cache: bool = False) -> Union[str, dict]:
  """
  Generate text using the Transformers model, with optional image support.

lollms_client/lollms_core.py CHANGED
@@ -329,6 +329,7 @@ class LollmsClient():
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: Optional[bool] = None,
  temperature: Optional[float] = None,
@@ -365,6 +366,7 @@ class LollmsClient():
  return self.binding.generate_text(
  prompt=prompt,
  images=images,
+ system_prompt=system_prompt,
  n_predict=n_predict if n_predict is not None else self.default_n_predict,
  stream=stream if stream is not None else self.default_stream,
  temperature=temperature if temperature is not None else self.default_temperature,
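Putting it together, a minimal usage sketch of the new top-level parameter (binding and model name are illustrative, taken from the example above):

    from lollms_client import LollmsClient

    lc = LollmsClient("ollama", model_name="mistral-nemo:latest")
    response = lc.generate_text(
        prompt="One plus one equals ",
        system_prompt="You only ever answer with quirky, playful replies.",
        temperature=0.5,
    )
    print(response)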
lollms_client/lollms_llm_binding.py CHANGED
@@ -32,6 +32,7 @@ class LollmsLLMBinding(ABC):
  def generate_text(self,
  prompt: str,
  images: Optional[List[str]] = None,
+ system_prompt: str = "",
  n_predict: Optional[int] = None,
  stream: bool = False,
  temperature: float = 0.1,
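Since the abstract signature now carries system_prompt, third-party bindings are expected to accept it as well. A hypothetical subclass sketch (only the parameters visible in this diff are shown; the real signature has more):

    from typing import Optional, List
    from lollms_client.lollms_llm_binding import LollmsLLMBinding

    class MyBinding(LollmsLLMBinding):  # hypothetical binding for illustration
        def generate_text(self,
                          prompt: str,
                          images: Optional[List[str]] = None,
                          system_prompt: str = "",
                          n_predict: Optional[int] = None,
                          stream: bool = False,
                          temperature: float = 0.1,
                          **kwargs) -> str:
            # Fold the system prompt into whatever format the backend expects
            full_prompt = f"{system_prompt}\n{prompt}" if system_prompt else prompt
            return full_prompt  # placeholder: a real binding would call its backend here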
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.13.1
+ Version: 0.14.0
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
@@ -1,9 +1,10 @@
- examples/simple_text_gen_test.py,sha256=cwXpwe_9V_Vgmn9ibe-QH3lAudL1Jxw0KrzkcmYllI4,8733
+ examples/simple_text_gen_test.py,sha256=RoX9ZKJjGMujeep60wh5WT_GoBn0O9YKJY6WOy-ZmOc,8710
  examples/simple_text_gen_with_image_test.py,sha256=Euv53jbKTVJDvs854lgJvA5F-iRnAATLxAklig24ots,8534
  examples/text_2_audio.py,sha256=MfL4AH_NNwl6m0I0ywl4BXRZJ0b9Y_9fRqDIe6O-Sbw,3523
  examples/text_2_image.py,sha256=Ri7lQ-GW54YWQh2eofcaN6LpwFoorbpJsJffrcXl3cg,6415
  examples/text_and_image_2_audio.py,sha256=QLvSsLff8VZZa7k7K1EFGlPpQWZy07zM4Fnli5btAl0,2074
- examples/text_gen.py,sha256=S1UIXi3Aj8gTAmwHXtg-qUfgSkyCJFDLVKAbiOfsYGs,773
+ examples/text_gen.py,sha256=IejpNmIlsfz3WpJg8IRm5X6F06JKd7h_GuonUxTITx8,758
+ examples/text_gen_system_prompt.py,sha256=wumwZ09WZkaK0tQ74KaZmfsYXcmjZIlsdim_P1aJmeA,910
  examples/article_summary/article_summary.py,sha256=CR8mCBNcZEVCR-q34uOmrJyMlG-xk4HkMbsV-TOZEnk,1978
  examples/deep_analyze/deep_analyse.py,sha256=fZNmDrfEAuxEAfdbjAgJYIh1k6wbiuZ4RvwHRvtyUs8,971
  examples/deep_analyze/deep_analyze_multiple_files.py,sha256=fOryShA33P4IFxcxUDe-nJ2kW0v9w9yW8KsToS3ETl8,1032
@@ -12,13 +13,13 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=dYRkaz6dRRkKVNICLLMXkZsK_pDFPo7A1ukkGdv-aoU,823
+ lollms_client/__init__.py,sha256=PHFRY4RskAaiectooBrSCrxd6UGpZkdTqMHmXM26VnQ,823
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
- lollms_client/lollms_core.py,sha256=ZTbEVn1M_gHAL3mL5mf3wGYAXidAtnSI3qEjwz2HlwY,77980
+ lollms_client/lollms_core.py,sha256=KkeKjQZVeUjdsQjxw2bygUxq1gXlPNnYiyxdWnwA4L8,78073
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
  lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=vAFmGtdIB97nQwQzNauFowopH4qT2i-CS_-ckekY3V0,7361
+ lollms_client/lollms_llm_binding.py,sha256=7xvtLsFQYqFKS7m0BQQMvVq0XXZWZeGlGuv30mi1dF8,7408
  lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
  lollms_client/lollms_stt_binding.py,sha256=ovmpFF0fnmPC9VNi1-rxAJA8xI4JZDUBh_YwdtoTx28,5818
  lollms_client/lollms_tasks.py,sha256=Tgqces03gPTHFJCcPaeN9vBCsil3SSJX7nQAjCQ2-yg,34393
@@ -29,12 +30,12 @@ lollms_client/lollms_ttv_binding.py,sha256=u-gLIe22tbu4YsKA5RTyUT7iBlKxPXDmoQzcc
  lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
  lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
- lollms_client/llm_bindings/lollms/__init__.py,sha256=l1q2KnMQALz9QpLa3OUQ8e29KU4RCwkrmrdBvd7Z_kc,12236
- lollms_client/llm_bindings/ollama/__init__.py,sha256=8gxYzqun08fU5oXDWpbgXmAOuHeGxjx-dtpn_SqmqxI,27761
- lollms_client/llm_bindings/openai/__init__.py,sha256=SWBgnOcOWmFRSKTN1S9ATownHNBJ9f6FEtI3L4xNJNM,11861
- lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=ZpeSKAbN8rh6zkysYl95sXG9Ci702NuPAhXC6zb1zT4,31840
- lollms_client/llm_bindings/transformers/__init__.py,sha256=8JbX3B-obLt5NNtcNOGD_E0f8OQTma2pNYtVt2urTOM,12572
- lollms_client/llm_bindings/vllm/__init__.py,sha256=54TxuHJhlujVxPN03BgfeUecIexTjvU6g1dVmmaDgtM,31824
+ lollms_client/llm_bindings/lollms/__init__.py,sha256=a36AMPFEf3xK4zx1M_L9PC-3-b0iiDf7eyLkknPjgaY,12356
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=MemSA20Zivn-kfP11JPA9FHigC1U2CGsJ1FaDtUFUUM,26574
+ lollms_client/llm_bindings/openai/__init__.py,sha256=NDZIdzW0pnHy9gPXSKfFyS6SPIOOxj9ZEzEE7gZT2NQ,12054
+ lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=IY4CrHVpHY77R1rzsl3iwcoarDjYD24n7bFKk_69PD8,31983
+ lollms_client/llm_bindings/transformers/__init__.py,sha256=IWfAmBGqZEelt5Z_jYTqpz7LzzKMVsKWx5nv4zBgKCQ,12544
+ lollms_client/llm_bindings/vllm/__init__.py,sha256=ZRCR7g3A2kHQ_07viNrNnVHoIGj5TNA4Q41rQWeTlxw,31967
  lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/stt_bindings/lollms/__init__.py,sha256=7-IZkrsn15Vaz0oqkqCxMeNQfMkeilbgScLlrrywES4,6098
  lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -45,8 +46,8 @@ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
  lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.13.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.13.1.dist-info/METADATA,sha256=eLvEitmwnBztnm_z5Cb-aKh_FyW6DTMWqgxMvNcRwos,7276
- lollms_client-0.13.1.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- lollms_client-0.13.1.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.13.1.dist-info/RECORD,,
+ lollms_client-0.14.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.14.0.dist-info/METADATA,sha256=gkDoZr-SYxtqgyzp339qwmNj1_iBiFurvPVD6TTvc2Q,7276
+ lollms_client-0.14.0.dist-info/WHEEL,sha256=zaaOINJESkSfm_4HQVc5ssNzHCPXhJm0kEUakpsEHaU,91
+ lollms_client-0.14.0.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+ lollms_client-0.14.0.dist-info/RECORD,,
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (80.7.1)
+ Generator: setuptools (80.8.0)
  Root-Is-Purelib: true
  Tag: py3-none-any