lollms-client 0.13.1__py3-none-any.whl → 0.13.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lollms-client might be problematic.

examples/simple_text_gen_test.py CHANGED
@@ -1,4 +1,4 @@
- from lollms_client import LollmsClient, ELF_COMPLETION_FORMAT
+ from lollms_client import LollmsClient
  from lollms_client.lollms_types import MSG_TYPE # For callback signature
  from ascii_colors import ASCIIColors, trace_exception
 
lollms_client/__init__.py CHANGED
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
  from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
  from lollms_client.lollms_functions import FunctionCalling_Library
 
- __version__ = "0.13.1"
+ __version__ = "0.13.2"
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -10,10 +10,11 @@ from typing import Optional, Callable, List, Union, Dict
 
  from ascii_colors import ASCIIColors, trace_exception
  import pipmaster as pm
- pm.ensure_packages(["ollama","pillow"])
+ pm.ensure_packages(["ollama","pillow","tiktoken"])
 
 
  import ollama
+ import tiktoken
  BindingName = "OllamaBinding"
 
 
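For context: this binding bootstraps its third-party dependencies at import time, so adding "tiktoken" to the ensure_packages list is what makes the new import safe on a clean machine. A minimal standalone sketch of the same pattern (not part of the package itself):

    # Import-time dependency bootstrap, as used by the Ollama binding:
    # pipmaster installs any listed package that is missing, so the
    # imports that follow can be assumed to resolve.
    import pipmaster as pm

    pm.ensure_packages(["ollama", "pillow", "tiktoken"])

    import ollama    # Ollama client library
    import tiktoken  # OpenAI tokenizer, used below only for rough estimates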
@@ -236,53 +237,31 @@ class OllamaBinding(LollmsLLMBinding):
              trace_exception(ex)
              return {"status": False, "error": error_message}
 
-     def tokenize(self, text: str) -> List[Union[int, str]]:
+     def tokenize(self, text: str) -> list:
          """
-         Tokenize the input text. For Ollama, this is complex as tokenization is model-specific
-         and best done by the server. This method provides a basic character-level tokenization
-         as a fallback or placeholder, or one could attempt to call /api/tokenize if desired.
-         The `count_tokens` method is more accurate for Ollama.
+         Tokenize the input text into a list of characters.
 
          Args:
              text (str): The text to tokenize.
 
          Returns:
-             list: List of tokens (characters or token IDs if /api/tokenize is used).
+             list: List of individual characters.
          """
-         # Basic character-level tokenization
-         # return list(text)
-
-         # For actual token IDs (slower, makes a network request):
-         api_url = f"{self.host_address.rstrip('/')}/api/tokenize"
-         payload = {"model": self.model_name, "prompt": text}
-         try:
-             response = requests.post(api_url, json=payload, timeout=10, verify=self.verify_ssl_certificate, headers=self.ollama_client_headers)
-             response.raise_for_status()
-             return response.json().get("tokens", [])
-         except Exception as e:
-             ASCIIColors.warning(f"Failed to tokenize text with Ollama server, falling back to char tokens: {e}")
-             return list(text)
-
-     def detokenize(self, tokens: List[Union[int,str]]) -> str:
+         ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
+         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text)
+
+     def detokenize(self, tokens: list) -> str:
          """
-         Convert a list of tokens back to text. If tokens are characters, joins them.
-         If tokens are IDs, this is non-trivial without the model's tokenizer.
+         Convert a list of tokens back to text.
 
          Args:
-             tokens (list): List of tokens to detokenize.
+             tokens (list): List of tokens (characters) to detokenize.
 
          Returns:
              str: Detokenized text.
          """
-         if not tokens:
-             return ""
-         if isinstance(tokens[0], str): # Assuming character tokens
-             return "".join(tokens)
-         else:
-             # Detokenizing IDs from Ollama is not straightforward client-side without specific tokenizer.
-             # This is a placeholder. For Ollama, detokenization usually happens server-side.
-             ASCIIColors.warning("Detokenizing integer tokens is not accurately supported by this Ollama client binding. Returning joined string of token IDs.")
-             return "".join(map(str, tokens))
+         ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
+         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").decode(tokens)
 
      def count_tokens(self, text: str) -> int:
          """
@@ -297,8 +276,8 @@ class OllamaBinding(LollmsLLMBinding):
          if not self.model_name:
              ASCIIColors.warning("Cannot count tokens, model_name is not set.")
              return -1
-         return count_tokens_ollama(text, self.model_name, self.ollama_client)
-
+         #return count_tokens_ollama(text, self.model_name, self.ollama_client)
+         return len(self.tokenize(text))
      def embed(self, text: str, **kwargs) -> List[float]:
          """
          Get embeddings for the input text using Ollama API.
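With the server-side count_tokens_ollama call commented out, count_tokens is now simply the length of the tiktoken encoding: no network round-trip, at the cost of accuracy for non-OpenAI tokenizers. A standalone sketch of the equivalent computation (estimate_token_count is an illustrative name, not part of the package):

    import tiktoken

    def estimate_token_count(text: str) -> int:
        # Mirrors the new count_tokens logic: length of the tiktoken
        # encoding, an estimate rather than a server-reported count.
        return len(tiktoken.encoding_for_model("gpt-3.5-turbo").encode(text))

    print(estimate_token_count("Count the tokens in this sentence."))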
{lollms_client-0.13.1.dist-info → lollms_client-0.13.2.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 0.13.1
+ Version: 0.13.2
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
{lollms_client-0.13.1.dist-info → lollms_client-0.13.2.dist-info}/RECORD CHANGED
@@ -1,4 +1,4 @@
- examples/simple_text_gen_test.py,sha256=cwXpwe_9V_Vgmn9ibe-QH3lAudL1Jxw0KrzkcmYllI4,8733
+ examples/simple_text_gen_test.py,sha256=RoX9ZKJjGMujeep60wh5WT_GoBn0O9YKJY6WOy-ZmOc,8710
  examples/simple_text_gen_with_image_test.py,sha256=Euv53jbKTVJDvs854lgJvA5F-iRnAATLxAklig24ots,8534
  examples/text_2_audio.py,sha256=MfL4AH_NNwl6m0I0ywl4BXRZJ0b9Y_9fRqDIe6O-Sbw,3523
  examples/text_2_image.py,sha256=Ri7lQ-GW54YWQh2eofcaN6LpwFoorbpJsJffrcXl3cg,6415
@@ -12,7 +12,7 @@ examples/personality_test/chat_test.py,sha256=o2jlpoddFc-T592iqAiA29xk3x27KsdK5D
  examples/personality_test/chat_with_aristotle.py,sha256=4X_fwubMpd0Eq2rCReS2bgVlUoAqJprjkLXk2Jz6pXU,1774
  examples/personality_test/tesks_test.py,sha256=7LIiwrEbva9WWZOLi34fsmCBN__RZbPpxoUOKA_AtYk,1924
  examples/test_local_models/local_chat.py,sha256=slakja2zaHOEAUsn2tn_VmI4kLx6luLBrPqAeaNsix8,456
- lollms_client/__init__.py,sha256=dYRkaz6dRRkKVNICLLMXkZsK_pDFPo7A1ukkGdv-aoU,823
+ lollms_client/__init__.py,sha256=y-N8Dw10pI9pHtP_zlVzsj7bVjsu873EdOlbqxaxZRU,823
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
  lollms_client/lollms_core.py,sha256=ZTbEVn1M_gHAL3mL5mf3wGYAXidAtnSI3qEjwz2HlwY,77980
  lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
@@ -30,7 +30,7 @@ lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50
  lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
  lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
  lollms_client/llm_bindings/lollms/__init__.py,sha256=l1q2KnMQALz9QpLa3OUQ8e29KU4RCwkrmrdBvd7Z_kc,12236
- lollms_client/llm_bindings/ollama/__init__.py,sha256=8gxYzqun08fU5oXDWpbgXmAOuHeGxjx-dtpn_SqmqxI,27761
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=DyueED1cJmmJFg5evYmu-lrkwsN9pAxaVcwgUkcAZHU,26467
  lollms_client/llm_bindings/openai/__init__.py,sha256=SWBgnOcOWmFRSKTN1S9ATownHNBJ9f6FEtI3L4xNJNM,11861
  lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=ZpeSKAbN8rh6zkysYl95sXG9Ci702NuPAhXC6zb1zT4,31840
  lollms_client/llm_bindings/transformers/__init__.py,sha256=8JbX3B-obLt5NNtcNOGD_E0f8OQTma2pNYtVt2urTOM,12572
@@ -45,8 +45,8 @@ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
  lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-0.13.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-0.13.1.dist-info/METADATA,sha256=eLvEitmwnBztnm_z5Cb-aKh_FyW6DTMWqgxMvNcRwos,7276
- lollms_client-0.13.1.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
- lollms_client-0.13.1.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
- lollms_client-0.13.1.dist-info/RECORD,,
+ lollms_client-0.13.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-0.13.2.dist-info/METADATA,sha256=IsZiVKLRi7NaMhITn6pFz3CX5zpIJAvb8vsddYIBJQA,7276
+ lollms_client-0.13.2.dist-info/WHEEL,sha256=Nw36Djuh_5VDukK0H78QzOX-_FQEo6V37m3nkm96gtU,91
+ lollms_client-0.13.2.dist-info/top_level.txt,sha256=NI_W8S4OYZvJjb0QWMZMSIpOrYzpqwPGYaklhyWKH2w,23
+ lollms_client-0.13.2.dist-info/RECORD,,