lollms-client 0.12.1__py3-none-any.whl → 0.12.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -1,8 +1,20 @@
+# lollms_client/__init__.py
 from lollms_client.lollms_core import LollmsClient, ELF_COMPLETION_FORMAT
 from lollms_client.lollms_tasks import TasksLibrary
-from lollms_client.lollms_types import MSG_TYPE
-from lollms_client.lollms_personality import LollmsPersonality
+from lollms_client.lollms_types import MSG_TYPE  # Assuming ELF_GENERATION_FORMAT is not directly used by users from here
 from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
-from lollms_client.lollms_utilities import PromptReshaper
-from lollms_client.lollms_tts_binding import LollmsTTS
+from lollms_client.lollms_utilities import PromptReshaper  # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
+
+# Optionally, you could define __all__ if you want to be explicit about exports
+__all__ = [
+    "LollmsClient",
+    "ELF_COMPLETION_FORMAT",
+    "TasksLibrary",
+    "MSG_TYPE",
+    "LollmsPersonality",
+    "LollmsDiscussion",
+    "LollmsMessage",
+    "PromptReshaper",
+    "FunctionCalling_Library"
+]
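
Note on the new __all__: it still lists "LollmsPersonality" even though this release removes the corresponding import, so a star-import of the package would fail on that name. A minimal reproduction sketch of that behavior:

# Sketch: __all__ naming a symbol the module never defines breaks star-imports.
import lollms_client               # fine: __init__ itself never references the name

from lollms_client import *       # raises AttributeError: module 'lollms_client'
                                   # has no attribute 'LollmsPersonality'

Plain imports of the names that are actually defined (LollmsClient, TasksLibrary, MSG_TYPE, ...) are unaffected.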
lollms_client/llm_bindings/lollms/__init__.py CHANGED
@@ -10,6 +10,7 @@ import json
 
 BindingName = "LollmsLLMBinding"
 
+
 class LollmsLLMBinding(LollmsLLMBinding):
     """LOLLMS-specific binding implementation"""
 
@@ -215,7 +216,18 @@ class LollmsLLMBinding(LollmsLLMBinding):
         except Exception as ex:
             return {"status": False, "error": str(ex)}
 
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens from a text.
+
+        Args:
+            text (str): The text to tokenize and count.
+
+        Returns:
+            int: Number of tokens in text.
+        """
+        return len(self.tokenize(text))
+
     def embed(self, text: str, **kwargs) -> list:
         """
         Get embeddings for the input text using Ollama API
lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -11,6 +11,72 @@ from ascii_colors import ASCIIColors, trace_exception
 
 BindingName = "OllamaBinding"
 
+def count_tokens_ollama(
+    text_to_tokenize: str,
+    model_name: str,
+    ollama_host: str = "http://localhost:11434",
+    timeout: int = 30
+) -> int:
+    """
+    Counts the number of tokens in a given text using a specified Ollama model
+    by calling the Ollama server's /api/tokenize endpoint.
+
+    Args:
+        text_to_tokenize (str): The text to be tokenized.
+        model_name (str): The name of the Ollama model to use (e.g., "llama3", "mistral").
+        ollama_host (str): The base URL of the Ollama server (default: "http://localhost:11434").
+        timeout (int): Timeout for the request in seconds (default: 30).
+
+    Returns:
+        int: The number of tokens.
+
+    Raises:
+        requests.exceptions.RequestException: For network issues or timeouts.
+        requests.exceptions.HTTPError: For HTTP error responses (4xx or 5xx).
+        ValueError: If the response from Ollama is not as expected (e.g., missing 'tokens' key).
+    """
+    api_url = f"{ollama_host}/api/tokenize"
+    payload = {
+        "model": model_name,
+        "prompt": text_to_tokenize
+        # You can add "options" here if needed, but for tokenization it's usually not required.
+        # "options": {"num_ctx": 4096}  # Example, might influence the tokenizer for a specific context length
+    }
+
+    try:
+        response = requests.post(api_url, json=payload, timeout=timeout)
+        response.raise_for_status()  # Raises HTTPError for bad responses (4xx or 5xx)
+
+        response_data = response.json()
+
+        if "tokens" in response_data and isinstance(response_data["tokens"], list):
+            return len(response_data["tokens"])
+        else:
+            raise ValueError(
+                f"Ollama response did not contain a 'tokens' list. Response: {response_data}"
+            )
+
+    except requests.exceptions.HTTPError as http_err:
+        # Inspect http_err.response.text for more details from Ollama
+        print(f"HTTP error occurred: {http_err} - {http_err.response.text}")
+        raise  # Re-raise the exception
+    except requests.exceptions.ConnectionError as conn_err:
+        print(f"Connection error occurred: {conn_err}")
+        raise
+    except requests.exceptions.Timeout as timeout_err:
+        print(f"Timeout error occurred: {timeout_err}")
+        raise
+    except requests.exceptions.RequestException as req_err:
+        print(f"An unexpected error occurred with the request: {req_err}")
+        raise
+    except json.JSONDecodeError as json_err:
+        # This can happen if the server returns non-JSON, e.g., an HTML error page
+        raise ValueError(
+            f"Failed to decode JSON response from Ollama: {json_err}. Response text: {response.text}"
+        ) from json_err
+    except ValueError as val_err:  # Catches the ValueError raised above for missing 'tokens'
+        print(f"Value error: {val_err}")
+        raise
 class OllamaBinding(LollmsLLMBinding):
     """Ollama-specific binding implementation"""
 
@@ -210,6 +276,18 @@ class OllamaBinding(LollmsLLMBinding):
         """
         return "".join(tokens)
 
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens from a text.
+
+        Args:
+            text (str): The text to tokenize and count.
+
+        Returns:
+            int: Number of tokens in text.
+        """
+        return count_tokens_ollama(text, self.model_name, self.host_address)
+
     def embed(self, text: str, **kwargs) -> list:
         """
         Get embeddings for the input text using Ollama API
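
For reference, a direct-call sketch of the new helper, assuming a local Ollama server with the named model already pulled (the model name here is an example). One caution: /api/tokenize is not part of every Ollama release's HTTP API, so on servers that do not expose it the call fails with a 404, which raise_for_status() surfaces as an HTTPError:

import requests  # the helper's module must also have requests (and json) imported

try:
    n = count_tokens_ollama("Why is the sky blue?", model_name="llama3")
    print(f"{n} tokens")
except (requests.exceptions.RequestException, ValueError) as ex:
    # Covers connection errors, timeouts, HTTP errors (including a 404 for a
    # missing /api/tokenize endpoint), and malformed responses.
    print(f"Token counting failed: {ex}")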
lollms_client/llm_bindings/openai/__init__.py CHANGED
@@ -199,6 +199,19 @@ class OpenAIBinding(LollmsLLMBinding):
             return tiktoken.model.encoding_for_model(self.model_name).decode(tokens)
         except:
             return tiktoken.model.encoding_for_model("gpt-3.5-turbo").decode(tokens)
+
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens from a text.
+
+        Args:
+            text (str): The text to tokenize and count.
+
+        Returns:
+            int: Number of tokens in text.
+        """
+        return len(self.tokenize(text))
+
 
     def embed(self, text: str, **kwargs) -> list:
         """
lollms_client/llm_bindings/transformers/__init__.py CHANGED
@@ -254,6 +254,19 @@ class TransformersBinding(LollmsLLMBinding):
         """Convert a list of tokens back to text."""
         return "".join(tokens)
 
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens from a text.
+
+        Args:
+            text (str): The text to tokenize and count.
+
+        Returns:
+            int: Number of tokens in text.
+        """
+        return len(self.tokenize(text))
+
+
     def embed(self, text: str, **kwargs) -> list:
         """Get embeddings for the input text (placeholder)."""
         pass
lollms_client/lollms_llm_binding.py CHANGED
@@ -103,6 +103,19 @@ class LollmsLLMBinding(ABC):
             str: Detokenized text.
         """
         pass
+
+    @abstractmethod
+    def count_tokens(self, text: str) -> int:
+        """
+        Count tokens from a text.
+
+        Args:
+            text (str): The text to tokenize and count.
+
+        Returns:
+            int: Number of tokens in text.
+        """
+        pass
 
     @abstractmethod
     def embed(self, text: str, **kwargs) -> list:
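
With count_tokens() now abstract on the base class, every binding must implement it or it cannot be instantiated. A toy sketch of the contract (the base class below is a simplified stand-in; the real LollmsLLMBinding declares more abstract methods than shown):

from abc import ABC, abstractmethod

class MinimalBinding(ABC):  # simplified stand-in for LollmsLLMBinding
    @abstractmethod
    def tokenize(self, text: str) -> list:
        pass

    @abstractmethod
    def count_tokens(self, text: str) -> int:
        pass

class WhitespaceBinding(MinimalBinding):
    """Toy binding that treats whitespace-separated words as tokens."""

    def tokenize(self, text: str) -> list:
        return text.split()

    def count_tokens(self, text: str) -> int:
        # The same pattern the lollms, openai, and transformers bindings use.
        return len(self.tokenize(text))

print(WhitespaceBinding().count_tokens("count these four tokens"))  # 4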
lollms_client-0.12.3.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.12.1
+Version: 0.12.3
 Summary: A client library for LoLLMs generate endpoint
 Home-page: https://github.com/ParisNeo/lollms_client
 Author: ParisNeo
lollms_client-0.12.3.dist-info/RECORD CHANGED
@@ -1,10 +1,10 @@
-lollms_client/__init__.py,sha256=CrU6LzlhPyYQOMDgkw8Dqb3nvWYyZk0HixGF0QyTOWc,501
+lollms_client/__init__.py,sha256=Ey-ct0qjYx07HIZo5C_6ZOPOwZdlKyrH4LaMszGUzQM,821
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
 lollms_client/lollms_core.py,sha256=NYUmzYxxMAPyy8OQ5fnPSerzM5IEYxhz598zZdA6Qrg,77772
 lollms_client/lollms_discussion.py,sha256=9b83m0D894jwpgssWYTQHbVxp1gJoI-J947Ui_dRXII,2073
 lollms_client/lollms_functions.py,sha256=p8SFtmEPqvVCsIz2fZ5HxyOHaxjrAo5c12uTzJnb6m8,3594
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
-lollms_client/lollms_llm_binding.py,sha256=3oyj2GF7x27bCQxM8D1ZkKiEfGKFp0E_Bv5bH_meTMk,8331
+lollms_client/lollms_llm_binding.py,sha256=pYygaOHD4M4LGDNlaV4fAiHtnPzfgDlDNuOcWF0k2RQ,8627
 lollms_client/lollms_personality.py,sha256=gTOU7WJrxyNE88g-9-is5QxMf84s6xbEMAv--SD2P64,20313
 lollms_client/lollms_personality_worker.py,sha256=rQbZg9Gn-R3b6x0Ryb4JPWJzBfn4fObDzj5IWYez_9o,65331
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
@@ -20,10 +20,10 @@ lollms_client/lollms_ttv_binding.py,sha256=u-gLIe22tbu4YsKA5RTyUT7iBlKxPXDmoQzcc
 lollms_client/lollms_types.py,sha256=cfc1sremM8KR4avkYX99fIVkkdRvXErrCWKGjLrgv50,2723
 lollms_client/lollms_utilities.py,sha256=YAgamfp0pBVApR68AHKjhp1lh6isMNF8iadwWLl63c0,7045
 lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
-lollms_client/llm_bindings/lollms/__init__.py,sha256=PRQwXOdqR_nWMvxwcL2u-32YBPDrSdQZRUxkbz3-bBk,11911
-lollms_client/llm_bindings/ollama/__init__.py,sha256=7xZpoRQNptTPlHY9o5BD0xH2yR-b3FcT8wfv-SOWS9Q,12534
-lollms_client/llm_bindings/openai/__init__.py,sha256=8VbJ9EF30w0gTlc9e7gzvTxBfWl0mjNBGlHqUoqQ2eg,10463
-lollms_client/llm_bindings/transformers/__init__.py,sha256=vSzg5OWpiFPPP3GzEfoOs9K7Q0B1n6zCi3-j4M9tkWE,12467
+lollms_client/llm_bindings/lollms/__init__.py,sha256=H1Vw6trTzinS_xaeNWZ7Aq-3XngbzoYxtA4Se2IeCpQ,12213
+lollms_client/llm_bindings/ollama/__init__.py,sha256=oOSWM7eVpDPTBGulRdHukOxpIwvA1x5VNsJEVgT2jFk,15843
+lollms_client/llm_bindings/openai/__init__.py,sha256=n8y14j6MAkaZSGMsvvUogr8LRgLsNz4S6QJJdB6H0lQ,10763
+lollms_client/llm_bindings/transformers/__init__.py,sha256=UyaiQcJQricBZJGe1zfNIVy6Cb3QpHSvImSoE9FhgC0,12771
 lollms_client/stt_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lollms_client/stt_bindings/lollms/__init__.py,sha256=7-IZkrsn15Vaz0oqkqCxMeNQfMkeilbgScLlrrywES4,6098
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -34,8 +34,8 @@ lollms_client/tts_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJ
 lollms_client/tts_bindings/lollms/__init__.py,sha256=8x2_T9XscvISw2TiaLoFxvrS7TIsVLdqbwSc04cX-wc,7164
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-0.12.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-0.12.1.dist-info/METADATA,sha256=KSlyF6abPll_F4-vU03grmQSpt5yTkW6tnOVD93HbG0,6866
-lollms_client-0.12.1.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
-lollms_client-0.12.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-0.12.1.dist-info/RECORD,,
+lollms_client-0.12.3.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-0.12.3.dist-info/METADATA,sha256=t92LRON5mw051xnzqLrPRyagxWEklQZrnaOJwEXy9OM,6866
+lollms_client-0.12.3.dist-info/WHEEL,sha256=SmOxYU7pzNKBqASvQJ7DjX3XGUF92lrGhMb3R6_iiqI,91
+lollms_client-0.12.3.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-0.12.3.dist-info/RECORD,,