lollms-client 0.13.0__tar.gz → 0.13.1__tar.gz

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries, and is provided for informational purposes only.

Potentially problematic release: this version of lollms-client might be problematic.

Files changed (58)
  1. {lollms_client-0.13.0 → lollms_client-0.13.1}/PKG-INFO +8 -1
  2. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/simple_text_gen_test.py +3 -1
  3. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/__init__.py +2 -2
  4. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/ollama/__init__.py +36 -53
  5. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_core.py +14 -1
  6. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client.egg-info/PKG-INFO +8 -1
  7. lollms_client-0.13.1/lollms_client.egg-info/requires.txt +8 -0
  8. {lollms_client-0.13.0 → lollms_client-0.13.1}/pyproject.toml +8 -1
  9. lollms_client-0.13.0/lollms_client.egg-info/requires.txt +0 -1
  10. {lollms_client-0.13.0 → lollms_client-0.13.1}/LICENSE +0 -0
  11. {lollms_client-0.13.0 → lollms_client-0.13.1}/README.md +0 -0
  12. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/article_summary/article_summary.py +0 -0
  13. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/deep_analyze/deep_analyse.py +0 -0
  14. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/deep_analyze/deep_analyze_multiple_files.py +0 -0
  15. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/function_call/functions_call_with images.py +0 -0
  16. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/personality_test/chat_test.py +0 -0
  17. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/personality_test/chat_with_aristotle.py +0 -0
  18. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/personality_test/tesks_test.py +0 -0
  19. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/simple_text_gen_with_image_test.py +0 -0
  20. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/test_local_models/local_chat.py +0 -0
  21. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/text_2_audio.py +0 -0
  22. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/text_2_image.py +0 -0
  23. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/text_and_image_2_audio.py +0 -0
  24. {lollms_client-0.13.0 → lollms_client-0.13.1}/examples/text_gen.py +0 -0
  25. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/__init__.py +0 -0
  26. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  27. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/openai/__init__.py +0 -0
  28. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  29. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  30. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  31. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_config.py +0 -0
  32. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_discussion.py +0 -0
  33. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_functions.py +0 -0
  34. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_js_analyzer.py +0 -0
  35. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_llm_binding.py +0 -0
  36. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_python_analyzer.py +0 -0
  37. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_stt_binding.py +0 -0
  38. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_tasks.py +0 -0
  39. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_tti_binding.py +0 -0
  40. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_ttm_binding.py +0 -0
  41. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_tts_binding.py +0 -0
  42. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_ttv_binding.py +0 -0
  43. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_types.py +0 -0
  44. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_utilities.py +0 -0
  45. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/stt_bindings/__init__.py +0 -0
  46. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  47. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/tti_bindings/__init__.py +0 -0
  48. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  49. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/ttm_bindings/__init__.py +0 -0
  50. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  51. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/tts_bindings/__init__.py +0 -0
  52. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  53. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/ttv_bindings/__init__.py +0 -0
  54. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  55. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client.egg-info/SOURCES.txt +0 -0
  56. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client.egg-info/dependency_links.txt +0 -0
  57. {lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client.egg-info/top_level.txt +0 -0
  58. {lollms_client-0.13.0 → lollms_client-0.13.1}/setup.cfg +0 -0

{lollms_client-0.13.0 → lollms_client-0.13.1}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.13.0
+Version: 0.13.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -19,6 +19,13 @@ Requires-Python: >=3.7
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests
+Requires-Dist: ascii-colors
+Requires-Dist: pipmaster
+Requires-Dist: pyyaml
+Requires-Dist: tiktoken
+Requires-Dist: pydantic
+Requires-Dist: numpy
+Requires-Dist: pillow
 Dynamic: license-file
 
 # lollms_client

{lollms_client-0.13.0 → lollms_client-0.13.1}/examples/simple_text_gen_test.py
@@ -97,6 +97,8 @@ def test_text_generation():
     print(emb)
 
     # else: if callback returns False early, response_stream might be partial.
+    nb_tokens = lc.count_tokens("")
+    ASCIIColors.yellow("Number of tokens of : Testing count of tokens\n"+f"{nb_tokens}")
 
     # 3. Test generation with a specific model (if applicable and different from default)
     # This tests the switch_model or model loading mechanism of the binding.
@@ -168,4 +170,4 @@ def test_text_generation():
         trace_exception(e)
 
 if __name__ == "__main__":
-    test_text_generation()
+    test_text_generation()

{lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/__init__.py
@@ -6,7 +6,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion, LollmsMessage
 from lollms_client.lollms_utilities import PromptReshaper # Keep general utilities
 from lollms_client.lollms_functions import FunctionCalling_Library
 
-__version__ = "0.13.0"
+__version__ = "0.13.1"
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
@@ -18,4 +18,4 @@ __all__ = [
     "LollmsMessage",
     "PromptReshaper",
     "FunctionCalling_Library"
-]
+]

{lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/llm_bindings/ollama/__init__.py
@@ -20,62 +20,44 @@ BindingName = "OllamaBinding"
 def count_tokens_ollama(
     text_to_tokenize: str,
     model_name: str,
-    ollama_host: str = "http://localhost:11434",
-    timeout: int = 30,
-    verify_ssl_certificate: bool = True,
-    headers: Optional[Dict[str, str]] = None
+    ollama_client: ollama.Client,
 ) -> int:
     """
-    Counts the number of tokens in a given text using a specified Ollama model
-    by calling the Ollama server's /api/tokenize endpoint.
+    Counts the number of tokens in a given text for a specified Ollama model
+    by making a minimal request to the /api/generate endpoint and extracting
+    the 'prompt_eval_count' from the response.
+
+    This method is generally more accurate for the specific Ollama model instance
+    than using an external tokenizer, but it incurs the overhead of an API call
+    and model processing for the prompt.
 
     Args:
-        text_to_tokenize (str): The text to be tokenized.
-        model_name (str): The name of the Ollama model to use (e.g., "llama3", "mistral").
-        ollama_host (str): The base URL of the Ollama server (default: "http://localhost:11434").
-        timeout (int): Timeout for the request in seconds (default: 30).
-        verify_ssl_certificate (bool): Whether to verify SSL.
-        headers (Optional[Dict[str, str]]): Optional headers for the request.
+        text_to_tokenize: The string to tokenize.
+        model_name: The name of the Ollama model (e.g., "llama3:8b", "mistral").
+        ollama_host: The URL of the Ollama API host.
+        timeout: Timeout for the request to Ollama.
+        verify_ssl_certificate: Whether to verify SSL certificates for the Ollama host.
+        headers: Optional custom headers for the request to Ollama.
+        num_predict_for_eval: How many tokens to ask the model to "predict" to get
+            the prompt evaluation count. 0 is usually sufficient and most efficient.
+            If 0 doesn't consistently yield `prompt_eval_count`, try 1.
 
     Returns:
-        int: The number of tokens. Returns -1 if an error occurs.
-    """
-    api_url = f"{ollama_host.rstrip('/')}/api/tokenize"
-    payload = {
-        "model": model_name,
-        "prompt": text_to_tokenize
-    }
-    request_headers = headers if headers else {}
-
-    try:
-        response = requests.post(api_url, json=payload, timeout=timeout, verify=verify_ssl_certificate, headers=request_headers)
-        response.raise_for_status() # Raises HTTPError for bad responses (4xx or 5xx)
-
-        response_data = response.json()
-
-        if "tokens" in response_data and isinstance(response_data["tokens"], list):
-            return len(response_data["tokens"])
-        else:
-            ASCIIColors.warning(
-                f"Ollama response for token count did not contain a 'tokens' list. Response: {response_data}"
-            )
-            return -1 # Or raise ValueError
-
-    except requests.exceptions.HTTPError as http_err:
-        ASCIIColors.error(f"HTTP error occurred during token count: {http_err} - {http_err.response.text if http_err.response else 'No response text'}")
-        return -1
-    except requests.exceptions.RequestException as req_err:
-        ASCIIColors.error(f"Request error occurred during token count: {req_err}")
-        return -1
-    except json.JSONDecodeError as json_err:
-        ASCIIColors.error(
-            f"Failed to decode JSON response from Ollama during token count: {json_err}. Response text: {response.text if hasattr(response, 'text') else 'No response object'}"
-        )
-        return -1
-    except Exception as e:
-        ASCIIColors.error(f"An unexpected error occurred during token count: {e}")
-        return -1
+        The number of tokens as reported by 'prompt_eval_count'.
 
+    Raises:
+        requests.exceptions.RequestException: If the API request fails.
+        KeyError: If 'prompt_eval_count' is not found in the response.
+        json.JSONDecodeError: If the response is not valid JSON.
+        RuntimeError: For other operational errors.
+    """
+    res = ollama_client.chat(
+        model=model_name,
+        messages=[{"role":"system","content":""},{"role":"user", "content":text_to_tokenize}],
+        stream=False, options={"num_predict":1}
+    )
+
+    return res.prompt_eval_count-5
 class OllamaBinding(LollmsLLMBinding):
     """Ollama-specific binding implementation using the ollama-python library."""
 
@@ -132,6 +114,7 @@ class OllamaBinding(LollmsLLMBinding):
                  images: Optional[List[str]] = None, # List of image file paths
                  n_predict: Optional[int] = None,
                  stream: bool = False,
+                 system_prompt = '',
                  temperature: float = 0.7, # Ollama default is 0.8, common default 0.7
                  top_k: int = 40, # Ollama default is 40
                  top_p: float = 0.9, # Ollama default is 0.9
@@ -191,7 +174,7 @@ class OllamaBinding(LollmsLLMBinding):
                 # If images were base64 strings, they would need decoding to bytes first.
                 processed_images.append(img_path)
 
-        messages = [{'role': 'user', 'content': prompt, 'images': processed_images if processed_images else None}]
+        messages = [{'role': 'system', 'content':system_prompt},{'role': 'user', 'content': prompt, 'images': processed_images if processed_images else None}]
 
         if stream:
             response_stream = self.ollama_client.chat(
@@ -314,7 +297,7 @@ class OllamaBinding(LollmsLLMBinding):
         if not self.model_name:
             ASCIIColors.warning("Cannot count tokens, model_name is not set.")
             return -1
-        return count_tokens_ollama(text, self.model_name, self.host_address, verify_ssl_certificate=self.verify_ssl_certificate, headers=self.ollama_client_headers)
+        return count_tokens_ollama(text, self.model_name, self.ollama_client)
 
     def embed(self, text: str, **kwargs) -> List[float]:
         """
@@ -334,7 +317,7 @@ class OllamaBinding(LollmsLLMBinding):
         if not self.ollama_client:
             raise Exception("Ollama client not initialized.")
 
-        model_to_use = kwargs.get("model", self.model_name)
+        model_to_use = kwargs.get("model", "bge-m3")
         if not model_to_use:
             raise ValueError("Model name for embedding must be specified either in init or via kwargs.")
 
@@ -574,4 +557,4 @@ if __name__ == '__main__':
         ASCIIColors.error(f"An error occurred during testing: {e}")
         trace_exception(e)
 
-    ASCIIColors.yellow("\nOllamaBinding test finished.")
+    ASCIIColors.yellow("\nOllamaBinding test finished.")
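
In 0.13.1 the Ollama binding stops calling the server's /api/tokenize endpoint and instead counts tokens by sending a throwaway chat request through the ollama-python client and reading the server-reported prompt_eval_count, minus a fixed offset of 5 for the tokens the chat template wraps around the empty system message and the user message. A minimal sketch of the same idea outside the binding follows; the host, model name, and the fixed offset are assumptions for illustration, and depending on the ollama-python version prompt_eval_count is exposed as an attribute or a dict key:

    # Hedged sketch of counting tokens via Ollama's prompt evaluation count.
    import ollama

    client = ollama.Client(host="http://localhost:11434")  # assumed local server

    def approx_token_count(text: str, model: str = "llama3") -> int:
        # Ask for a single predicted token; we only care how many prompt tokens were evaluated.
        res = client.chat(
            model=model,
            messages=[{"role": "system", "content": ""},
                      {"role": "user", "content": text}],
            stream=False,
            options={"num_predict": 1},
        )
        # ollama-python >= 0.4 returns a response object; older versions return a dict.
        count = getattr(res, "prompt_eval_count", None) or res["prompt_eval_count"]
        # Subtract the chat-template overhead, mirroring the binding's fixed offset of 5.
        return count - 5

The trade-off is that every count now evaluates the prompt on the Ollama server, so it matches the loaded model exactly but is slower than a local tokenizer, and the fixed offset can drift for models with different chat templates.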

{lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client/lollms_core.py
@@ -278,7 +278,20 @@ class LollmsClient():
         if self.binding:
             return self.binding.detokenize(tokens)
         raise RuntimeError("LLM binding not initialized.")
+    def count_tokens(self, text: str) -> int:
+        """
+        Counts how many tokens are there in the text using the active LLM binding.
+
+        Args:
+            text (str): The text to tokenize.
 
+        Returns:
+            int: Number of tokens.
+        """
+        if self.binding:
+            return self.binding.count_tokens(text)
+        raise RuntimeError("LLM binding not initialized.")
+
     def get_model_details(self) -> dict:
         """
         Get model information from the active LLM binding.
@@ -1611,4 +1624,4 @@ def chunk_text(text, tokenizer, detokenizer, chunk_size, overlap, use_separators
         if current_pos >= len(text):
             break
 
-    return chunks
+    return chunks
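
The new LollmsClient.count_tokens helper simply forwards to the active binding's count_tokens, so callers no longer need to tokenize the text themselves and take the length. A hedged usage sketch, where the constructor arguments are assumptions for illustration and depend on how the client is configured:

    from lollms_client import LollmsClient

    # Assumed setup: an Ollama binding with a local model; adjust to your environment.
    lc = LollmsClient(binding_name="ollama", model_name="llama3")

    text = "Testing count of tokens"
    print(f"{lc.count_tokens(text)} tokens")  # raises RuntimeError if no binding is initialized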

{lollms_client-0.13.0 → lollms_client-0.13.1}/lollms_client.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 0.13.0
+Version: 0.13.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License
@@ -19,6 +19,13 @@ Requires-Python: >=3.7
 Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: requests
+Requires-Dist: ascii-colors
+Requires-Dist: pipmaster
+Requires-Dist: pyyaml
+Requires-Dist: tiktoken
+Requires-Dist: pydantic
+Requires-Dist: numpy
+Requires-Dist: pillow
 Dynamic: license-file
 
 # lollms_client

lollms_client-0.13.1/lollms_client.egg-info/requires.txt
@@ -0,0 +1,8 @@
+requests
+ascii-colors
+pipmaster
+pyyaml
+tiktoken
+pydantic
+numpy
+pillow

{lollms_client-0.13.0 → lollms_client-0.13.1}/pyproject.toml
@@ -28,6 +28,13 @@ classifiers = [
 ]
 dependencies = [
     "requests",
+    "ascii-colors",
+    "pipmaster",
+    "pyyaml",
+    "tiktoken",
+    "pydantic",
+    "numpy",
+    "pillow",
 ]
 
 [project.urls]
@@ -37,4 +44,4 @@ Homepage = "https://github.com/ParisNeo/lollms_client"
 where = ["."]
 
 [tool.setuptools.dynamic]
-version = {attr = "lollms_client.__version__"}
+version = {attr = "lollms_client.__version__"}

lollms_client-0.13.0/lollms_client.egg-info/requires.txt
@@ -1 +0,0 @@
-requests