lollms-client 0.9.2__tar.gz → 0.10.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of lollms-client might be problematic.

Files changed (40)
  1. {lollms_client-0.9.2 → lollms_client-0.10.0}/PKG-INFO +16 -12
  2. {lollms_client-0.9.2 → lollms_client-0.10.0}/README.md +15 -11
  3. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/__init__.py +1 -1
  4. lollms_client-0.10.0/lollms_client/llm_bindings/__init__.py +1 -0
  5. lollms_client-0.10.0/lollms_client/llm_bindings/lollms/__init__.py +301 -0
  6. lollms_client-0.10.0/lollms_client/llm_bindings/ollama/__init__.py +293 -0
  7. lollms_client-0.10.0/lollms_client/llm_bindings/openai/__init__.py +260 -0
  8. lollms_client-0.10.0/lollms_client/llm_bindings/transformers/__init__.py +281 -0
  9. lollms_client-0.10.0/lollms_client/lollms_core.py +1033 -0
  10. lollms_client-0.10.0/lollms_client/lollms_llm_binding.py +210 -0
  11. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_tasks.py +42 -109
  12. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_tts.py +7 -3
  13. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_types.py +19 -1
  14. lollms_client-0.10.0/lollms_client/stt_bindings/__init__.py +0 -0
  15. lollms_client-0.10.0/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  16. lollms_client-0.10.0/lollms_client/tti_bindings/__init__.py +0 -0
  17. lollms_client-0.10.0/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  18. lollms_client-0.10.0/lollms_client/tts_bindings/__init__.py +0 -0
  19. lollms_client-0.10.0/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  20. lollms_client-0.10.0/lollms_client/ttv_bindings/__init__.py +0 -0
  21. lollms_client-0.10.0/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  22. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client.egg-info/PKG-INFO +16 -12
  23. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client.egg-info/SOURCES.txt +15 -1
  24. {lollms_client-0.9.2 → lollms_client-0.10.0}/setup.py +1 -1
  25. lollms_client-0.9.2/lollms_client/lollms_core.py +0 -2106
  26. {lollms_client-0.9.2 → lollms_client-0.10.0}/LICENSE +0 -0
  27. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_config.py +0 -0
  28. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_discussion.py +0 -0
  29. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_functions.py +0 -0
  30. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_js_analyzer.py +0 -0
  31. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_personality.py +0 -0
  32. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_personality_worker.py +0 -0
  33. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_python_analyzer.py +0 -0
  34. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_stt.py +0 -0
  35. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_tti.py +0 -0
  36. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/lollms_utilities.py +0 -0
  37. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client.egg-info/dependency_links.txt +0 -0
  38. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client.egg-info/requires.txt +0 -0
  39. {lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client.egg-info/top_level.txt +0 -0
  40. {lollms_client-0.9.2 → lollms_client-0.10.0}/setup.cfg +0 -0
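
The listing above shows the main 0.10.0 restructuring: generation backends move out of a monolithic lollms_core.py into dedicated binding packages (lollms, ollama, openai, transformers), plus placeholder stt/tts/tti/ttv binding folders. Based on the README changes further down in this diff, the binding is now selected by name in the LollmsClient constructor instead of the old ELF_GENERATION_FORMAT flag. A minimal sketch (host addresses and model names are placeholders):

```python
from lollms_client import LollmsClient

# Default: a local lollms server on port 9600
lc = LollmsClient()

# Select a backend binding by name ("ollama" and "openai" are shown in the 0.10.0 README)
lc_ollama = LollmsClient("ollama", model_name="phi4:latest")
lc_openai = LollmsClient("openai", model_name="gpt-3.5-turbo-0125")

# generate_text() replaces the old generate() entry point
print(lc_ollama.generate_text(prompt="Once upon a time", stream=False, temperature=0.5))
```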
{lollms_client-0.9.2 → lollms_client-0.10.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lollms_client
-Version: 0.9.2
+Version: 0.10.0
 Summary: A client library for LoLLMs generate endpoint
 Home-page: https://github.com/ParisNeo/lollms_client
 Author: ParisNeo
@@ -37,30 +37,34 @@ from lollms_client import LollmsClient
 lc = LollmsClient()
 
 # Specify a custom host and port
-lc = LollmsClient(host_address="http://some.server:9600")
-
-# Use a specific model with a local or remote ollama server
-from lollms_client import ELF_GENERATION_FORMAT
-lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
-
-# Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
-lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
+lc = LollmsClient(host_address="http://some.lollms.server:9600")
 
+# Use a specific model with a local ollama server
+lc = LollmsClient("ollama", model_name="phi4:latest")
 # Use a specific model with an Ollama binding on the server, with a context size of 32800
 lc = LollmsClient(
+    "ollama",
     host_address="http://some.other.server:11434",
     model_name="phi4:latest",
     ctx_size=32800,
-    default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
 )
+# Use a specific model with a local or remote OpenAI server (either set your key as an environment variable or pass it here)
+lc = LollmsClient("openai", model_name="gpt-3.5-turbo-0125", service_key="your key, or omit this argument if the key is already set in an environment variable")
+
+# Use a specific model with another OpenAI-compatible server
+lc = LollmsClient("openai", host_address="http://some.other.server", model_name="gpt-3.5-turbo-0125")
 ```
 
 ### Text Generation
 
-Use `generate()` for generating text from the lollms API.
+Use `generate_text()` for generating text from the lollms API.
 
 ```python
-response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
+response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+print(response)
+```
+```python
+response = lc.generate_text(prompt="Once upon a time", images=["path to image1", "path to image 2"], stream=False, temperature=0.5)
 print(response)
 ```
 
{lollms_client-0.9.2 → lollms_client-0.10.0}/README.md

@@ -23,30 +23,34 @@ from lollms_client import LollmsClient
 lc = LollmsClient()
 
 # Specify a custom host and port
-lc = LollmsClient(host_address="http://some.server:9600")
-
-# Use a specific model with a local or remote ollama server
-from lollms_client import ELF_GENERATION_FORMAT
-lc = LollmsClient(model_name="phi4:latest", default_generation_mode = ELF_GENERATION_FORMAT.OLLAMA)
-
-# Use a specific model with a local or remote OpenAI server (you can either set your key as an environment variable or pass it here)
-lc = LollmsClient(model_name="gpt-3.5-turbo-0125", default_generation_mode = ELF_GENERATION_FORMAT.OPENAI)
+lc = LollmsClient(host_address="http://some.lollms.server:9600")
 
+# Use a specific model with a local ollama server
+lc = LollmsClient("ollama", model_name="phi4:latest")
 # Use a specific model with an Ollama binding on the server, with a context size of 32800
 lc = LollmsClient(
+    "ollama",
     host_address="http://some.other.server:11434",
     model_name="phi4:latest",
     ctx_size=32800,
-    default_generation_mode=ELF_GENERATION_FORMAT.OLLAMA
 )
+# Use a specific model with a local or remote OpenAI server (either set your key as an environment variable or pass it here)
+lc = LollmsClient("openai", model_name="gpt-3.5-turbo-0125", service_key="your key, or omit this argument if the key is already set in an environment variable")
+
+# Use a specific model with another OpenAI-compatible server
+lc = LollmsClient("openai", host_address="http://some.other.server", model_name="gpt-3.5-turbo-0125")
 ```
 
 ### Text Generation
 
-Use `generate()` for generating text from the lollms API.
+Use `generate_text()` for generating text from the lollms API.
 
 ```python
-response = lc.generate(prompt="Once upon a time", stream=False, temperature=0.5)
+response = lc.generate_text(prompt="Once upon a time", stream=False, temperature=0.5)
+print(response)
+```
+```python
+response = lc.generate_text(prompt="Once upon a time", images=["path to image1", "path to image 2"], stream=False, temperature=0.5)
 print(response)
 ```
 
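The README examples above are blocking calls. The new lollms binding added later in this diff also exposes stream and streaming_callback parameters on generate_text(); the sketch below assumes LollmsClient.generate_text() forwards them unchanged to the active binding (lollms_core.py itself is not shown in this diff):

```python
from lollms_client import LollmsClient
from lollms_client.lollms_types import MSG_TYPE

lc = LollmsClient("ollama", model_name="phi4:latest")

# Called once per received chunk; the binding emits MSG_TYPE.MSG_TYPE_CHUNK for stream chunks
def on_chunk(chunk: str, msg_type) -> None:
    if msg_type == MSG_TYPE.MSG_TYPE_CHUNK:
        print(chunk, end="", flush=True)

# Assumed pass-through of stream/streaming_callback to the binding's generate_text()
full_text = lc.generate_text(
    prompt="Once upon a time",
    stream=True,
    streaming_callback=on_chunk,
)
```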
{lollms_client-0.9.2 → lollms_client-0.10.0}/lollms_client/__init__.py

@@ -1,4 +1,4 @@
-from lollms_client.lollms_core import LollmsClient, ELF_GENERATION_FORMAT, ELF_COMPLETION_FORMAT
+from lollms_client.lollms_core import LollmsClient, ELF_COMPLETION_FORMAT
 from lollms_client.lollms_tasks import TasksLibrary
 from lollms_client.lollms_types import MSG_TYPE
 from lollms_client.lollms_personality import LollmsPersonality
lollms_client-0.10.0/lollms_client/llm_bindings/__init__.py (new file)

@@ -0,0 +1 @@
+# to be done
lollms_client-0.10.0/lollms_client/llm_bindings/lollms/__init__.py (new file)

@@ -0,0 +1,301 @@
+# bindings/lollms/binding.py
+import requests
+from lollms_client.lollms_llm_binding import LollmsLLMBinding
+from lollms_client.lollms_types import MSG_TYPE
+from lollms_client.lollms_utilities import encode_image
+from lollms_client.lollms_types import ELF_COMPLETION_FORMAT
+from ascii_colors import ASCIIColors, trace_exception
+from typing import Optional, Callable, List, Union
+import json
+
+BindingName = "LollmsLLMBinding"
+
+class LollmsLLMBinding(LollmsLLMBinding):
+    """LOLLMS-specific binding implementation"""
+
+    DEFAULT_HOST_ADDRESS = "http://localhost:9600"
+
+    def __init__(self,
+                 host_address: str = None,
+                 model_name: str = "",
+                 service_key: str = None,
+                 verify_ssl_certificate: bool = True,
+                 personality: Optional[int] = None,
+                 default_completion_format: ELF_COMPLETION_FORMAT = ELF_COMPLETION_FORMAT.Chat):
+        """
+        Initialize the LOLLMS binding.
+
+        Args:
+            host_address (str): Host address for the LOLLMS service. Defaults to DEFAULT_HOST_ADDRESS.
+            model_name (str): Name of the model to use. Defaults to empty string.
+            service_key (str): Authentication key for the service. Defaults to None.
+            verify_ssl_certificate (bool): Whether to verify SSL certificates. Defaults to True.
+            personality (Optional[int]): Personality ID for generation. Defaults to None.
+        """
+        super().__init__(
+            host_address=host_address if host_address is not None else self.DEFAULT_HOST_ADDRESS,
+            model_name=model_name,
+            service_key=service_key,
+            verify_ssl_certificate=verify_ssl_certificate,
+            default_completion_format=default_completion_format
+        )
+        self.personality = personality
+        self.model = None
+
+    def generate_text(self,
+                      prompt: str,
+                      images: Optional[List[str]] = None,
+                      n_predict: Optional[int] = None,
+                      stream: bool = False,
+                      temperature: float = 0.1,
+                      top_k: int = 50,
+                      top_p: float = 0.95,
+                      repeat_penalty: float = 0.8,
+                      repeat_last_n: int = 40,
+                      seed: Optional[int] = None,
+                      n_threads: int = 8,
+                      streaming_callback: Optional[Callable[[str, str], None]] = None) -> Union[str, dict]:
+        """
+        Generate text using the LOLLMS service, with optional image support.
+
+        Args:
+            prompt (str): The input prompt for text generation.
+            images (Optional[List[str]]): List of image file paths for multimodal generation.
+                If provided, uses the /lollms_generate_with_images endpoint.
+            n_predict (Optional[int]): Maximum number of tokens to generate.
+            stream (bool): Whether to stream the output. Defaults to False.
+            temperature (float): Sampling temperature. Defaults to 0.1.
+            top_k (int): Top-k sampling parameter. Defaults to 50.
+            top_p (float): Top-p sampling parameter. Defaults to 0.95.
+            repeat_penalty (float): Penalty for repeated tokens. Defaults to 0.8.
+            repeat_last_n (int): Number of previous tokens to consider for repeat penalty. Defaults to 40.
+            seed (Optional[int]): Random seed for generation.
+            n_threads (int): Number of threads to use. Defaults to 8.
+            streaming_callback (Optional[Callable[[str, str], None]]): Callback for streaming output.
+                - First parameter (str): The chunk of text received from the stream.
+                - Second parameter (str): The message type (typically MSG_TYPE.MSG_TYPE_CHUNK).
+
+        Returns:
+            Union[str, dict]: Generated text if successful, or a dictionary with status and error if failed.
+        """
+        # Determine endpoint based on presence of images
+        endpoint = "/lollms_generate_with_images" if images else "/lollms_generate"
+        url = f"{self.host_address}{endpoint}"
+
+        # Set headers
+        headers = {
+            'Content-Type': 'application/json',
+        }
+        if self.service_key:
+            headers['Authorization'] = f'Bearer {self.service_key}'
+
+        # Handle images if provided
+        image_data = []
+        if images:
+            for image_path in images:
+                try:
+                    encoded_image = encode_image(image_path)
+                    image_data.append(encoded_image)
+                except Exception as e:
+                    return {"status": False, "error": f"Failed to process image {image_path}: {str(e)}"}
+
+        # Prepare request data
+        data = {
+            "prompt": prompt,
+            "model_name": self.model_name,
+            "personality": self.personality,
+            "n_predict": n_predict,
+            "stream": stream,
+            "temperature": temperature,
+            "top_k": top_k,
+            "top_p": top_p,
+            "repeat_penalty": repeat_penalty,
+            "repeat_last_n": repeat_last_n,
+            "seed": seed,
+            "n_threads": n_threads
+        }
+
+        if image_data:
+            data["images"] = image_data
+
+        # Make the request
+        response = requests.post(
+            url,
+            json=data,
+            headers=headers,
+            stream=stream,
+            verify=self.verify_ssl_certificate
+        )
+
+        if not stream:
+            if response.status_code == 200:
+                try:
+                    text = response.text.strip().rstrip('!')
+                    return text
+                except Exception as ex:
+                    return {"status": False, "error": str(ex)}
+            else:
+                return {"status": False, "error": response.text}
+        else:
+            text = ""
+            if response.status_code == 200:
+                try:
+                    for line in response.iter_lines():
+                        chunk = line.decode("utf-8")
+                        text += chunk
+                        if streaming_callback:
+                            streaming_callback(chunk, MSG_TYPE.MSG_TYPE_CHUNK)
+                    # Handle potential quotes from streaming response
+                    if text and text[0] == '"':
+                        text = text[1:]
+                    if text and text[-1] == '"':
+                        text = text[:-1]
+                    return text.rstrip('!')
+                except Exception as ex:
+                    return {"status": False, "error": str(ex)}
+            else:
+                return {"status": False, "error": response.text}
+
+    def tokenize(self, text: str) -> list:
+        """
+        Tokenize the input text into a list of tokens using the /lollms_tokenize endpoint.
+
+        Args:
+            text (str): The text to tokenize.
+
+        Returns:
+            list: List of tokens.
+        """
+        try:
+            # Prepare the request payload
+            payload = {
+                "prompt": text,
+                "return_named": False  # Set to True if you want named tokens
+            }
+
+            # Make the POST request to the /lollms_tokenize endpoint
+            response = requests.post(f"{self.host_address}/lollms_tokenize", json=payload)
+
+            # Check if the request was successful
+            if response.status_code == 200:
+                return response.json()
+            else:
+                raise Exception(f"Failed to tokenize text: {response.text}")
+        except Exception as ex:
+            trace_exception(ex)
+            raise Exception(f"Failed to tokenize text: {str(ex)}")
+
+    def detokenize(self, tokens: list) -> str:
+        """
+        Convert a list of tokens back to text using the /lollms_detokenize endpoint.
+
+        Args:
+            tokens (list): List of tokens to detokenize.
+
+        Returns:
+            str: Detokenized text.
+        """
+        try:
+            # Prepare the request payload
+            payload = {
+                "tokens": tokens,
+                "return_named": False  # Set to True if you want named tokens
+            }
+
+            # Make the POST request to the /lollms_detokenize endpoint
+            response = requests.post(f"{self.host_address}/lollms_detokenize", json=payload)
+
+            # Check if the request was successful
+            if response.status_code == 200:
+                return response.json()
+            else:
+                raise Exception(f"Failed to detokenize tokens: {response.text}")
+        except Exception as ex:
+            return {"status": False, "error": str(ex)}
+
+
+    def embed(self, text: str, **kwargs) -> list:
+        """
+        Get embeddings for the input text using the LOLLMS /lollms_embed endpoint.
+
+        Args:
+            text (str or List[str]): Input text to embed
+            **kwargs: Additional arguments (e.g. api_key to override the Authorization header)
+
+        Returns:
+            list: The embedding vector returned by the service
+        """
+        api_key = kwargs.pop("api_key", None)
+        headers = (
+            {"Content-Type": "application/json", "Authorization": api_key}
+            if api_key
+            else {"Content-Type": "application/json"}
+        )
+        embeddings = []
+        request_data = {"text": text}
+        response = requests.post(f"{self.host_address}/lollms_embed", json=request_data, headers=headers)
+        response.raise_for_status()
+        result = response.json()
+        return result["vector"]
+
+    def get_model_info(self) -> dict:
+        """
+        Return information about the current LOLLMS model.
+
+        Returns:
+            dict: Dictionary containing model name, version, host address, and personality.
+        """
+        return {
+            "name": "lollms",
+            "version": "1.0",
+            "host_address": self.host_address,
+            "model_name": self.model_name,
+            "personality": self.personality
+        }
+
+
+    def listModels(self) -> dict:
+        """Lists models"""
+        url = f"{self.host_address}/list_models"
+
+        response = requests.get(url)
+
+        if response.status_code == 200:
+            try:
+                text = json.loads(response.content.decode("utf-8"))
+                return text
+            except Exception as ex:
+                return {"status": False, "error": str(ex)}
+        else:
+            return {"status": False, "error": response.text}
+
+
+    def load_model(self, model_name: str) -> bool:
+        """
+        Load a specific model into the LOLLMS binding.
+
+        Args:
+            model_name (str): Name of the model to load.
+
+        Returns:
+            bool: True if model loaded successfully.
+        """
+        self.model = model_name
+        self.model_name = model_name
+        return True
+
+    # Lollms specific methods
+    def lollms_listMountedPersonalities(self, host_address:str=None):
+        host_address = host_address if host_address else self.host_address
+        url = f"{host_address}/list_mounted_personalities"
+
+        response = requests.get(url)
+
+        if response.status_code == 200:
+            try:
+                text = json.loads(response.content.decode("utf-8"))
+                return text
+            except Exception as ex:
+                return {"status": False, "error": str(ex)}
+        else:
+            return {"status": False, "error": response.text}
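
A minimal sketch of driving this binding directly, assuming the new file layout gives the import path lollms_client.llm_bindings.lollms and that the base-class constructor accepts the arguments forwarded by super().__init__ above; in normal use, LollmsClient (lollms_core.py, +1033 lines in this release, not shown here) selects and drives the binding:

```python
# Hypothetical direct use of the binding shown above
from lollms_client.llm_bindings.lollms import LollmsLLMBinding  # import path assumed from the file layout

binding = LollmsLLMBinding(host_address="http://localhost:9600", model_name="some_model")

# Blocking generation against /lollms_generate
print(binding.generate_text(prompt="Once upon a time", stream=False, temperature=0.5))

# Tokenize / detokenize round trip via /lollms_tokenize and /lollms_detokenize
tokens = binding.tokenize("Hello world")
print(binding.detokenize(tokens))

# Embedding vector from /lollms_embed
vector = binding.embed("Hello world")
print(len(vector))
```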