lollms-client 1.5.1__py3-none-any.whl → 1.5.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "1.5.1" # Updated version
+__version__ = "1.5.2" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

lollms_client/llm_bindings/openwebui/__init__.py ADDED
@@ -0,0 +1,303 @@
+import json
+import base64
+import os
+import mimetypes
+from typing import Optional, Callable, List, Union, Dict
+
+import pipmaster as pm
+
+# Ensure required packages are installed before importing them
+pm.ensure_packages(["httpx", "tiktoken"])
+
+import httpx
+import tiktoken
+
+from lollms_client.lollms_llm_binding import LollmsLLMBinding
+from lollms_client.lollms_types import MSG_TYPE, ELF_COMPLETION_FORMAT
+from lollms_client.lollms_discussion import LollmsDiscussion
+from lollms_client.lollms_utilities import encode_image
+from ascii_colors import ASCIIColors, trace_exception
+
+BindingName = "OpenWebUIBinding"
+
+
+def _read_file_as_base64(path):
+    with open(path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+def _extract_markdown_path(s):
+    """Extracts the link target from a markdown link such as [label](path)."""
+    s = s.strip()
+    if s.startswith("[") and s.endswith(")"):
+        lp, rp = s.find("("), s.rfind(")")
+        if lp != -1 and rp > lp:
+            return s[lp+1:rp].strip()
+    return s
+
+def _guess_mime_from_name(name, default="image/jpeg"):
+    mime, _ = mimetypes.guess_type(name)
+    return mime or default
+
+def _to_data_url(b64_str, mime):
+    return f"data:{mime};base64,{b64_str}"
+
+def normalize_image_input(img, default_mime="image/jpeg"):
+    """
+    Returns an OpenAI API-ready content block for an image.
+    Accepts a file path, a markdown link to a file, or a raw base64
+    string, and converts it to a data URL.
+    """
+    if isinstance(img, str):
+        # Handle path-like strings or raw base64
+        s = _extract_markdown_path(img)
+        if os.path.exists(s):
+            b64 = _read_file_as_base64(s)
+            mime = _guess_mime_from_name(s, default_mime)
+            url = _to_data_url(b64, mime)
+        else:  # Assume it's a base64 string
+            url = _to_data_url(s, default_mime)
+        return {"type": "image_url", "image_url": {"url": url}}
+
+    raise ValueError("Unsupported image input type for OpenWebUI")
+
+
+class OpenWebUIBinding(LollmsLLMBinding):
+    """OpenWebUI-specific binding implementation"""
+
+    def __init__(self, **kwargs):
+        """
+        Initialize the OpenWebUI binding.
+
+        Args:
+            host_address (str): The URL of the OpenWebUI server (e.g., "http://localhost:8080").
+            model_name (str): Name of the model to use.
+            service_key (str): Authentication token for the service. Falls back to
+                the OPENWEBUI_API_KEY environment variable when not provided.
+            verify_ssl_certificate (bool): Whether to verify SSL certificates.
+        """
+        super().__init__(BindingName, **kwargs)
+        self.host_address = kwargs.get("host_address")
+        self.model_name = kwargs.get("model_name")
+        self.service_key = kwargs.get("service_key", os.getenv("OPENWEBUI_API_KEY"))
+        self.verify_ssl_certificate = kwargs.get("verify_ssl_certificate", True)
+
+        if not self.host_address:
+            raise ValueError("OpenWebUI host address is required.")
+        if not self.service_key:
+            ASCIIColors.warning("No service key provided for OpenWebUI. Requests may fail.")
+
+        headers = {
+            "Authorization": f"Bearer {self.service_key}",
+            "Content-Type": "application/json"
+        }
+
+        # No timeout: streaming responses may stay open indefinitely.
+        self.client = httpx.Client(
+            base_url=self.host_address,
+            headers=headers,
+            verify=self.verify_ssl_certificate,
+            timeout=None
+        )
+
+    def _build_request_params(self, messages: list, **kwargs) -> dict:
+        """Builds the request parameters for the OpenWebUI API."""
+        params = {
+            "model": kwargs.get("model", self.model_name),
+            "messages": messages,
+            # Coerce None to False so the payload matches the branch taken
+            # in _process_request and no JSON null is sent.
+            "stream": bool(kwargs.get("stream")),
+        }
+
+        # Map Lollms parameters to OpenAI-compatible parameters
+        if "n_predict" in kwargs and kwargs["n_predict"] is not None:
+            params["max_tokens"] = kwargs["n_predict"]
+        if "temperature" in kwargs and kwargs["temperature"] is not None:
+            params["temperature"] = kwargs["temperature"]
+        if "top_p" in kwargs and kwargs["top_p"] is not None:
+            params["top_p"] = kwargs["top_p"]
+        if "top_k" in kwargs and kwargs["top_k"] is not None:
+            # Note: top_k is not standard in the OpenAI API, but some backends
+            # support it. We include it here for potential compatibility.
+            params["top_k"] = kwargs["top_k"]
+        if "repeat_penalty" in kwargs and kwargs["repeat_penalty"] is not None:
+            # Note: repeat_penalty (~1.1) and frequency_penalty (-2..2) use
+            # different scales, so this mapping is only approximate.
+            params["frequency_penalty"] = kwargs["repeat_penalty"]
+        if "seed" in kwargs and kwargs["seed"] is not None:
+            params["seed"] = kwargs["seed"]
+
+        return params
+
+    def generate_text(self,
+                      prompt: str,
+                      images: Optional[List[str]] = None,
+                      system_prompt: str = "",
+                      n_predict: Optional[int] = None,
+                      stream: Optional[bool] = None,
+                      temperature: float = 0.7,
+                      top_k: int = 40,
+                      top_p: float = 0.9,
+                      repeat_penalty: float = 1.1,
+                      streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                      **kwargs
+                      ) -> Union[str, dict]:
+
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+
+        user_content = [{"type": "text", "text": prompt}]
+        if images:
+            for img in images:
+                user_content.append(normalize_image_input(img))
+
+        messages.append({"role": "user", "content": user_content})
+
+        params = self._build_request_params(
+            messages=messages,
+            n_predict=n_predict,
+            stream=stream,
+            temperature=temperature,
+            top_k=top_k,
+            top_p=top_p,
+            repeat_penalty=repeat_penalty,
+            **kwargs
+        )
+
+        return self._process_request(params, stream, streaming_callback)
+
+
+    def generate_from_messages(self,
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
+
+        params = self._build_request_params(
+            messages=messages,
+            n_predict=n_predict,
+            stream=stream,
+            temperature=temperature,
+            top_k=top_k,
+            top_p=top_p,
+            repeat_penalty=repeat_penalty,
+            **kwargs
+        )
+
+        return self._process_request(params, stream, streaming_callback)
+
+    def _process_request(self, params, stream, streaming_callback):
+        """Helper to process streaming or non-streaming API calls."""
+        output = ""
+        try:
+            if stream:
+                with self.client.stream("POST", "/api/chat/completions", json=params) as response:
+                    if response.status_code != 200:
+                        error_content = response.read().decode('utf-8')
+                        raise Exception(f"API Error: {response.status_code} - {error_content}")
+
+                    for line in response.iter_lines():
+                        if line.startswith("data:"):
+                            data_str = line[len("data:"):].strip()
+                            if data_str == "[DONE]":
+                                break
+                            try:
+                                chunk = json.loads(data_str)
+                                if chunk.get("choices"):
+                                    delta = chunk["choices"][0].get("delta", {})
+                                    word = delta.get("content", "")
+                                    if word:
+                                        if streaming_callback:
+                                            # A falsy callback return value stops generation
+                                            if not streaming_callback(word, MSG_TYPE.MSG_TYPE_CHUNK):
+                                                break
+                                        output += word
+                            except json.JSONDecodeError:
+                                continue  # Ignore malformed SSE lines
+            else:
+                response = self.client.post("/api/chat/completions", json=params)
+                if response.status_code != 200:
+                    raise Exception(f"API Error: {response.status_code} - {response.text}")
+
+                data = response.json()
+                output = data["choices"][0]["message"]["content"]
+                if streaming_callback:
+                    streaming_callback(output, MSG_TYPE.MSG_TYPE_CHUNK)
+
+        except Exception as e:
+            trace_exception(e)
+            err_msg = f"An error occurred with the OpenWebUI API: {e}"
+            if streaming_callback:
+                streaming_callback(err_msg, MSG_TYPE.MSG_TYPE_EXCEPTION)
+            return {"status": "error", "message": err_msg}
+
+        return output
+
+    def listModels(self) -> List[Dict]:
+        models_info = []
+        try:
+            response = self.client.get("/api/models")
+            response.raise_for_status()
+            models_data = response.json().get("data", [])
+
+            for model in models_data:
+                models_info.append({
+                    "model_name": model.get("id", "N/A"),
+                    "owned_by": model.get("details", {}).get("family", "N/A"),
+                    "created": model.get("modified_at", "N/A"),
+                    # OpenWebUI does not reliably expose a context length;
+                    # parameter_size is used here only as a best-effort hint.
+                    "context_length": model.get("details", {}).get("parameter_size", "unknown"),
+                })
+        except Exception as e:
+            ASCIIColors.error(f"Failed to list models from OpenWebUI: {e}")
+        return models_info
+
+    def _get_encoding(self, model_name: str | None = None):
+        """Uses tiktoken as a general-purpose tokenizer."""
+        try:
+            return tiktoken.encoding_for_model(model_name or self.model_name)
+        except KeyError:
+            # Unknown model names fall back to a generic encoding, so token
+            # counts are only an approximation of the server-side tokenizer.
+            return tiktoken.get_encoding("cl100k_base")
+
+    def tokenize(self, text: str) -> list[int]:
+        encoding = self._get_encoding()
+        return encoding.encode(text)
+
+    def detokenize(self, tokens: list[int]) -> str:
+        encoding = self._get_encoding()
+        return encoding.decode(tokens)
+
+    def count_tokens(self, text: str) -> int:
+        return len(self.tokenize(text))
+
+    def get_input_tokens_price(self, model_name: str | None = None) -> float:
+        return 0.0
+
+    def get_output_tokens_price(self, model_name: str | None = None) -> float:
+        return 0.0
+
+    def embed(self, text: str | list[str], **kwargs) -> list:
+        """Get embeddings using OpenWebUI's Ollama passthrough endpoint."""
+        embedding_model = kwargs.get("model", self.model_name)
+        is_single_input = isinstance(text, str)
+        input_texts = [text] if is_single_input else text
+        embeddings = []
+
+        try:
+            for t in input_texts:
+                payload = {"model": embedding_model, "prompt": t}
+                response = self.client.post("/ollama/api/embeddings", json=payload)
+                response.raise_for_status()
+                embedding_data = response.json().get("embedding")
+                if embedding_data:
+                    embeddings.append(embedding_data)
+
+            return embeddings[0] if is_single_input and embeddings else embeddings
+
+        except Exception as e:
+            ASCIIColors.error(f"Failed to generate embeddings using model '{embedding_model}': {e}")
+            trace_exception(e)
+            return []
+
+    def load_model(self, model_name: str) -> bool:
+        # The model is loaded server-side on first use; this only records
+        # the name for subsequent requests.
+        self.model_name = model_name
+        return True
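
For reference, a minimal usage sketch of the new binding follows. The host address, model name, and image path are hypothetical placeholders, not values taken from this release; the binding itself requires only host_address and reads OPENWEBUI_API_KEY from the environment when no service_key is passed.

    from lollms_client.llm_bindings.openwebui import OpenWebUIBinding
    from lollms_client.lollms_types import MSG_TYPE

    # Hypothetical server and model; adjust to your own OpenWebUI instance.
    binding = OpenWebUIBinding(
        host_address="http://localhost:8080",
        model_name="llama3:latest",
    )

    def on_chunk(chunk, msg_type):
        # Return True to keep streaming; a falsy value stops generation.
        print(chunk, end="", flush=True)
        return True

    text = binding.generate_text(
        prompt="Describe this image in one sentence.",
        images=["photo.jpg"],  # file path, markdown link, or raw base64
        stream=True,
        streaming_callback=on_chunk,
    )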

{lollms_client-1.5.1.dist-info → lollms_client-1.5.2.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.5.1
+Version: 1.5.2
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License

{lollms_client-1.5.1.dist-info → lollms_client-1.5.2.dist-info}/RECORD RENAMED
@@ -1,4 +1,4 @@
-lollms_client/__init__.py,sha256=aR8uhZNknaO5PnB08_UPdEZJ-tJFt7CeroT26fyL-tU,1146
+lollms_client/__init__.py,sha256=FKaQDL1Mladbg-jwPC4OqIssljDSifJoz6QXGimY1XI,1146
 lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
 lollms_client/lollms_core.py,sha256=0FavCKvsoI1_feFCoW-lbe8CWvm1z8i-3t6uWozuhpo,315238
@@ -34,6 +34,7 @@ lollms_client/llm_bindings/ollama/__init__.py,sha256=3cJra6K-r4ISPOC1VBnfSpMX6ar
 lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
 lollms_client/llm_bindings/openai/__init__.py,sha256=J_1OI4TGWgAPwOIjrki1TOGePVLHZ1tYP-nKQFZNLIk,28734
 lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
+lollms_client/llm_bindings/openwebui/__init__.py,sha256=I92EB_7I6lnI5xJ3_9KsTSu4a4qjAPnvZ6OB01074Zo,12471
 lollms_client/llm_bindings/perplexity/__init__.py,sha256=lMRPdbVbGX_weByAdcsZakdxDg7nFF3uCbdzakQmBOc,15006
 lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=ZTuVa5ngu9GPVImjs_g8ArV7Bx7a1Rze518Tz8AFJ3U,31807
 lollms_client/llm_bindings/tensor_rt/__init__.py,sha256=xiT-JAyNI_jo6CE0nle9Xoc7U8-UHAfEHrnCwmDTiOE,32023
@@ -79,8 +80,8 @@ lollms_client/tts_bindings/xtts/server/main.py,sha256=T-Kn5NM-u1FJMygeV8rOoZKlqn
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.5.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-1.5.1.dist-info/METADATA,sha256=Zdwrec5DA_TDhKSsDk-QwOuUHK3_EPkLq8B3F1hDKlE,71854
-lollms_client-1.5.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-1.5.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-1.5.1.dist-info/RECORD,,
+lollms_client-1.5.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.5.2.dist-info/METADATA,sha256=7ankzHLrdEnkmX77wozSeJ_RRGaZGzlzXLhWuNBMsaQ,71854
+lollms_client-1.5.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.5.2.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.5.2.dist-info/RECORD,,
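
Two caveats for downstream users of the new binding, illustrated in the sketch below. It reuses the hypothetical binding instance from the earlier example, and nomic-embed-text is a placeholder model name: token counts come from tiktoken's cl100k_base fallback rather than the server model's actual tokenizer, so they are approximations; and embed() goes through OpenWebUI's /ollama/api/embeddings passthrough, which assumes an Ollama backend with an embedding-capable model.

    # Approximate token count (tiktoken cl100k_base fallback, not the
    # server model's own tokenizer).
    n_tokens = binding.count_tokens("Hello from OpenWebUI")

    # Embeddings via the Ollama passthrough; the model name is a
    # placeholder and must exist on the backing Ollama instance.
    vector = binding.embed("Hello from OpenWebUI", model="nomic-embed-text")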