lollms-client 1.1.3__py3-none-any.whl → 1.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/lollms/__init__.py +2 -2
- lollms_client/llm_bindings/ollama/__init__.py +80 -23
- lollms_client/llm_bindings/openai/__init__.py +3 -3
- lollms_client/lollms_core.py +286 -132
- lollms_client/lollms_discussion.py +419 -147
- lollms_client/lollms_tti_binding.py +32 -82
- lollms_client/tti_bindings/diffusers/__init__.py +372 -315
- lollms_client/tti_bindings/openai/__init__.py +124 -0
- {lollms_client-1.1.3.dist-info → lollms_client-1.3.1.dist-info}/METADATA +1 -1
- {lollms_client-1.1.3.dist-info → lollms_client-1.3.1.dist-info}/RECORD +14 -14
- lollms_client/tti_bindings/dalle/__init__.py +0 -454
- {lollms_client-1.1.3.dist-info → lollms_client-1.3.1.dist-info}/WHEEL +0 -0
- {lollms_client-1.1.3.dist-info → lollms_client-1.3.1.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.1.3.dist-info → lollms_client-1.3.1.dist-info}/top_level.txt +0 -0
lollms_client/__init__.py
CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "1.1.3"
+__version__ = "1.3.1" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
lollms_client/llm_bindings/lollms/__init__.py
CHANGED

@@ -9,7 +9,7 @@ from lollms_client.lollms_discussion import LollmsDiscussion
 from typing import Optional, Callable, List, Union
 from ascii_colors import ASCIIColors, trace_exception
 from typing import List, Dict
-
+import httpx
 import pipmaster as pm
 
 pm.ensure_packages(["openai","tiktoken"])
@@ -49,7 +49,7 @@ class LollmsBinding(LollmsLLMBinding):
 
         if not self.service_key:
             self.service_key = os.getenv("LOLLMS_API_KEY", self.service_key)
-        self.client = openai.OpenAI(api_key=self.service_key, base_url=None if self.host_address is None else self.host_address if len(self.host_address)>0 else None)
+        self.client = openai.OpenAI(api_key=self.service_key, base_url=None if self.host_address is None else self.host_address if len(self.host_address)>0 else None, http_client=httpx.Client(verify=self.verify_ssl_certificate))
         self.completion_format = ELF_COMPLETION_FORMAT.Chat
 
     def lollms_listMountedPersonalities(self, host_address:str|None=None):
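Both OpenAI-compatible bindings in this release (this one and the openai binding further down) now pass an explicit httpx.Client, so verify_ssl_certificate actually reaches the transport layer; the 1.1.3 code constructed openai.OpenAI without an http_client, so TLS verification could not be customized. A minimal sketch of the same pattern, with placeholder endpoint and key rather than values from this package:

    import httpx
    import openai

    # Placeholder configuration for illustration; the bindings read these from
    # their own settings (service_key, host_address, verify_ssl_certificate).
    verify_ssl_certificate = False  # e.g. a self-signed local server

    client = openai.OpenAI(
        api_key="sk-placeholder",
        base_url="https://localhost:9600/v1",
        # The OpenAI SDK does not expose TLS verification directly; handing it a
        # pre-built httpx.Client is how that knob is reached.
        http_client=httpx.Client(verify=verify_ssl_certificate),
    )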
lollms_client/llm_bindings/ollama/__init__.py
CHANGED

@@ -1,4 +1,4 @@
-# bindings/ollama/
+# bindings/ollama/__init__.py
 import requests
 import json
 from lollms_client.lollms_llm_binding import LollmsLLMBinding
@@ -13,7 +13,7 @@ from ascii_colors import ASCIIColors, trace_exception
 import pipmaster as pm
 from lollms_client.lollms_utilities import ImageTokenizer
 pm.ensure_packages(["ollama","pillow","tiktoken"])
-
+import re
 
 import ollama
 import tiktoken
@@ -256,22 +256,22 @@ class OllamaBinding(LollmsLLMBinding):
         return {"status": False, "error": error_message}
 
     def generate_from_messages(self,
-                               (original parameter lines not rendered in the source diff view)
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               repeat_last_n: Optional[int] = None,
+                               seed: Optional[int] = None,
+                               n_threads: Optional[int] = None,
+                               ctx_size: int | None = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
         if not self.ollama_client:
-            (original line not rendered in the source diff view)
+            return {"status": False, "error": "Ollama client not initialized."}
 
         options = {}
         if n_predict is not None: options['num_predict'] = n_predict
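A hedged usage sketch for the new signature. The binding instance and the MSG_TYPE import path are assumptions; the parameter names, the callback contract (returning False stops the stream), and the error-dict shape come from the diff itself:

    from lollms_client.lollms_types import MSG_TYPE  # import path assumed

    def on_chunk(chunk: str, msg_type: MSG_TYPE) -> bool:
        print(chunk, end="", flush=True)
        return True  # returning False makes the binding break out of the stream

    # `binding` is an OllamaBinding constructed elsewhere (not shown in this diff).
    result = binding.generate_from_messages(
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
        temperature=0.7,
        streaming_callback=on_chunk,
    )

    # On success `result` is the generated string; on failure the method returns
    # a {"status": False, "error": ...} dict, so callers should check the type.
    if isinstance(result, dict):
        print(result["error"])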
@@ -283,34 +283,91 @@ class OllamaBinding(LollmsLLMBinding):
         if seed is not None: options['seed'] = seed
         if n_threads is not None: options['num_thread'] = n_threads
         if ctx_size is not None: options['num_ctx'] = ctx_size
-
+
+        def normalize_message(msg: Dict) -> Dict:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+            text_parts = []
+            images = []
+
+            if isinstance(content, str):
+                text_parts.append(content)
+            elif isinstance(content, list):
+                for item in content:
+                    if item.get("type") == "text":
+                        text_parts.append(item.get("text", ""))
+                    elif item.get("type") == "image_url":
+                        base64_data = item.get("image_url", {}).get("base64")
+                        url = item.get("image_url", {}).get("url")
+                        if base64_data:
+                            # ⚠️ remove prefix "data:image/...;base64,"
+                            cleaned = re.sub(r"^data:image/[^;]+;base64,", "", base64_data)
+                            images.append(cleaned)
+                        elif url:
+                            images.append(url)
+
+            return {
+                "role": role,
+                "content": "\n".join([p for p in text_parts if p.strip()]),
+                "images": images if images else None
+            }
+
+        ollama_messages = []
+        for m in messages:
+            nm = normalize_message(m)
+            if nm["images"]:
+                ollama_messages.append({
+                    "role": nm["role"],
+                    "content": nm["content"],
+                    "images": nm["images"]
+                })
+            else:
+                ollama_messages.append({
+                    "role": nm["role"],
+                    "content": nm["content"]
+                })
+
         full_response_text = ""
 
         try:
             if stream:
                 response_stream = self.ollama_client.chat(
                     model=self.model_name,
-                    messages=
+                    messages=ollama_messages,
                     stream=True,
                     options=options if options else None
                 )
                 for chunk_dict in response_stream:
                     chunk_content = chunk_dict.get('message', {}).get('content', '')
-                    if chunk_content:
+                    if chunk_content:
                         full_response_text += chunk_content
                         if streaming_callback:
                             if not streaming_callback(chunk_content, MSG_TYPE.MSG_TYPE_CHUNK):
-                                break
+                                break
                 return full_response_text
-            else:
+            else:
                 response_dict = self.ollama_client.chat(
                     model=self.model_name,
-                    messages=
+                    messages=ollama_messages,
                     stream=False,
                     options=options if options else None
                 )
                 return response_dict.get('message', {}).get('content', '')
+        except ollama.ResponseError as e:
+            error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message, "status_code": e.status_code}
+        except ollama.RequestError as e:
+            error_message = f"Ollama API RequestError: {str(e)}"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message}
+        except Exception as ex:
+            error_message = f"An unexpected error occurred: {str(ex)}"
+            trace_exception(ex)
+            return {"status": False, "error": error_message}
+
+
         except ollama.ResponseError as e:
             error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
             ASCIIColors.error(error_message)
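The normalize_message helper added above flattens OpenAI-style multimodal content into the flat role/content/images shape that Ollama's chat API expects, stripping any data-URL prefix because Ollama wants raw base64 in the images list. A small sketch of that stripping step in isolation (the sample message is invented for illustration; the regex is the one from the diff):

    import re

    # An OpenAI-style multimodal message with an inline base64 image.
    msg = {
        "role": "user",
        "content": [
            {"type": "text", "text": "What is in this picture?"},
            {"type": "image_url", "image_url": {"base64": "data:image/png;base64,iVBORw0KGgo..."}},
        ],
    }

    # Same regex as the binding: drop the "data:image/...;base64," prefix so only
    # the raw base64 payload lands in the Ollama "images" list.
    raw = msg["content"][1]["image_url"]["base64"]
    cleaned = re.sub(r"^data:image/[^;]+;base64,", "", raw)
    print(cleaned)  # iVBORw0KGgo...

Note that the stripping only runs on the base64 key; an image supplied via url is forwarded untouched, so a data URL passed as url would reach Ollama with its prefix intact.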
lollms_client/llm_bindings/openai/__init__.py
CHANGED

@@ -10,7 +10,7 @@ from typing import Optional, Callable, List, Union
 from ascii_colors import ASCIIColors, trace_exception
 from typing import List, Dict
 import math
-
+import httpx
 import pipmaster as pm
 
 pm.ensure_packages(["openai","tiktoken"])
@@ -47,7 +47,7 @@ class OpenAIBinding(LollmsLLMBinding):
 
         if not self.service_key:
             self.service_key = os.getenv("OPENAI_API_KEY", self.service_key)
-        self.client = openai.OpenAI(api_key=self.service_key, base_url=None if self.host_address is None else self.host_address if len(self.host_address)>0 else None)
+        self.client = openai.OpenAI(api_key=self.service_key, base_url=None if self.host_address is None else self.host_address if len(self.host_address)>0 else None, http_client=httpx.Client(verify=self.verify_ssl_certificate))
         self.completion_format = ELF_COMPLETION_FORMAT.Chat
 
     def _build_openai_params(self, messages: list, **kwargs) -> dict:
@@ -668,4 +668,4 @@ class OpenAIBinding(LollmsLLMBinding):
         """
         self.model = model_name
         self.model_name = model_name
-        return True
+        return True