lollms-client 1.4.1__py3-none-any.whl → 1.7.10__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
- lollms_client/llm_bindings/claude/__init__.py +125 -34
- lollms_client/llm_bindings/gemini/__init__.py +261 -159
- lollms_client/llm_bindings/grok/__init__.py +52 -14
- lollms_client/llm_bindings/groq/__init__.py +2 -2
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
- lollms_client/llm_bindings/litellm/__init__.py +1 -1
- lollms_client/llm_bindings/llamacpp/__init__.py +18 -11
- lollms_client/llm_bindings/lollms/__init__.py +151 -32
- lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
- lollms_client/llm_bindings/mistral/__init__.py +2 -2
- lollms_client/llm_bindings/novita_ai/__init__.py +439 -0
- lollms_client/llm_bindings/ollama/__init__.py +309 -93
- lollms_client/llm_bindings/open_router/__init__.py +2 -2
- lollms_client/llm_bindings/openai/__init__.py +148 -29
- lollms_client/llm_bindings/openllm/__init__.py +362 -506
- lollms_client/llm_bindings/openwebui/__init__.py +465 -0
- lollms_client/llm_bindings/perplexity/__init__.py +326 -0
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
- lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
- lollms_client/llm_bindings/transformers/__init__.py +428 -632
- lollms_client/llm_bindings/vllm/__init__.py +1 -1
- lollms_client/lollms_agentic.py +4 -2
- lollms_client/lollms_base_binding.py +61 -0
- lollms_client/lollms_core.py +516 -1890
- lollms_client/lollms_discussion.py +55 -18
- lollms_client/lollms_llm_binding.py +112 -261
- lollms_client/lollms_mcp_binding.py +34 -75
- lollms_client/lollms_personality.py +5 -2
- lollms_client/lollms_stt_binding.py +85 -52
- lollms_client/lollms_tti_binding.py +23 -37
- lollms_client/lollms_ttm_binding.py +24 -42
- lollms_client/lollms_tts_binding.py +28 -17
- lollms_client/lollms_ttv_binding.py +24 -42
- lollms_client/lollms_types.py +4 -2
- lollms_client/stt_bindings/whisper/__init__.py +108 -23
- lollms_client/stt_bindings/whispercpp/__init__.py +7 -1
- lollms_client/tti_bindings/diffusers/__init__.py +418 -810
- lollms_client/tti_bindings/diffusers/server/main.py +1051 -0
- lollms_client/tti_bindings/gemini/__init__.py +182 -239
- lollms_client/tti_bindings/leonardo_ai/__init__.py +127 -0
- lollms_client/tti_bindings/lollms/__init__.py +4 -1
- lollms_client/tti_bindings/novita_ai/__init__.py +105 -0
- lollms_client/tti_bindings/openai/__init__.py +10 -11
- lollms_client/tti_bindings/stability_ai/__init__.py +178 -0
- lollms_client/ttm_bindings/audiocraft/__init__.py +7 -12
- lollms_client/ttm_bindings/beatoven_ai/__init__.py +129 -0
- lollms_client/ttm_bindings/lollms/__init__.py +4 -17
- lollms_client/ttm_bindings/replicate/__init__.py +115 -0
- lollms_client/ttm_bindings/stability_ai/__init__.py +117 -0
- lollms_client/ttm_bindings/topmediai/__init__.py +96 -0
- lollms_client/tts_bindings/bark/__init__.py +7 -10
- lollms_client/tts_bindings/lollms/__init__.py +6 -1
- lollms_client/tts_bindings/piper_tts/__init__.py +8 -11
- lollms_client/tts_bindings/xtts/__init__.py +157 -74
- lollms_client/tts_bindings/xtts/server/main.py +241 -280
- {lollms_client-1.4.1.dist-info → lollms_client-1.7.10.dist-info}/METADATA +316 -6
- lollms_client-1.7.10.dist-info/RECORD +89 -0
- lollms_client/ttm_bindings/bark/__init__.py +0 -339
- lollms_client-1.4.1.dist-info/RECORD +0 -78
- {lollms_client-1.4.1.dist-info → lollms_client-1.7.10.dist-info}/WHEEL +0 -0
- {lollms_client-1.4.1.dist-info → lollms_client-1.7.10.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.4.1.dist-info → lollms_client-1.7.10.dist-info}/top_level.txt +0 -0
--- a/lollms_client/llm_bindings/openai/__init__.py
+++ b/lollms_client/llm_bindings/openai/__init__.py
@@ -12,16 +12,73 @@ from typing import List, Dict
 import math
 import httpx
 import pipmaster as pm
-
+import mimetypes
 pm.ensure_packages(["openai","tiktoken"])
 
 import openai
 import tiktoken
 import os
-
+import base64
 BindingName = "OpenAIBinding"
 
 
+def _read_file_as_base64(path):
+    with open(path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+def _extract_markdown_path(s):
+    s = s.strip()
+    if s.startswith("[") and s.endswith(")"):
+        lb, rb = s.find("["), s.find("]")
+        if lb != -1 and rb != -1 and rb > lb:
+            return s[lb+1:rb].strip()
+    return s
+
+def _guess_mime_from_name(name, default="image/jpeg"):
+    mime, _ = mimetypes.guess_type(name)
+    return mime or default
+
+def _to_data_url(b64_str, mime):
+    return f"data:{mime};base64,{b64_str}"
+
+def normalize_image_input(img, default_mime="image/jpeg"):
+    """
+    Returns a Responses API-ready content block:
+        { "type": "input_image", "image_url": "data:<mime>;base64,<...>" }
+    Accepts:
+        - dict {'data': '<base64>', 'mime': 'image/png'}
+        - dict {'path': 'E:\\images\\x.png'}
+        - string raw base64
+        - string local path (Windows/POSIX), including markdown-like "[E:\\path\\img.png]()"
+    URLs are intentionally not supported (base64 only).
+    """
+    if isinstance(img, dict):
+        if "data" in img and isinstance(img["data"], str):
+            mime = img.get("mime", default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(img["data"], mime)}
+        if "path" in img and isinstance(img["path"], str):
+            p = _extract_markdown_path(img["path"])
+            b64 = _read_file_as_base64(p)
+            mime = _guess_mime_from_name(p, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        if "url" in img:
+            raise ValueError("URL inputs not allowed here; provide base64 or local path")
+        raise ValueError("Unsupported dict format for image input")
+
+    if isinstance(img, str):
+        s = _extract_markdown_path(img)
+        # Accept already-correct data URLs as-is
+        if s.startswith("data:"):
+            return {"type": "input_image", "image_url": s}
+        # Local path heuristics: exists on disk or looks like a path
+        if os.path.exists(s) or (":" in s and "\\" in s) or s.startswith("/") or s.startswith("."):
+            b64 = _read_file_as_base64(s)
+            mime = _guess_mime_from_name(s, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        # Otherwise, treat as raw base64 payload
+        return {"type": "input_image", "image_url": _to_data_url(s, default_mime)}
+
+    raise ValueError("Unsupported image input type")
 class OpenAIBinding(LollmsLLMBinding):
     """OpenAI-specific binding implementation"""
 
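The new module-level helpers convert any accepted image representation into a base64 data-URL content block. A minimal usage sketch, with placeholder payloads and hypothetical paths (not taken from the package):

```python
from lollms_client.llm_bindings.openai import normalize_image_input

# Raw base64 with an explicit MIME type (payload truncated for illustration)
block = normalize_image_input({"data": "iVBORw0KGgoAAAANS", "mime": "image/png"})
assert block == {
    "type": "input_image",
    "image_url": "data:image/png;base64,iVBORw0KGgoAAAANS",
}

# A local file would be read and base64-encoded, with the MIME type guessed
# from its extension; the paths here are hypothetical:
#   normalize_image_input({"path": "E:\\images\\x.png"})
#   normalize_image_input("[E:\\images\\x.png]()")  # markdown wrapper is unwrapped first

# URL inputs are rejected by design (base64 only)
try:
    normalize_image_input({"url": "https://example.com/x.png"})
except ValueError as err:
    print(err)  # URL inputs not allowed here; provide base64 or local path
```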
@@ -123,17 +180,18 @@ class OpenAIBinding(LollmsLLMBinding):
 
         if images:
             if split:
+                # Original call to split message roles
                 messages += self.split_discussion(prompt, user_keyword=user_keyword, ai_keyword=ai_keyword)
-
-
-
-                ]
+                # Convert the last message content to the structured content array
+                last = messages[-1]
+                text_block = {"type": "text", "text": last["content"]}
+                image_blocks = [normalize_image_input(img) for img in images]
+                last["content"] = [text_block] + image_blocks
             else:
                 messages.append({
-
-
-
-                    for path in images
+                    "role": "user",
+                    "content": [{"type": "text", "text": prompt}] + [
+                        normalize_image_input(img) for img in images
                     ]
                 })
         else:
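With this change, a prompt with attached images becomes a single user message whose content is a structured array rather than a plain string. A sketch of the resulting shape (text and payload are placeholders):

```python
# Shape of the final user message after image blocks are appended
message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this picture."},
        {"type": "input_image", "image_url": "data:image/jpeg;base64,<payload>"},
    ],
}
```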
@@ -242,10 +300,66 @@ class OpenAIBinding(LollmsLLMBinding):
         streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
         **kwargs
     ) -> Union[str, dict]:
-
+
+        # --- Standardize Messages for OpenAI ---
+        def normalize_message(msg: Dict) -> Dict:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+            text_parts = []
+            images = []
+
+            # 1. Extract Text and Images from input
+            if isinstance(content, str):
+                text_parts.append(content)
+            elif isinstance(content, list):
+                for item in content:
+                    if item.get("type") == "text":
+                        text_parts.append(item.get("text", ""))
+                    elif item.get("type") in ["input_image", "image_url"]:
+                        # Handle various internal representations of images
+                        val = item.get("image_url")
+                        if isinstance(val, dict):
+                            # Handle dicts like {"url": "..."} or {"base64": "..."}
+                            val = val.get("url") or val.get("base64")
+
+                        if isinstance(val, str) and val:
+                            images.append(val)
+
+            text_content = "\n".join([p for p in text_parts if p.strip()])
+
+            # 2. Format for OpenAI API
+            if not images:
+                # Simple text-only message
+                return {"role": role, "content": text_content}
+            else:
+                # Multimodal message
+                openai_content = []
+                if text_content:
+                    openai_content.append({"type": "text", "text": text_content})
+
+                for img in images:
+                    # OpenAI STRICTLY requires the data URI prefix for base64
+                    # or a valid http/https URL.
+                    img_url = img
+                    if not img.startswith("http"):
+                        if not img.startswith("data:"):
+                            # If raw base64 is passed without header, add default jpeg header
+                            img_url = f"data:image/jpeg;base64,{img}"
+
+                    openai_content.append({
+                        "type": "image_url",
+                        "image_url": {"url": img_url}
+                    })
+
+                return {"role": role, "content": openai_content}
+
+        # Process and clean the list
+        openai_messages = [normalize_message(m) for m in messages]
+
+        # --- Build Request ---
         params = {
             "model": self.model_name,
-            "messages":
+            "messages": openai_messages,  # Use the standardized list
             "max_tokens": n_predict,
             "n": 1,
             "temperature": temperature,
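The inner `normalize_message` flattens the binding's internal message variants into plain Chat Completions messages: text parts are joined, and image entries of type `input_image` or `image_url` (string, `{"url": ...}`, or `{"base64": ...}`) are collected and re-emitted as `image_url` blocks. Since it is a nested function, this sketch only illustrates the mapping (values are placeholders):

```python
# Hypothetical internal message mixing the representations normalize_message accepts
raw = {
    "role": "user",
    "content": [
        {"type": "text", "text": "What is in this image?"},
        {"type": "input_image", "image_url": "data:image/png;base64,<b64>"},
        {"type": "image_url", "image_url": {"url": "https://example.com/cat.jpg"}},
    ],
}

# normalize_message(raw) yields a Chat Completions-ready message:
# {
#     "role": "user",
#     "content": [
#         {"type": "text", "text": "What is in this image?"},
#         {"type": "image_url", "image_url": {"url": "data:image/png;base64,<b64>"}},
#         {"type": "image_url", "image_url": {"url": "https://example.com/cat.jpg"}},
#     ],
# }
```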
@@ -253,34 +367,39 @@ class OpenAIBinding(LollmsLLMBinding):
             "frequency_penalty": repeat_penalty,
             "stream": stream
         }
-
+
+        # Add seed if available
         if seed is not None:
             params["seed"] = seed
 
-        # Remove None values
+        # Remove None values
         params = {k: v for k, v in params.items() if v is not None}
 
         output = ""
-
+
+        # --- Call API ---
         try:
             try:
                 completion = self.client.chat.completions.create(**params)
             except Exception as ex:
-                #
-
-
-
-
-                try: del params["top_p"]
-                except Exception: pass
-                try: del params["frequency_penalty"]
-                except Exception: pass
-
+                # Fallback/Handling for 'reasoning' models (o1, o3-mini)
+                # which don't support max_tokens, temperature, etc.
+                if "max_tokens" in params:
+                    params["max_completion_tokens"] = params["max_tokens"]
+                    del params["max_tokens"]
 
+                # Set temperature to 1 (required for some o-series models) or remove it
+                params["temperature"] = 1
+
+                keys_to_remove = ["top_p", "frequency_penalty", "presence_penalty"]
+                for k in keys_to_remove:
+                    if k in params:
+                        del params[k]
+
                 completion = self.client.chat.completions.create(**params)
+
             if stream:
                 for chunk in completion:
-                    # The streaming response for chat has a different structure
                     delta = chunk.choices[0].delta
                     if delta.content:
                         word = delta.content
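The new except branch replaces the old piecemeal `del` calls with a single retry that rewrites the request for reasoning models. The same pattern as a standalone sketch (the function name is ours; `client` is assumed to be an `openai.OpenAI` instance and `params` a `chat.completions.create` kwargs dict):

```python
def call_with_reasoning_fallback(client, params):
    """Try a standard chat call; on failure, retry with o-series-safe params."""
    try:
        return client.chat.completions.create(**params)
    except Exception:
        # Reasoning models expect max_completion_tokens instead of max_tokens
        if "max_tokens" in params:
            params["max_completion_tokens"] = params.pop("max_tokens")
        # They also pin temperature to 1 and reject sampling penalties
        params["temperature"] = 1
        for k in ("top_p", "frequency_penalty", "presence_penalty"):
            params.pop(k, None)
        return client.chat.completions.create(**params)
```

Mutating `params` in place before the second call, as the binding does, means a failed first attempt permanently downgrades the request; the sketch above inherits that trade-off.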
@@ -292,7 +411,6 @@ class OpenAIBinding(LollmsLLMBinding):
                 output = completion.choices[0].message.content
 
         except Exception as e:
-            # Handle API errors gracefully
             error_message = f"An error occurred with the OpenAI API: {e}"
             if streaming_callback:
                 streaming_callback(error_message, MSG_TYPE.MSG_TYPE_EXCEPTION)
@@ -313,7 +431,8 @@ class OpenAIBinding(LollmsLLMBinding):
         seed: Optional[int] = None,
         n_threads: Optional[int] = None,
         ctx_size: Optional[int] = None,
-        streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None
+        streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+        **kwargs
     ) -> Union[str, dict]:
 
         messages = discussion.export("openai_chat", branch_tip_id)
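For the discussion-based path, `discussion.export("openai_chat", branch_tip_id)` is expected to yield a plain Chat Completions message list, which the normalization above then standardizes. An illustrative (not package-derived) export:

```python
# Illustrative "openai_chat" export: a flat list of role/content messages
exported = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
    {"role": "assistant", "content": "Hi! How can I help?"},
]
```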
@@ -590,7 +709,7 @@ class OpenAIBinding(LollmsLLMBinding):
             "model_name": self.model_name
         }
 
-    def
+    def list_models(self) -> List[Dict]:
         # Known context lengths
         known_context_lengths = {
             "gpt-4o": 128000,
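The restored `list_models` signature returns a list of dicts seeded from the `known_context_lengths` table. A hedged consumption sketch; the hunk is truncated here, so the constructor arguments and entry keys below are assumptions, not confirmed by the diff:

```python
from lollms_client.llm_bindings.openai import OpenAIBinding

# Assumed construction; the actual __init__ signature is not shown in this diff
binding = OpenAIBinding(model_name="gpt-4o")
for entry in binding.list_models():
    print(entry)  # one dict per model, with context sizes from known_context_lengths
```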