lollms-client 1.4.8__py3-none-any.whl → 1.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "1.4.8" # Updated version
+__version__ = "1.5.0" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [
@@ -11,6 +11,8 @@ from ascii_colors import ASCIIColors, trace_exception
 from typing import List, Dict
 import httpx
 import pipmaster as pm
+import mimetypes
+import base64
 
 pm.ensure_packages(["openai","tiktoken"])
 
@@ -20,7 +22,63 @@ import os
 
 BindingName = "LollmsBinding"
 
-
+def _read_file_as_base64(path):
+    with open(path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+def _extract_markdown_path(s):
+    s = s.strip()
+    if s.startswith("[") and s.endswith(")"):
+        lb, rb = s.find("["), s.find("]")
+        if lb != -1 and rb != -1 and rb > lb:
+            return s[lb+1:rb].strip()
+    return s
+
+def _guess_mime_from_name(name, default="image/jpeg"):
+    mime, _ = mimetypes.guess_type(name)
+    return mime or default
+
+def _to_data_url(b64_str, mime):
+    return f"data:{mime};base64,{b64_str}"
+
+def normalize_image_input(img, default_mime="image/jpeg"):
+    """
+    Returns a Responses API-ready content block:
+        { "type": "input_image", "image_url": "data:<mime>;base64,<...>" }
+    Accepts:
+        - dict {'data': '<base64>', 'mime': 'image/png'}
+        - dict {'path': 'E:\\images\\x.png'}
+        - string raw base64
+        - string local path (Windows/POSIX), including markdown-like "[E:\\path\\img.png]()"
+    URLs are intentionally not supported (base64 only).
+    """
+    if isinstance(img, dict):
+        if "data" in img and isinstance(img["data"], str):
+            mime = img.get("mime", default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(img["data"], mime)}
+        if "path" in img and isinstance(img["path"], str):
+            p = _extract_markdown_path(img["path"])
+            b64 = _read_file_as_base64(p)
+            mime = _guess_mime_from_name(p, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        if "url" in img:
+            raise ValueError("URL inputs not allowed here; provide base64 or local path")
+        raise ValueError("Unsupported dict format for image input")
+
+    if isinstance(img, str):
+        s = _extract_markdown_path(img)
+        # Accept already-correct data URLs as-is
+        if s.startswith("data:"):
+            return {"type": "input_image", "image_url": s}
+        # Local path heuristics: exists on disk or looks like a path
+        if os.path.exists(s) or (":" in s and "\\" in s) or s.startswith("/") or s.startswith("."):
+            b64 = _read_file_as_base64(s)
+            mime = _guess_mime_from_name(s, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        # Otherwise, treat as raw base64 payload
+        return {"type": "input_image", "image_url": _to_data_url(s, default_mime)}
+
+    raise ValueError("Unsupported image input type")
 class LollmsBinding(LollmsLLMBinding):
     """Lollms-specific binding implementation (open ai compatible with some extra parameters)"""
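
For illustration, a minimal sketch of how the new normalize_image_input helper could be called; the file path and the truncated base64 payload are placeholders, and the helper is assumed to be in scope (e.g. imported from this binding module):

blocks = [
    normalize_image_input({"path": "./chart.png"}),                          # local file: read, base64-encode, guess MIME
    normalize_image_input({"data": "iVBORw0KGgo...", "mime": "image/png"}),  # pre-encoded payload with explicit MIME
    normalize_image_input("[./chart.png]()"),                                # markdown-wrapped path is unwrapped first
]
# Each entry has the shape {"type": "input_image", "image_url": "data:image/png;base64,..."}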
 
@@ -54,7 +112,7 @@ class LollmsBinding(LollmsLLMBinding):
 
     def lollms_listMountedPersonalities(self, host_address:str|None=None):
         host_address = host_address if host_address else self.host_address
-        url = f"{host_address}/list_mounted_personalities"
+        url = f"{host_address}/personalities"
 
         response = requests.get(url)
 
@@ -136,17 +194,18 @@ class LollmsBinding(LollmsLLMBinding):
 
         if images:
             if split:
+                # Original call to split message roles
                 messages += self.split_discussion(prompt, user_keyword=user_keyword, ai_keyword=ai_keyword)
-                messages[-1]["content"] = [{"type": "text", "text": messages[-1]["content"]}] + [
-                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
-                    for path in images
-                ]
+                # Convert the last message content to the structured content array
+                last = messages[-1]
+                text_block = {"type": "text", "text": last["content"]}
+                image_blocks = [normalize_image_input(img) for img in images]
+                last["content"] = [text_block] + image_blocks
             else:
                 messages.append({
-                    'role': 'user',
-                    'content': [{"type": "text", "text": prompt}] + [
-                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
-                        for path in images
+                    "role": "user",
+                    "content": [{"type": "text", "text": prompt}] + [
+                        normalize_image_input(img) for img in images
                     ]
                 })
         else:
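
For reference, a sketch of the user message this branch now builds; the prompt text and the base64 payload are placeholder values:

message = {
    "role": "user",
    "content": [
        {"type": "text", "text": "Describe this picture"},
        {"type": "input_image", "image_url": "data:image/jpeg;base64,/9j/4AAQ..."},
    ],
}
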
@@ -296,15 +296,13 @@ class OllamaBinding(LollmsLLMBinding):
         for item in content:
             if item.get("type") == "text":
                 text_parts.append(item.get("text", ""))
-            elif item.get("type") == "image_url":
-                base64_data = item.get("image_url", {}).get("base64")
-                url = item.get("image_url", {}).get("url")
+            elif item.get("type") == "input_image":
+                base64_data = item.get("image_url")
                 if base64_data:
                     # ⚠️ remove prefix "data:image/...;base64,"
                     cleaned = re.sub(r"^data:image/[^;]+;base64,", "", base64_data)
                     images.append(cleaned)
-                elif url:
-                    images.append(url)
+
 
         return {
             "role": role,
@@ -12,7 +12,7 @@ from typing import List, Dict
 import math
 import httpx
 import pipmaster as pm
-
+import mimetypes
 pm.ensure_packages(["openai","tiktoken"])
 
 import openai
@@ -22,6 +22,63 @@ import os
 BindingName = "OpenAIBinding"
 
 
+def _read_file_as_base64(path):
+    with open(path, "rb") as f:
+        return base64.b64encode(f.read()).decode("utf-8")
+
+def _extract_markdown_path(s):
+    s = s.strip()
+    if s.startswith("[") and s.endswith(")"):
+        lb, rb = s.find("["), s.find("]")
+        if lb != -1 and rb != -1 and rb > lb:
+            return s[lb+1:rb].strip()
+    return s
+
+def _guess_mime_from_name(name, default="image/jpeg"):
+    mime, _ = mimetypes.guess_type(name)
+    return mime or default
+
+def _to_data_url(b64_str, mime):
+    return f"data:{mime};base64,{b64_str}"
+
+def normalize_image_input(img, default_mime="image/jpeg"):
+    """
+    Returns a Responses API-ready content block:
+        { "type": "input_image", "image_url": "data:<mime>;base64,<...>" }
+    Accepts:
+        - dict {'data': '<base64>', 'mime': 'image/png'}
+        - dict {'path': 'E:\\images\\x.png'}
+        - string raw base64
+        - string local path (Windows/POSIX), including markdown-like "[E:\\path\\img.png]()"
+    URLs are intentionally not supported (base64 only).
+    """
+    if isinstance(img, dict):
+        if "data" in img and isinstance(img["data"], str):
+            mime = img.get("mime", default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(img["data"], mime)}
+        if "path" in img and isinstance(img["path"], str):
+            p = _extract_markdown_path(img["path"])
+            b64 = _read_file_as_base64(p)
+            mime = _guess_mime_from_name(p, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        if "url" in img:
+            raise ValueError("URL inputs not allowed here; provide base64 or local path")
+        raise ValueError("Unsupported dict format for image input")
+
+    if isinstance(img, str):
+        s = _extract_markdown_path(img)
+        # Accept already-correct data URLs as-is
+        if s.startswith("data:"):
+            return {"type": "input_image", "image_url": s}
+        # Local path heuristics: exists on disk or looks like a path
+        if os.path.exists(s) or (":" in s and "\\" in s) or s.startswith("/") or s.startswith("."):
+            b64 = _read_file_as_base64(s)
+            mime = _guess_mime_from_name(s, default_mime)
+            return {"type": "input_image", "image_url": _to_data_url(b64, mime)}
+        # Otherwise, treat as raw base64 payload
+        return {"type": "input_image", "image_url": _to_data_url(s, default_mime)}
+
+    raise ValueError("Unsupported image input type")
 class OpenAIBinding(LollmsLLMBinding):
     """OpenAI-specific binding implementation"""
 
@@ -123,17 +180,18 @@ class OpenAIBinding(LollmsLLMBinding):
 
         if images:
             if split:
+                # Original call to split message roles
                 messages += self.split_discussion(prompt, user_keyword=user_keyword, ai_keyword=ai_keyword)
-                messages[-1]["content"] = [{"type": "text", "text": messages[-1]["content"]}] + [
-                    {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
-                    for path in images
-                ]
+                # Convert the last message content to the structured content array
+                last = messages[-1]
+                text_block = {"type": "text", "text": last["content"]}
+                image_blocks = [normalize_image_input(img) for img in images]
+                last["content"] = [text_block] + image_blocks
             else:
                 messages.append({
-                    'role': 'user',
-                    'content': [{"type": "text", "text": prompt}] + [
-                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encode_image(path)}"}}
-                        for path in images
+                    "role": "user",
+                    "content": [{"type": "text", "text": prompt}] + [
+                        normalize_image_input(img) for img in images
                     ]
                 })
         else:
@@ -5845,8 +5845,11 @@ Provide the final aggregated answer in {output_format} format, directly addressi
                     MSG_TYPE.MSG_TYPE_STEP_START,
                     {"id": f"chunk_{i+1}", "progress": progress_before}
                 )
-
-                prompt = chunk_summary_prompt_template.format(chunk_text=chunk)
+                try:
+                    prompt = chunk_summary_prompt_template.format(chunk_text=chunk)
+                except Exception as ex:
+                    ASCIIColors.warning(ex)
+                    prompt = chunk_summary_prompt_template.replace("{chunk_text}", chunk)
                 processed_system_prompt = system_prompt.format(chunk_id=i,scratchpad="\n\n---\n\n".join(chunk_summaries))
                 try:
                     # Generate summary for the current chunk
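
The new try/except matters when the summary template itself contains literal braces (for example an embedded JSON sample): str.format treats every brace pair as a replacement field and raises, while the fallback performs a plain textual substitution. A minimal sketch with a hypothetical template:

chunk_summary_prompt_template = 'Summarize the chunk below. Answer as JSON like {"summary": "..."}.\n{chunk_text}'
chunk = "Some chunk of source text."

try:
    prompt = chunk_summary_prompt_template.format(chunk_text=chunk)  # raises KeyError('"summary"')
except Exception:
    # Fall back to literal substitution, leaving other braces untouched.
    prompt = chunk_summary_prompt_template.replace("{chunk_text}", chunk)
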
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.4.8
+Version: 1.5.0
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache License
@@ -1,7 +1,7 @@
-lollms_client/__init__.py,sha256=aXDoiWyHhxA9qZ2dawSv9lH1_mMWQ8AwV--DRVw2u7w,1146
+lollms_client/__init__.py,sha256=JKweJeRzcBSH1iv1daEcmmdNuwUmCIdGk2TrQD55AZk,1146
 lollms_client/lollms_agentic.py,sha256=pQiMEuB_XkG29-SW6u4KTaMFPr6eKqacInggcCuCW3k,13914
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=aCEoxmEF6ZmkBgJgZd74lKkM4A3PVVyt2IwMvLfScWw,315053
+lollms_client/lollms_core.py,sha256=0FavCKvsoI1_feFCoW-lbe8CWvm1z8i-3t6uWozuhpo,315238
 lollms_client/lollms_discussion.py,sha256=ON29_FLV4toXSz8YTqNwM9_5bcNBg1XKTIeo_44rmgc,123245
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=Dj1PI2bQBYv_JgPxCIaIC7DMUvWdFJGwXFdsP5hdGBg,25014
@@ -26,13 +26,13 @@ lollms_client/llm_bindings/groq/__init__.py,sha256=EGrMh9vuCoM4pskDw8ydfsAWYgEb4
 lollms_client/llm_bindings/hugging_face_inference_api/__init__.py,sha256=SFcj5XQTDmN9eR4of82IgQa9iRYZaGlF6rMlF5S5wWg,13938
 lollms_client/llm_bindings/litellm/__init__.py,sha256=lRH4VfZMUG5JCCj6a7hk2PTfSyDowAu-ujLOM-XPl-8,12756
 lollms_client/llm_bindings/llamacpp/__init__.py,sha256=llPF85AzYgMp7Cpo_4OvEHKlxIAgI6F95NB3SqskD9E,62480
-lollms_client/llm_bindings/lollms/__init__.py,sha256=XFQKtTJnkW8OwF1IoyzHqAZ8JAJ0PnAUKDdeOLGcbrE,24310
+lollms_client/llm_bindings/lollms/__init__.py,sha256=S2ycEQORSxjy4J4S-FAoMWXjmvaQNvS-3ExeTU0DrTs,26904
 lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
 lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
 lollms_client/llm_bindings/novita_ai/__init__.py,sha256=NOg6_NBCxuz9gwrijTCzrp9a78AbmBdT4k67baCTtuc,13877
-lollms_client/llm_bindings/ollama/__init__.py,sha256=a6cgzXPuo8ZLhIZHJFy8QF0n5ZTk0X4OC1JSyXG1enk,46013
+lollms_client/llm_bindings/ollama/__init__.py,sha256=3cJra6K-r4ISPOC1VBnfSpMX6arNxSFUzEu10eO4LQc,45848
 lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
-lollms_client/llm_bindings/openai/__init__.py,sha256=ElLbtHLwR61Uj3W6G4g6QIhxtCqUGOCQBYwhQyN60us,26142
+lollms_client/llm_bindings/openai/__init__.py,sha256=J_1OI4TGWgAPwOIjrki1TOGePVLHZ1tYP-nKQFZNLIk,28734
 lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
 lollms_client/llm_bindings/perplexity/__init__.py,sha256=lMRPdbVbGX_weByAdcsZakdxDg7nFF3uCbdzakQmBOc,15006
 lollms_client/llm_bindings/pythonllamacpp/__init__.py,sha256=ZTuVa5ngu9GPVImjs_g8ArV7Bx7a1Rze518Tz8AFJ3U,31807
@@ -79,8 +79,8 @@ lollms_client/tts_bindings/xtts/server/main.py,sha256=T-Kn5NM-u1FJMygeV8rOoZKlqn
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.4.8.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-1.4.8.dist-info/METADATA,sha256=IXy2ED-SFtzaVkrR2FDNZSNXZW1KfaPakP3RGz02TPM,71854
-lollms_client-1.4.8.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-1.4.8.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-1.4.8.dist-info/RECORD,,
+lollms_client-1.5.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.5.0.dist-info/METADATA,sha256=i31CkTse62ayQGDMLRJPKs4ungJTxwPPCeGUVw__5cU,71854
+lollms_client-1.5.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.5.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.5.0.dist-info/RECORD,,