lollms-client 1.3.0__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client might be problematic.
lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
 from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
 from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
-__version__ = "1.3.0" # Updated version
+__version__ = "1.3.1" # Updated version
 
 # Optionally, you could define __all__ if you want to be explicit about exports
 __all__ = [

lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -1,4 +1,4 @@
-# bindings/ollama/binding.py
+# bindings/ollama/__init__.py
 import requests
 import json
 from lollms_client.lollms_llm_binding import LollmsLLMBinding
@@ -13,7 +13,7 @@ from ascii_colors import ASCIIColors, trace_exception
 import pipmaster as pm
 from lollms_client.lollms_utilities import ImageTokenizer
 pm.ensure_packages(["ollama","pillow","tiktoken"])
-
+import re
 
 import ollama
 import tiktoken
@@ -256,22 +256,22 @@ class OllamaBinding(LollmsLLMBinding):
             return {"status": False, "error": error_message}
 
     def generate_from_messages(self,
-                 messages: List[Dict],
-                 n_predict: Optional[int] = None,
-                 stream: Optional[bool] = None,
-                 temperature: Optional[float] = None,
-                 top_k: Optional[int] = None,
-                 top_p: Optional[float] = None,
-                 repeat_penalty: Optional[float] = None,
-                 repeat_last_n: Optional[int] = None,
-                 seed: Optional[int] = None,
-                 n_threads: Optional[int] = None,
-                 ctx_size: int | None = None,
-                 streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
-                 **kwargs
-                 ) -> Union[str, dict]:
+                               messages: List[Dict],
+                               n_predict: Optional[int] = None,
+                               stream: Optional[bool] = None,
+                               temperature: Optional[float] = None,
+                               top_k: Optional[int] = None,
+                               top_p: Optional[float] = None,
+                               repeat_penalty: Optional[float] = None,
+                               repeat_last_n: Optional[int] = None,
+                               seed: Optional[int] = None,
+                               n_threads: Optional[int] = None,
+                               ctx_size: int | None = None,
+                               streaming_callback: Optional[Callable[[str, MSG_TYPE], None]] = None,
+                               **kwargs
+                               ) -> Union[str, dict]:
         if not self.ollama_client:
-            return {"status": False, "error": "Ollama client not initialized."}
+            return {"status": False, "error": "Ollama client not initialized."}
 
         options = {}
         if n_predict is not None: options['num_predict'] = n_predict
@@ -283,34 +283,91 @@ class OllamaBinding(LollmsLLMBinding):
         if seed is not None: options['seed'] = seed
         if n_threads is not None: options['num_thread'] = n_threads
         if ctx_size is not None: options['num_ctx'] = ctx_size
-
+
+        def normalize_message(msg: Dict) -> Dict:
+            role = msg.get("role", "user")
+            content = msg.get("content", "")
+            text_parts = []
+            images = []
+
+            if isinstance(content, str):
+                text_parts.append(content)
+            elif isinstance(content, list):
+                for item in content:
+                    if item.get("type") == "text":
+                        text_parts.append(item.get("text", ""))
+                    elif item.get("type") == "image_url":
+                        base64_data = item.get("image_url", {}).get("base64")
+                        url = item.get("image_url", {}).get("url")
+                        if base64_data:
+                            # ⚠️ remove prefix "data:image/...;base64,"
+                            cleaned = re.sub(r"^data:image/[^;]+;base64,", "", base64_data)
+                            images.append(cleaned)
+                        elif url:
+                            images.append(url)
+
+            return {
+                "role": role,
+                "content": "\n".join([p for p in text_parts if p.strip()]),
+                "images": images if images else None
+            }
+
+        ollama_messages = []
+        for m in messages:
+            nm = normalize_message(m)
+            if nm["images"]:
+                ollama_messages.append({
+                    "role": nm["role"],
+                    "content": nm["content"],
+                    "images": nm["images"]
+                })
+            else:
+                ollama_messages.append({
+                    "role": nm["role"],
+                    "content": nm["content"]
+                })
+
         full_response_text = ""
 
         try:
             if stream:
                 response_stream = self.ollama_client.chat(
                     model=self.model_name,
-                    messages=messages,
+                    messages=ollama_messages,
                     stream=True,
                     options=options if options else None
                 )
                 for chunk_dict in response_stream:
                     chunk_content = chunk_dict.get('message', {}).get('content', '')
-                    if chunk_content: # Ensure there is content to process
+                    if chunk_content:
                         full_response_text += chunk_content
                         if streaming_callback:
                             if not streaming_callback(chunk_content, MSG_TYPE.MSG_TYPE_CHUNK):
-                                break # Callback requested stop
+                                break
                 return full_response_text
-            else: # Not streaming
+            else:
                 response_dict = self.ollama_client.chat(
                     model=self.model_name,
-                    messages=messages,
+                    messages=ollama_messages,
                     stream=False,
                     options=options if options else None
                 )
                 return response_dict.get('message', {}).get('content', '')
 
+        except ollama.ResponseError as e:
+            error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message, "status_code": e.status_code}
+        except ollama.RequestError as e:
+            error_message = f"Ollama API RequestError: {str(e)}"
+            ASCIIColors.error(error_message)
+            return {"status": False, "error": error_message}
+        except Exception as ex:
+            error_message = f"An unexpected error occurred: {str(ex)}"
+            trace_exception(ex)
+            return {"status": False, "error": error_message}
+
+
         except ollama.ResponseError as e:
             error_message = f"Ollama API ResponseError: {e.error or 'Unknown error'} (status code: {e.status_code})"
             ASCIIColors.error(error_message)
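
The net effect of this hunk: generate_from_messages now accepts OpenAI-style multimodal messages and flattens them into the shape the ollama chat client expects. Text parts are joined into a single content string, image_url parts move into an images list, and any "data:image/...;base64," prefix is stripped with the newly imported re module. (Note that the hunk leaves the pre-existing except chain in place below the newly added one, so the second chain is unreachable dead code.) Below is a minimal standalone sketch of the transformation; the message payload is hypothetical:

    import re

    msg = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Describe this image."},
            {"type": "image_url", "image_url": {"base64": "data:image/png;base64,iVBORw0KGgo..."}},
        ],
    }

    text_parts, images = [], []
    for item in msg["content"]:
        if item.get("type") == "text":
            text_parts.append(item.get("text", ""))
        elif item.get("type") == "image_url":
            b64 = item.get("image_url", {}).get("base64")
            if b64:
                # strip the data-URL prefix, as the binding now does
                images.append(re.sub(r"^data:image/[^;]+;base64,", "", b64))

    ollama_message = {"role": "user", "content": "\n".join(text_parts), "images": images}
    # -> {"role": "user", "content": "Describe this image.", "images": ["iVBORw0KGgo..."]}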

lollms_client/lollms_core.py CHANGED
@@ -1449,7 +1449,7 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
                 context: Optional[str] = None,
                 use_mcps: Union[None, bool, List[str]] = None,
                 use_data_store: Union[None, Dict[str, Callable]] = None,
-                system_prompt: str = None,
+                system_prompt: str|None = None,
                 reasoning_system_prompt: str = "You are a logical AI assistant. Your task is to achieve the user's goal by thinking step-by-step and using the available tools.",
                 images: Optional[List[str]] = None,
                 max_reasoning_steps: int = 10,
@@ -1525,8 +1525,8 @@ Provide your response as a single JSON object inside a JSON markdown tag. Use th
             if callable(info):
                 call_fn = info
             elif isinstance(info, dict):
-                if "call" in info and callable(info["call"]):
-                    call_fn = info["call"]
+                if "callable" in info and callable(info["callable"]):
+                    call_fn = info["callable"]
                 description = info.get("description", description)
             if call_fn:
                 visible_tools.append({
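
With this rename, a data store passed as a dict entry must expose its query function under the "callable" key; "call" is no longer recognized. A sketch of the expected shape, where the store name and query function are hypothetical:

    def search_my_docs(query: str, top_k: int | None = None, filters: dict | None = None):
        # hypothetical retrieval function returning a list of result dicts
        return []

    use_data_store = {
        "my_docs": {
            "callable": search_my_docs,  # this key was "call" in 1.3.0
            "description": "Searches a local document store.",
        }
    }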
@@ -1727,7 +1727,7 @@ Fill the parameters for the selected tool. If code is required, do not paste cod
         log_event("RAG call start", MSG_TYPE.MSG_TYPE_STEP, meta={"tool_name": tool_name, "query": query, "top_k": top_k, "min_similarity_percent": min_sim, "has_filters": bool(filters)})
         rag_fn = rag_registry[tool_name]
         try:
-            raw_results = rag_fn(query=query, top_k=top_k, filters=filters)
+            raw_results = rag_fn(query=query, top_k=top_k if top_k else None, filters=filters if filters else None)
         except TypeError:
             raw_results = rag_fn(query)
         docs = []
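
The updated call normalizes falsy top_k and filters values to None, so a retrieval callable can apply its own defaults, while the TypeError fallback still covers callables that only accept a positional query. A sketch of a signature compatible with both call styles; the names and the default value are illustrative:

    def rag_fn(query: str, top_k: int | None = None, filters: dict | None = None):
        top_k = top_k or 5  # store-side default when the caller passed None
        # run the search here and return a list of document dicts
        return []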

lollms_client/tti_bindings/diffusers/__init__.py CHANGED
@@ -476,6 +476,7 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
 
     def __init__(self, **kwargs):
         super().__init__(binding_name=BindingName)
+        self.manager: Optional[ModelManager] = None
         if not DIFFUSERS_AVAILABLE:
             raise ImportError("Diffusers not available. Please install required packages.")
         self.config = self.DEFAULT_CONFIG.copy()
@@ -485,7 +486,6 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         self.models_path = Path(models_path_str)
         self.models_path.mkdir(parents=True, exist_ok=True)
         self.registry = PipelineRegistry()
-        self.manager: Optional[ModelManager] = None
         self._resolve_device_and_dtype()
         if self.model_name:
             self._acquire_manager()
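
Moving the self.manager initialization to the top of __init__ means the attribute exists even when the constructor aborts early, for example when the ImportError for a missing Diffusers install is raised; presumably this protects cleanup code that inspects self.manager on a partially constructed binding from an AttributeError. A generic sketch of the pattern; the class and names are illustrative, not the binding's actual code:

    class SafeBinding:
        def __init__(self, backend_available: bool):
            self.manager = None  # set before anything that can raise
            if not backend_available:
                raise ImportError("backend not available")
            self.manager = object()  # the real manager is acquired here

        def __del__(self):
            # __del__ runs even if __init__ raised, and self.manager always exists
            if self.manager is not None:
                pass  # release resources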

{lollms_client-1.3.0.dist-info → lollms_client-1.3.1.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lollms_client
-Version: 1.3.0
+Version: 1.3.1
 Summary: A client library for LoLLMs generate endpoint
 Author-email: ParisNeo <parisneoai@gmail.com>
 License: Apache Software License

{lollms_client-1.3.0.dist-info → lollms_client-1.3.1.dist-info}/RECORD RENAMED
@@ -1,6 +1,6 @@
-lollms_client/__init__.py,sha256=SayNkzMpB6FbZcZc_162cog86cn7oSCvbV6nEaDnEq0,1146
+lollms_client/__init__.py,sha256=SMot7i85VNJMIL7Zf7trWYlvTgWYlRmwZpF8qchlIyI,1146
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
-lollms_client/lollms_core.py,sha256=4U2izPsGgm4H9GR59vsx9P9mKPT7keVWU8mwadAXU0I,171028
+lollms_client/lollms_core.py,sha256=QIsKQfSWDSD2gzVrlVZmis3VdEf4_95d4ynYGB4DIQI,171085
 lollms_client/lollms_discussion.py,sha256=4vOnXJp4nLDtL2gRmnkTB4-mjYyIHsgp35pRSJPeT9U,117527
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
 lollms_client/lollms_llm_binding.py,sha256=5-Vknm0YILPd6ZiwZynsXMfns__Yd_1tDDc2fciRiiA,25020
@@ -28,7 +28,7 @@ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4CbNYpfquVEgfsxuLsxQta_dZ
 lollms_client/llm_bindings/lollms/__init__.py,sha256=7DgTGHtrFjhRnjx0YYlNTip2p5TSV-_4GN00ekEUd3g,24855
 lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
 lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
-lollms_client/llm_bindings/ollama/__init__.py,sha256=W-4Z_lDzNA77e3xniWcPhkHGPlxwdBELVnGe-2y29uw,43587
+lollms_client/llm_bindings/ollama/__init__.py,sha256=a6cgzXPuo8ZLhIZHJFy8QF0n5ZTk0X4OC1JSyXG1enk,46013
 lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
 lollms_client/llm_bindings/openai/__init__.py,sha256=ElLbtHLwR61Uj3W6G4g6QIhxtCqUGOCQBYwhQyN60us,26142
 lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
@@ -48,7 +48,7 @@ lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY
 lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZkxJASkWm5eF07ztDQ,15363
 lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client/tti_bindings/diffusers/__init__.py,sha256=XJz42oOT3m-ek7DxlnXhbOY7_1V-9iORMNnFQRd8cu8,40092
+lollms_client/tti_bindings/diffusers/__init__.py,sha256=e1qrhiAQI_J-C_PKGIz2EEmtonw-uKAa9bw2N4qUP68,40092
 lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
 lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
 lollms_client/tti_bindings/openai/__init__.py,sha256=YWJolJSQfIzTJvrLQVe8rQewP7rddf6z87g4rnp-lTs,4932
@@ -63,8 +63,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
 lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.3.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
-lollms_client-1.3.0.dist-info/METADATA,sha256=WJ_pYuQRpgyvBPvqcsCUXmzdgpEMvCSlJqWujIs8CoY,58549
-lollms_client-1.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-lollms_client-1.3.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
-lollms_client-1.3.0.dist-info/RECORD,,
+lollms_client-1.3.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.3.1.dist-info/METADATA,sha256=vxRJoe8JCZ1v_mnehbmuOTYEzDLjlWVYKk2hL9chuS8,58549
+lollms_client-1.3.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.3.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.3.1.dist-info/RECORD,,