lollms-client 1.6.4__tar.gz → 1.6.5__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lollms-client has been flagged as potentially problematic; consult the registry's advisory page for more details.

Files changed (95)
  1. {lollms_client-1.6.4/src/lollms_client.egg-info → lollms_client-1.6.5}/PKG-INFO +1 -1
  2. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/__init__.py +1 -1
  3. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_core.py +3 -1
  4. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/diffusers/__init__.py +104 -57
  5. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/diffusers/server/main.py +264 -112
  6. lollms_client-1.6.5/src/lollms_client/tti_bindings/gemini/__init__.py +260 -0
  7. {lollms_client-1.6.4 → lollms_client-1.6.5/src/lollms_client.egg-info}/PKG-INFO +1 -1
  8. lollms_client-1.6.4/src/lollms_client/tti_bindings/gemini/__init__.py +0 -320
  9. {lollms_client-1.6.4 → lollms_client-1.6.5}/LICENSE +0 -0
  10. {lollms_client-1.6.4 → lollms_client-1.6.5}/README.md +0 -0
  11. {lollms_client-1.6.4 → lollms_client-1.6.5}/pyproject.toml +0 -0
  12. {lollms_client-1.6.4 → lollms_client-1.6.5}/setup.cfg +0 -0
  13. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
  14. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/__init__.py +0 -0
  15. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
  16. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
  17. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
  18. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
  19. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
  20. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
  21. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
  22. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
  23. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
  24. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
  25. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
  26. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
  27. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
  28. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
  29. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
  30. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
  31. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
  32. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
  33. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
  34. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
  35. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
  36. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
  37. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_agentic.py +0 -0
  38. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_config.py +0 -0
  39. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_discussion.py +0 -0
  40. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_js_analyzer.py +0 -0
  41. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_llm_binding.py +0 -0
  42. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_mcp_binding.py +0 -0
  43. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_mcp_security.py +0 -0
  44. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_personality.py +0 -0
  45. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_python_analyzer.py +0 -0
  46. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_stt_binding.py +0 -0
  47. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_tti_binding.py +0 -0
  48. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_ttm_binding.py +0 -0
  49. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_tts_binding.py +0 -0
  50. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_ttv_binding.py +0 -0
  51. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_types.py +0 -0
  52. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/lollms_utilities.py +0 -0
  53. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
  54. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
  55. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
  56. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
  57. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
  58. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
  59. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
  60. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/stt_bindings/__init__.py +0 -0
  61. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
  62. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
  63. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
  64. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/__init__.py +0 -0
  65. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
  66. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
  67. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
  68. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
  69. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
  70. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/__init__.py +0 -0
  71. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
  72. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
  73. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
  74. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
  75. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
  76. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
  77. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/__init__.py +0 -0
  78. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
  79. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
  80. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
  81. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
  82. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
  83. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
  84. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
  85. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
  86. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/xtts/__init__.py +0 -0
  87. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/xtts/server/main.py +0 -0
  88. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
  89. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttv_bindings/__init__.py +0 -0
  90. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
  91. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client.egg-info/SOURCES.txt +0 -0
  92. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client.egg-info/dependency_links.txt +0 -0
  93. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client.egg-info/requires.txt +0 -0
  94. {lollms_client-1.6.4 → lollms_client-1.6.5}/src/lollms_client.egg-info/top_level.txt +0 -0
  95. {lollms_client-1.6.4 → lollms_client-1.6.5}/test/test_lollms_discussion.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: lollms_client
3
- Version: 1.6.4
3
+ Version: 1.6.5
4
4
  Summary: A client library for LoLLMs generate endpoint
5
5
  Author-email: ParisNeo <parisneoai@gmail.com>
6
6
  License: Apache License
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
8
8
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
9
9
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
10
10
 
11
- __version__ = "1.6.4" # Updated version
11
+ __version__ = "1.6.5" # Updated version
12
12
 
13
13
  # Optionally, you could define __all__ if you want to be explicit about exports
14
14
  __all__ = [
@@ -1,4 +1,6 @@
1
1
  # lollms_client/lollms_core.py
2
+ # author: ParisNeo
3
+ # description: LollmsClient definition file
2
4
  import requests
3
5
  from ascii_colors import ASCIIColors, trace_exception
4
6
  from lollms_client.lollms_types import MSG_TYPE, ELF_COMPLETION_FORMAT
@@ -519,7 +521,7 @@ class LollmsClient():
519
521
  Union[str, dict]: Generated text or error dictionary if failed.
520
522
  """
521
523
  if self.llm:
522
-
524
+ images = [str(image) for image in images] if images else None
523
525
  ctx_size = ctx_size if ctx_size is not None else self.llm.default_ctx_size if self.llm.default_ctx_size else None
524
526
  if ctx_size is None:
525
527
  ctx_size = self.llm.get_ctx_size()
@@ -44,15 +44,16 @@ class DiffusersBinding(LollmsTTIBinding):
44
44
  kwargs['model_name'] = kwargs.pop('model')
45
45
 
46
46
  self.config = kwargs
47
- self.host = kwargs.get("host", "localhost")
47
+ self.host = kwargs.get("host", "localhost")
48
48
  self.port = kwargs.get("port", 9630)
49
49
  self.auto_start_server = kwargs.get("auto_start_server", True)
50
50
  self.server_process = None
51
51
  self.base_url = f"http://{self.host}:{self.port}"
52
52
  self.binding_root = Path(__file__).parent
53
53
  self.server_dir = self.binding_root / "server"
54
- self.venv_dir = Path("./venv/tti_diuffusers_venv")
55
- self.models_path = Path(kwargs.get("models_path", "./diffusers_models")).resolve()
54
+ self.venv_dir = Path("./venv/tti_diffusers_venv")
55
+ self.models_path = Path(kwargs.get("models_path", "./data/models/diffusers_models")).resolve()
56
+ self.models_path.mkdir(exist_ok=True, parents=True)
56
57
  if self.auto_start_server:
57
58
  self.ensure_server_is_running()
58
59
 
@@ -66,25 +67,35 @@ class DiffusersBinding(LollmsTTIBinding):
66
67
  return False
67
68
  return False
68
69
 
69
- def ensure_server_is_running(self):
70
+
71
+ def ensure_server_is_running(self, continue_if_locked: bool = True):
70
72
  """
71
73
  Ensures the Diffusers server is running. If not, it attempts to start it
72
74
  in a process-safe manner using a file lock.
75
+
76
+ Args:
77
+ continue_if_locked (bool): If True, return immediately if another process
78
+ already holds the lock.
73
79
  """
74
80
  self.server_dir.mkdir(exist_ok=True)
75
- lock_path = self.server_dir / "diffusers_server.lock"
76
- lock = FileLock(lock_path, timeout=60) # Increased timeout for long installs
81
+ lock_path = self.models_path / "diffusers_server.lock"
82
+ lock = FileLock(lock_path)
77
83
 
78
84
  ASCIIColors.info("Attempting to start or connect to the Diffusers server...")
79
85
  try:
80
- with lock:
86
+ # Try to acquire lock immediately if continue_if_locked=True
87
+ with lock.acquire(timeout=0 if continue_if_locked else 60):
81
88
  if not self.is_server_running():
82
89
  ASCIIColors.yellow("Lock acquired. Starting dedicated Diffusers server...")
83
90
  self.start_server()
84
91
  else:
85
92
  ASCIIColors.green("Server was started by another process. Connected successfully.")
86
93
  except Timeout:
87
- ASCIIColors.yellow("Could not acquire lock. Another process is likely starting the server. Waiting for it to become available...")
94
+ if continue_if_locked:
95
+ ASCIIColors.yellow("Lock held by another process. Skipping server startup and continuing execution.")
96
+ return
97
+ else:
98
+ ASCIIColors.yellow("Could not acquire lock within timeout. Waiting for server to become available...")
88
99
 
89
100
  self._wait_for_server()
90
101
 
@@ -97,6 +108,24 @@ class DiffusersBinding(LollmsTTIBinding):
97
108
  pm_v = pm.PackageManager(venv_path=str(self.venv_dir))
98
109
 
99
110
  # --- PyTorch Installation ---
111
+ ASCIIColors.info(f"Installing server dependencies")
112
+ pm_v.ensure_packages([
113
+ "requests", "uvicorn", "fastapi", "python-multipart", "filelock"
114
+ ])
115
+ ASCIIColors.info(f"Installing parisneo libraries")
116
+ pm_v.ensure_packages([
117
+ "ascii_colors","pipmaster"
118
+ ])
119
+ ASCIIColors.info(f"Installing misc libraries (numpy, tqdm...)")
120
+ pm_v.ensure_packages([
121
+ "tqdm", "numpy"
122
+ ])
123
+ ASCIIColors.info(f"Installing Pillow")
124
+ pm_v.ensure_packages([
125
+ "pillow"
126
+ ])
127
+
128
+ ASCIIColors.info(f"Installing pytorch")
100
129
  torch_index_url = None
101
130
  if sys.platform == "win32":
102
131
  try:
@@ -104,21 +133,27 @@ class DiffusersBinding(LollmsTTIBinding):
104
133
  result = subprocess.run(["nvidia-smi"], capture_output=True, text=True, check=True)
105
134
  ASCIIColors.green("NVIDIA GPU detected. Installing CUDA-enabled PyTorch.")
106
135
  # Using a common and stable CUDA version. Adjust if needed.
107
- torch_index_url = "https://download.pytorch.org/whl/cu121"
136
+ torch_index_url = "https://download.pytorch.org/whl/cu128"
108
137
  except (FileNotFoundError, subprocess.CalledProcessError):
109
138
  ASCIIColors.yellow("`nvidia-smi` not found or failed. Installing standard PyTorch. If you have an NVIDIA GPU, please ensure drivers are installed and in PATH.")
110
-
139
+
111
140
  # Base packages including torch. pm.ensure_packages handles verbose output.
112
141
  pm_v.ensure_packages(["torch", "torchvision"], index_url=torch_index_url)
113
142
 
114
-
115
143
  # Standard dependencies
144
+ ASCIIColors.info(f"Installing transformers dependencies")
116
145
  pm_v.ensure_packages([
117
- "pillow", "transformers", "safetensors", "requests", "tqdm", "numpy",
118
- "accelerate", "uvicorn", "fastapi", "python-multipart", "filelock", "ascii_colors"
146
+ "transformers", "safetensors", "accelerate"
119
147
  ])
120
-
148
+ ASCIIColors.info(f"[Optional] Installing xformers")
149
+ try:
150
+ pm_v.ensure_packages([
151
+ "xformers"
152
+ ])
153
+ except:
154
+ pass
121
155
  # Git-based diffusers to get the latest version
156
+ ASCIIColors.info(f"Installing diffusers library from github")
122
157
  pm_v.ensure_packages([
123
158
  {
124
159
  "name": "diffusers",
@@ -127,14 +162,6 @@ class DiffusersBinding(LollmsTTIBinding):
127
162
  }
128
163
  ])
129
164
 
130
- # XFormers (optional but recommended for NVIDIA)
131
- if torch_index_url: # Only try to install xformers if CUDA is likely present
132
- try:
133
- ASCIIColors.info("Attempting to install xformers for performance optimization...")
134
- pm_v.ensure_packages(["xformers"], upgrade=True)
135
- except Exception as e:
136
- ASCIIColors.warning(f"Could not install xformers. It's optional but recommended for performance on NVIDIA GPUs. Error: {e}")
137
-
138
165
  ASCIIColors.green("Server dependencies are satisfied.")
139
166
 
140
167
  def start_server(self):
@@ -167,7 +194,7 @@ class DiffusersBinding(LollmsTTIBinding):
167
194
  # Use DETACHED_PROCESS on Windows to allow the server to run independently of the parent process.
168
195
  # On Linux/macOS, the process will be daemonized enough to not be killed with the worker.
169
196
  creationflags = subprocess.DETACHED_PROCESS if sys.platform == "win32" else 0
170
-
197
+
171
198
  self.server_process = subprocess.Popen(command, creationflags=creationflags)
172
199
  ASCIIColors.info("Diffusers server process launched in the background.")
173
200
 
@@ -191,11 +218,11 @@ class DiffusersBinding(LollmsTTIBinding):
191
218
  time.sleep(2)
192
219
  raise RuntimeError("Failed to connect to the Diffusers server within the specified timeout.")
193
220
 
194
- def _post_request(self, endpoint: str, data: Optional[dict] = None, files: Optional[dict] = None) -> requests.Response:
195
- """Helper to make POST requests to the server."""
221
+ def _post_json_request(self, endpoint: str, data: Optional[dict] = None) -> requests.Response:
222
+ """Helper to make POST requests with a JSON body."""
196
223
  try:
197
224
  url = f"{self.base_url}{endpoint}"
198
- response = requests.post(url, json=data, files=files, timeout=3600) # Long timeout for generation
225
+ response = requests.post(url, json=data, timeout=3600) # Long timeout for generation
199
226
  response.raise_for_status()
200
227
  return response
201
228
  except requests.exceptions.RequestException as e:
@@ -208,6 +235,24 @@ class DiffusersBinding(LollmsTTIBinding):
208
235
  ASCIIColors.error(f"Server raw response: {e.response.text}")
209
236
  raise RuntimeError("Communication with the Diffusers server failed.") from e
210
237
 
238
+ def _post_multipart_request(self, endpoint: str, data: Optional[dict] = None, files: Optional[list] = None) -> requests.Response:
239
+ """Helper to make multipart/form-data POST requests for file uploads."""
240
+ try:
241
+ url = f"{self.base_url}{endpoint}"
242
+ response = requests.post(url, data=data, files=files, timeout=3600)
243
+ response.raise_for_status()
244
+ return response
245
+ except requests.exceptions.RequestException as e:
246
+ # (Error handling is the same as above)
247
+ ASCIIColors.error(f"Failed to communicate with Diffusers server at {url}.")
248
+ ASCIIColors.error(f"Error details: {e}")
249
+ if hasattr(e, 'response') and e.response:
250
+ try:
251
+ ASCIIColors.error(f"Server response: {e.response.json().get('detail', e.response.text)}")
252
+ except json.JSONDecodeError:
253
+ ASCIIColors.error(f"Server raw response: {e.response.text}")
254
+ raise RuntimeError("Communication with the Diffusers server failed.") from e
255
+
211
256
  def _get_request(self, endpoint: str, params: Optional[dict] = None) -> requests.Response:
212
257
  """Helper to make GET requests to the server."""
213
258
  try:
@@ -222,13 +267,14 @@ class DiffusersBinding(LollmsTTIBinding):
222
267
  def unload_model(self):
223
268
  ASCIIColors.info("Requesting server to unload the current model...")
224
269
  try:
225
- self._post_request("/unload_model")
270
+ self._post_json_request("/unload_model")
226
271
  except Exception as e:
227
272
  ASCIIColors.warning(f"Could not send unload request to server: {e}")
228
273
  pass
229
274
 
230
275
  def generate_image(self, prompt: str, negative_prompt: str = "", **kwargs) -> bytes:
231
- response = self._post_request("/generate_image", data={
276
+ # This is a pure JSON request
277
+ response = self._post_json_request("/generate_image", data={
232
278
  "prompt": prompt,
233
279
  "negative_prompt": negative_prompt,
234
280
  "params": kwargs
@@ -236,55 +282,56 @@ class DiffusersBinding(LollmsTTIBinding):
236
282
  return response.content
237
283
 
238
284
  def edit_image(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], prompt: str, **kwargs) -> bytes:
239
- files = {}
240
- image_paths = []
241
-
285
+ images_b64 = []
242
286
  if not isinstance(images, list):
243
287
  images = [images]
244
288
 
245
- for i, img in enumerate(images):
246
- if hasattr(img, 'save'): # PIL Image
289
+
290
+ for img in images:
291
+ # Case 1: Input is a PIL Image object
292
+ if hasattr(img, 'save'):
247
293
  buffer = BytesIO()
248
294
  img.save(buffer, format="PNG")
249
- buffer.seek(0)
250
- files[f"image_{i}"] = (f"image_{i}.png", buffer, "image/png")
251
- elif isinstance(img, str) and Path(img).is_file():
252
- # The server will load this path directly
253
- image_paths.append(img)
254
- elif isinstance(img, str): # Handle base64 strings
295
+ b64_string = base64.b64encode(buffer.getvalue()).decode('utf-8')
296
+ images_b64.append(b64_string)
297
+
298
+ # Case 2: Input is a string (could be path or already base64)
299
+ elif isinstance(img, str):
255
300
  try:
256
- # Simple base64 check
257
- if img.startswith("data:image/") and ";base64," in img:
258
- b64_data = img.split(";base64,")[1]
259
- img_bytes = base64.b64decode(b64_data)
260
- files[f"image_{i}"] = (f"image_{i}.png", img_bytes, "image/png")
301
+ b64_string = img.split(";base64,")[1] if ";base64," in img else img
302
+ base64.b64decode(b64_string) # Validate
303
+ images_b64.append(b64_string)
261
304
  except Exception:
262
- raise ValueError(f"Unsupported string image format in edit_image: {img[:100]}")
305
+ ASCIIColors.warning(f"Warning: A string input was not a valid file path or base64. Skipping.")
263
306
  else:
264
- raise ValueError(f"Unsupported image type in edit_image: {type(img)}")
307
+ raise ValueError(f"Unsupported image type in edit_image: {type(img)}")
308
+ if not images_b64:
309
+ raise ValueError("No valid images were provided to the edit_image function.")
310
+
311
+ # Translate "mask" to "mask_image" for server compatibility
312
+ if "mask" in kwargs and kwargs["mask"]:
313
+ kwargs["mask_image"] = kwargs.pop("mask")
265
314
 
266
- data_payload = {
315
+ json_payload = {
267
316
  "prompt": prompt,
268
- "image_paths": image_paths,
317
+ "images_b64": images_b64,
269
318
  "params": kwargs
270
319
  }
271
-
272
- # FastAPI needs separate form fields for json and files
273
- response = self._post_request("/edit_image", data={"json_payload": json.dumps(data_payload)}, files=files)
320
+ response = self._post_json_request("/edit_image", data=json_payload)
274
321
  return response.content
275
-
322
+
276
323
  def list_models(self) -> List[Dict[str, Any]]:
277
324
  return self._get_request("/list_models").json()
278
325
 
279
326
  def list_local_models(self) -> List[str]:
280
327
  return self._get_request("/list_local_models").json()
281
-
328
+
282
329
  def list_available_models(self) -> List[str]:
283
330
  return self._get_request("/list_available_models").json()
284
-
331
+
285
332
  def list_services(self, **kwargs) -> List[Dict[str, str]]:
286
333
  return self._get_request("/list_models").json()
287
-
334
+
288
335
  def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
289
336
  # The server holds the state, so we fetch it.
290
337
  return self._get_request("/get_settings").json()
@@ -292,7 +339,7 @@ class DiffusersBinding(LollmsTTIBinding):
292
339
  def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
293
340
  # Normalize settings from list of dicts to a single dict if needed
294
341
  parsed_settings = settings if isinstance(settings, dict) else {s["name"]: s["value"] for s in settings if "name" in s and "value" in s}
295
- response = self._post_request("/set_settings", data=parsed_settings)
342
+ response = self._post_json_request("/set_settings", data=parsed_settings)
296
343
  return response.json().get("success", False)
297
344
 
298
345
  def ps(self) -> List[dict]:
@@ -304,4 +351,4 @@ class DiffusersBinding(LollmsTTIBinding):
304
351
  def __del__(self):
305
352
  # The client destructor does not stop the server,
306
353
  # as it is a shared resource for all worker processes.
307
- pass
354
+ pass