lollms-client 1.1.0__py3-none-any.whl → 1.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of lollms-client might be problematic.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager

- __version__ = "1.1.0" # Updated version
+ __version__ = "1.1.2" # Updated version

  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -434,7 +434,7 @@ class OllamaBinding(LollmsLLMBinding):
              list: List of individual characters.
          """
          ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
-         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text)
+         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text, disallowed_special=())

      def detokenize(self, tokens: list) -> str:
          """
lollms_client/lollms_llm_binding.py CHANGED
@@ -373,7 +373,8 @@ class LollmsLLMBinding(ABC):
          if messages[-1]["content"]=="":
              del messages[-1]
          return messages
-
+     def ps(self):
+         return []


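The base class gains a `ps()` stub that returns an empty list, so callers can ask any binding for its running models without an `AttributeError`. A concrete binding can override it; a hypothetical sketch (the dict layout is illustrative, not a documented schema, and it assumes the binding tracks a `model_name` attribute):

    class MyBinding(LollmsLLMBinding):
        def ps(self):
            # Report whatever "running models" means for this backend.
            return [{"model": self.model_name, "status": "loaded"}]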
lollms_client/tti_bindings/diffusers/__init__.py CHANGED
@@ -4,32 +4,142 @@ import importlib
  from io import BytesIO
  from typing import Optional, List, Dict, Any, Union
  from pathlib import Path
+ import pipmaster as pm
+ # --- Concurrency Imports ---
+ import threading
+ import queue
+ from concurrent.futures import Future
+ import time
+ import hashlib
+ import re
+ # -------------------------
+ # --- Download Imports ---
+ import requests
+ from tqdm import tqdm
+ # --------------------
+
+ pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
+ pm.ensure_packages(["diffusers","pillow","transformers","safetensors", "requests", "tqdm"])

  # Attempt to import core dependencies and set availability flag
  try:
      import torch
-     from diffusers import AutoPipelineForText2Image, DiffusionPipeline
-     from diffusers.utils import load_image # Potentially for future img2img etc.
+     from diffusers import AutoPipelineForText2Image, DiffusionPipeline, StableDiffusionPipeline
+     from diffusers.utils import load_image
      from PIL import Image
      DIFFUSERS_AVAILABLE = True
  except ImportError:
      torch = None
      AutoPipelineForText2Image = None
      DiffusionPipeline = None
+     StableDiffusionPipeline = None
      Image = None
      load_image = None
      DIFFUSERS_AVAILABLE = False
-     # A detailed error will be raised in __init__ if the user tries to use the binding.

  from lollms_client.lollms_tti_binding import LollmsTTIBinding
  from ascii_colors import trace_exception, ASCIIColors
- import json # For potential JSONDecodeError and settings
+ import json
  import shutil

  # Defines the binding name for the manager
  BindingName = "DiffusersTTIBinding_Impl"

- # Helper for torch.dtype string conversion, handles case where torch is not installed
+ # --- START: Civitai Model Definitions ---
+ # Expanded list of popular Civitai models (as single .safetensors files)
+ CIVITAI_MODELS = {
+     # --- Photorealistic ---
+     "realistic-vision-v6": {
+         "display_name": "Realistic Vision V6.0",
+         "url": "https://civitai.com/api/download/models/130072",
+         "filename": "realisticVisionV60_v60B1.safetensors",
+         "description": "One of the most popular photorealistic models.",
+         "owned_by": "civitai"
+     },
+     "absolute-reality": {
+         "display_name": "Absolute Reality",
+         "url": "https://civitai.com/api/download/models/132760",
+         "filename": "absolutereality_v181.safetensors",
+         "description": "A top-tier model for generating realistic images.",
+         "owned_by": "civitai"
+     },
+     # --- General / Artistic ---
+     "dreamshaper-8": {
+         "display_name": "DreamShaper 8",
+         "url": "https://civitai.com/api/download/models/128713",
+         "filename": "dreamshaper_8.safetensors",
+         "description": "A very popular and versatile general-purpose model.",
+         "owned_by": "civitai"
+     },
+     "juggernaut-xl": {
+         "display_name": "Juggernaut XL",
+         "url": "https://civitai.com/api/download/models/133005",
+         "filename": "juggernautXL_version6Rundiffusion.safetensors",
+         "description": "High-quality artistic model, great for cinematic styles (SDXL-based).",
+         "owned_by": "civitai"
+     },
+     "lyriel-v1.6": {
+         "display_name": "Lyriel v1.6",
+         "url": "https://civitai.com/api/download/models/92407",
+         "filename": "lyriel_v16.safetensors",
+         "description": "A popular artistic model for fantasy and stylized images.",
+         "owned_by": "civitai"
+     },
+     # --- Anime / Illustration ---
+     "anything-v5": {
+         "display_name": "Anything V5",
+         "url": "https://civitai.com/api/download/models/9409",
+         "filename": "anythingV5_PrtRE.safetensors",
+         "description": "A classic and highly popular model for anime-style generation.",
+         "owned_by": "civitai"
+     },
+     "meinamix": {
+         "display_name": "MeinaMix",
+         "url": "https://civitai.com/api/download/models/119057",
+         "filename": "meinamix_meinaV11.safetensors",
+         "description": "A highly popular model for generating illustrative and vibrant anime-style images.",
+         "owned_by": "civitai"
+     },
+     # --- Game Assets & Specialized Styles ---
+     "rpg-v5": {
+         "display_name": "RPG v5",
+         "url": "https://civitai.com/api/download/models/137379",
+         "filename": "rpg_v5.safetensors",
+         "description": "Specialized in generating fantasy characters and assets in the style of classic RPGs.",
+         "owned_by": "civitai"
+     },
+     "pixel-art-xl": {
+         "display_name": "Pixel Art XL",
+         "url": "https://civitai.com/api/download/models/252919",
+         "filename": "pixelartxl_v11.safetensors",
+         "description": "A dedicated SDXL model for generating high-quality pixel art sprites and scenes.",
+         "owned_by": "civitai"
+     },
+     "lowpoly-world": {
+         "display_name": "Lowpoly World",
+         "url": "https://civitai.com/api/download/models/90299",
+         "filename": "lowpoly_world_v10.safetensors",
+         "description": "Generates assets and scenes with a stylized low-polygon, 3D render aesthetic.",
+         "owned_by": "civitai"
+     },
+     "toonyou": {
+         "display_name": "ToonYou",
+         "url": "https://civitai.com/api/download/models/152361",
+         "filename": "toonyou_beta6.safetensors",
+         "description": "Excellent for creating expressive, high-quality cartoon and Disney-style characters.",
+         "owned_by": "civitai"
+     },
+     "papercut": {
+         "display_name": "Papercut",
+         "url": "https://civitai.com/api/download/models/45579",
+         "filename": "papercut_v1.safetensors",
+         "description": "Creates unique images with a distinct paper cutout and layered diorama style.",
+         "owned_by": "civitai"
+     }
+ }
+ # --- END: Civitai Model Definitions ---
+
+ # Helper for torch.dtype string conversion
  TORCH_DTYPE_MAP_STR_TO_OBJ = {
      "float16": getattr(torch, 'float16', 'float16'),
      "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
@@ -37,210 +147,214 @@ TORCH_DTYPE_MAP_STR_TO_OBJ = {
      "auto": "auto"
  }
  TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
- if torch: # Add None mapping if torch is loaded
+ if torch:
      TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"

-
- # Common Schedulers mapping (User-friendly name to Class name)
+ # Common Schedulers mapping
  SCHEDULER_MAPPING = {
-     "default": None, # Use model's default
-     "ddim": "DDIMScheduler",
-     "ddpm": "DDPMScheduler",
-     "deis_multistep": "DEISMultistepScheduler",
-     "dpm_multistep": "DPMSolverMultistepScheduler",
-     "dpm_multistep_karras": "DPMSolverMultistepScheduler",
-     "dpm_single": "DPMSolverSinglestepScheduler",
-     "dpm_adaptive": "DPMSolverPlusPlusScheduler",
-     "dpm++_2m": "DPMSolverMultistepScheduler",
-     "dpm++_2m_karras": "DPMSolverMultistepScheduler",
-     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler",
-     "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
-     "dpm++_sde": "DPMSolverSDEScheduler",
-     "dpm++_sde_karras": "DPMSolverSDEScheduler",
-     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler",
-     "euler_discrete": "EulerDiscreteScheduler",
-     "heun_discrete": "HeunDiscreteScheduler",
-     "heun_karras": "HeunDiscreteScheduler",
-     "lms_discrete": "LMSDiscreteScheduler",
-     "lms_karras": "LMSDiscreteScheduler",
-     "pndm": "PNDMScheduler",
-     "unipc_multistep": "UniPCMultistepScheduler",
+     "default": None, "ddim": "DDIMScheduler", "ddpm": "DDPMScheduler", "deis_multistep": "DEISMultistepScheduler",
+     "dpm_multistep": "DPMSolverMultistepScheduler", "dpm_multistep_karras": "DPMSolverMultistepScheduler",
+     "dpm_single": "DPMSolverSinglestepScheduler", "dpm_adaptive": "DPMSolverPlusPlusScheduler",
+     "dpm++_2m": "DPMSolverMultistepScheduler", "dpm++_2m_karras": "DPMSolverMultistepScheduler",
+     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler", "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
+     "dpm++_sde": "DPMSolverSDEScheduler", "dpm++_sde_karras": "DPMSolverSDEScheduler",
+     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler", "euler_discrete": "EulerDiscreteScheduler",
+     "heun_discrete": "HeunDiscreteScheduler", "heun_karras": "HeunDiscreteScheduler",
+     "lms_discrete": "LMSDiscreteScheduler", "lms_karras": "LMSDiscreteScheduler",
+     "pndm": "PNDMScheduler", "unipc_multistep": "UniPCMultistepScheduler",
  }
  SCHEDULER_USES_KARRAS_SIGMAS = [
      "dpm_multistep_karras", "dpm++_2m_karras", "dpm++_2s_ancestral_karras",
      "dpm++_sde_karras", "heun_karras", "lms_karras"
  ]

+ # --- START: Concurrency and Singleton Management ---

- class DiffusersTTIBinding_Impl(LollmsTTIBinding):
+ class ModelManager:
      """
-     Concrete implementation of LollmsTTIBinding for Hugging Face Diffusers library.
-     Allows running various text-to-image models locally.
+     Manages a single pipeline instance, its generation queue, and a worker thread.
+     This ensures all interactions with a specific model are thread-safe.
      """
-     DEFAULT_CONFIG = {
-         "model_name": "",
-         "device": "auto",
-         "torch_dtype_str": "auto",
-         "use_safetensors": True,
-         "scheduler_name": "default",
-         "safety_checker_on": True,
-         "num_inference_steps": 25,
-         "guidance_scale": 7.5,
-         "default_width": 768,
-         "default_height": 768,
-         "seed": -1,
-         "enable_cpu_offload": False,
-         "enable_sequential_cpu_offload": False,
-         "enable_xformers": False,
-         "hf_variant": None,
-         "hf_token": None,
-         "hf_cache_path": None,
-         "local_files_only": False,
-     }
-
-     def __init__(self, **kwargs):
-         """
-         Initialize the Diffusers TTI binding.
-
-         Args:
-             **kwargs: A dictionary of configuration parameters.
-                 Expected keys:
-                 - model_name (str): The name of the model to use. Can be a Hugging Face Hub ID
-                   (e.g., 'stabilityai/stable-diffusion-xl-base-1.0') or the name of a local
-                   model directory located in `models_path`.
-                 - models_path (str or Path): The path to the directory where local models are stored.
-                   Defaults to a 'models' folder next to this file.
-                 - hf_cache_path (str or Path, optional): Path to a directory for Hugging Face
-                   to cache downloaded models and files.
-                 - Other settings from the DEFAULT_CONFIG can be overridden here.
-         """
-         super().__init__(binding_name=BindingName)
-
-         if not DIFFUSERS_AVAILABLE:
-             raise ImportError(
-                 "Diffusers library or its dependencies (torch, Pillow, transformers) are not installed. "
-                 "Please install them using: pip install torch diffusers Pillow transformers safetensors"
-             )
-
-         # Merge default config with user-provided kwargs
-         self.config = {**self.DEFAULT_CONFIG, **kwargs}
-
-         # model_name is crucial, get it from the merged config
-         self.model_name = self.config.get("model_name", "")
-
-         # models_path is also special, handle it with its default logic
-         self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
-         self.models_path.mkdir(parents=True, exist_ok=True)
-
+     def __init__(self, config: Dict[str, Any], models_path: Path):
+         self.config = config
+         self.models_path = models_path
          self.pipeline: Optional[DiffusionPipeline] = None
-         self.current_model_id_or_path = None
-
-         self._resolve_device_and_dtype()
+         self.ref_count = 0
+         self.lock = threading.Lock()
+         self.queue = queue.Queue()
+         self.worker_thread = threading.Thread(target=self._generation_worker, daemon=True)
+         self._stop_event = threading.Event()
+         self.is_loaded = False

-         if self.model_name:
-             self.load_model()
-         else:
-             ASCIIColors.warning("No model_name provided during initialization. The binding is idle.")
+         self.worker_thread.start()

+     def acquire(self):
+         with self.lock:
+             self.ref_count += 1
+             return self

-     def _resolve_device_and_dtype(self):
-         """Resolves auto settings for device and dtype from config."""
-         if self.config["device"].lower() == "auto":
-             if torch.cuda.is_available():
-                 self.config["device"] = "cuda"
-             elif torch.backends.mps.is_available():
-                 self.config["device"] = "mps"
-             else:
-                 self.config["device"] = "cpu"
+     def release(self):
+         with self.lock:
+             self.ref_count -= 1
+             return self.ref_count

-         if self.config["torch_dtype_str"].lower() == "auto":
-             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
+     def stop(self):
+         self._stop_event.set()
+         self.queue.put(None)
+         self.worker_thread.join(timeout=5)

-         self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-         if self.torch_dtype == "auto": # Final fallback
-             self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
-         self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
+     def _load_pipeline(self):
+         if self.pipeline:
+             return

-     def _resolve_model_path(self, model_name: str) -> str:
-         """
-         Resolves a model name to a full path if it's a local model,
-         otherwise returns it as is (assuming it's a Hugging Face Hub ID).
-         """
+         model_name = self.config.get("model_name", "")
          if not model_name:
-             raise ValueError("Model name cannot be empty.")
+             raise ValueError("Model name cannot be empty for loading.")

-         if Path(model_name).is_absolute() and Path(model_name).is_dir():
-             ASCIIColors.info(f"Using absolute path for model: {model_name}")
-             return model_name
-
-         local_model_path = self.models_path / model_name
-         if local_model_path.exists() and local_model_path.is_dir():
-             ASCIIColors.info(f"Found local model in '{self.models_path}': {local_model_path}")
-             return str(local_model_path)
+         ASCIIColors.info(f"Loading Diffusers model: {model_name}")
+         model_path = self._resolve_model_path(model_name)
+         torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower())

-         ASCIIColors.info(f"'{model_name}' not found locally. Assuming it is a Hugging Face Hub ID.")
-         return model_name
+         try:
+             if str(model_path).endswith(".safetensors"):
+                 ASCIIColors.info(f"Loading from single safetensors file: {model_path}")
+                 try:
+                     # Modern, preferred method for newer diffusers versions
+                     self.pipeline = AutoPipelineForText2Image.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+                 except AttributeError:
+                     # Fallback for older diffusers versions
+                     ASCIIColors.warning("AutoPipelineForText2Image.from_single_file not found. Falling back to StableDiffusionPipeline.")
+                     ASCIIColors.warning("Consider updating diffusers for better compatibility: pip install --upgrade diffusers")
+                     self.pipeline = StableDiffusionPipeline.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+             else:
+                 ASCIIColors.info(f"Loading from pretrained folder/repo: {model_path}")
+                 load_args = {
+                     "torch_dtype": torch_dtype, "use_safetensors": self.config["use_safetensors"],
+                     "token": self.config["hf_token"], "local_files_only": self.config["local_files_only"],
+                 }
+                 if self.config["hf_variant"]: load_args["variant"] = self.config["hf_variant"]
+                 if not self.config["safety_checker_on"]: load_args["safety_checker"] = None
+                 if self.config.get("hf_cache_path"): load_args["cache_dir"] = str(self.config["hf_cache_path"])
+                 self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)

-     def load_model(self):
-         """Loads the Diffusers pipeline based on current configuration."""
-         ASCIIColors.info("Loading Diffusers model...")
-         if self.pipeline is not None:
-             self.unload_model()
+         except Exception as e:
+             error_str = str(e).lower()
+             if "401" in error_str or "gated" in error_str or "authorization" in error_str:
+                 auth_error_msg = (
+                     f"AUTHENTICATION FAILED for model '{model_name}'. This is likely a 'gated' model on Hugging Face.\n"
+                     "Please ensure you have accepted its license and provided a valid HF Access Token in the settings."
+                 )
+                 raise RuntimeError(auth_error_msg) from e
+             else:
+                 raise e

-         try:
-             model_path = self._resolve_model_path(self.model_name)
-             self.current_model_id_or_path = model_path
-
-             load_args = {
-                 "torch_dtype": self.torch_dtype,
-                 "use_safetensors": self.config["use_safetensors"],
-                 "token": self.config["hf_token"],
-                 "local_files_only": self.config["local_files_only"],
-             }
-             if self.config["hf_variant"]:
-                 load_args["variant"] = self.config["hf_variant"]
-
-             if not self.config["safety_checker_on"]:
-                 load_args["safety_checker"] = None
-
-             if self.config.get("hf_cache_path"):
-                 load_args["cache_dir"] = str(self.config["hf_cache_path"])
+         self._set_scheduler()
+         self.pipeline.to(self.config["device"])

-             self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
-
-             self._set_scheduler()
+         if self.config["enable_xformers"]:
+             try:
+                 self.pipeline.enable_xformers_memory_efficient_attention()
+             except Exception as e:
+                 ASCIIColors.warning(f"Could not enable xFormers: {e}.")
+
+         if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_model_cpu_offload()
+         elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_sequential_cpu_offload()
+
+         self.is_loaded = True
+         ASCIIColors.green(f"Model '{model_name}' loaded successfully on '{self.config['device']}'.")

-             self.pipeline.to(self.config["device"])
+     def _unload_pipeline(self):
+         if self.pipeline:
+             del self.pipeline
+             self.pipeline = None
+             if torch and torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+         self.is_loaded = False
+         ASCIIColors.info(f"Model '{self.config.get('model_name')}' unloaded.")

-             if self.config["enable_xformers"]:
+     def _generation_worker(self):
+         while not self._stop_event.is_set():
+             try:
+                 job = self.queue.get(timeout=1)
+                 if job is None:
+                     break
+                 future, pipeline_args = job
                  try:
-                     self.pipeline.enable_xformers_memory_efficient_attention()
-                     ASCIIColors.info("xFormers memory efficient attention enabled.")
+                     with self.lock:
+                         if not self.pipeline:
+                             self._load_pipeline()
+                         with torch.no_grad():
+                             pipeline_output = self.pipeline(**pipeline_args)
+                     pil_image: Image.Image = pipeline_output.images[0]
+                     img_byte_arr = BytesIO()
+                     pil_image.save(img_byte_arr, format="PNG")
+                     future.set_result(img_byte_arr.getvalue())
                  except Exception as e:
-                     ASCIIColors.warning(f"Could not enable xFormers: {e}. Proceeding without it.")
+                     trace_exception(e)
+                     future.set_exception(e)
+                 finally:
+                     self.queue.task_done()
+             except queue.Empty:
+                 continue
+
+     def _download_civitai_model(self, model_key: str):
+         model_info = CIVITAI_MODELS[model_key]
+         url = model_info["url"]
+         filename = model_info["filename"]
+         dest_path = self.models_path / filename
+         temp_path = dest_path.with_suffix(".temp")
+
+         ASCIIColors.cyan(f"Downloading '{filename}' from Civitai...")
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 total_size = int(r.headers.get('content-length', 0))
+                 with open(temp_path, 'wb') as f, tqdm(
+                     total=total_size, unit='iB', unit_scale=True, desc=f"Downloading {filename}"
+                 ) as bar:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+                         bar.update(len(chunk))

-             if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
-                 self.pipeline.enable_model_cpu_offload()
-                 ASCIIColors.info("Model CPU offload enabled.")
-             elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
-                 self.pipeline.enable_sequential_cpu_offload()
-                 ASCIIColors.info("Sequential CPU offload enabled.")
-
-             ASCIIColors.green(f"Diffusers model '{model_path}' loaded on device '{self.config['device']}'.")
-
+             shutil.move(temp_path, dest_path)
+             ASCIIColors.green(f"Model '{filename}' downloaded successfully.")
          except Exception as e:
-             trace_exception(e)
-             self.pipeline = None
-             raise RuntimeError(f"Failed to load Diffusers model '{self.model_name}': {e}") from e
+             if temp_path.exists():
+                 temp_path.unlink()
+             raise Exception(f"Failed to download model {filename}: {e}") from e
+
+     def _resolve_model_path(self, model_name: str) -> Union[str, Path]:
+         path_obj = Path(model_name)
+         if path_obj.is_absolute() and path_obj.exists():
+             return model_name
+
+         if model_name in CIVITAI_MODELS:
+             filename = CIVITAI_MODELS[model_name]["filename"]
+             local_path = self.models_path / filename
+             if not local_path.exists():
+                 self._download_civitai_model(model_name)
+             return local_path
+
+         local_path = self.models_path / model_name
+         if local_path.exists():
+             return local_path
+
+         return model_name

      def _set_scheduler(self):
-         """Sets the scheduler for the pipeline based on config."""
          if not self.pipeline: return
-
          scheduler_name_key = self.config["scheduler_name"].lower()
-         if scheduler_name_key == "default":
-             ASCIIColors.info(f"Using model's default scheduler: {self.pipeline.scheduler.__class__.__name__}")
-             return
+         if scheduler_name_key == "default": return

          scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
          if scheduler_class_name:
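
`ModelManager` serializes all pipeline work through one queue and one daemon worker: callers enqueue a `(Future, args)` pair and block on `Future.result()`, so only the worker thread ever touches the pipeline. A generic, self-contained sketch of that pattern (the summing job is a stand-in for `pipeline(**args)`):

    import queue
    import threading
    from concurrent.futures import Future

    jobs: queue.Queue = queue.Queue()

    def worker():
        while True:
            job = jobs.get()
            if job is None:  # sentinel, mirrors ModelManager.stop()
                break
            future, args = job
            try:
                future.set_result(sum(args))  # stand-in for pipeline(**args)
            except Exception as e:
                future.set_exception(e)
            finally:
                jobs.task_done()

    threading.Thread(target=worker, daemon=True).start()
    f = Future()
    jobs.put((f, [1, 2, 3]))
    print(f.result())  # prints 6; the caller blocks until the worker finishes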
@@ -249,25 +363,148 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                  scheduler_config = self.pipeline.scheduler.config
                  scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
                  self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
-                 ASCIIColors.info(f"Switched scheduler to {scheduler_name_key} ({scheduler_class_name}).")
              except Exception as e:
                  ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
-         else:
-             ASCIIColors.warning(f"Unknown scheduler: '{self.config['scheduler_name']}'. Using model default.")
+
+ class PipelineRegistry:
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls, *args, **kwargs):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super().__new__(cls)
+                 cls._instance._managers = {}
+                 cls._instance._registry_lock = threading.Lock()
+             return cls._instance
+
+     def _get_config_key(self, config: Dict[str, Any]) -> str:
+         critical_keys = [
+             "model_name", "device", "torch_dtype_str", "use_safetensors",
+             "safety_checker_on", "hf_variant", "enable_cpu_offload",
+             "enable_sequential_cpu_offload", "enable_xformers",
+             "local_files_only", "hf_cache_path"
+         ]
+         key_data = tuple(sorted((k, config.get(k)) for k in critical_keys))
+         return hashlib.sha256(str(key_data).encode('utf-8')).hexdigest()
+
+     def get_manager(self, config: Dict[str, Any], models_path: Path) -> ModelManager:
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key not in self._managers:
+                 self._managers[key] = ModelManager(config.copy(), models_path)
+             return self._managers[key].acquire()
+
+     def release_manager(self, config: Dict[str, Any]):
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key in self._managers:
+                 manager = self._managers[key]
+                 ref_count = manager.release()
+                 if ref_count == 0:
+                     ASCIIColors.info(f"Reference count for model '{config.get('model_name')}' is zero. Cleaning up.")
+                     manager.stop()
+                     manager._unload_pipeline()
+                     del self._managers[key]
+
+ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
+     DEFAULT_CONFIG = {
+         "model_name": "", "device": "auto", "torch_dtype_str": "auto", "use_safetensors": True,
+         "scheduler_name": "default", "safety_checker_on": True, "num_inference_steps": 25,
+         "guidance_scale": 7.0, "default_width": 512, "default_height": 512, "seed": -1,
+         "enable_cpu_offload": False, "enable_sequential_cpu_offload": False, "enable_xformers": False,
+         "hf_variant": None, "hf_token": None, "hf_cache_path": None, "local_files_only": False,
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+
+         if not DIFFUSERS_AVAILABLE:
+             raise ImportError(
+                 "Diffusers or its dependencies not installed. "
+                 "Please run: pip install torch torchvision diffusers Pillow transformers safetensors requests tqdm"
+             )
+
+         self.config = {**self.DEFAULT_CONFIG, **kwargs}
+         self.model_name = self.config.get("model_name", "")
+         self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
+         self.models_path.mkdir(parents=True, exist_ok=True)
+
+         self.registry = PipelineRegistry()
+         self.manager: Optional[ModelManager] = None
+
+         self._resolve_device_and_dtype()
+         if self.model_name:
+             self._acquire_manager()
+
+     def _acquire_manager(self):
+         if self.manager:
+             self.registry.release_manager(self.manager.config)
+         self.manager = self.registry.get_manager(self.config, self.models_path)
+         ASCIIColors.info(f"Binding instance acquired manager for '{self.config['model_name']}'.")
+
+     def _resolve_device_and_dtype(self):
+         if self.config["device"].lower() == "auto":
+             self.config["device"] = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+
+         if self.config["torch_dtype_str"].lower() == "auto":
+             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
+
+     def list_safetensor_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+         return sorted([f.name for f in self.models_path.iterdir() if f.is_file() and f.suffix == ".safetensors"])
+
+     def listModels(self) -> list:
+         # Start with hardcoded Civitai and Hugging Face models
+         civitai_list = [
+             {'model_name': key, 'display_name': info['display_name'], 'description': info['description'], 'owned_by': info['owned_by']}
+             for key, info in CIVITAI_MODELS.items()
+         ]
+         hf_default_list = [
+             # SDXL Models (1024x1024 native)
+             {'model_name': "stabilityai/stable-diffusion-xl-base-1.0", 'display_name': "Stable Diffusion XL 1.0", 'description': "Official SDXL base model from Stability AI. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             {'model_name': "playgroundai/playground-v2.5-1024px-aesthetic", 'display_name': "Playground v2.5", 'description': "Known for high aesthetic quality. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             # SD 1.5 Models (512x512 native)
+             {'model_name': "runwayml/stable-diffusion-v1-5", 'display_name': "Stable Diffusion 1.5", 'description': "A popular and versatile open-access text-to-image model.", 'owned_by': 'HuggingFace'},
+             {'model_name': "dataautogpt3/OpenDalleV1.1", 'display_name': "OpenDalle v1.1", 'description': "An open-source reproduction of DALL-E 3, good for prompt adherence.", 'owned_by': 'HuggingFace'},
+             {'model_name': "stabilityai/stable-diffusion-2-1-base", 'display_name': "Stable Diffusion 2.1 (512px)", 'description': "A 512x512 resolution model from Stability AI.", 'owned_by': 'HuggingFace'},
+             {'model_name': "CompVis/stable-diffusion-v1-4", 'display_name': "Stable Diffusion 1.4 (Gated)", 'description': "Original SD v1.4. Requires accepting license on Hugging Face and an HF token.", 'owned_by': 'HuggingFace'}
+         ]
+
+         # Discover local .safetensors files
+         custom_local_models = []
+         civitai_filenames = {info['filename'] for info in CIVITAI_MODELS.values()}
+         local_safetensors = self.list_safetensor_models()
+
+         for filename in local_safetensors:
+             if filename not in civitai_filenames:
+                 custom_local_models.append({
+                     'model_name': filename,
+                     'display_name': filename,
+                     'description': 'Local safetensors file from your models folder.',
+                     'owned_by': 'local_user'
+                 })
+
+         return civitai_list + hf_default_list + custom_local_models
+
+     def load_model(self):
+         ASCIIColors.info("load_model() called. Loading is now automatic.")
+         if self.model_name and not self.manager:
+             self._acquire_manager()

      def unload_model(self):
-         if self.pipeline is not None:
-             del self.pipeline
-             self.pipeline = None
-             if torch and torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-             ASCIIColors.info("Diffusers pipeline unloaded.")
+         if self.manager:
+             ASCIIColors.info(f"Binding instance releasing manager for '{self.manager.config['model_name']}'.")
+             self.registry.release_manager(self.manager.config)
+             self.manager = None

      def generate_image(self, prompt: str, negative_prompt: str = "", width: int = None, height: int = None, **kwargs) -> bytes:
-         """Generates an image using the loaded Diffusers pipeline."""
-         if not self.pipeline:
-             raise RuntimeError("Diffusers pipeline is not loaded. Cannot generate image.")
-
+         if not self.model_name:
+             raise RuntimeError("No model_name configured. Please select a model in settings.")
+
+         if not self.manager:
+             self._acquire_manager()
+
          _width = width or self.config["default_width"]
          _height = height or self.config["default_height"]
          _num_inference_steps = kwargs.get("num_inference_steps", self.config["num_inference_steps"])
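
`PipelineRegistry` is a process-wide singleton that keys managers by a hash of the load-critical settings, so two binding instances with identical critical config share one pipeline, and reference counting decides when it is unloaded. The key derivation in isolation (same logic as `_get_config_key` above):

    import hashlib

    def config_key(config, critical_keys):
        # Sort so dict ordering cannot change the hash.
        key_data = tuple(sorted((k, config.get(k)) for k in critical_keys))
        return hashlib.sha256(str(key_data).encode("utf-8")).hexdigest()

    a = config_key({"model_name": "dreamshaper-8", "device": "cuda"}, ["model_name", "device"])
    b = config_key({"device": "cuda", "model_name": "dreamshaper-8"}, ["model_name", "device"])
    assert a == b  # same critical settings resolve to the same manager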
@@ -277,179 +514,137 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
          generator = torch.Generator(device=self.config["device"]).manual_seed(_seed) if _seed != -1 else None

          pipeline_args = {
-             "prompt": prompt,
-             "negative_prompt": negative_prompt or None,
-             "width": _width,
-             "height": _height,
-             "num_inference_steps": _num_inference_steps,
-             "guidance_scale": _guidance_scale,
-             "generator": generator,
+             "prompt": prompt, "negative_prompt": negative_prompt or None, "width": _width,
+             "height": _height, "num_inference_steps": _num_inference_steps,
+             "guidance_scale": _guidance_scale, "generator": generator,
          }
-         ASCIIColors.info(f"Generating image with prompt: '{prompt[:100]}...'")
-
+
+         future = Future()
+         self.manager.queue.put((future, pipeline_args))
+         ASCIIColors.info(f"Job for prompt '{prompt[:50]}...' queued. Waiting...")
+
          try:
-             with torch.no_grad():
-                 pipeline_output = self.pipeline(**pipeline_args)
-
-             pil_image: Image.Image = pipeline_output.images[0]
-             img_byte_arr = BytesIO()
-             pil_image.save(img_byte_arr, format="PNG")
-
+             image_bytes = future.result()
              ASCIIColors.green("Image generated successfully.")
-             return img_byte_arr.getvalue()
-
+             return image_bytes
          except Exception as e:
-             trace_exception(e)
-             raise Exception(f"Diffusers image generation failed: {e}") from e
+             raise Exception(f"Image generation failed: {e}") from e

-     def list_models(self) -> List[str]:
-         """Lists available local models from the models_path."""
-         if not self.models_path.exists():
-             return []
+     def list_local_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+
+         folders = [
+             d.name for d in self.models_path.iterdir()
+             if d.is_dir() and ((d / "model_index.json").exists() or (d / "unet" / "config.json").exists())
+         ]
+         safetensors = self.list_safetensor_models()
+         return sorted(folders + safetensors)
+
+     def list_available_models(self) -> List[str]:
+         discoverable_models = [m['model_name'] for m in self.listModels()]
+         local_models = self.list_local_models()

-         models = []
-         for model_dir in self.models_path.iterdir():
-             if model_dir.is_dir():
-                 # Check for key files indicating a valid diffusers model directory
-                 if (model_dir / "model_index.json").exists() or (model_dir / "unet" / "config.json").exists():
-                     models.append(model_dir.name)
-         return sorted(models)
+         combined_list = sorted(list(set(local_models + discoverable_models)))
+         return combined_list

      def list_services(self, **kwargs) -> List[Dict[str, str]]:
-         """Lists available local models from the models_path."""
-         models = self.list_models()
+         models = self.list_available_models()
+         local_models = self.list_local_models()
+
          if not models:
-             return [{
-                 "name": "diffusers_no_local_models",
-                 "caption": "No local Diffusers models found",
-                 "help": f"Place Diffusers model folders inside '{self.models_path.resolve()}' or specify a Hugging Face model ID in settings to download one."
-             }]
-
-         return [{
-             "name": model_name,
-             "caption": f"Diffusers: {model_name}",
-             "help": f"Local Diffusers model from: {self.models_path.resolve()}"
-         } for model_name in models]
+             return [{"name": "diffusers_no_models", "caption": "No models found", "help": f"Place models in '{self.models_path.resolve()}'."}]
+
+         services = []
+         for m in models:
+             help_text = "Hugging Face model ID"
+             if m in local_models:
+                 help_text = f"Local model from: {self.models_path.resolve()}"
+             elif m in CIVITAI_MODELS:
+                 filename = CIVITAI_MODELS[m]['filename']
+                 help_text = f"Civitai model (downloads as {filename})"
+
+             services.append({"name": m, "caption": f"Diffusers: {m}", "help": help_text})
+         return services

      def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
-         """Retrieves the current configurable settings for the binding."""
-         local_models = self.list_models()
+         available_models = self.list_available_models()
          return [
-             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Hugging Face model ID or a local model name from the models folder.", "options": local_models},
-             {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
-             {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Current resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
-             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant from HF (e.g., 'fp16', 'bf16'). Optional."},
-             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer loading models from .safetensors files."},
+             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Local, Civitai, or Hugging Face model.", "options": available_models},
+             {"name": "device", "type": "str", "value": self.config["device"], "description": f"Inference device. Resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
+             {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "HF model variant (e.g., 'fp16')."},
+             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer .safetensors when loading from Hugging Face."},
              {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
-             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker (if model has one)."},
+             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker."},
              {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
              {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
              {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default width for generated images."},
-             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default height for generated images."},
-             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default number of inference steps."},
+             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default image width. Note: SDXL models prefer 1024."},
+             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default image height. Note: SDXL models prefer 1024."},
+             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
              {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
-             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed for generation (-1 for random)."},
-             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private models).", "is_secret": True},
-             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to Hugging Face cache. Defaults to ~/.cache/huggingface."},
-             {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Only use local files, do not download."},
+             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
+             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "HF API token (for private/gated models).", "is_secret": True},
+             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to HF cache."},
+             {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Do not download from Hugging Face."},
          ]

      def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-         """Applies new settings to the binding. Some may trigger a model reload."""
          parsed_settings = settings if isinstance(settings, dict) else \
                            {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}

-         needs_reload = False
-         critical_keys = ["model_name", "device", "torch_dtype_str", "use_safetensors",
-                          "safety_checker_on", "hf_variant", "enable_cpu_offload",
-                          "enable_sequential_cpu_offload", "enable_xformers", "hf_token",
-                          "local_files_only", "hf_cache_path"]
+         critical_keys = ["model_name", "device", "torch_dtype_str", "use_safetensors",
+                          "safety_checker_on", "hf_variant", "enable_cpu_offload",
+                          "enable_sequential_cpu_offload", "enable_xformers",
+                          "local_files_only", "hf_cache_path"]
+         needs_manager_swap = False

          for key, value in parsed_settings.items():
-             current_value = getattr(self, key, self.config.get(key))
-             if current_value != value:
+             if self.config.get(key) != value:
                  ASCIIColors.info(f"Setting '{key}' changed to: {value}")
-                 if key == "model_name":
-                     self.model_name = value
                  self.config[key] = value
-                 if key in critical_keys:
-                     needs_reload = True
-                 elif key == "scheduler_name" and self.pipeline:
-                     self._set_scheduler()
+                 if key == "model_name": self.model_name = value
+                 if key in critical_keys: needs_manager_swap = True
+
+         if needs_manager_swap and self.model_name:
+             ASCIIColors.info("Critical settings changed. Swapping model manager...")
+             self._resolve_device_and_dtype()
+             self._acquire_manager()
+
+         if not needs_manager_swap and self.manager:
+             self.manager.config.update(parsed_settings)
+             if 'scheduler_name' in parsed_settings and self.manager.pipeline:
+                 with self.manager.lock:
+                     self.manager._set_scheduler()

-         if needs_reload and self.model_name:
-             ASCIIColors.info("Reloading model due to settings changes...")
-             try:
-                 self._resolve_device_and_dtype()
-                 self.load_model()
-                 ASCIIColors.green("Model reloaded successfully.")
-             except Exception as e:
-                 trace_exception(e)
-                 ASCIIColors.error(f"Failed to reload model with new settings: {e}. Binding may be unstable.")
-                 return False
          return True

      def __del__(self):
          self.unload_model()

- # Example Usage (for testing within this file)
+ # Example Usage
  if __name__ == '__main__':
      ASCIIColors.magenta("--- Diffusers TTI Binding Test ---")

      if not DIFFUSERS_AVAILABLE:
-         ASCIIColors.error("Diffusers or its dependencies are not available. Cannot run test.")
+         ASCIIColors.error("Diffusers not available. Cannot run test.")
          exit(1)

      temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
      temp_models_path = temp_paths_dir / "models"
-     temp_cache_path = temp_paths_dir / "shared_cache"

-     # Clean up previous runs
-     if temp_paths_dir.exists():
-         shutil.rmtree(temp_paths_dir)
+     if temp_paths_dir.exists(): shutil.rmtree(temp_paths_dir)
      temp_models_path.mkdir(parents=True, exist_ok=True)
-     temp_cache_path.mkdir(parents=True, exist_ok=True)

-     # A very small, fast model for testing from Hugging Face.
-     test_model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
-
      try:
-         ASCIIColors.cyan("\n1. Initializing binding without a model...")
-         binding = DiffusersTTIBinding_Impl(
-             models_path=str(temp_models_path),
-             hf_cache_path=str(temp_cache_path)
-         )
-         assert binding.pipeline is None, "Pipeline should not be loaded initially."
-         ASCIIColors.green("Initialization successful.")
-
-         ASCIIColors.cyan("\n2. Listing services (should be empty)...")
-         services = binding.list_services()
-         ASCIIColors.info(json.dumps(services, indent=2))
-         assert services[0]["name"] == "diffusers_no_local_models"
-
-         ASCIIColors.cyan(f"\n3. Setting model_name to '{test_model_id}' to trigger load...")
-         binding.set_settings({"model_name": test_model_id})
-         assert binding.model_name == test_model_id
-         assert binding.pipeline is not None, "Pipeline should be loaded after setting model_name."
-         ASCIIColors.green("Model loaded successfully.")
-
-         ASCIIColors.cyan("\n4. Generating an image...")
-         image_bytes = binding.generate_image(
-             prompt="A tiny robot",
-             width=64, height=64,
-             num_inference_steps=2
-         )
-         assert image_bytes and isinstance(image_bytes, bytes)
-         ASCIIColors.green(f"Image generated (size: {len(image_bytes)} bytes).")
-         test_image_path = Path(__file__).parent / "test_diffusers_image.png"
-         with open(test_image_path, "wb") as f:
-             f.write(image_bytes)
-         ASCIIColors.info(f"Test image saved to: {test_image_path.resolve()}")
-
-         ASCIIColors.cyan("\n5. Unloading model...")
-         binding.unload_model()
-         assert binding.pipeline is None, "Pipeline should be None after unload."
+         ASCIIColors.cyan("\n--- Test: Loading a Hugging Face model ---")
+         # Using a very small model for fast testing
+         binding_config = {"models_path": str(temp_models_path), "model_name": "hf-internal-testing/tiny-stable-diffusion-torch"}
+         binding = DiffusersTTIBinding_Impl(**binding_config)
+
+         img_bytes = binding.generate_image("a tiny robot", width=64, height=64, num_inference_steps=2)
+         assert len(img_bytes) > 1000, "Image generation from HF model should succeed."
+         ASCIIColors.green("HF model loading and generation successful.")
+
+         del binding
+         time.sleep(0.1)

      except Exception as e:
          trace_exception(e)
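
Because `generate_image` now just enqueues a job and waits on a `Future`, several threads can call it on the same binding and their jobs execute one at a time on the manager's worker. A hypothetical usage sketch (assumes a `binding` configured as in the test above):

    from concurrent.futures import ThreadPoolExecutor

    def render(i):
        # Each call blocks on its own Future; the worker runs jobs serially.
        return binding.generate_image(f"a tiny robot, variant {i}",
                                      width=64, height=64, num_inference_steps=2)

    with ThreadPoolExecutor(max_workers=4) as pool:
        images = list(pool.map(render, range(4)))
    print([len(b) for b in images])  # four PNG byte strings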
@@ -458,23 +653,4 @@ if __name__ == '__main__':
          ASCIIColors.cyan("\nCleaning up temporary directories...")
          if temp_paths_dir.exists():
              shutil.rmtree(temp_paths_dir)
-     ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
-
-     def listModels(self) -> list:
-         """Lists models"""
-         # TODO: use the models from the folder if set
-         formatted_models=[
-             {
-                 'model_name': "dummy model 1",
-                 'display_name': "Test dummy model 1",
-                 'description': "A test dummy model",
-                 'owned_by': 'parisneo'
-             },
-             {
-                 'model_name': "dummy model 2",
-                 'display_name': "Test dummy model 2",
-                 'description': "A test dummy model",
-                 'owned_by': 'parisneo'
-             }
-         ]
-         return formatted_models
+     ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
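
For reference, the scheduler swap that `_set_scheduler` performs reduces to: look up the class name in `SCHEDULER_MAPPING`, import it from `diffusers`, and rebuild it from the current scheduler's config. A sketch under those assumptions (the `importlib` route is consistent with the module's imports, though the exact lookup is not shown in the hunks above):

    import importlib

    def swap_scheduler(pipeline, key: str):
        cls_name = SCHEDULER_MAPPING.get(key.lower())
        if not cls_name:  # "default" or unknown: keep the model's scheduler
            return
        SchedulerClass = getattr(importlib.import_module("diffusers"), cls_name)
        cfg = dict(pipeline.scheduler.config)
        cfg["use_karras_sigmas"] = key.lower() in SCHEDULER_USES_KARRAS_SIGMAS
        pipeline.scheduler = SchedulerClass.from_config(cfg)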
lollms_client-1.1.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.1.0
+ Version: 1.1.2
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
lollms_client-1.1.2.dist-info/RECORD CHANGED
@@ -1,9 +1,9 @@
- lollms_client/__init__.py,sha256=FmmUQyLm9hgGyCc71AtEoRRxx-YfI8Gl_4H7k3VRFpI,1146
+ lollms_client/__init__.py,sha256=RkXIPHds2g_ttWbvQMSCbSKe3_0WIlk4gSu3Ix5VTjc,1146
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
  lollms_client/lollms_core.py,sha256=zqaxEJJDXiwpDd3E-PFDLJOnmG1d9cOiL_CrYRUa7Z0,167361
  lollms_client/lollms_discussion.py,sha256=wkadV6qiegxOzukMVn5vukdeJivnlyygSzZBkzOi9Gc,106714
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=EJGIKb_a3sVEia5zxs_EcOl21xmfJV8JzyY3sfymSBw,24984
+ lollms_client/lollms_llm_binding.py,sha256=5-Vknm0YILPd6ZiwZynsXMfns__Yd_1tDDc2fciRiiA,25020
  lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
  lollms_client/lollms_mcp_security.py,sha256=FhVTDhSBjksGEZnopVnjFmEF5dv7D8bBTqoaj4BiF0E,3562
  lollms_client/lollms_personality.py,sha256=O-9nqZhazcITOkxjT24ENTxTmIoZLgqIsQ9WtWs0Id0,8719
@@ -27,7 +27,7 @@ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4CbNYpfquVEgfsxuLsxQta_dZ
  lollms_client/llm_bindings/lollms/__init__.py,sha256=a4gNH4axiDgsri8NGAcq0OitgYdnzBDLNkzUMhkFArA,24781
  lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
  lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
- lollms_client/llm_bindings/ollama/__init__.py,sha256=d61pSEWlJ2KOvnaztji2wblvadu0oTelEJeHG4IcL9I,41193
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=3Jyb9Yyp6lcz52HKF8nEiB4s3ChKBCjs7I27ohNOT4A,41216
  lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
  lollms_client/llm_bindings/openai/__init__.py,sha256=J8v7XU9TrvXJd1ffwhYkya5YeXxWnNiFuNBAwRfoHDk,26066
  lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
@@ -48,7 +48,7 @@ lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZ
  lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
  lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/tti_bindings/dalle/__init__.py,sha256=1nE36XamKEJOMpm6QUow8OyM1KdpejCLM0KUSXlcePo,24135
- lollms_client/tti_bindings/diffusers/__init__.py,sha256=OQOHE1WtB4TamsWd17EEL4NOdxIIonDqDMS_xaONUSI,23510
+ lollms_client/tti_bindings/diffusers/__init__.py,sha256=weqBwPgdcuoPQB5ZsSdpHbKTxy4M_x5WO-Q6s-HY5gY,32304
  lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
  lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
  lollms_client/ttm_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,8 +62,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-1.1.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-1.1.0.dist-info/METADATA,sha256=_frP9CKjj2dGhIX57fY7NLo9z0-Bfc449rznQMOUSGs,58549
- lollms_client-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-1.1.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
- lollms_client-1.1.0.dist-info/RECORD,,
+ lollms_client-1.1.2.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-1.1.2.dist-info/METADATA,sha256=wPUzIZIA8FStsJk7Hp9Tv-4WZYMZV1IVG-kXPJlOkCs,58549
+ lollms_client-1.1.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-1.1.2.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-1.1.2.dist-info/RECORD,,