lollms-client 1.1.3__py3-none-any.whl → 1.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lollms-client has been flagged as possibly problematic.

@@ -2,144 +2,137 @@
 import os
 import importlib
 from io import BytesIO
-from typing import Optional, List, Dict, Any, Union
+from typing import Optional, List, Dict, Any, Union, Tuple
 from pathlib import Path
+import base64
 import pipmaster as pm
-# --- Concurrency Imports ---
 import threading
 import queue
 from concurrent.futures import Future
 import time
 import hashlib
-import re
-# -------------------------
-# --- Download Imports ---
 import requests
 from tqdm import tqdm
-# --------------------
+import json
+import shutil
+from lollms_client.lollms_tti_binding import LollmsTTIBinding
+from ascii_colors import trace_exception, ASCIIColors
 
 pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
-pm.ensure_packages(["diffusers","pillow","transformers","safetensors", "requests", "tqdm"])
+pm.ensure_packages(["diffusers","pillow","transformers","safetensors","requests","tqdm"])
 
-# Attempt to import core dependencies and set availability flag
 try:
     import torch
-    from diffusers import AutoPipelineForText2Image, DiffusionPipeline, StableDiffusionPipeline
+    from diffusers import (
+        AutoPipelineForText2Image,
+        AutoPipelineForImage2Image,
+        AutoPipelineForInpainting,
+        DiffusionPipeline,
+        StableDiffusionPipeline
+    )
     from diffusers.utils import load_image
     from PIL import Image
     DIFFUSERS_AVAILABLE = True
 except ImportError:
     torch = None
     AutoPipelineForText2Image = None
+    AutoPipelineForImage2Image = None
+    AutoPipelineForInpainting = None
     DiffusionPipeline = None
     StableDiffusionPipeline = None
     Image = None
     load_image = None
     DIFFUSERS_AVAILABLE = False
 
-from lollms_client.lollms_tti_binding import LollmsTTIBinding
-from ascii_colors import trace_exception, ASCIIColors
-import json
-import shutil
-
-# Defines the binding name for the manager
 BindingName = "DiffusersTTIBinding_Impl"
 
-# --- START: Civitai Model Definitions ---
-# Expanded list of popular Civitai models (as single .safetensors files)
 CIVITAI_MODELS = {
-    # --- Photorealistic ---
     "realistic-vision-v6": {
         "display_name": "Realistic Vision V6.0",
         "url": "https://civitai.com/api/download/models/130072",
         "filename": "realisticVisionV60_v60B1.safetensors",
-        "description": "One of the most popular photorealistic models.",
+        "description": "Photorealistic SD1.5 checkpoint.",
         "owned_by": "civitai"
     },
     "absolute-reality": {
         "display_name": "Absolute Reality",
         "url": "https://civitai.com/api/download/models/132760",
         "filename": "absolutereality_v181.safetensors",
-        "description": "A top-tier model for generating realistic images.",
+        "description": "General realistic SD1.5.",
        "owned_by": "civitai"
     },
-    # --- General / Artistic ---
     "dreamshaper-8": {
         "display_name": "DreamShaper 8",
         "url": "https://civitai.com/api/download/models/128713",
         "filename": "dreamshaper_8.safetensors",
-        "description": "A very popular and versatile general-purpose model.",
+        "description": "Versatile SD1.5 style model.",
         "owned_by": "civitai"
     },
     "juggernaut-xl": {
         "display_name": "Juggernaut XL",
         "url": "https://civitai.com/api/download/models/133005",
         "filename": "juggernautXL_version6Rundiffusion.safetensors",
-        "description": "High-quality artistic model, great for cinematic styles (SDXL-based).",
+        "description": "Artistic SDXL.",
         "owned_by": "civitai"
     },
     "lyriel-v1.6": {
         "display_name": "Lyriel v1.6",
         "url": "https://civitai.com/api/download/models/92407",
         "filename": "lyriel_v16.safetensors",
-        "description": "A popular artistic model for fantasy and stylized images.",
+        "description": "Fantasy/stylized SD1.5.",
         "owned_by": "civitai"
     },
-    # --- Anime / Illustration ---
     "anything-v5": {
         "display_name": "Anything V5",
         "url": "https://civitai.com/api/download/models/9409",
         "filename": "anythingV5_PrtRE.safetensors",
-        "description": "A classic and highly popular model for anime-style generation.",
+        "description": "Anime SD1.5.",
         "owned_by": "civitai"
     },
     "meinamix": {
         "display_name": "MeinaMix",
         "url": "https://civitai.com/api/download/models/119057",
         "filename": "meinamix_meinaV11.safetensors",
-        "description": "A highly popular model for generating illustrative and vibrant anime-style images.",
+        "description": "Anime/illustration SD1.5.",
         "owned_by": "civitai"
     },
-    # --- Game Assets & Specialized Styles ---
     "rpg-v5": {
         "display_name": "RPG v5",
         "url": "https://civitai.com/api/download/models/137379",
         "filename": "rpg_v5.safetensors",
-        "description": "Specialized in generating fantasy characters and assets in the style of classic RPGs.",
+        "description": "RPG assets SD1.5.",
         "owned_by": "civitai"
     },
     "pixel-art-xl": {
         "display_name": "Pixel Art XL",
         "url": "https://civitai.com/api/download/models/252919",
         "filename": "pixelartxl_v11.safetensors",
-        "description": "A dedicated SDXL model for generating high-quality pixel art sprites and scenes.",
+        "description": "Pixel art SDXL.",
         "owned_by": "civitai"
     },
     "lowpoly-world": {
         "display_name": "Lowpoly World",
         "url": "https://civitai.com/api/download/models/90299",
         "filename": "lowpoly_world_v10.safetensors",
-        "description": "Generates assets and scenes with a stylized low-polygon, 3D render aesthetic.",
+        "description": "Lowpoly style SD1.5.",
         "owned_by": "civitai"
     },
     "toonyou": {
         "display_name": "ToonYou",
         "url": "https://civitai.com/api/download/models/152361",
         "filename": "toonyou_beta6.safetensors",
-        "description": "Excellent for creating expressive, high-quality cartoon and Disney-style characters.",
+        "description": "Cartoon/Disney SD1.5.",
         "owned_by": "civitai"
     },
     "papercut": {
         "display_name": "Papercut",
         "url": "https://civitai.com/api/download/models/45579",
         "filename": "papercut_v1.safetensors",
-        "description": "Creates unique images with a distinct paper cutout and layered diorama style.",
+        "description": "Paper cutout SD1.5.",
         "owned_by": "civitai"
     }
 }
-# --- END: Civitai Model Definitions ---
 
-# Helper for torch.dtype string conversion
 TORCH_DTYPE_MAP_STR_TO_OBJ = {
     "float16": getattr(torch, 'float16', 'float16'),
     "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
@@ -150,7 +143,6 @@ TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items(
 if torch:
     TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"
 
-# Common Schedulers mapping
 SCHEDULER_MAPPING = {
     "default": None,
     "ddim": "DDIMScheduler",
@@ -159,10 +151,10 @@ SCHEDULER_MAPPING = {
     "dpm_multistep": "DPMSolverMultistepScheduler",
     "dpm_multistep_karras": "DPMSolverMultistepScheduler",
     "dpm_single": "DPMSolverSinglestepScheduler",
-    "dpm_adaptive": "DPMSolverPlusPlusScheduler", # Retained; no direct Diffusers equivalent confirmed, may require custom config
+    "dpm_adaptive": "DPMSolverPlusPlusScheduler",
     "dpm++_2m": "DPMSolverMultistepScheduler",
     "dpm++_2m_karras": "DPMSolverMultistepScheduler",
-    "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler", # Retained; consider "KDPM2AncestralDiscreteScheduler" as alternative if class unavailable
+    "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler",
     "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
     "dpm++_sde": "DPMSolverSDEScheduler",
     "dpm++_sde_karras": "DPMSolverSDEScheduler",
@@ -174,7 +166,6 @@ SCHEDULER_MAPPING = {
     "lms_karras": "LMSDiscreteScheduler",
     "pndm": "PNDMScheduler",
     "unipc_multistep": "UniPCMultistepScheduler",
-    # Additions
     "dpm++_2m_sde": "DPMSolverMultistepScheduler",
     "dpm++_2m_sde_karras": "DPMSolverMultistepScheduler",
     "dpm2": "KDPM2DiscreteScheduler",
@@ -184,42 +175,32 @@ SCHEDULER_MAPPING = {
     "euler": "EulerDiscreteScheduler",
     "euler_a": "EulerAncestralDiscreteScheduler",
     "heun": "HeunDiscreteScheduler",
-    "lms": "LMSDiscreteScheduler",
+    "lms": "LMSDiscreteScheduler"
 }
 SCHEDULER_USES_KARRAS_SIGMAS = [
-    "dpm_multistep_karras", "dpm++_2m_karras", "dpm++_2s_ancestral_karras",
-    "dpm++_sde_karras", "heun_karras", "lms_karras",
-    # Additions
-    "dpm++_2m_sde_karras", "dpm2_karras", "dpm2_a_karras",
+    "dpm_multistep_karras","dpm++_2m_karras","dpm++_2s_ancestral_karras",
+    "dpm++_sde_karras","heun_karras","lms_karras",
+    "dpm++_2m_sde_karras","dpm2_karras","dpm2_a_karras"
 ]
 
-# --- START: Concurrency and Singleton Management ---
-
 class ModelManager:
-    """
-    Manages a single pipeline instance, its generation queue, a worker thread,
-    and an optional auto-unload timer.
-    """
     def __init__(self, config: Dict[str, Any], models_path: Path):
         self.config = config
         self.models_path = models_path
         self.pipeline: Optional[DiffusionPipeline] = None
+        self.current_task: Optional[str] = None
         self.ref_count = 0
         self.lock = threading.Lock()
         self.queue = queue.Queue()
         self.is_loaded = False
         self.last_used_time = time.time()
-
-        # --- Worker and Monitor Threads ---
         self._stop_event = threading.Event()
         self.worker_thread = threading.Thread(target=self._generation_worker, daemon=True)
         self.worker_thread.start()
-
         self._stop_monitor_event = threading.Event()
         self._unload_monitor_thread = None
         self._start_unload_monitor()
 
-
     def acquire(self):
         with self.lock:
             self.ref_count += 1
@@ -235,7 +216,7 @@ class ModelManager:
         if self._unload_monitor_thread:
             self._stop_monitor_event.set()
             self._unload_monitor_thread.join(timeout=2)
-        self.queue.put(None) # Sentinel to unblock queue.get()
+        self.queue.put(None)
         self.worker_thread.join(timeout=5)
 
     def _start_unload_monitor(self):
@@ -247,92 +228,141 @@ class ModelManager:
     def _unload_monitor(self):
         unload_after = self.config.get("unload_inactive_model_after", 0)
-        if unload_after <= 0: return
-
+        if unload_after <= 0:
+            return
         ASCIIColors.info(f"Starting inactivity monitor for '{self.config['model_name']}' (timeout: {unload_after}s).")
-        while not self._stop_monitor_event.wait(timeout=5.0): # Check every 5 seconds
+        while not self._stop_monitor_event.wait(timeout=5.0):
             with self.lock:
                 if not self.is_loaded:
                     continue
-
                 if time.time() - self.last_used_time > unload_after:
                     ASCIIColors.info(f"Model '{self.config['model_name']}' has been inactive. Unloading.")
                     self._unload_pipeline()
 
-    def _load_pipeline(self):
-        # This method assumes a lock is already held
-        if self.pipeline:
+    def _resolve_model_path(self, model_name: str) -> Union[str, Path]:
+        path_obj = Path(model_name)
+        if path_obj.is_absolute() and path_obj.exists():
+            return model_name
+        if model_name in CIVITAI_MODELS:
+            filename = CIVITAI_MODELS[model_name]["filename"]
+            local_path = self.models_path / filename
+            if not local_path.exists():
+                self._download_civitai_model(model_name)
+            return local_path
+        local_path = self.models_path / model_name
+        if local_path.exists():
+            return local_path
+        return model_name
+
+    def _download_civitai_model(self, model_key: str):
+        model_info = CIVITAI_MODELS[model_key]
+        url = model_info["url"]
+        filename = model_info["filename"]
+        dest_path = self.models_path / filename
+        temp_path = dest_path.with_suffix(".temp")
+        ASCIIColors.cyan(f"Downloading '{filename}' from Civitai...")
+        try:
+            with requests.get(url, stream=True) as r:
+                r.raise_for_status()
+                total_size = int(r.headers.get('content-length', 0))
+                with open(temp_path, 'wb') as f, tqdm(total=total_size, unit='iB', unit_scale=True, desc=f"Downloading {filename}") as bar:
+                    for chunk in r.iter_content(chunk_size=8192):
+                        f.write(chunk)
+                        bar.update(len(chunk))
+            shutil.move(temp_path, dest_path)
+            ASCIIColors.green(f"Model '{filename}' downloaded successfully.")
+        except Exception as e:
+            if temp_path.exists():
+                temp_path.unlink()
+            raise Exception(f"Failed to download model {filename}: {e}") from e
+
+    def _set_scheduler(self):
+        if not self.pipeline:
             return
+        scheduler_name_key = self.config["scheduler_name"].lower()
+        if scheduler_name_key == "default":
+            return
+        scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
+        if scheduler_class_name:
+            try:
+                SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), scheduler_class_name)
+                scheduler_config = self.pipeline.scheduler.config
+                scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
+                self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
+                ASCIIColors.info(f"Switched scheduler to {scheduler_class_name}")
+            except Exception as e:
+                ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
 
+    def _load_pipeline_for_task(self, task: str):
+        if self.pipeline and self.current_task == task:
+            return
+        if self.pipeline:
+            self._unload_pipeline()
         model_name = self.config.get("model_name", "")
         if not model_name:
             raise ValueError("Model name cannot be empty for loading.")
-
-        ASCIIColors.info(f"Loading Diffusers model: {model_name}")
+        ASCIIColors.info(f"Loading Diffusers model: {model_name} for task: {task}")
         model_path = self._resolve_model_path(model_name)
         torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower())
-
         try:
+            load_args = {}
+            if self.config.get("hf_cache_path"):
+                load_args["cache_dir"] = str(self.config["hf_cache_path"])
             if str(model_path).endswith(".safetensors"):
-                ASCIIColors.info(f"Loading from single safetensors file: {model_path}")
-                try:
-                    # Modern, preferred method for newer diffusers versions
-                    self.pipeline = AutoPipelineForText2Image.from_single_file(
-                        model_path,
-                        torch_dtype=torch_dtype,
-                        cache_dir=self.config.get("hf_cache_path")
-                    )
-                except AttributeError:
-                    # Fallback for older diffusers versions
-                    ASCIIColors.warning("AutoPipelineForText2Image.from_single_file not found. Falling back to StableDiffusionPipeline.")
-                    ASCIIColors.warning("Consider updating diffusers for better compatibility: pip install --upgrade diffusers")
-                    self.pipeline = StableDiffusionPipeline.from_single_file(
-                        model_path,
-                        torch_dtype=torch_dtype,
-                        cache_dir=self.config.get("hf_cache_path")
-                    )
+                if task == "text2image":
+                    try:
+                        self.pipeline = AutoPipelineForText2Image.from_single_file(model_path, torch_dtype=torch_dtype, cache_dir=load_args.get("cache_dir"))
+                    except AttributeError:
+                        self.pipeline = StableDiffusionPipeline.from_single_file(model_path, torch_dtype=torch_dtype, cache_dir=load_args.get("cache_dir"))
+                elif task == "image2image":
+                    self.pipeline = AutoPipelineForImage2Image.from_single_file(model_path, torch_dtype=torch_dtype, cache_dir=load_args.get("cache_dir"))
+                elif task == "inpainting":
+                    self.pipeline = AutoPipelineForInpainting.from_single_file(model_path, torch_dtype=torch_dtype, cache_dir=load_args.get("cache_dir"))
             else:
-                ASCIIColors.info(f"Loading from pretrained folder/repo: {model_path}")
-                load_args = {
-                    "torch_dtype": torch_dtype, "use_safetensors": self.config["use_safetensors"],
-                    "token": self.config["hf_token"], "local_files_only": self.config["local_files_only"],
+                common_args = {
+                    "torch_dtype": torch_dtype,
+                    "use_safetensors": self.config["use_safetensors"],
+                    "token": self.config["hf_token"],
+                    "local_files_only": self.config["local_files_only"]
                 }
-                if self.config["hf_variant"]: load_args["variant"] = self.config["hf_variant"]
-                if not self.config["safety_checker_on"]: load_args["safety_checker"] = None
-                if self.config.get("hf_cache_path"): load_args["cache_dir"] = str(self.config["hf_cache_path"])
-                self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
-
+                if self.config["hf_variant"]:
+                    common_args["variant"] = self.config["hf_variant"]
+                if not self.config["safety_checker_on"]:
+                    common_args["safety_checker"] = None
+                if self.config.get("hf_cache_path"):
+                    common_args["cache_dir"] = str(self.config["hf_cache_path"])
+                if task == "text2image":
+                    self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **common_args)
+                elif task == "image2image":
+                    self.pipeline = AutoPipelineForImage2Image.from_pretrained(model_path, **common_args)
+                elif task == "inpainting":
+                    self.pipeline = AutoPipelineForInpainting.from_pretrained(model_path, **common_args)
         except Exception as e:
             error_str = str(e).lower()
             if "401" in error_str or "gated" in error_str or "authorization" in error_str:
-                auth_error_msg = (
-                    f"AUTHENTICATION FAILED for model '{model_name}'. This is likely a 'gated' model on Hugging Face.\n"
-                    "Please ensure you have accepted its license and provided a valid HF Access Token in the settings."
+                msg = (
+                    f"AUTHENTICATION FAILED for model '{model_name}'. "
+                    "Please ensure you accepted the model license and provided a valid HF token."
                 )
-                raise RuntimeError(auth_error_msg) from e
-            else:
-                raise e
-
+                raise RuntimeError(msg) from e
+            raise e
         self._set_scheduler()
         self.pipeline.to(self.config["device"])
-
         if self.config["enable_xformers"]:
             try:
                 self.pipeline.enable_xformers_memory_efficient_attention()
             except Exception as e:
                 ASCIIColors.warning(f"Could not enable xFormers: {e}.")
-
         if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
             self.pipeline.enable_model_cpu_offload()
         elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
             self.pipeline.enable_sequential_cpu_offload()
-
         self.is_loaded = True
+        self.current_task = task
         self.last_used_time = time.time()
-        ASCIIColors.green(f"Model '{model_name}' loaded successfully on '{self.config['device']}'.")
+        ASCIIColors.green(f"Model '{model_name}' loaded successfully on '{self.config['device']}' for task '{task}'.")
 
     def _unload_pipeline(self):
-        # This method assumes a lock is already held
         if self.pipeline:
             model_name = self.config.get('model_name', 'Unknown')
             del self.pipeline
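
The old single _load_pipeline is replaced here by a task-aware loader: the manager now remembers current_task, switching tasks tears the pipeline down before rebuilding it, and repeated requests for the same task are no-ops. Illustratively (mgr stands for a hypothetical configured ModelManager):

    # mgr._load_pipeline_for_task("text2image")   # loads AutoPipelineForText2Image
    # mgr._load_pipeline_for_task("text2image")   # no-op: same task already loaded
    # mgr._load_pipeline_for_task("inpainting")   # unloads, then loads AutoPipelineForInpainting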
@@ -340,6 +370,7 @@ class ModelManager:
         if torch and torch.cuda.is_available():
             torch.cuda.empty_cache()
         self.is_loaded = False
+        self.current_task = None
         ASCIIColors.info(f"Model '{model_name}' unloaded and VRAM cleared.")
 
     def _generation_worker(self):
@@ -348,19 +379,18 @@ class ModelManager:
                 job = self.queue.get(timeout=1)
                 if job is None:
                     break
-                future, pipeline_args = job
+                future, task, pipeline_args = job
                 try:
                     with self.lock:
                         self.last_used_time = time.time()
-                        if not self.is_loaded:
-                            self._load_pipeline()
-
+                        if not self.is_loaded or self.current_task != task:
+                            self._load_pipeline_for_task(task)
                         with torch.no_grad():
-                            pipeline_output = self.pipeline(**pipeline_args)
-                            pil_image: Image.Image = pipeline_output.images[0]
-                            img_byte_arr = BytesIO()
-                            pil_image.save(img_byte_arr, format="PNG")
-                            future.set_result(img_byte_arr.getvalue())
+                            output = self.pipeline(**pipeline_args)
+                            pil = output.images[0]
+                            buf = BytesIO()
+                            pil.save(buf, format="PNG")
+                            future.set_result(buf.getvalue())
                 except Exception as e:
                     trace_exception(e)
                     future.set_exception(e)
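
Queue jobs are now (future, task, pipeline_args) triples rather than pairs, so the worker can lazily reload when the requested task differs from the loaded one. The producer side, reduced to its essentials (a sketch; generate_image and edit_image below follow this pattern):

    from concurrent.futures import Future

    def submit_job(manager, task: str, pipeline_args: dict) -> bytes:
        # The worker thread resolves the future with PNG bytes,
        # or re-raises the generation error through future.result().
        future = Future()
        manager.queue.put((future, task, pipeline_args))
        return future.result()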
@@ -369,70 +399,9 @@ class ModelManager:
             except queue.Empty:
                 continue
 
-    def _download_civitai_model(self, model_key: str):
-        model_info = CIVITAI_MODELS[model_key]
-        url = model_info["url"]
-        filename = model_info["filename"]
-        dest_path = self.models_path / filename
-        temp_path = dest_path.with_suffix(".temp")
-
-        ASCIIColors.cyan(f"Downloading '{filename}' from Civitai...")
-        try:
-            with requests.get(url, stream=True) as r:
-                r.raise_for_status()
-                total_size = int(r.headers.get('content-length', 0))
-                with open(temp_path, 'wb') as f, tqdm(
-                    total=total_size, unit='iB', unit_scale=True, desc=f"Downloading {filename}"
-                ) as bar:
-                    for chunk in r.iter_content(chunk_size=8192):
-                        f.write(chunk)
-                        bar.update(len(chunk))
-
-            shutil.move(temp_path, dest_path)
-            ASCIIColors.green(f"Model '{filename}' downloaded successfully.")
-        except Exception as e:
-            if temp_path.exists():
-                temp_path.unlink()
-            raise Exception(f"Failed to download model {filename}: {e}") from e
-
-    def _resolve_model_path(self, model_name: str) -> Union[str, Path]:
-        path_obj = Path(model_name)
-        if path_obj.is_absolute() and path_obj.exists():
-            return model_name
-
-        if model_name in CIVITAI_MODELS:
-            filename = CIVITAI_MODELS[model_name]["filename"]
-            local_path = self.models_path / filename
-            if not local_path.exists():
-                self._download_civitai_model(model_name)
-            return local_path
-
-        local_path = self.models_path / model_name
-        if local_path.exists():
-            return local_path
-
-        return model_name
-
-    def _set_scheduler(self):
-        if not self.pipeline: return
-        scheduler_name_key = self.config["scheduler_name"].lower()
-        if scheduler_name_key == "default": return
-
-        scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
-        if scheduler_class_name:
-            try:
-                SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), scheduler_class_name)
-                scheduler_config = self.pipeline.scheduler.config
-                scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
-                self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
-                ASCIIColors.info(f"Switched scheduler to {scheduler_class_name}")
-            except Exception as e:
-                ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
-
 
 class PipelineRegistry:
     _instance = None
     _lock = threading.Lock()
-
     def __new__(cls, *args, **kwargs):
         with cls._lock:
             if cls._instance is None:
@@ -440,27 +409,23 @@ class PipelineRegistry:
                 cls._instance._managers = {}
                 cls._instance._registry_lock = threading.Lock()
             return cls._instance
-
     @staticmethod
     def _get_critical_keys():
         return [
-            "model_name", "device", "torch_dtype_str", "use_safetensors",
-            "safety_checker_on", "hf_variant", "enable_cpu_offload",
-            "enable_sequential_cpu_offload", "enable_xformers",
-            "local_files_only", "hf_cache_path", "unload_inactive_model_after"
+            "model_name","device","torch_dtype_str","use_safetensors",
+            "safety_checker_on","hf_variant","enable_cpu_offload",
+            "enable_sequential_cpu_offload","enable_xformers",
+            "local_files_only","hf_cache_path","unload_inactive_model_after"
         ]
-
     def _get_config_key(self, config: Dict[str, Any]) -> str:
         key_data = tuple(sorted((k, config.get(k)) for k in self._get_critical_keys()))
         return hashlib.sha256(str(key_data).encode('utf-8')).hexdigest()
-
     def get_manager(self, config: Dict[str, Any], models_path: Path) -> ModelManager:
         key = self._get_config_key(config)
         with self._registry_lock:
             if key not in self._managers:
                 self._managers[key] = ModelManager(config.copy(), models_path)
             return self._managers[key].acquire()
-
     def release_manager(self, config: Dict[str, Any]):
         key = self._get_config_key(config)
         with self._registry_lock:
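
Because the manager key hashes only the critical keys, bindings that differ solely in non-critical settings (steps, guidance, seed, scheduler) share a single ModelManager and never trigger a reload. A small sketch, assuming the classes defined above:

    registry = PipelineRegistry()
    cfg_a = {"model_name": "dreamshaper-8", "device": "cuda", "num_inference_steps": 25}
    cfg_b = {"model_name": "dreamshaper-8", "device": "cuda", "num_inference_steps": 50}
    # num_inference_steps is not a critical key, so both configs map to the same manager.
    assert registry._get_config_key(cfg_a) == registry._get_config_key(cfg_b)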
@@ -473,87 +438,89 @@ class PipelineRegistry:
             with manager.lock:
                 manager._unload_pipeline()
             del self._managers[key]
-
     def get_active_managers(self) -> List[ModelManager]:
         with self._registry_lock:
             return [m for m in self._managers.values() if m.is_loaded]
 
 class DiffusersTTIBinding_Impl(LollmsTTIBinding):
     DEFAULT_CONFIG = {
-        "model_name": "", "device": "auto", "torch_dtype_str": "auto", "use_safetensors": True,
-        "scheduler_name": "default", "safety_checker_on": True, "num_inference_steps": 25,
-        "guidance_scale": 7.0, "default_width": 512, "default_height": 512, "seed": -1,
-        "enable_cpu_offload": False, "enable_sequential_cpu_offload": False, "enable_xformers": False,
-        "hf_variant": None, "hf_token": None, "hf_cache_path": None, "local_files_only": False,
-        "unload_inactive_model_after": 0,
+        "model_name": "",
+        "device": "auto",
+        "torch_dtype_str": "auto",
+        "use_safetensors": True,
+        "scheduler_name": "default",
+        "safety_checker_on": True,
+        "num_inference_steps": 25,
+        "guidance_scale": 7.0,
+        "default_width": 512,
+        "default_height": 512,
+        "seed": -1,
+        "enable_cpu_offload": False,
+        "enable_sequential_cpu_offload": False,
+        "enable_xformers": False,
+        "hf_variant": None,
+        "hf_token": None,
+        "hf_cache_path": None,
+        "local_files_only": False,
+        "unload_inactive_model_after": 0
     }
+    HF_DEFAULT_MODELS = [
+        {"family": "SDXL", "model_name": "stabilityai/stable-diffusion-xl-base-1.0", "display_name": "SDXL Base 1.0", "desc": "Text2Image 1024 native."},
+        {"family": "SDXL", "model_name": "stabilityai/stable-diffusion-xl-refiner-1.0", "display_name": "SDXL Refiner 1.0", "desc": "Refiner for SDXL."},
+        {"family": "SD 1.x", "model_name": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion 1.5", "desc": "Classic SD1.5."},
+        {"family": "SD 2.x", "model_name": "stabilityai/stable-diffusion-2-1", "display_name": "Stable Diffusion 2.1", "desc": "SD2.1 base."},
+        {"family": "SD3", "model_name": "stabilityai/stable-diffusion-3-medium-diffusers", "display_name": "Stable Diffusion 3 Medium", "desc": "SD3 medium."},
+        {"family": "Specialized", "model_name": "playgroundai/playground-v2.5-1024px-aesthetic", "display_name": "Playground v2.5", "desc": "High aesthetic 1024."},
+        {"family": "Editors", "model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "Dedicated image editing."}
+    ]
 
     def __init__(self, **kwargs):
         super().__init__(binding_name=BindingName)
-
+        self.manager: Optional[ModelManager] = None
         if not DIFFUSERS_AVAILABLE:
-            raise ImportError(
-                "Diffusers or its dependencies not installed. "
-                "Please run: pip install torch torchvision diffusers Pillow transformers safetensors requests tqdm"
-            )
-
-        # Initialize config with defaults, then override with user kwargs
+            raise ImportError("Diffusers not available. Please install required packages.")
         self.config = self.DEFAULT_CONFIG.copy()
         self.config.update(kwargs)
-
         self.model_name = self.config.get("model_name", "")
         models_path_str = kwargs.get("models_path", str(Path(__file__).parent / "models"))
         self.models_path = Path(models_path_str)
         self.models_path.mkdir(parents=True, exist_ok=True)
-
         self.registry = PipelineRegistry()
-        self.manager: Optional[ModelManager] = None
-
         self._resolve_device_and_dtype()
         if self.model_name:
             self._acquire_manager()
 
     def ps(self) -> List[dict]:
-        """
-        Lists running models in a standardized, flat format.
-        """
         if not self.registry:
-            ASCIIColors.warning("Diffusers PipelineRegistry not available.")
             return []
-
         try:
-            active_managers = self.registry.get_active_managers()
-            standardized_models = []
-
-            for manager in active_managers:
-                with manager.lock:
-                    config = manager.config
-                    pipeline = manager.pipeline
-
+            active = self.registry.get_active_managers()
+            out = []
+            for m in active:
+                with m.lock:
+                    cfg = m.config
+                    pipe = m.pipeline
                     vram_usage_bytes = 0
-                    if torch.cuda.is_available() and config.get("device") == "cuda" and pipeline:
-                        for component in pipeline.components.values():
-                            if hasattr(component, 'parameters'):
-                                mem_params = sum(p.nelement() * p.element_size() for p in component.parameters())
-                                mem_bufs = sum(b.nelement() * b.element_size() for b in component.buffers())
+                    if torch.cuda.is_available() and cfg.get("device") == "cuda" and pipe:
+                        for comp in pipe.components.values():
+                            if hasattr(comp, 'parameters'):
+                                mem_params = sum(p.nelement() * p.element_size() for p in comp.parameters())
+                                mem_bufs = sum(b.nelement() * b.element_size() for b in comp.buffers())
                                 vram_usage_bytes += (mem_params + mem_bufs)
-
-                    flat_model_info = {
-                        "model_name": config.get("model_name"),
+                    out.append({
+                        "model_name": cfg.get("model_name"),
                         "vram_size": vram_usage_bytes,
-                        "device": config.get("device"),
-                        "torch_dtype": str(pipeline.dtype) if pipeline else config.get("torch_dtype_str"),
-                        "pipeline_type": pipeline.__class__.__name__ if pipeline else "N/A",
-                        "scheduler_class": pipeline.scheduler.__class__.__name__ if pipeline and hasattr(pipeline, 'scheduler') else "N/A",
-                        "status": "Active" if manager.is_loaded else "Idle",
-                        "queue_size": manager.queue.qsize(),
-                    }
-                    standardized_models.append(flat_model_info)
-
-            return standardized_models
-
+                        "device": cfg.get("device"),
+                        "torch_dtype": str(pipe.dtype) if pipe else cfg.get("torch_dtype_str"),
+                        "pipeline_type": pipe.__class__.__name__ if pipe else "N/A",
+                        "scheduler_class": pipe.scheduler.__class__.__name__ if pipe and hasattr(pipe, 'scheduler') else "N/A",
+                        "status": "Active" if m.is_loaded else "Idle",
+                        "queue_size": m.queue.qsize(),
+                        "task": m.current_task or "N/A"
+                    })
+            return out
         except Exception as e:
-            ASCIIColors.error(f"Failed to list running models from Diffusers registry: {e}")
+            ASCIIColors.error(f"Failed to list running models: {e}")
             return []
 
     def _acquire_manager(self):
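
Constructing the binding and inspecting what is resident might look like the following (the paths and model ID are illustrative):

    binding = DiffusersTTIBinding_Impl(
        models_path="models",
        model_name="runwayml/stable-diffusion-v1-5",
        device="auto",
    )
    for entry in binding.ps():
        # Flat dicts: model_name, vram_size, device, torch_dtype, pipeline_type,
        # scheduler_class, status, queue_size, and the new "task" field.
        print(entry["model_name"], entry["status"], entry["task"])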
@@ -565,40 +532,57 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
     def _resolve_device_and_dtype(self):
         if self.config["device"].lower() == "auto":
             self.config["device"] = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
-
         if self.config["torch_dtype_str"].lower() == "auto":
             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
 
+    def _decode_image_input(self, item: str) -> Image.Image:
+        s = item.strip()
+        if s.startswith("data:image/") and ";base64," in s:
+            b64 = s.split(";base64,")[-1]
+            raw = base64.b64decode(b64)
+            return Image.open(BytesIO(raw)).convert("RGB")
+        # Heuristic: a prefix drawn entirely from the base64 alphabet suggests raw base64 data.
+        head = s[:30].replace("\n", "")
+        if head and all(c.isalnum() or c in "+/=" for c in head):
+            try:
+                raw = base64.b64decode(s, validate=True)
+                return Image.open(BytesIO(raw)).convert("RGB")
+            except Exception:
+                pass
+        try:
+            return load_image(s).convert("RGB")
+        except Exception:
+            return Image.open(s).convert("RGB")
+
+    def _prepare_seed(self, kwargs: Dict[str, Any]) -> Optional[torch.Generator]:
+        seed = kwargs.pop("seed", self.config["seed"])
+        if seed == -1:
+            return None
+        return torch.Generator(device=self.config["device"]).manual_seed(seed)
+
     def list_safetensor_models(self) -> List[str]:
-        if not self.models_path.exists(): return []
+        if not self.models_path.exists():
+            return []
         return sorted([f.name for f in self.models_path.iterdir() if f.is_file() and f.suffix == ".safetensors"])
 
     def listModels(self) -> list:
-        # Implementation is unchanged...
         civitai_list = [
             {'model_name': key, 'display_name': info['display_name'], 'description': info['description'], 'owned_by': info['owned_by']}
             for key, info in CIVITAI_MODELS.items()
         ]
-        hf_default_list = [
-            {'model_name': "stabilityai/stable-diffusion-xl-base-1.0", 'display_name': "Stable Diffusion XL 1.0", 'description': "Official SDXL base model from Stability AI. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
-            {'model_name': "playgroundai/playground-v2.5-1024px-aesthetic", 'display_name': "Playground v2.5", 'description': "Known for high aesthetic quality. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
-            {'model_name': "runwayml/stable-diffusion-v1-5", 'display_name': "Stable Diffusion 1.5", 'description': "A popular and versatile open-access text-to-image model.", 'owned_by': 'HuggingFace'},
+        hf_list = [
+            {'model_name': m["model_name"], 'display_name': m["display_name"], 'description': m["desc"], 'owned_by': 'HuggingFace', 'family': m["family"]}
+            for m in self.HF_DEFAULT_MODELS
         ]
-        custom_local_models = []
+        custom_local = []
         civitai_filenames = {info['filename'] for info in CIVITAI_MODELS.values()}
-        local_safetensors = self.list_safetensor_models()
-        for filename in local_safetensors:
+        for filename in self.list_safetensor_models():
             if filename not in civitai_filenames:
-                custom_local_models.append({
-                    'model_name': filename, 'display_name': filename,
-                    'description': 'Local safetensors file from your models folder.', 'owned_by': 'local_user'
-                })
-        return civitai_list + hf_default_list + custom_local_models
+                custom_local.append({'model_name': filename, 'display_name': filename, 'description': 'Local safetensors file.', 'owned_by': 'local_user'})
+        return hf_list + civitai_list + custom_local
 
     def load_model(self):
-        ASCIIColors.info("load_model() called. Loading is now automatic on first use.")
+        ASCIIColors.info("load_model() called. Loading is automatic on first use.")
         if self.model_name and not self.manager:
             self._acquire_manager()
 
     def unload_model(self):
         if self.manager:
@@ -609,55 +593,139 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
     def generate_image(self, prompt: str, negative_prompt: str = "", width: int|None = None, height: int|None = None, **kwargs) -> bytes:
         if not self.model_name:
             raise RuntimeError("No model_name configured. Please select a model in settings.")
-
         if not self.manager:
             self._acquire_manager()
-
-        # Build pipeline arguments, prioritizing kwargs over config defaults
-        seed = kwargs.pop("seed", self.config["seed"])
-        generator = torch.Generator(device=self.config["device"]).manual_seed(seed) if seed != -1 else None
-
+        generator = self._prepare_seed(kwargs)
         pipeline_args = {
             "prompt": prompt,
             "negative_prompt": negative_prompt or None,
             "width": width if width is not None else self.config["default_width"],
             "height": height if height is not None else self.config["default_height"],
-            "num_inference_steps": self.config["num_inference_steps"],
-            "guidance_scale": self.config["guidance_scale"],
-            "generator": generator,
+            "num_inference_steps": kwargs.pop("num_inference_steps", self.config["num_inference_steps"]),
+            "guidance_scale": kwargs.pop("guidance_scale", self.config["guidance_scale"]),
+            "generator": generator
         }
-        # Allow any other valid pipeline kwargs to be passed through
         pipeline_args.update(kwargs)
-
         future = Future()
-        self.manager.queue.put((future, pipeline_args))
-        ASCIIColors.info(f"Job for prompt '{prompt[:50]}...' queued. Waiting...")
-
+        self.manager.queue.put((future, "text2image", pipeline_args))
+        ASCIIColors.info(f"Job (t2i) '{prompt[:50]}...' queued.")
         try:
-            image_bytes = future.result()
-            ASCIIColors.green("Image generated successfully.")
-            return image_bytes
+            return future.result()
         except Exception as e:
             raise Exception(f"Image generation failed: {e}") from e
 
+    def _encode_image_to_latents(self, pil: Image.Image, width: int, height: int) -> Tuple[torch.Tensor, Tuple[int,int]]:
+        pil = pil.convert("RGB").resize((width, height))
+        with self.manager.lock:
+            self.manager._load_pipeline_for_task("text2image")
+            vae = self.manager.pipeline.vae
+        img = torch.frombuffer(bytearray(pil.tobytes()), dtype=torch.uint8).float()  # byte-buffer route; avoids a direct numpy dependency
+        img = img.view(pil.height, pil.width, 3).permute(2,0,1).unsqueeze(0) / 255.0
+        img = (img * 2.0) - 1.0
+        img = img.to(self.config["device"], dtype=getattr(torch, self.config["torch_dtype_str"]))
+        with torch.no_grad():
+            posterior = vae.encode(img)
+            latents = posterior.latent_dist.sample()
+        sf = getattr(vae.config, "scaling_factor", 0.18215)
+        latents = latents * sf
+        return latents, (pil.width, pil.height)
+
+    def edit_image(self,
+                   images: Union[str, List[str]],
+                   prompt: str,
+                   negative_prompt: Optional[str] = "",
+                   mask: Optional[str] = None,
+                   width: Optional[int] = None,
+                   height: Optional[int] = None,
+                   **kwargs) -> bytes:
+        if not self.model_name:
+            raise RuntimeError("No model_name configured. Please select a model in settings.")
+        if not self.manager:
+            self._acquire_manager()
+        imgs = [images] if isinstance(images, str) else list(images)
+        pil_images = [self._decode_image_input(s) for s in imgs]
+        out_w = width if width is not None else self.config["default_width"]
+        out_h = height if height is not None else self.config["default_height"]
+        generator = self._prepare_seed(kwargs)
+        steps = kwargs.pop("num_inference_steps", self.config["num_inference_steps"])
+        guidance = kwargs.pop("guidance_scale", self.config["guidance_scale"])
+        if mask is not None and len(pil_images) == 1:
+            try:
+                mask_img = self._decode_image_input(mask).convert("L")
+            except Exception as e:
+                raise ValueError(f"Failed to decode mask image: {e}") from e
+            pipeline_args = {
+                "image": pil_images[0],
+                "mask_image": mask_img,
+                "prompt": prompt,
+                "negative_prompt": negative_prompt or None,
+                "width": out_w,
+                "height": out_h,
+                "num_inference_steps": steps,
+                "guidance_scale": guidance,
+                "generator": generator
+            }
+            pipeline_args.update(kwargs)
+            future = Future()
+            self.manager.queue.put((future, "inpainting", pipeline_args))
+            ASCIIColors.info("Job (inpaint) queued.")
+            return future.result()
+        try:
+            pipeline_args = {
+                "image": pil_images if len(pil_images) > 1 else pil_images[0],
+                "prompt": prompt,
+                "negative_prompt": negative_prompt or None,
+                "strength": kwargs.pop("strength", 0.6),
+                "width": out_w,
+                "height": out_h,
+                "num_inference_steps": steps,
+                "guidance_scale": guidance,
+                "generator": generator
+            }
+            pipeline_args.update(kwargs)
+            future = Future()
+            self.manager.queue.put((future, "image2image", pipeline_args))
+            ASCIIColors.info("Job (i2i) queued.")
+            return future.result()
+        except Exception:
+            pass
+        try:
+            base = pil_images[0]
+            latents, _ = self._encode_image_to_latents(base, out_w, out_h)
+            pipeline_args = {
+                "prompt": prompt,
+                "negative_prompt": negative_prompt or None,
+                "latents": latents,
+                "num_inference_steps": steps,
+                "guidance_scale": guidance,
+                "generator": generator,
+                "width": out_w,
+                "height": out_h
+            }
+            pipeline_args.update(kwargs)
+            future = Future()
+            self.manager.queue.put((future, "text2image", pipeline_args))
+            ASCIIColors.info("Job (t2i with init latents) queued.")
+            return future.result()
+        except Exception as e:
+            raise Exception(f"Image edit failed: {e}") from e
+
     def list_local_models(self) -> List[str]:
-        # Implementation is unchanged...
-        if not self.models_path.exists(): return []
+        if not self.models_path.exists():
+            return []
         folders = [
             d.name for d in self.models_path.iterdir()
             if d.is_dir() and ((d / "model_index.json").exists() or (d / "unet" / "config.json").exists())
         ]
         safetensors = self.list_safetensor_models()
         return sorted(folders + safetensors)
 
     def list_available_models(self) -> List[str]:
-        # Implementation is unchanged...
-        discoverable_models = [m['model_name'] for m in self.listModels()]
+        discoverable = [m['model_name'] for m in self.listModels()]
         local_models = self.list_local_models()
-        return sorted(list(set(local_models + discoverable_models)))
+        return sorted(list(set(local_models + discoverable)))
 
     def list_services(self, **kwargs) -> List[Dict[str, str]]:
-        # Implementation is unchanged...
         models = self.list_available_models()
         local_models = self.list_local_models()
         if not models:
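
A usage sketch for the new edit_image entry point defined above: with a mask it routes to the inpainting pipeline; without one it tries image-to-image first, then falls back to re-encoding the input through the VAE as initial latents. The file names here are placeholders, and the white-regions-repainted mask convention is the usual diffusers inpainting behavior:

    png_bytes = binding.edit_image(
        images="photo.png",        # path, URL, raw base64, or data: URL
        prompt="replace the sky with a sunset",
        mask="sky_mask.png",       # grayscale mask; white regions are repainted
        width=512,
        height=512,
        seed=42,
    )
    with open("edited.png", "wb") as f:
        f.write(png_bytes)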
@@ -665,8 +733,10 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         services = []
         for m in models:
             help_text = "Hugging Face model ID"
-            if m in local_models: help_text = f"Local model from: {self.models_path.resolve()}"
-            elif m in CIVITAI_MODELS: help_text = f"Civitai model (downloads as {CIVITAI_MODELS[m]['filename']})"
+            if m in local_models:
+                help_text = f"Local model from: {self.models_path.resolve()}"
+            elif m in CIVITAI_MODELS:
+                help_text = f"Civitai model (downloads as {CIVITAI_MODELS[m]['filename']})"
             services.append({"name": m, "caption": f"Diffusers: {m}", "help": help_text})
         return services
 
@@ -675,83 +745,70 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         return [
             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Local, Civitai, or Hugging Face model.", "options": available_models},
             {"name": "unload_inactive_model_after", "type": "int", "value": self.config["unload_inactive_model_after"], "description": "Unload model after X seconds of inactivity (0 to disable)."},
-            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Inference device. Resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
-            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Inference device. Resolved: {self.config['device']}", "options": ["auto","cuda","mps","cpu"]},
+            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Resolved: {self.config['torch_dtype_str']}", "options": ["auto","float16","bfloat16","float32"]},
             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "HF model variant (e.g., 'fp16')."},
             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer .safetensors when loading from Hugging Face."},
             {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker."},
             {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
-            {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
+            {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload."},
             {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-            {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default image width. Note: SDXL models prefer 1024."},
-            {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default image height. Note: SDXL models prefer 1024."},
+            {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default image width."},
+            {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default image height."},
             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
             {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "HF API token (for private/gated models).", "is_secret": True},
             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to HF cache."},
-            {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Do not download from Hugging Face."},
+            {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Do not download from Hugging Face."}
         ]
 
     def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-        parsed_settings = settings if isinstance(settings, dict) else \
-            {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
-
+        parsed = settings if isinstance(settings, dict) else {i["name"]: i["value"] for i in settings if "name" in i and "value" in i}
         critical_keys = self.registry._get_critical_keys()
-        needs_manager_swap = False
-
-        for key, value in parsed_settings.items():
+        needs_swap = False
+        for key, value in parsed.items():
             if self.config.get(key) != value:
                 ASCIIColors.info(f"Setting '{key}' changed to: {value}")
                 self.config[key] = value
-                if key == "model_name": self.model_name = value
-                if key in critical_keys: needs_manager_swap = True
-
-        if needs_manager_swap and self.model_name:
+                if key == "model_name":
+                    self.model_name = value
+                if key in critical_keys:
+                    needs_swap = True
+        if needs_swap and self.model_name:
             ASCIIColors.info("Critical settings changed. Swapping model manager...")
             self._resolve_device_and_dtype()
             self._acquire_manager()
-
-        if not needs_manager_swap and self.manager:
-            # Update non-critical settings on the existing manager
-            self.manager.config.update(parsed_settings)
-            if 'scheduler_name' in parsed_settings and self.manager.pipeline:
-                with self.manager.lock:
+        if not needs_swap and self.manager:
+            self.manager.config.update(parsed)
+            if 'scheduler_name' in parsed and self.manager.pipeline:
+                with self.manager.lock:
                     self.manager._set_scheduler()
-
         return True
 
     def __del__(self):
         self.unload_model()
 
-# Example Usage
 if __name__ == '__main__':
     ASCIIColors.magenta("--- Diffusers TTI Binding Test ---")
-
     if not DIFFUSERS_AVAILABLE:
         ASCIIColors.error("Diffusers not available. Cannot run test.")
         exit(1)
-
-    temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
+    temp_paths_dir = Path(__file__).parent / "tmp"
     temp_models_path = temp_paths_dir / "models"
-
-    if temp_paths_dir.exists(): shutil.rmtree(temp_paths_dir)
+    if temp_paths_dir.exists():
+        shutil.rmtree(temp_paths_dir)
     temp_models_path.mkdir(parents=True, exist_ok=True)
-
     try:
-        ASCIIColors.cyan("\n--- Test: Loading a Hugging Face model ---")
-        # Using a very small model for fast testing
-        binding_config = {"models_path": str(temp_models_path), "model_name": "hf-internal-testing/tiny-stable-diffusion-torch"}
-        binding = DiffusersTTIBinding_Impl(**binding_config)
-
+        ASCIIColors.cyan("\n--- Test: Loading a small HF model ---")
+        cfg = {"models_path": str(temp_models_path), "model_name": "hf-internal-testing/tiny-stable-diffusion-torch"}
+        binding = DiffusersTTIBinding_Impl(**cfg)
         img_bytes = binding.generate_image("a tiny robot", width=64, height=64, num_inference_steps=2)
-        assert len(img_bytes) > 1000, "Image generation from HF model should succeed."
-        ASCIIColors.green("HF model loading and generation successful.")
-
+        assert len(img_bytes) > 1000
+        ASCIIColors.green("HF t2i generation OK.")
         del binding
         time.sleep(0.1)
-
     except Exception as e:
         trace_exception(e)
         ASCIIColors.error(f"Diffusers binding test failed: {e}")
@@ -759,4 +816,4 @@ if __name__ == '__main__':
         ASCIIColors.cyan("\nCleaning up temporary directories...")
         if temp_paths_dir.exists():
             shutil.rmtree(temp_paths_dir)
-    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
+    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")