lollms-client 1.1.0-py3-none-any.whl → 1.1.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

lollms_client/__init__.py CHANGED
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
  from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
  from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
 
- __version__ = "1.1.0" # Updated version
+ __version__ = "1.1.1" # Updated version
 
  # Optionally, you could define __all__ if you want to be explicit about exports
  __all__ = [
lollms_client/llm_bindings/ollama/__init__.py CHANGED
@@ -434,7 +434,7 @@ class OllamaBinding(LollmsLLMBinding):
              list: List of individual characters.
          """
          ## Since ollama has no endpoints to tokenize the text, we use tiktoken to have a rough estimate
-         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text)
+         return tiktoken.model.encoding_for_model("gpt-3.5-turbo").encode(text, disallowed_special=())
 
      def detokenize(self, tokens: list) -> str:
          """
lollms_client/lollms_llm_binding.py CHANGED
@@ -373,7 +373,8 @@ class LollmsLLMBinding(ABC):
          if messages[-1]["content"]=="":
              del messages[-1]
          return messages
-
+     def ps(self):
+         return []
 
 
 
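The new `ps()` stub gives every LLM binding a uniform "list loaded models" entry point that returns an empty list unless a concrete binding overrides it. For a backend that can actually report this, an override could wrap the server's status endpoint; the helper below is a hypothetical sketch against Ollama's documented `/api/ps` route, not code from this package:

```python
import requests

def ollama_ps(host_address: str = "http://localhost:11434") -> list:
    """Sketch: list models currently loaded by an Ollama server via /api/ps."""
    resp = requests.get(f"{host_address}/api/ps", timeout=10)
    resp.raise_for_status()
    # Ollama returns {"models": [...]}; mirror ps()'s list-of-models contract.
    return resp.json().get("models", [])
```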
lollms_client/tti_bindings/diffusers/__init__.py CHANGED
@@ -4,32 +4,99 @@ import importlib
  from io import BytesIO
  from typing import Optional, List, Dict, Any, Union
  from pathlib import Path
+ import pipmaster as pm
+ # --- Concurrency Imports ---
+ import threading
+ import queue
+ from concurrent.futures import Future
+ import time
+ import hashlib
+ import re
+ # -------------------------
+ # --- Download Imports ---
+ import requests
+ from tqdm import tqdm
+ # --------------------
+
+ pm.ensure_packages(["torch","torchvision"],index_url="https://download.pytorch.org/whl/cu126")
+ pm.ensure_packages(["diffusers","pillow","transformers","safetensors", "requests", "tqdm"])
 
  # Attempt to import core dependencies and set availability flag
  try:
      import torch
-     from diffusers import AutoPipelineForText2Image, DiffusionPipeline
-     from diffusers.utils import load_image # Potentially for future img2img etc.
+     from diffusers import AutoPipelineForText2Image, DiffusionPipeline, StableDiffusionPipeline
+     from diffusers.utils import load_image
      from PIL import Image
      DIFFUSERS_AVAILABLE = True
  except ImportError:
      torch = None
      AutoPipelineForText2Image = None
      DiffusionPipeline = None
+     StableDiffusionPipeline = None
      Image = None
      load_image = None
      DIFFUSERS_AVAILABLE = False
-     # A detailed error will be raised in __init__ if the user tries to use the binding.
 
  from lollms_client.lollms_tti_binding import LollmsTTIBinding
  from ascii_colors import trace_exception, ASCIIColors
- import json # For potential JSONDecodeError and settings
+ import json
  import shutil
 
  # Defines the binding name for the manager
  BindingName = "DiffusersTTIBinding_Impl"
 
- # Helper for torch.dtype string conversion, handles case where torch is not installed
+ # --- START: Civitai Model Definitions ---
+ # Expanded list of popular Civitai models (as single .safetensors files)
+ CIVITAI_MODELS = {
+     # Photorealistic
+     "realistic-vision-v6": {
+         "display_name": "Realistic Vision V6.0",
+         "url": "https://civitai.com/api/download/models/130072",
+         "filename": "realisticVisionV60_v60B1.safetensors",
+         "description": "One of the most popular photorealistic models.",
+         "owned_by": "civitai"
+     },
+     "absolute-reality": {
+         "display_name": "Absolute Reality",
+         "url": "https://civitai.com/api/download/models/132760",
+         "filename": "absolutereality_v181.safetensors",
+         "description": "A top-tier model for generating realistic images.",
+         "owned_by": "civitai"
+     },
+     # Artistic / General Purpose
+     "dreamshaper-8": {
+         "display_name": "DreamShaper 8",
+         "url": "https://civitai.com/api/download/models/128713",
+         "filename": "dreamshaper_8.safetensors",
+         "description": "A very popular and versatile general-purpose model.",
+         "owned_by": "civitai"
+     },
+     "juggernaut-xl": {
+         "display_name": "Juggernaut XL",
+         "url": "https://civitai.com/api/download/models/133005",
+         "filename": "juggernautXL_version6Rundiffusion.safetensors",
+         "description": "High-quality artistic model, great for cinematic styles (SDXL-based).",
+         "owned_by": "civitai"
+     },
+     # Anime
+     "anything-v5": {
+         "display_name": "Anything V5",
+         "url": "https://civitai.com/api/download/models/9409",
+         "filename": "anythingV5_PrtRE.safetensors",
+         "description": "A classic and highly popular model for anime-style generation.",
+         "owned_by": "civitai"
+     },
+     "lyriel-v1.6": {
+         "display_name": "Lyriel v1.6",
+         "url": "https://civitai.com/api/download/models/92407",
+         "filename": "lyriel_v16.safetensors",
+         "description": "A popular artistic model for fantasy and stylized images.",
+         "owned_by": "civitai"
+     }
+ }
+ # --- END: Civitai Model Definitions ---
+
+ # Helper for torch.dtype string conversion
  TORCH_DTYPE_MAP_STR_TO_OBJ = {
      "float16": getattr(torch, 'float16', 'float16'),
      "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
@@ -37,210 +104,214 @@ TORCH_DTYPE_MAP_STR_TO_OBJ = {
      "auto": "auto"
  }
  TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
- if torch: # Add None mapping if torch is loaded
+ if torch:
      TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"
 
-
- # Common Schedulers mapping (User-friendly name to Class name)
+ # Common Schedulers mapping
  SCHEDULER_MAPPING = {
-     "default": None, # Use model's default
-     "ddim": "DDIMScheduler",
-     "ddpm": "DDPMScheduler",
-     "deis_multistep": "DEISMultistepScheduler",
-     "dpm_multistep": "DPMSolverMultistepScheduler",
-     "dpm_multistep_karras": "DPMSolverMultistepScheduler",
-     "dpm_single": "DPMSolverSinglestepScheduler",
-     "dpm_adaptive": "DPMSolverPlusPlusScheduler",
-     "dpm++_2m": "DPMSolverMultistepScheduler",
-     "dpm++_2m_karras": "DPMSolverMultistepScheduler",
-     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler",
-     "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
-     "dpm++_sde": "DPMSolverSDEScheduler",
-     "dpm++_sde_karras": "DPMSolverSDEScheduler",
-     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler",
-     "euler_discrete": "EulerDiscreteScheduler",
-     "heun_discrete": "HeunDiscreteScheduler",
-     "heun_karras": "HeunDiscreteScheduler",
-     "lms_discrete": "LMSDiscreteScheduler",
-     "lms_karras": "LMSDiscreteScheduler",
-     "pndm": "PNDMScheduler",
-     "unipc_multistep": "UniPCMultistepScheduler",
+     "default": None, "ddim": "DDIMScheduler", "ddpm": "DDPMScheduler", "deis_multistep": "DEISMultistepScheduler",
+     "dpm_multistep": "DPMSolverMultistepScheduler", "dpm_multistep_karras": "DPMSolverMultistepScheduler",
+     "dpm_single": "DPMSolverSinglestepScheduler", "dpm_adaptive": "DPMSolverPlusPlusScheduler",
+     "dpm++_2m": "DPMSolverMultistepScheduler", "dpm++_2m_karras": "DPMSolverMultistepScheduler",
+     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler", "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
+     "dpm++_sde": "DPMSolverSDEScheduler", "dpm++_sde_karras": "DPMSolverSDEScheduler",
+     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler", "euler_discrete": "EulerDiscreteScheduler",
+     "heun_discrete": "HeunDiscreteScheduler", "heun_karras": "HeunDiscreteScheduler",
+     "lms_discrete": "LMSDiscreteScheduler", "lms_karras": "LMSDiscreteScheduler",
+     "pndm": "PNDMScheduler", "unipc_multistep": "UniPCMultistepScheduler",
  }
  SCHEDULER_USES_KARRAS_SIGMAS = [
      "dpm_multistep_karras", "dpm++_2m_karras", "dpm++_2s_ancestral_karras",
      "dpm++_sde_karras", "heun_karras", "lms_karras"
  ]
 
+ # --- START: Concurrency and Singleton Management ---
 
- class DiffusersTTIBinding_Impl(LollmsTTIBinding):
+ class ModelManager:
      """
-     Concrete implementation of LollmsTTIBinding for Hugging Face Diffusers library.
-     Allows running various text-to-image models locally.
+     Manages a single pipeline instance, its generation queue, and a worker thread.
+     This ensures all interactions with a specific model are thread-safe.
      """
-     DEFAULT_CONFIG = {
-         "model_name": "",
-         "device": "auto",
-         "torch_dtype_str": "auto",
-         "use_safetensors": True,
-         "scheduler_name": "default",
-         "safety_checker_on": True,
-         "num_inference_steps": 25,
-         "guidance_scale": 7.5,
-         "default_width": 768,
-         "default_height": 768,
-         "seed": -1,
-         "enable_cpu_offload": False,
-         "enable_sequential_cpu_offload": False,
-         "enable_xformers": False,
-         "hf_variant": None,
-         "hf_token": None,
-         "hf_cache_path": None,
-         "local_files_only": False,
-     }
-
-     def __init__(self, **kwargs):
-         """
-         Initialize the Diffusers TTI binding.
-
-         Args:
-             **kwargs: A dictionary of configuration parameters.
-                 Expected keys:
-                 - model_name (str): The name of the model to use. Can be a Hugging Face Hub ID
-                   (e.g., 'stabilityai/stable-diffusion-xl-base-1.0') or the name of a local
-                   model directory located in `models_path`.
-                 - models_path (str or Path): The path to the directory where local models are stored.
-                   Defaults to a 'models' folder next to this file.
-                 - hf_cache_path (str or Path, optional): Path to a directory for Hugging Face
-                   to cache downloaded models and files.
-                 - Other settings from the DEFAULT_CONFIG can be overridden here.
-         """
-         super().__init__(binding_name=BindingName)
-
-         if not DIFFUSERS_AVAILABLE:
-             raise ImportError(
-                 "Diffusers library or its dependencies (torch, Pillow, transformers) are not installed. "
-                 "Please install them using: pip install torch diffusers Pillow transformers safetensors"
-             )
-
-         # Merge default config with user-provided kwargs
-         self.config = {**self.DEFAULT_CONFIG, **kwargs}
-
-         # model_name is crucial, get it from the merged config
-         self.model_name = self.config.get("model_name", "")
-
-         # models_path is also special, handle it with its default logic
-         self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
-         self.models_path.mkdir(parents=True, exist_ok=True)
-
+     def __init__(self, config: Dict[str, Any], models_path: Path):
+         self.config = config
+         self.models_path = models_path
          self.pipeline: Optional[DiffusionPipeline] = None
-         self.current_model_id_or_path = None
-
-         self._resolve_device_and_dtype()
+         self.ref_count = 0
+         self.lock = threading.Lock()
+         self.queue = queue.Queue()
+         self.worker_thread = threading.Thread(target=self._generation_worker, daemon=True)
+         self._stop_event = threading.Event()
+         self.is_loaded = False
 
-         if self.model_name:
-             self.load_model()
-         else:
-             ASCIIColors.warning("No model_name provided during initialization. The binding is idle.")
+         self.worker_thread.start()
 
+     def acquire(self):
+         with self.lock:
+             self.ref_count += 1
+             return self
 
-     def _resolve_device_and_dtype(self):
-         """Resolves auto settings for device and dtype from config."""
-         if self.config["device"].lower() == "auto":
-             if torch.cuda.is_available():
-                 self.config["device"] = "cuda"
-             elif torch.backends.mps.is_available():
-                 self.config["device"] = "mps"
-             else:
-                 self.config["device"] = "cpu"
+     def release(self):
+         with self.lock:
+             self.ref_count -= 1
+             return self.ref_count
 
-         if self.config["torch_dtype_str"].lower() == "auto":
-             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
+     def stop(self):
+         self._stop_event.set()
+         self.queue.put(None)
+         self.worker_thread.join(timeout=5)
 
-         self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-         if self.torch_dtype == "auto": # Final fallback
-             self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
-         self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
+     def _load_pipeline(self):
+         if self.pipeline:
+             return
 
-     def _resolve_model_path(self, model_name: str) -> str:
-         """
-         Resolves a model name to a full path if it's a local model,
-         otherwise returns it as is (assuming it's a Hugging Face Hub ID).
-         """
+         model_name = self.config.get("model_name", "")
          if not model_name:
-             raise ValueError("Model name cannot be empty.")
+             raise ValueError("Model name cannot be empty for loading.")
 
-         if Path(model_name).is_absolute() and Path(model_name).is_dir():
-             ASCIIColors.info(f"Using absolute path for model: {model_name}")
-             return model_name
-
-         local_model_path = self.models_path / model_name
-         if local_model_path.exists() and local_model_path.is_dir():
-             ASCIIColors.info(f"Found local model in '{self.models_path}': {local_model_path}")
-             return str(local_model_path)
+         ASCIIColors.info(f"Loading Diffusers model: {model_name}")
+         model_path = self._resolve_model_path(model_name)
+         torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower())
 
-         ASCIIColors.info(f"'{model_name}' not found locally. Assuming it is a Hugging Face Hub ID.")
-         return model_name
+         try:
+             if str(model_path).endswith(".safetensors"):
+                 ASCIIColors.info(f"Loading from single safetensors file: {model_path}")
+                 try:
+                     # Modern, preferred method for newer diffusers versions
+                     self.pipeline = AutoPipelineForText2Image.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+                 except AttributeError:
+                     # Fallback for older diffusers versions
+                     ASCIIColors.warning("AutoPipelineForText2Image.from_single_file not found. Falling back to StableDiffusionPipeline.")
+                     ASCIIColors.warning("Consider updating diffusers for better compatibility: pip install --upgrade diffusers")
+                     self.pipeline = StableDiffusionPipeline.from_single_file(
+                         model_path,
+                         torch_dtype=torch_dtype,
+                         cache_dir=self.config.get("hf_cache_path")
+                     )
+             else:
+                 ASCIIColors.info(f"Loading from pretrained folder/repo: {model_path}")
+                 load_args = {
+                     "torch_dtype": torch_dtype, "use_safetensors": self.config["use_safetensors"],
+                     "token": self.config["hf_token"], "local_files_only": self.config["local_files_only"],
+                 }
+                 if self.config["hf_variant"]: load_args["variant"] = self.config["hf_variant"]
+                 if not self.config["safety_checker_on"]: load_args["safety_checker"] = None
+                 if self.config.get("hf_cache_path"): load_args["cache_dir"] = str(self.config["hf_cache_path"])
+                 self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
 
-     def load_model(self):
-         """Loads the Diffusers pipeline based on current configuration."""
-         ASCIIColors.info("Loading Diffusers model...")
-         if self.pipeline is not None:
-             self.unload_model()
+         except Exception as e:
+             error_str = str(e).lower()
+             if "401" in error_str or "gated" in error_str or "authorization" in error_str:
+                 auth_error_msg = (
+                     f"AUTHENTICATION FAILED for model '{model_name}'. This is likely a 'gated' model on Hugging Face.\n"
+                     "Please ensure you have accepted its license and provided a valid HF Access Token in the settings."
+                 )
+                 raise RuntimeError(auth_error_msg) from e
+             else:
+                 raise e
 
-         try:
-             model_path = self._resolve_model_path(self.model_name)
-             self.current_model_id_or_path = model_path
-
-             load_args = {
-                 "torch_dtype": self.torch_dtype,
-                 "use_safetensors": self.config["use_safetensors"],
-                 "token": self.config["hf_token"],
-                 "local_files_only": self.config["local_files_only"],
-             }
-             if self.config["hf_variant"]:
-                 load_args["variant"] = self.config["hf_variant"]
-
-             if not self.config["safety_checker_on"]:
-                 load_args["safety_checker"] = None
-
-             if self.config.get("hf_cache_path"):
-                 load_args["cache_dir"] = str(self.config["hf_cache_path"])
+         self._set_scheduler()
+         self.pipeline.to(self.config["device"])
 
-             self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
-
-             self._set_scheduler()
+         if self.config["enable_xformers"]:
+             try:
+                 self.pipeline.enable_xformers_memory_efficient_attention()
+             except Exception as e:
+                 ASCIIColors.warning(f"Could not enable xFormers: {e}.")
+
+         if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_model_cpu_offload()
+         elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
+             self.pipeline.enable_sequential_cpu_offload()
+
+         self.is_loaded = True
+         ASCIIColors.green(f"Model '{model_name}' loaded successfully on '{self.config['device']}'.")
 
-             self.pipeline.to(self.config["device"])
+     def _unload_pipeline(self):
+         if self.pipeline:
+             del self.pipeline
+             self.pipeline = None
+             if torch and torch.cuda.is_available():
+                 torch.cuda.empty_cache()
+         self.is_loaded = False
+         ASCIIColors.info(f"Model '{self.config.get('model_name')}' unloaded.")
 
-             if self.config["enable_xformers"]:
+     def _generation_worker(self):
+         while not self._stop_event.is_set():
+             try:
+                 job = self.queue.get(timeout=1)
+                 if job is None:
+                     break
+                 future, pipeline_args = job
                  try:
-                     self.pipeline.enable_xformers_memory_efficient_attention()
-                     ASCIIColors.info("xFormers memory efficient attention enabled.")
+                     with self.lock:
+                         if not self.pipeline:
+                             self._load_pipeline()
+                         with torch.no_grad():
+                             pipeline_output = self.pipeline(**pipeline_args)
+                         pil_image: Image.Image = pipeline_output.images[0]
+                         img_byte_arr = BytesIO()
+                         pil_image.save(img_byte_arr, format="PNG")
+                         future.set_result(img_byte_arr.getvalue())
                  except Exception as e:
-                     ASCIIColors.warning(f"Could not enable xFormers: {e}. Proceeding without it.")
+                     trace_exception(e)
+                     future.set_exception(e)
+                 finally:
+                     self.queue.task_done()
+             except queue.Empty:
+                 continue
+
+     def _download_civitai_model(self, model_key: str):
+         model_info = CIVITAI_MODELS[model_key]
+         url = model_info["url"]
+         filename = model_info["filename"]
+         dest_path = self.models_path / filename
+         temp_path = dest_path.with_suffix(".temp")
+
+         ASCIIColors.cyan(f"Downloading '{filename}' from Civitai...")
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 total_size = int(r.headers.get('content-length', 0))
+                 with open(temp_path, 'wb') as f, tqdm(
+                     total=total_size, unit='iB', unit_scale=True, desc=f"Downloading {filename}"
+                 ) as bar:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+                         bar.update(len(chunk))
 
-             if self.config["enable_cpu_offload"] and self.config["device"] != "cpu":
-                 self.pipeline.enable_model_cpu_offload()
-                 ASCIIColors.info("Model CPU offload enabled.")
-             elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
-                 self.pipeline.enable_sequential_cpu_offload()
-                 ASCIIColors.info("Sequential CPU offload enabled.")
-
-             ASCIIColors.green(f"Diffusers model '{model_path}' loaded on device '{self.config['device']}'.")
-
+             shutil.move(temp_path, dest_path)
+             ASCIIColors.green(f"Model '{filename}' downloaded successfully.")
          except Exception as e:
-             trace_exception(e)
-             self.pipeline = None
-             raise RuntimeError(f"Failed to load Diffusers model '{self.model_name}': {e}") from e
+             if temp_path.exists():
+                 temp_path.unlink()
+             raise Exception(f"Failed to download model {filename}: {e}") from e
+
+     def _resolve_model_path(self, model_name: str) -> Union[str, Path]:
+         path_obj = Path(model_name)
+         if path_obj.is_absolute() and path_obj.exists():
+             return model_name
+
+         if model_name in CIVITAI_MODELS:
+             filename = CIVITAI_MODELS[model_name]["filename"]
+             local_path = self.models_path / filename
+             if not local_path.exists():
+                 self._download_civitai_model(model_name)
+             return local_path
+
+         local_path = self.models_path / model_name
+         if local_path.exists():
+             return local_path
+
+         return model_name
 
      def _set_scheduler(self):
-         """Sets the scheduler for the pipeline based on config."""
          if not self.pipeline: return
-
         scheduler_name_key = self.config["scheduler_name"].lower()
-         if scheduler_name_key == "default":
-             ASCIIColors.info(f"Using model's default scheduler: {self.pipeline.scheduler.__class__.__name__}")
-             return
+         if scheduler_name_key == "default": return
 
          scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
          if scheduler_class_name:
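The core of the new `ModelManager` is a single-consumer job queue: callers enqueue a `(Future, pipeline_args)` pair and block on `Future.result()` while the dedicated worker thread loads the pipeline on first use and fulfils the future. Because every request funnels through one thread, the GPU pipeline never sees concurrent calls, which is the thread-safety guarantee the class docstring claims. A self-contained miniature of that hand-off (a generic stand-in, not the binding's API):

```python
import queue
import threading
from concurrent.futures import Future

jobs = queue.Queue()

def worker():
    while True:
        job = jobs.get()
        if job is None:          # sentinel, mirrors ModelManager.stop()
            break
        future, args = job
        try:
            future.set_result(sum(args))  # stand-in for self.pipeline(**args)
        except Exception as e:
            future.set_exception(e)
        finally:
            jobs.task_done()

threading.Thread(target=worker, daemon=True).start()

f = Future()
jobs.put((f, [1, 2, 3]))
print(f.result())  # caller blocks until the worker fulfils the future -> 6
```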
@@ -249,25 +320,148 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                  scheduler_config = self.pipeline.scheduler.config
                  scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
                  self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
-                 ASCIIColors.info(f"Switched scheduler to {scheduler_name_key} ({scheduler_class_name}).")
              except Exception as e:
                  ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
-         else:
-             ASCIIColors.warning(f"Unknown scheduler: '{self.config['scheduler_name']}'. Using model default.")
+
+ class PipelineRegistry:
+     _instance = None
+     _lock = threading.Lock()
+
+     def __new__(cls, *args, **kwargs):
+         with cls._lock:
+             if cls._instance is None:
+                 cls._instance = super().__new__(cls)
+                 cls._instance._managers = {}
+                 cls._instance._registry_lock = threading.Lock()
+             return cls._instance
+
+     def _get_config_key(self, config: Dict[str, Any]) -> str:
+         critical_keys = [
+             "model_name", "device", "torch_dtype_str", "use_safetensors",
+             "safety_checker_on", "hf_variant", "enable_cpu_offload",
+             "enable_sequential_cpu_offload", "enable_xformers",
+             "local_files_only", "hf_cache_path"
+         ]
+         key_data = tuple(sorted((k, config.get(k)) for k in critical_keys))
+         return hashlib.sha256(str(key_data).encode('utf-8')).hexdigest()
+
+     def get_manager(self, config: Dict[str, Any], models_path: Path) -> ModelManager:
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key not in self._managers:
+                 self._managers[key] = ModelManager(config.copy(), models_path)
+             return self._managers[key].acquire()
+
+     def release_manager(self, config: Dict[str, Any]):
+         key = self._get_config_key(config)
+         with self._registry_lock:
+             if key in self._managers:
+                 manager = self._managers[key]
+                 ref_count = manager.release()
+                 if ref_count == 0:
+                     ASCIIColors.info(f"Reference count for model '{config.get('model_name')}' is zero. Cleaning up.")
+                     manager.stop()
+                     manager._unload_pipeline()
+                     del self._managers[key]
+
+ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
+     DEFAULT_CONFIG = {
+         "model_name": "", "device": "auto", "torch_dtype_str": "auto", "use_safetensors": True,
+         "scheduler_name": "default", "safety_checker_on": True, "num_inference_steps": 25,
+         "guidance_scale": 7.0, "default_width": 512, "default_height": 512, "seed": -1,
+         "enable_cpu_offload": False, "enable_sequential_cpu_offload": False, "enable_xformers": False,
+         "hf_variant": None, "hf_token": None, "hf_cache_path": None, "local_files_only": False,
+     }
+
+     def __init__(self, **kwargs):
+         super().__init__(binding_name=BindingName)
+
+         if not DIFFUSERS_AVAILABLE:
+             raise ImportError(
+                 "Diffusers or its dependencies not installed. "
+                 "Please run: pip install torch torchvision diffusers Pillow transformers safetensors requests tqdm"
+             )
+
+         self.config = {**self.DEFAULT_CONFIG, **kwargs}
+         self.model_name = self.config.get("model_name", "")
+         self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
+         self.models_path.mkdir(parents=True, exist_ok=True)
+
+         self.registry = PipelineRegistry()
+         self.manager: Optional[ModelManager] = None
+
+         self._resolve_device_and_dtype()
+         if self.model_name:
+             self._acquire_manager()
+
+     def _acquire_manager(self):
+         if self.manager:
+             self.registry.release_manager(self.manager.config)
+         self.manager = self.registry.get_manager(self.config, self.models_path)
+         ASCIIColors.info(f"Binding instance acquired manager for '{self.config['model_name']}'.")
+
+     def _resolve_device_and_dtype(self):
+         if self.config["device"].lower() == "auto":
+             self.config["device"] = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
+
+         if self.config["torch_dtype_str"].lower() == "auto":
+             self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
+
+     def list_safetensor_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+         return sorted([f.name for f in self.models_path.iterdir() if f.is_file() and f.suffix == ".safetensors"])
+
+     def listModels(self) -> list:
+         # Start with hardcoded Civitai and Hugging Face models
+         civitai_list = [
+             {'model_name': key, 'display_name': info['display_name'], 'description': info['description'], 'owned_by': info['owned_by']}
+             for key, info in CIVITAI_MODELS.items()
+         ]
+         hf_default_list = [
+             # SDXL Models (1024x1024 native)
+             {'model_name': "stabilityai/stable-diffusion-xl-base-1.0", 'display_name': "Stable Diffusion XL 1.0", 'description': "Official SDXL base model from Stability AI. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             {'model_name': "playgroundai/playground-v2.5-1024px-aesthetic", 'display_name': "Playground v2.5", 'description': "Known for high aesthetic quality. Native resolution is 1024x1024.", 'owned_by': 'HuggingFace'},
+             # SD 1.5 Models (512x512 native)
+             {'model_name': "runwayml/stable-diffusion-v1-5", 'display_name': "Stable Diffusion 1.5", 'description': "A popular and versatile open-access text-to-image model.", 'owned_by': 'HuggingFace'},
+             {'model_name': "dataautogpt3/OpenDalleV1.1", 'display_name': "OpenDalle v1.1", 'description': "An open-source reproduction of DALL-E 3, good for prompt adherence.", 'owned_by': 'HuggingFace'},
+             {'model_name': "stabilityai/stable-diffusion-2-1-base", 'display_name': "Stable Diffusion 2.1 (512px)", 'description': "A 512x512 resolution model from Stability AI.", 'owned_by': 'HuggingFace'},
+             {'model_name': "CompVis/stable-diffusion-v1-4", 'display_name': "Stable Diffusion 1.4 (Gated)", 'description': "Original SD v1.4. Requires accepting license on Hugging Face and an HF token.", 'owned_by': 'HuggingFace'}
+         ]
+
+         # Discover local .safetensors files
+         custom_local_models = []
+         civitai_filenames = {info['filename'] for info in CIVITAI_MODELS.values()}
+         local_safetensors = self.list_safetensor_models()
+
+         for filename in local_safetensors:
+             if filename not in civitai_filenames:
+                 custom_local_models.append({
+                     'model_name': filename,
+                     'display_name': filename,
+                     'description': 'Local safetensors file from your models folder.',
+                     'owned_by': 'local_user'
+                 })
+
+         return civitai_list + hf_default_list + custom_local_models
+
+     def load_model(self):
+         ASCIIColors.info("load_model() called. Loading is now automatic.")
+         if self.model_name and not self.manager:
+             self._acquire_manager()
 
      def unload_model(self):
-         if self.pipeline is not None:
-             del self.pipeline
-             self.pipeline = None
-             if torch and torch.cuda.is_available():
-                 torch.cuda.empty_cache()
-             ASCIIColors.info("Diffusers pipeline unloaded.")
+         if self.manager:
+             ASCIIColors.info(f"Binding instance releasing manager for '{self.manager.config['model_name']}'.")
+             self.registry.release_manager(self.manager.config)
+             self.manager = None
 
      def generate_image(self, prompt: str, negative_prompt: str = "", width: int = None, height: int = None, **kwargs) -> bytes:
-         """Generates an image using the loaded Diffusers pipeline."""
-         if not self.pipeline:
-             raise RuntimeError("Diffusers pipeline is not loaded. Cannot generate image.")
-
+         if not self.model_name:
+             raise RuntimeError("No model_name configured. Please select a model in settings.")
+
+         if not self.manager:
+             self._acquire_manager()
+
          _width = width or self.config["default_width"]
          _height = height or self.config["default_height"]
          _num_inference_steps = kwargs.get("num_inference_steps", self.config["num_inference_steps"])
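`PipelineRegistry` keys its managers by hashing only the settings that force a different pipeline object, so two binding instances created with identical critical settings share one `ModelManager` (and one copy of the weights in VRAM), and `ref_count` decides when the shared pipeline is torn down. A small reproduction of the keying idea, with a trimmed key list for brevity:

```python
import hashlib

CRITICAL = ("model_name", "device", "torch_dtype_str")  # the real list has 11 keys

def config_key(config: dict) -> str:
    # Sort so that dict insertion order cannot change the key.
    key_data = tuple(sorted((k, config.get(k)) for k in CRITICAL))
    return hashlib.sha256(str(key_data).encode("utf-8")).hexdigest()

a = config_key({"model_name": "dreamshaper-8", "device": "cuda", "torch_dtype_str": "float16"})
b = config_key({"torch_dtype_str": "float16", "device": "cuda", "model_name": "dreamshaper-8"})
assert a == b  # same settings -> same manager, so the model is only loaded once
```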
@@ -277,179 +471,137 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
          generator = torch.Generator(device=self.config["device"]).manual_seed(_seed) if _seed != -1 else None
 
          pipeline_args = {
-             "prompt": prompt,
-             "negative_prompt": negative_prompt or None,
-             "width": _width,
-             "height": _height,
-             "num_inference_steps": _num_inference_steps,
-             "guidance_scale": _guidance_scale,
-             "generator": generator,
+             "prompt": prompt, "negative_prompt": negative_prompt or None, "width": _width,
+             "height": _height, "num_inference_steps": _num_inference_steps,
+             "guidance_scale": _guidance_scale, "generator": generator,
          }
-         ASCIIColors.info(f"Generating image with prompt: '{prompt[:100]}...'")
-
+
+         future = Future()
+         self.manager.queue.put((future, pipeline_args))
+         ASCIIColors.info(f"Job for prompt '{prompt[:50]}...' queued. Waiting...")
+
          try:
-             with torch.no_grad():
-                 pipeline_output = self.pipeline(**pipeline_args)
-
-             pil_image: Image.Image = pipeline_output.images[0]
-             img_byte_arr = BytesIO()
-             pil_image.save(img_byte_arr, format="PNG")
-
+             image_bytes = future.result()
              ASCIIColors.green("Image generated successfully.")
-             return img_byte_arr.getvalue()
-
+             return image_bytes
          except Exception as e:
-             trace_exception(e)
-             raise Exception(f"Diffusers image generation failed: {e}") from e
+             raise Exception(f"Image generation failed: {e}") from e
 
-     def list_models(self) -> List[str]:
-         """Lists available local models from the models_path."""
-         if not self.models_path.exists():
-             return []
+     def list_local_models(self) -> List[str]:
+         if not self.models_path.exists(): return []
+
+         folders = [
+             d.name for d in self.models_path.iterdir()
+             if d.is_dir() and ((d / "model_index.json").exists() or (d / "unet" / "config.json").exists())
+         ]
+         safetensors = self.list_safetensor_models()
+         return sorted(folders + safetensors)
+
+     def list_available_models(self) -> List[str]:
+         discoverable_models = [m['model_name'] for m in self.listModels()]
+         local_models = self.list_local_models()
 
-         models = []
-         for model_dir in self.models_path.iterdir():
-             if model_dir.is_dir():
-                 # Check for key files indicating a valid diffusers model directory
-                 if (model_dir / "model_index.json").exists() or (model_dir / "unet" / "config.json").exists():
-                     models.append(model_dir.name)
-         return sorted(models)
+         combined_list = sorted(list(set(local_models + discoverable_models)))
+         return combined_list
 
      def list_services(self, **kwargs) -> List[Dict[str, str]]:
-         """Lists available local models from the models_path."""
-         models = self.list_models()
+         models = self.list_available_models()
+         local_models = self.list_local_models()
+
          if not models:
-             return [{
-                 "name": "diffusers_no_local_models",
-                 "caption": "No local Diffusers models found",
-                 "help": f"Place Diffusers model folders inside '{self.models_path.resolve()}' or specify a Hugging Face model ID in settings to download one."
-             }]
-
-         return [{
-             "name": model_name,
-             "caption": f"Diffusers: {model_name}",
-             "help": f"Local Diffusers model from: {self.models_path.resolve()}"
-         } for model_name in models]
+             return [{"name": "diffusers_no_models", "caption": "No models found", "help": f"Place models in '{self.models_path.resolve()}'."}]
+
+         services = []
+         for m in models:
+             help_text = "Hugging Face model ID"
+             if m in local_models:
+                 help_text = f"Local model from: {self.models_path.resolve()}"
+             elif m in CIVITAI_MODELS:
+                 filename = CIVITAI_MODELS[m]['filename']
+                 help_text = f"Civitai model (downloads as {filename})"
+
+             services.append({"name": m, "caption": f"Diffusers: {m}", "help": help_text})
+         return services
 
      def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
-         """Retrieves the current configurable settings for the binding."""
-         local_models = self.list_models()
+         available_models = self.list_available_models()
          return [
-             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Hugging Face model ID or a local model name from the models folder.", "options": local_models},
-             {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
-             {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Current resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
-             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant from HF (e.g., 'fp16', 'bf16'). Optional."},
-             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer loading models from .safetensors files."},
+             {"name": "model_name", "type": "str", "value": self.model_name, "description": "Local, Civitai, or Hugging Face model.", "options": available_models},
+             {"name": "device", "type": "str", "value": self.config["device"], "description": f"Inference device. Resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
+             {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+             {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "HF model variant (e.g., 'fp16')."},
+             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer .safetensors when loading from Hugging Face."},
              {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
-             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker (if model has one)."},
+             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker."},
              {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
              {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
              {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
-             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default width for generated images."},
-             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default height for generated images."},
-             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default number of inference steps."},
+             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default image width. Note: SDXL models prefer 1024."},
+             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default image height. Note: SDXL models prefer 1024."},
+             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default inference steps."},
              {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
-             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed for generation (-1 for random)."},
-             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private models).", "is_secret": True},
-             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to Hugging Face cache. Defaults to ~/.cache/huggingface."},
-             {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Only use local files, do not download."},
+             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed (-1 for random)."},
+             {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "HF API token (for private/gated models).", "is_secret": True},
+             {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to HF cache."},
+             {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Do not download from Hugging Face."},
          ]
 
      def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-         """Applies new settings to the binding. Some may trigger a model reload."""
          parsed_settings = settings if isinstance(settings, dict) else \
                            {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
 
-         needs_reload = False
-         critical_keys = ["model_name", "device", "torch_dtype_str", "use_safetensors",
-                          "safety_checker_on", "hf_variant", "enable_cpu_offload",
-                          "enable_sequential_cpu_offload", "enable_xformers", "hf_token",
-                          "local_files_only", "hf_cache_path"]
+         critical_keys = self.registry._get_config_key({}).__self__.critical_keys
+         needs_manager_swap = False
 
          for key, value in parsed_settings.items():
-             current_value = getattr(self, key, self.config.get(key))
-             if current_value != value:
+             if self.config.get(key) != value:
                  ASCIIColors.info(f"Setting '{key}' changed to: {value}")
-                 if key == "model_name":
-                     self.model_name = value
                  self.config[key] = value
-                 if key in critical_keys:
-                     needs_reload = True
-                 elif key == "scheduler_name" and self.pipeline:
-                     self._set_scheduler()
+                 if key == "model_name": self.model_name = value
+                 if key in critical_keys: needs_manager_swap = True
+
+         if needs_manager_swap and self.model_name:
+             ASCIIColors.info("Critical settings changed. Swapping model manager...")
+             self._resolve_device_and_dtype()
+             self._acquire_manager()
+
+         if not needs_manager_swap and self.manager:
+             self.manager.config.update(parsed_settings)
+             if 'scheduler_name' in parsed_settings and self.manager.pipeline:
+                 with self.manager.lock:
+                     self.manager._set_scheduler()
 
-         if needs_reload and self.model_name:
-             ASCIIColors.info("Reloading model due to settings changes...")
-             try:
-                 self._resolve_device_and_dtype()
-                 self.load_model()
-                 ASCIIColors.green("Model reloaded successfully.")
-             except Exception as e:
-                 trace_exception(e)
-                 ASCIIColors.error(f"Failed to reload model with new settings: {e}. Binding may be unstable.")
-                 return False
          return True
 
      def __del__(self):
          self.unload_model()
 
- # Example Usage (for testing within this file)
+ # Example Usage
  if __name__ == '__main__':
      ASCIIColors.magenta("--- Diffusers TTI Binding Test ---")
 
      if not DIFFUSERS_AVAILABLE:
-         ASCIIColors.error("Diffusers or its dependencies are not available. Cannot run test.")
+         ASCIIColors.error("Diffusers not available. Cannot run test.")
          exit(1)
 
      temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
      temp_models_path = temp_paths_dir / "models"
-     temp_cache_path = temp_paths_dir / "shared_cache"
 
-     # Clean up previous runs
-     if temp_paths_dir.exists():
-         shutil.rmtree(temp_paths_dir)
+     if temp_paths_dir.exists(): shutil.rmtree(temp_paths_dir)
      temp_models_path.mkdir(parents=True, exist_ok=True)
-     temp_cache_path.mkdir(parents=True, exist_ok=True)
 
-     # A very small, fast model for testing from Hugging Face.
-     test_model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
-
      try:
-         ASCIIColors.cyan("\n1. Initializing binding without a model...")
-         binding = DiffusersTTIBinding_Impl(
-             models_path=str(temp_models_path),
-             hf_cache_path=str(temp_cache_path)
-         )
-         assert binding.pipeline is None, "Pipeline should not be loaded initially."
-         ASCIIColors.green("Initialization successful.")
-
-         ASCIIColors.cyan("\n2. Listing services (should be empty)...")
-         services = binding.list_services()
-         ASCIIColors.info(json.dumps(services, indent=2))
-         assert services[0]["name"] == "diffusers_no_local_models"
-
-         ASCIIColors.cyan(f"\n3. Setting model_name to '{test_model_id}' to trigger load...")
-         binding.set_settings({"model_name": test_model_id})
-         assert binding.model_name == test_model_id
-         assert binding.pipeline is not None, "Pipeline should be loaded after setting model_name."
-         ASCIIColors.green("Model loaded successfully.")
-
-         ASCIIColors.cyan("\n4. Generating an image...")
-         image_bytes = binding.generate_image(
-             prompt="A tiny robot",
-             width=64, height=64,
-             num_inference_steps=2
-         )
-         assert image_bytes and isinstance(image_bytes, bytes)
-         ASCIIColors.green(f"Image generated (size: {len(image_bytes)} bytes).")
-         test_image_path = Path(__file__).parent / "test_diffusers_image.png"
-         with open(test_image_path, "wb") as f:
-             f.write(image_bytes)
-         ASCIIColors.info(f"Test image saved to: {test_image_path.resolve()}")
-
-         ASCIIColors.cyan("\n5. Unloading model...")
-         binding.unload_model()
-         assert binding.pipeline is None, "Pipeline should be None after unload."
+         ASCIIColors.cyan("\n--- Test: Loading a Hugging Face model ---")
+         # Using a very small model for fast testing
+         binding_config = {"models_path": str(temp_models_path), "model_name": "hf-internal-testing/tiny-stable-diffusion-torch"}
+         binding = DiffusersTTIBinding_Impl(**binding_config)
+
+         img_bytes = binding.generate_image("a tiny robot", width=64, height=64, num_inference_steps=2)
+         assert len(img_bytes) > 1000, "Image generation from HF model should succeed."
+         ASCIIColors.green("HF model loading and generation successful.")
+
+         del binding
+         time.sleep(0.1)
 
      except Exception as e:
          trace_exception(e)
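One line in the new `set_settings` deserves scrutiny: `critical_keys = self.registry._get_config_key({}).__self__.critical_keys`. `_get_config_key` returns a hex digest string, and `critical_keys` is a local variable inside that method rather than an attribute, so as published this expression looks like it would raise `AttributeError` the first time settings are applied. If that reading is correct, a working equivalent would hoist the list into a module-level constant shared by both call sites; a hypothetical sketch:

```python
# Sketch of a fix (hypothetical; not part of the released code): share the
# critical-key list between PipelineRegistry and set_settings.
CRITICAL_CONFIG_KEYS = [
    "model_name", "device", "torch_dtype_str", "use_safetensors",
    "safety_checker_on", "hf_variant", "enable_cpu_offload",
    "enable_sequential_cpu_offload", "enable_xformers",
    "local_files_only", "hf_cache_path",
]

# In PipelineRegistry._get_config_key:
#     key_data = tuple(sorted((k, config.get(k)) for k in CRITICAL_CONFIG_KEYS))
# In DiffusersTTIBinding_Impl.set_settings:
#     critical_keys = CRITICAL_CONFIG_KEYS
```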
@@ -458,23 +610,4 @@ if __name__ == '__main__':
          ASCIIColors.cyan("\nCleaning up temporary directories...")
          if temp_paths_dir.exists():
              shutil.rmtree(temp_paths_dir)
-         ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
-
-     def listModels(self) -> list:
-         """Lists models"""
-         # TODO: use the models from the folder if set
-         formatted_models=[
-             {
-                 'model_name': "dummy model 1",
-                 'display_name': "Test dummy model 1",
-                 'description': "A test dummy model",
-                 'owned_by': 'parisneo'
-             },
-             {
-                 'model_name': "dummy model 2",
-                 'display_name': "Test dummy model 2",
-                 'description': "A test dummy model",
-                 'owned_by': 'parisneo'
-             }
-         ]
-         return formatted_models
+         ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
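Two small observations on the Civitai download path earlier in this file. First, `dest_path.with_suffix(".temp")` replaces the `.safetensors` extension rather than appending to it, so the partial file is named e.g. `dreamshaper_8.temp`; harmless, but worth knowing when cleaning the models folder. Second, nothing verifies what was downloaded. A hedged sketch of adding a SHA-256 check (the `sha256` field on each `CIVITAI_MODELS` entry is hypothetical and would need to be added):

```python
import hashlib
from pathlib import Path

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Hash a file incrementally so multi-GB checkpoints never need to fit in RAM."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# After the download completes, before shutil.move(temp_path, dest_path):
# expected = model_info.get("sha256")   # hypothetical extra field
# if expected and sha256_of(temp_path) != expected:
#     temp_path.unlink()
#     raise RuntimeError(f"Checksum mismatch for {filename}")
```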
{lollms_client-1.1.0.dist-info → lollms_client-1.1.1.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lollms_client
- Version: 1.1.0
+ Version: 1.1.1
  Summary: A client library for LoLLMs generate endpoint
  Author-email: ParisNeo <parisneoai@gmail.com>
  License: Apache Software License
{lollms_client-1.1.0.dist-info → lollms_client-1.1.1.dist-info}/RECORD CHANGED
@@ -1,9 +1,9 @@
- lollms_client/__init__.py,sha256=FmmUQyLm9hgGyCc71AtEoRRxx-YfI8Gl_4H7k3VRFpI,1146
+ lollms_client/__init__.py,sha256=GeDlYB0SLi4cbO3qk5kj3P84Z_9oPXXSGC7JLDWnTB0,1146
  lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
  lollms_client/lollms_core.py,sha256=zqaxEJJDXiwpDd3E-PFDLJOnmG1d9cOiL_CrYRUa7Z0,167361
  lollms_client/lollms_discussion.py,sha256=wkadV6qiegxOzukMVn5vukdeJivnlyygSzZBkzOi9Gc,106714
  lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
- lollms_client/lollms_llm_binding.py,sha256=EJGIKb_a3sVEia5zxs_EcOl21xmfJV8JzyY3sfymSBw,24984
+ lollms_client/lollms_llm_binding.py,sha256=5-Vknm0YILPd6ZiwZynsXMfns__Yd_1tDDc2fciRiiA,25020
  lollms_client/lollms_mcp_binding.py,sha256=psb27A23VFWDfZsR2WUbQXQxiZDW5yfOak6ZtbMfszI,10222
  lollms_client/lollms_mcp_security.py,sha256=FhVTDhSBjksGEZnopVnjFmEF5dv7D8bBTqoaj4BiF0E,3562
  lollms_client/lollms_personality.py,sha256=O-9nqZhazcITOkxjT24ENTxTmIoZLgqIsQ9WtWs0Id0,8719
@@ -27,7 +27,7 @@ lollms_client/llm_bindings/llamacpp/__init__.py,sha256=4CbNYpfquVEgfsxuLsxQta_dZ
  lollms_client/llm_bindings/lollms/__init__.py,sha256=a4gNH4axiDgsri8NGAcq0OitgYdnzBDLNkzUMhkFArA,24781
  lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=iuDfhZZoLC-PDEPLHrcjk5-962S5c7OeCI7PMdJxI_A,17753
  lollms_client/llm_bindings/mistral/__init__.py,sha256=cddz9xIj8NRFLKHe2JMxzstpUrNIu5s9juci3mhiHfo,14133
- lollms_client/llm_bindings/ollama/__init__.py,sha256=d61pSEWlJ2KOvnaztji2wblvadu0oTelEJeHG4IcL9I,41193
+ lollms_client/llm_bindings/ollama/__init__.py,sha256=3Jyb9Yyp6lcz52HKF8nEiB4s3ChKBCjs7I27ohNOT4A,41216
  lollms_client/llm_bindings/open_router/__init__.py,sha256=cAFWtCWJx0WjIe1w2JReCf6WlAZjrXYA4jZ8l3zqxMs,14915
  lollms_client/llm_bindings/openai/__init__.py,sha256=J8v7XU9TrvXJd1ffwhYkya5YeXxWnNiFuNBAwRfoHDk,26066
  lollms_client/llm_bindings/openllm/__init__.py,sha256=RC9dVeopslS-zXTsSJ7VC4iVsKgZCBwfmccmr_LCHA0,29971
@@ -48,7 +48,7 @@ lollms_client/stt_bindings/whisper/__init__.py,sha256=1Ej67GdRKBy1bba14jMaYDYHiZ
  lollms_client/stt_bindings/whispercpp/__init__.py,sha256=xSAQRjAhljak3vWCpkP0Vmdb6WmwTzPjXyaIB85KLGU,21439
  lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lollms_client/tti_bindings/dalle/__init__.py,sha256=1nE36XamKEJOMpm6QUow8OyM1KdpejCLM0KUSXlcePo,24135
- lollms_client/tti_bindings/diffusers/__init__.py,sha256=OQOHE1WtB4TamsWd17EEL4NOdxIIonDqDMS_xaONUSI,23510
+ lollms_client/tti_bindings/diffusers/__init__.py,sha256=oQzvV0MSbLUEqWrercgB_npKJXzpylXqbqSP1E18its,30296
  lollms_client/tti_bindings/gemini/__init__.py,sha256=f9fPuqnrBZ1Z-obcoP6EVvbEXNbNCSg21cd5efLCk8U,16707
  lollms_client/tti_bindings/lollms/__init__.py,sha256=5Tnsn4b17djvieQkcjtIDBm3qf0pg5ZWWov-4_2wmo0,8762
  lollms_client/ttm_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -62,8 +62,8 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=0IEWG4zH3_sOkSb9WbZzkeV5
  lollms_client/tts_bindings/xtts/__init__.py,sha256=FgcdUH06X6ZR806WQe5ixaYx0QoxtAcOgYo87a2qxYc,18266
  lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
  lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- lollms_client-1.1.0.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
- lollms_client-1.1.0.dist-info/METADATA,sha256=_frP9CKjj2dGhIX57fY7NLo9z0-Bfc449rznQMOUSGs,58549
- lollms_client-1.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- lollms_client-1.1.0.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
- lollms_client-1.1.0.dist-info/RECORD,,
+ lollms_client-1.1.1.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+ lollms_client-1.1.1.dist-info/METADATA,sha256=yIpolI5PhLK1ETh8MVA1JKk1itCOUrrJYvErDa07E9c,58549
+ lollms_client-1.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ lollms_client-1.1.1.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+ lollms_client-1.1.1.dist-info/RECORD,,