lollms-client 1.0.0__py3-none-any.whl → 1.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of lollms-client might be problematic.

@@ -5,51 +5,7 @@ from io import BytesIO
 from typing import Optional, List, Dict, Any, Union
 from pathlib import Path
 
-
-try:
-    import pipmaster as pm
-    import platform # For OS detection for torch index
-
-    # Determine initial device preference to guide torch installation
-    preferred_torch_device_for_install = "cpu" # Default assumption
-
-    # Tentatively set preference based on OS, assuming user might want GPU if available
-    if platform.system() == "Linux" or platform.system() == "Windows":
-        # On Linux/Windows, CUDA is the primary GPU acceleration for PyTorch.
-        # We will try to install a CUDA version of PyTorch.
-        preferred_torch_device_for_install = "cuda"
-    elif platform.system() == "Darwin":
-        # On macOS, MPS is the acceleration. Standard torch install usually handles this.
-        preferred_torch_device_for_install = "mps" # or keep cpu if mps detection is later
-
-    torch_pkgs = ["torch", "torchaudio", "torchvision", "xformers"]
-    diffusers_core_pkgs = ["diffusers", "Pillow", "transformers", "safetensors"]
-
-    torch_index_url = None
-    if preferred_torch_device_for_install == "cuda":
-        # Specify a common CUDA version index. Pip should resolve the correct torch version.
-        # As of late 2023/early 2024, cu118 or cu121 are common. Let's use cu126.
-        # Users with different CUDA setups might need to pre-install torch manually.
-        torch_index_url = "https://download.pytorch.org/whl/cu126"
-        ASCIIColors.info(f"Attempting to ensure PyTorch with CUDA support (target index: {torch_index_url})")
-        # Install torch and torchaudio first from the specific index
-        pm.ensure_packages(torch_pkgs, index_url=torch_index_url)
-        # Then install audiocraft and other dependencies; pip should use the already installed torch
-        pm.ensure_packages(diffusers_core_pkgs)
-    else:
-        # For CPU, MPS, or if no specific CUDA preference was determined for install
-        ASCIIColors.info("Ensuring PyTorch, AudioCraft, and dependencies using default PyPI index.")
-        pm.ensure_packages(torch_pkgs + diffusers_core_pkgs)
-
-    import whisper
-    import torch
-    _whisper_installed = True
-except Exception as e:
-    _whisper_installation_error = str(e)
-    whisper = None
-    torch = None
-
-
+# Attempt to import core dependencies and set availability flag
 try:
     import torch
     from diffusers import AutoPipelineForText2Image, DiffusionPipeline
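Note on the hunk above: 1.1.0 drops the install-at-import block entirely. The removed code apparently carried copy-paste leftovers from an audio binding (whisper, AudioCraft, `_whisper_installed`), which never belonged in a text-to-image module. The replacement is the standard availability-flag idiom; a minimal sketch of it, for illustration only (not part of the package):

    # Sketch of the availability-flag idiom adopted in 1.1.0.
    try:
        import torch  # any heavy optional dependency
        DIFFUSERS_AVAILABLE = True
    except ImportError:
        torch = None
        DIFFUSERS_AVAILABLE = False

    def require_dependencies():
        # Fail only when the feature is actually used, with an actionable message.
        if not DIFFUSERS_AVAILABLE:
            raise ImportError(
                "Please install: pip install torch diffusers Pillow transformers safetensors"
            )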
@@ -63,20 +19,21 @@ except ImportError:
     Image = None
     load_image = None
     DIFFUSERS_AVAILABLE = False
-    # Detailed error will be raised in __init__ if user tries to use it
+    # A detailed error will be raised in __init__ if the user tries to use the binding.
 
 from lollms_client.lollms_tti_binding import LollmsTTIBinding
 from ascii_colors import trace_exception, ASCIIColors
 import json # For potential JSONDecodeError and settings
+import shutil
 
 # Defines the binding name for the manager
 BindingName = "DiffusersTTIBinding_Impl"
 
-# Helper for torch.dtype string conversion
+# Helper for torch.dtype string conversion, handles case where torch is not installed
 TORCH_DTYPE_MAP_STR_TO_OBJ = {
-    "float16": torch.float16 if torch else "float16", # Keep string if torch not loaded
-    "bfloat16": torch.bfloat16 if torch else "bfloat16",
-    "float32": torch.float32 if torch else "float32",
+    "float16": getattr(torch, 'float16', 'float16'),
+    "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
+    "float32": getattr(torch, 'float32', 'float32'),
     "auto": "auto"
 }
 TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
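The getattr rewrite works because a failed import leaves torch bound to None; getattr(None, 'float16', 'float16') finds no attribute and returns the string default. A quick self-contained check:

    # The fallback in action when torch failed to import.
    torch = None                                  # what the except branch leaves behind
    dtype = getattr(torch, "float16", "float16")  # no AttributeError; the default wins
    assert dtype == "float16"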
@@ -90,22 +47,22 @@ SCHEDULER_MAPPING = {
     "ddim": "DDIMScheduler",
     "ddpm": "DDPMScheduler",
     "deis_multistep": "DEISMultistepScheduler",
-    "dpm_multistep": "DPMSolverMultistepScheduler", # Alias
-    "dpm_multistep_karras": "DPMSolverMultistepScheduler", # Configured with use_karras_sigmas=True
+    "dpm_multistep": "DPMSolverMultistepScheduler",
+    "dpm_multistep_karras": "DPMSolverMultistepScheduler",
     "dpm_single": "DPMSolverSinglestepScheduler",
-    "dpm_adaptive": "DPMSolverPlusPlusScheduler", # DPM++ 2M Karras in A1111
-    "dpm++_2m": "DPMSolverMultistepScheduler",
-    "dpm++_2m_karras": "DPMSolverMultistepScheduler", # Configured with use_karras_sigmas=True
+    "dpm_adaptive": "DPMSolverPlusPlusScheduler",
+    "dpm++_2m": "DPMSolverMultistepScheduler",
+    "dpm++_2m_karras": "DPMSolverMultistepScheduler",
     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler",
-    "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler", # Configured with use_karras_sigmas=True
+    "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
     "dpm++_sde": "DPMSolverSDEScheduler",
-    "dpm++_sde_karras": "DPMSolverSDEScheduler", # Configured with use_karras_sigmas=True
+    "dpm++_sde_karras": "DPMSolverSDEScheduler",
     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler",
     "euler_discrete": "EulerDiscreteScheduler",
     "heun_discrete": "HeunDiscreteScheduler",
-    "heun_karras": "HeunDiscreteScheduler", # Configured with use_karras_sigmas=True
+    "heun_karras": "HeunDiscreteScheduler",
     "lms_discrete": "LMSDiscreteScheduler",
-    "lms_karras": "LMSDiscreteScheduler", # Configured with use_karras_sigmas=True
+    "lms_karras": "LMSDiscreteScheduler",
     "pndm": "PNDMScheduler",
     "unipc_multistep": "UniPCMultistepScheduler",
 }
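These map entries are consumed later by _set_scheduler, which rebuilds the scheduler from the pipeline's existing config. A minimal sketch of that mechanism, assuming an already-loaded pipeline `pipe` (illustrative, not the binding's exact code):

    import importlib

    def apply_scheduler(pipe, class_name: str, karras: bool = False):
        # Resolve the scheduler class by name, then rebuild it from the
        # pipeline's current scheduler config, toggling Karras sigmas.
        SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), class_name)
        config = dict(pipe.scheduler.config)
        config["use_karras_sigmas"] = karras
        pipe.scheduler = SchedulerClass.from_config(config)

    # e.g. apply_scheduler(pipe, "DPMSolverMultistepScheduler", karras=True)

Diffusers does not appear to ship a class named DPMSolverPlusPlusScheduler, so the dpm_adaptive entry would fail the lookup and fall back to the current default via the except branch; both versions share this quirk.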
@@ -121,81 +78,73 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
     Allows running various text-to-image models locally.
     """
     DEFAULT_CONFIG = {
-        "model_id_or_path": "stabilityai/stable-diffusion-2-1-base",
-        "device": "auto", # "auto", "cuda", "mps", "cpu"
-        "torch_dtype_str": "auto", # "auto", "float16", "bfloat16", "float32"
+        "model_name": "",
+        "device": "auto",
+        "torch_dtype_str": "auto",
         "use_safetensors": True,
         "scheduler_name": "default",
-        "safety_checker_on": True, # Note: Diffusers default is ON
+        "safety_checker_on": True,
         "num_inference_steps": 25,
         "guidance_scale": 7.5,
-        "default_width": 768, # Default for SD 2.1 base
-        "default_height": 768, # Default for SD 2.1 base
-        "seed": -1, # -1 for random on each call
+        "default_width": 768,
+        "default_height": 768,
+        "seed": -1,
         "enable_cpu_offload": False,
         "enable_sequential_cpu_offload": False,
-        "enable_xformers": False, # Explicit opt-in for xformers
-        "hf_variant": None, # e.g., "fp16"
+        "enable_xformers": False,
+        "hf_variant": None,
         "hf_token": None,
+        "hf_cache_path": None,
         "local_files_only": False,
     }
 
-    def __init__(self,
-                 config: Optional[Dict[str, Any]] = None,
-                 lollms_paths: Optional[Dict[str, Union[str, Path]]] = None,
-                 **kwargs # Catches other potential parameters like 'service_key' or 'client_id'
-                 ):
+    def __init__(self, **kwargs):
         """
         Initialize the Diffusers TTI binding.
 
         Args:
-            config (Optional[Dict[str, Any]]): Configuration dictionary for the binding.
-                Overrides DEFAULT_CONFIG.
-            lollms_paths (Optional[Dict[str, Union[str, Path]]]): Dictionary of LOLLMS paths.
-                Used for model/cache directories.
-            **kwargs: Catches other parameters (e.g. service_key).
+            **kwargs: A dictionary of configuration parameters.
+                Expected keys:
+                - model_name (str): The name of the model to use. Can be a Hugging Face Hub ID
+                  (e.g., 'stabilityai/stable-diffusion-xl-base-1.0') or the name of a local
+                  model directory located in `models_path`.
+                - models_path (str or Path): The path to the directory where local models are stored.
+                  Defaults to a 'models' folder next to this file.
+                - hf_cache_path (str or Path, optional): Path to a directory for Hugging Face
+                  to cache downloaded models and files.
+                - Other settings from the DEFAULT_CONFIG can be overridden here.
         """
-        super().__init__(binding_name="diffusers")
+        super().__init__(binding_name=BindingName)
 
         if not DIFFUSERS_AVAILABLE:
-            ASCIIColors.error("Diffusers library or its dependencies (torch, Pillow, transformers) are not installed or failed to import.")
-            ASCIIColors.info("Attempting to install/verify packages...")
-            pm.ensure_packages(["torch", "diffusers", "Pillow", "transformers", "safetensors"])
-            try:
-                import torch as _torch
-                from diffusers import AutoPipelineForText2Image as _AutoPipelineForText2Image
-                from diffusers import DiffusionPipeline as _DiffusionPipeline
-                from PIL import Image as _Image
-                globals()['torch'] = _torch
-                globals()['AutoPipelineForText2Image'] = _AutoPipelineForText2Image
-                globals()['DiffusionPipeline'] = _DiffusionPipeline
-                globals()['Image'] = _Image
-
-                # Re-populate torch dtype maps if torch was just loaded
-                global TORCH_DTYPE_MAP_STR_TO_OBJ, TORCH_DTYPE_MAP_OBJ_TO_STR
-                TORCH_DTYPE_MAP_STR_TO_OBJ = {
-                    "float16": _torch.float16, "bfloat16": _torch.bfloat16,
-                    "float32": _torch.float32, "auto": "auto"
-                }
-                TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
-                TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"
-                ASCIIColors.green("Dependencies seem to be available now.")
-            except ImportError as e:
-                trace_exception(e)
-                raise ImportError(
-                    "Diffusers binding dependencies are still not met after trying to ensure them. "
-                    "Please install torch, diffusers, Pillow, and transformers manually. "
-                    f"Error: {e}"
-                ) from e
+            raise ImportError(
+                "Diffusers library or its dependencies (torch, Pillow, transformers) are not installed. "
+                "Please install them using: pip install torch diffusers Pillow transformers safetensors"
+            )
+
+        # Merge default config with user-provided kwargs
+        self.config = {**self.DEFAULT_CONFIG, **kwargs}
 
-        # Merge configs, lollms_paths, and kwargs
-        self.config = {**self.DEFAULT_CONFIG, **(config or {}), **kwargs}
-        self.lollms_paths = {k: Path(v) for k, v in (lollms_paths or {}).items()} if lollms_paths else {}
+        # model_name is crucial, get it from the merged config
+        self.model_name = self.config.get("model_name", "")
+
+        # models_path is also special, handle it with its default logic
+        self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
+        self.models_path.mkdir(parents=True, exist_ok=True)
 
         self.pipeline: Optional[DiffusionPipeline] = None
-        self.current_model_id_or_path = None # To track if model needs reload
+        self.current_model_id_or_path = None
+
+        self._resolve_device_and_dtype()
+
+        if self.model_name:
+            self.load_model()
+        else:
+            ASCIIColors.warning("No model_name provided during initialization. The binding is idle.")
+
 
-        # Resolve auto settings for device and dtype
+    def _resolve_device_and_dtype(self):
+        """Resolves auto settings for device and dtype from config."""
         if self.config["device"].lower() == "auto":
             if torch.cuda.is_available():
                 self.config["device"] = "cuda"
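The constructor change is the heart of this release: the config/lollms_paths pair is gone and everything arrives as flat kwargs. A hypothetical call under the new signature (all values are placeholders):

    # Hypothetical usage of the new kwargs-only constructor.
    binding = DiffusersTTIBinding_Impl(
        model_name="stabilityai/stable-diffusion-xl-base-1.0",  # Hub ID or local folder name
        models_path="./models",      # folder scanned for local models
        hf_cache_path="./hf_cache",  # optional Hugging Face download cache
        device="auto",
        num_inference_steps=25,
    )

One behavioral consequence: because kwargs are merged straight into self.config, unknown keys (including models_path itself) now land in the config dict instead of being rejected.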
@@ -205,54 +154,42 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                 self.config["device"] = "cpu"
 
         if self.config["torch_dtype_str"].lower() == "auto":
-            if self.config["device"] == "cpu":
-                self.config["torch_dtype_str"] = "float32" # CPU usually float32
-            else:
-                self.config["torch_dtype_str"] = "float16" # Common default for GPU
+            self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
 
         self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-        if self.torch_dtype == "auto": # Should have been resolved above
+        if self.torch_dtype == "auto": # Final fallback
             self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
         self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
 
-        # For potential lollms client specific features
-        self.client_id = kwargs.get("service_key", kwargs.get("client_id", "diffusers_client_user"))
-
-        self.load_model()
-
-
-    def _resolve_model_path(self, model_id_or_path: str) -> str:
-        """Resolves a model name/path against lollms_paths if not absolute."""
-        if os.path.isabs(model_id_or_path):
-            return model_id_or_path
+    def _resolve_model_path(self, model_name: str) -> str:
+        """
+        Resolves a model name to a full path if it's a local model,
+        otherwise returns it as is (assuming it's a Hugging Face Hub ID).
+        """
+        if not model_name:
+            raise ValueError("Model name cannot be empty.")
+
+        if Path(model_name).is_absolute() and Path(model_name).is_dir():
+            ASCIIColors.info(f"Using absolute path for model: {model_name}")
+            return model_name
 
-        # Check personal_models_path/diffusers_models/<name>
-        if self.lollms_paths.get('personal_models_path'):
-            personal_diffusers_path = self.lollms_paths['personal_models_path'] / "diffusers_models" / model_id_or_path
-            if personal_diffusers_path.exists() and personal_diffusers_path.is_dir():
-                ASCIIColors.info(f"Found local model in personal_models_path: {personal_diffusers_path}")
-                return str(personal_diffusers_path)
-
-        # Check models_zoo_path/diffusers_models/<name> (if different from personal)
-        if self.lollms_paths.get('models_zoo_path') and \
-           self.lollms_paths.get('models_zoo_path') != self.lollms_paths.get('personal_models_path'):
-            zoo_diffusers_path = self.lollms_paths['models_zoo_path'] / "diffusers_models" / model_id_or_path
-            if zoo_diffusers_path.exists() and zoo_diffusers_path.is_dir():
-                ASCIIColors.info(f"Found local model in models_zoo_path: {zoo_diffusers_path}")
-                return str(zoo_diffusers_path)
+        local_model_path = self.models_path / model_name
+        if local_model_path.exists() and local_model_path.is_dir():
+            ASCIIColors.info(f"Found local model in '{self.models_path}': {local_model_path}")
+            return str(local_model_path)
 
-        ASCIIColors.info(f"Assuming '{model_id_or_path}' is a Hugging Face Hub ID or already fully qualified.")
-        return model_id_or_path
+        ASCIIColors.info(f"'{model_name}' not found locally. Assuming it is a Hugging Face Hub ID.")
+        return model_name
 
     def load_model(self):
         """Loads the Diffusers pipeline based on current configuration."""
         ASCIIColors.info("Loading Diffusers model...")
         if self.pipeline is not None:
-            self.unload_model() # Ensure old model is cleared
+            self.unload_model()
 
         try:
-            model_path = self._resolve_model_path(self.config["model_id_or_path"])
-            self.current_model_id_or_path = model_path # Store what's actually loaded
+            model_path = self._resolve_model_path(self.model_name)
+            self.current_model_id_or_path = model_path
 
             load_args = {
                 "torch_dtype": self.torch_dtype,
@@ -266,26 +203,11 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
             if not self.config["safety_checker_on"]:
                 load_args["safety_checker"] = None
 
-            if self.lollms_paths.get("shared_cache_path"):
-                load_args["cache_dir"] = str(self.lollms_paths["shared_cache_path"] / "huggingface_diffusers")
-
+            if self.config.get("hf_cache_path"):
+                load_args["cache_dir"] = str(self.config["hf_cache_path"])
 
-            # Use AutoPipelineForText2Image for flexibility
-            pipeline_class_to_load = AutoPipelineForText2Image
-            custom_pipeline_class_name = self.config.get("pipeline_class_name")
-
-            if custom_pipeline_class_name:
-                try:
-                    diffusers_module = importlib.import_module("diffusers")
-                    pipeline_class_to_load = getattr(diffusers_module, custom_pipeline_class_name)
-                    ASCIIColors.info(f"Using specified pipeline class: {custom_pipeline_class_name}")
-                except (ImportError, AttributeError) as e:
-                    ASCIIColors.warning(f"Could not load custom pipeline class {custom_pipeline_class_name}: {e}. Falling back to AutoPipelineForText2Image.")
-                    pipeline_class_to_load = AutoPipelineForText2Image
-
-            self.pipeline = pipeline_class_to_load.from_pretrained(model_path, **load_args)
+            self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
 
-            # Scheduler
             self._set_scheduler()
 
             self.pipeline.to(self.config["device"])
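With the custom pipeline_class_name hook removed, every load goes through AutoPipelineForText2Image. The equivalent standalone call looks roughly like this (model ID and cache path are placeholders):

    import torch
    from diffusers import AutoPipelineForText2Image

    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/stable-diffusion-2-1-base",  # placeholder model ID
        torch_dtype=torch.float16,
        use_safetensors=True,
        cache_dir="./hf_cache",  # maps to the new hf_cache_path setting
    )
    pipe.to("cuda")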
@@ -301,18 +223,18 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                 self.pipeline.enable_model_cpu_offload()
                 ASCIIColors.info("Model CPU offload enabled.")
             elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
-                self.pipeline.enable_sequential_cpu_offload() # More aggressive
+                self.pipeline.enable_sequential_cpu_offload()
                 ASCIIColors.info("Sequential CPU offload enabled.")
 
-
-            ASCIIColors.green(f"Diffusers model '{model_path}' loaded successfully on device '{self.config['device']}' with dtype '{self.config['torch_dtype_str']}'.")
+            ASCIIColors.green(f"Diffusers model '{model_path}' loaded on device '{self.config['device']}'.")
 
         except Exception as e:
             trace_exception(e)
             self.pipeline = None
-            raise RuntimeError(f"Failed to load Diffusers model '{self.config['model_id_or_path']}': {e}") from e
+            raise RuntimeError(f"Failed to load Diffusers model '{self.model_name}': {e}") from e
 
     def _set_scheduler(self):
+        """Sets the scheduler for the pipeline based on config."""
         if not self.pipeline: return
 
         scheduler_name_key = self.config["scheduler_name"].lower()
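For readers weighing the two offload switches kept in 1.1.0: enable_model_cpu_offload moves whole submodules to the GPU on demand (moderate VRAM savings), while enable_sequential_cpu_offload streams weights layer by layer (maximum savings, much slower). A sketch of how the options relate (illustrative; an offloaded pipeline manages device placement itself):

    def apply_memory_options(pipe, device: str, cpu_offload: bool, sequential: bool):
        # The two offload modes are alternatives, not additive.
        if cpu_offload and device != "cpu":
            pipe.enable_model_cpu_offload()
        elif sequential and device != "cpu":
            pipe.enable_sequential_cpu_offload()
        else:
            pipe.to(device)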
@@ -323,221 +245,148 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
         if scheduler_class_name:
             try:
-                scheduler_module = importlib.import_module("diffusers.schedulers")
-                SchedulerClass = getattr(scheduler_module, scheduler_class_name)
-
+                SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), scheduler_class_name)
                 scheduler_config = self.pipeline.scheduler.config
-                if scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS:
-                    scheduler_config["use_karras_sigmas"] = True
-                else: # Ensure it's False if not a karras variant for this scheduler
-                    if "use_karras_sigmas" in scheduler_config:
-                        scheduler_config["use_karras_sigmas"] = False
-
-
+                scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
                 self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
                 ASCIIColors.info(f"Switched scheduler to {scheduler_name_key} ({scheduler_class_name}).")
             except Exception as e:
-                trace_exception(e)
                 ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
         else:
-            ASCIIColors.warning(f"Unknown scheduler name: {self.config['scheduler_name']}. Using model default.")
-
+            ASCIIColors.warning(f"Unknown scheduler: '{self.config['scheduler_name']}'. Using model default.")
 
     def unload_model(self):
         if self.pipeline is not None:
             del self.pipeline
             self.pipeline = None
+            if torch and torch.cuda.is_available():
+                torch.cuda.empty_cache()
             ASCIIColors.info("Diffusers pipeline unloaded.")
-        if torch and torch.cuda.is_available():
-            torch.cuda.empty_cache()
-
-    def generate_image(self,
-                       prompt: str,
-                       negative_prompt: Optional[str] = "",
-                       width: Optional[int] = None, # Uses default from config if None
-                       height: Optional[int] = None, # Uses default from config if None
-                       **kwargs) -> bytes:
-        """
-        Generates image data using the Diffusers pipeline.
 
-        Args:
-            prompt (str): The positive text prompt.
-            negative_prompt (Optional[str]): The negative prompt.
-            width (int): Image width. Overrides default.
-            height (int): Image height. Overrides default.
-            **kwargs: Additional parameters for the pipeline:
-                - num_inference_steps (int)
-                - guidance_scale (float)
-                - seed (int)
-                - eta (float, for DDIM)
-                - num_images_per_prompt (int, though this binding returns one)
-                - clip_skip (int, if supported by pipeline - advanced)
-        Returns:
-            bytes: The generated image data (PNG format).
-        Raises:
-            Exception: If the request fails or image generation fails.
-        """
+    def generate_image(self, prompt: str, negative_prompt: str = "", width: int = None, height: int = None, **kwargs) -> bytes:
+        """Generates an image using the loaded Diffusers pipeline."""
         if not self.pipeline:
             raise RuntimeError("Diffusers pipeline is not loaded. Cannot generate image.")
 
-        # Use call-specific or configured defaults
-        _width = width if width is not None else self.config["default_width"]
-        _height = height if height is not None else self.config["default_height"]
+        _width = width or self.config["default_width"]
+        _height = height or self.config["default_height"]
         _num_inference_steps = kwargs.get("num_inference_steps", self.config["num_inference_steps"])
         _guidance_scale = kwargs.get("guidance_scale", self.config["guidance_scale"])
         _seed = kwargs.get("seed", self.config["seed"])
 
-        generator = None
-        if _seed != -1: # -1 means random seed
-            generator = torch.Generator(device=self.config["device"]).manual_seed(_seed)
+        generator = torch.Generator(device=self.config["device"]).manual_seed(_seed) if _seed != -1 else None
 
         pipeline_args = {
             "prompt": prompt,
-            "negative_prompt": negative_prompt if negative_prompt else None,
+            "negative_prompt": negative_prompt or None,
             "width": _width,
             "height": _height,
             "num_inference_steps": _num_inference_steps,
             "guidance_scale": _guidance_scale,
             "generator": generator,
-            "num_images_per_prompt": kwargs.get("num_images_per_prompt", 1)
         }
-        if "eta" in kwargs: pipeline_args["eta"] = kwargs["eta"]
-        if "clip_skip" in kwargs and hasattr(self.pipeline, "clip_skip"): # Handle clip_skip if supported
-            pipeline_args["clip_skip"] = kwargs["clip_skip"]
-
-
         ASCIIColors.info(f"Generating image with prompt: '{prompt[:100]}...'")
-        ASCIIColors.debug(f"Pipeline args: {pipeline_args}")
 
         try:
-            with torch.no_grad(): # Important for inference
+            with torch.no_grad():
                 pipeline_output = self.pipeline(**pipeline_args)
 
             pil_image: Image.Image = pipeline_output.images[0]
-
-            # Convert PIL Image to bytes (PNG)
             img_byte_arr = BytesIO()
             pil_image.save(img_byte_arr, format="PNG")
-            img_bytes = img_byte_arr.getvalue()
 
             ASCIIColors.green("Image generated successfully.")
-            return img_bytes
+            return img_byte_arr.getvalue()
 
         except Exception as e:
             trace_exception(e)
             raise Exception(f"Diffusers image generation failed: {e}") from e
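A hypothetical call against the slimmed-down generate_image signature (assumes a binding with a loaded model; prompt and dimensions are placeholders):

    png_bytes = binding.generate_image(
        prompt="A watercolor lighthouse at dusk",
        negative_prompt="blurry, low quality",
        width=512, height=512,
        num_inference_steps=20,
        seed=42,
    )
    with open("out.png", "wb") as f:
        f.write(png_bytes)

One subtle change: `width or default` treats width=0 as unset, whereas the old `width if width is not None` check only fell back on None.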
 
+    def list_models(self) -> List[str]:
+        """Lists available local models from the models_path."""
+        if not self.models_path.exists():
+            return []
+
+        models = []
+        for model_dir in self.models_path.iterdir():
+            if model_dir.is_dir():
+                # Check for key files indicating a valid diffusers model directory
+                if (model_dir / "model_index.json").exists() or (model_dir / "unet" / "config.json").exists():
+                    models.append(model_dir.name)
+        return sorted(models)
+
     def list_services(self, **kwargs) -> List[Dict[str, str]]:
-        """
-        Lists the currently loaded model as the available service.
-        Future: Could scan local model directories or list known HF models.
-        """
-        if self.pipeline and self.current_model_id_or_path:
+        """Lists available local models from the models_path."""
+        models = self.list_models()
+        if not models:
             return [{
-                "name": os.path.basename(self.current_model_id_or_path),
-                "caption": f"Diffusers: {os.path.basename(self.current_model_id_or_path)}",
-                "help": (f"Currently loaded model. Path/ID: {self.current_model_id_or_path}. "
-                         f"Device: {self.config['device']}. DType: {self.config['torch_dtype_str']}. "
-                         f"Scheduler: {self.pipeline.scheduler.__class__.__name__}.")
+                "name": "diffusers_no_local_models",
+                "caption": "No local Diffusers models found",
+                "help": f"Place Diffusers model folders inside '{self.models_path.resolve()}' or specify a Hugging Face model ID in settings to download one."
             }]
-        return [{"name": "diffusers_unloaded", "caption": "No Diffusers model loaded", "help": "Configure a model in settings."}]
+
+        return [{
+            "name": model_name,
+            "caption": f"Diffusers: {model_name}",
+            "help": f"Local Diffusers model from: {self.models_path.resolve()}"
+        } for model_name in models]
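For orientation, an illustrative return value of the new list_services() when two local folders pass the model_index.json check (names invented):

    [
        {"name": "sd21-base", "caption": "Diffusers: sd21-base", "help": "Local Diffusers model from: /abs/path/models"},
        {"name": "sdxl-base", "caption": "Diffusers: sdxl-base", "help": "Local Diffusers model from: /abs/path/models"}
    ]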
 
     def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
-        """
-        Retrieves the current configurable settings for the Diffusers binding.
-        """
-        # Actual device and dtype after auto-resolution
-        resolved_device = self.config['device']
-        resolved_dtype_str = self.config['torch_dtype_str']
-
-        # For display, show the original 'auto' if it was set that way, plus the resolved value
-        display_device = self.config['device'] if self.config['device'].lower() != 'auto' else f"auto ({resolved_device})"
-        display_dtype = self.config['torch_dtype_str'] if self.config['torch_dtype_str'].lower() != 'auto' else f"auto ({resolved_dtype_str})"
-
-        settings = [
-            {"name": "model_id_or_path", "type": "str", "value": self.config["model_id_or_path"], "description": "Hugging Face model ID or local path to Diffusers model directory."},
-            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {resolved_device}", "options": ["auto", "cuda", "mps", "cpu"]},
-            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype for model. Current resolved: {resolved_dtype_str}", "options": ["auto", "float16", "bfloat16", "float32"]},
-            {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant (e.g., 'fp16', 'bf16'). Optional."},
+        """Retrieves the current configurable settings for the binding."""
+        local_models = self.list_models()
+        return [
+            {"name": "model_name", "type": "str", "value": self.model_name, "description": "Hugging Face model ID or a local model name from the models folder.", "options": local_models},
+            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
+            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Current resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+            {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant from HF (e.g., 'fp16', 'bf16'). Optional."},
             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer loading models from .safetensors files."},
-            {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler to use for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
+            {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker (if model has one)."},
             {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
             {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
-            {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention (if available)."},
+            {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default width for generated images."},
             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default height for generated images."},
             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default number of inference steps."},
             {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed for generation (-1 for random)."},
-            {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private/gated models). Set to 'None' or empty if not needed. Store securely.", "is_secret": True},
-            {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Only use local files, do not try to download."},
+            {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private models).", "is_secret": True},
+            {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to Hugging Face cache. Defaults to ~/.cache/huggingface."},
+            {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Only use local files, do not download."},
         ]
-        return settings
 
     def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-        """
-        Applies new settings to the Diffusers binding. Some settings may trigger a model reload.
-        """
-        if isinstance(settings, list): # Convert from ConfigTemplate list format
-            parsed_settings = {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
-        elif isinstance(settings, dict):
-            parsed_settings = settings
-        else:
-            ASCIIColors.error("Invalid settings format. Expected a dictionary or list of dictionaries.")
-            return False
+        """Applies new settings to the binding. Some may trigger a model reload."""
+        parsed_settings = settings if isinstance(settings, dict) else \
+            {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
 
-        old_config = self.config.copy()
         needs_reload = False
+        critical_keys = ["model_name", "device", "torch_dtype_str", "use_safetensors",
+                         "safety_checker_on", "hf_variant", "enable_cpu_offload",
+                         "enable_sequential_cpu_offload", "enable_xformers", "hf_token",
+                         "local_files_only", "hf_cache_path"]
 
         for key, value in parsed_settings.items():
-            if key in self.config:
-                if self.config[key] != value:
-                    self.config[key] = value
-                    ASCIIColors.info(f"Setting '{key}' changed to: {value}")
-                    if key in ["model_id_or_path", "device", "torch_dtype_str",
-                               "use_safetensors", "safety_checker_on", "hf_variant",
-                               "enable_cpu_offload", "enable_sequential_cpu_offload", "enable_xformers",
-                               "hf_token", "local_files_only"]:
-                        needs_reload = True
-                    elif key == "scheduler_name" and self.pipeline: # Scheduler can be changed on loaded pipeline
-                        self._set_scheduler() # Attempt to apply immediately
-            else:
-                ASCIIColors.warning(f"Unknown setting '{key}' ignored.")
-
-        if needs_reload:
+            current_value = getattr(self, key, self.config.get(key))
+            if current_value != value:
+                ASCIIColors.info(f"Setting '{key}' changed to: {value}")
+                if key == "model_name":
+                    self.model_name = value
+                self.config[key] = value
+                if key in critical_keys:
+                    needs_reload = True
+                elif key == "scheduler_name" and self.pipeline:
+                    self._set_scheduler()
+
+        if needs_reload and self.model_name:
             ASCIIColors.info("Reloading model due to settings changes...")
             try:
-                # Resolve auto device/dtype again if they were part of the change
-                if "device" in parsed_settings and self.config["device"].lower() == "auto":
-                    if torch.cuda.is_available(): self.config["device"] = "cuda"
-                    elif torch.backends.mps.is_available(): self.config["device"] = "mps"
-                    else: self.config["device"] = "cpu"
-
-                if "torch_dtype_str" in parsed_settings and self.config["torch_dtype_str"].lower() == "auto":
-                    self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
-
-                # Update torch_dtype object from string
-                self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-                if self.torch_dtype == "auto": # Should be resolved by now
-                    self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
-                self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
-
-
+                self._resolve_device_and_dtype()
                 self.load_model()
-                ASCIIColors.green("Model reloaded successfully with new settings.")
+                ASCIIColors.green("Model reloaded successfully.")
             except Exception as e:
                 trace_exception(e)
-                ASCIIColors.error(f"Failed to reload model with new settings: {e}. Reverting critical settings.")
-                # Revert critical settings and try to reload with old config
-                self.config = old_config
-                self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-                try:
-                    self.load_model()
-                    ASCIIColors.info("Reverted to previous model configuration.")
-                except Exception as e_revert:
-                    trace_exception(e_revert)
-                    ASCIIColors.error(f"Failed to revert to previous model configuration: {e_revert}. Binding may be unstable.")
+                ASCIIColors.error(f"Failed to reload model with new settings: {e}. Binding may be unstable.")
                 return False
         return True
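Usage keeps the same shape as before: pass a plain dict (or the ConfigTemplate list form), and any key in critical_keys triggers a full reload while a scheduler change applies in place. Hypothetical calls:

    # Applies in place; "scheduler_name" is not a critical key.
    binding.set_settings({"scheduler_name": "ddim", "guidance_scale": 5.0})

    # "model_name" is critical: triggers _resolve_device_and_dtype() and load_model().
    binding.set_settings({"model_name": "stabilityai/stable-diffusion-2-1-base"})

Note that the revert-on-failure path from 1.0.0 is gone: if the reload fails, set_settings now simply returns False and leaves the binding unloaded.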
 
@@ -550,147 +399,82 @@ if __name__ == '__main__':
 
     if not DIFFUSERS_AVAILABLE:
         ASCIIColors.error("Diffusers or its dependencies are not available. Cannot run test.")
-        # Attempt to guide user for installation
-        print("Please ensure PyTorch, Diffusers, Pillow, and Transformers are installed.")
-        print("For PyTorch with CUDA: visit https://pytorch.org/get-started/locally/")
-        print("Then: pip install diffusers Pillow transformers safetensors")
         exit(1)
 
-    # --- Configuration ---
-    # Small, fast model for testing. Replace with a full model for real use.
-    # "CompVis/stable-diffusion-v1-4" is ~5GB
-    # "google/ddpm-cat-256" is smaller, but a DDPM, not Stable Diffusion.
-    # Using a tiny SD model if one exists, or a small variant.
-    # For a quick test, let's try a small LCM LoRA with SD1.5 if possible or just a base model.
-    # Note: "runwayml/stable-diffusion-v1-5" is a good standard test model.
-    # For a *very* quick CI-like test, one might use a dummy model or a very small one.
-    # Let's use a smaller SD variant if available, otherwise default to 2.1-base.
-    test_model_id = "runwayml/stable-diffusion-v1-5" # ~4GB download. Use a smaller one if you have it locally.
-    # test_model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" # Very small, for testing structure
-
-    # Create dummy lollms_paths
     temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
-    temp_paths_dir.mkdir(parents=True, exist_ok=True)
-    mock_lollms_paths = {
-        "personal_models_path": temp_paths_dir / "personal_models",
-        "models_zoo_path": temp_paths_dir / "models_zoo",
-        "shared_cache_path": temp_paths_dir / "shared_cache", # For Hugging Face cache
-    }
-    for p in mock_lollms_paths.values(): Path(p).mkdir(parents=True, exist_ok=True)
-    (Path(mock_lollms_paths["personal_models_path"]) / "diffusers_models").mkdir(exist_ok=True)
-
-
-    binding_config = {
-        "model_id_or_path": test_model_id,
-        "device": "auto", # Let it auto-detect
-        "torch_dtype_str": "auto",
-        "num_inference_steps": 10, # Faster for testing
-        "default_width": 256, # Smaller for faster testing
-        "default_height": 256,
-        "safety_checker_on": False, # Often disabled for local use flexibility
-        "hf_variant": "fp16" if test_model_id == "runwayml/stable-diffusion-v1-5" else None, # SD 1.5 has fp16 variant
-    }
-
+    temp_models_path = temp_paths_dir / "models"
+    temp_cache_path = temp_paths_dir / "shared_cache"
+
+    # Clean up previous runs
+    if temp_paths_dir.exists():
+        shutil.rmtree(temp_paths_dir)
+    temp_models_path.mkdir(parents=True, exist_ok=True)
+    temp_cache_path.mkdir(parents=True, exist_ok=True)
+
+    # A very small, fast model for testing from Hugging Face.
+    test_model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
+
     try:
-        ASCIIColors.cyan("\n1. Initializing DiffusersTTIBinding_Impl...")
-        binding = DiffusersTTIBinding_Impl(config=binding_config, lollms_paths=mock_lollms_paths)
+        ASCIIColors.cyan("\n1. Initializing binding without a model...")
+        binding = DiffusersTTIBinding_Impl(
+            models_path=str(temp_models_path),
+            hf_cache_path=str(temp_cache_path)
+        )
+        assert binding.pipeline is None, "Pipeline should not be loaded initially."
         ASCIIColors.green("Initialization successful.")
-        ASCIIColors.info(f"Loaded model: {binding.current_model_id_or_path}")
-        ASCIIColors.info(f"Device: {binding.config['device']}, DType: {binding.config['torch_dtype_str']}")
-        ASCIIColors.info(f"Scheduler: {binding.pipeline.scheduler.__class__.__name__ if binding.pipeline else 'N/A'}")
 
-        ASCIIColors.cyan("\n2. Listing services...")
+        ASCIIColors.cyan("\n2. Listing services (should be empty)...")
         services = binding.list_services()
         ASCIIColors.info(json.dumps(services, indent=2))
-        assert services and services[0]["name"] == os.path.basename(binding.current_model_id_or_path)
-
-        ASCIIColors.cyan("\n3. Getting settings...")
-        settings_list = binding.get_settings()
-        ASCIIColors.info(json.dumps(settings_list, indent=2, default=str)) # default=str for Path objects if any
-        # Find model_id_or_path in settings
-        found_model_setting = any(s['name'] == 'model_id_or_path' and s['value'] == test_model_id for s in settings_list)
-        assert found_model_setting, "Model ID not found or incorrect in get_settings"
+        assert services[0]["name"] == "diffusers_no_local_models"
 
+        ASCIIColors.cyan(f"\n3. Setting model_name to '{test_model_id}' to trigger load...")
+        binding.set_settings({"model_name": test_model_id})
+        assert binding.model_name == test_model_id
+        assert binding.pipeline is not None, "Pipeline should be loaded after setting model_name."
+        ASCIIColors.green("Model loaded successfully.")
 
         ASCIIColors.cyan("\n4. Generating an image...")
-        test_prompt = "A vibrant cat astronaut exploring a neon galaxy"
-        test_negative_prompt = "blurry, low quality, text, watermark"
-
-        # Use smaller dimensions for test if default are large
-        gen_width = min(binding.config["default_width"], 256)
-        gen_height = min(binding.config["default_height"], 256)
-
         image_bytes = binding.generate_image(
-            prompt=test_prompt,
-            negative_prompt=test_negative_prompt,
-            width=gen_width, height=gen_height,
-            num_inference_steps=8 # Even fewer for speed
+            prompt="A tiny robot",
+            width=64, height=64,
+            num_inference_steps=2
         )
         assert image_bytes and isinstance(image_bytes, bytes)
-        ASCIIColors.green(f"Image generated successfully (size: {len(image_bytes)} bytes).")
-        # Save the image for verification
+        ASCIIColors.green(f"Image generated (size: {len(image_bytes)} bytes).")
         test_image_path = Path(__file__).parent / "test_diffusers_image.png"
         with open(test_image_path, "wb") as f:
             f.write(image_bytes)
         ASCIIColors.info(f"Test image saved to: {test_image_path.resolve()}")
 
-
-        ASCIIColors.cyan("\n5. Setting new settings (changing scheduler and guidance_scale)...")
-        new_settings_dict = {
-            "scheduler_name": "ddim", # Change scheduler
-            "guidance_scale": 5.0, # Change guidance scale
-            "num_inference_steps": 12 # Change inference steps
-        }
-        binding.set_settings(new_settings_dict)
-        assert binding.config["scheduler_name"] == "ddim"
-        assert binding.config["guidance_scale"] == 5.0
-        assert binding.config["num_inference_steps"] == 12
-        ASCIIColors.info(f"New scheduler (intended): ddim, Actual: {binding.pipeline.scheduler.__class__.__name__}")
-        ASCIIColors.info(f"New guidance_scale: {binding.config['guidance_scale']}")
-
-        ASCIIColors.cyan("\n6. Generating another image with new settings...")
-        image_bytes_2 = binding.generate_image(
-            prompt="A serene landscape with a crystal river",
-            width=gen_width, height=gen_height
-        )
-        assert image_bytes_2 and isinstance(image_bytes_2, bytes)
-        ASCIIColors.green(f"Second image generated successfully (size: {len(image_bytes_2)} bytes).")
-        test_image_path_2 = Path(__file__).parent / "test_diffusers_image_2.png"
-        with open(test_image_path_2, "wb") as f:
-            f.write(image_bytes_2)
-        ASCIIColors.info(f"Second test image saved to: {test_image_path_2.resolve()}")
-
-        # Test model reload by changing a critical parameter (e.g. safety_checker_on)
-        # This requires a different model or a config that can be easily toggled.
-        # For now, assume reload on critical param change works if no error is thrown.
-        ASCIIColors.cyan("\n7. Testing settings change requiring model reload (safety_checker_on)...")
-        current_safety_on = binding.config["safety_checker_on"]
-        binding.set_settings({"safety_checker_on": not current_safety_on})
-        assert binding.config["safety_checker_on"] == (not current_safety_on)
-        ASCIIColors.green("Model reload due to safety_checker_on change seems successful.")
-
+        ASCIIColors.cyan("\n5. Unloading model...")
+        binding.unload_model()
+        assert binding.pipeline is None, "Pipeline should be None after unload."
 
     except Exception as e:
         trace_exception(e)
         ASCIIColors.error(f"Diffusers binding test failed: {e}")
     finally:
-        ASCIIColors.cyan("\nCleaning up...")
-        if 'binding' in locals() and binding:
-            binding.unload_model()
-
-        # Clean up temp_lollms_paths
-        import shutil
+        ASCIIColors.cyan("\nCleaning up temporary directories...")
         if temp_paths_dir.exists():
-            try:
-                shutil.rmtree(temp_paths_dir)
-                ASCIIColors.info(f"Cleaned up temporary directory: {temp_paths_dir}")
-            except Exception as e_clean:
-                ASCIIColors.warning(f"Could not fully clean up {temp_paths_dir}: {e_clean}")
-        if 'test_image_path' in locals() and test_image_path.exists():
-            # os.remove(test_image_path) # Keep for manual check
-            pass
-        if 'test_image_path_2' in locals() and test_image_path_2.exists():
-            # os.remove(test_image_path_2) # Keep for manual check
-            pass
-        ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
+            shutil.rmtree(temp_paths_dir)
+    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
+
+    def listModels(self) -> list:
+        """Lists models"""
+        # TODO: use the models from the folder if set
+        formatted_models=[
+            {
+                'model_name': "dummy model 1",
+                'display_name': "Test dummy model 1",
+                'description': "A test dummy model",
+                'owned_by': 'parisneo'
+            },
+            {
+                'model_name': "dummy model 2",
+                'display_name': "Test dummy model 2",
+                'description': "A test dummy model",
+                'owned_by': 'parisneo'
+            }
+        ]
+        return formatted_models