lollms-client 0.33.0__py3-none-any.whl → 1.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client might be problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +6 -10
- lollms_client/llm_bindings/claude/__init__.py +4 -7
- lollms_client/llm_bindings/gemini/__init__.py +3 -7
- lollms_client/llm_bindings/grok/__init__.py +3 -7
- lollms_client/llm_bindings/groq/__init__.py +4 -6
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +4 -6
- lollms_client/llm_bindings/litellm/__init__.py +15 -6
- lollms_client/llm_bindings/llamacpp/__init__.py +27 -9
- lollms_client/llm_bindings/lollms/__init__.py +24 -14
- lollms_client/llm_bindings/lollms_webui/__init__.py +6 -12
- lollms_client/llm_bindings/mistral/__init__.py +3 -5
- lollms_client/llm_bindings/ollama/__init__.py +6 -11
- lollms_client/llm_bindings/open_router/__init__.py +4 -6
- lollms_client/llm_bindings/openai/__init__.py +7 -14
- lollms_client/llm_bindings/openllm/__init__.py +12 -12
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +1 -1
- lollms_client/llm_bindings/tensor_rt/__init__.py +8 -13
- lollms_client/llm_bindings/transformers/__init__.py +14 -6
- lollms_client/llm_bindings/vllm/__init__.py +16 -12
- lollms_client/lollms_core.py +303 -490
- lollms_client/lollms_discussion.py +431 -78
- lollms_client/lollms_llm_binding.py +192 -381
- lollms_client/lollms_mcp_binding.py +33 -2
- lollms_client/lollms_tti_binding.py +107 -2
- lollms_client/mcp_bindings/local_mcp/__init__.py +3 -2
- lollms_client/mcp_bindings/remote_mcp/__init__.py +6 -5
- lollms_client/mcp_bindings/standard_mcp/__init__.py +3 -5
- lollms_client/stt_bindings/lollms/__init__.py +6 -8
- lollms_client/stt_bindings/whisper/__init__.py +2 -4
- lollms_client/stt_bindings/whispercpp/__init__.py +15 -16
- lollms_client/tti_bindings/dalle/__init__.py +50 -29
- lollms_client/tti_bindings/diffusers/__init__.py +227 -439
- lollms_client/tti_bindings/gemini/__init__.py +320 -0
- lollms_client/tti_bindings/lollms/__init__.py +8 -9
- lollms_client-1.1.0.dist-info/METADATA +1214 -0
- lollms_client-1.1.0.dist-info/RECORD +69 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/top_level.txt +0 -2
- examples/article_summary/article_summary.py +0 -58
- examples/console_discussion/console_app.py +0 -266
- examples/console_discussion.py +0 -448
- examples/deep_analyze/deep_analyse.py +0 -30
- examples/deep_analyze/deep_analyze_multiple_files.py +0 -32
- examples/function_calling_with_local_custom_mcp.py +0 -250
- examples/generate_a_benchmark_for_safe_store.py +0 -89
- examples/generate_and_speak/generate_and_speak.py +0 -251
- examples/generate_game_sfx/generate_game_fx.py +0 -240
- examples/generate_text_with_multihop_rag_example.py +0 -210
- examples/gradio_chat_app.py +0 -228
- examples/gradio_lollms_chat.py +0 -259
- examples/internet_search_with_rag.py +0 -226
- examples/lollms_chat/calculator.py +0 -59
- examples/lollms_chat/derivative.py +0 -48
- examples/lollms_chat/test_openai_compatible_with_lollms_chat.py +0 -12
- examples/lollms_discussions_test.py +0 -155
- examples/mcp_examples/external_mcp.py +0 -267
- examples/mcp_examples/local_mcp.py +0 -171
- examples/mcp_examples/openai_mcp.py +0 -203
- examples/mcp_examples/run_remote_mcp_example_v2.py +0 -290
- examples/mcp_examples/run_standard_mcp_example.py +0 -204
- examples/simple_text_gen_test.py +0 -173
- examples/simple_text_gen_with_image_test.py +0 -178
- examples/test_local_models/local_chat.py +0 -9
- examples/text_2_audio.py +0 -77
- examples/text_2_image.py +0 -144
- examples/text_2_image_diffusers.py +0 -274
- examples/text_and_image_2_audio.py +0 -59
- examples/text_gen.py +0 -30
- examples/text_gen_system_prompt.py +0 -29
- lollms_client-0.33.0.dist-info/METADATA +0 -854
- lollms_client-0.33.0.dist-info/RECORD +0 -101
- test/test_lollms_discussion.py +0 -368
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/WHEEL +0 -0
- {lollms_client-0.33.0.dist-info → lollms_client-1.1.0.dist-info}/licenses/LICENSE +0 -0
Diff of lollms_client/tti_bindings/diffusers/__init__.py (+227 -439):

@@ -5,51 +5,7 @@ from io import BytesIO
 from typing import Optional, List, Dict, Any, Union
 from pathlib import Path
 
-
-try:
-    import pipmaster as pm
-    import platform # For OS detection for torch index
-
-    # Determine initial device preference to guide torch installation
-    preferred_torch_device_for_install = "cpu" # Default assumption
-
-    # Tentatively set preference based on OS, assuming user might want GPU if available
-    if platform.system() == "Linux" or platform.system() == "Windows":
-        # On Linux/Windows, CUDA is the primary GPU acceleration for PyTorch.
-        # We will try to install a CUDA version of PyTorch.
-        preferred_torch_device_for_install = "cuda"
-    elif platform.system() == "Darwin":
-        # On macOS, MPS is the acceleration. Standard torch install usually handles this.
-        preferred_torch_device_for_install = "mps" # or keep cpu if mps detection is later
-
-    torch_pkgs = ["torch", "torchaudio", "torchvision", "xformers"]
-    diffusers_core_pkgs = ["diffusers", "Pillow", "transformers", "safetensors"]
-
-    torch_index_url = None
-    if preferred_torch_device_for_install == "cuda":
-        # Specify a common CUDA version index. Pip should resolve the correct torch version.
-        # As of late 2023/early 2024, cu118 or cu121 are common. Let's use cu126.
-        # Users with different CUDA setups might need to pre-install torch manually.
-        torch_index_url = "https://download.pytorch.org/whl/cu126"
-        ASCIIColors.info(f"Attempting to ensure PyTorch with CUDA support (target index: {torch_index_url})")
-        # Install torch and torchaudio first from the specific index
-        pm.ensure_packages(torch_pkgs, index_url=torch_index_url)
-        # Then install audiocraft and other dependencies; pip should use the already installed torch
-        pm.ensure_packages(diffusers_core_pkgs)
-    else:
-        # For CPU, MPS, or if no specific CUDA preference was determined for install
-        ASCIIColors.info("Ensuring PyTorch, AudioCraft, and dependencies using default PyPI index.")
-        pm.ensure_packages(torch_pkgs + diffusers_core_pkgs)
-
-    import whisper
-    import torch
-    _whisper_installed = True
-except Exception as e:
-    _whisper_installation_error = str(e)
-    whisper = None
-    torch = None
-
-
+# Attempt to import core dependencies and set availability flag
 try:
     import torch
     from diffusers import AutoPipelineForText2Image, DiffusionPipeline
@@ -63,20 +19,21 @@ except ImportError:
     Image = None
     load_image = None
     DIFFUSERS_AVAILABLE = False
-    #
+    # A detailed error will be raised in __init__ if the user tries to use the binding.
 
 from lollms_client.lollms_tti_binding import LollmsTTIBinding
 from ascii_colors import trace_exception, ASCIIColors
 import json # For potential JSONDecodeError and settings
+import shutil
 
 # Defines the binding name for the manager
 BindingName = "DiffusersTTIBinding_Impl"
 
-# Helper for torch.dtype string conversion
+# Helper for torch.dtype string conversion, handles case where torch is not installed
 TORCH_DTYPE_MAP_STR_TO_OBJ = {
-    "float16": torch.float16,
-    "bfloat16": torch.bfloat16,
-    "float32": torch.float32,
+    "float16": getattr(torch, 'float16', 'float16'),
+    "bfloat16": getattr(torch, 'bfloat16', 'bfloat16'),
+    "float32": getattr(torch, 'float32', 'float32'),
     "auto": "auto"
 }
 TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
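The `getattr(torch, 'float16', ...)` entries on the new side are what keep the module importable when torch is absent: if the import above failed and `torch` is `None`, `getattr` returns the string fallback instead of raising at import time. A minimal sketch of the same guard pattern (names here are illustrative, not part of lollms-client):

    # Import-guard sketch: the module stays importable without torch,
    # and dtype lookups degrade to plain strings until torch is present.
    try:
        import torch
    except ImportError:
        torch = None

    DTYPES = {name: getattr(torch, name, name) for name in ("float16", "bfloat16", "float32")}

    def resolve_dtype(name: str):
        value = DTYPES.get(name)
        if value is None or isinstance(value, str):
            # Either an unknown name, or torch was never imported.
            raise RuntimeError("PyTorch is required to resolve a real torch.dtype")
        return value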
@@ -90,22 +47,22 @@ SCHEDULER_MAPPING = {
     "ddim": "DDIMScheduler",
     "ddpm": "DDPMScheduler",
     "deis_multistep": "DEISMultistepScheduler",
-    "dpm_multistep": "DPMSolverMultistepScheduler",
-    "dpm_multistep_karras": "DPMSolverMultistepScheduler",
+    "dpm_multistep": "DPMSolverMultistepScheduler",
+    "dpm_multistep_karras": "DPMSolverMultistepScheduler",
     "dpm_single": "DPMSolverSinglestepScheduler",
-    "dpm_adaptive": "
-    "dpm++_2m": "DPMSolverMultistepScheduler",
-    "dpm++_2m_karras": "DPMSolverMultistepScheduler",
+    "dpm_adaptive": "DPMSolverPlusPlusScheduler",
+    "dpm++_2m": "DPMSolverMultistepScheduler",
+    "dpm++_2m_karras": "DPMSolverMultistepScheduler",
     "dpm++_2s_ancestral": "DPMSolverAncestralDiscreteScheduler",
-    "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
+    "dpm++_2s_ancestral_karras": "DPMSolverAncestralDiscreteScheduler",
     "dpm++_sde": "DPMSolverSDEScheduler",
-    "dpm++_sde_karras": "DPMSolverSDEScheduler",
+    "dpm++_sde_karras": "DPMSolverSDEScheduler",
     "euler_ancestral_discrete": "EulerAncestralDiscreteScheduler",
     "euler_discrete": "EulerDiscreteScheduler",
     "heun_discrete": "HeunDiscreteScheduler",
-    "heun_karras": "HeunDiscreteScheduler",
+    "heun_karras": "HeunDiscreteScheduler",
     "lms_discrete": "LMSDiscreteScheduler",
-    "lms_karras": "LMSDiscreteScheduler",
+    "lms_karras": "LMSDiscreteScheduler",
     "pndm": "PNDMScheduler",
     "unipc_multistep": "UniPCMultistepScheduler",
 }
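Each key in SCHEDULER_MAPPING is a user-facing name and each value is a diffusers scheduler class name resolved at runtime, which is why an entry can reference a class that a given diffusers release does not ship; the binding's `_set_scheduler` (shown further down) wraps the lookup in try/except for exactly that reason. A hedged sketch of applying such a mapping to an already-loaded pipeline (`pipe` is a placeholder; the `importlib` resolution and `from_config` mirror what the binding does):

    import importlib

    def apply_scheduler(pipe, class_name: str, use_karras_sigmas: bool = False):
        # Resolve the scheduler class by name from diffusers.schedulers.
        schedulers = importlib.import_module("diffusers.schedulers")
        scheduler_cls = getattr(schedulers, class_name)
        # Reuse the current scheduler's config so timestep settings carry over.
        config = dict(pipe.scheduler.config)
        config["use_karras_sigmas"] = use_karras_sigmas
        pipe.scheduler = scheduler_cls.from_config(config)

    # e.g. apply_scheduler(pipe, "DPMSolverMultistepScheduler", use_karras_sigmas=True)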
@@ -121,134 +78,118 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
     Allows running various text-to-image models locally.
     """
     DEFAULT_CONFIG = {
-        "
-        "device": "auto",
-        "torch_dtype_str": "auto",
+        "model_name": "",
+        "device": "auto",
+        "torch_dtype_str": "auto",
         "use_safetensors": True,
         "scheduler_name": "default",
-        "safety_checker_on": True,
+        "safety_checker_on": True,
         "num_inference_steps": 25,
         "guidance_scale": 7.5,
-        "default_width": 768,
-        "default_height": 768,
-        "seed": -1,
+        "default_width": 768,
+        "default_height": 768,
+        "seed": -1,
         "enable_cpu_offload": False,
         "enable_sequential_cpu_offload": False,
-        "enable_xformers": False,
-        "hf_variant": None,
+        "enable_xformers": False,
+        "hf_variant": None,
         "hf_token": None,
+        "hf_cache_path": None,
         "local_files_only": False,
     }
 
-
-    def __init__(self,
-                 config: Optional[Dict[str, Any]] = None,
-                 lollms_paths: Optional[Dict[str, Union[str, Path]]] = None,
-                 **kwargs # Catches other potential parameters like 'service_key' or 'client_id'
-                 ):
+    def __init__(self, **kwargs):
         """
         Initialize the Diffusers TTI binding.
 
         Args:
-
-
-
-
-
+            **kwargs: A dictionary of configuration parameters.
+                Expected keys:
+                - model_name (str): The name of the model to use. Can be a Hugging Face Hub ID
+                  (e.g., 'stabilityai/stable-diffusion-xl-base-1.0') or the name of a local
+                  model directory located in `models_path`.
+                - models_path (str or Path): The path to the directory where local models are stored.
+                  Defaults to a 'models' folder next to this file.
+                - hf_cache_path (str or Path, optional): Path to a directory for Hugging Face
+                  to cache downloaded models and files.
+                - Other settings from the DEFAULT_CONFIG can be overridden here.
         """
-        super().__init__(binding_name=
-
+        super().__init__(binding_name=BindingName)
+
         if not DIFFUSERS_AVAILABLE:
-
-
-
-
-
-
-
-
-
-
-                globals()['DiffusionPipeline'] = _DiffusionPipeline
-                globals()['Image'] = _Image
-
-                # Re-populate torch dtype maps if torch was just loaded
-                global TORCH_DTYPE_MAP_STR_TO_OBJ, TORCH_DTYPE_MAP_OBJ_TO_STR
-                TORCH_DTYPE_MAP_STR_TO_OBJ = {
-                    "float16": _torch.float16, "bfloat16": _torch.bfloat16,
-                    "float32": _torch.float32, "auto": "auto"
-                }
-                TORCH_DTYPE_MAP_OBJ_TO_STR = {v: k for k, v in TORCH_DTYPE_MAP_STR_TO_OBJ.items()}
-                TORCH_DTYPE_MAP_OBJ_TO_STR[None] = "None"
-                ASCIIColors.green("Dependencies seem to be available now.")
-            except ImportError as e:
-                trace_exception(e)
-                raise ImportError(
-                    "Diffusers binding dependencies are still not met after trying to ensure them. "
-                    "Please install torch, diffusers, Pillow, and transformers manually. "
-                    f"Error: {e}"
-                ) from e
-
-        self.config = {**self.DEFAULT_CONFIG, **(config or {}), **kwargs}
-        self.lollms_paths = {k: Path(v) for k, v in lollms_paths.items()} if lollms_paths else {}
+            raise ImportError(
+                "Diffusers library or its dependencies (torch, Pillow, transformers) are not installed. "
+                "Please install them using: pip install torch diffusers Pillow transformers safetensors"
+            )
+
+        # Merge default config with user-provided kwargs
+        self.config = {**self.DEFAULT_CONFIG, **kwargs}
+
+        # model_name is crucial, get it from the merged config
+        self.model_name = self.config.get("model_name", "")
 
+        # models_path is also special, handle it with its default logic
+        self.models_path = Path(kwargs.get("models_path", Path(__file__).parent / "models"))
+        self.models_path.mkdir(parents=True, exist_ok=True)
+
         self.pipeline: Optional[DiffusionPipeline] = None
-        self.current_model_id_or_path = None
+        self.current_model_id_or_path = None
 
-
-        if self.config["device"].lower() == "auto":
-            if torch.cuda.is_available(): self.config["device"] = "cuda"
-            elif torch.backends.mps.is_available(): self.config["device"] = "mps"
-            else: self.config["device"] = "cpu"
+        self._resolve_device_and_dtype()
 
-        if self.
-
-
-
-        self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-        if self.torch_dtype == "auto": # Should have been resolved above
-            self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
-        self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
+        if self.model_name:
+            self.load_model()
+        else:
+            ASCIIColors.warning("No model_name provided during initialization. The binding is idle.")
 
 
-
-
+    def _resolve_device_and_dtype(self):
+        """Resolves auto settings for device and dtype from config."""
+        if self.config["device"].lower() == "auto":
+            if torch.cuda.is_available():
+                self.config["device"] = "cuda"
+            elif torch.backends.mps.is_available():
+                self.config["device"] = "mps"
+            else:
+                self.config["device"] = "cpu"
 
-        self.
+        if self.config["torch_dtype_str"].lower() == "auto":
+            self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
 
+        self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
+        if self.torch_dtype == "auto": # Final fallback
+            self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
+        self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
 
-    def _resolve_model_path(self,
-        """
-        if
-
+    def _resolve_model_path(self, model_name: str) -> str:
+        """
+        Resolves a model name to a full path if it's a local model,
+        otherwise returns it as is (assuming it's a Hugging Face Hub ID).
+        """
+        if not model_name:
+            raise ValueError("Model name cannot be empty.")
+
+        if Path(model_name).is_absolute() and Path(model_name).is_dir():
+            ASCIIColors.info(f"Using absolute path for model: {model_name}")
+            return model_name
 
-
-        if
-
-
-            ASCIIColors.info(f"Found local model in personal_models_path: {personal_diffusers_path}")
-            return str(personal_diffusers_path)
-
-        # Check models_zoo_path/diffusers_models/<name> (if different from personal)
-        if self.lollms_paths.get('models_zoo_path') and \
-           self.lollms_paths.get('models_zoo_path') != self.lollms_paths.get('personal_models_path'):
-            zoo_diffusers_path = self.lollms_paths['models_zoo_path'] / "diffusers_models" / model_id_or_path
-            if zoo_diffusers_path.exists() and zoo_diffusers_path.is_dir():
-                ASCIIColors.info(f"Found local model in models_zoo_path: {zoo_diffusers_path}")
-                return str(zoo_diffusers_path)
+        local_model_path = self.models_path / model_name
+        if local_model_path.exists() and local_model_path.is_dir():
+            ASCIIColors.info(f"Found local model in '{self.models_path}': {local_model_path}")
+            return str(local_model_path)
 
-        ASCIIColors.info(f"
-        return
+        ASCIIColors.info(f"'{model_name}' not found locally. Assuming it is a Hugging Face Hub ID.")
+        return model_name
 
     def load_model(self):
         """Loads the Diffusers pipeline based on current configuration."""
         ASCIIColors.info("Loading Diffusers model...")
         if self.pipeline is not None:
-            self.unload_model()
+            self.unload_model()
 
         try:
-            model_path = self._resolve_model_path(self.
-            self.current_model_id_or_path = model_path
+            model_path = self._resolve_model_path(self.model_name)
+            self.current_model_id_or_path = model_path
 
             load_args = {
                 "torch_dtype": self.torch_dtype,
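With the new `__init__(self, **kwargs)` signature, configuration flows entirely through keyword arguments merged over `DEFAULT_CONFIG`. A hedged usage sketch (the model ID and paths are examples only; the import path follows the package layout shown in the file list above):

    from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding_Impl

    binding = DiffusersTTIBinding_Impl(
        model_name="stabilityai/stable-diffusion-xl-base-1.0",  # HF Hub ID or local folder name
        models_path="/data/diffusers_models",  # where local model folders live
        hf_cache_path="/data/hf_cache",        # optional shared Hugging Face cache
        device="auto",
        torch_dtype_str="auto",
    )
    # Because model_name is set, __init__ calls load_model() immediately;
    # without it, the binding stays idle until set_settings provides a model.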
@@ -262,26 +203,11 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
             if not self.config["safety_checker_on"]:
                 load_args["safety_checker"] = None
 
-            if self.
-                load_args["cache_dir"] = str(self.
-
+            if self.config.get("hf_cache_path"):
+                load_args["cache_dir"] = str(self.config["hf_cache_path"])
 
-
-            pipeline_class_to_load = AutoPipelineForText2Image
-            custom_pipeline_class_name = self.config.get("pipeline_class_name")
-
-            if custom_pipeline_class_name:
-                try:
-                    diffusers_module = importlib.import_module("diffusers")
-                    pipeline_class_to_load = getattr(diffusers_module, custom_pipeline_class_name)
-                    ASCIIColors.info(f"Using specified pipeline class: {custom_pipeline_class_name}")
-                except (ImportError, AttributeError) as e:
-                    ASCIIColors.warning(f"Could not load custom pipeline class {custom_pipeline_class_name}: {e}. Falling back to AutoPipelineForText2Image.")
-                    pipeline_class_to_load = AutoPipelineForText2Image
+            self.pipeline = AutoPipelineForText2Image.from_pretrained(model_path, **load_args)
 
-            self.pipeline = pipeline_class_to_load.from_pretrained(model_path, **load_args)
-
-            # Scheduler
             self._set_scheduler()
 
             self.pipeline.to(self.config["device"])
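The `load_args` built above map directly onto `AutoPipelineForText2Image.from_pretrained`. Stripped of the binding, a roughly equivalent standalone call looks like this (the model ID and cache path are examples, and `diffusers`/`torch` must be installed):

    import torch
    from diffusers import AutoPipelineForText2Image

    pipe = AutoPipelineForText2Image.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
        use_safetensors=True,
        cache_dir="/data/hf_cache",  # mirrors load_args["cache_dir"]
    )
    pipe = pipe.to("cuda")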
@@ -297,18 +223,18 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
                 self.pipeline.enable_model_cpu_offload()
                 ASCIIColors.info("Model CPU offload enabled.")
             elif self.config["enable_sequential_cpu_offload"] and self.config["device"] != "cpu":
-                self.pipeline.enable_sequential_cpu_offload()
+                self.pipeline.enable_sequential_cpu_offload()
                 ASCIIColors.info("Sequential CPU offload enabled.")
 
-
-            ASCIIColors.green(f"Diffusers model '{model_path}' loaded successfully on device '{self.config['device']}' with dtype '{self.config['torch_dtype_str']}'.")
+            ASCIIColors.green(f"Diffusers model '{model_path}' loaded on device '{self.config['device']}'.")
 
         except Exception as e:
             trace_exception(e)
             self.pipeline = None
-            raise RuntimeError(f"Failed to load Diffusers model '{self.
+            raise RuntimeError(f"Failed to load Diffusers model '{self.model_name}': {e}") from e
 
     def _set_scheduler(self):
+        """Sets the scheduler for the pipeline based on config."""
         if not self.pipeline: return
 
         scheduler_name_key = self.config["scheduler_name"].lower()
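Both offload paths above are standard diffusers pipeline methods (they rely on the `accelerate` package being installed): `enable_model_cpu_offload` moves whole submodules between CPU and GPU per pipeline stage, while `enable_sequential_cpu_offload` streams weights layer by layer for larger VRAM savings at a much higher latency cost. Shown standalone, assuming a loaded `pipe`:

    # Pick one mode, not both; each requires `accelerate`.
    pipe.enable_model_cpu_offload()         # moderate VRAM savings, small slowdown
    # pipe.enable_sequential_cpu_offload()  # maximum VRAM savings, much slower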
@@ -319,221 +245,148 @@ class DiffusersTTIBinding_Impl(LollmsTTIBinding):
         scheduler_class_name = SCHEDULER_MAPPING.get(scheduler_name_key)
         if scheduler_class_name:
             try:
-
-                SchedulerClass = getattr(scheduler_module, scheduler_class_name)
-
+                SchedulerClass = getattr(importlib.import_module("diffusers.schedulers"), scheduler_class_name)
                 scheduler_config = self.pipeline.scheduler.config
-
-                    scheduler_config["use_karras_sigmas"] = True
-                else: # Ensure it's False if not a karras variant for this scheduler
-                    if "use_karras_sigmas" in scheduler_config:
-                        scheduler_config["use_karras_sigmas"] = False
-
-
+                scheduler_config["use_karras_sigmas"] = scheduler_name_key in SCHEDULER_USES_KARRAS_SIGMAS
                 self.pipeline.scheduler = SchedulerClass.from_config(scheduler_config)
                 ASCIIColors.info(f"Switched scheduler to {scheduler_name_key} ({scheduler_class_name}).")
             except Exception as e:
-                trace_exception(e)
                 ASCIIColors.warning(f"Could not switch scheduler to {scheduler_name_key}: {e}. Using current default.")
         else:
-            ASCIIColors.warning(f"Unknown scheduler
-
+            ASCIIColors.warning(f"Unknown scheduler: '{self.config['scheduler_name']}'. Using model default.")
 
     def unload_model(self):
         if self.pipeline is not None:
             del self.pipeline
             self.pipeline = None
+            if torch and torch.cuda.is_available():
+                torch.cuda.empty_cache()
             ASCIIColors.info("Diffusers pipeline unloaded.")
-        if torch and torch.cuda.is_available():
-            torch.cuda.empty_cache()
-
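The refactor moves `torch.cuda.empty_cache()` inside the `if self.pipeline is not None:` branch, so the cache is only flushed when something was actually released. Note that `del` alone only drops the Python reference; the usual fuller teardown adds a garbage-collection pass, as in this sketch (not part of the binding):

    import gc
    import torch

    def release_pipeline(pipe):
        del pipe            # drop the last reference to the pipeline
        gc.collect()        # collect any cycles still holding GPU tensors
        if torch.cuda.is_available():
            torch.cuda.empty_cache()  # hand cached blocks back to the driver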
-    def generate_image(self,
-                       prompt: str,
-                       negative_prompt: Optional[str] = "",
-                       width: Optional[int] = None, # Uses default from config if None
-                       height: Optional[int] = None, # Uses default from config if None
-                       **kwargs) -> bytes:
-        """
-        Generates image data using the Diffusers pipeline.
 
-
-
-            negative_prompt (Optional[str]): The negative prompt.
-            width (int): Image width. Overrides default.
-            height (int): Image height. Overrides default.
-            **kwargs: Additional parameters for the pipeline:
-                - num_inference_steps (int)
-                - guidance_scale (float)
-                - seed (int)
-                - eta (float, for DDIM)
-                - num_images_per_prompt (int, though this binding returns one)
-                - clip_skip (int, if supported by pipeline - advanced)
-        Returns:
-            bytes: The generated image data (PNG format).
-        Raises:
-            Exception: If the request fails or image generation fails.
-        """
+    def generate_image(self, prompt: str, negative_prompt: str = "", width: int = None, height: int = None, **kwargs) -> bytes:
+        """Generates an image using the loaded Diffusers pipeline."""
         if not self.pipeline:
             raise RuntimeError("Diffusers pipeline is not loaded. Cannot generate image.")
 
-
-
-        _height = height if height is not None else self.config["default_height"]
+        _width = width or self.config["default_width"]
+        _height = height or self.config["default_height"]
         _num_inference_steps = kwargs.get("num_inference_steps", self.config["num_inference_steps"])
         _guidance_scale = kwargs.get("guidance_scale", self.config["guidance_scale"])
         _seed = kwargs.get("seed", self.config["seed"])
 
-        generator = None
-        if _seed != -1: # -1 means random seed
-            generator = torch.Generator(device=self.config["device"]).manual_seed(_seed)
+        generator = torch.Generator(device=self.config["device"]).manual_seed(_seed) if _seed != -1 else None
 
         pipeline_args = {
             "prompt": prompt,
-            "negative_prompt": negative_prompt
+            "negative_prompt": negative_prompt or None,
             "width": _width,
             "height": _height,
             "num_inference_steps": _num_inference_steps,
             "guidance_scale": _guidance_scale,
             "generator": generator,
-            "num_images_per_prompt": kwargs.get("num_images_per_prompt", 1)
         }
-        if "eta" in kwargs: pipeline_args["eta"] = kwargs["eta"]
-        if "clip_skip" in kwargs and hasattr(self.pipeline, "clip_skip"): # Handle clip_skip if supported
-            pipeline_args["clip_skip"] = kwargs["clip_skip"]
-
-
         ASCIIColors.info(f"Generating image with prompt: '{prompt[:100]}...'")
-        ASCIIColors.debug(f"Pipeline args: {pipeline_args}")
 
         try:
-            with torch.no_grad():
+            with torch.no_grad():
                 pipeline_output = self.pipeline(**pipeline_args)
 
             pil_image: Image.Image = pipeline_output.images[0]
-
-            # Convert PIL Image to bytes (PNG)
             img_byte_arr = BytesIO()
             pil_image.save(img_byte_arr, format="PNG")
-            img_bytes = img_byte_arr.getvalue()
 
             ASCIIColors.green("Image generated successfully.")
-            return
+            return img_byte_arr.getvalue()
 
         except Exception as e:
             trace_exception(e)
             raise Exception(f"Diffusers image generation failed: {e}") from e
 
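With the simplified signature, a call to the binding returns PNG bytes directly. A hedged usage sketch (assumes a `binding` whose pipeline is already loaded; prompt and dimensions are examples):

    png_bytes = binding.generate_image(
        prompt="A lighthouse at dawn, oil painting",
        negative_prompt="blurry, watermark",
        width=512,
        height=512,
        num_inference_steps=20,
        guidance_scale=7.5,
        seed=42,  # -1 would pick a random seed
    )
    with open("out.png", "wb") as f:
        f.write(png_bytes)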
+    def list_models(self) -> List[str]:
+        """Lists available local models from the models_path."""
+        if not self.models_path.exists():
+            return []
+
+        models = []
+        for model_dir in self.models_path.iterdir():
+            if model_dir.is_dir():
+                # Check for key files indicating a valid diffusers model directory
+                if (model_dir / "model_index.json").exists() or (model_dir / "unet" / "config.json").exists():
+                    models.append(model_dir.name)
+        return sorted(models)
+
     def list_services(self, **kwargs) -> List[Dict[str, str]]:
-        """
-
-
-        """
-        if self.pipeline and self.current_model_id_or_path:
+        """Lists available local models from the models_path."""
+        models = self.list_models()
+        if not models:
             return [{
-                "name":
-                "caption":
-                "help":
-                        f"Device: {self.config['device']}. DType: {self.config['torch_dtype_str']}. "
-                        f"Scheduler: {self.pipeline.scheduler.__class__.__name__}.")
+                "name": "diffusers_no_local_models",
+                "caption": "No local Diffusers models found",
+                "help": f"Place Diffusers model folders inside '{self.models_path.resolve()}' or specify a Hugging Face model ID in settings to download one."
             }]
-
+
+        return [{
+            "name": model_name,
+            "caption": f"Diffusers: {model_name}",
+            "help": f"Local Diffusers model from: {self.models_path.resolve()}"
+        } for model_name in models]
 
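`list_models` recognizes a folder as a Diffusers model when it contains `model_index.json` (a full pipeline layout) or `unet/config.json` (a bare component layout). The same heuristic as a standalone helper, for illustration:

    from pathlib import Path

    def looks_like_diffusers_model(folder: Path) -> bool:
        # Same check list_models uses to accept a local model directory.
        return (folder / "model_index.json").exists() or (folder / "unet" / "config.json").exists()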
     def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
-        """
-
-
-
-
-
-
-        # For display, show the original 'auto' if it was set that way, plus the resolved value
-        display_device = self.config['device'] if self.config['device'].lower() != 'auto' else f"auto ({resolved_device})"
-        display_dtype = self.config['torch_dtype_str'] if self.config['torch_dtype_str'].lower() != 'auto' else f"auto ({resolved_dtype_str})"
-
-        settings = [
-            {"name": "model_id_or_path", "type": "str", "value": self.config["model_id_or_path"], "description": "Hugging Face model ID or local path to Diffusers model directory."},
-            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {resolved_device}", "options": ["auto", "cuda", "mps", "cpu"]},
-            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype for model. Current resolved: {resolved_dtype_str}", "options": ["auto", "float16", "bfloat16", "float32"]},
-            {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant (e.g., 'fp16', 'bf16'). Optional."},
+        """Retrieves the current configurable settings for the binding."""
+        local_models = self.list_models()
+        return [
+            {"name": "model_name", "type": "str", "value": self.model_name, "description": "Hugging Face model ID or a local model name from the models folder.", "options": local_models},
+            {"name": "device", "type": "str", "value": self.config["device"], "description": f"Device for inference. Current resolved: {self.config['device']}", "options": ["auto", "cuda", "mps", "cpu"]},
+            {"name": "torch_dtype_str", "type": "str", "value": self.config["torch_dtype_str"], "description": f"Torch dtype. Current resolved: {self.config['torch_dtype_str']}", "options": ["auto", "float16", "bfloat16", "float32"]},
+            {"name": "hf_variant", "type": "str", "value": self.config["hf_variant"], "description": "Model variant from HF (e.g., 'fp16', 'bf16'). Optional."},
             {"name": "use_safetensors", "type": "bool", "value": self.config["use_safetensors"], "description": "Prefer loading models from .safetensors files."},
-            {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler
+            {"name": "scheduler_name", "type": "str", "value": self.config["scheduler_name"], "description": "Scheduler for diffusion.", "options": list(SCHEDULER_MAPPING.keys())},
             {"name": "safety_checker_on", "type": "bool", "value": self.config["safety_checker_on"], "description": "Enable the safety checker (if model has one)."},
             {"name": "enable_cpu_offload", "type": "bool", "value": self.config["enable_cpu_offload"], "description": "Enable model CPU offload (saves VRAM, slower)."},
             {"name": "enable_sequential_cpu_offload", "type": "bool", "value": self.config["enable_sequential_cpu_offload"], "description": "Enable sequential CPU offload (more VRAM savings, much slower)."},
-            {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention
+            {"name": "enable_xformers", "type": "bool", "value": self.config["enable_xformers"], "description": "Enable xFormers memory efficient attention."},
             {"name": "default_width", "type": "int", "value": self.config["default_width"], "description": "Default width for generated images."},
             {"name": "default_height", "type": "int", "value": self.config["default_height"], "description": "Default height for generated images."},
             {"name": "num_inference_steps", "type": "int", "value": self.config["num_inference_steps"], "description": "Default number of inference steps."},
             {"name": "guidance_scale", "type": "float", "value": self.config["guidance_scale"], "description": "Default guidance scale (CFG)."},
             {"name": "seed", "type": "int", "value": self.config["seed"], "description": "Default seed for generation (-1 for random)."},
-            {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private
-            {"name": "
+            {"name": "hf_token", "type": "str", "value": self.config["hf_token"], "description": "Hugging Face API token (for private models).", "is_secret": True},
+            {"name": "hf_cache_path", "type": "str", "value": self.config["hf_cache_path"], "description": "Path to Hugging Face cache. Defaults to ~/.cache/huggingface."},
+            {"name": "local_files_only", "type": "bool", "value": self.config["local_files_only"], "description": "Only use local files, do not download."},
         ]
-        return settings
 
     def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-        """
-
-
-        if isinstance(settings, list): # Convert from ConfigTemplate list format
-            parsed_settings = {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
-        elif isinstance(settings, dict):
-            parsed_settings = settings
-        else:
-            ASCIIColors.error("Invalid settings format. Expected a dictionary or list of dictionaries.")
-            return False
+        """Applies new settings to the binding. Some may trigger a model reload."""
+        parsed_settings = settings if isinstance(settings, dict) else \
+                          {item["name"]: item["value"] for item in settings if "name" in item and "value" in item}
 
-        old_config = self.config.copy()
         needs_reload = False
+        critical_keys = ["model_name", "device", "torch_dtype_str", "use_safetensors",
+                         "safety_checker_on", "hf_variant", "enable_cpu_offload",
+                         "enable_sequential_cpu_offload", "enable_xformers", "hf_token",
+                         "local_files_only", "hf_cache_path"]
 
         for key, value in parsed_settings.items():
-
-
-
-
-
-
-
-
-
-
-
-
-                ASCIIColors.warning(f"Unknown setting '{key}' ignored.")
-
-        if needs_reload:
+            current_value = getattr(self, key, self.config.get(key))
+            if current_value != value:
+                ASCIIColors.info(f"Setting '{key}' changed to: {value}")
+                if key == "model_name":
+                    self.model_name = value
+                self.config[key] = value
+                if key in critical_keys:
+                    needs_reload = True
+                elif key == "scheduler_name" and self.pipeline:
+                    self._set_scheduler()
+
+        if needs_reload and self.model_name:
             ASCIIColors.info("Reloading model due to settings changes...")
             try:
-
-                if "device" in parsed_settings and self.config["device"].lower() == "auto":
-                    if torch.cuda.is_available(): self.config["device"] = "cuda"
-                    elif torch.backends.mps.is_available(): self.config["device"] = "mps"
-                    else: self.config["device"] = "cpu"
-
-                if "torch_dtype_str" in parsed_settings and self.config["torch_dtype_str"].lower() == "auto":
-                    self.config["torch_dtype_str"] = "float16" if self.config["device"] != "cpu" else "float32"
-
-                # Update torch_dtype object from string
-                self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-                if self.torch_dtype == "auto": # Should be resolved by now
-                    self.torch_dtype = torch.float16 if self.config["device"] != "cpu" else torch.float32
-                self.config["torch_dtype_str"] = TORCH_DTYPE_MAP_OBJ_TO_STR.get(self.torch_dtype, "float32")
-
-
+                self._resolve_device_and_dtype()
                 self.load_model()
-                ASCIIColors.green("Model reloaded successfully
+                ASCIIColors.green("Model reloaded successfully.")
             except Exception as e:
                 trace_exception(e)
-                ASCIIColors.error(f"Failed to reload model with new settings: {e}.
-                # Revert critical settings and try to reload with old config
-                self.config = old_config
-                self.torch_dtype = TORCH_DTYPE_MAP_STR_TO_OBJ.get(self.config["torch_dtype_str"].lower(), torch.float32)
-                try:
-                    self.load_model()
-                    ASCIIColors.info("Reverted to previous model configuration.")
-                except Exception as e_revert:
-                    trace_exception(e_revert)
-                    ASCIIColors.error(f"Failed to revert to previous model configuration: {e_revert}. Binding may be unstable.")
+                ASCIIColors.error(f"Failed to reload model with new settings: {e}. Binding may be unstable.")
                 return False
         return True
 
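In the new `set_settings`, any changed key listed in `critical_keys` queues a single reload at the end, while `scheduler_name` is applied in place on the live pipeline. A hedged usage sketch (model ID is an example):

    # model_name is a critical key, so this triggers one reload;
    # scheduler_name alone would be applied without reloading.
    ok = binding.set_settings({
        "model_name": "stabilityai/stable-diffusion-xl-base-1.0",
        "scheduler_name": "euler_ancestral_discrete",
    })
    assert ok, "settings application reported failure"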
@@ -546,147 +399,82 @@ if __name__ == '__main__':
 
     if not DIFFUSERS_AVAILABLE:
         ASCIIColors.error("Diffusers or its dependencies are not available. Cannot run test.")
-        # Attempt to guide user for installation
-        print("Please ensure PyTorch, Diffusers, Pillow, and Transformers are installed.")
-        print("For PyTorch with CUDA: visit https://pytorch.org/get-started/locally/")
-        print("Then: pip install diffusers Pillow transformers safetensors")
         exit(1)
 
-    # --- Configuration ---
-    # Small, fast model for testing. Replace with a full model for real use.
-    # "CompVis/stable-diffusion-v1-4" is ~5GB
-    # "google/ddpm-cat-256" is smaller, but a DDPM, not Stable Diffusion.
-    # Using a tiny SD model if one exists, or a small variant.
-    # For a quick test, let's try a small LCM LoRA with SD1.5 if possible or just a base model.
-    # Note: "runwayml/stable-diffusion-v1-5" is a good standard test model.
-    # For a *very* quick CI-like test, one might use a dummy model or a very small one.
-    # Let's use a smaller SD variant if available, otherwise default to 2.1-base.
-    test_model_id = "runwayml/stable-diffusion-v1-5" # ~4GB download. Use a smaller one if you have it locally.
-    # test_model_id = "hf-internal-testing/tiny-stable-diffusion-pipe" # Very small, for testing structure
-
-    # Create dummy lollms_paths
     temp_paths_dir = Path(__file__).parent / "temp_lollms_paths_diffusers"
-
-
-
-
-
-
-
-
-
-
-
-
-        "device": "auto", # Let it auto-detect
-        "torch_dtype_str": "auto",
-        "num_inference_steps": 10, # Faster for testing
-        "default_width": 256, # Smaller for faster testing
-        "default_height": 256,
-        "safety_checker_on": False, # Often disabled for local use flexibility
-        "hf_variant": "fp16" if test_model_id == "runwayml/stable-diffusion-v1-5" else None, # SD 1.5 has fp16 variant
-    }
-
+    temp_models_path = temp_paths_dir / "models"
+    temp_cache_path = temp_paths_dir / "shared_cache"
+
+    # Clean up previous runs
+    if temp_paths_dir.exists():
+        shutil.rmtree(temp_paths_dir)
+    temp_models_path.mkdir(parents=True, exist_ok=True)
+    temp_cache_path.mkdir(parents=True, exist_ok=True)
+
+    # A very small, fast model for testing from Hugging Face.
+    test_model_id = "hf-internal-testing/tiny-stable-diffusion-torch"
+
     try:
-        ASCIIColors.cyan("\n1. Initializing
-        binding = DiffusersTTIBinding_Impl(
+        ASCIIColors.cyan("\n1. Initializing binding without a model...")
+        binding = DiffusersTTIBinding_Impl(
+            models_path=str(temp_models_path),
+            hf_cache_path=str(temp_cache_path)
+        )
+        assert binding.pipeline is None, "Pipeline should not be loaded initially."
         ASCIIColors.green("Initialization successful.")
-        ASCIIColors.info(f"Loaded model: {binding.current_model_id_or_path}")
-        ASCIIColors.info(f"Device: {binding.config['device']}, DType: {binding.config['torch_dtype_str']}")
-        ASCIIColors.info(f"Scheduler: {binding.pipeline.scheduler.__class__.__name__ if binding.pipeline else 'N/A'}")
 
-        ASCIIColors.cyan("\n2. Listing services...")
+        ASCIIColors.cyan("\n2. Listing services (should be empty)...")
         services = binding.list_services()
         ASCIIColors.info(json.dumps(services, indent=2))
-        assert services
-
-        ASCIIColors.cyan("\n3. Getting settings...")
-        settings_list = binding.get_settings()
-        ASCIIColors.info(json.dumps(settings_list, indent=2, default=str)) # default=str for Path objects if any
-        # Find model_id_or_path in settings
-        found_model_setting = any(s['name'] == 'model_id_or_path' and s['value'] == test_model_id for s in settings_list)
-        assert found_model_setting, "Model ID not found or incorrect in get_settings"
+        assert services[0]["name"] == "diffusers_no_local_models"
 
+        ASCIIColors.cyan(f"\n3. Setting model_name to '{test_model_id}' to trigger load...")
+        binding.set_settings({"model_name": test_model_id})
+        assert binding.model_name == test_model_id
+        assert binding.pipeline is not None, "Pipeline should be loaded after setting model_name."
+        ASCIIColors.green("Model loaded successfully.")
 
         ASCIIColors.cyan("\n4. Generating an image...")
-        test_prompt = "A vibrant cat astronaut exploring a neon galaxy"
-        test_negative_prompt = "blurry, low quality, text, watermark"
-
-        # Use smaller dimensions for test if default are large
-        gen_width = min(binding.config["default_width"], 256)
-        gen_height = min(binding.config["default_height"], 256)
-
         image_bytes = binding.generate_image(
-            prompt=
-
-
-            num_inference_steps=8 # Even fewer for speed
+            prompt="A tiny robot",
+            width=64, height=64,
+            num_inference_steps=2
         )
         assert image_bytes and isinstance(image_bytes, bytes)
-        ASCIIColors.green(f"Image generated
-        # Save the image for verification
+        ASCIIColors.green(f"Image generated (size: {len(image_bytes)} bytes).")
         test_image_path = Path(__file__).parent / "test_diffusers_image.png"
         with open(test_image_path, "wb") as f:
             f.write(image_bytes)
         ASCIIColors.info(f"Test image saved to: {test_image_path.resolve()}")
 
-
-
-
-            "scheduler_name": "ddim", # Change scheduler
-            "guidance_scale": 5.0, # Change guidance scale
-            "num_inference_steps": 12 # Change inference steps
-        }
-        binding.set_settings(new_settings_dict)
-        assert binding.config["scheduler_name"] == "ddim"
-        assert binding.config["guidance_scale"] == 5.0
-        assert binding.config["num_inference_steps"] == 12
-        ASCIIColors.info(f"New scheduler (intended): ddim, Actual: {binding.pipeline.scheduler.__class__.__name__}")
-        ASCIIColors.info(f"New guidance_scale: {binding.config['guidance_scale']}")
-
-        ASCIIColors.cyan("\n6. Generating another image with new settings...")
-        image_bytes_2 = binding.generate_image(
-            prompt="A serene landscape with a crystal river",
-            width=gen_width, height=gen_height
-        )
-        assert image_bytes_2 and isinstance(image_bytes_2, bytes)
-        ASCIIColors.green(f"Second image generated successfully (size: {len(image_bytes_2)} bytes).")
-        test_image_path_2 = Path(__file__).parent / "test_diffusers_image_2.png"
-        with open(test_image_path_2, "wb") as f:
-            f.write(image_bytes_2)
-        ASCIIColors.info(f"Second test image saved to: {test_image_path_2.resolve()}")
-
-        # Test model reload by changing a critical parameter (e.g. safety_checker_on)
-        # This requires a different model or a config that can be easily toggled.
-        # For now, assume reload on critical param change works if no error is thrown.
-        ASCIIColors.cyan("\n7. Testing settings change requiring model reload (safety_checker_on)...")
-        current_safety_on = binding.config["safety_checker_on"]
-        binding.set_settings({"safety_checker_on": not current_safety_on})
-        assert binding.config["safety_checker_on"] == (not current_safety_on)
-        ASCIIColors.green("Model reload due to safety_checker_on change seems successful.")
-
+        ASCIIColors.cyan("\n5. Unloading model...")
+        binding.unload_model()
+        assert binding.pipeline is None, "Pipeline should be None after unload."
 
     except Exception as e:
         trace_exception(e)
         ASCIIColors.error(f"Diffusers binding test failed: {e}")
     finally:
-        ASCIIColors.cyan("\nCleaning up...")
-        if 'binding' in locals() and binding:
-            binding.unload_model()
-
-        # Clean up temp_lollms_paths
-        import shutil
+        ASCIIColors.cyan("\nCleaning up temporary directories...")
         if temp_paths_dir.exists():
-
-
-
-
-
-
-
-
-
-
-
-
+            shutil.rmtree(temp_paths_dir)
+    ASCIIColors.magenta("--- Diffusers TTI Binding Test Finished ---")
+
+def listModels(self) -> list:
+    """Lists models"""
+    # TODO: use the models from the folder if set
+    formatted_models=[
+        {
+            'model_name': "dummy model 1",
+            'display_name': "Test dummy model 1",
+            'description': "A test dummy model",
+            'owned_by': 'parisneo'
+        },
+        {
+            'model_name': "dummy model 2",
+            'display_name': "Test dummy model 2",
+            'description': "A test dummy model",
+            'owned_by': 'parisneo'
+        }
+    ]
+    return formatted_models