lollms-client 1.7.10__py3-none-any.whl → 1.7.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/claude/__init__.py +0 -1
- lollms_client/llm_bindings/grok/__init__.py +0 -1
- lollms_client/llm_bindings/llama_cpp_server/__init__.py +605 -0
- lollms_client/llm_bindings/ollama/__init__.py +40 -2
- lollms_client/lollms_discussion.py +40 -28
- lollms_client/lollms_llm_binding.py +15 -1
- lollms_client/lollms_mcp_binding.py +15 -2
- lollms_client/lollms_stt_binding.py +15 -1
- lollms_client/lollms_tti_binding.py +15 -1
- lollms_client/lollms_ttm_binding.py +15 -1
- lollms_client/lollms_tts_binding.py +15 -1
- lollms_client/lollms_ttv_binding.py +15 -1
- lollms_client/tti_bindings/diffusers/__init__.py +132 -79
- lollms_client/tti_bindings/diffusers/server/main.py +76 -65
- lollms_client/tts_bindings/xtts/__init__.py +1 -1
- {lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/METADATA +1 -1
- {lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/RECORD +21 -20
- {lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/WHEEL +0 -0
- {lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/top_level.txt +0 -0
lollms_client/tti_bindings/diffusers/__init__.py
@@ -26,26 +26,20 @@ except ImportError:
 from lollms_client.lollms_tti_binding import LollmsTTIBinding
 from ascii_colors import ASCIIColors

-BindingName = "
-
-class
-
-    Client binding for a dedicated, managed Diffusers server.
-    This architecture prevents multiple models from being loaded into memory
-    in a multi-worker environment, solving OOM errors.
-    """
-    def __init__(self,
-                 **kwargs):
+BindingName = "DiffusersTTIBinding"
+
+class DiffusersTTIBinding(LollmsTTIBinding):
+    def __init__(self, **kwargs):
         # Prioritize 'model_name' but accept 'model' as an alias from config files.
         if 'model' in kwargs and 'model_name' not in kwargs:
             kwargs['model_name'] = kwargs.pop('model')
         super().__init__(binding_name=BindingName, config=kwargs)

-
-
+        self.config = kwargs
         self.host = kwargs.get("host", "localhost")
         self.port = kwargs.get("port", 9632)
-        self.auto_start_server = kwargs.get("auto_start_server",
+        self.auto_start_server = kwargs.get("auto_start_server", False)
+        self.wait_for_server = kwargs.get("wait_for_server", False)
         self.server_process = None
         self.base_url = f"http://{self.host}:{self.port}"
         self.binding_root = Path(__file__).parent
@@ -53,9 +47,11 @@ class DiffusersBinding(LollmsTTIBinding):
         self.venv_dir = Path("./venv/tti_diffusers_venv")
         self.models_path = Path(kwargs.get("models_path", "./data/models/diffusers_models")).resolve()
         self.extra_models_path = kwargs.get("extra_models_path")
+        self.hf_token = kwargs.get("hf_token", "") # NEW
         self.models_path.mkdir(exist_ok=True, parents=True)
         if self.auto_start_server:
-            self.ensure_server_is_running()
+            self.ensure_server_is_running(self.wait_for_server)
+

     def is_server_running(self) -> bool:
         """Checks if the server is already running and responsive."""
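The constructor above now reads `wait_for_server` and `hf_token` from its kwargs in addition to the existing host/port/model settings. A minimal usage sketch, assuming the package's import path for this binding; the concrete values are placeholders, only the keyword names come from the diff:

```python
from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding

# Placeholder values for illustration; only the key names are taken from the diff.
binding = DiffusersTTIBinding(
    model_name="stabilityai/sdxl-turbo",  # 'model' is accepted as an alias
    host="localhost",
    port=9632,
    auto_start_server=True,   # launch the managed server from the constructor
    wait_for_server=False,    # new: do not block while the server boots
    hf_token="hf_xxx",        # new: forwarded to the server for gated repos
    models_path="./data/models/diffusers_models",
)
```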
@@ -68,47 +64,21 @@ class DiffusersBinding(LollmsTTIBinding):
             return False


-    def ensure_server_is_running(self):
+    def ensure_server_is_running(self, wait= False):
         """
         Ensures the Diffusers server is running. If not, it attempts to start it
         in a process-safe manner using a file lock. This method is designed to
         prevent race conditions in multi-worker environments.
         """
         self.server_dir.mkdir(exist_ok=True)
-        # Use a lock file in the binding's server directory for consistency across instances
-        lock_path = self.server_dir / "diffusers_server.lock"
-        lock = FileLock(lock_path)
-
         ASCIIColors.info("Attempting to start or connect to the Diffusers server...")

         # First, perform a quick check without the lock to avoid unnecessary waiting.
         if self.is_server_running():
             ASCIIColors.green("Diffusers Server is already running and responsive.")
             return
-
-
-            # Try to acquire the lock with a timeout. If another process is starting
-            # the server, this will wait until it's finished.
-            with lock.acquire(timeout=3):
-                # After acquiring the lock, we MUST re-check if the server is running.
-                # Another process might have started it and released the lock while we were waiting.
-                if not self.is_server_running():
-                    ASCIIColors.yellow("Lock acquired. Starting dedicated Diffusers server...")
-                    self.start_server()
-                    # The process that starts the server is responsible for waiting for it to be ready
-                    # BEFORE releasing the lock. This is the key to preventing race conditions.
-                    self._wait_for_server()
-                else:
-                    ASCIIColors.green("Server was started by another process while we waited. Connected successfully.")
-        except Timeout:
-            # This happens if the process holding the lock takes more than 60 seconds to start the server.
-            # We don't try to start another one. We just wait for the existing one to be ready.
-            ASCIIColors.yellow("Could not acquire lock, another process is taking a long time to start the server. Waiting...")
-            self._wait_for_server(timeout=60) # Give it a longer timeout here just in case.
-
-        # A final verification to ensure we are connected.
-        if not self.is_server_running():
-            raise RuntimeError("Failed to start or connect to the Diffusers server after all attempts.")
+        else:
+            self.start_server(wait)

     def install_server_dependencies(self):
         """
@@ -175,43 +145,70 @@ class DiffusersBinding(LollmsTTIBinding):

         ASCIIColors.green("Server dependencies are satisfied.")

-    def start_server(self):
+    def start_server(self, wait=True, timeout_s=20):
         """
-
+        Launches the FastAPI server in a background thread and returns immediately.
         This method should only be called from within a file lock.
         """
-
-
-        # Fallback for old structure
-        server_script = self.binding_root / "server.py"
-        if not server_script.exists():
-            raise FileNotFoundError(f"Server script not found at {server_script}. Make sure it's in a 'server' subdirectory.")
-        if not self.venv_dir.exists():
-            self.install_server_dependencies()
-
-        if sys.platform == "win32":
-            python_executable = self.venv_dir / "Scripts" / "python.exe"
-        else:
-            python_executable = self.venv_dir / "bin" / "python"
-
-        command = [
-            str(python_executable),
-            str(server_script),
-            "--host", self.host,
-            "--port", str(self.port),
-            "--models-path", str(self.models_path.resolve()) # Pass models_path to server
-        ]
-
-        if self.extra_models_path:
-            resolved_extra_path = Path(self.extra_models_path).resolve()
-            command.extend(["--extra-models-path", str(resolved_extra_path)])
+        import threading
+

-
-
-
+        def _start_server_background():
+            """Helper method to start the server in a background thread."""
+            # Use a lock file in the binding's server directory for consistency across instances
+            lock_path = self.server_dir / "diffusers_server.lock"
+            lock = FileLock(lock_path)
+            with lock.acquire(timeout=0):
+                try:
+                    server_script = self.server_dir / "main.py"
+                    if not server_script.exists():
+                        # Fallback for old structure
+                        server_script = self.binding_root / "server.py"
+                        if not server_script.exists():
+                            raise FileNotFoundError(f"Server script not found at {server_script}. Make sure it's in a 'server' subdirectory.")
+                    if not self.venv_dir.exists():
+                        self.install_server_dependencies()
+
+                    if sys.platform == "win32":
+                        python_executable = self.venv_dir / "Scripts" / "python.exe"
+                    else:
+                        python_executable = self.venv_dir / "bin" / "python"
+
+                    command = [
+                        str(python_executable),
+                        str(server_script),
+                        "--host", self.host,
+                        "--port", str(self.port),
+                        "--models-path", str(self.models_path.resolve())
+                    ]
+
+                    if self.extra_models_path:
+                        resolved_extra_path = Path(self.extra_models_path).resolve()
+                        command.extend(["--extra-models-path", str(resolved_extra_path)])
+
+                    if self.hf_token:
+                        command.extend(["--hf-token", self.hf_token])
+
+                    if self.extra_models_path:
+                        resolved_extra_path = Path(self.extra_models_path).resolve()
+                        command.extend(["--extra-models-path", str(resolved_extra_path)])
+
+                    creationflags = subprocess.DETACHED_PROCESS if sys.platform == "win32" else 0
+                    self.server_process = subprocess.Popen(command, creationflags=creationflags)
+                    ASCIIColors.info("Diffusers server process launched in the background.")
+                    while(not self.is_server_running()):
+                        time.sleep(1)
+
+                except Exception as e:
+                    ASCIIColors.error(f"Failed to start Diffusers server: {e}")
+                    raise
+
+        # Start the server in a background thread
+        thread = threading.Thread(target=_start_server_background, daemon=True)
+        thread.start()
+        if wait:
+            thread.join()

-        self.server_process = subprocess.Popen(command, creationflags=creationflags)
-        ASCIIColors.info("Diffusers server process launched in the background.")

     def _wait_for_server(self, timeout=30):
         """Waits for the server to become responsive."""
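`start_server` now does the launch inside a background thread guarded by a non-blocking `FileLock`, so only the first worker that reaches it actually spawns the process; `wait=True` simply joins that thread. A minimal, self-contained sketch of the same pattern (not the binding's exact code; the function and parameter names here are invented for illustration):

```python
import threading
import time
from filelock import FileLock, Timeout

def start_once(lock_file: str, launch, is_ready, wait: bool = True):
    """Launch a shared server at most once across workers, optionally blocking."""
    def _worker():
        try:
            # timeout=0 makes the acquire non-blocking: only the first caller
            # gets the lock and actually spawns the process.
            with FileLock(lock_file).acquire(timeout=0):
                launch()
                while not is_ready():   # poll until the server answers
                    time.sleep(1)
        except Timeout:
            # Another worker already holds the lock and is starting the server.
            pass

    t = threading.Thread(target=_worker, daemon=True)
    t.start()
    if wait:
        t.join()
```

One difference from this sketch: in the diff above, a second worker's failed acquire is not caught, so its background thread simply ends with the `Timeout`.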
@@ -288,6 +285,7 @@ class DiffusersBinding(LollmsTTIBinding):
             pass

     def generate_image(self, prompt: str, negative_prompt: str = "", **kwargs) -> bytes:
+        self.ensure_server_is_running(True)
         params = kwargs.copy()
         if "model_name" not in params and self.config.get("model_name"):
             params["model_name"] = self.config["model_name"]
@@ -300,6 +298,7 @@ class DiffusersBinding(LollmsTTIBinding):
         return response.content

     def edit_image(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], prompt: str, **kwargs) -> bytes:
+        self.ensure_server_is_running(True)
         images_b64 = []
         if not isinstance(images, list):
             images = [images]
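Because `ensure_server_is_running(True)` now runs at the top of `generate_image` and `edit_image`, the server can also be brought up lazily on first use rather than in the constructor. A rough usage sketch with placeholder prompt and output path:

```python
from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding

# The server is started (and waited for) on the first call, because
# generate_image now begins with ensure_server_is_running(True).
binding = DiffusersTTIBinding(model_name="stabilityai/sdxl-turbo",
                              auto_start_server=False)

image_bytes = binding.generate_image(
    prompt="a watercolor fox in a snowy forest",
    negative_prompt="blurry, low quality",
)
with open("fox.png", "wb") as f:  # placeholder output path
    f.write(image_bytes)
```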
@@ -342,24 +341,78 @@
         response = self._post_json_request("/edit_image", data=json_payload)
         return response.content

-    def list_models(self) ->
-
+    def list_models(self) -> list:
+        """
+        Lists only models that are available locally on disk.
+
+        The Diffusers server scans `models_path` and `extra_models_path` for:
+        - Diffusers pipeline folders (with model_index.json, etc.)
+        - .safetensors checkpoints and associated configs.
+
+        Returns list of dicts: {"model_name": str, "display_name": str, "description": str}
+        """
+        self.ensure_server_is_running(True)
+        try:
+            response = self._get_request("/list_models")
+            data = response.json()
+            if not isinstance(data, list):
+                return []
+            return data
+        except Exception as e:
+            ASCIIColors.warning(f"Failed to list local Diffusers models: {e}")
+            return []
+

     def list_local_models(self) -> List[str]:
+        self.ensure_server_is_running(True)
         return self._get_request("/list_local_models").json()

     def list_available_models(self) -> List[str]:
+        self.ensure_server_is_running(True)
         return self._get_request("/list_available_models").json()

     def list_services(self, **kwargs) -> List[Dict[str, str]]:
+        self.ensure_server_is_running(True)
         return self._get_request("/list_models").json()

     def get_settings(self, **kwargs) -> List[Dict[str, Any]]:
+        self.ensure_server_is_running(True)
         # The server holds the state, so we fetch it.
         return self._get_request("/get_settings").json()
+
+    def get_zoo(self):
+        return [
+            {"name": "Stable Diffusion 1.5", "description": "The classic and versatile SD1.5 base model.", "size": "4GB", "type": "checkpoint", "link": "runwayml/stable-diffusion-v1-5"},
+            {"name": "Stable Diffusion 2.1", "description": "The 768x768 base model from the SD2.x series.", "size": "5GB", "type": "checkpoint", "link": "stabilityai/stable-diffusion-2-1"},
+            {"name": "Stable Diffusion XL 1.0", "description": "Official 1024x1024 text-to-image model from Stability AI.", "size": "7GB", "type": "checkpoint", "link": "stabilityai/stable-diffusion-xl-base-1.0"},
+            {"name": "SDXL Turbo", "description": "A fast, real-time text-to-image model based on SDXL.", "size": "7GB", "type": "checkpoint", "link": "stabilityai/sdxl-turbo"},
+            {"name": "Kandinsky 3", "description": "A powerful multilingual model with strong prompt understanding.", "size": "Unknown", "type": "checkpoint", "link": "kandinsky-community/kandinsky-3"},
+            {"name": "Playground v2.5", "description": "A high-quality model focused on aesthetic outputs.", "size": "Unknown", "type": "checkpoint", "link": "playgroundai/playground-v2.5-1024px-aesthetic"},
+            {"name": "epiCRealism", "description": "A popular community model for generating photorealistic images.", "size": "2GB", "type": "checkpoint", "link": "emilianJR/epiCRealism"},
+            {"name": "Realistic Vision 5.1", "description": "One of the most popular realistic models, great for portraits.", "size": "2GB", "type": "checkpoint", "link": "SG161222/Realistic_Vision_V5.1_noVAE"},
+            {"name": "Photon", "description": "A model known for high-quality, realistic images with good lighting.", "size": "2GB", "type": "checkpoint", "link": "Photon-v1"},
+            {"name": "Waifu Diffusion 1.4", "description": "A widely-used model for generating high-quality anime-style images.", "size": "2GB", "type": "checkpoint", "link": "hakurei/waifu-diffusion"},
+            {"name": "Counterfeit V3.0", "description": "A strong model for illustrative and 2.5D anime styles.", "size": "2GB", "type": "checkpoint", "link": "gsdf/Counterfeit-V3.0"},
+            {"name": "Animagine XL 3.0", "description": "A state-of-the-art anime model on the SDXL architecture.", "size": "7GB", "type": "checkpoint", "link": "cagliostrolab/animagine-xl-3.0"},
+            {"name": "DreamShaper 8", "description": "Versatile SD1.5 style model (CivitAI).", "size": "2GB", "type": "checkpoint", "link": "https://civitai.com/api/download/models/128713"},
+            {"name": "Juggernaut XL", "description": "Artistic SDXL (CivitAI).", "size": "7GB", "type": "checkpoint", "link": "https://civitai.com/api/download/models/133005"},
+            {"name": "Stable Diffusion 3 Medium", "description": "SOTA model with advanced prompt understanding (Gated).", "size": "Unknown", "type": "checkpoint", "link": "stabilityai/stable-diffusion-3-medium-diffusers"},
+            {"name": "FLUX.1 Schnell", "description": "Powerful and fast next-gen model (Gated).", "size": "Unknown", "type": "checkpoint", "link": "black-forest-labs/FLUX.1-schnell"},
+            {"name": "FLUX.1 Dev", "description": "Larger developer version of FLUX.1 (Gated).", "size": "Unknown", "type": "checkpoint", "link": "black-forest-labs/FLUX.1-dev"},
+        ]
+
+    def download_from_zoo(self, index: int, progress_callback: Callable[[dict], None] = None) -> dict:
+        zoo = self.get_zoo()
+        if index < 0 or index >= len(zoo):
+            msg = "Index out of bounds"
+            ASCIIColors.error(msg)
+            return {"status": False, "message": msg}
+        item = zoo[index]
+        return self.pull_model(item["link"], progress_callback=progress_callback)

     def set_settings(self, settings: Union[Dict[str, Any], List[Dict[str, Any]]], **kwargs) -> bool:
-
+        self.ensure_server_is_running(True)
+        # Normalize settings from list of dicts to a single dict if needed
         parsed_settings = settings if isinstance(settings, dict) else {s["name"]: s["value"] for s in settings if "name" in s and "value" in s}
         response = self._post_json_request("/set_settings", data=parsed_settings)
         return response.json().get("success", False)
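`get_zoo()` returns a static catalogue and `download_from_zoo(index)` simply forwards the selected entry's `link` to `pull_model`. A sketch of how a caller might browse it; the progress callback below is a made-up example:

```python
from lollms_client.tti_bindings.diffusers import DiffusersTTIBinding

# Start and wait for the managed server so pull requests can be served.
binding = DiffusersTTIBinding(auto_start_server=True, wait_for_server=True)

# Print the built-in catalogue with its indices.
for i, entry in enumerate(binding.get_zoo()):
    print(i, entry["name"], "-", entry["size"])

def on_progress(info: dict):
    # Hypothetical callback; the binding only forwards it to pull_model.
    print(info)

# Index 3 is "SDXL Turbo" in the catalogue above.
result = binding.download_from_zoo(3, progress_callback=on_progress)
print(result)
```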
@@ -448,4 +501,4 @@ class DiffusersBinding(LollmsTTIBinding):
     def __del__(self):
         # The client destructor does not stop the server,
         # as it is a shared resource for all worker processes.
-        pass
+        pass
lollms_client/tti_bindings/diffusers/server/main.py
@@ -835,7 +835,7 @@ async def edit_image(request: EditRequestJSON):
 def pull_model_endpoint(payload: PullModelRequest):
     if not payload.hf_id and not payload.safetensors_url:
         raise HTTPException(status_code=400, detail="Provide either 'hf_id' or 'safetensors_url'.")
-
+
     # 1) Pull Hugging Face model into a folder
     if payload.hf_id:
         model_id = payload.hf_id.strip()
@@ -902,83 +902,85 @@ def pull_model_endpoint(payload: PullModelRequest):
 @router.get("/list_local_models")
 def list_local_models_endpoint():
     local_models = set()
+    models_root = Path(args.models_path)
+    extra_root = Path(args.extra_models_path) if args.extra_models_path else None

-
-
-            local_models.add(f.name)
-
-    if state.extra_models_path and state.extra_models_path.exists():
-        for f in state.extra_models_path.glob("*.safetensors"):
-            local_models.add(f.name)
-
-    # 2) Folder-based HF/diffusers models: treat folder name as the model
-    def add_folder_models(base: Path):
-        if not base or not base.exists():
+    def scan_root(root: Path):
+        if not root or not root.exists():
             return
-
-
+
+        # 1. Diffusers folders (Recursive)
+        for model_index in root.rglob("model_index.json"):
+            # For listing just the name, we probably want the folder name or relative path
+            # Keeping it simple: folder name.
+            local_models.add(model_index.parent.name)
+
+        # 2. Safetensors files (Recursive)
+        for safepath in root.rglob("*.safetensors"):
+            if (safepath.parent / "model_index.json").exists():
                 continue
-
-
-
-
+            local_models.add(safepath.name)
+
+    scan_root(models_root)
+    scan_root(extra_root)
+
+    return sorted(list(local_models))

-
-
+@app.get("/list_models")
+def list_models() -> list[dict]:
+    models_root = Path(args.models_path)
+    extra_root = Path(args.extra_models_path) if args.extra_models_path else None
+    result = []
+    seen_paths = set()

-
+    def scan_root(root: Path):
+        if not root or not root.exists():
+            return
+
+        # 1. Diffusers folders (Recursive)
+        # We look for model_index.json
+        for model_index in root.rglob("model_index.json"):
+            folder = model_index.parent
+            resolved_path = str(folder.resolve())
+            if resolved_path in seen_paths:
+                continue
+            seen_paths.add(resolved_path)
+
+            result.append({
+                "model_name": resolved_path,
+                "display_name": folder.name,
+                "description": "Local Diffusers pipeline"
+            })
+
+        # 2. Safetensors files (Recursive)
+        for safepath in root.rglob("*.safetensors"):
+            # Skip if part of a diffusers folder
+            if (safepath.parent / "model_index.json").exists():
+                continue
+
+            resolved_path = str(safepath.resolve())
+            if resolved_path in seen_paths:
+                continue
+            seen_paths.add(resolved_path)

-
-
-
-
-    # 1) Local models - ensure dict format
-    local_files = list_local_models_endpoint()
-    for model_name in local_files:
-        models.append({
-            "model_name": model_name,
-            "display_name": model_name,
-            "description": "(Local) Folder model" if not model_name.endswith(".safetensors") else "(Local) Local safetensors file",
-            "owned_by": "local_user"
-        })
-
-    # 2) HF Public models - already dicts from HF_PUBLIC_MODELS
-    for category, hf_models in HF_PUBLIC_MODELS.items():
-        for model_info in hf_models:
-            models.append({
-                "model_name": model_info["model_name"],
-                "display_name": model_info["display_name"],
-                "description": f"({category}) {model_info['desc']}",
-                "owned_by": "huggingface"
+            result.append({
+                "model_name": resolved_path,
+                "display_name": safepath.stem,
+                "description": "Local .safetensors checkpoint"
             })

-
-
-
-        for model_info in gated_models:
-            models.append({
-                "model_name": model_info["model_name"],
-                "display_name": model_info["display_name"],
-                "description": f"({category}) {model_info['desc']}",
-                "owned_by": "huggingface"
-            })
+    scan_root(models_root)
+    scan_root(extra_root)
+    return result

-    # 4) Civitai models - ensure dict format
-    for key, info in CIVITAI_MODELS.items():
-        models.append({
-            "model_name": key,
-            "display_name": info["display_name"],
-            "description": f"(Civitai) {info['description']}",
-            "owned_by": info["owned_by"]
-        })
-
-    return models # Plain list of dicts - JSON serializable



 @router.get("/list_available_models")
 def list_available_models_endpoint():
-
+    # Use list_models() to get all available models (dicts) then extract names
+    models_dicts = list_models()
+    discoverable = [m['model_name'] for m in models_dicts]
     return sorted(list(set(discoverable)))

 @router.get("/get_settings")
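On the server side, `/list_models` now reports only what exists on disk: Diffusers pipeline folders (found recursively via `model_index.json`) and standalone `.safetensors` files, deduplicated by resolved path. A hedged sketch of querying the endpoints directly with `requests`, assuming the binding's default port 9632 for a running server (the binding normally goes through its own `_get_request` helper):

```python
import requests

base_url = "http://localhost:9632"  # assumed host/port of a running server

# /list_models returns dicts with model_name (resolved path), display_name, description.
for model in requests.get(f"{base_url}/list_models", timeout=10).json():
    print(model["display_name"], "->", model["model_name"])

# /list_local_models returns a sorted list of folder and checkpoint names.
print(requests.get(f"{base_url}/list_local_models", timeout=10).json())
```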
@@ -1032,12 +1034,21 @@ if __name__ == "__main__":
     parser.add_argument("--port", type=int, default=9630, help="Port to bind to.")
     parser.add_argument("--models-path", type=str, required=True, help="Path to the models directory.")
     parser.add_argument("--extra-models-path", type=str, default=None, help="Path to an extra models directory.")
+    parser.add_argument(
+        "--hf-token",
+        type=str,
+        default=None,
+        help="Optional Hugging Face access token used to download private or gated repos."
+    )
+
     args = parser.parse_args()

     MODELS_PATH = Path(args.models_path)
     EXTRA_MODELS_PATH = Path(args.extra_models_path) if args.extra_models_path else None
     state = ServerState(MODELS_PATH, EXTRA_MODELS_PATH)
-
+    if args.hf_token:
+        state.config["hf_token"] = args.hf_token
+        ASCIIColors.info("Hugging Face token received via CLI and stored in server config.")
     ASCIIColors.cyan(f"--- Diffusers TTI Server ---")
     ASCIIColors.green(f"Starting server on http://{args.host}:{args.port}")
     ASCIIColors.green(f"Serving models from: {MODELS_PATH.resolve()}")
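The new `--hf-token` argument lets a manually launched server download gated or private Hugging Face repos; the binding forwards its `hf_token` config as this flag automatically. A sketch of a manual launch via `subprocess` (paths, port, and token are placeholders):

```python
import subprocess

# Hypothetical manual launch; the binding builds an equivalent command itself.
subprocess.Popen([
    "python", "lollms_client/tti_bindings/diffusers/server/main.py",
    "--host", "localhost",
    "--port", "9632",
    "--models-path", "./data/models/diffusers_models",
    "--hf-token", "hf_xxx",  # placeholder token for gated repos
])
```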
lollms_client/tts_bindings/xtts/__init__.py
@@ -33,7 +33,7 @@ class XTTSClientBinding(LollmsTTSBinding):
         self.config = kwargs
         self.host = kwargs.get("host", "localhost")
         self.port = kwargs.get("port", 9633)
-        self.auto_start_server = kwargs.get("auto_start_server",
+        self.auto_start_server = kwargs.get("auto_start_server", False)
         self.server_process = None
         self.base_url = f"http://{self.host}:{self.port}"
         self.binding_root = Path(__file__).parent
{lollms_client-1.7.10.dist-info → lollms_client-1.7.13.dist-info}/RECORD
@@ -1,37 +1,38 @@
-lollms_client/__init__.py,sha256=
+lollms_client/__init__.py,sha256=k3Na6pbO8newVrsPcgw-qKCbxyKiBuJh7BlX0qRBlqU,1147
 lollms_client/lollms_agentic.py,sha256=ljalnmeSU-sbzH19-c9TzrJ-HhEeo4mxXmpJGkXj720,14094
 lollms_client/lollms_base_binding.py,sha256=5nVnj7idw9nY1et_qXL9gg8qHvr4kpZF92HUJlvlitE,2019
 lollms_client/lollms_config.py,sha256=goEseDwDxYJf3WkYJ4IrLXwg3Tfw73CXV2Avg45M_hE,21876
 lollms_client/lollms_core.py,sha256=JAyVDYQeyQE6e-p9M8bzyKb1fHWK7lQrapuryds60PU,240436
-lollms_client/lollms_discussion.py,sha256=
+lollms_client/lollms_discussion.py,sha256=kMU6lCZHTXWO7odxy9xKXBW0grcjsmLfrxn4aeeRwDo,125244
 lollms_client/lollms_js_analyzer.py,sha256=01zUvuO2F_lnUe_0NLxe1MF5aHE1hO8RZi48mNPv-aw,8361
-lollms_client/lollms_llm_binding.py,sha256=
-lollms_client/lollms_mcp_binding.py,sha256=
+lollms_client/lollms_llm_binding.py,sha256=J64w21Y_HUNAHKEmjpwAOw-PodM4qCK0l3cQ5w_GJLw,17726
+lollms_client/lollms_mcp_binding.py,sha256=XRfsEKOpduWEgWzBDda4E320jrE8bu5nsIXcxpRYObU,8022
 lollms_client/lollms_mcp_security.py,sha256=FhVTDhSBjksGEZnopVnjFmEF5dv7D8bBTqoaj4BiF0E,3562
 lollms_client/lollms_personality.py,sha256=kGuFwmgA9QDLcQlLQ9sKeceMujdEo0Aw28fN5H8MpjI,8847
 lollms_client/lollms_python_analyzer.py,sha256=7gf1fdYgXCOkPUkBAPNmr6S-66hMH4_KonOMsADASxc,10246
-lollms_client/lollms_stt_binding.py,sha256=
-lollms_client/lollms_tti_binding.py,sha256=
-lollms_client/lollms_ttm_binding.py,sha256=
-lollms_client/lollms_tts_binding.py,sha256=
-lollms_client/lollms_ttv_binding.py,sha256=
+lollms_client/lollms_stt_binding.py,sha256=kExBromnSVsZZCndzveApFZ9raWSzGTRhEiPvf7QFEc,7143
+lollms_client/lollms_tti_binding.py,sha256=Lj7O4oMKmBYiEArz1Tr_UjvtmYUyJ5rA-1SxQ2Gd1jo,8283
+lollms_client/lollms_ttm_binding.py,sha256=93aa7ABwqT8oKtqf9hWsSUBjImwbEwAqvkzHYp4F59A,4135
+lollms_client/lollms_tts_binding.py,sha256=Zh29KQdTsDL_G9v9j2wDQnk33e9WLQw_VbcLVeyRNZA,6115
+lollms_client/lollms_ttv_binding.py,sha256=crsF2G06EyD2Zp0NgqIO-ogBTtCh0tg5ratrMJimPH8,4133
 lollms_client/lollms_types.py,sha256=FuN7BPhsz9tzCwOkoLt_MvC_t4VkeU3elNA6ooGy_t4,3593
 lollms_client/lollms_utilities.py,sha256=3DAsII2X9uhRzRL-D0QlALcEdRg82y7OIL4yHVF32gY,19446
 lollms_client/assets/models_ctx_sizes.json,sha256=jFDLW4GoT431544QXXyi9fA5tqIBmTrwaIA1_syoZ-Y,14972
 lollms_client/llm_bindings/__init__.py,sha256=9sWGpmWSSj6KQ8H4lKGCjpLYwhnVdL_2N7gXCphPqh4,14
 lollms_client/llm_bindings/azure_openai/__init__.py,sha256=bTb_JdG060RZB5rWLPKyvTUA0I-_XAx2FDWg4y3Eyi8,16503
-lollms_client/llm_bindings/claude/__init__.py,sha256=
+lollms_client/llm_bindings/claude/__init__.py,sha256=_d1bYuUyHIlbnSMo2OIQKObxmeaId_bxwvZJZcRawKs,30179
 lollms_client/llm_bindings/gemini/__init__.py,sha256=CblEpmGTVJDrbRFVNQBuUTMj6j5RpI9h5M3KUjMUKxk,25910
-lollms_client/llm_bindings/grok/__init__.py,sha256=
+lollms_client/llm_bindings/grok/__init__.py,sha256=gxbQYUhT5TN45aiy1JjtceDI5vaH8N9h_uPpNrNnwMg,25506
 lollms_client/llm_bindings/groq/__init__.py,sha256=JQCbvMUOWelHPxKX8PYAtqJgb4UzuTtp_qdMILn8zm8,12122
 lollms_client/llm_bindings/hugging_face_inference_api/__init__.py,sha256=szA1NRa7urNfFG3JrRU0FtJsyde24rciZhGaLlJyKWs,13940
 lollms_client/llm_bindings/litellm/__init__.py,sha256=zVIlYW7DyIWnEIyt1E7m0N7lEL0AHIz2rZ-1vDpvm_0,12757
+lollms_client/llm_bindings/llama_cpp_server/__init__.py,sha256=nDoasnxZ5g9bhVY26lT0aV7_GjoAMCbNqpE0lBP-gqc,25710
 lollms_client/llm_bindings/llamacpp/__init__.py,sha256=bDlcrfKMDwX7O_d2HST2TwkR3PVcvgQ96-kwWFWz4Ao,63039
 lollms_client/llm_bindings/lollms/__init__.py,sha256=wRkj-aw7SokjALsD6tjQ8afNASm2Brj3lQQj6Ui2m7M,30025
 lollms_client/llm_bindings/lollms_webui/__init__.py,sha256=nGIGv9sS3hdlNIcxYvOtq1_oSbPWX2RlCvA7bTRmwkE,17754
 lollms_client/llm_bindings/mistral/__init__.py,sha256=S_6skFAhGKNcr5IVWlqoaZ4gfWd94vzIuGcmF3nAeyg,14135
 lollms_client/llm_bindings/novita_ai/__init__.py,sha256=7t6D43hT-SCLgBor08VQTxRJQq27puS5CHRlAeYsczc,21492
-lollms_client/llm_bindings/ollama/__init__.py,sha256=
+lollms_client/llm_bindings/ollama/__init__.py,sha256=u5T03aERRedTcFNeBgbValNZUgjDt0ssvoL866myTTA,60672
 lollms_client/llm_bindings/open_router/__init__.py,sha256=fEVjNW1Q3rpXT659KLkMeaN7mHeas8Q0LxcXoiJvVKM,14917
 lollms_client/llm_bindings/openai/__init__.py,sha256=wno0j62kQ2joDjpeKQLvckxJIJSJCOiFrKTKKc550bw,31342
 lollms_client/llm_bindings/openllm/__init__.py,sha256=N6MAyZtcIpGpLDVhoqyLPFDXGQOzCB_hMldlEMUc7jA,15799
@@ -53,8 +54,8 @@ lollms_client/stt_bindings/lollms/__init__.py,sha256=9Vmn1sQQZKLGLe7nZnc-0LnNeSY
 lollms_client/stt_bindings/whisper/__init__.py,sha256=OCF5ncriFN0ukFz47dJhQoJxQ2NMA7kZ81sVdBtQBKo,19191
 lollms_client/stt_bindings/whispercpp/__init__.py,sha256=5YQKFy3UaN-S-HGZiFCIcuPGTJTELPgqqct1AcTqz-Q,21595
 lollms_client/tti_bindings/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client/tti_bindings/diffusers/__init__.py,sha256=
-lollms_client/tti_bindings/diffusers/server/main.py,sha256=
+lollms_client/tti_bindings/diffusers/__init__.py,sha256=2VhT0KF51UrSRLn9s_rihkv-e6hbfIlQY57a5N1WNXI,25633
+lollms_client/tti_bindings/diffusers/server/main.py,sha256=RdHVVSuY5YA5vsa4FqDnhJJUGqRr_sZ3isYFyTLpk34,53018
 lollms_client/tti_bindings/gemini/__init__.py,sha256=_9MifhHOi2uNoW8vqmMIPHvjiF6fJq28Cq3Ckg309tA,13184
 lollms_client/tti_bindings/leonardo_ai/__init__.py,sha256=rO6FFLfXFMqgirDdO2J2lelpYrhyaj_Uhu2NK-gBd7g,6075
 lollms_client/tti_bindings/lollms/__init__.py,sha256=J_EH-A13Zj4G2pEbOjSCT9Hw4oSHGl7n6FEBBQODn20,8983
@@ -77,13 +78,13 @@ lollms_client/tts_bindings/piper_tts/__init__.py,sha256=Atd1WmtvbJ5XJrh4R8JRr_5p
 lollms_client/tts_bindings/piper_tts/server/install_piper.py,sha256=g71Ne2T18wAytOPipfQ9DNeTAOD9PrII5qC-vr9DtLA,3256
 lollms_client/tts_bindings/piper_tts/server/main.py,sha256=DMozfSR1aCbrlmOXltRFjtXhYhXajsGcNKQjsWgRwZk,17402
 lollms_client/tts_bindings/piper_tts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
-lollms_client/tts_bindings/xtts/__init__.py,sha256
+lollms_client/tts_bindings/xtts/__init__.py,sha256=-jIaXDUWEQ32JzS7I1O6NeS0rmduWBSaN8kJuuXSxBg,8172
 lollms_client/tts_bindings/xtts/server/main.py,sha256=feTAX4eAo2HY6PpcDTrgRMak5AXocO7UIhKPuGuWpxY,12303
 lollms_client/tts_bindings/xtts/server/setup_voices.py,sha256=UdHaPa5aNcw8dR-aRGkZr2OfSFFejH79lXgfwT0P3ss,1964
 lollms_client/ttv_bindings/__init__.py,sha256=UZ8o2izQOJLQgtZ1D1cXoNST7rzqW22rL2Vufc7ddRc,3141
 lollms_client/ttv_bindings/lollms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lollms_client-1.7.
-lollms_client-1.7.
-lollms_client-1.7.
-lollms_client-1.7.
-lollms_client-1.7.
+lollms_client-1.7.13.dist-info/licenses/LICENSE,sha256=HrhfyXIkWY2tGFK11kg7vPCqhgh5DcxleloqdhrpyMY,11558
+lollms_client-1.7.13.dist-info/METADATA,sha256=g0VnLxnIjQJ7Me-78LS8gyKKOpNaySh9b4UvN4IWlUY,77177
+lollms_client-1.7.13.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+lollms_client-1.7.13.dist-info/top_level.txt,sha256=Bk_kz-ri6Arwsk7YG-T5VsRorV66uVhcHGvb_g2WqgE,14
+lollms_client-1.7.13.dist-info/RECORD,,

The WHEEL, licenses/LICENSE, and top_level.txt files are unchanged between the two versions.