lollms-client 1.6.5__tar.gz → 1.6.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lollms-client might be problematic. Click here for more details.
- {lollms_client-1.6.5/src/lollms_client.egg-info → lollms_client-1.6.6}/PKG-INFO +1 -1
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/__init__.py +1 -1
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/diffusers/__init__.py +45 -22
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/diffusers/server/main.py +105 -21
- lollms_client-1.6.6/src/lollms_client/tts_bindings/xtts/__init__.py +195 -0
- lollms_client-1.6.6/src/lollms_client/tts_bindings/xtts/server/main.py +275 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6/src/lollms_client.egg-info}/PKG-INFO +1 -1
- lollms_client-1.6.5/src/lollms_client/tts_bindings/xtts/__init__.py +0 -170
- lollms_client-1.6.5/src/lollms_client/tts_bindings/xtts/server/main.py +0 -330
- {lollms_client-1.6.5 → lollms_client-1.6.6}/LICENSE +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/README.md +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/pyproject.toml +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/setup.cfg +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_agentic.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_core.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_llm_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_tts_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_types.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client.egg-info/SOURCES.txt +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.6.5 → lollms_client-1.6.6}/test/test_lollms_discussion.py +0 -0
|
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
|
|
|
8
8
|
from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
|
|
9
9
|
from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
|
|
10
10
|
|
|
11
|
-
__version__ = "1.6.
|
|
11
|
+
__version__ = "1.6.6" # Updated version
|
|
12
12
|
|
|
13
13
|
# Optionally, you could define __all__ if you want to be explicit about exports
|
|
14
14
|
__all__ = [
|
{lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/diffusers/__init__.py
RENAMED
|
@@ -53,6 +53,7 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
53
53
|
self.server_dir = self.binding_root / "server"
|
|
54
54
|
self.venv_dir = Path("./venv/tti_diffusers_venv")
|
|
55
55
|
self.models_path = Path(kwargs.get("models_path", "./data/models/diffusers_models")).resolve()
|
|
56
|
+
self.extra_models_path = kwargs.get("extra_models_path")
|
|
56
57
|
self.models_path.mkdir(exist_ok=True, parents=True)
|
|
57
58
|
if self.auto_start_server:
|
|
58
59
|
self.ensure_server_is_running()
|
|
@@ -68,36 +69,47 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
68
69
|
return False
|
|
69
70
|
|
|
70
71
|
|
|
71
|
-
def ensure_server_is_running(self
|
|
72
|
+
def ensure_server_is_running(self):
|
|
72
73
|
"""
|
|
73
74
|
Ensures the Diffusers server is running. If not, it attempts to start it
|
|
74
|
-
in a process-safe manner using a file lock.
|
|
75
|
-
|
|
76
|
-
Args:
|
|
77
|
-
continue_if_locked (bool): If True, return immediately if another process
|
|
78
|
-
already holds the lock.
|
|
75
|
+
in a process-safe manner using a file lock. This method is designed to
|
|
76
|
+
prevent race conditions in multi-worker environments.
|
|
79
77
|
"""
|
|
80
78
|
self.server_dir.mkdir(exist_ok=True)
|
|
81
|
-
|
|
79
|
+
# Use a lock file in the binding's server directory for consistency across instances
|
|
80
|
+
lock_path = self.server_dir / "diffusers_server.lock"
|
|
82
81
|
lock = FileLock(lock_path)
|
|
83
82
|
|
|
84
83
|
ASCIIColors.info("Attempting to start or connect to the Diffusers server...")
|
|
84
|
+
|
|
85
|
+
# First, perform a quick check without the lock to avoid unnecessary waiting.
|
|
86
|
+
if self.is_server_running():
|
|
87
|
+
ASCIIColors.green("Diffusers Server is already running and responsive.")
|
|
88
|
+
return
|
|
89
|
+
|
|
85
90
|
try:
|
|
86
|
-
# Try to acquire lock
|
|
87
|
-
|
|
91
|
+
# Try to acquire the lock with a timeout. If another process is starting
|
|
92
|
+
# the server, this will wait until it's finished.
|
|
93
|
+
with lock.acquire(timeout=60):
|
|
94
|
+
# After acquiring the lock, we MUST re-check if the server is running.
|
|
95
|
+
# Another process might have started it and released the lock while we were waiting.
|
|
88
96
|
if not self.is_server_running():
|
|
89
97
|
ASCIIColors.yellow("Lock acquired. Starting dedicated Diffusers server...")
|
|
90
98
|
self.start_server()
|
|
99
|
+
# The process that starts the server is responsible for waiting for it to be ready
|
|
100
|
+
# BEFORE releasing the lock. This is the key to preventing race conditions.
|
|
101
|
+
self._wait_for_server()
|
|
91
102
|
else:
|
|
92
|
-
ASCIIColors.green("Server was started by another process. Connected successfully.")
|
|
103
|
+
ASCIIColors.green("Server was started by another process while we waited. Connected successfully.")
|
|
93
104
|
except Timeout:
|
|
94
|
-
if
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
ASCIIColors.yellow("Could not acquire lock within timeout. Waiting for server to become available...")
|
|
105
|
+
# This happens if the process holding the lock takes more than 60 seconds to start the server.
|
|
106
|
+
# We don't try to start another one. We just wait for the existing one to be ready.
|
|
107
|
+
ASCIIColors.yellow("Could not acquire lock, another process is taking a long time to start the server. Waiting...")
|
|
108
|
+
self._wait_for_server(timeout=300) # Give it a longer timeout here just in case.
|
|
99
109
|
|
|
100
|
-
|
|
110
|
+
# A final verification to ensure we are connected.
|
|
111
|
+
if not self.is_server_running():
|
|
112
|
+
raise RuntimeError("Failed to start or connect to the Diffusers server after all attempts.")
|
|
101
113
|
|
|
102
114
|
def install_server_dependencies(self):
|
|
103
115
|
"""
|
|
@@ -191,6 +203,10 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
191
203
|
"--models-path", str(self.models_path.resolve()) # Pass models_path to server
|
|
192
204
|
]
|
|
193
205
|
|
|
206
|
+
if self.extra_models_path:
|
|
207
|
+
resolved_extra_path = Path(self.extra_models_path).resolve()
|
|
208
|
+
command.extend(["--extra-models-path", str(resolved_extra_path)])
|
|
209
|
+
|
|
194
210
|
# Use DETACHED_PROCESS on Windows to allow the server to run independently of the parent process.
|
|
195
211
|
# On Linux/macOS, the process will be daemonized enough to not be killed with the worker.
|
|
196
212
|
creationflags = subprocess.DETACHED_PROCESS if sys.platform == "win32" else 0
|
|
@@ -273,11 +289,14 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
273
289
|
pass
|
|
274
290
|
|
|
275
291
|
def generate_image(self, prompt: str, negative_prompt: str = "", **kwargs) -> bytes:
|
|
276
|
-
|
|
292
|
+
params = kwargs.copy()
|
|
293
|
+
if "model_name" not in params and self.config.get("model_name"):
|
|
294
|
+
params["model_name"] = self.config["model_name"]
|
|
295
|
+
|
|
277
296
|
response = self._post_json_request("/generate_image", data={
|
|
278
297
|
"prompt": prompt,
|
|
279
298
|
"negative_prompt": negative_prompt,
|
|
280
|
-
"params":
|
|
299
|
+
"params": params
|
|
281
300
|
})
|
|
282
301
|
return response.content
|
|
283
302
|
|
|
@@ -307,15 +326,19 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
307
326
|
raise ValueError(f"Unsupported image type in edit_image: {type(img)}")
|
|
308
327
|
if not images_b64:
|
|
309
328
|
raise ValueError("No valid images were provided to the edit_image function.")
|
|
329
|
+
|
|
330
|
+
params = kwargs.copy()
|
|
331
|
+
if "model_name" not in params and self.config.get("model_name"):
|
|
332
|
+
params["model_name"] = self.config["model_name"]
|
|
310
333
|
|
|
311
334
|
# Translate "mask" to "mask_image" for server compatibility
|
|
312
|
-
if "mask" in
|
|
313
|
-
|
|
335
|
+
if "mask" in params and params["mask"]:
|
|
336
|
+
params["mask_image"] = params.pop("mask")
|
|
314
337
|
|
|
315
338
|
json_payload = {
|
|
316
339
|
"prompt": prompt,
|
|
317
340
|
"images_b64": images_b64,
|
|
318
|
-
"params":
|
|
341
|
+
"params": params
|
|
319
342
|
}
|
|
320
343
|
response = self._post_json_request("/edit_image", data=json_payload)
|
|
321
344
|
return response.content
|
|
@@ -351,4 +374,4 @@ class DiffusersBinding(LollmsTTIBinding):
|
|
|
351
374
|
def __del__(self):
|
|
352
375
|
# The client destructor does not stop the server,
|
|
353
376
|
# as it is a shared resource for all worker processes.
|
|
354
|
-
pass
|
|
377
|
+
pass
|
{lollms_client-1.6.5 → lollms_client-1.6.6}/src/lollms_client/tti_bindings/diffusers/server/main.py
RENAMED
|
@@ -62,7 +62,7 @@ MODELS_PATH = Path("./models")
|
|
|
62
62
|
CIVITAI_MODELS = {
|
|
63
63
|
"realistic-vision-v6": {
|
|
64
64
|
"display_name": "Realistic Vision V6.0", "url": "https://civitai.com/api/download/models/501240?type=Model&format=SafeTensor&size=pruned&fp=fp16",
|
|
65
|
-
"filename": "realisticVisionV60_v60B1.
|
|
65
|
+
"filename": "realisticVisionV60_v60B1.safensors", "description": "Photorealistic SD1.5 checkpoint.", "owned_by": "civitai"
|
|
66
66
|
},
|
|
67
67
|
"absolute-reality": {
|
|
68
68
|
"display_name": "Absolute Reality", "url": "https://civitai.com/api/download/models/132760?type=Model&format=SafeTensor&size=pruned&fp=fp16",
|
|
@@ -122,19 +122,45 @@ CIVITAI_MODELS = {
|
|
|
122
122
|
},
|
|
123
123
|
}
|
|
124
124
|
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
125
|
+
HF_PUBLIC_MODELS = {
|
|
126
|
+
"General Purpose & SDXL": [
|
|
127
|
+
{"model_name": "stabilityai/stable-diffusion-xl-base-1.0", "display_name": "Stable Diffusion XL 1.0", "desc": "Official 1024x1024 text-to-image model from Stability AI."},
|
|
128
|
+
{"model_name": "stabilityai/sdxl-turbo", "display_name": "SDXL Turbo", "desc": "A fast, real-time text-to-image model based on SDXL."},
|
|
129
|
+
{"model_name": "kandinsky-community/kandinsky-3", "display_name": "Kandinsky 3", "desc": "A powerful multilingual model with strong prompt understanding and aesthetic quality."},
|
|
130
|
+
{"model_name": "playgroundai/playground-v2.5-1024px-aesthetic", "display_name": "Playground v2.5", "desc": "A high-quality model focused on aesthetic outputs."},
|
|
131
|
+
],
|
|
132
|
+
"Photorealistic": [
|
|
133
|
+
{"model_name": "emilianJR/epiCRealism", "display_name": "epiCRealism", "desc": "A popular community model for generating photorealistic images."},
|
|
134
|
+
{"model_name": "SG161222/Realistic_Vision_V5.1_noVAE", "display_name": "Realistic Vision 5.1", "desc": "One of the most popular realistic models, great for portraits and scenes."},
|
|
135
|
+
{"model_name": "Photon-v1", "display_name": "Photon", "desc": "A model known for high-quality, realistic images with good lighting and detail."},
|
|
136
|
+
],
|
|
137
|
+
"Anime & Illustration": [
|
|
138
|
+
{"model_name": "hakurei/waifu-diffusion", "display_name": "Waifu Diffusion 1.4", "desc": "A widely-used model for generating high-quality anime-style images."},
|
|
139
|
+
{"model_name": "gsdf/Counterfeit-V3.0", "display_name": "Counterfeit V3.0", "desc": "A strong model for illustrative and 2.5D anime styles."},
|
|
140
|
+
{"model_name": "cagliostrolab/animagine-xl-3.0", "display_name": "Animagine XL 3.0", "desc": "A state-of-the-art anime model on the SDXL architecture."},
|
|
141
|
+
],
|
|
142
|
+
"Artistic & Stylized": [
|
|
143
|
+
{"model_name": "wavymulder/Analog-Diffusion", "display_name": "Analog Diffusion", "desc": "Creates images with a vintage, analog film aesthetic."},
|
|
144
|
+
{"model_name": "dreamlike-art/dreamlike-photoreal-2.0", "display_name": "Dreamlike Photoreal 2.0", "desc": "Produces stunning, artistic, and photorealistic images."},
|
|
145
|
+
],
|
|
146
|
+
"Image Editing Tools": [
|
|
147
|
+
{"model_name": "stabilityai/stable-diffusion-xl-refiner-1.0", "display_name": "SDXL Refiner 1.0", "desc": "A dedicated refiner model to improve details in SDXL generations."},
|
|
148
|
+
{"model_name": "Qwen/Qwen-Image-Edit", "display_name": "Qwen Image Edit", "desc": "An instruction-based model for various image editing tasks."},
|
|
149
|
+
{"model_name": "Qwen/Qwen-Image-Edit-2509", "display_name": "Qwen Image Edit Plus", "desc": "Advanced multi-image editing, fusion, and pose transfer."},
|
|
150
|
+
],
|
|
151
|
+
"Legacy & Base Models": [
|
|
152
|
+
{"model_name": "runwayml/stable-diffusion-v1-5", "display_name": "Stable Diffusion 1.5", "desc": "The classic and versatile SD1.5 base model."},
|
|
153
|
+
{"model_name": "stabilityai/stable-diffusion-2-1", "display_name": "Stable Diffusion 2.1", "desc": "The 768x768 base model from the SD2.x series."},
|
|
154
|
+
]
|
|
155
|
+
}
|
|
156
|
+
|
|
157
|
+
HF_GATED_MODELS = {
|
|
158
|
+
"Next-Generation (Gated Access Required)": [
|
|
159
|
+
{"model_name": "stabilityai/stable-diffusion-3-medium-diffusers", "display_name": "Stable Diffusion 3 Medium", "desc": "State-of-the-art model with advanced prompt understanding. Requires free registration."},
|
|
160
|
+
{"model_name": "black-forest-labs/FLUX.1-schnell", "display_name": "FLUX.1 Schnell", "desc": "A powerful and extremely fast next-generation model. Requires access request."},
|
|
161
|
+
{"model_name": "black-forest-labs/FLUX.1-dev", "display_name": "FLUX.1 Dev", "desc": "The larger developer version of the FLUX.1 model. Requires access request."},
|
|
162
|
+
]
|
|
163
|
+
}
|
|
138
164
|
|
|
139
165
|
|
|
140
166
|
TORCH_DTYPE_MAP_STR_TO_OBJ = {
|
|
@@ -228,9 +254,25 @@ class ModelManager:
|
|
|
228
254
|
if not local_path.exists():
|
|
229
255
|
self._download_civitai_model(model_name)
|
|
230
256
|
return local_path
|
|
257
|
+
|
|
258
|
+
# Search in extra models path
|
|
259
|
+
if state.extra_models_path and state.extra_models_path.exists():
|
|
260
|
+
found_paths = list(state.extra_models_path.rglob(model_name))
|
|
261
|
+
if found_paths:
|
|
262
|
+
ASCIIColors.info(f"Found model in extra path: {found_paths[0]}")
|
|
263
|
+
return found_paths[0]
|
|
264
|
+
|
|
265
|
+
# Search in primary models path
|
|
266
|
+
found_paths = list(self.models_path.rglob(model_name))
|
|
267
|
+
if found_paths:
|
|
268
|
+
ASCIIColors.info(f"Found model in primary path: {found_paths[0]}")
|
|
269
|
+
return found_paths[0]
|
|
270
|
+
|
|
271
|
+
# Fallback for HF hub models that are folders, not single files.
|
|
231
272
|
local_path = self.models_path / model_name
|
|
232
273
|
if local_path.exists():
|
|
233
274
|
return local_path
|
|
275
|
+
|
|
234
276
|
return model_name
|
|
235
277
|
|
|
236
278
|
def _download_civitai_model(self, model_key: str):
|
|
@@ -535,9 +577,12 @@ class PipelineRegistry:
|
|
|
535
577
|
return list(self._managers.values())
|
|
536
578
|
|
|
537
579
|
class ServerState:
|
|
538
|
-
def __init__(self, models_path: Path):
|
|
580
|
+
def __init__(self, models_path: Path, extra_models_path: Optional[Path] = None):
|
|
539
581
|
self.models_path = models_path
|
|
582
|
+
self.extra_models_path = extra_models_path
|
|
540
583
|
self.models_path.mkdir(parents=True, exist_ok=True)
|
|
584
|
+
if self.extra_models_path:
|
|
585
|
+
self.extra_models_path.mkdir(parents=True, exist_ok=True)
|
|
541
586
|
self.config_path = self.models_path.parent / "diffusers_server_config.json"
|
|
542
587
|
self.registry = PipelineRegistry()
|
|
543
588
|
self.manager: Optional[ModelManager] = None
|
|
@@ -802,14 +847,49 @@ async def edit_image(request: EditRequestJSON):
|
|
|
802
847
|
|
|
803
848
|
@router.get("/list_models")
|
|
804
849
|
def list_models_endpoint():
|
|
805
|
-
|
|
806
|
-
|
|
807
|
-
|
|
808
|
-
|
|
850
|
+
huggingface_models = []
|
|
851
|
+
# Add public models, organized by category
|
|
852
|
+
for category, models in HF_PUBLIC_MODELS.items():
|
|
853
|
+
for model_info in models:
|
|
854
|
+
huggingface_models.append({
|
|
855
|
+
'model_name': model_info['model_name'],
|
|
856
|
+
'display_name': model_info['display_name'],
|
|
857
|
+
'description': f"({category}) {model_info['desc']}",
|
|
858
|
+
'owned_by': 'huggingface'
|
|
859
|
+
})
|
|
860
|
+
|
|
861
|
+
# Conditionally add gated models if an HF token is provided in the server config
|
|
862
|
+
if state.config.get("hf_token"):
|
|
863
|
+
ASCIIColors.info("HF token detected, including gated models in the list.")
|
|
864
|
+
for category, models in HF_GATED_MODELS.items():
|
|
865
|
+
for model_info in models:
|
|
866
|
+
huggingface_models.append({
|
|
867
|
+
'model_name': model_info['model_name'],
|
|
868
|
+
'display_name': model_info['display_name'],
|
|
869
|
+
'description': f"({category}) {model_info['desc']}",
|
|
870
|
+
'owned_by': 'huggingface'
|
|
871
|
+
})
|
|
872
|
+
else:
|
|
873
|
+
ASCIIColors.info("No HF token found, showing public models only.")
|
|
874
|
+
|
|
875
|
+
civitai_models = [{'model_name': key, 'display_name': info['display_name'], 'description': f"(Civitai) {info['description']}", 'owned_by': info['owned_by']} for key, info in CIVITAI_MODELS.items()]
|
|
876
|
+
|
|
877
|
+
local_files = list_local_models_endpoint()
|
|
878
|
+
local_models = [{'model_name': filename, 'display_name': Path(filename).stem, 'description': '(Local) Local safetensors file.', 'owned_by': 'local_user'} for filename in local_files]
|
|
879
|
+
|
|
880
|
+
return huggingface_models + civitai_models + local_models
|
|
809
881
|
|
|
810
882
|
@router.get("/list_local_models")
|
|
811
883
|
def list_local_models_endpoint():
|
|
812
|
-
|
|
884
|
+
local_models = set()
|
|
885
|
+
# Main models path
|
|
886
|
+
for f in state.models_path.glob("**/*.safetensors"):
|
|
887
|
+
local_models.add(f.name)
|
|
888
|
+
# Extra models path
|
|
889
|
+
if state.extra_models_path and state.extra_models_path.exists():
|
|
890
|
+
for f in state.extra_models_path.glob("**/*.safetensors"):
|
|
891
|
+
local_models.add(f.name)
|
|
892
|
+
return sorted(list(local_models))
|
|
813
893
|
|
|
814
894
|
@router.get("/list_available_models")
|
|
815
895
|
def list_available_models_endpoint():
|
|
@@ -866,14 +946,18 @@ if __name__ == "__main__":
|
|
|
866
946
|
parser.add_argument("--host", type=str, default="localhost", help="Host to bind to.")
|
|
867
947
|
parser.add_argument("--port", type=int, default=9630, help="Port to bind to.")
|
|
868
948
|
parser.add_argument("--models-path", type=str, required=True, help="Path to the models directory.")
|
|
949
|
+
parser.add_argument("--extra-models-path", type=str, default=None, help="Path to an extra models directory.")
|
|
869
950
|
args = parser.parse_args()
|
|
870
951
|
|
|
871
952
|
MODELS_PATH = Path(args.models_path)
|
|
872
|
-
|
|
953
|
+
EXTRA_MODELS_PATH = Path(args.extra_models_path) if args.extra_models_path else None
|
|
954
|
+
state = ServerState(MODELS_PATH, EXTRA_MODELS_PATH)
|
|
873
955
|
|
|
874
956
|
ASCIIColors.cyan(f"--- Diffusers TTI Server ---")
|
|
875
957
|
ASCIIColors.green(f"Starting server on http://{args.host}:{args.port}")
|
|
876
958
|
ASCIIColors.green(f"Serving models from: {MODELS_PATH.resolve()}")
|
|
959
|
+
if EXTRA_MODELS_PATH:
|
|
960
|
+
ASCIIColors.green(f"Serving extra models from: {EXTRA_MODELS_PATH.resolve()}")
|
|
877
961
|
if not DIFFUSERS_AVAILABLE:
|
|
878
962
|
ASCIIColors.error("Diffusers or its dependencies are not installed correctly in the server's environment!")
|
|
879
963
|
else:
|
|
@@ -0,0 +1,195 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import sys
|
|
3
|
+
import requests
|
|
4
|
+
import subprocess
|
|
5
|
+
import time
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Optional, List
|
|
8
|
+
|
|
9
|
+
# Ensure pipmaster is available.
|
|
10
|
+
try:
|
|
11
|
+
import pipmaster as pm
|
|
12
|
+
except ImportError:
|
|
13
|
+
print("FATAL: pipmaster is not installed. Please install it using: pip install pipmaster")
|
|
14
|
+
sys.exit(1)
|
|
15
|
+
|
|
16
|
+
# Ensure filelock is available for process-safe server startup.
|
|
17
|
+
try:
|
|
18
|
+
from filelock import FileLock, Timeout
|
|
19
|
+
except ImportError:
|
|
20
|
+
print("FATAL: The 'filelock' library is required. Please install it by running: pip install filelock")
|
|
21
|
+
sys.exit(1)
|
|
22
|
+
|
|
23
|
+
from lollms_client.lollms_tts_binding import LollmsTTSBinding
|
|
24
|
+
from ascii_colors import ASCIIColors
|
|
25
|
+
|
|
26
|
+
BindingName = "XTTSClientBinding"
|
|
27
|
+
|
|
28
|
+
class XTTSClientBinding(LollmsTTSBinding):
|
|
29
|
+
"""
|
|
30
|
+
Client binding for a dedicated, managed XTTS server.
|
|
31
|
+
This architecture prevents the heavy XTTS model from being loaded into memory
|
|
32
|
+
by multiple worker processes, solving potential OOM errors and speeding up TTS generation.
|
|
33
|
+
"""
|
|
34
|
+
def __init__(self,
|
|
35
|
+
**kwargs):
|
|
36
|
+
|
|
37
|
+
binding_name = "xtts"
|
|
38
|
+
super().__init__(binding_name=binding_name, **kwargs)
|
|
39
|
+
|
|
40
|
+
self.config = kwargs
|
|
41
|
+
self.host = kwargs.get("host", "localhost")
|
|
42
|
+
self.port = kwargs.get("port", 8081)
|
|
43
|
+
self.auto_start_server = kwargs.get("auto_start_server", True)
|
|
44
|
+
self.server_process = None
|
|
45
|
+
self.base_url = f"http://{self.host}:{self.port}"
|
|
46
|
+
self.binding_root = Path(__file__).parent
|
|
47
|
+
self.server_dir = self.binding_root / "server"
|
|
48
|
+
self.venv_dir = Path("./venv/tts_xtts_venv")
|
|
49
|
+
|
|
50
|
+
if self.auto_start_server:
|
|
51
|
+
self.ensure_server_is_running()
|
|
52
|
+
|
|
53
|
+
def is_server_running(self) -> bool:
|
|
54
|
+
"""Checks if the server is already running and responsive."""
|
|
55
|
+
try:
|
|
56
|
+
response = requests.get(f"{self.base_url}/status", timeout=2)
|
|
57
|
+
if response.status_code == 200 and response.json().get("status") == "running":
|
|
58
|
+
return True
|
|
59
|
+
except requests.exceptions.RequestException:
|
|
60
|
+
return False
|
|
61
|
+
return False
|
|
62
|
+
|
|
63
|
+
def ensure_server_is_running(self):
|
|
64
|
+
"""
|
|
65
|
+
Ensures the XTTS server is running. If not, it attempts to start it
|
|
66
|
+
in a process-safe manner using a file lock.
|
|
67
|
+
"""
|
|
68
|
+
self.server_dir.mkdir(exist_ok=True)
|
|
69
|
+
lock_path = self.server_dir / "xtts_server.lock"
|
|
70
|
+
lock = FileLock(lock_path)
|
|
71
|
+
|
|
72
|
+
ASCIIColors.info("Attempting to start or connect to the XTTS server...")
|
|
73
|
+
|
|
74
|
+
if self.is_server_running():
|
|
75
|
+
ASCIIColors.green("XTTS Server is already running and responsive.")
|
|
76
|
+
return
|
|
77
|
+
|
|
78
|
+
try:
|
|
79
|
+
with lock.acquire(timeout=60):
|
|
80
|
+
if not self.is_server_running():
|
|
81
|
+
ASCIIColors.yellow("Lock acquired. Starting dedicated XTTS server...")
|
|
82
|
+
self.start_server()
|
|
83
|
+
self._wait_for_server()
|
|
84
|
+
else:
|
|
85
|
+
ASCIIColors.green("Server was started by another process while we waited. Connected successfully.")
|
|
86
|
+
except Timeout:
|
|
87
|
+
ASCIIColors.yellow("Could not acquire lock, another process is starting the server. Waiting...")
|
|
88
|
+
self._wait_for_server(timeout=180)
|
|
89
|
+
|
|
90
|
+
if not self.is_server_running():
|
|
91
|
+
raise RuntimeError("Failed to start or connect to the XTTS server after all attempts.")
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def install_server_dependencies(self):
    """
    Installs the server's dependencies into a dedicated virtual environment
    using pipmaster, which handles complex packages like PyTorch.

    Raises:
        FileNotFoundError: If the bundled requirements.txt is missing.
        RuntimeError: If dependency installation fails.
    """
    ASCIIColors.info(f"Setting up virtual environment in: {self.venv_dir}")
    pm_v = pm.PackageManager(venv_path=str(self.venv_dir))

    requirements_file = self.server_dir / "requirements.txt"
    # Fail early with a clear message instead of letting the installer
    # error out cryptically on a missing file.
    if not requirements_file.exists():
        raise FileNotFoundError(f"Requirements file not found at {requirements_file}.")

    ASCIIColors.info("Installing server dependencies from requirements.txt...")
    success = pm_v.ensure_requirements(str(requirements_file), verbose=True)

    if not success:
        ASCIIColors.error("Failed to install server dependencies. Please check the console output for errors.")
        raise RuntimeError("XTTS server dependency installation failed.")

    ASCIIColors.green("Server dependencies are satisfied.")
|
|
112
|
+
|
|
113
|
+
|
|
114
|
+
def start_server(self):
    """
    Installs dependencies (first run only) and launches the FastAPI server
    as a detached background subprocess.

    This method should only be called from within a file lock.

    Raises:
        FileNotFoundError: If the server script is missing.
    """
    server_script = self.server_dir / "main.py"
    if not server_script.exists():
        raise FileNotFoundError(f"Server script not found at {server_script}.")

    # First run: create the venv and install everything into it.
    if not self.venv_dir.exists():
        self.install_server_dependencies()

    if sys.platform == "win32":
        python_executable = self.venv_dir / "Scripts" / "python.exe"
    else:
        python_executable = self.venv_dir / "bin" / "python"

    command = [
        str(python_executable),
        str(server_script),
        "--host", self.host,
        "--port", str(self.port)
    ]

    # Detach the server so it can outlive this client process:
    # DETACHED_PROCESS on Windows, a new session (setsid) on POSIX.
    # The original code only detached on Windows, leaving the POSIX
    # server tied to the client's session despite the stated intent.
    if sys.platform == "win32":
        self.server_process = subprocess.Popen(command, creationflags=subprocess.DETACHED_PROCESS)
    else:
        self.server_process = subprocess.Popen(command, start_new_session=True)
    ASCIIColors.info("XTTS server process launched in the background.")
|
|
143
|
+
|
|
144
|
+
def _wait_for_server(self, timeout=120):
    """Poll the server every 2s until it responds, or raise after *timeout* seconds."""
    ASCIIColors.info("Waiting for XTTS server to become available...")
    deadline = time.time() + timeout
    while time.time() < deadline:
        if self.is_server_running():
            ASCIIColors.green("XTTS Server is up and running.")
            return
        time.sleep(2)
    raise RuntimeError("Failed to connect to the XTTS server within the specified timeout.")
|
|
154
|
+
|
|
155
|
+
def __del__(self):
    """Intentionally a no-op.

    The server is a shared resource that other processes may still be
    using, so the client never tears it down on destruction.
    """
    pass
|
|
159
|
+
|
|
160
|
+
def generate_audio(self, text: str, voice: Optional[str] = None, **kwargs) -> bytes:
    """Request speech synthesis from the server and return the raw audio bytes.

    Extra keyword arguments (e.g. language, split_sentences from the
    description file) are forwarded to the server unchanged.
    """
    # Merge the base fields with any pass-through options.
    payload = {"text": text, "voice": voice, **kwargs}

    try:
        response = requests.post(f"{self.base_url}/generate_audio", json=payload, timeout=300)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        ASCIIColors.error(f"Failed to communicate with XTTS server at {self.base_url}.")
        ASCIIColors.error(f"Error details: {e}")
        raise RuntimeError("Communication with the XTTS server failed.") from e
    return response.content
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
def list_voices(self, **kwargs) -> List[str]:
    """Return the list of voice names available on the server.

    Returns:
        A list of voice name strings; empty if the server is unreachable.
    """
    try:
        # A finite timeout prevents the client from hanging forever if the
        # server goes away between the status probe and this call.
        response = requests.get(f"{self.base_url}/list_voices", timeout=30)
        response.raise_for_status()
        return response.json().get("voices", [])
    except requests.exceptions.RequestException as e:
        ASCIIColors.error(f"Failed to get voices from XTTS server: {e}")
        return []
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def list_models(self, **kwargs) -> list:
    """Return the list of models supported by the server.

    Returns:
        A list of model identifiers; empty if the server is unreachable.
    """
    try:
        # A finite timeout prevents the client from blocking indefinitely
        # on a dead or hung server.
        response = requests.get(f"{self.base_url}/list_models", timeout=30)
        response.raise_for_status()
        return response.json().get("models", [])
    except requests.exceptions.RequestException as e:
        ASCIIColors.error(f"Failed to get models from XTTS server: {e}")
        return []
|