lollms-client 1.6.1__tar.gz → 1.6.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client might be problematic. Refer to the advisory details published alongside the package for more information.
- {lollms_client-1.6.1/src/lollms_client.egg-info → lollms_client-1.6.2}/PKG-INFO +5 -2
- {lollms_client-1.6.1 → lollms_client-1.6.2}/README.md +5 -2
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/__init__.py +1 -1
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_core.py +15 -10
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_tts_binding.py +15 -13
- lollms_client-1.6.2/src/lollms_client/tts_bindings/xtts/__init__.py +164 -0
- lollms_client-1.6.2/src/lollms_client/tts_bindings/xtts/server/main.py +317 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2/src/lollms_client.egg-info}/PKG-INFO +5 -2
- lollms_client-1.6.1/src/lollms_client/tts_bindings/xtts/__init__.py +0 -111
- lollms_client-1.6.1/src/lollms_client/tts_bindings/xtts/server/main.py +0 -314
- {lollms_client-1.6.1 → lollms_client-1.6.2}/LICENSE +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/pyproject.toml +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/setup.cfg +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/assets/models_ctx_sizes.json +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/azure_openai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/claude/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/grok/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/groq/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/litellm/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/llamacpp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/lollms_webui/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/mistral/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/ollama/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/open_router/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/openai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/openllm/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/openwebui/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/perplexity/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/pythonllamacpp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/tensor_rt/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/transformers/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/llm_bindings/vllm/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_agentic.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_config.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_discussion.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_js_analyzer.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_llm_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_mcp_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_mcp_security.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_personality.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_python_analyzer.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_stt_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_tti_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_ttm_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_ttv_binding.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_types.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/lollms_utilities.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/local_mcp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/file_writer/file_writer.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/generate_image_from_prompt/generate_image_from_prompt.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/internet_search/internet_search.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/local_mcp/default_tools/python_interpreter/python_interpreter.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/remote_mcp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/mcp_bindings/standard_mcp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/stt_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/stt_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/stt_bindings/whisper/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/stt_bindings/whispercpp/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/diffusers/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/gemini/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/leonardo_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/novita_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/openai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tti_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/audiocraft/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/beatoven_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/replicate/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/stability_ai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttm_bindings/topmediai/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/bark/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/bark/server/install_bark.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/bark/server/main.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/piper_tts/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/piper_tts/server/install_piper.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/piper_tts/server/main.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/piper_tts/server/setup_voices.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/tts_bindings/xtts/server/setup_voices.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttv_bindings/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client/ttv_bindings/lollms/__init__.py +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client.egg-info/SOURCES.txt +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client.egg-info/dependency_links.txt +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client.egg-info/requires.txt +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/src/lollms_client.egg-info/top_level.txt +0 -0
- {lollms_client-1.6.1 → lollms_client-1.6.2}/test/test_lollms_discussion.py +0 -0
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: lollms_client
|
|
3
|
-
Version: 1.6.
|
|
3
|
+
Version: 1.6.2
|
|
4
4
|
Summary: A client library for LoLLMs generate endpoint
|
|
5
5
|
Author-email: ParisNeo <parisneoai@gmail.com>
|
|
6
6
|
License: Apache License
|
|
@@ -1302,6 +1302,7 @@ try:
|
|
|
1302
1302
|
except Exception as e:
|
|
1303
1303
|
ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
|
|
1304
1304
|
ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
|
|
1305
|
+
```
|
|
1305
1306
|
|
|
1306
1307
|
---
|
|
1307
1308
|
|
|
@@ -1403,7 +1404,9 @@ else:
|
|
|
1403
1404
|
|
|
1404
1405
|
except Exception as e:
|
|
1405
1406
|
ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
|
|
1406
|
-
```
|
|
1407
|
+
```
|
|
1408
|
+
|
|
1409
|
+
This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
|
|
1407
1410
|
|
|
1408
1411
|
### Listing Available Models
|
|
1409
1412
|
|
|
@@ -1069,6 +1069,7 @@ try:
|
|
|
1069
1069
|
except Exception as e:
|
|
1070
1070
|
ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
|
|
1071
1071
|
ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
|
|
1072
|
+
```
|
|
1072
1073
|
|
|
1073
1074
|
---
|
|
1074
1075
|
|
|
@@ -1170,7 +1171,9 @@ else:
|
|
|
1170
1171
|
|
|
1171
1172
|
except Exception as e:
|
|
1172
1173
|
ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
|
|
1173
|
-
```
|
|
1174
|
+
```
|
|
1175
|
+
|
|
1176
|
+
This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
|
|
1174
1177
|
|
|
1175
1178
|
### Listing Available Models
|
|
1176
1179
|
|
|
@@ -1282,4 +1285,4 @@ This project is licensed under the **Apache 2.0 License**. See the [LICENSE](LIC
|
|
|
1282
1285
|
|
|
1283
1286
|
## Changelog
|
|
1284
1287
|
|
|
1285
|
-
For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
|
|
1288
|
+
For a list of changes and updates, please refer to the [CHANGELOG.md](CHANGELOG.md) file.
|
|
@@ -8,7 +8,7 @@ from lollms_client.lollms_utilities import PromptReshaper # Keep general utiliti
|
|
|
8
8
|
from lollms_client.lollms_mcp_binding import LollmsMCPBinding, LollmsMCPBindingManager
|
|
9
9
|
from lollms_client.lollms_llm_binding import LollmsLLMBindingManager
|
|
10
10
|
|
|
11
|
-
__version__ = "1.6.
|
|
11
|
+
__version__ = "1.6.2" # Updated version
|
|
12
12
|
|
|
13
13
|
# Optionally, you could define __all__ if you want to be explicit about exports
|
|
14
14
|
__all__ = [
|
|
@@ -143,16 +143,21 @@ class LollmsClient():
|
|
|
143
143
|
ASCIIColors.warning(f"Failed to create LLM binding: {llm_binding_name}. Available: {available}")
|
|
144
144
|
|
|
145
145
|
if tts_binding_name:
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
146
|
+
try:
|
|
147
|
+
params = {
|
|
148
|
+
k: v
|
|
149
|
+
for k, v in (tts_binding_config or {}).items()
|
|
150
|
+
if k != "binding_name"
|
|
151
|
+
}
|
|
152
|
+
self.tts = self.tts_binding_manager.create_binding(
|
|
153
|
+
binding_name=tts_binding_name,
|
|
154
|
+
**params
|
|
155
|
+
)
|
|
156
|
+
if self.tts is None:
|
|
157
|
+
ASCIIColors.warning(f"Failed to create TTS binding: {tts_binding_name}. Available: {self.tts_binding_manager.get_available_bindings()}")
|
|
158
|
+
except Exception as e:
|
|
159
|
+
trace_exception(e)
|
|
160
|
+
ASCIIColors.warning(f"Exception occurred while creating TTS binding: {str(e)}")
|
|
156
161
|
|
|
157
162
|
if tti_binding_name:
|
|
158
163
|
if tti_binding_config:
|
|
@@ -49,26 +49,28 @@ class LollmsTTSBindingManager:
|
|
|
49
49
|
except Exception as e:
|
|
50
50
|
trace_exception(e)
|
|
51
51
|
print(f"Failed to load TTS binding {binding_name}: {str(e)}")
|
|
52
|
-
|
|
53
|
-
def create_binding(self,
|
|
52
|
+
    def create_binding(self,
                       binding_name: str,
                       **kwargs) -> Optional[LollmsTTSBinding]:
        """
        Create an instance of a specific TTS binding.

        Args:
            binding_name (str): Name of the binding to create.
            kwargs: Binding-specific keyword arguments, forwarded verbatim
                to the binding class constructor.

        Returns:
            Optional[LollmsTTSBinding]: Binding instance, or None if the
            binding could not be found or loaded.
        """
        # Lazily load the binding module on first use.
        if binding_name not in self.available_bindings:
            self._load_binding(binding_name)

        binding_class = self.available_bindings.get(binding_name)
        if binding_class:
            return binding_class(**kwargs)
        return None
|
|
71
72
|
|
|
73
|
+
|
|
72
74
|
@staticmethod
|
|
73
75
|
def _get_fallback_description(binding_name: str) -> Dict:
|
|
74
76
|
return {
|
|
@@ -0,0 +1,164 @@
|
|
|
1
|
+
from lollms_client.lollms_tts_binding import LollmsTTSBinding
|
|
2
|
+
from typing import Optional, List
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import requests
|
|
5
|
+
import subprocess
|
|
6
|
+
import sys
|
|
7
|
+
import time
|
|
8
|
+
import pipmaster as pm
|
|
9
|
+
|
|
10
|
+
# New import for process-safe file locking: several worker processes may try
# to start the shared XTTS server at once, so a cross-process lock is required.
try:
    from filelock import FileLock, Timeout
except ImportError:
    print("FATAL: The 'filelock' library is required. Please install it by running: pip install filelock")
    sys.exit(1)


# Name of the binding class the lollms binding loader instantiates
# (presumably looked up by the binding manager — see LollmsTTSBindingManager).
BindingName = "XTTSClientBinding"
|
|
19
|
+
|
|
20
|
+
class XTTSClientBinding(LollmsTTSBinding):
    """Client-side XTTS binding that talks to a dedicated XTTS HTTP server.

    The server is started on demand in a process-safe manner (guarded by a
    file lock so only one worker launches it) and is shared between workers;
    a single client instance never shuts it down.
    """

    def __init__(self,
                 host: str = "localhost",
                 port: int = 8081,
                 auto_start_server: bool = True,
                 **kwargs):
        """
        Args:
            host: Host name or IP address the XTTS server listens on.
            port: TCP port of the XTTS server.
            auto_start_server: If True, ensure the server is up on construction.
            kwargs: Forwarded to LollmsTTSBinding.
        """
        binding_name = "xtts"
        super().__init__(binding_name=binding_name, **kwargs)
        self.host = host
        self.port = port
        self.auto_start_server = auto_start_server
        self.server_process = None
        self.base_url = f"http://{self.host}:{self.port}"
        self.binding_root = Path(__file__).parent
        self.server_dir = self.binding_root / "server"

        if self.auto_start_server:
            self.ensure_server_is_running()

    def is_server_running(self) -> bool:
        """Checks if the server is already running and responsive."""
        try:
            response = requests.get(f"{self.base_url}/status", timeout=1)
            if response.status_code == 200 and response.json().get("status") == "running":
                return True
        except requests.RequestException:
            # FIX: previously only requests.ConnectionError was caught, so a
            # read timeout (requests.Timeout) or any other request failure
            # would propagate and crash the caller instead of reporting
            # "not running". RequestException covers all of them.
            return False
        return False

    def ensure_server_is_running(self):
        """
        Ensures the XTTS server is running. If not, it attempts to start it
        in a process-safe manner using a file lock.
        """
        if self.is_server_running():
            print("XTTS Server is already running.")
            return

        lock_path = self.server_dir / "xtts_server.lock"
        lock = FileLock(lock_path, timeout=10)  # Wait a maximum of 10 seconds for the lock

        print("Attempting to start or wait for the XTTS server...")
        try:
            with lock:
                # Double-check after acquiring the lock to handle race conditions
                if not self.is_server_running():
                    print("Lock acquired. Starting dedicated XTTS server...")
                    self.start_server()
                else:
                    print("Server was started by another process while waiting for the lock.")
        except Timeout:
            print("Could not acquire lock. Another process is likely starting the server. Waiting...")

        # All workers (the one that started the server and those that waited) will verify the server is ready
        self._wait_for_server()

    def start_server(self):
        """
        Installs dependencies and launches the server as a background subprocess.
        This method should only be called from within a file lock.
        """
        requirements_file = self.server_dir / "requirements.txt"
        server_script = self.server_dir / "main.py"

        # 1. Ensure a virtual environment and dependencies
        venv_path = self.server_dir / "venv"
        print(f"Ensuring virtual environment and dependencies in: {venv_path}")
        pm_v = pm.PackageManager(venv_path=str(venv_path))

        success = pm_v.ensure_requirements(
            str(requirements_file),
            verbose=True
        )

        if not success:
            print("FATAL: Failed to install server dependencies. Aborting launch.")
            return

        print("Dependencies are satisfied. Proceeding to launch server...")

        # 2. Get the python executable from the venv
        if sys.platform == "win32":
            python_executable = venv_path / "Scripts" / "python.exe"
        else:
            python_executable = venv_path / "bin" / "python"

        # 3. Launch the server as a detached subprocess
        command = [
            str(python_executable),
            str(server_script),
            "--host", self.host,
            "--port", str(self.port)
        ]

        # The server is started as a background process and is not tied to this specific worker's lifecycle.
        # FIX: keep a handle on the launched process — stop_server()/__del__
        # test self.server_process, which was previously never assigned.
        self.server_process = subprocess.Popen(command)
        print("XTTS Server process launched in the background.")

    def _wait_for_server(self, timeout=60):
        """Poll /status until the server responds or `timeout` seconds elapse.

        Raises:
            RuntimeError: If the server does not come up within the timeout.
        """
        print("Waiting for XTTS server to become available...")
        start_time = time.time()
        while time.time() - start_time < timeout:
            if self.is_server_running():
                print("XTTS Server is up and running.")
                return
            time.sleep(1)

        raise RuntimeError("Failed to connect to the XTTS server within the specified timeout.")

    def stop_server(self):
        """
        In a multi-worker setup, a single client instance should not stop the shared server.
        The server will continue running until the main application is terminated.
        """
        if self.server_process:
            print("XTTS Client: An instance is shutting down, but the shared server will remain active for other workers.")
            self.server_process = None

    def __del__(self):
        """
        The destructor does not stop the server to prevent disrupting other workers.
        """
        pass

    def generate_audio(self, text: str, voice: Optional[str] = None, **kwargs) -> bytes:
        """Generate audio by calling the server's API.

        Args:
            text: Text to synthesize.
            voice: Optional voice name or speaker reference understood by the server.
            kwargs: Extra fields forwarded in the JSON payload (e.g. language).

        Returns:
            Raw WAV bytes produced by the server.

        Raises:
            requests.HTTPError: If the server returns an error status.
        """
        payload = {"text": text, "voice": voice, **kwargs}
        response = requests.post(f"{self.base_url}/generate_audio", json=payload)
        response.raise_for_status()
        return response.content

    def list_voices(self, **kwargs) -> List[str]:
        """Get available voices from the server."""
        response = requests.get(f"{self.base_url}/list_voices")
        response.raise_for_status()
        return response.json().get("voices", [])

    def list_models(self, **kwargs) -> List[str]:
        """Get available models from the server."""
        response = requests.get(f"{self.base_url}/list_models")
        response.raise_for_status()
        return response.json().get("models", [])
|
@@ -0,0 +1,317 @@
|
|
|
1
|
+
try:
|
|
2
|
+
import uvicorn
|
|
3
|
+
from fastapi import FastAPI, APIRouter, HTTPException
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
import argparse
|
|
6
|
+
import sys
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
import asyncio
|
|
9
|
+
import traceback
|
|
10
|
+
import os
|
|
11
|
+
from typing import Optional, List
|
|
12
|
+
import io
|
|
13
|
+
import wave
|
|
14
|
+
import numpy as np
|
|
15
|
+
import tempfile
|
|
16
|
+
|
|
17
|
+
# --- XTTS Implementation ---
# Import the heavy XTTS stack up front; on failure the server still starts,
# but `xtts_available` stays False and generation requests will be rejected.
try:
    print("Server: Loading XTTS dependencies...")
    import torch
    import torchaudio
    from TTS.api import TTS
    print("Server: XTTS dependencies loaded successfully")

    # Check for CUDA availability
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Server: Using device: {device}")

    xtts_available = True

except Exception as e:
    # NOTE: when this branch runs, `torch`, `torchaudio`, `TTS` and `device`
    # are undefined — code below must gate any use of them on `xtts_available`.
    print(f"Server: Failed to load XTTS dependencies: {e}")
    print(f"Server: Traceback:\n{traceback.format_exc()}")
    xtts_available = False
|
|
35
|
+
|
|
36
|
+
# --- API Models ---
|
|
37
|
+
class GenerationRequest(BaseModel):
    """Request payload for the /generate_audio endpoint."""
    text: str                           # text to synthesize
    voice: Optional[str] = None         # voice name, or a path to a speaker WAV
    language: Optional[str] = "en"      # XTTS language code
    speaker_wav: Optional[str] = None   # explicit speaker reference file (takes priority over voice)
|
|
42
|
+
|
|
43
|
+
class XTTSServer:
    """In-process wrapper around the Coqui XTTS v2 model.

    The model is loaded lazily on the first generation request (it is large
    and slow to load); voice discovery scans the sibling ``voices`` directory
    for ``*.wav`` speaker-reference files.
    """

    def __init__(self):
        self.model = None           # TTS instance, set once loaded
        self.model_loaded = False   # True after a successful load
        self.model_loading = False  # Flag to prevent concurrent loading
        self.available_voices = self._load_available_voices()
        self.available_models = ["xtts_v2"]

        # Don't initialize model here - do it lazily on first request
        print("Server: XTTS server initialized (model will be loaded on first request)")

    async def _ensure_model_loaded(self):
        """Ensure the XTTS model is loaded (lazy loading)"""
        if self.model_loaded:
            return

        if self.model_loading:
            # Another request is already loading the model, wait for it
            while self.model_loading and not self.model_loaded:
                await asyncio.sleep(0.1)
            return

        if not xtts_available:
            raise RuntimeError("XTTS library not available")

        try:
            self.model_loading = True
            print("Server: Loading XTTS model for the first time (this may take a few minutes)...")

            # Initialize XTTS model (uses the module-level `device` set at import time)
            self.model = TTS("tts_models/multilingual/multi-dataset/xtts_v2").to(device)

            self.model_loaded = True
            print("Server: XTTS model loaded successfully")

        except Exception as e:
            print(f"Server: Error loading XTTS model: {e}")
            print(f"Server: Traceback:\n{traceback.format_exc()}")
            self.model_loaded = False
            raise
        finally:
            self.model_loading = False

    def _load_available_voices(self) -> List[str]:
        """Load and return available voices.

        Returns the stems of the WAV files found in the ``voices`` directory,
        or a list of placeholder names when none exist.
        """
        try:
            # Look for voice files in voices directory
            voices_dir = Path(__file__).parent / "voices"
            voices = []

            if voices_dir.exists():
                # Look for WAV files in voices directory
                for voice_file in voices_dir.glob("*.wav"):
                    voices.append(voice_file.stem)

            # If no custom voices found, provide some default names
            if not voices:
                voices = ["default", "female", "male"]

            return voices

        except Exception as e:
            print(f"Server: Error loading voices: {e}")
            return ["default"]

    async def generate_audio(self, text: str, voice: Optional[str] = None,
                             language: str = "en", speaker_wav: Optional[str] = None) -> bytes:
        """Generate audio from text using XTTS.

        Speaker resolution order: explicit ``speaker_wav`` path, then ``voice``
        (as a path or as a name in the voices directory), then a default
        reference file. XTTS always needs a speaker reference; if none can be
        resolved a RuntimeError is raised.

        Returns:
            The generated WAV file contents as bytes.

        Raises:
            RuntimeError: If the model cannot be loaded or no speaker
                reference is available.
        """
        # Ensure model is loaded before proceeding
        await self._ensure_model_loaded()

        if not self.model_loaded or self.model is None:
            raise RuntimeError("XTTS model failed to load")

        try:
            print(f"Server: Generating audio for: '{text[:50]}{'...' if len(text) > 50 else ''}'")
            print(f"Server: Using voice: {voice}, language: {language}")

            # Handle voice/speaker selection
            speaker_wav_path = None

            # First priority: use provided speaker_wav parameter
            if speaker_wav:
                speaker_wav_path = speaker_wav
                print(f"Server: Using provided speaker_wav: {speaker_wav_path}")

            # Second priority: check if voice parameter is a file path
            elif voice and voice != "default":
                if os.path.exists(voice):
                    # Voice parameter is a full file path
                    speaker_wav_path = voice
                    print(f"Server: Using voice as file path: {speaker_wav_path}")
                else:
                    # Look for voice file in voices directory
                    voices_dir = Path(__file__).parent / "voices"
                    potential_voice_path = voices_dir / f"{voice}.wav"
                    if potential_voice_path.exists():
                        speaker_wav_path = str(potential_voice_path)
                        print(f"Server: Using custom voice file: {speaker_wav_path}")
                    else:
                        print(f"Server: Voice '{voice}' not found in voices directory")

            # Create a temporary file for output (delete=False: the path is
            # reused after the context manager closes the handle, then cleaned
            # up in the finally block below)
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_file:
                temp_output_path = temp_file.name

            try:
                # Generate audio using XTTS
                if speaker_wav_path and os.path.exists(speaker_wav_path):
                    print(f"Server: Generating with speaker reference: {speaker_wav_path}")
                    self.model.tts_to_file(
                        text=text,
                        speaker_wav=speaker_wav_path,
                        language=language,
                        file_path=temp_output_path
                    )
                else:
                    print("Server: No valid speaker reference found, trying default")
                    # For XTTS without speaker reference, try to find a default
                    default_speaker = self._get_default_speaker_file()
                    if default_speaker and os.path.exists(default_speaker):
                        print(f"Server: Using default speaker: {default_speaker}")
                        self.model.tts_to_file(
                            text=text,
                            speaker_wav=default_speaker,
                            language=language,
                            file_path=temp_output_path
                        )
                    else:
                        # Create a more helpful error message
                        available_voices = self._get_all_available_voice_files()
                        error_msg = f"No speaker reference available. XTTS requires a speaker reference file.\n"
                        error_msg += f"Attempted to use: {speaker_wav_path if speaker_wav_path else 'None'}\n"
                        error_msg += f"Available voice files: {available_voices}"
                        raise RuntimeError(error_msg)

                # Read the generated audio file
                with open(temp_output_path, 'rb') as f:
                    audio_bytes = f.read()

                print(f"Server: Generated {len(audio_bytes)} bytes of audio")
                return audio_bytes

            finally:
                # Clean up temporary file
                if os.path.exists(temp_output_path):
                    os.unlink(temp_output_path)

        except Exception as e:
            print(f"Server: Error generating audio: {e}")
            print(f"Server: Traceback:\n{traceback.format_exc()}")
            raise

    def _get_all_available_voice_files(self) -> List[str]:
        """Get list of all available voice files for debugging"""
        voices_dir = Path(__file__).parent / "voices"
        voice_files = []

        if voices_dir.exists():
            voice_files = [str(f) for f in voices_dir.glob("*.wav")]

        return voice_files

    def _get_default_speaker_file(self) -> Optional[str]:
        """Get path to default speaker file"""
        voices_dir = Path(__file__).parent / "voices"

        # Look for a default speaker file
        for filename in ["default.wav", "speaker.wav", "reference.wav"]:
            potential_path = voices_dir / filename
            if potential_path.exists():
                return str(potential_path)

        # If no default found, look for any wav file
        wav_files = list(voices_dir.glob("*.wav"))
        if wav_files:
            return str(wav_files[0])

        return None

    def list_voices(self) -> List[str]:
        """Return list of available voices"""
        return self.available_voices

    def list_models(self) -> List[str]:
        """Return list of available models"""
        return self.available_models
|
|
230
|
+
|
|
231
|
+
# --- Globals ---
app = FastAPI(title="XTTS Server")
router = APIRouter()
xtts_server = XTTSServer()
# Ensure thread-safe access: serializes /generate_audio requests so only one
# coroutine drives the (single) XTTS model at a time.
model_lock = asyncio.Lock()
|
|
236
|
+
|
|
237
|
+
# --- API Endpoints ---
@router.post("/generate_audio")
async def generate_audio(request: GenerationRequest):
    """Synthesize speech for `request` and return the result as a WAV response.

    Generation is serialized through `model_lock`; failures are reported as
    HTTP 500 with the exception text as detail.
    """
    from fastapi.responses import Response

    async with model_lock:
        try:
            wav_bytes = await xtts_server.generate_audio(
                text=request.text,
                voice=request.voice,
                language=request.language,
                speaker_wav=request.speaker_wav,
            )
            return Response(content=wav_bytes, media_type="audio/wav")
        except Exception as e:
            print(f"Server: ERROR in generate_audio endpoint: {e}")
            print(f"Server: ERROR traceback:\n{traceback.format_exc()}")
            raise HTTPException(status_code=500, detail=str(e))
|
|
254
|
+
|
|
255
|
+
@router.get("/list_voices")
|
|
256
|
+
async def list_voices():
|
|
257
|
+
try:
|
|
258
|
+
voices = xtts_server.list_voices()
|
|
259
|
+
print(f"Server: Returning {len(voices)} voices: {voices}")
|
|
260
|
+
return {"voices": voices}
|
|
261
|
+
except Exception as e:
|
|
262
|
+
print(f"Server: ERROR in list_voices endpoint: {e}")
|
|
263
|
+
print(f"Server: ERROR traceback:\n{traceback.format_exc()}")
|
|
264
|
+
raise HTTPException(status_code=500, detail=str(e))
|
|
265
|
+
|
|
266
|
+
@router.get("/list_models")
|
|
267
|
+
async def list_models():
|
|
268
|
+
try:
|
|
269
|
+
models = xtts_server.list_models()
|
|
270
|
+
print(f"Server: Returning {len(models)} models: {models}")
|
|
271
|
+
return {"models": models}
|
|
272
|
+
except Exception as e:
|
|
273
|
+
print(f"Server: ERROR in list_models endpoint: {e}")
|
|
274
|
+
print(f"Server: ERROR traceback:\n{traceback.format_exc()}")
|
|
275
|
+
raise HTTPException(status_code=500, detail=str(e))
|
|
276
|
+
|
|
277
|
+
@router.get("/status")
|
|
278
|
+
async def status():
|
|
279
|
+
return {
|
|
280
|
+
"status": "running",
|
|
281
|
+
"xtts_available": xtts_available,
|
|
282
|
+
"model_loaded": xtts_server.model_loaded,
|
|
283
|
+
"model_loading": xtts_server.model_loading,
|
|
284
|
+
"voices_count": len(xtts_server.available_voices),
|
|
285
|
+
"device": torch.cuda.get_device_name(0) if torch.cuda.is_available() else "CPU"
|
|
286
|
+
}
|
|
287
|
+
|
|
288
|
+
# Add a health check endpoint that responds immediately
|
|
289
|
+
@router.get("/health")
|
|
290
|
+
async def health_check():
|
|
291
|
+
return {"status": "healthy", "ready": True}
|
|
292
|
+
|
|
293
|
+
app.include_router(router)
|
|
294
|
+
|
|
295
|
+
# --- Server Startup ---
|
|
296
|
+
if __name__ == '__main__':
|
|
297
|
+
parser = argparse.ArgumentParser(description="XTTS TTS Server")
|
|
298
|
+
parser.add_argument("--host", type=str, default="localhost", help="Host to bind the server to.")
|
|
299
|
+
parser.add_argument("--port", type=int, default="8081", help="Port to bind the server to.")
|
|
300
|
+
|
|
301
|
+
args = parser.parse_args()
|
|
302
|
+
|
|
303
|
+
print(f"Server: Starting XTTS server on {args.host}:{args.port}")
|
|
304
|
+
print(f"Server: XTTS available: {xtts_available}")
|
|
305
|
+
print(f"Server: Model will be loaded on first audio generation request")
|
|
306
|
+
print(f"Server: Available voices: {len(xtts_server.available_voices)}")
|
|
307
|
+
if xtts_available:
|
|
308
|
+
print(f"Server: Device: {torch.cuda.get_device_name(0) if torch.cuda.is_available() else 'CPU'}")
|
|
309
|
+
|
|
310
|
+
# Create voices directory if it doesn't exist
|
|
311
|
+
voices_dir = Path(__file__).parent / "voices"
|
|
312
|
+
voices_dir.mkdir(exist_ok=True)
|
|
313
|
+
print(f"Server: Voices directory: {voices_dir}")
|
|
314
|
+
|
|
315
|
+
uvicorn.run(app, host=args.host, port=args.port)
|
|
316
|
+
except Exception as e:
|
|
317
|
+
print(f"Server: CRITICAL ERROR during startup: {e}")
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: lollms_client
|
|
3
|
-
Version: 1.6.
|
|
3
|
+
Version: 1.6.2
|
|
4
4
|
Summary: A client library for LoLLMs generate endpoint
|
|
5
5
|
Author-email: ParisNeo <parisneoai@gmail.com>
|
|
6
6
|
License: Apache License
|
|
@@ -1302,6 +1302,7 @@ try:
|
|
|
1302
1302
|
except Exception as e:
|
|
1303
1303
|
ASCIIColors.error(f"Error initializing Hugging Face Inference API binding: {e}")
|
|
1304
1304
|
ASCIIColors.info("Please ensure your Hugging Face API token is correctly set and you have access to the specified model.")```
|
|
1305
|
+
```
|
|
1305
1306
|
|
|
1306
1307
|
---
|
|
1307
1308
|
|
|
@@ -1403,7 +1404,9 @@ else:
|
|
|
1403
1404
|
|
|
1404
1405
|
except Exception as e:
|
|
1405
1406
|
ASCIIColors.error(f"An error occurred during multi-image fusion: {e}")
|
|
1406
|
-
```
|
|
1407
|
+
```
|
|
1408
|
+
|
|
1409
|
+
This powerful feature allows for complex creative tasks like character swapping, background replacement, and style transfer directly through the `lollms_client` library.
|
|
1407
1410
|
|
|
1408
1411
|
### Listing Available Models
|
|
1409
1412
|
|