lollms-client 1.6.1__py3-none-any.whl → 1.6.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lollms-client has been flagged as potentially problematic.
- lollms_client/__init__.py +1 -1
- lollms_client/llm_bindings/azure_openai/__init__.py +2 -2
- lollms_client/llm_bindings/claude/__init__.py +2 -2
- lollms_client/llm_bindings/gemini/__init__.py +2 -2
- lollms_client/llm_bindings/grok/__init__.py +2 -2
- lollms_client/llm_bindings/groq/__init__.py +2 -2
- lollms_client/llm_bindings/hugging_face_inference_api/__init__.py +2 -2
- lollms_client/llm_bindings/litellm/__init__.py +1 -1
- lollms_client/llm_bindings/llamacpp/__init__.py +2 -2
- lollms_client/llm_bindings/lollms/__init__.py +1 -1
- lollms_client/llm_bindings/lollms_webui/__init__.py +1 -1
- lollms_client/llm_bindings/mistral/__init__.py +2 -2
- lollms_client/llm_bindings/novita_ai/__init__.py +2 -2
- lollms_client/llm_bindings/ollama/__init__.py +7 -4
- lollms_client/llm_bindings/open_router/__init__.py +2 -2
- lollms_client/llm_bindings/openai/__init__.py +1 -1
- lollms_client/llm_bindings/openllm/__init__.py +2 -2
- lollms_client/llm_bindings/openwebui/__init__.py +1 -1
- lollms_client/llm_bindings/perplexity/__init__.py +2 -2
- lollms_client/llm_bindings/pythonllamacpp/__init__.py +3 -3
- lollms_client/llm_bindings/tensor_rt/__init__.py +1 -1
- lollms_client/llm_bindings/transformers/__init__.py +4 -4
- lollms_client/llm_bindings/vllm/__init__.py +1 -1
- lollms_client/lollms_core.py +19 -1452
- lollms_client/lollms_llm_binding.py +1 -1
- lollms_client/lollms_tti_binding.py +1 -1
- lollms_client/lollms_tts_binding.py +15 -13
- lollms_client/tti_bindings/diffusers/__init__.py +276 -856
- lollms_client/tti_bindings/diffusers/server/main.py +730 -0
- lollms_client/tti_bindings/gemini/__init__.py +1 -1
- lollms_client/tti_bindings/leonardo_ai/__init__.py +1 -1
- lollms_client/tti_bindings/novita_ai/__init__.py +1 -1
- lollms_client/tti_bindings/stability_ai/__init__.py +1 -1
- lollms_client/tts_bindings/lollms/__init__.py +6 -1
- lollms_client/tts_bindings/piper_tts/__init__.py +1 -1
- lollms_client/tts_bindings/xtts/__init__.py +97 -38
- lollms_client/tts_bindings/xtts/server/main.py +288 -272
- {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/METADATA +6 -3
- {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/RECORD +42 -41
- {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/WHEEL +0 -0
- {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/licenses/LICENSE +0 -0
- {lollms_client-1.6.1.dist-info → lollms_client-1.6.4.dist-info}/top_level.txt +0 -0
@@ -307,7 +307,7 @@ class GeminiTTIBinding_Impl(LollmsTTIBinding):
 
         return applied_some_settings
 
-    def
+    def list_models(self) -> list:
         """Lists available Imagen models in a standardized format."""
         models = IMAGEN_VERTEX_MODELS if self.auth_method == "vertex_ai" else IMAGEN_GEMINI_API_MODELS
         return [
@@ -38,7 +38,7 @@ class LeonardoAITTIBinding(LollmsTTIBinding):
         self.base_url = "https://cloud.leonardo.ai/api/rest/v1"
         self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
 
-    def
+    def list_models(self) -> list:
         # You could also fetch this dynamically from /models endpoint
         return LEONARDO_AI_MODELS
 
@@ -36,7 +36,7 @@ class NovitaAITTIBinding(LollmsTTIBinding):
         self.base_url = "https://api.novita.ai/v3"
         self.headers = {"Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json"}
 
-    def
+    def list_models(self) -> list:
         return NOVITA_AI_MODELS
 
     def generate_image(self, prompt: str, negative_prompt: str = "", width: int = 1024, height: int = 1024, **kwargs) -> bytes:
@@ -40,7 +40,7 @@ class StabilityAITTIBinding(LollmsTTIBinding):
             raise ValueError("Stability AI API key is required. Please set it in the configuration or as STABILITY_API_KEY environment variable.")
         self.model_name = self.config.get("model_name", "stable-diffusion-3-medium")
 
-    def
+    def list_models(self) -> list:
         return STABILITY_AI_MODELS
 
     def _get_api_url(self, task: str) -> str:
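The four TTI hunks above all make the same change: `list_models` gains an explicit `(self) -> list` signature. A minimal usage sketch, assuming `binding` is an already-constructed TTI binding instance (constructor arguments are not part of this diff):

```python
# Hedged sketch: `binding` stands for any of the TTI bindings touched above
# (Gemini, Leonardo AI, Novita AI, Stability AI). The entry format is
# binding-specific; this only shows the now-uniform call signature.
models = binding.list_models()   # returns a plain Python list in 1.6.4
for entry in models:
    print(entry)
```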
@@ -29,6 +29,7 @@ class LollmsTTSBinding_Impl(LollmsTTSBinding):
                          model_name=model_name,
                          service_key=service_key, # Stored in the parent class
                          verify_ssl_certificate=verify_ssl_certificate)
+        self.host_address = host_address
         # self.client_id = service_key # Can access via self.service_key from parent
 
     def generate_audio(self, text: str, voice: Optional[str] = None, **kwargs) -> bytes:
@@ -142,4 +143,8 @@ class LollmsTTSBinding_Impl(LollmsTTSBinding):
         except Exception as e:
             ASCIIColors.error(f"An unexpected error occurred while listing voices: {e}")
             trace_exception(e)
-            return ["main_voice"]
+            return ["main_voice"]
+
+    def list_models(self) -> list:
+        """Lists models"""
+        return ["lollms"]
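For the lollms TTS binding, `list_models` is new in this range and the voice-listing fallback now returns a default instead of raising. A rough usage sketch, assuming `tts` is an already-constructed `LollmsTTSBinding_Impl` (its constructor and the name of the voice-listing method are outside this hunk):

```python
# Hedged sketch; only the calls visible in the hunks above are assumed to exist.
models = tts.list_models()                  # new in 1.6.4: returns ["lollms"]
audio = tts.generate_audio("Hello world")   # signature shown in the hunk context
# If listing voices fails, the binding now falls back to ["main_voice"],
# so downstream code always receives at least one voice name.
```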
@@ -104,7 +104,7 @@ class PiperClientBinding(LollmsTTSBinding):
         response.raise_for_status()
         return response.json().get("voices", [])
 
-    def list_models(self
+    def list_models(self) -> List[str]:
         """Get available models from the server"""
         response = requests.get(f"{self.base_url}/list_models")
         response.raise_for_status()
@@ -1,4 +1,3 @@
-# File: lollms_client/tts_bindings/xtts/__init__.py
 from lollms_client.lollms_tts_binding import LollmsTTSBinding
 from typing import Optional, List
 from pathlib import Path
@@ -8,6 +7,14 @@ import sys
 import time
 import pipmaster as pm
 
+# New import for process-safe file locking
+try:
+    from filelock import FileLock, Timeout
+except ImportError:
+    print("FATAL: The 'filelock' library is required. Please install it by running: pip install filelock")
+    sys.exit(1)
+
+
 BindingName = "XTTSClientBinding"
 
 class XTTSClientBinding(LollmsTTSBinding):
@@ -24,29 +31,85 @@ class XTTSClientBinding(LollmsTTSBinding):
         self.auto_start_server = auto_start_server
         self.server_process = None
         self.base_url = f"http://{self.host}:{self.port}"
+        self.binding_root = Path(__file__).parent
+        self.server_dir = self.binding_root / "server"
 
         if self.auto_start_server:
-            self.
+            self.ensure_server_is_running()
+
+    def is_server_running(self) -> bool:
+        """Checks if the server is already running and responsive."""
+        try:
+            response = requests.get(f"{self.base_url}/status", timeout=1)
+            if response.status_code == 200 and response.json().get("status") == "running":
+                return True
+        except requests.ConnectionError:
+            return False
+        return False
+
+    def ensure_server_is_running(self):
+        """
+        Ensures the XTTS server is running. If not, it attempts to start it
+        in a process-safe manner using a file lock.
+        """
+        if self.is_server_running():
+            print("XTTS Server is already running.")
+            return
+
+        lock_path = self.server_dir / "xtts_server.lock"
+        lock = FileLock(lock_path, timeout=10) # Wait a maximum of 10 seconds for the lock
+
+        print("Attempting to start or wait for the XTTS server...")
+        try:
+            with lock:
+                # Double-check after acquiring the lock to handle race conditions
+                if not self.is_server_running():
+                    print("Lock acquired. Starting dedicated XTTS server...")
+                    self.start_server()
+                else:
+                    print("Server was started by another process while waiting for the lock.")
+        except Timeout:
+            print("Could not acquire lock. Another process is likely starting the server. Waiting...")
+
+        # All workers (the one that started the server and those that waited) will verify the server is ready
+        self._wait_for_server()
+
+    def install(self, venv_path, requirements_file):
+        print(f"Ensuring virtual environment and dependencies in: {venv_path}")
+        pm_v = pm.PackageManager(venv_path=str(venv_path))
+
+        success = pm_v.ensure_requirements(
+            str(requirements_file),
+            verbose=True
+        )
+
+        if not success:
+            print("FATAL: Failed to install server dependencies. Aborting launch.")
+            return
+
+        print("Dependencies are satisfied. Proceeding to launch server...")
 
     def start_server(self):
-
-
-
-
-
+        """
+        Installs dependencies and launches the server as a background subprocess.
+        This method should only be called from within a file lock.
+        """
+        requirements_file = self.server_dir / "requirements.txt"
+        server_script = self.server_dir / "main.py"
 
         # 1. Ensure a virtual environment and dependencies
-        venv_path =
-        pm_v = pm.PackageManager(venv_path=venv_path)
-        pm_v.ensure_requirements(str(requirements_file))
+        venv_path = Path("./venv/xtts_venv")
 
+        if not venv_path.exists():
+            self.install(venv_path, requirements_file)
+
         # 2. Get the python executable from the venv
         if sys.platform == "win32":
            python_executable = venv_path / "Scripts" / "python.exe"
         else:
            python_executable = venv_path / "bin" / "python"
 
-        # 3. Launch the server as a subprocess
+        # 3. Launch the server as a detached subprocess
         command = [
            str(python_executable),
            str(server_script),
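The core of the new startup path is a double-checked, file-lock-guarded start: check, acquire the lock, re-check, and only then launch. A generalized sketch of that pattern (a hypothetical helper, not part of the package; only the `filelock` API from the diff is assumed):

```python
from filelock import FileLock, Timeout

def ensure_started_once(is_running, start, lock_path, lock_timeout=10):
    """Start a shared resource at most once across processes.

    Generalized from XTTSClientBinding.ensure_server_is_running: `is_running`
    and `start` are caller-supplied callables; the names here are illustrative.
    """
    if is_running():                      # fast path: nothing to do
        return
    try:
        with FileLock(lock_path, timeout=lock_timeout):
            if not is_running():          # re-check inside the lock (race window)
                start()
    except Timeout:
        # Another process holds the lock and is presumably starting the
        # resource; fall through so the caller can poll for readiness.
        pass
```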
@@ -54,41 +117,36 @@ class XTTSClientBinding(LollmsTTSBinding):
             "--port", str(self.port)
         ]
 
-        #
-
-
-
-            stderr=None, # Inherit parent's stderr (shows in console)
-        )
-
-        # 4. Wait for the server to be ready
-        self._wait_for_server()
+        # The server is started as a background process and is not tied to this specific worker's lifecycle
+        subprocess.Popen(command)
+        print("XTTS Server process launched in the background.")
+
 
     def _wait_for_server(self, timeout=60):
+        print("Waiting for XTTS server to become available...")
         start_time = time.time()
         while time.time() - start_time < timeout:
-
-
-
-
-                    return
-            except requests.ConnectionError:
-                time.sleep(1)
+            if self.is_server_running():
+                print("XTTS Server is up and running.")
+                return
+            time.sleep(1)
 
-
-        raise RuntimeError("Failed to start the XTTS server in the specified timeout.")
+        raise RuntimeError("Failed to connect to the XTTS server within the specified timeout.")
 
     def stop_server(self):
+        """
+        In a multi-worker setup, a single client instance should not stop the shared server.
+        The server will continue running until the main application is terminated.
+        """
         if self.server_process:
-            print("XTTS Client:
-            self.server_process.terminate()
-            self.server_process.wait()
+            print("XTTS Client: An instance is shutting down, but the shared server will remain active for other workers.")
             self.server_process = None
-        print("Server stopped.")
 
     def __del__(self):
-
-
+        """
+        The destructor does not stop the server to prevent disrupting other workers.
+        """
+        pass
 
     def generate_audio(self, text: str, voice: Optional[str] = None, **kwargs) -> bytes:
         """Generate audio by calling the server's API"""
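`start_server` now fires the subprocess and returns immediately; readiness is handled separately by `_wait_for_server` polling the `/status` endpoint. A condensed sketch of that launch-then-poll shape (a hypothetical helper; the command, status URL, and 60 s timeout mirror the diff):

```python
import subprocess
import time
import requests

def launch_and_wait(command, status_url, timeout=60):
    """Launch a background process, then poll its HTTP status endpoint until
    it reports readiness. Hypothetical helper mirroring start_server/_wait_for_server."""
    subprocess.Popen(command)            # detached: this worker does not wait on it
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            r = requests.get(status_url, timeout=1)
            if r.status_code == 200 and r.json().get("status") == "running":
                return
        except requests.ConnectionError:
            pass
        time.sleep(1)
    raise RuntimeError("Server did not report ready within the timeout.")
```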
@@ -103,9 +161,10 @@ class XTTSClientBinding(LollmsTTSBinding):
         response.raise_for_status()
         return response.json().get("voices", [])
 
-
-
+
+    def list_models(self) -> list:
+        """Lists models"""
         response = requests.get(f"{self.base_url}/list_models")
         response.raise_for_status()
         return response.json().get("models", [])
-
+
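Taken together, the XTTS client in 1.6.4 talks to a long-lived local server over HTTP. A quick smoke test against the two endpoints that appear in this diff (`/status` and `/list_models`); the host and port below are placeholders for whatever the binding was configured with:

```python
import requests

base_url = "http://localhost:8020"   # placeholder; the real host/port come from the binding config
print(requests.get(f"{base_url}/status", timeout=2).json())        # expected: {"status": "running"}
print(requests.get(f"{base_url}/list_models", timeout=5).json())   # expected shape: {"models": [...]}
```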