webscout-8.3-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +4 -4
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/LambdaChat.py +7 -1
- webscout/Provider/OPENAI/BLACKBOXAI.py +1049 -1017
- webscout/Provider/OPENAI/Qwen3.py +303 -303
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/__init__.py +2 -1
- webscout/Provider/OPENAI/api.py +298 -13
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +89 -12
- webscout/Provider/OPENAI/chatgpt.py +15 -2
- webscout/Provider/OPENAI/chatgptclone.py +14 -3
- webscout/Provider/OPENAI/deepinfra.py +339 -328
- webscout/Provider/OPENAI/e2b.py +295 -73
- webscout/Provider/OPENAI/opkfc.py +18 -6
- webscout/Provider/OPENAI/scirachat.py +3 -2
- webscout/Provider/OPENAI/toolbaz.py +0 -1
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +367 -367
- webscout/Provider/OPENAI/yep.py +383 -383
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -0
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/METADATA +1 -1
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/RECORD +61 -51
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/WHEEL +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/together.py
ADDED
@@ -0,0 +1,287 @@
+import requests
+import random
+import string
+import json
+import time
+from typing import Optional, List, Dict, Any
+from webscout.Provider.TTI.utils import (
+    ImageData,
+    ImageResponse
+)
+from webscout.Provider.TTI.base import TTICompatibleProvider, BaseImages
+from io import BytesIO
+import os
+import tempfile
+from webscout.litagent import LitAgent
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+
+
+class Images(BaseImages):
+    def __init__(self, client):
+        self._client = client
+        self.base_url = "https://api.together.xyz/v1"
+        # Create a session - it will automatically get proxies from the global monkey patch!
+        self.session = requests.Session()
+        self._setup_session_with_retries()
+
+    def _setup_session_with_retries(self):
+        """Setup session with retry strategy and timeout configurations"""
+        # Configure retry strategy
+        retry_strategy = Retry(
+            total=3,
+            status_forcelist=[429, 500, 502, 503, 504],
+            backoff_factor=1,
+            allowed_methods=["HEAD", "GET", "OPTIONS", "POST"],
+        )
+
+        adapter = HTTPAdapter(max_retries=retry_strategy)
+        self.session.mount("http://", adapter)
+        self.session.mount("https://", adapter)
+
+    def get_api_key(self) -> str:
+        """Get API key from activation endpoint or cache"""
+        if hasattr(self._client, '_api_key_cache') and self._client._api_key_cache:
+            return self._client._api_key_cache
+
+        try:
+            activation_endpoint = "https://www.codegeneration.ai/activate-v2"
+            response = requests.get(
+                activation_endpoint,
+                headers={"Accept": "application/json"},
+                timeout=30
+            )
+            response.raise_for_status()
+            activation_data = response.json()
+            api_key = activation_data["openAIParams"]["apiKey"]
+            self._client._api_key_cache = api_key
+            return api_key
+        except Exception as e:
+            raise Exception(f"Failed to get activation key: {e}")
+
+    def build_headers(self, extra: Optional[Dict[str, str]] = None) -> Dict[str, str]:
+        """Build headers with API authorization"""
+        api_key = self.get_api_key()
+
+        agent = LitAgent()
+        fp = agent.generate_fingerprint("chrome")
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+            "accept": "application/json",
+            "accept-language": fp["accept_language"],
+            "user-agent": fp["user_agent"],
+            "sec-ch-ua": fp["sec_ch_ua"],
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "cross-site",
+        }
+        if extra:
+            headers.update(extra)
+        return headers
+
+    def create(
+        self,
+        model: str = None,
+        prompt: str = None,
+        n: int = 1,
+        size: str = "1024x1024",
+        response_format: str = "url",
+        user: Optional[str] = None,
+        style: str = None,
+        aspect_ratio: str = None,
+        timeout: int = 120,
+        image_format: str = "png",
+        enhance: bool = True,
+        steps: int = 20,
+        seed: Optional[int] = None,
+        **kwargs,
+    ) -> ImageResponse:
+        """
+        Create images using Together.xyz image models
+
+        Args:
+            model: Image model to use (defaults to first available)
+            prompt: Text description of the image to generate
+            n: Number of images to generate (1-4)
+            size: Image size in format "WIDTHxHEIGHT"
+            response_format: "url" or "b64_json"
+            timeout: Request timeout in seconds
+            steps: Number of inference steps (1-50)
+            seed: Random seed for reproducible results
+            **kwargs: Additional model-specific parameters
+        """
+        if not prompt:
+            raise ValueError(
+                "Describe the image you want to create (use the 'prompt' property)."
+            )
+
+        # Use provided model or default to first available
+        if not model:
+            model = self._client.AVAILABLE_MODELS[0]
+        elif model not in self._client.AVAILABLE_MODELS:
+            raise ValueError(f"Model '{model}' not available. Choose from: {self._client.AVAILABLE_MODELS}")
+
+        # Parse size
+        if 'x' in size:
+            width, height = map(int, size.split('x'))
+        else:
+            width = height = int(size)
+
+        # Build request body
+        body = {
+            "model": model,
+            "prompt": prompt,
+            "width": width,
+            "height": height,
+            # Clamp steps to 1-4 as required by Together.xyz API
+            "steps": min(max(steps, 1), 4),
+            "n": min(max(n, 1), 4),  # Clamp between 1-4
+        }
+
+        # Add optional parameters
+        if seed is not None:
+            body["seed"] = seed
+
+        # Add any additional kwargs
+        body.update(kwargs)
+
+        try:
+            resp = self.session.request(
+                "post",
+                f"{self.base_url}/images/generations",
+                json=body,
+                headers=self.build_headers(),
+                timeout=timeout,
+            )
+
+            data = resp.json()
+
+            # Check for errors
+            if "error" in data:
+                error_msg = data["error"].get("message", str(data["error"]))
+                raise RuntimeError(f"Together.xyz API error: {error_msg}")
+
+            if not data.get("data") or len(data["data"]) == 0:
+                raise RuntimeError("Failed to process image. No data found.")
+
+            result = data["data"]
+            result_data = []
+
+            for i, item in enumerate(result):
+                if response_format == "url":
+                    if "url" in item:
+                        result_data.append(ImageData(url=item["url"]))
+                else:  # b64_json
+                    if "b64_json" in item:
+                        result_data.append(ImageData(b64_json=item["b64_json"]))
+
+            if not result_data:
+                raise RuntimeError("No valid image data found in response")
+
+            return ImageResponse(data=result_data)
+
+        except requests.exceptions.Timeout:
+            raise RuntimeError(f"Request timed out after {timeout} seconds. Try reducing image size or steps.")
+        except requests.exceptions.RequestException as e:
+            # Print the response content for debugging if available
+            if hasattr(e, 'response') and e.response is not None:
+                try:
+                    print("[Together.xyz API error details]", e.response.text)
+                except Exception:
+                    pass
+            raise RuntimeError(f"Network error: {str(e)}")
+        except json.JSONDecodeError:
+            raise RuntimeError("Invalid JSON response from Together.xyz API")
+        except Exception as e:
+            raise RuntimeError(f"An error occurred: {str(e)}")
+
+
+class TogetherImage(TTICompatibleProvider):
+    """
+    Together.xyz Text-to-Image provider
+    Updated: 2025-06-02 10:42:41 UTC by OEvortex
+    Supports FLUX and other image generation models
+    """
+
+    # Image models from Together.xyz API (filtered for image type only)
+    AVAILABLE_MODELS = [
+        "black-forest-labs/FLUX.1-schnell-Free",
+        "black-forest-labs/FLUX.1.1-pro",
+        "black-forest-labs/FLUX.1-pro",
+        "black-forest-labs/FLUX.1-redux",
+        "black-forest-labs/FLUX.1-depth",
+        "black-forest-labs/FLUX.1-canny",
+        "black-forest-labs/FLUX.1-kontext-max",
+        "black-forest-labs/FLUX.1-dev-lora",
+        "black-forest-labs/FLUX.1-schnell",
+        "black-forest-labs/FLUX.1-dev",
+        "black-forest-labs/FLUX.1-kontext-pro",
+    ]
+
+    def __init__(self):
+        self.images = Images(self)
+        self._api_key_cache = None
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return TogetherImage.AVAILABLE_MODELS
+
+        return _ModelList()
+
+    def convert_model_name(self, model: str) -> str:
+        """Convert model alias to full model name"""
+        if model in self.AVAILABLE_MODELS:
+            return model
+
+        # Default to first available model
+        return self.AVAILABLE_MODELS[0]
+
+    # def fetch_available_models(self) -> List[str]:
+    #     """Fetch current image models from Together.xyz API"""
+    #     try:
+    #         api_key = self.images.get_api_key()
+    #         headers = {
+    #             "Authorization": f"Bearer {api_key}",
+    #             "Accept": "application/json"
+    #         }
+
+    #         response = requests.get(
+    #             "https://api.together.xyz/v1/models",
+    #             headers=headers,
+    #             timeout=30
+    #         )
+    #         response.raise_for_status()
+    #         models_data = response.json()
+
+    #         # Filter image models
+    #         image_models = []
+    #         for model in models_data:
+    #             if isinstance(model, dict) and model.get("type", "").lower() == "image":
+    #                 image_models.append(model["id"])
+
+    #         return sorted(image_models)
+
+    #     except Exception as e:
+    #         return self.AVAILABLE_MODELS
+
+
+if __name__ == "__main__":
+    from rich import print
+    client = TogetherImage()
+
+    # Test with a sample prompt
+    response = client.images.create(
+        model="black-forest-labs/FLUX.1-schnell-Free",  # Free FLUX model
+        prompt="A majestic dragon flying over a mystical forest, fantasy art, highly detailed",
+        size="1024x1024",
+        n=1,
+        steps=25,
+        response_format="url",
+        timeout=120,
+    )
+    print(response)
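For orientation (not part of the diff): a minimal sketch of consuming the new provider from outside the package, assuming webscout 8.3.1 is installed. The save_b64_image helper is illustrative only; the import path and the images.create() signature follow the file above.

import base64

from webscout.Provider.TTI.together import TogetherImage


def save_b64_image(b64_data: str, path: str) -> str:
    # Decode a b64_json payload returned by images.create() and write it to disk.
    with open(path, "wb") as fh:
        fh.write(base64.b64decode(b64_data))
    return path


client = TogetherImage()
resp = client.images.create(
    prompt="A lighthouse at dusk, watercolor style",
    model="black-forest-labs/FLUX.1-schnell-Free",
    response_format="b64_json",  # request inline image data instead of a hosted URL
)
for i, img in enumerate(resp.data):
    if img.b64_json:
        print("saved", save_b64_image(img.b64_json, f"together_{i}.png"))
    elif img.url:
        print("image URL:", img.url)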
webscout/Provider/TTI/utils.py
CHANGED
@@ -1,11 +1,12 @@
+import time
 from typing import List, Optional
 from pydantic import BaseModel, Field
-import time
 
 class ImageData(BaseModel):
     url: Optional[str] = None
     b64_json: Optional[str] = None
 
+
 class ImageResponse(BaseModel):
     created: int = Field(default_factory=lambda: int(time.time()))
     data: List[ImageData]
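The hunk above only moves the time import and adds a blank line; for reference, a minimal sketch of how these small pydantic containers behave (not part of the diff; the serialization call assumes pydantic v2):

import time
from webscout.Provider.TTI.utils import ImageData, ImageResponse

resp = ImageResponse(data=[ImageData(url="https://example.com/img.png")])
assert resp.created <= int(time.time())  # `created` defaults to the current Unix timestamp
print(resp.model_dump())  # pydantic v2; on pydantic v1 use resp.dict()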
webscout/Provider/TTS/freetts.py
ADDED
@@ -0,0 +1,140 @@
+import os
+import requests
+from datetime import datetime
+from datetime import datetime
+from webscout.Provider.TTS import BaseTTSProvider
+from webscout.litagent import LitAgent
+
+
+class FreeTTS(BaseTTSProvider):
+    """
+    Text-to-speech provider using the FreeTTS API.
+    """
+
+    headers = {
+        "accept": "*/*",
+        "accept-language": "ru-RU,ru;q=0.8",
+        "cache-control": "no-cache",
+        "pragma": "no-cache",
+        "sec-ch-ua": '"Brave";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"Windows"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "sec-gpc": "1",
+        "User-Agent": LitAgent().random()
+    }
+
+    def __init__(self, lang="ru-RU", timeout: int = 30, proxies: dict = None):
+        """Initializes the FreeTTS TTS client."""
+        super().__init__()
+        self.lang = lang
+        self.url = "https://freetts.ru/api/v1/tts"
+        self.select_url = "https://freetts.ru/api/v1/select"
+        self.audio_base_url = "https://freetts.ru"
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.voices = {}
+        self.load_voices()
+
+    def load_voices(self):
+        """Loads the voice data and normalizes it into the required form."""
+        try:
+            response = self.session.get(self.select_url, timeout=self.timeout)
+            if response.status_code == 200:
+                data = response.json()
+                voices_data = data["data"]["voice"]
+
+                if isinstance(voices_data, list):
+                    for voice_info in voices_data:
+                        if isinstance(voice_info, dict):
+                            voice_id = voice_info.get("code")
+                            voice_name = voice_info.get("name", voice_id)
+                            if voice_id and voice_id.startswith(self.lang):
+                                self.voices[voice_id] = voice_name
+                else:
+                    print("Error")
+                print("Done")
+            else:
+                print(f"Error HTTP: {response.status_code}")
+        except Exception as e:
+            print(f"Error downloading voice: {e}")
+
+    def get_available_voices(self):
+        """Returns all available voices as a single string."""
+        if not self.voices:
+            return "Error"
+        voices_list = [f"{voice_id}: {name}" for voice_id, name in self.voices.items()]
+        return "\n".join(voices_list)
+
+    def tts(self, text: str, voiceid: str = None) -> str:
+        """
+        Converts text to speech using the FreeTTS API and saves it to a file.
+        Args:
+            text (str): The text to convert to speech
+            voiceid (str): Voice ID to use for TTS (default: first available)
+        Returns:
+            str: Path to the generated audio file (MP3)
+        Raises:
+            AssertionError: If no voices are available
+            requests.RequestException: If there's an error communicating with the API
+            RuntimeError: If there's an error processing the audio
+        """
+        try:
+            if not self.voices:
+                raise RuntimeError(f"No voices available for language '{self.lang}'")
+
+            available_voices = self.get_available_voices()
+            if not available_voices:
+                print(f"There are no available voices for the language '{self.lang}'")
+                return ""
+
+            if voiceid is None:
+                voiceid = next(iter(available_voices.keys()))
+
+            payload = {
+                "text": text,
+                "voiceid": voiceid
+            }
+
+            response = requests.post(self.url, json=payload, headers=self.headers)
+
+            if response.status_code == 200:
+                data = response.json()
+                mp3_path = data.get("data", {}).get("src", "")
+
+                if not mp3_path:
+                    print("The path to the audio file in the response was not found.")
+                    return ""
+
+                mp3_url = self.audio_base_url + mp3_path
+
+                mp3_filename = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + ".mp3"
+                full_path = os.path.abspath(mp3_filename)
+
+                with requests.get(mp3_url, stream=True) as r:
+                    r.raise_for_status()
+                    with open(mp3_filename, "wb") as f:
+                        for chunk in r.iter_content(chunk_size=1024):
+                            f.write(chunk)
+
+                print(f"File '{mp3_filename}' saved successfully!")
+                return full_path
+
+        except Exception as e:
+            print(e)
+
+if __name__ == "__main__":
+    tts = FreeTTS(lang="ru")
+    available_voices = tts.get_available_voices()
+    print("Available voices:", available_voices)
+
+    text_to_speak = input("\nEnter text: ")
+    voice_id = "ru-RU001"
+    print("[debug] Generating audio...")
+    audio_file = tts.tts(text=text_to_speak, voiceid=voice_id)
+    print(f"Audio saved to: {audio_file}")