webscout-7.5-py3-none-any.whl → webscout-7.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +29 -11
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/DeepSeek.py +25 -17
- webscout/Provider/Deepinfra.py +115 -48
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +23 -7
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Netwrck.py +42 -19
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/async_aiarta.py +14 -14
- webscout/Provider/TTI/aiarta/sync_aiarta.py +52 -21
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +25 -8
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +64 -37
- webscout/Provider/__init__.py +0 -6
- webscout/Provider/akashgpt.py +20 -5
- webscout/Provider/flowith.py +20 -5
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +1 -1
- webscout/Provider/llmchat.py +30 -8
- webscout/Provider/multichat.py +65 -9
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +1 -3
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/LICENSE.md +4 -4
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/METADATA +101 -390
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/RECORD +104 -110
- webscout/Extra/autollama.py +0 -231
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.5.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0 (new file)

@@ -0,0 +1,247 @@
+"""FastFluxImager Synchronous Provider - Generate stunning AI art with FastFlux! 🎨
+
+Examples:
+    >>> from webscout import FastFluxImager
+    >>> provider = FastFluxImager()
+    >>>
+    >>> # Generate a single image
+    >>> images = provider.generate("A cool cyberpunk city at night")
+    >>> paths = provider.save(images, dir="my_images")
+    >>>
+    >>> # Generate multiple images with different settings
+    >>> images = provider.generate(
+    ...     prompt="Epic dragon breathing fire",
+    ...     amount=2,
+    ...     model="flux_1_schnell"
+    ... )
+    >>> provider.save(images, dir="dragon_pics")
+"""
+
+import requests
+import base64
+import json
+import os
+import time
+from typing import List, Optional, Union
+from requests.exceptions import RequestException
+from pathlib import Path
+
+from webscout.AIbase import ImageProvider
+from webscout.litagent import LitAgent
+
+# Get a fresh user agent! 🔄
+agent = LitAgent()
+
+class FastFluxImager(ImageProvider):
+    """Your go-to provider for generating fire images with FastFlux! 🎨
+
+    Examples:
+        >>> provider = FastFluxImager()
+        >>> # Generate one image with default model
+        >>> image = provider.generate("A futuristic city")
+        >>> provider.save(image, "city.png")
+        >>>
+        >>> # Generate multiple images with specific model
+        >>> images = provider.generate(
+        ...     prompt="Space station",
+        ...     amount=3,
+        ...     model="flux_1_dev"
+        ... )
+        >>> provider.save(images, dir="space_pics")
+    """
+
+    AVAILABLE_MODELS = [
+        "flux_1_schnell",  # Fast generation model (default)
+        "flux_1_dev",      # Developer model
+        "sana_1_6b"        # SANA 1.6B model
+    ]
+
+
+    def __init__(self, timeout: int = 60, proxies: dict = None):
+        """Initialize your FastFluxImager provider with custom settings
+
+        Examples:
+            >>> provider = FastFluxImager(timeout=120)
+            >>> provider = FastFluxImager(proxies={"http": "http://proxy:8080"})
+
+        Args:
+            timeout (int): HTTP request timeout in seconds (default: 60)
+            proxies (dict, optional): Proxy configuration for requests
+            logging (bool): Enable/disable logging (default: True)
+        """
+        self.api_endpoint = "https://api.fastflux.co/v1/images/generate"
+        self.headers = {
+            "accept": "application/json, text/plain, */*",
+            "content-type": "application/json",
+            "origin": "https://fastflux.co",
+            "referer": "https://fastflux.co/",
+            "user-agent": agent.random()
+        }
+        self.session = requests.Session()
+        self.session.headers.update(self.headers)
+        if proxies:
+            self.session.proxies.update(proxies)
+        self.timeout = timeout
+        self.prompt: str = "AI-generated image - webscout"
+        self.image_extension: str = "png"
+
+
+    def generate(
+        self,
+        prompt: str,
+        amount: int = 1,
+        model: str = "flux_1_schnell",
+        size: str = "1_1",
+        is_public: bool = False,
+        max_retries: int = 3,
+        retry_delay: int = 5
+    ) -> List[bytes]:
+        """Generate some fire images from your prompt! 🎨
+
+        Examples:
+            >>> provider = FastFluxImager()
+            >>> # Basic usage
+            >>> images = provider.generate("Cool art")
+            >>> # Advanced usage
+            >>> images = provider.generate(
+            ...     prompt="Epic dragon",
+            ...     amount=2,
+            ...     model="flux_1_dev",
+            ...     size="16_9"
+            ... )
+
+        Args:
+            prompt (str): Your image description
+            amount (int): How many images you want (default: 1)
+            model (str): Model to use - check AVAILABLE_MODELS (default: "flux_1_schnell")
+            size (str): Image size ratio (default: "1_1")
+            is_public (bool): Whether to make the image public (default: False)
+            max_retries (int): Max retry attempts if something fails (default: 3)
+            retry_delay (int): Seconds to wait between retries (default: 5)
+
+        Returns:
+            List[bytes]: Your generated images
+
+        Raises:
+            ValueError: If the inputs ain't valid
+            RequestException: If the API calls fail after retries
+        """
+        if not prompt:
+            raise ValueError("Yo fam, the prompt can't be empty! 🤔")
+        if not isinstance(amount, int) or amount < 1:
+            raise ValueError("Amount needs to be a positive number! 📈")
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Model must be one of {self.AVAILABLE_MODELS}! 🎯")
+        if size not in self.AVAILABLE_SIZES:
+            raise ValueError(f"Size must be one of {self.AVAILABLE_SIZES}! 📏")
+
+        self.prompt = prompt
+        response = []
+
+        # Prepare payload
+        payload = {
+            "prompt": prompt,
+            "model": model,
+            "size": size,
+            "isPublic": is_public
+        }
+
+        for i in range(amount):
+            for attempt in range(max_retries):
+                try:
+                    if self.logging:
+                        print(f"Generating image {i+1}/{amount}... 🎨")
+
+                    resp = self.session.post(
+                        self.api_endpoint,
+                        json=payload,
+                        timeout=self.timeout
+                    )
+                    resp.raise_for_status()
+                    result = resp.json()
+
+                    if result and 'result' in result:
+                        # Get base64 data and remove header
+                        image_data = result['result']
+                        base64_data = image_data.split(',')[1]
+
+                        # Decode base64 data
+                        image_bytes = base64.b64decode(base64_data)
+                        response.append(image_bytes)
+
+                        break
+                    else:
+                        raise RequestException("Invalid response format")
+
+                except RequestException as e:
+                    if attempt == max_retries - 1:
+                        raise RequestException(f"Failed to generate image after {max_retries} attempts: {e}")
+
+                    time.sleep(retry_delay)
+
+        return response
+
+    def save(
+        self,
+        response: List[bytes],
+        name: Optional[str] = None,
+        dir: Optional[Union[str, Path]] = None,
+        filenames_prefix: str = "",
+    ) -> List[str]:
+        """Save your fire generated images! 💾
+
+        Examples:
+            >>> provider = FastFluxImager()
+            >>> images = provider.generate("Cool art")
+            >>> # Save with default settings
+            >>> paths = provider.save(images)
+            >>> # Save with custom name and directory
+            >>> paths = provider.save(
+            ...     images,
+            ...     name="my_art",
+            ...     dir="my_images",
+            ...     filenames_prefix="test_"
+            ... )
+
+        Args:
+            response (List[bytes]): Your generated images
+            name (Optional[str]): Custom name for your images
+            dir (Optional[Union[str, Path]]): Where to save the images (default: current directory)
+            filenames_prefix (str): Prefix for your image files
+
+        Returns:
+            List[str]: Paths to your saved images
+        """
+        save_dir = dir if dir else os.getcwd()
+        if not os.path.exists(save_dir):
+            os.makedirs(save_dir)
+
+        name = self.prompt if name is None else name
+
+        # Clean up name for filename use
+        safe_name = "".join(c if c.isalnum() or c in "_-" else "_" for c in name)
+        safe_name = safe_name[:50]  # Truncate if too long
+
+        filenames = []
+
+        for i, image in enumerate(response):
+            filename = f"{filenames_prefix}{safe_name}_{i}.{self.image_extension}"
+            filepath = os.path.join(save_dir, filename)
+
+            with open(filepath, "wb") as f:
+                f.write(image)
+
+            filenames.append(filename)
+
+        return filenames
+
+
+if __name__ == "__main__":
+    # Example usage
+    provider = FastFluxImager()
+    try:
+        images = provider.generate("A cyberpunk city at night with neon lights", amount=1)
+        paths = provider.save(images, dir="generated_images")
+        print(f"Successfully saved images to: {paths}")
+    except Exception as e:
+        print(f"Oops, something went wrong: {e}")
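The module docstring above already sketches the intended call pattern; a minimal usage sketch is shown below for completeness. It assumes FastFluxImager is importable from the top-level webscout package as the docstring claims, and note that generate() references self.AVAILABLE_SIZES and self.logging, neither of which is defined in this file, so the call only succeeds if the ImageProvider base class (or a later patch) supplies them.

# Hypothetical usage sketch for the new FastFlux provider in 7.6.
# Assumes `from webscout import FastFluxImager` resolves as the module docstring states,
# and that AVAILABLE_SIZES / logging are provided elsewhere (this file defines neither).
from webscout import FastFluxImager

provider = FastFluxImager(timeout=120)  # longer timeout for slower generations

images = provider.generate(
    prompt="A cool cyberpunk city at night",
    amount=2,
    model="flux_1_schnell",  # must be one of AVAILABLE_MODELS
    size="1_1",              # must pass the AVAILABLE_SIZES check noted above
)

# save() writes <prefix><sanitized prompt>_<index>.png into dir and returns the bare
# filenames rather than absolute paths.
paths = provider.save(images, dir="my_images", filenames_prefix="fastflux_")
print(paths)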
webscout/Provider/TTS/deepgram.py +12 -39

@@ -2,12 +2,11 @@ import time
 import requests
 import pathlib
 import base64
+import tempfile
 from io import BytesIO
-from playsound import playsound
 from webscout import exceptions
 from webscout.AIbase import TTSProvider
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 from . import utils
 
@@ -19,7 +18,6 @@ class DeepgramTTS(TTSProvider):
     headers: dict[str, str] = {
         "User-Agent": LitAgent().random()
     }
-    cache_dir = pathlib.Path("./audio_cache")
     all_voices: dict[str, str] = {
         "Asteria": "aura-asteria-en", "Arcas": "aura-arcas-en", "Luna": "aura-luna-en",
         "Zeus": "aura-zeus-en", "Orpheus": "aura-orpheus-en", "Angus": "aura-angus-en",
@@ -34,11 +32,7 @@ class DeepgramTTS(TTSProvider):
         if proxies:
             self.session.proxies.update(proxies)
         self.timeout = timeout
-        self.logger = Logger(
-            name="DeepgramTTS",
-            format=LogFormat.MODERN_EMOJI,
-
-        )
+        self.temp_dir = tempfile.mkdtemp(prefix="webscout_tts_")
 
     def tts(self, text: str, voice: str = "Brian", verbose: bool = True) -> str:
         """
@@ -62,13 +56,13 @@ class DeepgramTTS(TTSProvider):
         ), f"Voice '{voice}' not one of [{', '.join(self.all_voices.keys())}]"
 
         url = "https://deepgram.com/api/ttsAudioGeneration"
-        filename =
+        filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=self.temp_dir))
 
         # Split text into sentences using the utils module
         sentences = utils.split_sentences(text)
         if verbose:
             for index, sen in enumerate(sentences):
-
+                print(f"[debug] Sentence {index}: {sen}")
 
         def generate_audio_for_chunk(part_text: str, part_number: int):
             """
@@ -103,15 +97,15 @@ class DeepgramTTS(TTSProvider):
             if response_data:
                 audio_data = base64.b64decode(response_data)
                 if verbose:
-
+                    print(f"[debug] Chunk {part_number} processed successfully")
                 return part_number, audio_data
 
             if verbose:
-
+                print(f"[debug] No data received for chunk {part_number}. Attempt {retry_count + 1}/{max_retries}")
 
         except requests.RequestException as e:
             if verbose:
-
+                print(f"[debug] Error processing chunk {part_number}: {str(e)}. Attempt {retry_count + 1}/{max_retries}")
             if retry_count == max_retries - 1:
                 raise
 
@@ -121,9 +115,6 @@ class DeepgramTTS(TTSProvider):
             raise RuntimeError(f"Failed to generate audio for chunk {part_number} after {max_retries} attempts")
 
         try:
-            # Create the audio_cache directory if it doesn't exist
-            self.cache_dir.mkdir(parents=True, exist_ok=True)
-
             # Using ThreadPoolExecutor to handle requests concurrently
             with ThreadPoolExecutor() as executor:
                 futures = {
@@ -148,36 +139,18 @@ class DeepgramTTS(TTSProvider):
                     f.write(audio_chunks[chunk_num])
 
             if verbose:
-
+                print(f"[debug] Audio saved to {filename}")
             return str(filename)
 
         except Exception as e:
-
+            print(f"[debug] Failed to generate audio: {str(e)}") if verbose else None
             raise RuntimeError(f"Failed to generate audio: {str(e)}")
 
-    def play_audio(self, filename: str):
-        """
-        Plays an audio file using playsound.
-
-        Args:
-            filename (str): The path to the audio file.
-
-        Raises:
-            RuntimeError: If there is an error playing the audio.
-        """
-        try:
-            playsound(filename)
-        except Exception as e:
-            self.logger.error(f"Failed to play audio: {str(e)} 🚨")
-            raise RuntimeError(f"Failed to play audio: {str(e)}")
-
 # Example usage
 if __name__ == "__main__":
     deepgram = DeepgramTTS()
     text = "This is a test of the DeepgramTTS text-to-speech API. It supports multiple sentences. Let's see how it works!"
 
-
-    audio_file = deepgram.tts(text, voice="
-
-    deepgram.logger.info("Playing audio...")
-    deepgram.play_audio(audio_file)
+    print("[debug] Generating audio...")
+    audio_file = deepgram.tts(text, voice="Asteria")
+    print(f"Audio saved to: {audio_file}")
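Across the 7.6 TTS providers, the shared ./audio_cache directory and the Litlogger instance are dropped in favour of plain debug prints and a per-instance temporary directory. A minimal standard-library sketch of the pattern the added lines introduce (the variable names mirror the diff; none of this is webscout public API):

# Sketch of the tempfile pattern that replaces ./audio_cache in 7.6.
import pathlib
import tempfile

# One throwaway directory per provider instance, created in __init__.
temp_dir = tempfile.mkdtemp(prefix="webscout_tts_")

# Each tts() call reserves a unique .mp3 path inside that directory.
# tempfile.mktemp() only returns a name; the file itself is created later,
# when the combined audio is written with open(filename, "wb").
filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=temp_dir))
print(filename)  # e.g. /tmp/webscout_tts_abcd1234/tmpxyz.mp3

Nothing in these hunks removes the directory afterwards, so generated audio persists until the operating system clears its temp location.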
webscout/Provider/TTS/elevenlabs.py +14 -40

@@ -1,11 +1,10 @@
 import time
 import requests
 import pathlib
+import tempfile
 from io import BytesIO
-from playsound import playsound
 from webscout import exceptions
 from webscout.AIbase import TTSProvider
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from . import utils
@@ -18,7 +17,6 @@ class ElevenlabsTTS(TTSProvider):
     headers: dict[str, str] = {
         "User-Agent": LitAgent().random()
     }
-    cache_dir = pathlib.Path("./audio_cache")
     all_voices: dict[str, str] = {"Brian": "nPczCjzI2devNBz1zQrb", "Alice":"Xb7hH8MSUJpSbSDYk0k2", "Bill":"pqHfZKP75CvOlQylNhV4", "Callum":"N2lVS1w4EtoT3dr4eOWO", "Charlie":"IKne3meq5aSn9XLyUdCD", "Charlotte":"XB0fDUnXU5powFXDhCwa", "Chris":"iP95p4xoKVk53GoZ742B", "Daniel":"onwK4e9ZLuTAKqWW03F9", "Eric":"cjVigY5qzO86Huf0OWal", "George":"JBFqnCBsd6RMkjVDRZzb", "Jessica":"cgSgspJ2msm6clMCkdW9", "Laura":"FGY2WhTYpPnrIDTdsKH5", "Liam":"TX3LPaxmHKxFdv7VOQHJ", "Lily":"pFZP5JQG7iQjIQuC4Bku", "Matilda":"XrExE9yKIg1WjnnlVkGX", "Sarah":"EXAVITQu4vr4xnSDxMaL", "Will":"bIHbv24MWmeRgasZH58o"}
 
     def __init__(self, timeout: int = 20, proxies: dict = None):
@@ -29,11 +27,7 @@ class ElevenlabsTTS(TTSProvider):
             self.session.proxies.update(proxies)
         self.timeout = timeout
         self.params = {'allow_unauthenticated': '1'}
-        self.logger = Logger(
-            name="ElevenlabsTTS",
-            format=LogFormat.MODERN_EMOJI,
-
-        )
+        self.temp_dir = tempfile.mkdtemp(prefix="webscout_tts_")
 
     def tts(self, text: str, voice: str = "Brian", verbose:bool = True) -> str:
         """
@@ -43,7 +37,7 @@ class ElevenlabsTTS(TTSProvider):
             voice in self.all_voices
         ), f"Voice '{voice}' not one of [{', '.join(self.all_voices.keys())}]"
 
-        filename =
+        filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=self.temp_dir))
 
         # Split text into sentences
         sentences = utils.split_sentences(text)
@@ -56,20 +50,17 @@ class ElevenlabsTTS(TTSProvider):
                 response = self.session.post(f'https://api.elevenlabs.io/v1/text-to-speech/{self.all_voices[voice]}',params=self.params, headers=self.headers, json=json_data, timeout=self.timeout)
                 response.raise_for_status()
 
-                # Create the audio_cache directory if it doesn't exist
-                self.cache_dir.mkdir(parents=True, exist_ok=True)
-
                 # Check if the request was successful
                 if response.ok and response.status_code == 200:
                     if verbose:
-
+                        print(f"[debug] Chunk {part_number} processed successfully")
                     return part_number, response.content
                 else:
                     if verbose:
-
+                        print(f"[debug] No data received for chunk {part_number}. Retrying...")
             except requests.RequestException as e:
                 if verbose:
-
+                    print(f"[debug] Error for chunk {part_number}: {e}. Retrying...")
                 time.sleep(1)
         try:
             # Using ThreadPoolExecutor to handle requests concurrently
@@ -87,51 +78,34 @@ class ElevenlabsTTS(TTSProvider):
                     audio_chunks[part_number] = audio_data  # Store the audio data in correct sequence
                 except Exception as e:
                     if verbose:
-
+                        print(f"[debug] Failed to generate audio for chunk {chunk_num}: {e}")
 
             # Combine audio chunks in the correct sequence
             combined_audio = BytesIO()
             for part_number in sorted(audio_chunks.keys()):
                 combined_audio.write(audio_chunks[part_number])
                 if verbose:
-
+                    print(f"[debug] Added chunk {part_number} to the combined file.")
 
             # Save the combined audio data to a single file
             with open(filename, 'wb') as f:
                 f.write(combined_audio.getvalue())
             if verbose:
-
+                print(f"[debug] Final Audio Saved as {filename}")
             return filename.as_posix()
 
         except requests.exceptions.RequestException as e:
-
+            if verbose:
+                print(f"[debug] Failed to perform the operation: {e}")
             raise exceptions.FailedToGenerateResponseError(
                 f"Failed to perform the operation: {e}"
             )
-
-    def play_audio(self, filename: str):
-        """
-        Plays an audio file using playsound.
-
-        Args:
-            filename (str): The path to the audio file.
-
-        Raises:
-            RuntimeError: If there is an error playing the audio.
-        """
-        try:
-            playsound(filename)
-        except Exception as e:
-            self.logger.error(f"Error playing audio: {e} 🔇")
-            raise RuntimeError(f"Error playing audio: {e}")
 
 # Example usage
 if __name__ == "__main__":
     elevenlabs = ElevenlabsTTS()
     text = "This is a test of the ElevenlabsTTS text-to-speech API. It supports multiple sentences and advanced logging."
 
-
-    audio_file = elevenlabs.tts(text, voice="Brian")
-
-    elevenlabs.logger.info("Playing audio...")
-    elevenlabs.play_audio(audio_file)
+    print("[debug] Generating audio...")
+    audio_file = elevenlabs.tts(text, voice="Brian")
+    print(f"Audio saved to: {audio_file}")
webscout/Provider/TTS/gesserit.py +11 -35

@@ -3,10 +3,8 @@ import requests
 import pathlib
 import base64
 from io import BytesIO
-from playsound import playsound
 from webscout import exceptions
 from webscout.AIbase import TTSProvider
-from webscout.Litlogger import Logger, LogFormat
 from webscout.litagent import LitAgent
 from concurrent.futures import ThreadPoolExecutor, as_completed
 from . import utils
@@ -37,11 +35,6 @@ class GesseritTTS(TTSProvider):
         if proxies:
             self.session.proxies.update(proxies)
         self.timeout = timeout
-        self.logger = Logger(
-            name="GesseritTTS",
-            format=LogFormat.MODERN_EMOJI,
-
-        )
 
     def tts(self, text: str, voice: str = "Oliver", verbose:bool = True) -> str:
         """Converts text to speech using the GesseritTTS API and saves it to a file."""
@@ -76,14 +69,14 @@ class GesseritTTS(TTSProvider):
                     audio_base64 = data["audioUrl"].split(",")[1]
                     audio_data = base64.b64decode(audio_base64)
                     if verbose:
-
+                        print(f"[debug] Chunk {part_number} processed successfully")
                     return part_number, audio_data
                 else:
                     if verbose:
-
+                        print(f"[debug] No data received for chunk {part_number}. Retrying...")
             except requests.RequestException as e:
                 if verbose:
-
+                    print(f"[debug] Error for chunk {part_number}: {e}. Retrying...")
                 time.sleep(1)
         try:
             # Using ThreadPoolExecutor to handle requests concurrently
@@ -101,51 +94,34 @@ class GesseritTTS(TTSProvider):
                     audio_chunks[part_number] = audio_data  # Store the audio data in correct sequence
                 except Exception as e:
                     if verbose:
-
+                        print(f"[debug] Failed to generate audio for chunk {chunk_num}: {e}")
 
             # Combine audio chunks in the correct sequence
             combined_audio = BytesIO()
             for part_number in sorted(audio_chunks.keys()):
                 combined_audio.write(audio_chunks[part_number])
                 if verbose:
-
+                    print(f"[debug] Added chunk {part_number} to the combined file.")
 
             # Save the combined audio data to a single file
             with open(filename, 'wb') as f:
                 f.write(combined_audio.getvalue())
             if verbose:
-
+                print(f"[debug] Final Audio Saved as {filename}")
             return filename.as_posix()
 
         except requests.exceptions.RequestException as e:
-
+            if verbose:
+                print(f"[debug] Failed to perform the operation: {e}")
             raise exceptions.FailedToGenerateResponseError(
                 f"Failed to perform the operation: {e}"
             )
-
-    def play_audio(self, filename: str):
-        """
-        Plays an audio file using playsound.
-
-        Args:
-            filename (str): The path to the audio file.
-
-        Raises:
-            RuntimeError: If there is an error playing the audio.
-        """
-        try:
-            playsound(filename)
-        except Exception as e:
-            self.logger.error(f"Error playing audio: {e} 🔇")
-            raise RuntimeError(f"Error playing audio: {e}")
 
 # Example usage
 if __name__ == "__main__":
     gesserit = GesseritTTS()
     text = "This is a test of the GesseritTTS text-to-speech API. It supports multiple sentences and advanced logging."
 
-
-    audio_file = gesserit.tts(text, voice="Oliver")
-
-    gesserit.logger.info("Playing audio...")
-    gesserit.play_audio(audio_file)
+    print("[debug] Generating audio...")
+    audio_file = gesserit.tts(text, voice="Oliver")
+    print(f"Audio saved to: {audio_file}")
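With play_audio() and the playsound import removed from DeepgramTTS, ElevenlabsTTS, and GesseritTTS alike, 7.6 only returns the path to the saved .mp3 and leaves playback to the caller. A minimal caller-side sketch, assuming playsound (the dependency these classes dropped) is installed separately; any other audio player works just as well:

# Hypothetical caller-side playback for webscout 7.6 TTS providers.
# Assumes `pip install playsound`; webscout 7.6 itself no longer depends on it.
from playsound import playsound

from webscout.Provider.TTS.elevenlabs import ElevenlabsTTS

tts = ElevenlabsTTS()
audio_file = tts.tts("Playback is handled by the caller in 7.6.", voice="Brian")
playsound(audio_file)  # plays the saved .mp3; swap in your preferred player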