webscout-8.3.5-py3-none-any.whl → webscout-8.3.6-py3-none-any.whl
This diff compares the contents of publicly released package versions as they appear in their public registries and is provided for informational purposes only.
Potentially problematic release: this version of webscout has been flagged as possibly problematic; see the registry listing for details.
- webscout/Bard.py +12 -6
- webscout/DWEBS.py +66 -57
- webscout/Provider/{UNFINISHED → AISEARCH}/PERPLEXED_search.py +34 -74
- webscout/Provider/AISEARCH/__init__.py +1 -1
- webscout/Provider/Deepinfra.py +6 -0
- webscout/Provider/Flowith.py +6 -1
- webscout/Provider/GithubChat.py +1 -0
- webscout/Provider/GptOss.py +207 -0
- webscout/Provider/Kimi.py +445 -0
- webscout/Provider/Netwrck.py +3 -6
- webscout/Provider/OPENAI/README.md +2 -1
- webscout/Provider/OPENAI/TogetherAI.py +50 -55
- webscout/Provider/OPENAI/__init__.py +4 -2
- webscout/Provider/OPENAI/copilot.py +20 -4
- webscout/Provider/OPENAI/deepinfra.py +6 -0
- webscout/Provider/OPENAI/e2b.py +60 -8
- webscout/Provider/OPENAI/flowith.py +4 -3
- webscout/Provider/OPENAI/generate_api_key.py +48 -0
- webscout/Provider/OPENAI/gptoss.py +288 -0
- webscout/Provider/OPENAI/kimi.py +469 -0
- webscout/Provider/OPENAI/netwrck.py +8 -12
- webscout/Provider/OPENAI/refact.py +274 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -6
- webscout/Provider/OPENAI/toolbaz.py +1 -0
- webscout/Provider/TTI/bing.py +14 -2
- webscout/Provider/TTI/together.py +10 -9
- webscout/Provider/TTS/README.md +0 -1
- webscout/Provider/TTS/__init__.py +0 -1
- webscout/Provider/TTS/base.py +479 -159
- webscout/Provider/TTS/deepgram.py +409 -156
- webscout/Provider/TTS/elevenlabs.py +425 -111
- webscout/Provider/TTS/freetts.py +317 -140
- webscout/Provider/TTS/gesserit.py +192 -128
- webscout/Provider/TTS/murfai.py +248 -113
- webscout/Provider/TTS/openai_fm.py +347 -129
- webscout/Provider/TTS/speechma.py +620 -586
- webscout/Provider/TextPollinationsAI.py +3 -6
- webscout/Provider/TogetherAI.py +50 -55
- webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
- webscout/Provider/__init__.py +2 -90
- webscout/Provider/cerebras.py +83 -33
- webscout/Provider/copilot.py +42 -23
- webscout/Provider/toolbaz.py +1 -0
- webscout/conversation.py +22 -20
- webscout/sanitize.py +14 -10
- webscout/scout/README.md +20 -23
- webscout/scout/core/crawler.py +125 -38
- webscout/scout/core/scout.py +26 -5
- webscout/version.py +1 -1
- webscout/webscout_search.py +13 -6
- webscout/webscout_search_async.py +10 -8
- webscout/yep_search.py +13 -5
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/METADATA +2 -1
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/RECORD +59 -56
- webscout/Provider/Glider.py +0 -225
- webscout/Provider/OPENAI/c4ai.py +0 -394
- webscout/Provider/OPENAI/glider.py +0 -330
- webscout/Provider/TTS/sthir.py +0 -94
- /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.5.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
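Several provider modules are added, removed, or relocated in this release (Kimi, GptOss, and refact appear; Glider, c4ai, glider, and the sthir TTS provider disappear; samurai.py moves under UNFINISHED). The sketch below is one way to check which of these module paths import after upgrading; the dotted paths are inferred directly from the wheel paths listed above, and since the classes each module exports are not shown in this diff, only module-level imports are probed.

    import importlib

    # Dotted module paths inferred from the wheel file paths in the listing above.
    added = [
        "webscout.Provider.Kimi",
        "webscout.Provider.GptOss",
        "webscout.Provider.OPENAI.refact",
    ]
    removed = [
        "webscout.Provider.Glider",
        "webscout.Provider.OPENAI.c4ai",
        "webscout.Provider.TTS.sthir",
    ]

    for name in added + removed:
        try:
            importlib.import_module(name)
            print(f"importable:     {name}")
        except ImportError as exc:
            print(f"not importable: {name} ({exc})")

Run against an 8.3.6 install, the "added" paths should import and the "removed" ones should fail, which is a quick way to spot code that still depends on the dropped providers.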
webscout/Provider/TTS/freetts.py
CHANGED
@@ -1,140 +1,317 @@
[140 lines removed: the previous FreeTTS implementation]
[317 lines added: the new implementation, reproduced below]

##################################################################################
## FreeTTS Provider ##
##################################################################################
import os
import requests
from datetime import datetime
from webscout.Provider.TTS.base import BaseTTSProvider
from webscout.litagent import LitAgent


class FreeTTS(BaseTTSProvider):
    """
    Text-to-speech provider using the FreeTTS API with OpenAI-compatible interface.

    This provider follows the OpenAI TTS API structure with support for:
    - Multiple TTS models (gpt-4o-mini-tts, tts-1, tts-1-hd)
    - Dynamic voice loading based on language
    - Voice instructions for controlling speech aspects
    - Multiple output formats
    - Streaming support
    """

    headers = {
        "accept": "*/*",
        "accept-language": "ru-RU,ru;q=0.8",
        "cache-control": "no-cache",
        "pragma": "no-cache",
        "sec-ch-ua": '"Brave";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": '"Windows"',
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "sec-gpc": "1",
        "User-Agent": LitAgent().random()
    }

    # Override supported models for FreeTTS
    SUPPORTED_MODELS = None

    # Override supported voices (will be loaded dynamically)
    SUPPORTED_VOICES = []

    # Override supported formats
    SUPPORTED_FORMATS = [
        "mp3",  # Default format for FreeTTS
        "wav",  # Alternative format
        "aac"   # Additional format support
    ]

    def __init__(self, lang="ru-RU", timeout: int = 30, proxies: dict = None):
        """
        Initialize the FreeTTS TTS client.

        Args:
            lang (str): Language code for voice selection
            timeout (int): Request timeout in seconds
            proxies (dict): Proxy configuration
        """
        super().__init__()
        self.lang = lang
        self.url = "https://freetts.ru/api/v1/tts"
        self.select_url = "https://freetts.ru/api/v1/select"
        self.audio_base_url = "https://freetts.ru"
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        if proxies:
            self.session.proxies.update(proxies)
        self.timeout = timeout
        self.voices = {}
        self.load_voices()
        # Set default voice to first available
        self.default_voice = next(iter(self.voices.keys())) if self.voices else "ru-RU001"

    def load_voices(self):
        """Load voice data and format it appropriately"""
        try:
            response = self.session.get(self.select_url, timeout=self.timeout)
            if response.status_code == 200:
                data = response.json()
                voices_data = data["data"]["voice"]

                if isinstance(voices_data, list):
                    for voice_info in voices_data:
                        if isinstance(voice_info, dict):
                            voice_id = voice_info.get("code")
                            voice_name = voice_info.get("name", voice_id)
                            if voice_id and voice_id.startswith(self.lang):
                                self.voices[voice_id] = voice_name
                                # Add to supported voices list
                                if voice_id not in self.SUPPORTED_VOICES:
                                    self.SUPPORTED_VOICES.append(voice_id)
                print("Voices loaded successfully")
            else:
                print(f"HTTP Error: {response.status_code}")
        except Exception as e:
            print(f"Error loading voices: {e}")

    def get_available_voices(self):
        """Return all available voices in string format"""
        if not self.voices:
            return "No voices available"
        voices_list = [f"{voice_id}: {name}" for voice_id, name in self.voices.items()]
        return "\n".join(voices_list)

    def validate_voice(self, voice: str) -> str:
        """
        Validate and return the voice ID.

        Args:
            voice (str): Voice ID to validate

        Returns:
            str: Validated voice ID

        Raises:
            ValueError: If voice is not supported
        """
        if voice not in self.voices:
            raise ValueError(f"Voice '{voice}' not supported. Available voices: {', '.join(self.voices.keys())}")
        return voice

    def tts(
        self,
        text: str,
        model: str = "gpt-4o-mini-tts",
        voice: str = None,
        response_format: str = "mp3",
        instructions: str = None,
        verbose: bool = True
    ) -> str:
        """
        Convert text to speech using FreeTTS API with OpenAI-compatible parameters.

        Args:
            text (str): The text to convert to speech
            model (str): The TTS model to use (gpt-4o-mini-tts, tts-1, tts-1-hd)
            voice (str): Voice ID to use for TTS (default: first available)
            response_format (str): Audio format (mp3, wav, aac)
            instructions (str): Voice instructions (not used by FreeTTS but kept for compatibility)
            verbose (bool): Whether to print debug information

        Returns:
            str: Path to the generated audio file

        Raises:
            ValueError: If input parameters are invalid
            RuntimeError: If there is an error generating or saving the audio
        """
        # Validate input parameters
        if not text or not isinstance(text, str):
            raise ValueError("Input text must be a non-empty string")
        if len(text) > 10000:
            raise ValueError("Input text exceeds maximum allowed length of 10,000 characters")

        # Validate model and format using base class methods
        model = self.validate_model(model)
        response_format = self.validate_format(response_format)

        # Use default voice if not provided
        if voice is None:
            voice = self.default_voice

        # Validate voice
        voice = self.validate_voice(voice)

        try:
            if not self.voices:
                raise RuntimeError(f"No voices available for language '{self.lang}'")

            available_voices = self.get_available_voices()
            if not available_voices:
                if verbose:
                    print(f"No available voices for language '{self.lang}'")
                return ""

            payload = {
                "text": text,
                "voiceid": voice,
                "model": model,
                "format": response_format
            }

            response = self.session.post(self.url, json=payload, headers=self.headers, timeout=self.timeout)

            if response.status_code == 200:
                data = response.json()
                mp3_path = data.get("data", {}).get("src", "")

                if not mp3_path:
                    raise RuntimeError("Audio file path not found in response")

                mp3_url = self.audio_base_url + mp3_path

                # Create filename with appropriate extension
                file_extension = f".{response_format}" if response_format != "pcm" else ".wav"
                filename = datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + file_extension
                full_path = os.path.abspath(filename)

                with requests.get(mp3_url, stream=True, timeout=self.timeout) as r:
                    r.raise_for_status()
                    with open(filename, "wb") as f:
                        for chunk in r.iter_content(chunk_size=1024):
                            f.write(chunk)

                if verbose:
                    print(f"[debug] Speech generated successfully")
                    print(f"[debug] Model: {model}")
                    print(f"[debug] Voice: {voice}")
                    print(f"[debug] Format: {response_format}")
                    print(f"[debug] Audio saved to: {filename}")

                return full_path
            else:
                raise RuntimeError(f"API request failed with status code: {response.status_code}")

        except Exception as e:
            if verbose:
                print(f"[debug] Error generating speech: {e}")
            raise RuntimeError(f"Failed to generate speech: {str(e)}")

    def create_speech(
        self,
        input: str,
        model: str = "gpt-4o-mini-tts",
        voice: str = None,
        response_format: str = "mp3",
        instructions: str = None,
        verbose: bool = False
    ) -> str:
        """
        OpenAI-compatible speech creation interface.

        Args:
            input (str): The text to convert to speech
            model (str): The TTS model to use
            voice (str): The voice to use
            response_format (str): Audio format
            instructions (str): Voice instructions (not used by FreeTTS)
            verbose (bool): Whether to print debug information

        Returns:
            str: Path to the generated audio file
        """
        return self.tts(
            text=input,
            model=model,
            voice=voice,
            response_format=response_format,
            instructions=instructions,
            verbose=verbose
        )

    def stream_audio(
        self,
        input: str,
        model: str = "gpt-4o-mini-tts",
        voice: str = None,
        response_format: str = "mp3",
        instructions: str = None,
        chunk_size: int = 1024,
        verbose: bool = False
    ):
        """
        Stream audio response in chunks.

        Args:
            input (str): The text to convert to speech
            model (str): The TTS model to use
            voice (str): The voice to use
            response_format (str): Audio format
            instructions (str): Voice instructions
            chunk_size (int): Size of audio chunks to yield
            verbose (bool): Whether to print debug information

        Yields:
            bytes: Audio data chunks
        """
        # Generate the audio file using create_speech
        audio_file = self.create_speech(
            input=input,
            model=model,
            voice=voice,
            response_format=response_format,
            instructions=instructions,
            verbose=verbose
        )

        # Stream the file in chunks
        with open(audio_file, 'rb') as f:
            while chunk := f.read(chunk_size):
                yield chunk


# Example usage
if __name__ == "__main__":
    # Initialize the FreeTTS client
    tts_client = FreeTTS(lang="ru-RU")

    # Print available voices
    print("Available voices:")
    print(tts_client.get_available_voices())

    # Convert text to speech
    try:
        audio_file = tts_client.create_speech(
            input="Привет, как дела?",
            model="gpt-4o-mini-tts",
            voice="ru-RU001",
            response_format="mp3",
            verbose=True
        )
        print(f"Audio saved to: {audio_file}")
    except Exception as e:
        print(f"Error: {e}")
    except Exception as e:
        print(f"Error: {e}")