webscout-8.3.4-py3-none-any.whl → webscout-8.3.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (98):
  1. webscout/AIutel.py +52 -1016
  2. webscout/Bard.py +12 -6
  3. webscout/DWEBS.py +66 -57
  4. webscout/Provider/AISEARCH/PERPLEXED_search.py +214 -0
  5. webscout/Provider/AISEARCH/__init__.py +11 -10
  6. webscout/Provider/AISEARCH/felo_search.py +7 -3
  7. webscout/Provider/AISEARCH/scira_search.py +2 -0
  8. webscout/Provider/AISEARCH/stellar_search.py +53 -8
  9. webscout/Provider/Deepinfra.py +13 -1
  10. webscout/Provider/Flowith.py +6 -1
  11. webscout/Provider/GithubChat.py +1 -0
  12. webscout/Provider/GptOss.py +207 -0
  13. webscout/Provider/Kimi.py +445 -0
  14. webscout/Provider/Netwrck.py +3 -6
  15. webscout/Provider/OPENAI/README.md +2 -1
  16. webscout/Provider/OPENAI/TogetherAI.py +12 -8
  17. webscout/Provider/OPENAI/TwoAI.py +94 -1
  18. webscout/Provider/OPENAI/__init__.py +4 -4
  19. webscout/Provider/OPENAI/copilot.py +20 -4
  20. webscout/Provider/OPENAI/deepinfra.py +12 -0
  21. webscout/Provider/OPENAI/e2b.py +60 -8
  22. webscout/Provider/OPENAI/flowith.py +4 -3
  23. webscout/Provider/OPENAI/generate_api_key.py +48 -0
  24. webscout/Provider/OPENAI/gptoss.py +288 -0
  25. webscout/Provider/OPENAI/kimi.py +469 -0
  26. webscout/Provider/OPENAI/netwrck.py +8 -12
  27. webscout/Provider/OPENAI/refact.py +274 -0
  28. webscout/Provider/OPENAI/scirachat.py +4 -0
  29. webscout/Provider/OPENAI/textpollinations.py +11 -10
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/OPENAI/venice.py +1 -0
  32. webscout/Provider/Perplexitylabs.py +163 -147
  33. webscout/Provider/Qodo.py +30 -6
  34. webscout/Provider/TTI/__init__.py +1 -0
  35. webscout/Provider/TTI/bing.py +14 -2
  36. webscout/Provider/TTI/together.py +11 -9
  37. webscout/Provider/TTI/venice.py +368 -0
  38. webscout/Provider/TTS/README.md +0 -1
  39. webscout/Provider/TTS/__init__.py +0 -1
  40. webscout/Provider/TTS/base.py +479 -159
  41. webscout/Provider/TTS/deepgram.py +409 -156
  42. webscout/Provider/TTS/elevenlabs.py +425 -111
  43. webscout/Provider/TTS/freetts.py +317 -140
  44. webscout/Provider/TTS/gesserit.py +192 -128
  45. webscout/Provider/TTS/murfai.py +248 -113
  46. webscout/Provider/TTS/openai_fm.py +347 -129
  47. webscout/Provider/TTS/speechma.py +620 -586
  48. webscout/Provider/TextPollinationsAI.py +11 -10
  49. webscout/Provider/TogetherAI.py +12 -4
  50. webscout/Provider/TwoAI.py +96 -2
  51. webscout/Provider/TypliAI.py +33 -27
  52. webscout/Provider/UNFINISHED/VercelAIGateway.py +339 -0
  53. webscout/Provider/UNFINISHED/fetch_together_models.py +6 -11
  54. webscout/Provider/Venice.py +1 -0
  55. webscout/Provider/WiseCat.py +18 -20
  56. webscout/Provider/__init__.py +2 -96
  57. webscout/Provider/cerebras.py +83 -33
  58. webscout/Provider/copilot.py +42 -23
  59. webscout/Provider/scira_chat.py +4 -0
  60. webscout/Provider/toolbaz.py +6 -10
  61. webscout/Provider/typefully.py +1 -11
  62. webscout/__init__.py +3 -15
  63. webscout/auth/__init__.py +19 -4
  64. webscout/auth/api_key_manager.py +189 -189
  65. webscout/auth/auth_system.py +25 -40
  66. webscout/auth/config.py +105 -6
  67. webscout/auth/database.py +377 -22
  68. webscout/auth/models.py +185 -130
  69. webscout/auth/request_processing.py +175 -11
  70. webscout/auth/routes.py +99 -2
  71. webscout/auth/server.py +9 -2
  72. webscout/auth/simple_logger.py +236 -0
  73. webscout/conversation.py +22 -20
  74. webscout/sanitize.py +1078 -0
  75. webscout/scout/README.md +20 -23
  76. webscout/scout/core/crawler.py +125 -38
  77. webscout/scout/core/scout.py +26 -5
  78. webscout/version.py +1 -1
  79. webscout/webscout_search.py +13 -6
  80. webscout/webscout_search_async.py +10 -8
  81. webscout/yep_search.py +13 -5
  82. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/METADATA +10 -149
  83. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/RECORD +88 -87
  84. webscout/Provider/Glider.py +0 -225
  85. webscout/Provider/OPENAI/README_AUTOPROXY.md +0 -238
  86. webscout/Provider/OPENAI/c4ai.py +0 -394
  87. webscout/Provider/OPENAI/glider.py +0 -330
  88. webscout/Provider/OPENAI/typegpt.py +0 -368
  89. webscout/Provider/OPENAI/uncovrAI.py +0 -477
  90. webscout/Provider/TTS/sthir.py +0 -94
  91. webscout/Provider/WritingMate.py +0 -273
  92. webscout/Provider/typegpt.py +0 -284
  93. webscout/Provider/uncovr.py +0 -333
  94. /webscout/Provider/{samurai.py → UNFINISHED/samurai.py} +0 -0
  95. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/WHEEL +0 -0
  96. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/entry_points.txt +0 -0
  97. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/licenses/LICENSE.md +0 -0
  98. {webscout-8.3.4.dist-info → webscout-8.3.6.dist-info}/top_level.txt +0 -0
@@ -1,477 +0,0 @@
1
- import time
2
- import uuid
3
- import re
4
- import json
5
- import cloudscraper
6
- from typing import List, Dict, Optional, Union, Generator, Any
7
-
8
- from webscout.litagent import LitAgent
9
- from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
10
- from .utils import (
11
- ChatCompletion,
12
- ChatCompletionChunk,
13
- Choice,
14
- ChatCompletionMessage,
15
- ChoiceDelta,
16
- CompletionUsage,
17
- format_prompt,
18
- get_system_prompt,
19
- get_last_user_message,
20
- count_tokens
21
- )
22
-
23
- # ANSI escape codes for formatting
24
- BOLD = "\033[1m"
25
- RED = "\033[91m"
26
- RESET = "\033[0m"
27
-
28
- class Completions(BaseCompletions):
29
- def __init__(self, client: 'UncovrAI'):
30
- self._client = client
31
-
32
- def create(
33
- self,
34
- *,
35
- model: str,
36
- messages: List[Dict[str, str]],
37
- max_tokens: Optional[int] = None,
38
- stream: bool = False,
39
- temperature: Optional[float] = None,
40
- top_p: Optional[float] = None,
41
- timeout: Optional[int] = None,
42
- proxies: Optional[Dict[str, str]] = None,
43
- **kwargs: Any
44
- ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
45
- """
46
- Create a chat completion using the UncovrAI API.
47
-
48
- Args:
49
- model: The model to use for completion
50
- messages: A list of messages in the conversation
51
- max_tokens: Maximum number of tokens to generate
52
- stream: Whether to stream the response
53
- temperature: Controls randomness (mapped to UncovrAI's temperature)
54
- top_p: Controls diversity (not directly used by UncovrAI)
55
- **kwargs: Additional parameters
56
-
57
- Returns:
58
- A ChatCompletion object or a generator of ChatCompletionChunk objects
59
- """
60
- # Validate model
61
- if model not in self._client.AVAILABLE_MODELS:
62
- raise ValueError(f"Invalid model: {model}. Choose from: {self._client.AVAILABLE_MODELS}")
63
-
64
- # Map temperature to UncovrAI's scale (0-100)
65
- # Default to 32 (medium) if not provided
66
- uncovr_temperature = 32
67
- if temperature is not None:
68
- # Map from 0-1 scale to 0-100 scale
69
- uncovr_temperature = int(temperature * 100)
70
- # Ensure it's within bounds
71
- uncovr_temperature = max(0, min(100, uncovr_temperature))
72
-
73
- # Map creativity from kwargs or use default
74
- creativity = kwargs.get("creativity", "medium")
75
-
76
- # Get focus and tools from kwargs or use defaults
77
- selected_focus = kwargs.get("selected_focus", ["web"])
78
- selected_tools = kwargs.get("selected_tools", ["quick-cards"])
79
-
80
- # Generate request ID and timestamp
81
- request_id = str(uuid.uuid4())
82
- created_time = int(time.time())
83
-
84
- # Format the conversation using utility functions
85
- conversation_prompt = format_prompt(messages, add_special_tokens=False, do_continue=True)
86
-
87
- # Prepare the request payload
88
- payload = {
89
- "content": conversation_prompt,
90
- "chatId": self._client.chat_id,
91
- "userMessageId": str(uuid.uuid4()),
92
- "ai_config": {
93
- "selectedFocus": selected_focus,
94
- "selectedTools": selected_tools,
95
- "agentId": "chat",
96
- "modelId": model,
97
- "temperature": uncovr_temperature,
98
- "creativity": creativity
99
- }
100
- }
101
-
102
- # Handle streaming response
103
- if stream:
104
- return self._handle_streaming_response(
105
- payload=payload,
106
- model=model,
107
- request_id=request_id,
108
- created_time=created_time,
109
- timeout=timeout,
110
- proxies=proxies
111
- )
112
-
113
- # Handle non-streaming response
114
- return self._handle_non_streaming_response(
115
- payload=payload,
116
- model=model,
117
- request_id=request_id,
118
- created_time=created_time,
119
- timeout=timeout,
120
- proxies=proxies
121
- )
122
-
123
- def _handle_streaming_response(
124
- self,
125
- *,
126
- payload: Dict[str, Any],
127
- model: str,
128
- request_id: str,
129
- created_time: int,
130
- timeout: Optional[int] = None,
131
- proxies: Optional[Dict[str, str]] = None
132
- ) -> Generator[ChatCompletionChunk, None, None]:
133
- """Handle streaming response from UncovrAI API."""
134
- try:
135
- with self._client.session.post(
136
- self._client.url,
137
- json=payload,
138
- stream=True,
139
- timeout=timeout or self._client.timeout,
140
- proxies=proxies or getattr(self._client, "proxies", None)
141
- ) as response:
142
- if response.status_code != 200:
143
- # If we get a non-200 response, try refreshing our identity once
144
- if response.status_code in [403, 429]:
145
- self._client.refresh_identity()
146
- # Retry with new identity
147
- with self._client.session.post(
148
- self._client.url,
149
- json=payload,
150
- stream=True,
151
- timeout=timeout or self._client.timeout,
152
- proxies=proxies or getattr(self._client, "proxies", None)
153
- ) as retry_response:
154
- if not retry_response.ok:
155
- raise IOError(
156
- f"Failed to generate response after identity refresh - "
157
- f"({retry_response.status_code}, {retry_response.reason}) - "
158
- f"{retry_response.text}"
159
- )
160
- response = retry_response
161
- else:
162
- raise IOError(f"Request failed with status code {response.status_code}")
163
-
164
- # Process the streaming response
165
- streaming_text = ""
166
- for line in response.iter_lines():
167
- if line:
168
- try:
169
- line = line.decode('utf-8')
170
-
171
- # Use regex to match content messages
172
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
173
- if content_match: # Content message
174
- content = content_match.group(1)
175
- # Format the content to handle escape sequences
176
- content = self._client.format_text(content)
177
- streaming_text += content
178
-
179
- # Create a chunk for this part of the response
180
- delta = ChoiceDelta(content=content)
181
- choice = Choice(
182
- index=0,
183
- delta=delta,
184
- finish_reason=None
185
- )
186
- chunk = ChatCompletionChunk(
187
- id=request_id,
188
- choices=[choice],
189
- created=created_time,
190
- model=model
191
- )
192
-
193
- yield chunk
194
-
195
- # Check for error messages
196
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
197
- if error_match:
198
- error_msg = error_match.group(1)
199
- raise IOError(f"API Error: {error_msg}")
200
-
201
- except (json.JSONDecodeError, UnicodeDecodeError):
202
- continue
203
-
204
- # Yield a final chunk with finish_reason="stop"
205
- delta = ChoiceDelta()
206
- choice = Choice(
207
- index=0,
208
- delta=delta,
209
- finish_reason="stop"
210
- )
211
- chunk = ChatCompletionChunk(
212
- id=request_id,
213
- choices=[choice],
214
- created=created_time,
215
- model=model
216
- )
217
- yield chunk
218
-
219
- except Exception as e:
220
- print(f"{RED}Error during UncovrAI streaming request: {e}{RESET}")
221
- raise IOError(f"UncovrAI streaming request failed: {e}") from e
222
-
223
- def _handle_non_streaming_response(
224
- self,
225
- *,
226
- payload: Dict[str, Any],
227
- model: str,
228
- request_id: str,
229
- created_time: int,
230
- timeout: Optional[int] = None,
231
- proxies: Optional[Dict[str, str]] = None
232
- ) -> ChatCompletion:
233
- """Handle non-streaming response from UncovrAI API."""
234
- try:
235
- response = self._client.session.post(
236
- self._client.url,
237
- json=payload,
238
- timeout=timeout or self._client.timeout,
239
- proxies=proxies or getattr(self._client, "proxies", None)
240
- )
241
-
242
- if response.status_code != 200:
243
- if response.status_code in [403, 429]:
244
- self._client.refresh_identity()
245
- response = self._client.session.post(
246
- self._client.url,
247
- json=payload,
248
- timeout=timeout or self._client.timeout,
249
- proxies=proxies or getattr(self._client, "proxies", None)
250
- )
251
- if not response.ok:
252
- raise IOError(
253
- f"Failed to generate response after identity refresh - "
254
- f"({response.status_code}, {response.reason}) - "
255
- f"{response.text}"
256
- )
257
- else:
258
- raise IOError(f"Request failed with status code {response.status_code}")
259
-
260
- full_response = ""
261
- for line in response.iter_lines():
262
- if line:
263
- try:
264
- line = line.decode('utf-8')
265
- content_match = re.match(r'^0:\s*"?(.*?)"?$', line)
266
- if content_match:
267
- content = content_match.group(1)
268
- full_response += content
269
-
270
- # Check for error messages
271
- error_match = re.match(r'^2:\[{"type":"error","error":"(.*?)"}]$', line)
272
- if error_match:
273
- error_msg = error_match.group(1)
274
- raise IOError(f"API Error: {error_msg}")
275
-
276
- except (json.JSONDecodeError, UnicodeDecodeError):
277
- continue
278
-
279
- # Format the full response to handle escape sequences
280
- full_response = self._client.format_text(full_response)
281
-
282
- # Create message, choice, and usage objects
283
- message = ChatCompletionMessage(
284
- role="assistant",
285
- content=full_response
286
- )
287
-
288
- choice = Choice(
289
- index=0,
290
- message=message,
291
- finish_reason="stop"
292
- )
293
-
294
- # Estimate token usage using count_tokens
295
- prompt_tokens = count_tokens(payload.get("content", ""))
296
- completion_tokens = count_tokens(full_response)
297
- total_tokens = prompt_tokens + completion_tokens
298
-
299
- usage = CompletionUsage(
300
- prompt_tokens=prompt_tokens,
301
- completion_tokens=completion_tokens,
302
- total_tokens=total_tokens
303
- )
304
-
305
- # Create the completion object
306
- completion = ChatCompletion(
307
- id=request_id,
308
- choices=[choice],
309
- created=created_time,
310
- model=model,
311
- usage=usage,
312
- )
313
-
314
- return completion
315
-
316
- except Exception as e:
317
- print(f"{RED}Error during UncovrAI non-stream request: {e}{RESET}")
318
- raise IOError(f"UncovrAI request failed: {e}") from e
319
-
320
- class Chat(BaseChat):
321
- def __init__(self, client: 'UncovrAI'):
322
- self.completions = Completions(client)
323
-
324
- class UncovrAI(OpenAICompatibleProvider):
325
- """
326
- OpenAI-compatible client for Uncovr AI API.
327
-
328
- Usage:
329
- client = UncovrAI()
330
- response = client.chat.completions.create(
331
- model="default",
332
- messages=[{"role": "user", "content": "Hello!"}]
333
- )
334
- print(response.choices[0].message.content)
335
- """
336
-
337
- AVAILABLE_MODELS = [
338
- "default",
339
- "gpt-4o-mini",
340
- "gemini-2-flash",
341
- "gemini-2-flash-lite",
342
- "groq-llama-3-1-8b",
343
- "o3-mini",
344
- "deepseek-r1-distill-qwen-32b",
345
- # The following models are not available in the free plan:
346
- # "claude-3-7-sonnet",
347
- # "gpt-4o",
348
- # "claude-3-5-sonnet-v2",
349
- # "deepseek-r1-distill-llama-70b",
350
- # "gemini-2-flash-lite-preview",
351
- # "qwen-qwq-32b"
352
- ]
353
-
354
- def __init__(
355
- self,
356
- timeout: int = 30,
357
- browser: str = "chrome",
358
- chat_id: Optional[str] = None,
359
- user_id: Optional[str] = None,
360
- proxies: dict = {}
361
- ):
362
- """
363
- Initialize the UncovrAI client.
364
-
365
- Args:
366
- timeout: Request timeout in seconds
367
- browser: Browser name for LitAgent to generate fingerprint
368
- chat_id: Optional chat ID (will generate one if not provided)
369
- user_id: Optional user ID (will generate one if not provided)
370
- proxies: Optional proxy configuration
371
- """
372
- self.url = "https://uncovr.app/api/workflows/chat"
373
- self.timeout = timeout
374
-
375
- # Initialize LitAgent for user agent generation
376
- self.agent = LitAgent()
377
-
378
- # Use fingerprinting to create a consistent browser identity
379
- self.fingerprint = self.agent.generate_fingerprint(browser)
380
-
381
- # Use the fingerprint for headers
382
- self.headers = {
383
- "Accept": self.fingerprint["accept"],
384
- "Accept-Encoding": "gzip, deflate, br, zstd",
385
- "Accept-Language": self.fingerprint["accept_language"],
386
- "Content-Type": "application/json",
387
- "Origin": "https://uncovr.app",
388
- "Referer": "https://uncovr.app/",
389
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
390
- "Sec-CH-UA-Mobile": "?0",
391
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
392
- "User-Agent": self.fingerprint["user_agent"],
393
- "Sec-Fetch-Dest": "empty",
394
- "Sec-Fetch-Mode": "cors",
395
- "Sec-Fetch-Site": "same-origin"
396
- }
397
-
398
- # Use cloudscraper to bypass Cloudflare protection
399
- self.session = cloudscraper.create_scraper()
400
- self.session.headers.update(self.headers)
401
- self.session.proxies.update(proxies)
402
-
403
- # Set chat and user IDs
404
- self.chat_id = chat_id or str(uuid.uuid4())
405
- self.user_id = user_id or f"user_{str(uuid.uuid4())[:8].upper()}"
406
-
407
- # Initialize chat interface
408
- self.chat = Chat(self)
409
-
410
- def refresh_identity(self, browser: str = None):
411
- """
412
- Refreshes the browser identity fingerprint.
413
-
414
- Args:
415
- browser: Specific browser to use for the new fingerprint
416
- """
417
- browser = browser or self.fingerprint.get("browser_type", "chrome")
418
- self.fingerprint = self.agent.generate_fingerprint(browser)
419
-
420
- # Update headers with new fingerprint
421
- self.headers.update({
422
- "Accept": self.fingerprint["accept"],
423
- "Accept-Language": self.fingerprint["accept_language"],
424
- "Sec-CH-UA": self.fingerprint["sec_ch_ua"] or self.headers["Sec-CH-UA"],
425
- "Sec-CH-UA-Platform": f'"{self.fingerprint["platform"]}"',
426
- "User-Agent": self.fingerprint["user_agent"],
427
- })
428
-
429
- # Update session headers
430
- for header, value in self.headers.items():
431
- self.session.headers[header] = value
432
-
433
- return self.fingerprint
434
-
435
- def format_text(self, text: str) -> str:
436
- """
437
- Format text by replacing escaped newlines with actual newlines.
438
-
439
- Args:
440
- text: Text to format
441
-
442
- Returns:
443
- Formatted text
444
- """
445
- # Use a more comprehensive approach to handle all escape sequences
446
- try:
447
- # First handle double backslashes to avoid issues
448
- text = text.replace('\\\\', '\\')
449
-
450
- # Handle common escape sequences
451
- text = text.replace('\\n', '\n')
452
- text = text.replace('\\r', '\r')
453
- text = text.replace('\\t', '\t')
454
- text = text.replace('\\"', '"')
455
- text = text.replace("\\'", "'")
456
-
457
- # Handle any remaining escape sequences using JSON decoding
458
- try:
459
- # Add quotes to make it a valid JSON string
460
- json_str = f'"{text}"'
461
- # Use json module to decode all escape sequences
462
- decoded = json.loads(json_str)
463
- return decoded
464
- except json.JSONDecodeError:
465
- # If JSON decoding fails, return the text with the replacements we've already done
466
- return text
467
- except Exception as e:
468
- # If any error occurs, return the original text
469
- print(f"{RED}Warning: Error formatting text: {e}{RESET}")
470
- return text
471
-
472
- @property
473
- def models(self):
474
- class _ModelList:
475
- def list(inner_self):
476
- return type(self).AVAILABLE_MODELS
477
- return _ModelList()
@@ -1,94 +0,0 @@
1
- import time
2
- import requests
3
- import pathlib
4
- import tempfile
5
- from io import BytesIO
6
- from webscout import exceptions
7
- from webscout.litagent import LitAgent
8
- from concurrent.futures import ThreadPoolExecutor, as_completed
9
- from webscout.Provider.TTS import utils
10
- from webscout.Provider.TTS.base import BaseTTSProvider
11
-
12
- class SthirTTS(BaseTTSProvider):
13
- """
14
- Text-to-speech provider using the Sthir.org TTS API.
15
- """
16
- headers = {
17
- "Content-Type": "application/json",
18
- "User-Agent": LitAgent().random(),
19
- }
20
-
21
- all_voices = {
22
- "aura-luna-en": "Sophie (American, Feminine)",
23
- "aura-stella-en": "Isabella (American, Feminine)",
24
- "aura-athena-en": "Emma (British, Feminine)",
25
- "aura-hera-en": "Victoria (American, Feminine)",
26
- "aura-asteria-en": "Maria (American, Feminine)",
27
- "aura-arcas-en": "Alex (American, Masculine)",
28
- "aura-zeus-en": "Thomas (American, Masculine)",
29
- "aura-perseus-en": "Michael (American, Masculine)",
30
- "aura-angus-en": "Connor (Irish, Masculine)",
31
- "aura-orpheus-en": "James (American, Masculine)",
32
- "aura-helios-en": "William (British, Masculine)",
33
- "aura-orion-en": "Daniel (American, Masculine)",
34
- }
35
-
36
- def __init__(self, timeout: int = 20, proxies: dict = None):
37
- """Initializes the SthirTTS client."""
38
- super().__init__()
39
- self.api_url = "https://sthir.org/com.api/tts-api.php"
40
- self.session = requests.Session()
41
- self.session.headers.update(self.headers)
42
- if proxies:
43
- self.session.proxies.update(proxies)
44
- self.timeout = timeout
45
-
46
- def tts(self, text: str, voice: str = "aura-luna-en") -> str:
47
- """
48
- Converts text to speech using the Sthir.org API and saves it to a file.
49
-
50
- Args:
51
- text (str): The text to convert to speech
52
- voice (str): The voice to use for TTS (default: "aura-luna-en")
53
-
54
- Returns:
55
- str: Path to the generated audio file
56
-
57
- Raises:
58
- exceptions.FailedToGenerateResponseError: If there is an error generating or saving the audio.
59
- """
60
- assert (
61
- voice in self.all_voices
62
- ), f"Voice '{voice}' not one of [{', '.join(self.all_voices.keys())}]"
63
-
64
- filename = pathlib.Path(tempfile.mktemp(suffix=".mp3", dir=self.temp_dir))
65
- payload = {"text": text, "voice": voice}
66
-
67
- try:
68
- response = self.session.post(
69
- self.api_url,
70
- headers=self.headers,
71
- json=payload,
72
- timeout=self.timeout
73
- )
74
- if response.status_code == 200 and len(response.content) > 0:
75
- with open(filename, "wb") as f:
76
- f.write(response.content)
77
- return filename.as_posix()
78
- else:
79
- try:
80
- error_data = response.json()
81
- if "error" in error_data:
82
- raise exceptions.FailedToGenerateResponseError(f"API error: {error_data['error']}")
83
- except Exception:
84
- pass
85
- raise exceptions.FailedToGenerateResponseError(f"Sthir API error: {response.text}")
86
- except Exception as e:
87
- raise exceptions.FailedToGenerateResponseError(f"Failed to perform the operation: {e}")
88
-
89
- # Example usage
90
- if __name__ == "__main__":
91
- sthir = SthirTTS()
92
- text = "This is a test of the Sthir.org text-to-speech API. It supports multiple voices."
93
- audio_file = sthir.tts(text, voice="aura-luna-en")
94
- print(f"Audio saved to: {audio_file}")