webscout 8.3__py3-none-any.whl → 8.3.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. Click here for more details.

Files changed (120)
  1. webscout/AIauto.py +4 -4
  2. webscout/AIbase.py +61 -1
  3. webscout/AIutel.py +46 -53
  4. webscout/Bing_search.py +418 -0
  5. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  6. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  7. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  8. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  9. webscout/Extra/gguf.py +706 -177
  10. webscout/Litlogger/formats.py +9 -0
  11. webscout/Litlogger/handlers.py +18 -0
  12. webscout/Litlogger/logger.py +43 -1
  13. webscout/Provider/AISEARCH/genspark_search.py +7 -7
  14. webscout/Provider/AISEARCH/scira_search.py +3 -2
  15. webscout/Provider/GeminiProxy.py +140 -0
  16. webscout/Provider/LambdaChat.py +7 -1
  17. webscout/Provider/MCPCore.py +78 -75
  18. webscout/Provider/OPENAI/BLACKBOXAI.py +1046 -1017
  19. webscout/Provider/OPENAI/GeminiProxy.py +328 -0
  20. webscout/Provider/OPENAI/Qwen3.py +303 -303
  21. webscout/Provider/OPENAI/README.md +5 -0
  22. webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
  23. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  24. webscout/Provider/OPENAI/__init__.py +16 -1
  25. webscout/Provider/OPENAI/autoproxy.py +332 -0
  26. webscout/Provider/OPENAI/base.py +101 -14
  27. webscout/Provider/OPENAI/chatgpt.py +15 -2
  28. webscout/Provider/OPENAI/chatgptclone.py +14 -3
  29. webscout/Provider/OPENAI/deepinfra.py +339 -328
  30. webscout/Provider/OPENAI/e2b.py +295 -74
  31. webscout/Provider/OPENAI/mcpcore.py +109 -70
  32. webscout/Provider/OPENAI/opkfc.py +18 -6
  33. webscout/Provider/OPENAI/scirachat.py +59 -50
  34. webscout/Provider/OPENAI/toolbaz.py +2 -10
  35. webscout/Provider/OPENAI/writecream.py +166 -166
  36. webscout/Provider/OPENAI/x0gpt.py +367 -367
  37. webscout/Provider/OPENAI/xenai.py +514 -0
  38. webscout/Provider/OPENAI/yep.py +389 -383
  39. webscout/Provider/STT/__init__.py +3 -0
  40. webscout/Provider/STT/base.py +281 -0
  41. webscout/Provider/STT/elevenlabs.py +265 -0
  42. webscout/Provider/TTI/__init__.py +4 -1
  43. webscout/Provider/TTI/aiarta.py +399 -365
  44. webscout/Provider/TTI/base.py +74 -2
  45. webscout/Provider/TTI/bing.py +231 -0
  46. webscout/Provider/TTI/fastflux.py +63 -30
  47. webscout/Provider/TTI/gpt1image.py +149 -0
  48. webscout/Provider/TTI/imagen.py +196 -0
  49. webscout/Provider/TTI/magicstudio.py +60 -29
  50. webscout/Provider/TTI/piclumen.py +43 -32
  51. webscout/Provider/TTI/pixelmuse.py +232 -225
  52. webscout/Provider/TTI/pollinations.py +43 -32
  53. webscout/Provider/TTI/together.py +287 -0
  54. webscout/Provider/TTI/utils.py +2 -1
  55. webscout/Provider/TTS/README.md +1 -0
  56. webscout/Provider/TTS/__init__.py +2 -1
  57. webscout/Provider/TTS/freetts.py +140 -0
  58. webscout/Provider/TTS/speechma.py +45 -39
  59. webscout/Provider/TogetherAI.py +366 -0
  60. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  61. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  62. webscout/Provider/XenAI.py +324 -0
  63. webscout/Provider/__init__.py +8 -0
  64. webscout/Provider/deepseek_assistant.py +378 -0
  65. webscout/Provider/scira_chat.py +3 -2
  66. webscout/Provider/toolbaz.py +0 -1
  67. webscout/auth/__init__.py +44 -0
  68. webscout/auth/api_key_manager.py +189 -0
  69. webscout/auth/auth_system.py +100 -0
  70. webscout/auth/config.py +76 -0
  71. webscout/auth/database.py +400 -0
  72. webscout/auth/exceptions.py +67 -0
  73. webscout/auth/middleware.py +248 -0
  74. webscout/auth/models.py +130 -0
  75. webscout/auth/providers.py +257 -0
  76. webscout/auth/rate_limiter.py +254 -0
  77. webscout/auth/request_models.py +127 -0
  78. webscout/auth/request_processing.py +226 -0
  79. webscout/auth/routes.py +526 -0
  80. webscout/auth/schemas.py +103 -0
  81. webscout/auth/server.py +312 -0
  82. webscout/auth/static/favicon.svg +11 -0
  83. webscout/auth/swagger_ui.py +203 -0
  84. webscout/auth/templates/components/authentication.html +237 -0
  85. webscout/auth/templates/components/base.html +103 -0
  86. webscout/auth/templates/components/endpoints.html +750 -0
  87. webscout/auth/templates/components/examples.html +491 -0
  88. webscout/auth/templates/components/footer.html +75 -0
  89. webscout/auth/templates/components/header.html +27 -0
  90. webscout/auth/templates/components/models.html +286 -0
  91. webscout/auth/templates/components/navigation.html +70 -0
  92. webscout/auth/templates/static/api.js +455 -0
  93. webscout/auth/templates/static/icons.js +168 -0
  94. webscout/auth/templates/static/main.js +784 -0
  95. webscout/auth/templates/static/particles.js +201 -0
  96. webscout/auth/templates/static/styles.css +3353 -0
  97. webscout/auth/templates/static/ui.js +374 -0
  98. webscout/auth/templates/swagger_ui.html +170 -0
  99. webscout/client.py +49 -3
  100. webscout/litagent/Readme.md +12 -3
  101. webscout/litagent/agent.py +99 -62
  102. webscout/scout/core/scout.py +104 -26
  103. webscout/scout/element.py +139 -18
  104. webscout/swiftcli/core/cli.py +14 -3
  105. webscout/swiftcli/decorators/output.py +59 -9
  106. webscout/update_checker.py +31 -49
  107. webscout/version.py +1 -1
  108. webscout/webscout_search.py +4 -12
  109. webscout/webscout_search_async.py +3 -10
  110. webscout/yep_search.py +2 -11
  111. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/METADATA +41 -11
  112. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/RECORD +116 -68
  113. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/entry_points.txt +1 -1
  114. webscout/Provider/HF_space/__init__.py +0 -0
  115. webscout/Provider/HF_space/qwen_qwen2.py +0 -206
  116. webscout/Provider/OPENAI/api.py +0 -1035
  117. webscout/Provider/TTI/artbit.py +0 -0
  118. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/WHEEL +0 -0
  119. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/licenses/LICENSE.md +0 -0
  120. {webscout-8.3.dist-info → webscout-8.3.2.dist-info}/top_level.txt +0 -0
@@ -0,0 +1,3 @@
1
+ # This file marks the directory as a Python package.
2
+ from .base import *
3
+ from .elevenlabs import *
@@ -0,0 +1,281 @@
1
+ """
2
+ Base classes for OpenAI-compatible STT providers.
3
+
4
+ This module provides the base structure for STT providers that follow
5
+ the OpenAI Whisper API interface pattern.
6
+ """
7
+
8
+ import json
9
+ import time
10
+ from abc import ABC, abstractmethod
11
+ from typing import Any, Dict, Generator, List, Optional, Union, BinaryIO
12
+ from pathlib import Path
13
+
14
+ # Import OpenAI response types from the main OPENAI module
15
+ try:
16
+ from webscout.Provider.OPENAI.pydantic_imports import (
17
+ ChatCompletion, ChatCompletionChunk, Choice, ChoiceDelta,
18
+ Message, Usage, count_tokens
19
+ )
20
+ except ImportError:
21
+ # Fallback if pydantic_imports is not available
22
+ from dataclasses import dataclass
23
+
24
+ @dataclass
25
+ class Usage:
26
+ prompt_tokens: int = 0
27
+ completion_tokens: int = 0
28
+ total_tokens: int = 0
29
+
30
+ @dataclass
31
+ class Message:
32
+ role: str
33
+ content: str
34
+
35
+ @dataclass
36
+ class Choice:
37
+ index: int
38
+ message: Message
39
+ finish_reason: Optional[str] = None
40
+
41
+ @dataclass
42
+ class ChoiceDelta:
43
+ content: Optional[str] = None
44
+ role: Optional[str] = None
45
+
46
+ @dataclass
47
+ class ChatCompletionChunk:
48
+ id: str
49
+ choices: List[Dict[str, Any]]
50
+ created: int
51
+ model: str
52
+ object: str = "chat.completion.chunk"
53
+
54
+ @dataclass
55
+ class ChatCompletion:
56
+ id: str
57
+ choices: List[Choice]
58
+ created: int
59
+ model: str
60
+ usage: Usage
61
+ object: str = "chat.completion"
62
+
63
+ def count_tokens(text: str) -> int:
64
+ return len(text.split())
65
+
66
+
67
+ class TranscriptionResponse:
68
+ """Response object that mimics OpenAI's transcription response."""
69
+
70
+ def __init__(self, data: Dict[str, Any], response_format: str = "json"):
71
+ self._data = data
72
+ self._response_format = response_format
73
+
74
+ @property
75
+ def text(self) -> str:
76
+ """Get the transcribed text."""
77
+ return self._data.get("text", "")
78
+
79
+ @property
80
+ def language(self) -> Optional[str]:
81
+ """Get the detected language."""
82
+ return self._data.get("language")
83
+
84
+ @property
85
+ def duration(self) -> Optional[float]:
86
+ """Get the audio duration."""
87
+ return self._data.get("duration")
88
+
89
+ @property
90
+ def segments(self) -> Optional[list]:
91
+ """Get the segments with timestamps."""
92
+ return self._data.get("segments")
93
+
94
+ @property
95
+ def words(self) -> Optional[list]:
96
+ """Get the words with timestamps."""
97
+ return self._data.get("words")
98
+
99
+ def __str__(self) -> str:
100
+ """Return string representation based on response format."""
101
+ if self._response_format == "text":
102
+ return self.text
103
+ elif self._response_format == "srt":
104
+ return self._to_srt()
105
+ elif self._response_format == "vtt":
106
+ return self._to_vtt()
107
+ else: # json or verbose_json
108
+ return json.dumps(self._data, indent=2)
109
+
110
+ def _to_srt(self) -> str:
111
+ """Convert to SRT subtitle format."""
112
+ if not self.segments:
113
+ return ""
114
+
115
+ srt_content = []
116
+ for i, segment in enumerate(self.segments, 1):
117
+ start_time = self._format_time_srt(segment.get("start", 0))
118
+ end_time = self._format_time_srt(segment.get("end", 0))
119
+ text = segment.get("text", "").strip()
120
+
121
+ srt_content.append(f"{i}")
122
+ srt_content.append(f"{start_time} --> {end_time}")
123
+ srt_content.append(text)
124
+ srt_content.append("")
125
+
126
+ return "\n".join(srt_content)
127
+
128
+ def _to_vtt(self) -> str:
129
+ """Convert to VTT subtitle format."""
130
+ if not self.segments:
131
+ return "WEBVTT\n\n"
132
+
133
+ vtt_content = ["WEBVTT", ""]
134
+ for segment in self.segments:
135
+ start_time = self._format_time_vtt(segment.get("start", 0))
136
+ end_time = self._format_time_vtt(segment.get("end", 0))
137
+ text = segment.get("text", "").strip()
138
+
139
+ vtt_content.append(f"{start_time} --> {end_time}")
140
+ vtt_content.append(text)
141
+ vtt_content.append("")
142
+
143
+ return "\n".join(vtt_content)
144
+
145
+ def _format_time_srt(self, seconds: float) -> str:
146
+ """Format time for SRT format (HH:MM:SS,mmm)."""
147
+ hours = int(seconds // 3600)
148
+ minutes = int((seconds % 3600) // 60)
149
+ secs = int(seconds % 60)
150
+ millisecs = int((seconds % 1) * 1000)
151
+ return f"{hours:02d}:{minutes:02d}:{secs:02d},{millisecs:03d}"
152
+
153
+ def _format_time_vtt(self, seconds: float) -> str:
154
+ """Format time for VTT format (HH:MM:SS.mmm)."""
155
+ hours = int(seconds // 3600)
156
+ minutes = int((seconds % 3600) // 60)
157
+ secs = int(seconds % 60)
158
+ millisecs = int((seconds % 1) * 1000)
159
+ return f"{hours:02d}:{minutes:02d}:{secs:02d}.{millisecs:03d}"
160
+
161
+
162
+ class BaseSTTTranscriptions(ABC):
163
+ """Base class for STT transcriptions interface."""
164
+
165
+ def __init__(self, client):
166
+ self._client = client
167
+
168
+ @abstractmethod
169
+ def create(
170
+ self,
171
+ *,
172
+ model: str,
173
+ file: Union[BinaryIO, str, Path],
174
+ language: Optional[str] = None,
175
+ prompt: Optional[str] = None,
176
+ response_format: str = "json",
177
+ temperature: Optional[float] = None,
178
+ timestamp_granularities: Optional[List[str]] = None,
179
+ stream: bool = False,
180
+ timeout: Optional[int] = None,
181
+ proxies: Optional[dict] = None,
182
+ **kwargs: Any
183
+ ) -> Union[TranscriptionResponse, Generator[str, None, None]]:
184
+ """
185
+ Create a transcription of the given audio file.
186
+
187
+ Args:
188
+ model: Model to use for transcription
189
+ file: Audio file to transcribe
190
+ language: Language of the audio (ISO-639-1 format)
191
+ prompt: Optional text to guide the model's style
192
+ response_format: Format of the response
193
+ temperature: Sampling temperature (0 to 1)
194
+ timestamp_granularities: Timestamp granularities to include
195
+ stream: Whether to stream the response
196
+ timeout: Request timeout
197
+ proxies: Proxy configuration
198
+ **kwargs: Additional parameters
199
+
200
+ Returns:
201
+ TranscriptionResponse or generator of SSE strings if streaming
202
+ """
203
+ raise NotImplementedError
204
+
205
+
206
+ class BaseSTTAudio(ABC):
207
+ """Base class for STT audio interface."""
208
+
209
+ def __init__(self, client):
210
+ self.transcriptions = self._create_transcriptions(client)
211
+
212
+ @abstractmethod
213
+ def _create_transcriptions(self, client) -> BaseSTTTranscriptions:
214
+ """Create the transcriptions interface."""
215
+ raise NotImplementedError
216
+
217
+
218
+ class BaseSTTChat:
219
+ """Base chat interface for STT providers (placeholder for consistency)."""
220
+
221
+ def __init__(self, client):
222
+ _ = client # Unused but kept for interface consistency
223
+ self.completions = None # STT providers don't have completions
224
+
225
+
226
+ class STTCompatibleProvider(ABC):
227
+ """
228
+ Abstract Base Class for STT providers mimicking the OpenAI structure.
229
+ Requires a nested 'audio.transcriptions' structure.
230
+ """
231
+
232
+ audio: BaseSTTAudio
233
+
234
+ @abstractmethod
235
+ def __init__(self, **kwargs: Any):
236
+ """Initialize the STT provider."""
237
+ pass
238
+
239
+ @property
240
+ @abstractmethod
241
+ def models(self):
242
+ """
243
+ Property that returns an object with a .list() method returning available models.
244
+ """
245
+ pass
246
+
247
+
248
+ class STTModels:
249
+ """Models interface for STT providers."""
250
+
251
+ def __init__(self, available_models: List[str]):
252
+ self._available_models = available_models
253
+
254
+ def list(self) -> List[Dict[str, Any]]:
255
+ """List available models."""
256
+ return [
257
+ {
258
+ "id": model,
259
+ "object": "model",
260
+ "created": int(time.time()),
261
+ "owned_by": "webscout"
262
+ }
263
+ for model in self._available_models
264
+ ]
265
+
266
+
267
+ __all__ = [
268
+ 'TranscriptionResponse',
269
+ 'BaseSTTTranscriptions',
270
+ 'BaseSTTAudio',
271
+ 'BaseSTTChat',
272
+ 'STTCompatibleProvider',
273
+ 'STTModels',
274
+ 'ChatCompletion',
275
+ 'ChatCompletionChunk',
276
+ 'Choice',
277
+ 'ChoiceDelta',
278
+ 'Message',
279
+ 'Usage',
280
+ 'count_tokens'
281
+ ]
@@ -0,0 +1,265 @@
1
+ """
2
+ ElevenLabs STT provider with OpenAI-compatible interface.
3
+
4
+ This module provides an OpenAI Whisper API-compatible interface for ElevenLabs
5
+ speech-to-text transcription service.
6
+ """
7
+
8
+ import json
9
+ import time
10
+ import uuid
11
+ from pathlib import Path
12
+ from typing import Any, Dict, Generator, List, Optional, Union, BinaryIO
13
+
14
+ import requests
15
+ from webscout.litagent import LitAgent
16
+ from webscout import exceptions
17
+
18
+ from webscout.Provider.STT.base import (
19
+ BaseSTTTranscriptions, BaseSTTAudio, STTCompatibleProvider,
20
+ STTModels, TranscriptionResponse
21
+ )
22
+
23
+
24
+ class ElevenLabsTranscriptions(BaseSTTTranscriptions):
25
+ """ElevenLabs transcriptions interface."""
26
+
27
+ def create(
28
+ self,
29
+ *,
30
+ model: str,
31
+ file: Union[BinaryIO, str, Path],
32
+ language: Optional[str] = None,
33
+ prompt: Optional[str] = None,
34
+ response_format: str = "json",
35
+ temperature: Optional[float] = None,
36
+ timestamp_granularities: Optional[List[str]] = None,
37
+ stream: bool = False,
38
+ timeout: Optional[int] = None,
39
+ proxies: Optional[dict] = None,
40
+ **kwargs: Any
41
+ ) -> Union[TranscriptionResponse, Generator[str, None, None]]:
42
+ """Create a transcription using ElevenLabs API."""
43
+ # Always use file as file-like object
44
+ if isinstance(file, (str, Path)):
45
+ audio_file = open(str(file), "rb")
46
+ close_file = True
47
+ else:
48
+ audio_file = file
49
+ close_file = False
50
+ try:
51
+ if stream:
52
+ return self._create_stream(
53
+ audio_file=audio_file,
54
+ model=model,
55
+ language=language,
56
+ prompt=prompt,
57
+ response_format=response_format,
58
+ temperature=temperature,
59
+ timestamp_granularities=timestamp_granularities,
60
+ timeout=timeout,
61
+ proxies=proxies,
62
+ **kwargs
63
+ )
64
+ else:
65
+ result = self._create_non_stream(
66
+ audio_file=audio_file,
67
+ model=model,
68
+ language=language,
69
+ prompt=prompt,
70
+ response_format=response_format,
71
+ temperature=temperature,
72
+ timestamp_granularities=timestamp_granularities,
73
+ timeout=timeout,
74
+ proxies=proxies,
75
+ **kwargs
76
+ )
77
+ return result
78
+ finally:
79
+ if close_file:
80
+ audio_file.close()
81
+
82
+ def _create_non_stream(
83
+ self,
84
+ audio_file: BinaryIO,
85
+ model: str,
86
+ language: Optional[str] = None,
87
+ prompt: Optional[str] = None,
88
+ response_format: str = "json",
89
+ temperature: Optional[float] = None,
90
+ timestamp_granularities: Optional[List[str]] = None,
91
+ timeout: Optional[int] = None,
92
+ proxies: Optional[dict] = None,
93
+ **kwargs: Any
94
+ ) -> TranscriptionResponse:
95
+ """Create non-streaming transcription."""
96
+ try:
97
+ headers = {
98
+ 'Accept': 'application/json, text/plain, */*',
99
+ 'Accept-Language': 'en-US,en;q=0.9',
100
+ 'User-Agent': LitAgent().random()
101
+ }
102
+ api_url = self._client.api_url
103
+ if getattr(self._client, 'allow_unauthenticated', False):
104
+ if '?' in api_url:
105
+ api_url += '&allow_unauthenticated=1'
106
+ else:
107
+ api_url += '?allow_unauthenticated=1'
108
+ files = {
109
+ 'file': audio_file,
110
+ 'model_id': (None, self._client.model_id),
111
+ 'tag_audio_events': (None, 'true' if self._client.tag_audio_events else 'false'),
112
+ 'diarize': (None, 'true' if self._client.diarize else 'false')
113
+ }
114
+ if language:
115
+ files['language'] = (None, language)
116
+ response = requests.post(
117
+ api_url,
118
+ files=files,
119
+ headers=headers,
120
+ timeout=timeout or self._client.timeout,
121
+ proxies=proxies or getattr(self._client, "proxies", None)
122
+ )
123
+ if response.status_code != 200:
124
+ raise exceptions.FailedToGenerateResponseError(
125
+ f"ElevenLabs API returned error: {response.status_code} - {response.text}"
126
+ )
127
+ result = response.json()
128
+ simple_result = {
129
+ "text": result.get("text", "")
130
+ }
131
+ return TranscriptionResponse(simple_result, response_format)
132
+ except Exception as e:
133
+ raise exceptions.FailedToGenerateResponseError(f"ElevenLabs transcription failed: {str(e)}")
134
+
135
+ def _create_stream(
136
+ self,
137
+ audio_file: BinaryIO,
138
+ model: str,
139
+ language: Optional[str] = None,
140
+ prompt: Optional[str] = None,
141
+ response_format: str = "json",
142
+ temperature: Optional[float] = None,
143
+ timestamp_granularities: Optional[List[str]] = None,
144
+ timeout: Optional[int] = None,
145
+ proxies: Optional[dict] = None,
146
+ **kwargs: Any
147
+ ) -> Generator[str, None, None]:
148
+ """Create streaming transcription using requests.post(..., stream=True)."""
149
+ headers = {
150
+ 'Accept': 'application/json, text/plain, */*',
151
+ 'Accept-Language': 'en-US,en;q=0.9',
152
+ 'User-Agent': LitAgent().random()
153
+ }
154
+ api_url = self._client.api_url
155
+ if getattr(self._client, 'allow_unauthenticated', False):
156
+ if '?' in api_url:
157
+ api_url += '&allow_unauthenticated=1'
158
+ else:
159
+ api_url += '?allow_unauthenticated=1'
160
+ files = {
161
+ 'file': audio_file,
162
+ 'model_id': (None, self._client.model_id),
163
+ 'tag_audio_events': (None, 'true' if self._client.tag_audio_events else 'false'),
164
+ 'diarize': (None, 'true' if self._client.diarize else 'false')
165
+ }
166
+ if language:
167
+ files['language'] = (None, language)
168
+ response = requests.post(
169
+ api_url,
170
+ files=files,
171
+ headers=headers,
172
+ timeout=timeout or self._client.timeout,
173
+ proxies=proxies or getattr(self._client, "proxies", None),
174
+ stream=True
175
+ )
176
+ if response.status_code != 200:
177
+ raise exceptions.FailedToGenerateResponseError(
178
+ f"ElevenLabs API returned error: {response.status_code} - {response.text}"
179
+ )
180
+ # Stream the response, decode utf-8
181
+ for line in response.iter_lines(decode_unicode=True):
182
+ if line:
183
+ yield line
184
+
185
+
186
+
187
+ class ElevenLabsAudio(BaseSTTAudio):
188
+ """ElevenLabs audio interface."""
189
+
190
+ def _create_transcriptions(self, client) -> ElevenLabsTranscriptions:
191
+ return ElevenLabsTranscriptions(client)
192
+
193
+
194
+ class ElevenLabsSTT(STTCompatibleProvider):
195
+ """
196
+ OpenAI-compatible client for ElevenLabs STT API.
197
+
198
+ Usage:
199
+ client = ElevenLabsSTT()
200
+ audio_file = open("audio.mp3", "rb")
201
+ transcription = client.audio.transcriptions.create(
202
+ model="scribe_v1",
203
+ file=audio_file,
204
+ response_format="text"
205
+ )
206
+ print(transcription.text)
207
+ """
208
+
209
+ AVAILABLE_MODELS = [
210
+ "scribe_v1",
211
+ ]
212
+
213
+ def __init__(
214
+ self,
215
+ model_id: str = "scribe_v1",
216
+ allow_unauthenticated: bool = True,
217
+ tag_audio_events: bool = True,
218
+ diarize: bool = True,
219
+ timeout: int = 60,
220
+ proxies: Optional[dict] = None
221
+ ):
222
+ """Initialize ElevenLabs STT provider."""
223
+ self.model_id = model_id
224
+ self.allow_unauthenticated = allow_unauthenticated
225
+ self.tag_audio_events = tag_audio_events
226
+ self.diarize = diarize
227
+ self.timeout = timeout
228
+ self.proxies = proxies
229
+
230
+ # API configuration
231
+ self.api_url = "https://api.elevenlabs.io/v1/speech-to-text"
232
+
233
+ # Initialize interfaces
234
+ self.audio = ElevenLabsAudio(self)
235
+ self._models = STTModels(self.AVAILABLE_MODELS)
236
+
237
+ @property
238
+ def models(self):
239
+ """Get models interface."""
240
+ return self._models
241
+ if __name__ == "__main__":
242
+ from rich import print
243
+ client = ElevenLabsSTT()
244
+
245
+ # Example audio file path - replace with your own
246
+ audio_file_path = r"C:\Users\koula\Downloads\audio_2025-05-12_22-30-47.ogg"
247
+
248
+ print("=== Non-streaming example ===")
249
+ with open(audio_file_path, "rb") as audio_file:
250
+ transcription = client.audio.transcriptions.create(
251
+ model="scribe_v1",
252
+ file=audio_file,
253
+ stream=False
254
+ )
255
+ print(transcription.text)
256
+
257
+ print("\n=== Streaming example ===")
258
+ with open(audio_file_path, "rb") as audio_file:
259
+ stream = client.audio.transcriptions.create(
260
+ model="scribe_v1",
261
+ file=audio_file,
262
+ stream=True
263
+ )
264
+ for chunk in stream:
265
+ print(chunk.strip())
@@ -1,7 +1,10 @@
1
-
2
1
  from .pollinations import *
3
2
  from .piclumen import *
4
3
  from .magicstudio import *
5
4
  from .fastflux import *
6
5
  from .pixelmuse import *
7
6
  from .aiarta import *
7
+ from .gpt1image import *
8
+ from .imagen import *
9
+ from .together import *
10
+ from .bing import *