webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (100)
  1. webscout/AIauto.py +6 -6
  2. webscout/AIbase.py +61 -1
  3. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  4. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  5. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  6. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  7. webscout/Litlogger/formats.py +9 -0
  8. webscout/Litlogger/handlers.py +18 -0
  9. webscout/Litlogger/logger.py +43 -1
  10. webscout/Provider/AISEARCH/scira_search.py +3 -2
  11. webscout/Provider/Blackboxai.py +2 -0
  12. webscout/Provider/ChatSandbox.py +2 -1
  13. webscout/Provider/Deepinfra.py +1 -1
  14. webscout/Provider/HeckAI.py +1 -1
  15. webscout/Provider/LambdaChat.py +8 -1
  16. webscout/Provider/MCPCore.py +7 -3
  17. webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
  18. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  19. webscout/Provider/OPENAI/FalconH1.py +457 -0
  20. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  21. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  22. webscout/Provider/OPENAI/PI.py +427 -0
  23. webscout/Provider/OPENAI/Qwen3.py +161 -140
  24. webscout/Provider/OPENAI/README.md +3 -0
  25. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  26. webscout/Provider/OPENAI/TwoAI.py +29 -12
  27. webscout/Provider/OPENAI/__init__.py +4 -1
  28. webscout/Provider/OPENAI/ai4chat.py +33 -23
  29. webscout/Provider/OPENAI/api.py +375 -24
  30. webscout/Provider/OPENAI/autoproxy.py +39 -0
  31. webscout/Provider/OPENAI/base.py +91 -12
  32. webscout/Provider/OPENAI/c4ai.py +31 -10
  33. webscout/Provider/OPENAI/chatgpt.py +56 -24
  34. webscout/Provider/OPENAI/chatgptclone.py +46 -16
  35. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  36. webscout/Provider/OPENAI/copilot.py +26 -10
  37. webscout/Provider/OPENAI/deepinfra.py +29 -12
  38. webscout/Provider/OPENAI/e2b.py +358 -158
  39. webscout/Provider/OPENAI/exaai.py +13 -10
  40. webscout/Provider/OPENAI/exachat.py +10 -6
  41. webscout/Provider/OPENAI/flowith.py +7 -3
  42. webscout/Provider/OPENAI/freeaichat.py +10 -6
  43. webscout/Provider/OPENAI/glider.py +10 -6
  44. webscout/Provider/OPENAI/heckai.py +11 -8
  45. webscout/Provider/OPENAI/llmchatco.py +9 -7
  46. webscout/Provider/OPENAI/mcpcore.py +10 -7
  47. webscout/Provider/OPENAI/multichat.py +3 -1
  48. webscout/Provider/OPENAI/netwrck.py +10 -6
  49. webscout/Provider/OPENAI/oivscode.py +12 -9
  50. webscout/Provider/OPENAI/opkfc.py +31 -8
  51. webscout/Provider/OPENAI/scirachat.py +17 -10
  52. webscout/Provider/OPENAI/sonus.py +10 -6
  53. webscout/Provider/OPENAI/standardinput.py +18 -9
  54. webscout/Provider/OPENAI/textpollinations.py +14 -7
  55. webscout/Provider/OPENAI/toolbaz.py +16 -11
  56. webscout/Provider/OPENAI/typefully.py +14 -7
  57. webscout/Provider/OPENAI/typegpt.py +10 -6
  58. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  59. webscout/Provider/OPENAI/venice.py +10 -6
  60. webscout/Provider/OPENAI/writecream.py +13 -10
  61. webscout/Provider/OPENAI/x0gpt.py +11 -9
  62. webscout/Provider/OPENAI/yep.py +12 -10
  63. webscout/Provider/PI.py +2 -1
  64. webscout/Provider/STT/__init__.py +3 -0
  65. webscout/Provider/STT/base.py +281 -0
  66. webscout/Provider/STT/elevenlabs.py +265 -0
  67. webscout/Provider/TTI/__init__.py +3 -1
  68. webscout/Provider/TTI/aiarta.py +399 -365
  69. webscout/Provider/TTI/base.py +74 -2
  70. webscout/Provider/TTI/fastflux.py +63 -30
  71. webscout/Provider/TTI/gpt1image.py +149 -0
  72. webscout/Provider/TTI/imagen.py +196 -0
  73. webscout/Provider/TTI/magicstudio.py +60 -29
  74. webscout/Provider/TTI/piclumen.py +43 -32
  75. webscout/Provider/TTI/pixelmuse.py +232 -225
  76. webscout/Provider/TTI/pollinations.py +43 -32
  77. webscout/Provider/TTI/together.py +287 -0
  78. webscout/Provider/TTI/utils.py +2 -1
  79. webscout/Provider/TTS/README.md +1 -0
  80. webscout/Provider/TTS/__init__.py +2 -1
  81. webscout/Provider/TTS/freetts.py +140 -0
  82. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  83. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  84. webscout/Provider/__init__.py +3 -2
  85. webscout/Provider/granite.py +41 -6
  86. webscout/Provider/oivscode.py +37 -37
  87. webscout/Provider/scira_chat.py +3 -2
  88. webscout/Provider/scnet.py +1 -0
  89. webscout/Provider/toolbaz.py +0 -1
  90. webscout/litagent/Readme.md +12 -3
  91. webscout/litagent/agent.py +99 -62
  92. webscout/version.py +1 -1
  93. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
  94. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
  95. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
  96. webscout/Provider/ChatGPTGratis.py +0 -194
  97. webscout/Provider/TTI/artbit.py +0 -0
  98. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
  99. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
  100. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/uncovrAI.py CHANGED
@@ -38,6 +38,8 @@ class Completions(BaseCompletions):
  stream: bool = False,
  temperature: Optional[float] = None,
  top_p: Optional[float] = None,
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None,
  **kwargs: Any
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
@@ -103,7 +105,9 @@ class Completions(BaseCompletions):
  payload=payload,
  model=model,
  request_id=request_id,
- created_time=created_time
+ created_time=created_time,
+ timeout=timeout,
+ proxies=proxies
  )

  # Handle non-streaming response
@@ -111,7 +115,9 @@ class Completions(BaseCompletions):
  payload=payload,
  model=model,
  request_id=request_id,
- created_time=created_time
+ created_time=created_time,
+ timeout=timeout,
+ proxies=proxies
  )

  def _handle_streaming_response(
@@ -120,7 +126,9 @@ class Completions(BaseCompletions):
  payload: Dict[str, Any],
  model: str,
  request_id: str,
- created_time: int
+ created_time: int,
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None
  ) -> Generator[ChatCompletionChunk, None, None]:
  """Handle streaming response from UncovrAI API."""
  try:
@@ -128,7 +136,8 @@ class Completions(BaseCompletions):
  self._client.url,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  ) as response:
  if response.status_code != 200:
  # If we get a non-200 response, try refreshing our identity once
@@ -139,7 +148,8 @@ class Completions(BaseCompletions):
  self._client.url,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  ) as retry_response:
  if not retry_response.ok:
  raise IOError(
@@ -216,14 +226,17 @@ class Completions(BaseCompletions):
  payload: Dict[str, Any],
  model: str,
  request_id: str,
- created_time: int
+ created_time: int,
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None
  ) -> ChatCompletion:
  """Handle non-streaming response from UncovrAI API."""
  try:
  response = self._client.session.post(
  self._client.url,
  json=payload,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  if response.status_code != 200:
@@ -232,7 +245,8 @@ class Completions(BaseCompletions):
  response = self._client.session.post(
  self._client.url,
  json=payload,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )
  if not response.ok:
  raise IOError(
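Note: the uncovrAI hunks above thread the new per-call `timeout` and `proxies` arguments through an existing retry-once flow: on a non-200 response the client refreshes its identity and replays the request with the same overrides. A minimal sketch of that flow, assuming a requests-style session; `refresh_identity` and the `client` attributes are illustrative stand-ins, not webscout's actual internals:

```python
def post_with_retry(client, payload, timeout=None, proxies=None):
    """POST once; on a non-200 response, refresh identity and retry once.

    Per-call timeout/proxies fall back to client-level defaults,
    mirroring the pattern these hunks introduce.
    """
    kwargs = dict(
        json=payload,
        timeout=timeout or client.timeout,
        proxies=proxies or getattr(client, "proxies", None),
    )
    response = client.session.post(client.url, **kwargs)
    if response.status_code != 200:
        client.refresh_identity()  # hypothetical helper: rotate UA/cookies
        response = client.session.post(client.url, **kwargs)
        if not response.ok:
            raise IOError(f"Request failed after retry: {response.status_code}")
    return response
```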
webscout/Provider/OPENAI/venice.py CHANGED
@@ -32,6 +32,8 @@ class Completions(BaseCompletions):
  stream: bool = False,
  temperature: Optional[float] = 0.8,
  top_p: Optional[float] = 0.9,
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None,
  **kwargs: Any
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
@@ -73,19 +75,20 @@ class Completions(BaseCompletions):
  created_time = int(time.time())

  if stream:
- return self._create_stream(request_id, created_time, model, payload)
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
  else:
- return self._create_non_stream(request_id, created_time, model, payload)
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

  def _create_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> Generator[ChatCompletionChunk, None, None]:
  try:
  response = self._client.session.post(
  self._client.api_endpoint,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  # Handle non-200 responses
@@ -211,7 +214,7 @@ class Completions(BaseCompletions):
  raise IOError(f"Venice request failed: {e}") from e

  def _create_non_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> ChatCompletion:
  try:
  # For non-streaming, we still use streaming internally to collect the full response
@@ -219,7 +222,8 @@ class Completions(BaseCompletions):
  self._client.api_endpoint,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  # Handle non-200 responses
webscout/Provider/OPENAI/writecream.py CHANGED
@@ -22,14 +22,16 @@ class Completions(BaseCompletions):
  self._client = client

  def create(
- *,
  self,
+ *,
  model: str = None, # Not used by Writecream, for compatibility
  messages: List[Dict[str, str]],
  max_tokens: Optional[int] = None, # Not used by Writecream
  stream: bool = False,
  temperature: Optional[float] = None, # Not used by Writecream
  top_p: Optional[float] = None, # Not used by Writecream
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None,
  **kwargs: Any
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
@@ -40,15 +42,15 @@ class Completions(BaseCompletions):
  request_id = f"chatcmpl-{uuid.uuid4()}"
  created_time = int(time.time())
  if stream:
- return self._create_stream(request_id, created_time, payload)
+ return self._create_stream(request_id, created_time, payload, timeout, proxies)
  else:
- return self._create_non_stream(request_id, created_time, payload)
+ return self._create_non_stream(request_id, created_time, payload, timeout, proxies)

  def _create_stream(
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> Generator[ChatCompletionChunk, None, None]:
  # Writecream does not support streaming, so yield the full response as a single chunk
- completion = self._create_non_stream(request_id, created_time, payload)
+ completion = self._create_non_stream(request_id, created_time, payload, timeout, proxies)
  content = completion.choices[0].message.content
  # Yield as a single chunk
  delta = ChoiceDelta(content=content)
@@ -72,7 +74,7 @@ class Completions(BaseCompletions):
  yield chunk

  def _create_non_stream(
- self, request_id: str, created_time: int, payload: List[Dict[str, str]]
+ self, request_id: str, created_time: int, payload: List[Dict[str, str]], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> ChatCompletion:
  try:
  params = {
@@ -83,7 +85,8 @@ class Completions(BaseCompletions):
  self._client.base_url,
  params=params,
  headers=self._client.headers,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )
  response.raise_for_status()
  data = response.json()
@@ -129,8 +132,8 @@ class Writecream(OpenAICompatibleProvider):
  """
  AVAILABLE_MODELS = ["writecream"]

- def __init__(self, timeout: Optional[int] = 30, browser: str = "chrome"):
- self.timeout = timeout
+ def __init__(self, browser: str = "chrome"):
+ self.timeout = None
  self.base_url = "https://8pe3nv3qha.execute-api.us-east-1.amazonaws.com/default/llm_chat"
  self.session = requests.Session()
  agent = LitAgent()
@@ -160,4 +163,4 @@ if __name__ == "__main__":
  {"role": "user", "content": "What is the capital of France?"}
  ]
  )
- print(response.choices[0].message.content)
+ print(response.choices[0].message.content)
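Writecream has no streaming endpoint, so `_create_stream` above fetches the full completion and replays it as a single chunk. A self-contained sketch of that emulation, using simplified stand-in types rather than webscout's actual dataclasses:

```python
from dataclasses import dataclass
from typing import Generator, Optional

@dataclass
class ChoiceDelta:
    content: Optional[str] = None

@dataclass
class Chunk:
    delta: ChoiceDelta
    finish_reason: Optional[str] = None

def pseudo_stream(full_text: str) -> Generator[Chunk, None, None]:
    # Emit the entire response as one delta, then a terminating chunk.
    yield Chunk(delta=ChoiceDelta(content=full_text))
    yield Chunk(delta=ChoiceDelta(), finish_reason="stop")

for chunk in pseudo_stream("Paris is the capital of France."):
    if chunk.delta.content:
        print(chunk.delta.content, end="")
```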
webscout/Provider/OPENAI/x0gpt.py CHANGED
@@ -30,6 +30,8 @@ class Completions(BaseCompletions):
  stream: bool = False,
  temperature: Optional[float] = None,
  top_p: Optional[float] = None,
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None,
  **kwargs: Any
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
@@ -60,12 +62,12 @@ class Completions(BaseCompletions):
  created_time = int(time.time())

  if stream:
- return self._create_stream(request_id, created_time, model, payload)
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
  else:
- return self._create_non_stream(request_id, created_time, model, payload)
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

  def _create_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> Generator[ChatCompletionChunk, None, None]:
  try:
  response = self._client.session.post(
@@ -73,7 +75,8 @@ class Completions(BaseCompletions):
  headers=self._client.headers,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  # Handle non-200 responses
@@ -175,7 +178,7 @@ class Completions(BaseCompletions):
  raise IOError(f"X0GPT request failed: {e}") from e

  def _create_non_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> ChatCompletion:
  try:
  response = self._client.session.post(
@@ -183,7 +186,8 @@ class Completions(BaseCompletions):
  headers=self._client.headers,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  # Handle non-200 responses
@@ -264,17 +268,15 @@ class X0GPT(OpenAICompatibleProvider):

  def __init__(
  self,
- timeout: Optional[int] = None,
  browser: str = "chrome"
  ):
  """
  Initialize the X0GPT client.

  Args:
- timeout: Request timeout in seconds (None for no timeout)
  browser: Browser to emulate in user agent
  """
- self.timeout = timeout
+ self.timeout = None
  self.api_endpoint = "https://x0-gpt.devwtf.in/api/stream/reply"
  self.session = requests.Session()
webscout/Provider/OPENAI/yep.py CHANGED
@@ -49,6 +49,8 @@ class Completions(BaseCompletions):
  temperature: Optional[float] = 0.6,
  top_p: Optional[float] = 0.7,
  system_prompt: Optional[str] = None, # Added for consistency, but will be ignored
+ timeout: Optional[int] = None,
+ proxies: Optional[Dict[str, str]] = None,
  **kwargs: Any
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
  """
@@ -99,12 +101,12 @@ class Completions(BaseCompletions):
  created_time = int(time.time())

  if stream:
- return self._create_stream(request_id, created_time, model, payload)
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
  else:
- return self._create_non_stream(request_id, created_time, model, payload)
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

  def _create_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> Generator[ChatCompletionChunk, None, None]:
  try:
  response = self._client.session.post(
@@ -113,7 +115,8 @@ class Completions(BaseCompletions):
  cookies=self._client.cookies,
  json=payload,
  stream=True,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )

  if not response.ok:
@@ -191,7 +194,7 @@ class Completions(BaseCompletions):
  pass

  def _create_non_stream(
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
  ) -> ChatCompletion:
  full_response_content = ""
  finish_reason = "stop"
@@ -204,7 +207,8 @@ class Completions(BaseCompletions):
  headers=self._client.headers,
  cookies=self._client.cookies,
  json=payload_copy,
- timeout=self._client.timeout
+ timeout=timeout or self._client.timeout,
+ proxies=proxies or getattr(self._client, "proxies", None)
  )
  if not response.ok:
  raise IOError(
@@ -271,17 +275,15 @@ class YEPCHAT(OpenAICompatibleProvider):

  def __init__(
  self,
- timeout: int = 30,
  browser: str = "chrome"
  ):
  """
  Initialize the YEPCHAT client.

  Args:
- timeout: Request timeout in seconds.
  browser: Browser name for LitAgent to generate User-Agent.
  """
- self.timeout = timeout
+ self.timeout = None
  self.api_endpoint = "https://api.yep.com/v1/chat/completions"
  self.session = cloudscraper.create_scraper() # Use cloudscraper

@@ -379,4 +381,4 @@ if __name__ == '__main__':
  print() # Add a newline at the end

  except Exception as e:
- print(f"Streaming Test Failed: {e}")
+ print(f"Streaming Test Failed: {e}")
webscout/Provider/PI.py CHANGED
@@ -158,7 +158,8 @@ class PiAI(Provider):
  except CurlError as e: # Catch CurlError
  raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation (CurlError): {e}") from e
  except Exception as e: # Catch other potential exceptions (like HTTPError, JSONDecodeError)
- err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
+ # Extract error text from the response if available
+ err_text = e.response.text if hasattr(e, 'response') and hasattr(e.response, 'text') else ''
  raise exceptions.FailedToGenerateResponseError(f"Failed to start conversation ({type(e).__name__}): {e} - {err_text}") from e

  def ask(
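This fixes a subtle bug: when the exception has no `response` attribute, the old short-circuit expression evaluates to `None`, which the f-string then renders as the literal text `None`. A small illustration with a bare exception:

```python
e = Exception("boom")  # no .response attribute, e.g. a plain network failure

# Old expression: `None and ...` short-circuits to None
old = getattr(e, 'response', None) and getattr(e.response, 'text', '')
print(f"Failed to start conversation: {e} - {old}")  # ... boom - None

# New expression: falls back to an empty string instead
new = e.response.text if hasattr(e, 'response') and hasattr(e.response, 'text') else ''
print(f"Failed to start conversation: {e} - {new}")  # ... boom -
```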
webscout/Provider/STT/__init__.py ADDED
@@ -0,0 +1,3 @@
+ # This file marks the directory as a Python package.
+ from .base import *
+ from .elevenlabs import *
webscout/Provider/STT/base.py ADDED
@@ -0,0 +1,281 @@
+ """
+ Base classes for OpenAI-compatible STT providers.
+
+ This module provides the base structure for STT providers that follow
+ the OpenAI Whisper API interface pattern.
+ """
+
+ import json
+ import time
+ from abc import ABC, abstractmethod
+ from typing import Any, Dict, Generator, List, Optional, Union, BinaryIO
+ from pathlib import Path
+
+ # Import OpenAI response types from the main OPENAI module
+ try:
+ from webscout.Provider.OPENAI.pydantic_imports import (
+ ChatCompletion, ChatCompletionChunk, Choice, ChoiceDelta,
+ Message, Usage, count_tokens
+ )
+ except ImportError:
+ # Fallback if pydantic_imports is not available
+ from dataclasses import dataclass
+
+ @dataclass
+ class Usage:
+ prompt_tokens: int = 0
+ completion_tokens: int = 0
+ total_tokens: int = 0
+
+ @dataclass
+ class Message:
+ role: str
+ content: str
+
+ @dataclass
+ class Choice:
+ index: int
+ message: Message
+ finish_reason: Optional[str] = None
+
+ @dataclass
+ class ChoiceDelta:
+ content: Optional[str] = None
+ role: Optional[str] = None
+
+ @dataclass
+ class ChatCompletionChunk:
+ id: str
+ choices: List[Dict[str, Any]]
+ created: int
+ model: str
+ object: str = "chat.completion.chunk"
+
+ @dataclass
+ class ChatCompletion:
+ id: str
+ choices: List[Choice]
+ created: int
+ model: str
+ usage: Usage
+ object: str = "chat.completion"
+
+ def count_tokens(text: str) -> int:
+ return len(text.split())
+
+
+ class TranscriptionResponse:
+ """Response object that mimics OpenAI's transcription response."""
+
+ def __init__(self, data: Dict[str, Any], response_format: str = "json"):
+ self._data = data
+ self._response_format = response_format
+
+ @property
+ def text(self) -> str:
+ """Get the transcribed text."""
+ return self._data.get("text", "")
+
+ @property
+ def language(self) -> Optional[str]:
+ """Get the detected language."""
+ return self._data.get("language")
+
+ @property
+ def duration(self) -> Optional[float]:
+ """Get the audio duration."""
+ return self._data.get("duration")
+
+ @property
+ def segments(self) -> Optional[list]:
+ """Get the segments with timestamps."""
+ return self._data.get("segments")
+
+ @property
+ def words(self) -> Optional[list]:
+ """Get the words with timestamps."""
+ return self._data.get("words")
+
+ def __str__(self) -> str:
+ """Return string representation based on response format."""
+ if self._response_format == "text":
+ return self.text
+ elif self._response_format == "srt":
+ return self._to_srt()
+ elif self._response_format == "vtt":
+ return self._to_vtt()
+ else: # json or verbose_json
+ return json.dumps(self._data, indent=2)
+
+ def _to_srt(self) -> str:
+ """Convert to SRT subtitle format."""
+ if not self.segments:
+ return ""
+
+ srt_content = []
+ for i, segment in enumerate(self.segments, 1):
+ start_time = self._format_time_srt(segment.get("start", 0))
+ end_time = self._format_time_srt(segment.get("end", 0))
+ text = segment.get("text", "").strip()
+
+ srt_content.append(f"{i}")
+ srt_content.append(f"{start_time} --> {end_time}")
+ srt_content.append(text)
+ srt_content.append("")
+
+ return "\n".join(srt_content)
+
+ def _to_vtt(self) -> str:
+ """Convert to VTT subtitle format."""
+ if not self.segments:
+ return "WEBVTT\n\n"
+
+ vtt_content = ["WEBVTT", ""]
+ for segment in self.segments:
+ start_time = self._format_time_vtt(segment.get("start", 0))
+ end_time = self._format_time_vtt(segment.get("end", 0))
+ text = segment.get("text", "").strip()
+
+ vtt_content.append(f"{start_time} --> {end_time}")
+ vtt_content.append(text)
+ vtt_content.append("")
+
+ return "\n".join(vtt_content)
+
+ def _format_time_srt(self, seconds: float) -> str:
+ """Format time for SRT format (HH:MM:SS,mmm)."""
+ hours = int(seconds // 3600)
+ minutes = int((seconds % 3600) // 60)
+ secs = int(seconds % 60)
+ millisecs = int((seconds % 1) * 1000)
+ return f"{hours:02d}:{minutes:02d}:{secs:02d},{millisecs:03d}"
+
+ def _format_time_vtt(self, seconds: float) -> str:
+ """Format time for VTT format (HH:MM:SS.mmm)."""
+ hours = int(seconds // 3600)
+ minutes = int((seconds % 3600) // 60)
+ secs = int(seconds % 60)
+ millisecs = int((seconds % 1) * 1000)
+ return f"{hours:02d}:{minutes:02d}:{secs:02d}.{millisecs:03d}"
+
+
+ class BaseSTTTranscriptions(ABC):
+ """Base class for STT transcriptions interface."""
+
+ def __init__(self, client):
+ self._client = client
+
+ @abstractmethod
+ def create(
+ self,
+ *,
+ model: str,
+ file: Union[BinaryIO, str, Path],
+ language: Optional[str] = None,
+ prompt: Optional[str] = None,
+ response_format: str = "json",
+ temperature: Optional[float] = None,
+ timestamp_granularities: Optional[List[str]] = None,
+ stream: bool = False,
+ timeout: Optional[int] = None,
+ proxies: Optional[dict] = None,
+ **kwargs: Any
+ ) -> Union[TranscriptionResponse, Generator[str, None, None]]:
+ """
+ Create a transcription of the given audio file.
+
+ Args:
+ model: Model to use for transcription
+ file: Audio file to transcribe
+ language: Language of the audio (ISO-639-1 format)
+ prompt: Optional text to guide the model's style
+ response_format: Format of the response
+ temperature: Sampling temperature (0 to 1)
+ timestamp_granularities: Timestamp granularities to include
+ stream: Whether to stream the response
+ timeout: Request timeout
+ proxies: Proxy configuration
+ **kwargs: Additional parameters
+
+ Returns:
+ TranscriptionResponse or generator of SSE strings if streaming
+ """
+ raise NotImplementedError
+
+
+ class BaseSTTAudio(ABC):
+ """Base class for STT audio interface."""
+
+ def __init__(self, client):
+ self.transcriptions = self._create_transcriptions(client)
+
+ @abstractmethod
+ def _create_transcriptions(self, client) -> BaseSTTTranscriptions:
+ """Create the transcriptions interface."""
+ raise NotImplementedError
+
+
+ class BaseSTTChat:
+ """Base chat interface for STT providers (placeholder for consistency)."""
+
+ def __init__(self, client):
+ _ = client # Unused but kept for interface consistency
+ self.completions = None # STT providers don't have completions
+
+
+ class STTCompatibleProvider(ABC):
+ """
+ Abstract Base Class for STT providers mimicking the OpenAI structure.
+ Requires a nested 'audio.transcriptions' structure.
+ """
+
+ audio: BaseSTTAudio
+
+ @abstractmethod
+ def __init__(self, **kwargs: Any):
+ """Initialize the STT provider."""
+ pass
+
+ @property
+ @abstractmethod
+ def models(self):
+ """
+ Property that returns an object with a .list() method returning available models.
+ """
+ pass
+
+
+ class STTModels:
+ """Models interface for STT providers."""
+
+ def __init__(self, available_models: List[str]):
+ self._available_models = available_models
+
+ def list(self) -> List[Dict[str, Any]]:
+ """List available models."""
+ return [
+ {
+ "id": model,
+ "object": "model",
+ "created": int(time.time()),
+ "owned_by": "webscout"
+ }
+ for model in self._available_models
+ ]
+
+
+ __all__ = [
+ 'TranscriptionResponse',
+ 'BaseSTTTranscriptions',
+ 'BaseSTTAudio',
+ 'BaseSTTChat',
+ 'STTCompatibleProvider',
+ 'STTModels',
+ 'ChatCompletion',
+ 'ChatCompletionChunk',
+ 'Choice',
+ 'ChoiceDelta',
+ 'Message',
+ 'Usage',
+ 'count_tokens'
+ ]
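Since `TranscriptionResponse` is shown in full above, a quick usage sketch of its format selection; the segment data here is invented:

```python
from webscout.Provider.STT.base import TranscriptionResponse  # module added in this release

data = {
    "text": "Hello world. This is a test.",
    "segments": [
        {"start": 0.0, "end": 1.5, "text": "Hello world."},
        {"start": 1.5, "end": 3.0, "text": "This is a test."},
    ],
}

print(TranscriptionResponse(data, response_format="srt"))
# 1
# 00:00:00,000 --> 00:00:01,500
# Hello world.
#
# 2
# 00:00:01,500 --> 00:00:03,000
# This is a test.

print(TranscriptionResponse(data, response_format="text"))
# Hello world. This is a test.
```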