webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of webscout might be problematic. See the advisory on the package registry page for more details.

Files changed (100)
  1. webscout/AIauto.py +6 -6
  2. webscout/AIbase.py +61 -1
  3. webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
  4. webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
  5. webscout/Extra/YTToolkit/ytapi/video.py +10 -10
  6. webscout/Extra/autocoder/autocoder_utiles.py +1 -1
  7. webscout/Litlogger/formats.py +9 -0
  8. webscout/Litlogger/handlers.py +18 -0
  9. webscout/Litlogger/logger.py +43 -1
  10. webscout/Provider/AISEARCH/scira_search.py +3 -2
  11. webscout/Provider/Blackboxai.py +2 -0
  12. webscout/Provider/ChatSandbox.py +2 -1
  13. webscout/Provider/Deepinfra.py +1 -1
  14. webscout/Provider/HeckAI.py +1 -1
  15. webscout/Provider/LambdaChat.py +8 -1
  16. webscout/Provider/MCPCore.py +7 -3
  17. webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
  18. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  19. webscout/Provider/OPENAI/FalconH1.py +457 -0
  20. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  21. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  22. webscout/Provider/OPENAI/PI.py +427 -0
  23. webscout/Provider/OPENAI/Qwen3.py +161 -140
  24. webscout/Provider/OPENAI/README.md +3 -0
  25. webscout/Provider/OPENAI/TogetherAI.py +355 -0
  26. webscout/Provider/OPENAI/TwoAI.py +29 -12
  27. webscout/Provider/OPENAI/__init__.py +4 -1
  28. webscout/Provider/OPENAI/ai4chat.py +33 -23
  29. webscout/Provider/OPENAI/api.py +375 -24
  30. webscout/Provider/OPENAI/autoproxy.py +39 -0
  31. webscout/Provider/OPENAI/base.py +91 -12
  32. webscout/Provider/OPENAI/c4ai.py +31 -10
  33. webscout/Provider/OPENAI/chatgpt.py +56 -24
  34. webscout/Provider/OPENAI/chatgptclone.py +46 -16
  35. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  36. webscout/Provider/OPENAI/copilot.py +26 -10
  37. webscout/Provider/OPENAI/deepinfra.py +29 -12
  38. webscout/Provider/OPENAI/e2b.py +358 -158
  39. webscout/Provider/OPENAI/exaai.py +13 -10
  40. webscout/Provider/OPENAI/exachat.py +10 -6
  41. webscout/Provider/OPENAI/flowith.py +7 -3
  42. webscout/Provider/OPENAI/freeaichat.py +10 -6
  43. webscout/Provider/OPENAI/glider.py +10 -6
  44. webscout/Provider/OPENAI/heckai.py +11 -8
  45. webscout/Provider/OPENAI/llmchatco.py +9 -7
  46. webscout/Provider/OPENAI/mcpcore.py +10 -7
  47. webscout/Provider/OPENAI/multichat.py +3 -1
  48. webscout/Provider/OPENAI/netwrck.py +10 -6
  49. webscout/Provider/OPENAI/oivscode.py +12 -9
  50. webscout/Provider/OPENAI/opkfc.py +31 -8
  51. webscout/Provider/OPENAI/scirachat.py +17 -10
  52. webscout/Provider/OPENAI/sonus.py +10 -6
  53. webscout/Provider/OPENAI/standardinput.py +18 -9
  54. webscout/Provider/OPENAI/textpollinations.py +14 -7
  55. webscout/Provider/OPENAI/toolbaz.py +16 -11
  56. webscout/Provider/OPENAI/typefully.py +14 -7
  57. webscout/Provider/OPENAI/typegpt.py +10 -6
  58. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  59. webscout/Provider/OPENAI/venice.py +10 -6
  60. webscout/Provider/OPENAI/writecream.py +13 -10
  61. webscout/Provider/OPENAI/x0gpt.py +11 -9
  62. webscout/Provider/OPENAI/yep.py +12 -10
  63. webscout/Provider/PI.py +2 -1
  64. webscout/Provider/STT/__init__.py +3 -0
  65. webscout/Provider/STT/base.py +281 -0
  66. webscout/Provider/STT/elevenlabs.py +265 -0
  67. webscout/Provider/TTI/__init__.py +3 -1
  68. webscout/Provider/TTI/aiarta.py +399 -365
  69. webscout/Provider/TTI/base.py +74 -2
  70. webscout/Provider/TTI/fastflux.py +63 -30
  71. webscout/Provider/TTI/gpt1image.py +149 -0
  72. webscout/Provider/TTI/imagen.py +196 -0
  73. webscout/Provider/TTI/magicstudio.py +60 -29
  74. webscout/Provider/TTI/piclumen.py +43 -32
  75. webscout/Provider/TTI/pixelmuse.py +232 -225
  76. webscout/Provider/TTI/pollinations.py +43 -32
  77. webscout/Provider/TTI/together.py +287 -0
  78. webscout/Provider/TTI/utils.py +2 -1
  79. webscout/Provider/TTS/README.md +1 -0
  80. webscout/Provider/TTS/__init__.py +2 -1
  81. webscout/Provider/TTS/freetts.py +140 -0
  82. webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
  83. webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
  84. webscout/Provider/__init__.py +3 -2
  85. webscout/Provider/granite.py +41 -6
  86. webscout/Provider/oivscode.py +37 -37
  87. webscout/Provider/scira_chat.py +3 -2
  88. webscout/Provider/scnet.py +1 -0
  89. webscout/Provider/toolbaz.py +0 -1
  90. webscout/litagent/Readme.md +12 -3
  91. webscout/litagent/agent.py +99 -62
  92. webscout/version.py +1 -1
  93. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
  94. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
  95. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
  96. webscout/Provider/ChatGPTGratis.py +0 -194
  97. webscout/Provider/TTI/artbit.py +0 -0
  98. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
  99. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
  100. {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
@@ -33,6 +33,8 @@ class Completions(BaseCompletions):
33
33
  stream: bool = False,
34
34
  temperature: Optional[float] = None,
35
35
  top_p: Optional[float] = None,
36
+ timeout: Optional[int] = None,
37
+ proxies: Optional[dict] = None,
36
38
  **kwargs: Any
37
39
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
38
40
  """
@@ -49,17 +51,24 @@ class Completions(BaseCompletions):
49
51
  image = kwargs.get("image")
50
52
 
51
53
  if stream:
52
- return self._create_stream(request_id, created_time, model, formatted_prompt, image)
54
+ return self._create_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
53
55
  else:
54
- return self._create_non_stream(request_id, created_time, model, formatted_prompt, image)
56
+ return self._create_non_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
55
57
 
56
58
  def _create_stream(
57
- self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
59
+ self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
60
+ timeout: Optional[int] = None, proxies: Optional[dict] = None
58
61
  ) -> Generator[ChatCompletionChunk, None, None]:
62
+ original_proxies = self._client.session.proxies
63
+ if proxies is not None:
64
+ self._client.session.proxies = proxies
65
+ else:
66
+ self._client.session.proxies = {}
59
67
  try:
68
+ timeout_val = timeout if timeout is not None else self._client.timeout
60
69
  s = self._client.session
61
70
  # Create a new conversation if needed
62
- r = s.post(self._client.conversation_url)
71
+ r = s.post(self._client.conversation_url, timeout=timeout_val)
63
72
  if r.status_code != 200:
64
73
  raise RuntimeError(f"Failed to create conversation: {r.text}")
65
74
  conv_id = r.json().get("id")
@@ -70,13 +79,15 @@ class Completions(BaseCompletions):
70
79
  r = s.post(
71
80
  f"{self._client.url}/c/api/attachments",
72
81
  headers={"content-type": "image/jpeg"},
73
- data=image
82
+ data=image,
83
+ timeout=timeout_val
74
84
  )
75
85
  if r.status_code != 200:
76
86
  raise RuntimeError(f"Image upload failed: {r.text}")
77
87
  images.append({"type": "image", "url": r.json().get("url")})
78
88
 
79
89
  # Connect to websocket
90
+ # Note: ws_connect might not use timeout in the same way as POST/GET
80
91
  ws = s.ws_connect(self._client.websocket_url)
81
92
 
82
93
  # Use model to set mode ("reasoning" for Think Deeper)
@@ -165,12 +176,16 @@ class Completions(BaseCompletions):
165
176
 
166
177
  except Exception as e:
167
178
  raise RuntimeError(f"Stream error: {e}") from e
179
+ finally:
180
+ self._client.session.proxies = original_proxies
168
181
 
169
182
  def _create_non_stream(
170
- self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
183
+ self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
184
+ timeout: Optional[int] = None, proxies: Optional[dict] = None
171
185
  ) -> ChatCompletion:
172
186
  result = ""
173
- for chunk in self._create_stream(request_id, created_time, model, prompt_text, image):
187
+ # Pass timeout and proxies to the underlying _create_stream call
188
+ for chunk in self._create_stream(request_id, created_time, model, prompt_text, image, timeout=timeout, proxies=proxies):
174
189
  if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
175
190
  result += chunk.choices[0].delta.content
176
191
 
@@ -222,9 +237,10 @@ class Copilot(OpenAICompatibleProvider):
222
237
 
223
238
  AVAILABLE_MODELS = ["Copilot", "Think Deeper"]
224
239
 
225
- def __init__(self, timeout: int = 900, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
226
- self.timeout = timeout
227
- self.session = Session(timeout=timeout, impersonate=browser)
240
+ def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
241
+ self.timeout = 900
242
+ self.session = Session(impersonate=browser)
243
+ self.session.proxies = {}
228
244
 
229
245
  # Initialize tools
230
246
  self.available_tools = {}
@@ -5,8 +5,8 @@ import uuid
5
5
  from typing import List, Dict, Optional, Union, Generator, Any
6
6
 
7
7
  # Import base classes and utility structures
8
- from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
- from .utils import (
8
+ from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
9
+ from webscout.Provider.OPENAI.utils import (
10
10
  ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
11
11
  ChatCompletionMessage, CompletionUsage
12
12
  )
@@ -32,6 +32,8 @@ class Completions(BaseCompletions):
32
32
  stream: bool = False,
33
33
  temperature: Optional[float] = None,
34
34
  top_p: Optional[float] = None,
35
+ timeout: Optional[int] = None,
36
+ proxies: Optional[Dict[str, str]] = None,
35
37
  **kwargs: Any
36
38
  ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
37
39
  """
@@ -55,12 +57,13 @@ class Completions(BaseCompletions):
55
57
  created_time = int(time.time())
56
58
 
57
59
  if stream:
58
- return self._create_stream(request_id, created_time, model, payload)
60
+ return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
59
61
  else:
60
- return self._create_non_stream(request_id, created_time, model, payload)
62
+ return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
61
63
 
62
64
  def _create_stream(
63
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
65
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
66
+ timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
64
67
  ) -> Generator[ChatCompletionChunk, None, None]:
65
68
  try:
66
69
  response = self._client.session.post(
@@ -68,7 +71,8 @@ class Completions(BaseCompletions):
68
71
  headers=self._client.headers,
69
72
  json=payload,
70
73
  stream=True,
71
- timeout=self._client.timeout
74
+ timeout=timeout or self._client.timeout,
75
+ proxies=proxies
72
76
  )
73
77
  response.raise_for_status()
74
78
 
@@ -168,14 +172,16 @@ class Completions(BaseCompletions):
168
172
  raise
169
173
 
170
174
  def _create_non_stream(
171
- self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
175
+ self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
176
+ timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
172
177
  ) -> ChatCompletion:
173
178
  try:
174
179
  response = self._client.session.post(
175
180
  self._client.base_url,
176
181
  headers=self._client.headers,
177
182
  json=payload,
178
- timeout=self._client.timeout
183
+ timeout=timeout or self._client.timeout,
184
+ proxies=proxies
179
185
  )
180
186
  response.raise_for_status()
181
187
  data = response.json()
@@ -227,7 +233,7 @@ class DeepInfra(OpenAICompatibleProvider):
227
233
 
228
234
  AVAILABLE_MODELS = [
229
235
  # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
230
-
236
+ "deepseek-ai/DeepSeek-R1-0528",
231
237
  "deepseek-ai/DeepSeek-R1",
232
238
  "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
233
239
  "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
@@ -284,8 +290,8 @@ class DeepInfra(OpenAICompatibleProvider):
284
290
  # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
285
291
  ]
286
292
 
287
- def __init__(self, timeout: Optional[int] = None, browser: str = "chrome"):
288
- self.timeout = timeout
293
+ def __init__(self, browser: str = "chrome"):
294
+ self.timeout = None # Default timeout
289
295
  self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
290
296
  self.session = requests.Session()
291
297
 
@@ -319,4 +325,15 @@ class DeepInfra(OpenAICompatibleProvider):
319
325
  class _ModelList:
320
326
  def list(inner_self):
321
327
  return type(self).AVAILABLE_MODELS
322
- return _ModelList()
328
+ return _ModelList()
329
+
330
+ if __name__ == "__main__":
331
+ # Example usage
332
+ client = DeepInfra()
333
+ response = client.chat.completions.create(
334
+ model="deepseek-ai/DeepSeek-R1-0528",
335
+ messages=[{"role": "user", "content": "Hello, how are you?"}],
336
+ max_tokens=100,
337
+ stream=False
338
+ )
339
+ print(response)