webscout 8.2.9 (py3-none-any.whl) → 8.3 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of webscout might be problematic.

Files changed (63)
  1. webscout/AIauto.py +2 -2
  2. webscout/Provider/Blackboxai.py +2 -0
  3. webscout/Provider/ChatSandbox.py +2 -1
  4. webscout/Provider/Deepinfra.py +1 -1
  5. webscout/Provider/HeckAI.py +1 -1
  6. webscout/Provider/LambdaChat.py +1 -0
  7. webscout/Provider/MCPCore.py +7 -3
  8. webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
  9. webscout/Provider/OPENAI/Cloudflare.py +31 -14
  10. webscout/Provider/OPENAI/FalconH1.py +457 -0
  11. webscout/Provider/OPENAI/FreeGemini.py +29 -13
  12. webscout/Provider/OPENAI/NEMOTRON.py +26 -14
  13. webscout/Provider/OPENAI/PI.py +427 -0
  14. webscout/Provider/OPENAI/Qwen3.py +303 -282
  15. webscout/Provider/OPENAI/TwoAI.py +29 -12
  16. webscout/Provider/OPENAI/__init__.py +3 -1
  17. webscout/Provider/OPENAI/ai4chat.py +33 -23
  18. webscout/Provider/OPENAI/api.py +78 -12
  19. webscout/Provider/OPENAI/base.py +2 -0
  20. webscout/Provider/OPENAI/c4ai.py +31 -10
  21. webscout/Provider/OPENAI/chatgpt.py +41 -22
  22. webscout/Provider/OPENAI/chatgptclone.py +32 -13
  23. webscout/Provider/OPENAI/chatsandbox.py +7 -3
  24. webscout/Provider/OPENAI/copilot.py +26 -10
  25. webscout/Provider/OPENAI/deepinfra.py +327 -321
  26. webscout/Provider/OPENAI/e2b.py +77 -99
  27. webscout/Provider/OPENAI/exaai.py +13 -10
  28. webscout/Provider/OPENAI/exachat.py +10 -6
  29. webscout/Provider/OPENAI/flowith.py +7 -3
  30. webscout/Provider/OPENAI/freeaichat.py +10 -6
  31. webscout/Provider/OPENAI/glider.py +10 -6
  32. webscout/Provider/OPENAI/heckai.py +11 -8
  33. webscout/Provider/OPENAI/llmchatco.py +9 -7
  34. webscout/Provider/OPENAI/mcpcore.py +10 -7
  35. webscout/Provider/OPENAI/multichat.py +3 -1
  36. webscout/Provider/OPENAI/netwrck.py +10 -6
  37. webscout/Provider/OPENAI/oivscode.py +12 -9
  38. webscout/Provider/OPENAI/opkfc.py +14 -3
  39. webscout/Provider/OPENAI/scirachat.py +14 -8
  40. webscout/Provider/OPENAI/sonus.py +10 -6
  41. webscout/Provider/OPENAI/standardinput.py +18 -9
  42. webscout/Provider/OPENAI/textpollinations.py +14 -7
  43. webscout/Provider/OPENAI/toolbaz.py +16 -10
  44. webscout/Provider/OPENAI/typefully.py +14 -7
  45. webscout/Provider/OPENAI/typegpt.py +10 -6
  46. webscout/Provider/OPENAI/uncovrAI.py +22 -8
  47. webscout/Provider/OPENAI/venice.py +10 -6
  48. webscout/Provider/OPENAI/writecream.py +166 -163
  49. webscout/Provider/OPENAI/x0gpt.py +367 -365
  50. webscout/Provider/OPENAI/yep.py +384 -382
  51. webscout/Provider/PI.py +2 -1
  52. webscout/Provider/__init__.py +0 -2
  53. webscout/Provider/granite.py +41 -6
  54. webscout/Provider/oivscode.py +37 -37
  55. webscout/Provider/scnet.py +1 -0
  56. webscout/version.py +1 -1
  57. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
  58. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
  59. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
  60. webscout/Provider/ChatGPTGratis.py +0 -194
  61. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
  62. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
  63. {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
The hunks below come from five OPENAI-compatible provider modules: toolbaz.py, typefully.py, typegpt.py, uncovrAI.py, and venice.py. Each makes the same change: create() gains optional per-request timeout and proxies parameters, which are threaded through to the streaming and non-streaming handlers and take precedence over the client-level defaults. toolbaz.py additionally replaces its relative imports with absolute ones.

webscout/Provider/OPENAI/toolbaz.py

@@ -10,8 +10,8 @@ from datetime import datetime
 from typing import List, Dict, Optional, Union, Generator, Any
 
 from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
+from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion,
     ChatCompletionChunk,
     Choice,
@@ -40,6 +40,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -68,16 +70,18 @@ class Completions(BaseCompletions):
 
         # Handle streaming response
         if stream:
-            return self._handle_streaming_response(request_id, created_time, model, data)
+            return self._handle_streaming_response(request_id, created_time, model, data, timeout, proxies)
         else:
-            return self._handle_non_streaming_response(request_id, created_time, model, data)
+            return self._handle_non_streaming_response(request_id, created_time, model, data, timeout, proxies)
 
     def _handle_streaming_response(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        data: Dict[str, Any]
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handle streaming response from Toolbaz API"""
         try:
@@ -85,8 +89,8 @@ class Completions(BaseCompletions):
                 "https://data.toolbaz.com/writing.php",
                 data=data,
                 stream=True,
-                proxies=self._client.proxies,
-                timeout=self._client.timeout
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
             )
             resp.raise_for_status()
 
@@ -219,15 +223,17 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        data: Dict[str, Any]
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handle non-streaming response from Toolbaz API"""
         try:
             resp = self._client.session.post(
                 "https://data.toolbaz.com/writing.php",
                 data=data,
-                proxies=self._client.proxies,
-                timeout=self._client.timeout
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
             )
             resp.raise_for_status()
 
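For reference, a minimal usage sketch of the new per-request overrides. The provider class name Toolbaz and the model id are assumptions for illustration; the diff shows only the Completions plumbing, not the module's public exports.

from webscout.Provider.OPENAI.toolbaz import Toolbaz  # class name assumed

client = Toolbaz()

# Omitting the new kwargs preserves 8.2.9 behavior: the client-level
# timeout and proxies are used for the request.
response = client.chat.completions.create(
    model="some-model",  # placeholder model id
    messages=[{"role": "user", "content": "Hello"}],
)

# Passing them overrides client.timeout / client.proxies for this call only.
response = client.chat.completions.create(
    model="some-model",
    messages=[{"role": "user", "content": "Hello"}],
    timeout=30,                                  # seconds
    proxies={"https": "http://127.0.0.1:8080"},  # per-request proxy
)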
webscout/Provider/OPENAI/typefully.py

@@ -36,6 +36,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         stream: bool = False,
         temperature: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -67,16 +69,18 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_streaming(request_id, created_time, model, payload)
+            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_streaming(request_id, created_time, model, payload)
+            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_streaming(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
         try:
@@ -86,7 +90,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome120"
             )
 
@@ -161,7 +166,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
         try:
@@ -171,7 +178,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome120"
             )
 
@@ -352,4 +360,3 @@ class TypefullyAI(OpenAICompatibleProvider):
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
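Note that every file in this set resolves the overrides with timeout or self._client.timeout and proxies or getattr(self._client, "proxies", None). Because or treats any falsy value as absent, an explicit timeout=0 or proxies={} silently falls back to the client default; an is None check would preserve them. A self-contained illustration:

client_timeout = 600  # stand-in for self._client.timeout

def resolve_or(timeout=None):
    return timeout or client_timeout  # the pattern used in these hunks

def resolve_is_none(timeout=None):
    return client_timeout if timeout is None else timeout  # stricter alternative

print(resolve_or(30))      # 30  -> override respected
print(resolve_or(None))    # 600 -> falls back, as intended
print(resolve_or(0))       # 600 -> falsy override silently ignored
print(resolve_is_none(0))  # 0   -> preserved by the stricter check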
webscout/Provider/OPENAI/typegpt.py

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,12 +63,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -74,7 +76,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -209,14 +212,15 @@ class Completions(BaseCompletions):
             raise IOError(f"TypeGPT request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
webscout/Provider/OPENAI/uncovrAI.py

@@ -38,6 +38,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -103,7 +105,9 @@ class Completions(BaseCompletions):
                 payload=payload,
                 model=model,
                 request_id=request_id,
-                created_time=created_time
+                created_time=created_time,
+                timeout=timeout,
+                proxies=proxies
             )
 
         # Handle non-streaming response
@@ -111,7 +115,9 @@ class Completions(BaseCompletions):
             payload=payload,
             model=model,
             request_id=request_id,
-            created_time=created_time
+            created_time=created_time,
+            timeout=timeout,
+            proxies=proxies
         )
 
     def _handle_streaming_response(
@@ -120,7 +126,9 @@ class Completions(BaseCompletions):
         payload: Dict[str, Any],
         model: str,
         request_id: str,
-        created_time: int
+        created_time: int,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handle streaming response from UncovrAI API."""
         try:
@@ -128,7 +136,8 @@ class Completions(BaseCompletions):
                 self._client.url,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             ) as response:
                 if response.status_code != 200:
                     # If we get a non-200 response, try refreshing our identity once
@@ -139,7 +148,8 @@ class Completions(BaseCompletions):
                         self._client.url,
                         json=payload,
                         stream=True,
-                        timeout=self._client.timeout
+                        timeout=timeout or self._client.timeout,
+                        proxies=proxies or getattr(self._client, "proxies", None)
                     ) as retry_response:
                         if not retry_response.ok:
                             raise IOError(
@@ -216,14 +226,17 @@ class Completions(BaseCompletions):
         payload: Dict[str, Any],
         model: str,
         request_id: str,
-        created_time: int
+        created_time: int,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handle non-streaming response from UncovrAI API."""
         try:
             response = self._client.session.post(
                 self._client.url,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             if response.status_code != 200:
@@ -232,7 +245,8 @@ class Completions(BaseCompletions):
                 response = self._client.session.post(
                     self._client.url,
                     json=payload,
-                    timeout=self._client.timeout
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
                 )
                 if not response.ok:
                     raise IOError(
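uncovrAI.py is the only provider in this set whose handlers retry once on a non-200 status, refreshing the client identity before resending; both attempts now use the same resolved timeout and proxies. A compressed sketch of that control flow, where refresh_identity is a stand-in name (the actual refresh call sits outside these hunks):

# Sketch of the retry-once pattern visible in the uncovrAI hunks.
def post_with_one_retry(session, url, payload, client, timeout=None, proxies=None):
    kwargs = dict(
        json=payload,
        timeout=timeout or client.timeout,                    # per-request override
        proxies=proxies or getattr(client, "proxies", None),  # client fallback
    )
    response = session.post(url, **kwargs)
    if response.status_code != 200:
        client.refresh_identity()  # assumed helper; the diff only shows the comment
        response = session.post(url, **kwargs)
        if not response.ok:
            raise IOError(f"Request failed after retry: {response.status_code}")
    return response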
webscout/Provider/OPENAI/venice.py

@@ -32,6 +32,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = 0.8,
         top_p: Optional[float] = 0.9,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -73,19 +75,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -211,7 +214,7 @@ class Completions(BaseCompletions):
             raise IOError(f"Venice request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             # For non-streaming, we still use streaming internally to collect the full response
@@ -219,7 +222,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
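Since the same two resolution expressions are pasted into every handler above, the logic could be captured once. A hypothetical consolidation, not present in webscout; the helper names are illustrative only:

from typing import Dict, Optional

def _resolve_timeout(client, timeout: Optional[int] = None) -> Optional[int]:
    # Per-request value wins; otherwise fall back to the client default.
    return timeout or client.timeout

def _resolve_proxies(client, proxies: Optional[Dict[str, str]] = None) -> Optional[Dict[str, str]]:
    # getattr guards clients that never set a `proxies` attribute.
    return proxies or getattr(client, "proxies", None)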