webscout 8.2.9__py3-none-any.whl → 8.3__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +2 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +1017 -766
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +303 -282
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +3 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +78 -12
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +41 -22
- webscout/Provider/OPENAI/chatgptclone.py +32 -13
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +327 -321
- webscout/Provider/OPENAI/e2b.py +77 -99
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +14 -3
- webscout/Provider/OPENAI/scirachat.py +14 -8
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -10
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +367 -365
- webscout/Provider/OPENAI/yep.py +384 -382
- webscout/Provider/PI.py +2 -1
- webscout/Provider/__init__.py +0 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scnet.py +1 -0
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/RECORD +62 -61
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/chatgptclone.py

@@ -46,6 +46,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,21 +77,28 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             response = self._client.session.post(
                 f"{self._client.url}/api/chat",
                 headers=self._client.headers,
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )

             # Handle non-200 responses
@@ -104,7 +113,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
             if not response.ok:
                 raise IOError(
@@ -230,11 +239,20 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during ChatGPTClone stream request: {e}")
             raise IOError(f"ChatGPTClone request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             # For non-streaming, we still use streaming internally to collect the full response
             response = self._client.session.post(
                 f"{self._client.url}/api/chat",
@@ -242,7 +260,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )

             # Handle non-200 responses
@@ -257,7 +275,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
             if not response.ok:
                 raise IOError(
@@ -330,6 +348,8 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during ChatGPTClone non-stream request: {e}")
             raise IOError(f"ChatGPTClone request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies

 class Chat(BaseChat):
     def __init__(self, client: 'ChatGPTClone'):
@@ -352,7 +372,6 @@ class ChatGPTClone(OpenAICompatibleProvider):

     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome",
         impersonate: str = "chrome120"
     ):
@@ -360,16 +379,16 @@ class ChatGPTClone(OpenAICompatibleProvider):
         Initialize the ChatGPTClone client.

         Args:
-            timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent (for LitAgent fallback)
             impersonate: Browser impersonation for curl_cffi (default: chrome120)
         """
-        self.timeout =
+        self.timeout = 30
         self.temperature = 0.6  # Default temperature
         self.top_p = 0.7  # Default top_p

         # Use curl_cffi for Cloudflare bypass and browser impersonation
-        self.session = Session(impersonate=impersonate
+        self.session = Session(impersonate=impersonate)
+        self.session.proxies = {}

         # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
@@ -405,7 +424,7 @@ class ChatGPTClone(OpenAICompatibleProvider):
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         impersonate = impersonate or "chrome120"
         self.fingerprint = LitAgent().generate_fingerprint(browser)
-        self.session = Session(impersonate=impersonate
+        self.session = Session(impersonate=impersonate)
         # Update headers with new fingerprint
         self.headers.update({
             "Accept": self.fingerprint["accept"],
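The common thread in the chatgptclone.py hunks above: every request path now accepts optional timeout and proxies keywords, swaps the session's proxy mapping in for the duration of the request, and restores the original mapping in a finally block so the override cannot leak into later calls. A minimal sketch of that pattern, assuming a plain requests.Session (the real code uses a curl_cffi session) and a placeholder helper name and URL:

    # Sketch of the save/override/restore pattern; `post_with_overrides`
    # is illustrative, not part of webscout's API.
    from typing import Optional

    import requests

    def post_with_overrides(
        session: requests.Session,
        url: str,
        payload: dict,
        default_timeout: int = 30,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
    ) -> requests.Response:
        original_proxies = session.proxies  # save whatever the session had
        session.proxies = proxies if proxies is not None else {}
        try:
            timeout_val = timeout if timeout is not None else default_timeout
            return session.post(url, json=payload, timeout=timeout_val)
        finally:
            session.proxies = original_proxies  # restore even if the request raised

Because the restore sits in finally, the override covers every call inside the try block (including the retry POSTs above) and is undone on both success and failure.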
webscout/Provider/OPENAI/chatsandbox.py

@@ -33,6 +33,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -70,13 +72,15 @@ class Completions(BaseCompletions):
         }
         session = requests.Session()
         session.headers.update(headers)
+        session.proxies = proxies if proxies is not None else {}
+
         def for_stream():
             try:
                 response = session.post(
                     url,
                     json=payload,
                     stream=True,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 streaming_text = ""
@@ -116,7 +120,7 @@ class Completions(BaseCompletions):
                 response = session.post(
                     url,
                     json=payload,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 text = response.text
@@ -152,7 +156,7 @@ class Chat(BaseChat):
         self.completions = Completions(client)

 class ChatSandbox(OpenAICompatibleProvider):
-    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large", "deepseek-r1", "deepseek-r1-full", "gemini-thinking", "openai-o1-mini", "llama", "mistral", "gemma-3"]
     chat: Chat
     def __init__(self):
         self.chat = Chat(self)
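chatsandbox.py takes a simpler route: it builds a fresh requests.Session per call, so nothing needs restoring; the proxy override is applied once and the inline timeout if timeout is not None else 30 preserves the old 30-second default. Assuming the client surface follows webscout's usual OpenAI-compatible chat.completions.create convention (the method name is inferred, not shown in this hunk), the new keywords would be used like this:

    # Hypothetical usage of the new per-request overrides.
    from webscout.Provider.OPENAI.chatsandbox import ChatSandbox

    client = ChatSandbox()
    response = client.chat.completions.create(
        model="deepseek-r1",  # one of the newly added AVAILABLE_MODELS entries
        messages=[{"role": "user", "content": "Hello"}],
        timeout=60,  # falls back to 30 seconds when omitted
        proxies={"https": "http://127.0.0.1:8080"},  # per-request proxy
    )

Note that the expanded AVAILABLE_MODELS list now contains "llama" twice; deduplicating it would be a harmless follow-up.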
webscout/Provider/OPENAI/copilot.py

@@ -33,6 +33,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -49,17 +51,24 @@ class Completions(BaseCompletions):
         image = kwargs.get("image")

         if stream:
-            return self._create_stream(request_id, created_time, model, formatted_prompt, image)
+            return self._create_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image)
+            return self._create_non_stream(request_id, created_time, model, formatted_prompt, image, timeout=timeout, proxies=proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             s = self._client.session
             # Create a new conversation if needed
-            r = s.post(self._client.conversation_url)
+            r = s.post(self._client.conversation_url, timeout=timeout_val)
             if r.status_code != 200:
                 raise RuntimeError(f"Failed to create conversation: {r.text}")
             conv_id = r.json().get("id")
@@ -70,13 +79,15 @@ class Completions(BaseCompletions):
                 r = s.post(
                     f"{self._client.url}/c/api/attachments",
                     headers={"content-type": "image/jpeg"},
-                    data=image
+                    data=image,
+                    timeout=timeout_val
                 )
                 if r.status_code != 200:
                     raise RuntimeError(f"Image upload failed: {r.text}")
                 images.append({"type": "image", "url": r.json().get("url")})

             # Connect to websocket
+            # Note: ws_connect might not use timeout in the same way as POST/GET
             ws = s.ws_connect(self._client.websocket_url)

             # Use model to set mode ("reasoning" for Think Deeper)
@@ -165,12 +176,16 @@ class Completions(BaseCompletions):

         except Exception as e:
             raise RuntimeError(f"Stream error: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None
+        self, request_id: str, created_time: int, model: str, prompt_text: str, image: Optional[bytes] = None,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
         result = ""
-
+        # Pass timeout and proxies to the underlying _create_stream call
+        for chunk in self._create_stream(request_id, created_time, model, prompt_text, image, timeout=timeout, proxies=proxies):
             if hasattr(chunk, 'choices') and chunk.choices and hasattr(chunk.choices[0], 'delta') and chunk.choices[0].delta.content:
                 result += chunk.choices[0].delta.content

@@ -222,9 +237,10 @@ class Copilot(OpenAICompatibleProvider):

     AVAILABLE_MODELS = ["Copilot", "Think Deeper"]

-    def __init__(self,
-        self.timeout =
-        self.session = Session(
+    def __init__(self, browser: str = "chrome", tools: Optional[List] = None, **kwargs):
+        self.timeout = 900
+        self.session = Session(impersonate=browser)
+        self.session.proxies = {}

         # Initialize tools
         self.available_tools = {}