webscout-8.2.9-py3-none-any.whl → webscout-8.3.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic; review the changes below before upgrading.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/opkfc.py +31 -8

@@ -1,3 +1,4 @@
+from datetime import datetime
 import time
 import uuid
 import requests
@@ -6,12 +7,12 @@ import random
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
-
+from webscout.litagent import LitAgent
 # ANSI escape codes for formatting
 BOLD = "\033[1m"
 RED = "\033[91m"
@@ -30,6 +31,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -56,6 +59,8 @@ class Completions(BaseCompletions):
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
+            timeout=timeout,
+            proxies=proxies,
             **kwargs
         )
 
@@ -66,6 +71,8 @@ class Completions(BaseCompletions):
             max_tokens=max_tokens,
             temperature=temperature,
             top_p=top_p,
+            timeout=timeout,
+            proxies=proxies,
             **kwargs
         )
 
@@ -77,6 +84,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
@@ -144,7 +153,8 @@ class Completions(BaseCompletions):
                 headers=headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -254,6 +264,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
@@ -322,7 +334,8 @@ class Completions(BaseCompletions):
                 headers=headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -454,7 +467,9 @@ class OPKFC(OpenAICompatibleProvider):
         "auto",
         "o4-mini",
         "gpt-4o-mini",
-        "gpt-4o"
+        "gpt-4o",
+        "gpt-4-1-mini",
+
     ]
 
     def __init__(
@@ -479,10 +494,10 @@ class OPKFC(OpenAICompatibleProvider):
             self.session.proxies.update(proxies)
 
         # Set the user agent to match the original script
-        self.user_agent =
+        self.user_agent = LitAgent().random()
 
         # Set the cookie from the original script
-        self.cookie = "__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%
+        self.cookie = f"__vtins__KUc0LhjVWFNXQv11=%7B%22sid%22%3A%20%{uuid.uuid4().hex}%22%2C%20%22vd%22%3A%201%2C%20%22stt%22%3A%200%2C%20%22dr%22%3A%200%2C%20%22expires%22%3A%201744896723481%2C%20%22ct%22%3A%201744894923481%7D; __51uvsct__KUc0LhjVWFNXQv11=1; __51vcke__KUc0LhjVWFNXQv11=06da852c-bb56-547c-91a8-43a0d485ffed; __51vuft__KUc0LhjVWFNXQv11=1744894923504; gfsessionid=1ochrgv17vy4sbd98xmwt6crpmkxwlqf; oai-nav-state=1; p_uv_id=ad86646801bc60d6d95f6098e4ee7450; _dd_s=rum=0&expire=1744895920821&logs=1&id={uuid.uuid4().hex}&created={int(datetime.utcnow().timestamp() * 1000)}"
 
         # Initialize chat interface
         self.chat = Chat(self)
@@ -494,3 +509,11 @@ class OPKFC(OpenAICompatibleProvider):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
 
+if __name__ == "__main__":
+    # Example usage
+    client = OPKFC()
+    response = client.chat.completions.create(
+        model="auto",
+        messages=[{"role": "user", "content": "Hello!"}]
+    )
+    print(response.choices[0].message.content)
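The change repeated across all of these providers is the same plumbing: create() and its streaming/non-streaming helpers now accept per-request timeout and proxies overrides that fall back to the client-level defaults. A minimal sketch of the resolution pattern, assuming a client object with timeout and (optionally) proxies attributes; the names here are illustrative, not webscout's internals:

import requests
from typing import Any, Dict, Optional

class _Client:
    """Stand-in for a provider client; `timeout` and `proxies` are its defaults."""
    timeout: int = 30
    proxies: Optional[Dict[str, str]] = None

def post_with_overrides(
    client: _Client,
    url: str,
    payload: Dict[str, Any],
    timeout: Optional[int] = None,
    proxies: Optional[Dict[str, str]] = None,
) -> requests.Response:
    # Per-request values win; otherwise fall back to the client defaults.
    # getattr() keeps this safe on clients that never defined `proxies`.
    return requests.post(
        url,
        json=payload,
        timeout=timeout or client.timeout,
        proxies=proxies or getattr(client, "proxies", None),
    )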
webscout/Provider/OPENAI/scirachat.py +17 -10

@@ -45,6 +45,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,19 +77,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -100,7 +103,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(
@@ -225,13 +229,14 @@ class Completions(BaseCompletions):
             raise IOError(f"SciraChat request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -243,7 +248,8 @@ class Completions(BaseCompletions):
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(
@@ -332,12 +338,13 @@ class SciraChat(OpenAICompatibleProvider):
     AVAILABLE_MODELS = {
         "scira-default": "Grok3-mini", # thinking model
         "scira-grok-3": "Grok3",
-        "scira-anthropic": "
+        "scira-anthropic": "Claude 4 Sonnet",
+        "scira-anthropic-thinking": "Claude 4 Sonnet Thinking", # thinking model
         "scira-vision" : "Grok2-Vision", # vision model
         "scira-4o": "GPT4o",
         "scira-qwq": "QWQ-32B",
         "scira-o4-mini": "o4-mini",
-        "scira-google": "gemini 2.5 flash",
+        "scira-google": "gemini 2.5 flash Thinking", # thinking model
         "scira-google-pro": "gemini 2.5 pro",
         "scira-llama-4": "llama 4 Maverick",
     }
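SciraChat keeps a dict of provider aliases mapped to upstream model names, so callers use a scira-* alias while the client sends the mapped name. A hedged sketch of alias lookup with validation; the mapping values are taken from the diff above, but the helper itself is illustrative rather than webscout's actual code:

SCIRA_MODELS = {
    "scira-default": "Grok3-mini",
    "scira-anthropic": "Claude 4 Sonnet",
    "scira-anthropic-thinking": "Claude 4 Sonnet Thinking",
    "scira-google": "gemini 2.5 flash Thinking",
}

def resolve_model(alias: str) -> str:
    # Map a provider alias to the upstream model name, failing loudly
    # on unknown aliases instead of sending a bad value upstream.
    try:
        return SCIRA_MODELS[alias]
    except KeyError:
        raise ValueError(f"Unknown model {alias!r}; choose from {sorted(SCIRA_MODELS)}")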
webscout/Provider/OPENAI/sonus.py +10 -6

@@ -35,6 +35,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None, # Not used by SonusAI but kept for compatibility
         top_p: Optional[float] = None, # Not used by SonusAI but kept for compatibility
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any # Not used by SonusAI but kept for compatibility
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,12 +63,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, files)
+            return self._create_stream(request_id, created_time, model, files, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, files)
+            return self._create_non_stream(request_id, created_time, model, files, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = requests.post(
@@ -74,7 +76,8 @@ class Completions(BaseCompletions):
                 files=files,
                 headers=self._client.headers,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -131,14 +134,15 @@ class Completions(BaseCompletions):
             raise IOError(f"SonusAI request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, files: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, files: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = requests.post(
                 self._client.url,
                 files=files,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
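Unlike the JSON-posting providers, SonusAI submits its payload as multipart form data through requests' files= parameter. In requests, a (None, value) tuple sends a plain form field rather than a file attachment; a small sketch against httpbin, with field names that are illustrative rather than Sonus's real schema:

import requests

# (None, value) tuples send bare form fields in a multipart/form-data body.
files = {
    "message": (None, "Hello!"),
    "stream": (None, "true"),
}
resp = requests.post("https://httpbin.org/post", files=files, timeout=30)
print(resp.json()["form"])  # httpbin echoes the submitted form fields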
webscout/Provider/OPENAI/standardinput.py +18 -9

@@ -48,6 +48,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """Create a chat completion."""
@@ -85,16 +87,18 @@ class Completions(BaseCompletions):
 
         # Handle streaming vs non-streaming
         if stream:
-            return self._stream_request(request_id, created_time, model, payload)
+            return self._stream_request(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._non_stream_request(request_id, created_time, model, payload)
+            return self._non_stream_request(request_id, created_time, model, payload, timeout, proxies)
 
     def _non_stream_request(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handle non-streaming request."""
         try:
@@ -103,7 +107,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 cookies=self._client.cookies,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Check for errors
@@ -121,7 +126,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 cookies=self._client.cookies,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
@@ -189,7 +195,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handle streaming request."""
         try:
@@ -199,7 +207,8 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Check for errors
@@ -218,7 +227,8 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             if not response.ok:
                 raise IOError(f"Failed to generate response after identity refresh - ({response.status_code}, {response.reason}) - {error_content}")
@@ -430,4 +440,3 @@ class StandardInput(OpenAICompatibleProvider):
         def list(inner_self):
             return type(self).AVAILABLE_MODELS
     return _ModelList()
-
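StandardInput's request helpers retry exactly once after refreshing the client identity when the first POST fails. A sketch of that retry shape, assuming a hypothetical refresh_identity helper; webscout's actual refresh logic is not shown in this diff:

import requests
from typing import Any, Dict

def refresh_identity(session: requests.Session) -> None:
    # Hypothetical stand-in: re-acquire whatever cookies/tokens the API expects.
    session.cookies.clear()

def post_with_identity_refresh(session: requests.Session, url: str,
                               payload: Dict[str, Any]) -> requests.Response:
    response = session.post(url, json=payload, timeout=30)
    if response.ok:
        return response
    # One refresh, one retry; then give up loudly.
    refresh_identity(session)
    response = session.post(url, json=payload, timeout=30)
    if not response.ok:
        raise IOError(f"Failed after identity refresh - "
                      f"({response.status_code}, {response.reason})")
    return response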
webscout/Provider/OPENAI/textpollinations.py +14 -7

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,
         tools: Optional[List[Dict[str, Any]]] = None,
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -62,16 +64,18 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_streaming(request_id, created_time, model, payload)
+            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_streaming(request_id, created_time, model, payload)
+            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_streaming(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
         try:
@@ -82,7 +86,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             if not response.ok:
@@ -163,7 +168,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
         try:
@@ -173,7 +180,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             if not response.ok:
@@ -336,4 +344,3 @@ class TextPollinations(OpenAICompatibleProvider):
         def list(inner_self):
             return type(self).AVAILABLE_MODELS
     return _ModelList()
-
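TextPollinations is the one provider in this batch whose signature also threads OpenAI-style tools and tool_choice through to the payload. For reference, a payload in the standard OpenAI function-calling shape; the function definition is hypothetical, and the endpoint's exact level of tool support is whatever the provider implements:

payload = {
    "model": "openai",  # illustrative model name
    "messages": [{"role": "user", "content": "What is the weather in Paris?"}],
    "tools": [
        {
            "type": "function",
            "function": {
                "name": "get_weather",  # hypothetical tool
                "description": "Look up current weather for a city",
                "parameters": {
                    "type": "object",
                    "properties": {"city": {"type": "string"}},
                    "required": ["city"],
                },
            },
        }
    ],
    "tool_choice": "auto",
}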
webscout/Provider/OPENAI/toolbaz.py +16 -11

@@ -10,8 +10,8 @@ from datetime import datetime
 from typing import List, Dict, Optional, Union, Generator, Any
 
 from webscout.litagent import LitAgent
-from .base import BaseChat, BaseCompletions, OpenAICompatibleProvider
-from .utils import (
+from webscout.Provider.OPENAI.base import BaseChat, BaseCompletions, OpenAICompatibleProvider
+from webscout.Provider.OPENAI.utils import (
     ChatCompletion,
     ChatCompletionChunk,
     Choice,
@@ -40,6 +40,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -68,16 +70,18 @@ class Completions(BaseCompletions):
 
         # Handle streaming response
         if stream:
-            return self._handle_streaming_response(request_id, created_time, model, data)
+            return self._handle_streaming_response(request_id, created_time, model, data, timeout, proxies)
         else:
-            return self._handle_non_streaming_response(request_id, created_time, model, data)
+            return self._handle_non_streaming_response(request_id, created_time, model, data, timeout, proxies)
 
     def _handle_streaming_response(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        data: Dict[str, Any]
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handle streaming response from Toolbaz API"""
         try:
@@ -85,8 +89,8 @@ class Completions(BaseCompletions):
                 "https://data.toolbaz.com/writing.php",
                 data=data,
                 stream=True,
-                proxies=self._client
-                timeout=self._client.timeout
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
             )
             resp.raise_for_status()
 
@@ -219,15 +223,17 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        data: Dict[str, Any]
+        data: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handle non-streaming response from Toolbaz API"""
         try:
             resp = self._client.session.post(
                 "https://data.toolbaz.com/writing.php",
                 data=data,
-                proxies=self._client
-                timeout=self._client.timeout
+                proxies=proxies or getattr(self._client, "proxies", None),
+                timeout=timeout or self._client.timeout
             )
             resp.raise_for_status()
 
@@ -296,7 +302,6 @@ class Toolbaz(OpenAICompatibleProvider):
         "Llama-4-Scout",
         "Llama-3.3-70B",
         "Qwen2.5-72B",
-        "Qwen2-72B",
         "grok-2-1212",
         "grok-3-beta",
         "toolbaz_v3.5_pro",
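Note that the Toolbaz hunks fix genuine 8.2.9 bugs rather than just adding parameters: the old calls passed proxies=self._client (the client object itself, with a missing trailing comma) where a proxy mapping was expected. The provider streams a form-encoded POST; a generic sketch of that consumption pattern, using httpbin as a stand-in endpoint (it merely echoes, but iter_lines works the same way on a real streaming response):

import requests

resp = requests.post(
    "https://httpbin.org/post",      # stand-in for the provider endpoint
    data={"text": "Write a haiku"},  # form-encoded body, as in the diff
    stream=True,
    timeout=30,
)
resp.raise_for_status()
for line in resp.iter_lines(decode_unicode=True):
    if line:  # skip keep-alive blank lines
        print(line)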
webscout/Provider/OPENAI/typefully.py +14 -7

@@ -36,6 +36,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         stream: bool = False,
         temperature: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -67,16 +69,18 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_streaming(request_id, created_time, model, payload)
+            return self._create_streaming(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_streaming(request_id, created_time, model, payload)
+            return self._create_non_streaming(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_streaming(
         self,
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
         try:
@@ -86,7 +90,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome120"
             )
 
@@ -161,7 +166,9 @@ class Completions(BaseCompletions):
         request_id: str,
         created_time: int,
         model: str,
-        payload: Dict[str, Any]
+        payload: Dict[str, Any],
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
         try:
@@ -171,7 +178,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome120"
             )
 
@@ -352,4 +360,3 @@ class TypefullyAI(OpenAICompatibleProvider):
         def list(inner_self):
             return type(self).AVAILABLE_MODELS
     return _ModelList()
-
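TypefullyAI's post calls pass impersonate="chrome120", which is not a feature of the requests library: it is curl_cffi's browser-fingerprint impersonation, so the session here must be a curl_cffi one. A sketch, assuming curl_cffi is installed; the endpoint is a stand-in:

# pip install curl_cffi
from curl_cffi import requests as curl_requests

# The session presents Chrome 120's TLS/HTTP2 fingerprint, which many
# bot-protection layers use to tell browsers from plain HTTP clients.
session = curl_requests.Session(impersonate="chrome120")
resp = session.post(
    "https://httpbin.org/post",  # stand-in endpoint
    json={"prompt": "Hello"},
    timeout=30,
)
print(resp.status_code)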
webscout/Provider/OPENAI/typegpt.py +10 -6

@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,
         presence_penalty: Optional[float] = None,
         frequency_penalty: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -61,12 +63,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -74,7 +76,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses
@@ -209,14 +212,15 @@ class Completions(BaseCompletions):
             raise IOError(f"TypeGPT request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
 
             # Handle non-200 responses