webscout 8.2.9__py3-none-any.whl → 8.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout has been flagged as potentially problematic.
- webscout/AIauto.py +6 -6
- webscout/AIbase.py +61 -1
- webscout/Extra/YTToolkit/ytapi/patterns.py +45 -45
- webscout/Extra/YTToolkit/ytapi/stream.py +1 -1
- webscout/Extra/YTToolkit/ytapi/video.py +10 -10
- webscout/Extra/autocoder/autocoder_utiles.py +1 -1
- webscout/Litlogger/formats.py +9 -0
- webscout/Litlogger/handlers.py +18 -0
- webscout/Litlogger/logger.py +43 -1
- webscout/Provider/AISEARCH/scira_search.py +3 -2
- webscout/Provider/Blackboxai.py +2 -0
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +1 -1
- webscout/Provider/HeckAI.py +1 -1
- webscout/Provider/LambdaChat.py +8 -1
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +396 -113
- webscout/Provider/OPENAI/Cloudflare.py +31 -14
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +29 -13
- webscout/Provider/OPENAI/NEMOTRON.py +26 -14
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +161 -140
- webscout/Provider/OPENAI/README.md +3 -0
- webscout/Provider/OPENAI/TogetherAI.py +355 -0
- webscout/Provider/OPENAI/TwoAI.py +29 -12
- webscout/Provider/OPENAI/__init__.py +4 -1
- webscout/Provider/OPENAI/ai4chat.py +33 -23
- webscout/Provider/OPENAI/api.py +375 -24
- webscout/Provider/OPENAI/autoproxy.py +39 -0
- webscout/Provider/OPENAI/base.py +91 -12
- webscout/Provider/OPENAI/c4ai.py +31 -10
- webscout/Provider/OPENAI/chatgpt.py +56 -24
- webscout/Provider/OPENAI/chatgptclone.py +46 -16
- webscout/Provider/OPENAI/chatsandbox.py +7 -3
- webscout/Provider/OPENAI/copilot.py +26 -10
- webscout/Provider/OPENAI/deepinfra.py +29 -12
- webscout/Provider/OPENAI/e2b.py +358 -158
- webscout/Provider/OPENAI/exaai.py +13 -10
- webscout/Provider/OPENAI/exachat.py +10 -6
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +10 -6
- webscout/Provider/OPENAI/glider.py +10 -6
- webscout/Provider/OPENAI/heckai.py +11 -8
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +10 -7
- webscout/Provider/OPENAI/multichat.py +3 -1
- webscout/Provider/OPENAI/netwrck.py +10 -6
- webscout/Provider/OPENAI/oivscode.py +12 -9
- webscout/Provider/OPENAI/opkfc.py +31 -8
- webscout/Provider/OPENAI/scirachat.py +17 -10
- webscout/Provider/OPENAI/sonus.py +10 -6
- webscout/Provider/OPENAI/standardinput.py +18 -9
- webscout/Provider/OPENAI/textpollinations.py +14 -7
- webscout/Provider/OPENAI/toolbaz.py +16 -11
- webscout/Provider/OPENAI/typefully.py +14 -7
- webscout/Provider/OPENAI/typegpt.py +10 -6
- webscout/Provider/OPENAI/uncovrAI.py +22 -8
- webscout/Provider/OPENAI/venice.py +10 -6
- webscout/Provider/OPENAI/writecream.py +13 -10
- webscout/Provider/OPENAI/x0gpt.py +11 -9
- webscout/Provider/OPENAI/yep.py +12 -10
- webscout/Provider/PI.py +2 -1
- webscout/Provider/STT/__init__.py +3 -0
- webscout/Provider/STT/base.py +281 -0
- webscout/Provider/STT/elevenlabs.py +265 -0
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +399 -365
- webscout/Provider/TTI/base.py +74 -2
- webscout/Provider/TTI/fastflux.py +63 -30
- webscout/Provider/TTI/gpt1image.py +149 -0
- webscout/Provider/TTI/imagen.py +196 -0
- webscout/Provider/TTI/magicstudio.py +60 -29
- webscout/Provider/TTI/piclumen.py +43 -32
- webscout/Provider/TTI/pixelmuse.py +232 -225
- webscout/Provider/TTI/pollinations.py +43 -32
- webscout/Provider/TTI/together.py +287 -0
- webscout/Provider/TTI/utils.py +2 -1
- webscout/Provider/TTS/README.md +1 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/freetts.py +140 -0
- webscout/Provider/UNFINISHED/ChutesAI.py +314 -0
- webscout/Provider/UNFINISHED/fetch_together_models.py +95 -0
- webscout/Provider/__init__.py +3 -2
- webscout/Provider/granite.py +41 -6
- webscout/Provider/oivscode.py +37 -37
- webscout/Provider/scira_chat.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/toolbaz.py +0 -1
- webscout/litagent/Readme.md +12 -3
- webscout/litagent/agent.py +99 -62
- webscout/version.py +1 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/METADATA +2 -1
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/RECORD +98 -87
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/WHEEL +1 -1
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/artbit.py +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/entry_points.txt +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.9.dist-info → webscout-8.3.1.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/base.py
CHANGED
```diff
@@ -3,12 +3,11 @@ from typing import List, Dict, Optional, Union, Generator, Any, TypedDict, Calla
 import json
 import logging
 from dataclasses import dataclass
-
 logger = logging.getLogger(__name__)
 
 
 # Import the utils for response structures
-from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk
+from webscout.Provider.OPENAI.utils import ChatCompletion, ChatCompletionChunk
 
 # Define tool-related structures
 class ToolDefinition(TypedDict):
@@ -78,6 +77,8 @@ class BaseCompletions(ABC):
         top_p: Optional[float] = None,
         tools: Optional[List[Union[Tool, Dict[str, Any]]]] = None,  # Support for tool definitions
         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,  # Support for tool_choice parameter
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -172,10 +173,93 @@ class BaseChat(ABC):
     completions: BaseCompletions
 
 
+# class ProxyAutoMeta(ABCMeta):
+#     """
+#     Metaclass to ensure all OpenAICompatibleProvider subclasses automatically get proxy support.
+#     This will inject proxies into any requests.Session, httpx.Client, or curl_cffi session attributes found on the instance.
+
+#     To disable automatic proxy injection, set disable_auto_proxy=True in the constructor or
+#     set the class attribute DISABLE_AUTO_PROXY = True.
+#     """
+#     def __call__(cls, *args, **kwargs):
+#         instance = super().__call__(*args, **kwargs)
+
+#         # Check if auto proxy is disabled
+#         disable_auto_proxy = kwargs.get('disable_auto_proxy', False) or getattr(cls, 'DISABLE_AUTO_PROXY', False)
+
+#         proxies = getattr(instance, 'proxies', None) or kwargs.get('proxies', None)
+#         if proxies is None and not disable_auto_proxy:
+#             try:
+#                 proxies = {"http": get_auto_proxy(), "https": get_auto_proxy()}
+#             except Exception as e:
+#                 logger.warning(f"Failed to get auto proxy, disabling proxy support: {e}")
+#                 proxies = {}
+#         elif proxies is None:
+#             proxies = {}
+#         instance.proxies = proxies
+#         # Patch sessions if we have valid proxies
+#         if proxies:
+#             for attr in dir(instance):
+#                 obj = getattr(instance, attr)
+#                 if isinstance(obj, requests.Session):
+#                     obj.proxies.update(proxies)
+#                 if httpx and isinstance(obj, httpx.Client):
+#                     try:
+#                         obj._proxies = proxies
+#                     except Exception:
+#                         pass
+#                 # Patch curl_cffi sessions if present
+#                 if CurlSession and isinstance(obj, CurlSession):
+#                     try:
+#                         obj.proxies.update(proxies)
+#                     except Exception:
+#                         pass
+#                 if CurlAsyncSession and isinstance(obj, CurlAsyncSession):
+#                     try:
+#                         obj.proxies.update(proxies)
+#                     except Exception:
+#                         pass
+#         # Provide helpers for proxied sessions
+#         def get_proxied_session():
+#             s = requests.Session()
+#             s.proxies.update(proxies)
+#             return s
+#         instance.get_proxied_session = get_proxied_session
+
+#         def get_proxied_curl_session(impersonate="chrome120", **kwargs):
+#             """Get a curl_cffi Session with proxies configured"""
+#             if CurlSession:
+#                 return CurlSession(proxies=proxies, impersonate=impersonate, **kwargs)
+#             else:
+#                 raise ImportError("curl_cffi is not installed")
+#         instance.get_proxied_curl_session = get_proxied_curl_session
+
+#         def get_proxied_curl_async_session(impersonate="chrome120", **kwargs):
+#             """Get a curl_cffi AsyncSession with proxies configured"""
+#             if CurlAsyncSession:
+#                 return CurlAsyncSession(proxies=proxies, impersonate=impersonate, **kwargs)
+#             else:
+#                 raise ImportError("curl_cffi is not installed")
+#         instance.get_proxied_curl_async_session = get_proxied_curl_async_session
+
+#         return instance
+# class OPENAICompatibleMeta(ABC, metaclass=ProxyAutoMeta):
 class OpenAICompatibleProvider(ABC):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
+    All subclasses automatically get proxy support via ProxyAutoMeta.
+
+    # Available proxy helpers:
+    # - self.get_proxied_session() - returns a requests.Session with proxies
+    # - self.get_proxied_curl_session() - returns a curl_cffi.Session with proxies
+    # - self.get_proxied_curl_async_session() - returns a curl_cffi.AsyncSession with proxies
+
+    # Proxy support is automatically injected into:
+    # - requests.Session objects
+    # - httpx.Client objects
+    # - curl_cffi.requests.Session objects
+    # - curl_cffi.requests.AsyncSession objects
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {}  # Dictionary of available tools
@@ -183,19 +267,14 @@ class OpenAICompatibleProvider(ABC):
     supports_tool_choice: bool = False  # Whether the provider supports tool_choice
 
     @abstractmethod
-    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, **kwargs: Any):
-        """
-        Initialize the provider, potentially with an API key and tools.
-
-        Args:
-            api_key: Optional API key for the provider
-            tools: Optional list of tools to make available to the provider
-            **kwargs: Additional provider-specific parameters
-        """
+    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, **kwargs: Any):
        self.available_tools = {}
        if tools:
            self.register_tools(tools)
-
+        # self.proxies is set by ProxyAutoMeta
+        # Subclasses should use self.proxies for all network requests
+        # Optionally, use self.get_proxied_session() for a requests.Session with proxies
+        # raise NotImplementedError  # <-- Commented out for metaclass test
 
     @property
     @abstractmethod
```
webscout/Provider/OPENAI/c4ai.py
CHANGED
```diff
@@ -49,6 +49,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,14 +90,21 @@ class Completions(BaseCompletions):
 
         # Pass the formatted conversation prompt
         if stream:
-            return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
+            return self._create_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt)
+            return self._create_non_stream(request_id, created_time, model, conversation_id, conversation_prompt, system_prompt, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
+        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             message_id = self._client._conversation_data[model]["messageId"]
             url = f"{self._client.url}/api/chat/message"
             payload = {
@@ -117,7 +126,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -160,11 +170,20 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during C4AI stream request: {e}")
             raise IOError(f"C4AI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str
+        self, request_id: str, created_time: int, model: str, conversation_id: str, prompt: str, system_prompt: str,
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             message_id = self._client._conversation_data[model]["messageId"]
             url = f"{self._client.url}/api/chat/message"
             payload = {
@@ -185,7 +204,8 @@ class Completions(BaseCompletions):
                 url,
                 headers=self._client.headers,
                 json=payload,
-                timeout=
+                timeout=timeout_val,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()
 
@@ -213,6 +233,8 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during C4AI non-stream request: {e}")
             raise IOError(f"C4AI request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
 class Chat(BaseChat):
     def __init__(self, client: 'C4AI'):
@@ -242,19 +264,18 @@ class C4AI(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome"
     ):
         """
         Initialize the C4AI client.
 
         Args:
-            timeout: Request timeout in seconds.
             browser: Browser name for LitAgent to generate User-Agent.
         """
-        self.timeout =
+        self.timeout = 30
         self.url = "https://cohereforai-c4ai-command.hf.space"
         self.session = requests.Session()
+        self.session.proxies = {}
         self.max_tokens_to_sample = 2000
 
         agent = LitAgent()
@@ -370,4 +391,4 @@ class C4AI(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
```
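c4ai.py applies the pattern most providers in this release follow: stash the session's proxy map, swap in the per-request value, and restore the original in a `finally` block so one call cannot leak its proxy settings into the next. A standalone sketch of that save/patch/restore idiom, with illustrative names (not the package's actual helpers):

```python
import requests
from typing import Optional

def post_with_overrides(session: requests.Session, url: str, payload: dict,
                        timeout: Optional[int] = None,
                        proxies: Optional[dict] = None,
                        default_timeout: int = 30) -> requests.Response:
    original_proxies = session.proxies                  # remember the current config
    session.proxies = proxies if proxies is not None else {}
    try:
        timeout_val = timeout if timeout is not None else default_timeout
        return session.post(url, json=payload, timeout=timeout_val)
    finally:
        session.proxies = original_proxies              # restore even if the request raises
```

The `finally` matters because the session object is shared across calls; without it, an exception mid-request would leave the override in place for every later request.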
webscout/Provider/OPENAI/chatgpt.py
CHANGED

```diff
@@ -9,8 +9,8 @@ from datetime import datetime, timedelta
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
@@ -21,13 +21,37 @@ RED = "\033[91m"
 RESET = "\033[0m"
 
 class ChatGPTReversed:
+    AVAILABLE_MODELS = [
+        "auto",
+        "gpt-4o-mini",
+        "gpt-4o",
+        "o4-mini",
+        "gpt-4-1",
+        "gpt-4-1-mini",
+        "o3",
+        "o4-mini-high"
+
+
+
+    ]
     csrf_token = None
     initialized = False
-
+
+    _instance = None
+
+    def __new__(cls, model="auto"):
+        if cls._instance is None:
+            cls._instance = super(ChatGPTReversed, cls).__new__(cls)
+            cls._instance.initialized = False
+        return cls._instance
 
     def __init__(self, model="auto"):
-        if
-
+        if self.initialized:
+            # Already initialized, just update model if needed
+            if model not in self.AVAILABLE_MODELS:
+                raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+            self.model = model
+            return
 
         if model not in self.AVAILABLE_MODELS:
             raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
@@ -332,6 +356,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -362,6 +388,8 @@ class Completions(BaseCompletions):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )
 
@@ -372,6 +400,8 @@ class Completions(BaseCompletions):
                 max_tokens=max_tokens,
                 temperature=temperature,
                 top_p=top_p,
+                timeout=timeout,
+                proxies=proxies,
                 **kwargs
             )
 
@@ -383,6 +413,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Implementation for streaming chat completions."""
@@ -448,6 +480,8 @@ class Completions(BaseCompletions):
         max_tokens: Optional[int] = None,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         """Implementation for non-streaming chat completions."""
@@ -523,34 +557,32 @@ class ChatGPT(OpenAICompatibleProvider):
         print(response.choices[0].message.content)
     """
 
-    AVAILABLE_MODELS = [
-        "auto",
-        "gpt-4o-mini",
-        "gpt-4o",
-        "o4-mini"
-    ]
-
     def __init__(
-        self
-        timeout: int = 60,
-        proxies: dict = {}
+        self
     ):
         """
         Initialize the ChatGPT client.
-
-        Args:
-            timeout: Request timeout in seconds
-            proxies: Optional proxy configuration
         """
-        self.timeout = timeout
-        self.proxies = proxies
-
         # Initialize chat interface
         self.chat = Chat(self)
 
+    @property
+    def AVAILABLE_MODELS(self):
+        return ChatGPTReversed.AVAILABLE_MODELS
+
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
-                return
-        return _ModelList()
+                return ChatGPTReversed.AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    # Example usage
+    client = ChatGPT()
+    response = client.chat.completions.create(
+        model="o4-mini-high",
+        messages=[{"role": "user", "content": "How manr r in strawberry"}]
+    )
+    print(response.choices[0].message.content)
+    print()
```
webscout/Provider/OPENAI/chatgptclone.py
CHANGED

```diff
@@ -7,8 +7,8 @@ import re
 from typing import List, Dict, Optional, Union, Generator, Any
 
 # Import base classes and utility structures
-from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
-from .utils import (
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage, count_tokens
 )
@@ -46,6 +46,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,21 +77,28 @@ class Completions(BaseCompletions):
         created_time = int(time.time())
 
         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             response = self._client.session.post(
                 f"{self._client.url}/api/chat",
                 headers=self._client.headers,
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
 
             # Handle non-200 responses
@@ -104,7 +113,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
             if not response.ok:
                 raise IOError(
@@ -230,11 +239,20 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during ChatGPTClone stream request: {e}")
             raise IOError(f"ChatGPTClone request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
    ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
+            timeout_val = timeout if timeout is not None else self._client.timeout
             # For non-streaming, we still use streaming internally to collect the full response
             response = self._client.session.post(
                 f"{self._client.url}/api/chat",
@@ -242,7 +260,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
 
             # Handle non-200 responses
@@ -257,7 +275,7 @@ class Completions(BaseCompletions):
                 cookies=self._client.cookies,
                 json=payload,
                 stream=True,
-                timeout=
+                timeout=timeout_val
             )
             if not response.ok:
                 raise IOError(
@@ -330,6 +348,8 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"Error during ChatGPTClone non-stream request: {e}")
             raise IOError(f"ChatGPTClone request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
 class Chat(BaseChat):
     def __init__(self, client: 'ChatGPTClone'):
@@ -352,7 +372,6 @@ class ChatGPTClone(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: Optional[int] = None,
         browser: str = "chrome",
         impersonate: str = "chrome120"
     ):
@@ -360,16 +379,16 @@ class ChatGPTClone(OpenAICompatibleProvider):
         Initialize the ChatGPTClone client.
 
         Args:
-            timeout: Request timeout in seconds (None for no timeout)
             browser: Browser to emulate in user agent (for LitAgent fallback)
             impersonate: Browser impersonation for curl_cffi (default: chrome120)
         """
-        self.timeout =
+        self.timeout = 30
         self.temperature = 0.6  # Default temperature
         self.top_p = 0.7  # Default top_p
 
         # Use curl_cffi for Cloudflare bypass and browser impersonation
-        self.session = Session(impersonate=impersonate
+        self.session = Session(impersonate=impersonate)
+        self.session.proxies = {}
 
         # Use LitAgent for fingerprint if available, else fallback
         agent = LitAgent()
@@ -405,7 +424,7 @@ class ChatGPTClone(OpenAICompatibleProvider):
         browser = browser or self.fingerprint.get("browser_type", "chrome")
         impersonate = impersonate or "chrome120"
         self.fingerprint = LitAgent().generate_fingerprint(browser)
-        self.session = Session(impersonate=impersonate
+        self.session = Session(impersonate=impersonate)
         # Update headers with new fingerprint
         self.headers.update({
             "Accept": self.fingerprint["accept"],
@@ -491,4 +510,15 @@ class ChatGPTClone(OpenAICompatibleProvider):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
-        return _ModelList()
+        return _ModelList()
+if __name__ == "__main__":
+    # Example usage
+    client = ChatGPTClone()
+    response = client.chat.completions.create(
+        model="gpt-4",
+        messages=[{"role": "user", "content": "Hello!"}]
+    )
+    print(response.choices[0].message.content)
+    print()
+    print("Proxies on instance:", client.proxies)
+    print("Proxies on session:", client.session.proxies)
```
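ChatGPTClone now constructs its curl_cffi session with explicit browser impersonation and an empty proxy map that the per-request code above swaps in and out. A hedged sketch of that setup, assuming curl_cffi is installed (the target URL is a placeholder):

```python
from curl_cffi.requests import Session

session = Session(impersonate="chrome120")  # present Chrome 120's TLS/HTTP fingerprint
session.proxies = {}                        # start clean; per-request code patches this
resp = session.get("https://example.com", timeout=30)
print(resp.status_code)
```

Impersonation is what lets these providers pass Cloudflare's browser checks; a plain `requests.Session` would be fingerprinted as a script.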
webscout/Provider/OPENAI/chatsandbox.py
CHANGED

```diff
@@ -33,6 +33,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -70,13 +72,15 @@ class Completions(BaseCompletions):
         }
         session = requests.Session()
         session.headers.update(headers)
+        session.proxies = proxies if proxies is not None else {}
+
         def for_stream():
             try:
                 response = session.post(
                     url,
                     json=payload,
                     stream=True,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 streaming_text = ""
@@ -116,7 +120,7 @@ class Completions(BaseCompletions):
                 response = session.post(
                     url,
                     json=payload,
-                    timeout=30
+                    timeout=timeout if timeout is not None else 30
                 )
                 response.raise_for_status()
                 text = response.text
@@ -152,7 +156,7 @@ class Chat(BaseChat):
         self.completions = Completions(client)
 
 class ChatSandbox(OpenAICompatibleProvider):
-    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large"]
+    AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large", "deepseek-r1", "deepseek-r1-full", "gemini-thinking", "openai-o1-mini", "llama", "mistral", "gemma-3"]
     chat: Chat
     def __init__(self):
         self.chat = Chat(self)
```