webscout 8.3.1__py3-none-any.whl → 8.3.3__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIutel.py +180 -78
- webscout/Bing_search.py +417 -0
- webscout/Extra/gguf.py +706 -177
- webscout/Provider/AISEARCH/__init__.py +1 -0
- webscout/Provider/AISEARCH/genspark_search.py +7 -7
- webscout/Provider/AISEARCH/stellar_search.py +132 -0
- webscout/Provider/ExaChat.py +84 -58
- webscout/Provider/GeminiProxy.py +140 -0
- webscout/Provider/HeckAI.py +85 -80
- webscout/Provider/Jadve.py +56 -50
- webscout/Provider/MCPCore.py +78 -75
- webscout/Provider/MiniMax.py +207 -0
- webscout/Provider/Nemotron.py +41 -13
- webscout/Provider/Netwrck.py +34 -51
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -4
- webscout/Provider/OPENAI/GeminiProxy.py +328 -0
- webscout/Provider/OPENAI/MiniMax.py +298 -0
- webscout/Provider/OPENAI/README.md +32 -29
- webscout/Provider/OPENAI/README_AUTOPROXY.md +238 -0
- webscout/Provider/OPENAI/TogetherAI.py +4 -17
- webscout/Provider/OPENAI/__init__.py +17 -1
- webscout/Provider/OPENAI/autoproxy.py +1067 -39
- webscout/Provider/OPENAI/base.py +17 -76
- webscout/Provider/OPENAI/deepinfra.py +42 -108
- webscout/Provider/OPENAI/e2b.py +0 -1
- webscout/Provider/OPENAI/flowith.py +179 -166
- webscout/Provider/OPENAI/friendli.py +233 -0
- webscout/Provider/OPENAI/mcpcore.py +109 -70
- webscout/Provider/OPENAI/monochat.py +329 -0
- webscout/Provider/OPENAI/pydantic_imports.py +1 -172
- webscout/Provider/OPENAI/scirachat.py +59 -51
- webscout/Provider/OPENAI/toolbaz.py +3 -9
- webscout/Provider/OPENAI/typegpt.py +1 -1
- webscout/Provider/OPENAI/utils.py +19 -42
- webscout/Provider/OPENAI/x0gpt.py +14 -2
- webscout/Provider/OPENAI/xenai.py +514 -0
- webscout/Provider/OPENAI/yep.py +8 -2
- webscout/Provider/OpenGPT.py +54 -32
- webscout/Provider/PI.py +58 -84
- webscout/Provider/StandardInput.py +32 -13
- webscout/Provider/TTI/README.md +9 -9
- webscout/Provider/TTI/__init__.py +3 -1
- webscout/Provider/TTI/aiarta.py +92 -78
- webscout/Provider/TTI/bing.py +231 -0
- webscout/Provider/TTI/infip.py +212 -0
- webscout/Provider/TTI/monochat.py +220 -0
- webscout/Provider/TTS/speechma.py +45 -39
- webscout/Provider/TeachAnything.py +11 -3
- webscout/Provider/TextPollinationsAI.py +78 -70
- webscout/Provider/TogetherAI.py +350 -0
- webscout/Provider/Venice.py +37 -46
- webscout/Provider/VercelAI.py +27 -24
- webscout/Provider/WiseCat.py +35 -35
- webscout/Provider/WrDoChat.py +22 -26
- webscout/Provider/WritingMate.py +26 -22
- webscout/Provider/XenAI.py +324 -0
- webscout/Provider/__init__.py +10 -5
- webscout/Provider/deepseek_assistant.py +378 -0
- webscout/Provider/granite.py +48 -57
- webscout/Provider/koala.py +51 -39
- webscout/Provider/learnfastai.py +49 -64
- webscout/Provider/llmchat.py +79 -93
- webscout/Provider/llmchatco.py +63 -78
- webscout/Provider/multichat.py +51 -40
- webscout/Provider/oivscode.py +1 -1
- webscout/Provider/scira_chat.py +159 -96
- webscout/Provider/scnet.py +13 -13
- webscout/Provider/searchchat.py +13 -13
- webscout/Provider/sonus.py +12 -11
- webscout/Provider/toolbaz.py +25 -8
- webscout/Provider/turboseek.py +41 -42
- webscout/Provider/typefully.py +27 -12
- webscout/Provider/typegpt.py +41 -46
- webscout/Provider/uncovr.py +55 -90
- webscout/Provider/x0gpt.py +33 -17
- webscout/Provider/yep.py +79 -96
- webscout/auth/__init__.py +55 -0
- webscout/auth/api_key_manager.py +189 -0
- webscout/auth/auth_system.py +100 -0
- webscout/auth/config.py +76 -0
- webscout/auth/database.py +400 -0
- webscout/auth/exceptions.py +67 -0
- webscout/auth/middleware.py +248 -0
- webscout/auth/models.py +130 -0
- webscout/auth/providers.py +279 -0
- webscout/auth/rate_limiter.py +254 -0
- webscout/auth/request_models.py +127 -0
- webscout/auth/request_processing.py +226 -0
- webscout/auth/routes.py +550 -0
- webscout/auth/schemas.py +103 -0
- webscout/auth/server.py +367 -0
- webscout/client.py +121 -70
- webscout/litagent/Readme.md +68 -55
- webscout/litagent/agent.py +99 -9
- webscout/scout/core/scout.py +104 -26
- webscout/scout/element.py +139 -18
- webscout/swiftcli/core/cli.py +14 -3
- webscout/swiftcli/decorators/output.py +59 -9
- webscout/update_checker.py +31 -49
- webscout/version.py +1 -1
- webscout/webscout_search.py +4 -12
- webscout/webscout_search_async.py +3 -10
- webscout/yep_search.py +2 -11
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/METADATA +141 -99
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/RECORD +109 -83
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/entry_points.txt +1 -1
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/OPENAI/api.py +0 -1320
- webscout/Provider/TTI/fastflux.py +0 -233
- webscout/Provider/Writecream.py +0 -246
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/WHEEL +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.1.dist-info → webscout-8.3.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/base.py
CHANGED
@@ -1,9 +1,18 @@
 from abc import ABC, abstractmethod
 from typing import List, Dict, Optional, Union, Generator, Any, TypedDict, Callable
 import json
-import logging
 from dataclasses import dataclass
-
+
+# Import WebScout Litlogger instead of standard logging
+from webscout.Litlogger import Logger, LogLevel
+
+logger = Logger(name="OpenAIBase", level=LogLevel.INFO)
+
+# Import the LitMeta metaclass from Litproxy
+try:
+    from litproxy import LitMeta
+except ImportError:
+    from .autoproxy import ProxyAutoMeta as LitMeta
 
 
 # Import the utils for response structures
@@ -173,82 +182,11 @@ class BaseChat(ABC):
     completions: BaseCompletions
 
 
-
-# """
-# Metaclass to ensure all OpenAICompatibleProvider subclasses automatically get proxy support.
-# This will inject proxies into any requests.Session, httpx.Client, or curl_cffi session attributes found on the instance.
-
-# To disable automatic proxy injection, set disable_auto_proxy=True in the constructor or
-# set the class attribute DISABLE_AUTO_PROXY = True.
-# """
-# def __call__(cls, *args, **kwargs):
-#     instance = super().__call__(*args, **kwargs)
-
-#     # Check if auto proxy is disabled
-#     disable_auto_proxy = kwargs.get('disable_auto_proxy', False) or getattr(cls, 'DISABLE_AUTO_PROXY', False)
-
-#     proxies = getattr(instance, 'proxies', None) or kwargs.get('proxies', None)
-#     if proxies is None and not disable_auto_proxy:
-#         try:
-#             proxies = {"http": get_auto_proxy(), "https": get_auto_proxy()}
-#         except Exception as e:
-#             logger.warning(f"Failed to get auto proxy, disabling proxy support: {e}")
-#             proxies = {}
-#     elif proxies is None:
-#         proxies = {}
-#     instance.proxies = proxies
-#     # Patch sessions if we have valid proxies
-#     if proxies:
-#         for attr in dir(instance):
-#             obj = getattr(instance, attr)
-#             if isinstance(obj, requests.Session):
-#                 obj.proxies.update(proxies)
-#             if httpx and isinstance(obj, httpx.Client):
-#                 try:
-#                     obj._proxies = proxies
-#                 except Exception:
-#                     pass
-#             # Patch curl_cffi sessions if present
-#             if CurlSession and isinstance(obj, CurlSession):
-#                 try:
-#                     obj.proxies.update(proxies)
-#                 except Exception:
-#                     pass
-#             if CurlAsyncSession and isinstance(obj, CurlAsyncSession):
-#                 try:
-#                     obj.proxies.update(proxies)
-#                 except Exception:
-#                     pass
-#     # Provide helpers for proxied sessions
-#     def get_proxied_session():
-#         s = requests.Session()
-#         s.proxies.update(proxies)
-#         return s
-#     instance.get_proxied_session = get_proxied_session
-
-#     def get_proxied_curl_session(impersonate="chrome120", **kwargs):
-#         """Get a curl_cffi Session with proxies configured"""
-#         if CurlSession:
-#             return CurlSession(proxies=proxies, impersonate=impersonate, **kwargs)
-#         else:
-#             raise ImportError("curl_cffi is not installed")
-#     instance.get_proxied_curl_session = get_proxied_curl_session
-
-#     def get_proxied_curl_async_session(impersonate="chrome120", **kwargs):
-#         """Get a curl_cffi AsyncSession with proxies configured"""
-#         if CurlAsyncSession:
-#             return CurlAsyncSession(proxies=proxies, impersonate=impersonate, **kwargs)
-#         else:
-#             raise ImportError("curl_cffi is not installed")
-#     instance.get_proxied_curl_async_session = get_proxied_curl_async_session
-
-#     return instance
-# class OPENAICompatibleMeta(ABC, metaclass=ProxyAutoMeta):
-class OpenAICompatibleProvider(ABC):
+class OpenAICompatibleProvider(ABC, metaclass=LitMeta):
     """
     Abstract Base Class for providers mimicking the OpenAI Python client structure.
     Requires a nested 'chat.completions' structure with tool support.
-    All subclasses automatically get proxy support via
+    All subclasses automatically get proxy support via LitMeta.
 
     # Available proxy helpers:
     # - self.get_proxied_session() - returns a requests.Session with proxies
@@ -260,6 +198,8 @@ class OpenAICompatibleProvider(ABC):
     # - httpx.Client objects
     # - curl_cffi.requests.Session objects
     # - curl_cffi.requests.AsyncSession objects
+    #
+    # Inbuilt auto-retry is also enabled for all requests.Session and curl_cffi.Session objects.
     """
     chat: BaseChat
     available_tools: Dict[str, Tool] = {} # Dictionary of available tools
@@ -267,13 +207,14 @@ class OpenAICompatibleProvider(ABC):
     supports_tool_choice: bool = False # Whether the provider supports tool_choice
 
     @abstractmethod
-    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, **kwargs: Any):
+    def __init__(self, api_key: Optional[str] = None, tools: Optional[List[Tool]] = None, proxies: Optional[dict] = None, disable_auto_proxy: bool = False, **kwargs: Any):
        self.available_tools = {}
        if tools:
            self.register_tools(tools)
        # self.proxies is set by ProxyAutoMeta
        # Subclasses should use self.proxies for all network requests
        # Optionally, use self.get_proxied_session() for a requests.Session with proxies
+        # The disable_auto_proxy parameter is handled by ProxyAutoMeta
        # raise NotImplementedError # <-- Commented out for metaclass test
 
     @property
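The net effect of the base.py change is that proxy wiring moves out of commented-out boilerplate and into the LitMeta metaclass (with ProxyAutoMeta as the in-tree fallback). Below is a minimal sketch of what this implies for provider subclasses, assuming LitMeta honors disable_auto_proxy and explicit proxies the way the fallback's comments describe; the MyProvider class is hypothetical and only the constructor signature comes from the diff.

# Hypothetical subclass for illustration; only the __init__ signature and the
# metaclass behavior described in base.py's comments are taken from the diff.
from typing import Any, List, Optional

from webscout.Provider.OPENAI.base import OpenAICompatibleProvider


class MyProvider(OpenAICompatibleProvider):
    def __init__(self, api_key: Optional[str] = None, tools: Optional[List] = None,
                 proxies: Optional[dict] = None, disable_auto_proxy: bool = False,
                 **kwargs: Any):
        super().__init__(api_key=api_key, tools=tools, proxies=proxies,
                         disable_auto_proxy=disable_auto_proxy, **kwargs)
        # self.proxies is populated by the metaclass; any requests.Session,
        # httpx.Client, or curl_cffi session attribute on the instance is patched.


# Per-instance opt-out of automatic proxy discovery:
client = MyProvider(disable_auto_proxy=True)

# Explicit proxies take precedence over auto-discovery (per the fallback logic);
# a class can also opt out globally with DISABLE_AUTO_PROXY = True.
client = MyProvider(proxies={"http": "http://127.0.0.1:8080",
                             "https": "http://127.0.0.1:8080"})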
webscout/Provider/OPENAI/deepinfra.py
CHANGED
@@ -2,23 +2,20 @@ import requests
 import json
 import time
 import uuid
+import collections
 from typing import List, Dict, Optional, Union, Generator, Any
 
-# Import base classes and utility structures
 from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage
 )
 
-# Attempt to import LitAgent, fallback if not available
 try:
     from webscout.litagent import LitAgent
 except ImportError:
     pass
 
-# --- DeepInfra Client ---
-
 class Completions(BaseCompletions):
     def __init__(self, client: 'DeepInfra'):
         self._client = client
@@ -36,10 +33,6 @@ class Completions(BaseCompletions):
         proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
-        """
-        Creates a model response for the given chat conversation.
-        Mimics openai.chat.completions.create
-        """
         payload = {
             "model": model,
             "messages": messages,
@@ -50,12 +43,9 @@ class Completions(BaseCompletions):
             payload["temperature"] = temperature
         if top_p is not None:
             payload["top_p"] = top_p
-
         payload.update(kwargs)
-
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
-
         if stream:
             return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
@@ -75,52 +65,39 @@ class Completions(BaseCompletions):
                 proxies=proxies
             )
             response.raise_for_status()
-
-            # Track token usage across chunks
             prompt_tokens = 0
             completion_tokens = 0
             total_tokens = 0
-
-            for line in response.iter_lines():
+            for line in response.iter_lines(decode_unicode=True):
                 if line:
-                    decoded_line = line.decode('utf-8')
-
-                    if decoded_line.startswith("data: "):
-                        json_str = decoded_line[6:]
+                    if line.startswith("data: "):
+                        json_str = line[6:]
                         if json_str == "[DONE]":
-                            # Format the final [DONE] marker in OpenAI format
-                            # print("data: [DONE]")
                             break
-
                         try:
                             data = json.loads(json_str)
                             choice_data = data.get('choices', [{}])[0]
                             delta_data = choice_data.get('delta', {})
                             finish_reason = choice_data.get('finish_reason')
-
-                            # Update token counts if available
                             usage_data = data.get('usage', {})
                             if usage_data:
                                 prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
                                 completion_tokens = usage_data.get('completion_tokens', completion_tokens)
                                 total_tokens = usage_data.get('total_tokens', total_tokens)
-
-
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
                             delta = ChoiceDelta(
                                 content=delta_data.get('content'),
                                 role=delta_data.get('role'),
                                 tool_calls=delta_data.get('tool_calls')
                             )
-
-                            # Create the choice object
                             choice = Choice(
                                 index=choice_data.get('index', 0),
                                 delta=delta,
                                 finish_reason=finish_reason,
                                 logprobs=choice_data.get('logprobs')
                             )
-
-                            # Create the chunk object
                             chunk = ChatCompletionChunk(
                                 id=request_id,
                                 choices=[choice],
@@ -128,48 +105,35 @@ class Completions(BaseCompletions):
                                 model=model,
                                 system_fingerprint=data.get('system_fingerprint')
                             )
-
-
-
-
-                            else:
-                                chunk_dict = chunk.dict(exclude_none=True)
-
-                            # Add usage information to match OpenAI format
-                            # Even if we don't have real token counts, include estimated usage
-                            # This matches the format in the examples
-                            usage_dict = {
-                                "prompt_tokens": prompt_tokens or 10,
-                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
-                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                            chunk.usage = {
+                                "prompt_tokens": prompt_tokens,
+                                "completion_tokens": completion_tokens,
+                                "total_tokens": total_tokens,
                                 "estimated_cost": None
                             }
-
-                            # Update completion_tokens and total_tokens as we receive more content
-                            if delta_data.get('content'):
-                                completion_tokens += 1
-                                total_tokens = prompt_tokens + completion_tokens
-                                usage_dict["completion_tokens"] = completion_tokens
-                                usage_dict["total_tokens"] = total_tokens
-
-                            chunk_dict["usage"] = usage_dict
-
-                            # Format the response in OpenAI format exactly as requested
-                            # We need to print the raw string and also yield the chunk object
-                            # This ensures both the console output and the returned object are correct
-                            # print(f"data: {json.dumps(chunk_dict)}")
-
-                            # Return the chunk object for internal processing
                             yield chunk
                         except json.JSONDecodeError:
-                            print(f"Warning: Could not decode JSON line: {json_str}")
                             continue
+            # Final chunk with finish_reason="stop"
+            delta = ChoiceDelta(content=None, role=None, tool_calls=None)
+            choice = Choice(index=0, delta=delta, finish_reason="stop", logprobs=None)
+            chunk = ChatCompletionChunk(
+                id=request_id,
+                choices=[choice],
+                created=created_time,
+                model=model,
+                system_fingerprint=None
+            )
+            chunk.usage = {
+                "prompt_tokens": prompt_tokens,
+                "completion_tokens": completion_tokens,
+                "total_tokens": total_tokens,
+                "estimated_cost": None
+            }
+            yield chunk
-        except requests.exceptions.RequestException as e:
+        except Exception as e:
             print(f"Error during DeepInfra stream request: {e}")
             raise IOError(f"DeepInfra request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing DeepInfra stream: {e}")
-            raise
 
 def _create_non_stream(
         self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
@@ -185,13 +149,19 @@ class Completions(BaseCompletions):
             )
             response.raise_for_status()
             data = response.json()
-
             choices_data = data.get('choices', [])
             usage_data = data.get('usage', {})
-
             choices = []
             for choice_d in choices_data:
-                message_d = choice_d.get('message', {})
+                message_d = choice_d.get('message')
+                if not message_d and 'delta' in choice_d:
+                    delta = choice_d['delta']
+                    message_d = {
+                        'role': delta.get('role', 'assistant'),
+                        'content': delta.get('content', '')
+                    }
+                if not message_d:
+                    message_d = {'role': 'assistant', 'content': ''}
                 message = ChatCompletionMessage(
                     role=message_d.get('role', 'assistant'),
                     content=message_d.get('content', '')
@@ -202,13 +172,11 @@ class Completions(BaseCompletions):
                     finish_reason=choice_d.get('finish_reason', 'stop')
                 )
                 choices.append(choice)
-
             usage = CompletionUsage(
                 prompt_tokens=usage_data.get('prompt_tokens', 0),
                 completion_tokens=usage_data.get('completion_tokens', 0),
                 total_tokens=usage_data.get('total_tokens', 0)
             )
-
             completion = ChatCompletion(
                 id=request_id,
                 choices=choices,
@@ -217,22 +185,16 @@ class Completions(BaseCompletions):
                 usage=usage,
             )
             return completion
-
-        except requests.exceptions.RequestException as e:
+        except Exception as e:
             print(f"Error during DeepInfra non-stream request: {e}")
             raise IOError(f"DeepInfra request failed: {e}") from e
-        except Exception as e:
-            print(f"Error processing DeepInfra response: {e}")
-            raise
 
 class Chat(BaseChat):
     def __init__(self, client: 'DeepInfra'):
         self.completions = Completions(client)
 
 class DeepInfra(OpenAICompatibleProvider):
-
     AVAILABLE_MODELS = [
-        # "anthropic/claude-3-7-sonnet-latest", # >>>> NOT WORKING
         "deepseek-ai/DeepSeek-R1-0528",
         "deepseek-ai/DeepSeek-R1",
         "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
@@ -265,39 +227,13 @@ class DeepInfra(OpenAICompatibleProvider):
         "Qwen/Qwen3-30B-A3B",
         "Qwen/Qwen3-32B",
         "Qwen/Qwen3-235B-A22B",
-        # "google/gemini-1.5-flash", # >>>> NOT WORKING
-        # "google/gemini-1.5-flash-8b", # >>>> NOT WORKING
-        # "google/gemini-2.0-flash-001", # >>>> NOT WORKING
-
-        # "Gryphe/MythoMax-L2-13b", # >>>> NOT WORKING
-
-        # "meta-llama/Llama-3.2-1B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-90B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-11B-Vision-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", # >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3.1-405B-Instruct", # >>>> NOT WORKING
-        # "mistralai/Mixtral-8x7B-Instruct-v0.1", # >>>> NOT WORKING
-        # "mistralai/Mistral-7B-Instruct-v0.3", # >>>> NOT WORKING
-        # "mistralai/Mistral-Nemo-Instruct-2407", # >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-405B", # >>>> NOT WORKING
-        # "NovaSky-AI/Sky-T1-32B-Preview", # >>>> NOT WORKING
-        # "Qwen/Qwen2.5-7B-Instruct", # >>>> NOT WORKING
-        # "Sao10K/L3.1-70B-Euryale-v2.2", # >>>> NOT WORKING
-        # "Sao10K/L3.3-70B-Euryale-v2.3", # >>>> NOT WORKING
     ]
-
     def __init__(self, browser: str = "chrome"):
-        self.timeout = None
+        self.timeout = None
         self.base_url = "https://api.deepinfra.com/v1/openai/chat/completions"
         self.session = requests.Session()
-
         agent = LitAgent()
         fingerprint = agent.generate_fingerprint(browser)
-
         self.headers = {
             "Accept": fingerprint["accept"],
             "Accept-Encoding": "gzip, deflate, br, zstd",
@@ -319,21 +255,19 @@ class DeepInfra(OpenAICompatibleProvider):
         }
         self.session.headers.update(self.headers)
         self.chat = Chat(self)
-
     @property
     def models(self):
         class _ModelList:
             def list(inner_self):
                 return type(self).AVAILABLE_MODELS
         return _ModelList()
-
+
 if __name__ == "__main__":
-    # Example usage
     client = DeepInfra()
     response = client.chat.completions.create(
         model="deepseek-ai/DeepSeek-R1-0528",
         messages=[{"role": "user", "content": "Hello, how are you?"}],
-        max_tokens=
+        max_tokens=10000,
         stream=False
     )
     print(response)
webscout/Provider/OPENAI/e2b.py
CHANGED
@@ -1609,6 +1609,5 @@ if __name__ == "__main__":
         print("\n--- End of Stream ---")
         if not full_stream_response:
             print(f"{RED}Stream test failed: No content received.{RESET}")
-
     except Exception as e:
         print(f"{RED}Streaming Test Failed: {e}{RESET}")