webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0
webscout/Provider/OPENAI/FreeGemini.py

```diff
@@ -22,7 +22,8 @@ from webscout.Provider.OPENAI.utils import (
     ChoiceDelta,
     CompletionUsage,
     format_prompt,
-    get_system_prompt
+    get_system_prompt,
+    count_tokens
 )
 
 # ANSI escape codes for formatting
```
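Several hunks below replace blank token counts with calls to the newly imported `count_tokens` helper. The helper lives in `webscout/Provider/OPENAI/utils.py`, but its implementation is not part of this diff; a minimal stand-in, assuming tokenizer-backed counting with a crude whitespace fallback, could look like this:

```python
# Hypothetical stand-in for webscout.Provider.OPENAI.utils.count_tokens.
# The real helper is not shown in this diff; this sketch assumes a
# tiktoken-backed count with a whitespace-split fallback.
def count_tokens(text: str) -> int:
    try:
        import tiktoken  # optional dependency in this sketch
        return len(tiktoken.get_encoding("cl100k_base").encode(text))
    except ImportError:
        return len(text.split())  # rough approximation without a tokenizer
```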
```diff
@@ -43,6 +44,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -68,20 +71,27 @@ class Completions(BaseCompletions):
         }
 
         if stream:
-            return self._create_stream(request_id, created_time, model, api_payload)
+            return self._create_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, api_payload)
+            return self._create_non_stream(request_id, created_time, model, api_payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            # Ensure session proxies are reset if no specific proxies are passed for this call
+            self._client.session.proxies = {}
         try:
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
-                impersonate="chrome120"
+                timeout=timeout if timeout is not None else self._client.timeout,
+                impersonate="chrome120"
             )
             response.raise_for_status()
 
@@ -100,7 +110,7 @@ class Completions(BaseCompletions):
             for text_chunk in processed_stream:
                 if text_chunk and isinstance(text_chunk, str):
                     streaming_text += text_chunk
-                    completion_tokens +=
+                    completion_tokens += count_tokens(text_chunk)
 
                     delta = ChoiceDelta(content=text_chunk, role="assistant")
                     choice = Choice(index=0, delta=delta, finish_reason=None)
@@ -126,18 +136,26 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"{RED}Error during FreeGemini stream request: {e}{RESET}")
             raise IOError(f"FreeGemini stream request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
+        original_proxies = self._client.session.proxies
+        if proxies is not None:
+            self._client.session.proxies = proxies
+        else:
+            self._client.session.proxies = {}
         try:
             # For non-streaming, we'll still use streaming since the API returns data in chunks
             response = self._client.session.post(
                 self._client.api_endpoint,
                 json=payload,
                 stream=True,  # API always returns streaming format
-                timeout=self._client.timeout,
-                impersonate="chrome120"
+                timeout=timeout if timeout is not None else self._client.timeout,
+                impersonate="chrome120"
             )
             response.raise_for_status()
 
@@ -160,9 +178,9 @@ class Completions(BaseCompletions):
                     # Skip invalid JSON
                     pass
 
-        # Create usage statistics
-        prompt_tokens =
-        completion_tokens =
+        # Create usage statistics using count_tokens
+        prompt_tokens = count_tokens(str(payload))
+        completion_tokens = count_tokens(full_text_response)
         total_tokens = prompt_tokens + completion_tokens
 
         usage = CompletionUsage(
@@ -196,6 +214,8 @@ class Completions(BaseCompletions):
         except Exception as e:
             print(f"{RED}Error during FreeGemini non-stream request: {e}{RESET}")
             raise IOError(f"FreeGemini request failed: {e}") from e
+        finally:
+            self._client.session.proxies = original_proxies
 
     @staticmethod
     def _gemini_extractor(data: Dict) -> Optional[str]:
@@ -233,20 +253,17 @@ class FreeGemini(OpenAICompatibleProvider):
 
     def __init__(
         self,
-        timeout: int = 30,
     ):
         """
         Initialize the FreeGemini client.
-
-        Args:
-            timeout: Request timeout in seconds
         """
-        self.timeout =
+        self.timeout = 30
         # Update the API endpoint to match the working implementation
         self.api_endpoint = "https://free-gemini.vercel.app/api/google/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse"
 
         # Initialize session with curl_cffi for better Cloudflare handling
         self.session = Session()
+        self.session.proxies = {}
 
         # Use LitAgent for fingerprinting
         self.agent = LitAgent()
```
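Taken together, the FreeGemini changes move `timeout` and `proxies` from constructor state to per-request arguments. A usage sketch, assuming the OpenAI-compatible `chat.completions.create` surface these providers expose; the prompt and proxy URL are placeholders:

```python
# Hypothetical usage of the 8.3 per-call overrides (placeholder prompt/proxy).
from webscout.Provider.OPENAI.FreeGemini import FreeGemini

client = FreeGemini()  # timeout is now fixed internally at 30s; override it per call

response = client.chat.completions.create(
    model="gemini-2.0-flash",
    messages=[{"role": "user", "content": "Say hello"}],
    timeout=10,                                  # applies to this request only
    proxies={"https": "http://127.0.0.1:8080"},  # swapped in, then restored
)
print(response.choices[0].message.content)
```

The provider applies the proxies to its shared `curl_cffi` session for the duration of the call and restores the previous value in a `finally` block.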
webscout/Provider/OPENAI/NEMOTRON.py

```diff
@@ -9,7 +9,7 @@ from typing import List, Dict, Optional, Union, Generator, Any
 from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from webscout.Provider.OPENAI.utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
-    ChatCompletionMessage, CompletionUsage, format_prompt
+    ChatCompletionMessage, CompletionUsage, format_prompt, count_tokens
 )
 try:
     from webscout.litagent import LitAgent
@@ -34,6 +34,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[dict] = None,
         **kwargs: Any
     ) -> ChatCompletion:
         nemotron_model_name = self._client.convert_model_name(model)
@@ -48,13 +50,14 @@ class Completions(BaseCompletions):
         request_id = f"chatcmpl-{uuid.uuid4()}"
         created_time = int(time.time())
         # Always use non-stream mode, ignore 'stream' argument
-        return self._create_non_stream(request_id, created_time, model, payload)
+        return self._create_non_stream(request_id, created_time, model, payload, timeout=timeout, proxies=proxies)
 
     def _create_stream(
-        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
-            response_generator = self._client._internal_make_request(payload, stream=True)
+            response_generator = self._client._internal_make_request(payload, stream=True, request_timeout=timeout, request_proxies=proxies)
             for text_chunk in response_generator:
                 if text_chunk:
                     delta = ChoiceDelta(content=text_chunk, role="assistant")
@@ -79,18 +82,19 @@ class Completions(BaseCompletions):
             raise IOError(f"NEMOTRON request failed: {e}") from e
 
     def _create_non_stream(
-        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model_name: str, payload: Dict[str, Any],
+        timeout: Optional[int] = None, proxies: Optional[dict] = None
     ) -> ChatCompletion:
         full_response_content = ""
         try:
-            response_generator = self._client._internal_make_request(payload, stream=False)
+            response_generator = self._client._internal_make_request(payload, stream=False, request_timeout=timeout, request_proxies=proxies)
             full_response_content = next(response_generator, "")
         except Exception as e:
             pass
         message = ChatCompletionMessage(role="assistant", content=full_response_content)
         choice = Choice(index=0, message=message, finish_reason="stop")
-        prompt_tokens =
-        completion_tokens =
+        prompt_tokens = count_tokens(payload.get("content", ""))
+        completion_tokens = count_tokens(full_response_content)
         usage = CompletionUsage(
             prompt_tokens=prompt_tokens,
             completion_tokens=completion_tokens,
@@ -111,24 +115,17 @@ class Chat(BaseChat):
 
 class NEMOTRON(OpenAICompatibleProvider):
     AVAILABLE_MODELS = [
-        "
-        "
+        "gpt4o",
+        "nemotron70b",
     ]
 
-    # Model mapping for payload
-    MODEL_PAYLOAD_MAPPING = {
-        "NEMOTRON/gpt4o": "gpt4o",
-        "NEMOTRON/nemotron70b": "nemotron70b",
-    }
-
     API_BASE_URL = "https://nemotron.one/api/chat"
     def __init__(
-        self
-        timeout: int = 30,
-        proxies: dict = {}
+        self
     ):
         self.session = requests.Session()
-        self.timeout =
+        self.timeout = 30
+        self.session.proxies = {}
         agent = LitAgent()
         user_agent = agent.random()
         self.base_headers = {
@@ -143,8 +140,6 @@ class NEMOTRON(OpenAICompatibleProvider):
             "user-agent": user_agent
         }
         self.session.headers.update(self.base_headers)
-        if proxies:
-            self.session.proxies.update(proxies)
         self.chat = Chat(self)
 
     def _generate_random_email(self) -> str:
@@ -183,18 +178,12 @@ class NEMOTRON(OpenAICompatibleProvider):
         Returns:
             NEMOTRON model name for API payload
         """
-        #
-        if model_alias.
-            base_model = model_alias.split("/")[1]
-            if base_model in ["gpt4o", "nemotron70b"]:
-                return base_model
-
-        # Handle direct model names
-        if model_alias in ["gpt4o", "nemotron70b"]:
+        # Accept only direct model names
+        if model_alias in self.AVAILABLE_MODELS:
             return model_alias
 
         # Case-insensitive matching
-        for m in
+        for m in self.AVAILABLE_MODELS:
             if m.lower() == model_alias.lower():
                 return m
 
@@ -205,10 +194,19 @@ class NEMOTRON(OpenAICompatibleProvider):
     def _internal_make_request(
         self,
         payload: Dict[str, Any],
-        stream: bool = False
+        stream: bool = False,
+        request_timeout: Optional[int] = None,
+        request_proxies: Optional[dict] = None
     ) -> Generator[str, None, None]:
         request_headers = self.base_headers.copy()
         request_headers["referer"] = f"https://nemotron.one/chat/{payload['model']}"
+        original_proxies = self.session.proxies.copy()
+        if request_proxies is not None:
+            self.session.proxies.update(request_proxies)
+        elif not self.session.proxies:
+            pass
+        else:
+            self.session.proxies = {}
         try:
             if stream:
                 with self.session.post(
@@ -216,7 +214,7 @@ class NEMOTRON(OpenAICompatibleProvider):
                     headers=request_headers,
                     json=payload,
                     stream=True,
-                    timeout=self.timeout
+                    timeout=request_timeout if request_timeout is not None else self.timeout
                 ) as response:
                     response.raise_for_status()
                     yield from sanitize_stream(
@@ -228,7 +226,7 @@ class NEMOTRON(OpenAICompatibleProvider):
                     self.API_BASE_URL,
                     headers=request_headers,
                     json=payload,
-                    timeout=self.timeout
+                    timeout=request_timeout if request_timeout is not None else self.timeout
                 )
                 response.raise_for_status()
                 yield response.text
@@ -236,6 +234,8 @@ class NEMOTRON(OpenAICompatibleProvider):
             raise exceptions.ProviderConnectionError(f"NEMOTRON API Connection error: {str(e)}")
         except Exception as e:
             raise RuntimeError(f"NEMOTRON API request unexpected error: {str(e)}")
+        finally:
+            self.session.proxies = original_proxies
     @property
     def models(self):
         class _ModelList:
```
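Both files converge on the same session-state pattern: snapshot `session.proxies`, apply the per-call override, and restore the snapshot in `finally` so one request's proxy settings never leak into the next. A distilled, self-contained sketch of that pattern over a plain `requests.Session` (the function name and URL are placeholders, not webscout API):

```python
from typing import Any, Dict, Optional

import requests


def post_with_overrides(
    session: requests.Session,
    url: str,
    payload: Dict[str, Any],
    timeout: Optional[int] = None,
    proxies: Optional[Dict[str, str]] = None,
    default_timeout: int = 30,
) -> requests.Response:
    """Apply per-call proxies to a shared session, restoring them afterwards."""
    original_proxies = session.proxies.copy()  # snapshot the shared state
    if proxies is not None:
        session.proxies.update(proxies)
    try:
        return session.post(
            url,
            json=payload,
            # Fall back to the client-wide default when no per-call timeout is given.
            timeout=timeout if timeout is not None else default_timeout,
        )
    finally:
        session.proxies = original_proxies  # restore even if the request raises
```

Because the override mutates shared session state, concurrent calls on one client can briefly observe each other's proxies; passing `proxies=` directly to the request call would avoid that window, but the released code keeps the session-level approach.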