webscout 8.2.8__py3-none-any.whl → 8.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +34 -16
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +491 -87
- webscout/Bard.py +441 -323
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +7 -59
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AISEARCH/Perplexity.py +332 -358
- webscout/Provider/AISEARCH/felo_search.py +9 -35
- webscout/Provider/AISEARCH/genspark_search.py +30 -56
- webscout/Provider/AISEARCH/hika_search.py +4 -16
- webscout/Provider/AISEARCH/iask_search.py +410 -436
- webscout/Provider/AISEARCH/monica_search.py +4 -30
- webscout/Provider/AISEARCH/scira_search.py +6 -32
- webscout/Provider/AISEARCH/webpilotai_search.py +38 -64
- webscout/Provider/Blackboxai.py +155 -35
- webscout/Provider/ChatSandbox.py +2 -1
- webscout/Provider/Deepinfra.py +339 -339
- webscout/Provider/ExaChat.py +358 -358
- webscout/Provider/Gemini.py +169 -169
- webscout/Provider/GithubChat.py +1 -2
- webscout/Provider/Glider.py +3 -3
- webscout/Provider/HeckAI.py +172 -82
- webscout/Provider/LambdaChat.py +1 -0
- webscout/Provider/MCPCore.py +7 -3
- webscout/Provider/OPENAI/BLACKBOXAI.py +421 -139
- webscout/Provider/OPENAI/Cloudflare.py +38 -21
- webscout/Provider/OPENAI/FalconH1.py +457 -0
- webscout/Provider/OPENAI/FreeGemini.py +35 -18
- webscout/Provider/OPENAI/NEMOTRON.py +34 -34
- webscout/Provider/OPENAI/PI.py +427 -0
- webscout/Provider/OPENAI/Qwen3.py +304 -0
- webscout/Provider/OPENAI/README.md +952 -1253
- webscout/Provider/OPENAI/TwoAI.py +374 -0
- webscout/Provider/OPENAI/__init__.py +7 -1
- webscout/Provider/OPENAI/ai4chat.py +73 -63
- webscout/Provider/OPENAI/api.py +869 -644
- webscout/Provider/OPENAI/base.py +2 -0
- webscout/Provider/OPENAI/c4ai.py +34 -13
- webscout/Provider/OPENAI/chatgpt.py +575 -556
- webscout/Provider/OPENAI/chatgptclone.py +512 -487
- webscout/Provider/OPENAI/chatsandbox.py +11 -6
- webscout/Provider/OPENAI/copilot.py +258 -0
- webscout/Provider/OPENAI/deepinfra.py +327 -318
- webscout/Provider/OPENAI/e2b.py +140 -104
- webscout/Provider/OPENAI/exaai.py +420 -411
- webscout/Provider/OPENAI/exachat.py +448 -443
- webscout/Provider/OPENAI/flowith.py +7 -3
- webscout/Provider/OPENAI/freeaichat.py +12 -8
- webscout/Provider/OPENAI/glider.py +15 -8
- webscout/Provider/OPENAI/groq.py +5 -2
- webscout/Provider/OPENAI/heckai.py +311 -307
- webscout/Provider/OPENAI/llmchatco.py +9 -7
- webscout/Provider/OPENAI/mcpcore.py +18 -9
- webscout/Provider/OPENAI/multichat.py +7 -5
- webscout/Provider/OPENAI/netwrck.py +16 -11
- webscout/Provider/OPENAI/oivscode.py +290 -0
- webscout/Provider/OPENAI/opkfc.py +507 -496
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +29 -17
- webscout/Provider/OPENAI/sonus.py +308 -303
- webscout/Provider/OPENAI/standardinput.py +442 -433
- webscout/Provider/OPENAI/textpollinations.py +18 -11
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +17 -10
- webscout/Provider/OPENAI/typegpt.py +21 -11
- webscout/Provider/OPENAI/uncovrAI.py +477 -462
- webscout/Provider/OPENAI/utils.py +90 -79
- webscout/Provider/OPENAI/venice.py +435 -425
- webscout/Provider/OPENAI/wisecat.py +387 -381
- webscout/Provider/OPENAI/writecream.py +166 -163
- webscout/Provider/OPENAI/x0gpt.py +26 -37
- webscout/Provider/OPENAI/yep.py +384 -356
- webscout/Provider/PI.py +2 -1
- webscout/Provider/TTI/README.md +55 -101
- webscout/Provider/TTI/__init__.py +4 -9
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/base.py +159 -159
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TextPollinationsAI.py +308 -308
- webscout/Provider/TwoAI.py +239 -44
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Writecream.py +246 -246
- webscout/Provider/__init__.py +2 -2
- webscout/Provider/ai4chat.py +33 -8
- webscout/Provider/granite.py +41 -6
- webscout/Provider/koala.py +169 -169
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +3 -2
- webscout/Provider/scnet.py +1 -0
- webscout/Provider/typegpt.py +3 -3
- webscout/Provider/uncovr.py +368 -368
- webscout/client.py +70 -0
- webscout/litprinter/__init__.py +58 -58
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +3 -1
- webscout/scout/core/crawler.py +134 -64
- webscout/scout/core/scout.py +148 -109
- webscout/scout/element.py +106 -88
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/plugins/manager.py +9 -2
- webscout/version.py +1 -1
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/METADATA +160 -35
- webscout-8.3.dist-info/RECORD +290 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/WHEEL +1 -1
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/entry_points.txt +1 -0
- webscout/Litlogger/Readme.md +0 -175
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/TTI/AiForce/README.md +0 -159
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/README.md +0 -99
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/README.md +0 -174
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/README.md +0 -101
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/README.md +0 -155
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/README.md +0 -146
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/aiarta/README.md +0 -134
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/README.md +0 -100
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/README.md +0 -129
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/README.md +0 -114
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/README.md +0 -161
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/README.md +0 -79
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/README.md +0 -139
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/UNFINISHED/oivscode.py +0 -351
- webscout-8.2.8.dist-info/RECORD +0 -334
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.8.dist-info → webscout-8.3.dist-info}/top_level.txt +0 -0

webscout/Provider/OPENAI/llmchatco.py
@@ -37,6 +37,8 @@ class Completions(BaseCompletions):
         top_p: Optional[float] = None,  # Note: LLMChatCo doesn't seem to use top_p directly in payload
         web_search: bool = False,  # LLMChatCo specific parameter
         system_prompt: Optional[str] = "You are a helpful assistant.",  # Default system prompt if not provided
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -88,12 +90,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, actual_model, payload)
+            return self._create_stream(request_id, created_time, actual_model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, actual_model, payload)
+            return self._create_non_stream(request_id, created_time, actual_model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
@@ -101,7 +103,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )

             if not response.ok:
@@ -197,14 +200,14 @@ class Completions(BaseCompletions):


     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         # Non-streaming requires accumulating stream chunks
         full_response_content = ""
         finish_reason = "stop"  # Assume stop unless error occurs

         try:
-            stream_generator = self._create_stream(request_id, created_time, model, payload)
+            stream_generator = self._create_stream(request_id, created_time, model, payload, timeout, proxies)
             for chunk in stream_generator:
                 if chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content:
                     full_response_content += chunk.choices[0].delta.content
@@ -332,4 +335,3 @@ class LLMChatCo(OpenAICompatibleProvider):
         def list(inner_self):
             return type(self).AVAILABLE_MODELS
         return _ModelList()
-
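
This pattern recurs across several OpenAI-compatible providers in this release: `create()` gains per-request `timeout` and `proxies` arguments that fall back to the client-level values (`timeout or self._client.timeout`, `proxies or getattr(self._client, "proxies", None)`). A minimal usage sketch follows; the import path and the no-argument constructor are assumptions drawn from this diff, not confirmed API:

```python
# Illustrative only: import path and constructor signature are assumed.
from webscout.Provider.OPENAI.llmchatco import LLMChatCo

client = LLMChatCo()
models = client.models.list()  # AVAILABLE_MODELS, via the models property shown above

response = client.chat.completions.create(
    model=models[0],
    messages=[{"role": "user", "content": "Hello"}],
    timeout=30,                                   # per-request override of client.timeout
    proxies={"https": "http://127.0.0.1:8080"},   # per-request proxy override
)
print(response.choices[0].message.content)
```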

webscout/Provider/OPENAI/mcpcore.py
@@ -43,6 +43,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -75,12 +77,12 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream_from_stream(request_id, created_time, model, payload)
+            return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         """Handles the streaming response from MCPCore."""
         final_usage_data = None  # To store usage if received
@@ -90,7 +92,8 @@ class Completions(BaseCompletions):
                 headers=self._client.headers,
                 json=payload,
                 stream=True,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"  # Impersonation often helps
             )

@@ -167,9 +170,15 @@ class Completions(BaseCompletions):
                 # system_fingerprint=...,  # Can be added if available in final event
             )
             # Add usage to the final chunk dictionary representation if available
-
+            if hasattr(final_chunk, "model_dump"):
+                final_chunk_dict = final_chunk.model_dump(exclude_none=True)
+            else:
+                final_chunk_dict = final_chunk.dict(exclude_none=True)
             if usage_obj:
-
+                if hasattr(usage_obj, "model_dump"):
+                    final_chunk_dict["usage"] = usage_obj.model_dump(exclude_none=True)
+                else:
+                    final_chunk_dict["usage"] = usage_obj.dict(exclude_none=True)

             # Yield the final dictionary or object as needed by downstream consumers
             # Yielding the object aligns better with the generator type hint
@@ -187,7 +196,7 @@ class Completions(BaseCompletions):
             raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e

     def _create_non_stream_from_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         """Handles the non-streaming response by making a single POST request (like deepinfra)."""
         try:
@@ -199,7 +208,8 @@ class Completions(BaseCompletions):
                 self._client.api_endpoint,
                 headers=self._client.headers,
                 json=payload,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 impersonate="chrome110"
             )
             if not response.ok:
@@ -380,4 +390,3 @@ class MCPCore(OpenAICompatibleProvider):
         def list(inner_self):
             return type(self).AVAILABLE_MODELS
         return _ModelList()
-
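
The `hasattr(..., "model_dump")` checks added in the `@@ -167,9 +170,15 @@` hunk make chunk serialization work with both pydantic v2 (`model_dump`) and pydantic v1 (`dict`) model objects. The same pattern as a standalone sketch; the `dump_model` name is illustrative, webscout applies the check inline:

```python
from typing import Any, Dict

def dump_model(obj: Any) -> Dict[str, Any]:
    """Serialize a pydantic-style object, preferring the pydantic v2 API."""
    if hasattr(obj, "model_dump"):          # pydantic v2
        return obj.model_dump(exclude_none=True)
    return obj.dict(exclude_none=True)      # pydantic v1 fallback
```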

webscout/Provider/OPENAI/multichat.py
@@ -9,7 +9,7 @@ from .base import OpenAICompatibleProvider, BaseChat, BaseCompletions
 from .utils import (
     ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
     ChatCompletionMessage, CompletionUsage,
-    format_prompt
+    format_prompt, count_tokens
 )

 # Import curl_cffi for Cloudflare bypass
@@ -94,6 +94,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -132,7 +134,7 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         # Make the API request
-        response_text = self._client._make_api_request(user_message)
+        response_text = self._client._make_api_request(user_message, timeout=timeout, proxies=proxies)

         # If streaming is requested, simulate streaming with the full response
         if stream:
@@ -154,9 +156,9 @@ class Completions(BaseCompletions):
         message = ChatCompletionMessage(role="assistant", content=response_text)
         choice = Choice(index=0, message=message, finish_reason="stop")

-        # Estimate token usage
-        prompt_tokens =
-        completion_tokens =
+        # Estimate token usage using count_tokens
+        prompt_tokens = count_tokens(user_message)
+        completion_tokens = count_tokens(response_text)
         total_tokens = prompt_tokens + completion_tokens

         usage = CompletionUsage(
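
This release replaces the previously incomplete token estimates with `count_tokens` from `webscout.Provider.OPENAI.utils` (the same change appears in the netwrck hunks below). A sketch of how the resulting usage object gets assembled; `build_usage` is an illustrative helper, and `count_tokens` is only assumed to accept a single string, as in the calls above:

```python
from webscout.Provider.OPENAI.utils import CompletionUsage, count_tokens

def build_usage(prompt_text: str, response_text: str) -> CompletionUsage:
    # Estimated counts over the raw prompt and response strings.
    prompt_tokens = count_tokens(prompt_text)
    completion_tokens = count_tokens(response_text)
    return CompletionUsage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
```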

webscout/Provider/OPENAI/netwrck.py
@@ -15,7 +15,8 @@ from .utils import (
     ChoiceDelta,
     CompletionUsage,
     format_prompt,
-    get_system_prompt
+    get_system_prompt,
+    count_tokens
 )

 # ANSI escape codes for formatting
@@ -36,6 +37,8 @@ class Completions(BaseCompletions):
         stream: bool = False,
         temperature: Optional[float] = None,
         top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
         **kwargs: Any
     ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
         """
@@ -60,19 +63,20 @@ class Completions(BaseCompletions):
         created_time = int(time.time())

         if stream:
-            return self._create_stream(request_id, created_time, model, payload)
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
         else:
-            return self._create_non_stream(request_id, created_time, model, payload)
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)

     def _create_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> Generator[ChatCompletionChunk, None, None]:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout,
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None),
                 stream=True
             )
             response.raise_for_status()
@@ -91,7 +95,7 @@ class Completions(BaseCompletions):
                     # Format the decoded line using the client's formatter
                     formatted_content = self._client.format_text(decoded_line)
                     streaming_text += formatted_content
-                    completion_tokens +=
+                    completion_tokens += count_tokens(formatted_content)

                     # Create a delta object for this chunk
                     delta = ChoiceDelta(content=formatted_content)
@@ -126,14 +130,15 @@ class Completions(BaseCompletions):
             raise IOError(f"Netwrck request failed: {e}") from e

     def _create_non_stream(
-        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any]
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
     ) -> ChatCompletion:
         try:
             response = self._client.session.post(
                 "https://netwrck.com/api/chatpred_or",
                 json=payload,
                 headers=self._client.headers,
-                timeout=self._client.timeout
+                timeout=timeout or self._client.timeout,
+                proxies=proxies or getattr(self._client, "proxies", None)
             )
             response.raise_for_status()

@@ -142,9 +147,9 @@ class Completions(BaseCompletions):
             # Format the full response using the client's formatter
             full_response = self._client.format_text(raw_response)

-            # Create usage statistics
-            prompt_tokens =
-            completion_tokens =
+            # Create usage statistics using count_tokens
+            prompt_tokens = count_tokens(payload.get("query", ""))
+            completion_tokens = count_tokens(full_response)
             total_tokens = prompt_tokens + completion_tokens

             usage = CompletionUsage(
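
Note that the `or`-based fallback used throughout these hunks treats any falsy override the same as `None`: passing `timeout=0` still falls back to the client default. A small illustration of that behaviour (not webscout code):

```python
CLIENT_TIMEOUT = 60  # stand-in for self._client.timeout

def effective_timeout(timeout=None):
    # Mirrors the `timeout or self._client.timeout` pattern from the diff.
    return timeout or CLIENT_TIMEOUT

print(effective_timeout(30))    # 30 -> per-request override wins
print(effective_timeout(None))  # 60 -> falls back to the client default
print(effective_timeout(0))     # 60 -> falsy overrides also fall back
```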

webscout/Provider/OPENAI/oivscode.py (new file)
@@ -0,0 +1,290 @@
+import random
+import secrets
+import requests
+import json
+import time
+import uuid
+import string
+from typing import List, Dict, Optional, Union, Generator, Any
+
+# Import base classes and utility structures
+from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
+from webscout.Provider.OPENAI.utils import (
+    ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
+    ChatCompletionMessage, CompletionUsage
+)
+
+# --- oivscode Client ---
+
+class Completions(BaseCompletions):
+    def __init__(self, client: 'oivscode'):
+        self._client = client
+
+    def create(
+        self,
+        *,
+        model: str,
+        messages: List[Dict[str, str]],
+        max_tokens: Optional[int] = 2049,
+        stream: bool = False,
+        temperature: Optional[float] = None,
+        top_p: Optional[float] = None,
+        timeout: Optional[int] = None,
+        proxies: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
+        """
+        Creates a model response for the given chat conversation.
+        Mimics openai.chat.completions.create
+        """
+        payload = {
+            "model": model,
+            "messages": messages,
+            "max_tokens": max_tokens,
+            "stream": stream,
+        }
+        if temperature is not None:
+            payload["temperature"] = temperature
+        if top_p is not None:
+            payload["top_p"] = top_p
+
+        payload.update(kwargs)
+
+        request_id = f"chatcmpl-{uuid.uuid4()}"
+        created_time = int(time.time())
+
+        if stream:
+            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
+        else:
+            return self._create_non_stream(request_id, created_time, model, payload, timeout, proxies)
+
+    def _post_with_retry(self, payload, stream=False, timeout=None, proxies=None):
+        """
+        Try all endpoints until one succeeds or all fail.
+        """
+        last_exception = None
+        for endpoint in self._client.api_endpoints:
+            try:
+                response = self._client.session.post(
+                    endpoint,
+                    headers=self._client.headers,
+                    json=payload,
+                    stream=stream,
+                    timeout=timeout or self._client.timeout,
+                    proxies=proxies or getattr(self._client, "proxies", None)
+                )
+                response.raise_for_status()
+                self._client.base_url = endpoint  # Update to working endpoint
+                return response
+            except requests.exceptions.RequestException as e:
+                last_exception = e
+                continue
+        raise IOError(f"All oivscode endpoints failed: {last_exception}") from last_exception
+
+    def _create_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> Generator[ChatCompletionChunk, None, None]:
+        try:
+            response = self._post_with_retry(payload, stream=True, timeout=timeout, proxies=proxies)
+            prompt_tokens = 0
+            completion_tokens = 0
+            total_tokens = 0
+
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8').strip()
+
+                    if decoded_line.startswith("data: "):
+                        json_str = decoded_line[6:]
+                        if json_str == "[DONE]":
+                            break
+                        try:
+                            data = json.loads(json_str)
+                            choice_data = data.get('choices', [{}])[0]
+                            delta_data = choice_data.get('delta', {})
+                            finish_reason = choice_data.get('finish_reason')
+
+                            usage_data = data.get('usage', {})
+                            if usage_data:
+                                prompt_tokens = usage_data.get('prompt_tokens', prompt_tokens)
+                                completion_tokens = usage_data.get('completion_tokens', completion_tokens)
+                                total_tokens = usage_data.get('total_tokens', total_tokens)
+
+                            delta = ChoiceDelta(
+                                content=delta_data.get('content'),
+                                role=delta_data.get('role'),
+                                tool_calls=delta_data.get('tool_calls')
+                            )
+
+                            choice = Choice(
+                                index=choice_data.get('index', 0),
+                                delta=delta,
+                                finish_reason=finish_reason,
+                                logprobs=choice_data.get('logprobs')
+                            )
+
+                            chunk = ChatCompletionChunk(
+                                id=request_id,
+                                choices=[choice],
+                                created=created_time,
+                                model=model,
+                                system_fingerprint=data.get('system_fingerprint')
+                            )
+
+                            if hasattr(chunk, "model_dump"):
+                                chunk_dict = chunk.model_dump(exclude_none=True)
+                            else:
+                                chunk_dict = chunk.dict(exclude_none=True)
+
+                            usage_dict = {
+                                "prompt_tokens": prompt_tokens or 10,
+                                "completion_tokens": completion_tokens or (len(delta_data.get('content', '')) if delta_data.get('content') else 0),
+                                "total_tokens": total_tokens or (10 + (len(delta_data.get('content', '')) if delta_data.get('content') else 0)),
+                                "estimated_cost": None
+                            }
+
+                            if delta_data.get('content'):
+                                completion_tokens += 1
+                                total_tokens = prompt_tokens + completion_tokens
+                                usage_dict["completion_tokens"] = completion_tokens
+                                usage_dict["total_tokens"] = total_tokens
+
+                            chunk_dict["usage"] = usage_dict
+
+                            yield chunk
+                        except json.JSONDecodeError:
+                            print(f"Warning: Could not decode JSON line: {json_str}")
+                            continue
+        except requests.exceptions.RequestException as e:
+            print(f"Error during oivscode stream request: {e}")
+            raise IOError(f"oivscode request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing oivscode stream: {e}")
+            raise
+
+    def _create_non_stream(
+        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
+    ) -> ChatCompletion:
+        try:
+            response = self._post_with_retry(payload, stream=False, timeout=timeout, proxies=proxies)
+            data = response.json()
+
+            choices_data = data.get('choices', [])
+            usage_data = data.get('usage', {})
+
+            choices = []
+            for choice_d in choices_data:
+                message_d = choice_d.get('message', {})
+                message = ChatCompletionMessage(
+                    role=message_d.get('role', 'assistant'),
+                    content=message_d.get('content', '')
+                )
+                choice = Choice(
+                    index=choice_d.get('index', 0),
+                    message=message,
+                    finish_reason=choice_d.get('finish_reason', 'stop')
+                )
+                choices.append(choice)
+
+            usage = CompletionUsage(
+                prompt_tokens=usage_data.get('prompt_tokens', 0),
+                completion_tokens=usage_data.get('completion_tokens', 0),
+                total_tokens=usage_data.get('total_tokens', 0)
+            )
+
+            completion = ChatCompletion(
+                id=request_id,
+                choices=choices,
+                created=created_time,
+                model=data.get('model', model),
+                usage=usage,
+            )
+            return completion
+
+        except requests.exceptions.RequestException as e:
+            print(f"Error during oivscode non-stream request: {e}")
+            raise IOError(f"oivscode request failed: {e}") from e
+        except Exception as e:
+            print(f"Error processing oivscode response: {e}")
+            raise
+
+class Chat(BaseChat):
+    def __init__(self, client: 'oivscode'):
+        self.completions = Completions(client)
+
+class oivscode(OpenAICompatibleProvider):
+
+    AVAILABLE_MODELS = [
+        "*",
+        "Qwen/Qwen2.5-72B-Instruct-Turbo",
+        "Qwen/Qwen2.5-Coder-32B-Instruct",
+        "claude-3-5-sonnet-20240620",
+        "claude-3-5-sonnet-20241022",
+        "claude-3-7-sonnet-20250219",
+        "custom/blackbox-base",
+        "custom/blackbox-pro",
+        "custom/blackbox-pro-designer",
+        "custom/blackbox-pro-plus",
+        "deepseek-r1",
+        "deepseek-v3",
+        "deepseek/deepseek-chat",
+        "gemini-2.5-pro-preview-03-25",
+        "gpt-4o-mini",
+        "grok-3-beta",
+        "image-gen",
+        "llama-4-maverick-17b-128e-instruct-fp8",
+        "o1",
+        "o3-mini",
+        "o4-mini",
+        "transcribe",
+        "anthropic/claude-sonnet-4"
+    ]
+
+    def __init__(self, timeout: Optional[int] = None):
+        self.timeout = timeout
+        self.api_endpoints = [
+            "https://oi-vscode-server.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-2.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-5.onrender.com/v1/chat/completions",
+            "https://oi-vscode-server-0501.onrender.com/v1/chat/completions"
+        ]
+        self.api_endpoint = random.choice(self.api_endpoints)
+        self.base_url = self.api_endpoint
+        self.session = requests.Session()
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9,en-GB;q=0.8,en-IN;q=0.7",
+            "cache-control": "no-cache",
+            "content-type": "application/json",
+            "pragma": "no-cache",
+            "priority": "u=1, i",
+            "sec-ch-ua": '"Not A(Brand";v="8", "Chromium";v="132", "Microsoft Edge";v="132"',
+            "sec-ch-ua-mobile": "?0",
+            "sec-ch-ua-platform": '"Windows"',
+            "sec-fetch-dest": "empty",
+            "sec-fetch-mode": "cors",
+            "sec-fetch-site": "same-site",
+        }
+        self.userid = ''.join(secrets.choice(string.ascii_letters + string.digits) for _ in range(21))
+        self.headers["userid"] = self.userid
+        self.session.headers.update(self.headers)
+        self.chat = Chat(self)
+
+    @property
+    def models(self):
+        class _ModelList:
+            def list(inner_self):
+                return type(self).AVAILABLE_MODELS
+        return _ModelList()
+
+if __name__ == "__main__":
+    # Example usage
+    client = oivscode()
+    chat = client.chat
+    response = chat.completions.create(
+        model="Qwen/Qwen2.5-72B-Instruct-Turbo",
+        messages=[{"role": "user", "content": "Hello, how are you?"}],
+        max_tokens=50,
+        stream=False
+    )
+    print(response)