webscout 8.3.7__py3-none-any.whl → 2025.10.13__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -60
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +16 -1
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -316
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -314
- webscout/Provider/Andi.py +1 -1
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +237 -236
- webscout/Provider/ChatSandbox.py +343 -343
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -208
- webscout/Provider/Deepinfra.py +370 -366
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -308
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -164
- webscout/Provider/GeminiProxy.py +167 -167
- webscout/Provider/GithubChat.py +371 -372
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -383
- webscout/Provider/Jadve.py +282 -282
- webscout/Provider/K2Think.py +307 -307
- webscout/Provider/Koboldai.py +205 -205
- webscout/Provider/LambdaChat.py +423 -423
- webscout/Provider/Nemotron.py +244 -244
- webscout/Provider/Netwrck.py +248 -248
- webscout/Provider/OLLAMA.py +395 -395
- webscout/Provider/OPENAI/Cloudflare.py +393 -393
- webscout/Provider/OPENAI/FalconH1.py +451 -451
- webscout/Provider/OPENAI/FreeGemini.py +296 -296
- webscout/Provider/OPENAI/K2Think.py +431 -431
- webscout/Provider/OPENAI/NEMOTRON.py +240 -240
- webscout/Provider/OPENAI/PI.py +427 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +465 -465
- webscout/Provider/OPENAI/__init__.py +33 -18
- webscout/Provider/OPENAI/base.py +248 -248
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +592 -592
- webscout/Provider/OPENAI/chatgptclone.py +521 -521
- webscout/Provider/OPENAI/chatsandbox.py +202 -202
- webscout/Provider/OPENAI/deepinfra.py +318 -314
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -314
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -355
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -535
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -340
- webscout/Provider/OPENAI/toolbaz.py +419 -416
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +243 -243
- webscout/Provider/PI.py +405 -405
- webscout/Provider/Perplexitylabs.py +430 -430
- webscout/Provider/QwenLM.py +272 -272
- webscout/Provider/STT/__init__.py +16 -1
- webscout/Provider/Sambanova.py +257 -257
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -18
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -18
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -237
- webscout/Provider/TextPollinationsAI.py +310 -310
- webscout/Provider/TogetherAI.py +356 -356
- webscout/Provider/TwoAI.py +312 -312
- webscout/Provider/TypliAI.py +311 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/UNFINISHED/GizAI.py +294 -294
- webscout/Provider/UNFINISHED/Marcus.py +198 -198
- webscout/Provider/UNFINISHED/Qodo.py +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/UNFINISHED/XenAI.py +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +250 -250
- webscout/Provider/VercelAI.py +256 -256
- webscout/Provider/WiseCat.py +231 -231
- webscout/Provider/WrDoChat.py +366 -366
- webscout/Provider/__init__.py +33 -18
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -331
- webscout/Provider/cerebras.py +446 -446
- webscout/Provider/chatglm.py +394 -301
- webscout/Provider/cleeai.py +211 -211
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -263
- webscout/Provider/julius.py +223 -223
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +383 -383
- webscout/Provider/searchchat.py +292 -292
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -367
- webscout/Provider/turboseek.py +273 -273
- webscout/Provider/typefully.py +207 -207
- webscout/Provider/yep.py +372 -372
- webscout/__init__.py +27 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +663 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/search/__init__.py +51 -0
- webscout/search/base.py +195 -0
- webscout/search/duckduckgo_main.py +54 -0
- webscout/search/engines/__init__.py +48 -0
- webscout/search/engines/bing.py +84 -0
- webscout/search/engines/bing_news.py +52 -0
- webscout/search/engines/brave.py +43 -0
- webscout/search/engines/duckduckgo/__init__.py +25 -0
- webscout/search/engines/duckduckgo/answers.py +78 -0
- webscout/search/engines/duckduckgo/base.py +187 -0
- webscout/search/engines/duckduckgo/images.py +97 -0
- webscout/search/engines/duckduckgo/maps.py +168 -0
- webscout/search/engines/duckduckgo/news.py +68 -0
- webscout/search/engines/duckduckgo/suggestions.py +21 -0
- webscout/search/engines/duckduckgo/text.py +211 -0
- webscout/search/engines/duckduckgo/translate.py +47 -0
- webscout/search/engines/duckduckgo/videos.py +63 -0
- webscout/search/engines/duckduckgo/weather.py +74 -0
- webscout/search/engines/mojeek.py +37 -0
- webscout/search/engines/wikipedia.py +56 -0
- webscout/search/engines/yahoo.py +65 -0
- webscout/search/engines/yahoo_news.py +64 -0
- webscout/search/engines/yandex.py +43 -0
- webscout/search/engines/yep/__init__.py +13 -0
- webscout/search/engines/yep/base.py +32 -0
- webscout/search/engines/yep/images.py +99 -0
- webscout/search/engines/yep/suggestions.py +35 -0
- webscout/search/engines/yep/text.py +114 -0
- webscout/search/http_client.py +156 -0
- webscout/search/results.py +137 -0
- webscout/search/yep_main.py +44 -0
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/version.py.bak +2 -0
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/METADATA +936 -937
- webscout-2025.10.13.dist-info/RECORD +329 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/OPENAI/Qwen3.py +0 -303
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/tempid.py +0 -134
- webscout/webscout_search.py +0 -1183
- webscout/webscout_search_async.py +0 -649
- webscout/yep_search.py +0 -346
- webscout-8.3.7.dist-info/RECORD +0 -301
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/WHEEL +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.7.dist-info → webscout-2025.10.13.dist-info}/top_level.txt +0 -0
|
@@ -1,202 +1,202 @@
|
|
|
1
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
2
|
-
import time
|
|
3
|
-
import json
|
|
4
|
-
from webscout.litagent import LitAgent
|
|
5
|
-
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
6
|
-
from webscout.Provider.OPENAI.utils import (
|
|
7
|
-
ChatCompletion,
|
|
8
|
-
ChatCompletionChunk,
|
|
9
|
-
Choice,
|
|
10
|
-
ChatCompletionMessage,
|
|
11
|
-
ChoiceDelta,
|
|
12
|
-
CompletionUsage,
|
|
13
|
-
format_prompt,
|
|
14
|
-
count_tokens
|
|
15
|
-
)
|
|
16
|
-
from curl_cffi.requests import Session
|
|
17
|
-
from curl_cffi.const import CurlHttpVersion
|
|
18
|
-
from webscout.AIutel import sanitize_stream
|
|
19
|
-
from webscout import exceptions
|
|
20
|
-
|
|
21
|
-
# ANSI escape codes for formatting
|
|
22
|
-
BOLD = "\033[1m"
|
|
23
|
-
RED = "\033[91m"
|
|
24
|
-
RESET = "\033[0m"
|
|
25
|
-
|
|
26
|
-
class Completions(BaseCompletions):
|
|
27
|
-
def __init__(self, client: 'ChatSandbox'):
|
|
28
|
-
self._client = client
|
|
29
|
-
|
|
30
|
-
@staticmethod
|
|
31
|
-
def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
|
|
32
|
-
"""Extracts content from the chatsandbox stream format."""
|
|
33
|
-
if isinstance(chunk, str):
|
|
34
|
-
try:
|
|
35
|
-
data = json.loads(chunk)
|
|
36
|
-
if isinstance(data, dict) and "reasoning_content" in data:
|
|
37
|
-
return data["reasoning_content"]
|
|
38
|
-
return chunk
|
|
39
|
-
except json.JSONDecodeError:
|
|
40
|
-
return chunk
|
|
41
|
-
return None
|
|
42
|
-
|
|
43
|
-
def create(
|
|
44
|
-
self,
|
|
45
|
-
*,
|
|
46
|
-
model: str,
|
|
47
|
-
messages: List[Dict[str, str]],
|
|
48
|
-
max_tokens: Optional[int] = None,
|
|
49
|
-
stream: bool = False,
|
|
50
|
-
temperature: Optional[float] = None,
|
|
51
|
-
top_p: Optional[float] = None,
|
|
52
|
-
timeout: Optional[int] = None,
|
|
53
|
-
proxies: Optional[dict] = None,
|
|
54
|
-
**kwargs: Any
|
|
55
|
-
) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
|
|
56
|
-
"""
|
|
57
|
-
OpenAI-compatible chat/completions endpoint for ChatSandbox.
|
|
58
|
-
"""
|
|
59
|
-
# Use model name conversion for compatibility
|
|
60
|
-
model = self._client.convert_model_name(model)
|
|
61
|
-
# Compose the conversation prompt using format_prompt
|
|
62
|
-
question = format_prompt(messages, add_special_tokens=False, do_continue=True)
|
|
63
|
-
payload = {
|
|
64
|
-
"messages": [question],
|
|
65
|
-
"character": model
|
|
66
|
-
}
|
|
67
|
-
request_id = f"chatcmpl-{int(time.time() * 1000)}"
|
|
68
|
-
created_time = int(time.time())
|
|
69
|
-
url = "https://chatsandbox.com/api/chat"
|
|
70
|
-
agent = LitAgent()
|
|
71
|
-
headers = {
|
|
72
|
-
'authority': 'chatsandbox.com',
|
|
73
|
-
'accept': '*/*',
|
|
74
|
-
'accept-encoding': 'gzip, deflate, br',
|
|
75
|
-
'accept-language': 'en-US,en;q=0.9',
|
|
76
|
-
'content-type': 'application/json',
|
|
77
|
-
'origin': 'https://chatsandbox.com',
|
|
78
|
-
'referer': f'https://chatsandbox.com/chat/{model}',
|
|
79
|
-
'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
|
|
80
|
-
'sec-ch-ua-mobile': '?0',
|
|
81
|
-
'sec-ch-ua-platform': '"Windows"',
|
|
82
|
-
'sec-fetch-dest': 'empty',
|
|
83
|
-
'sec-fetch-mode': 'cors',
|
|
84
|
-
'sec-fetch-site': 'same-origin',
|
|
85
|
-
'user-agent': agent.random(),
|
|
86
|
-
'dnt': '1',
|
|
87
|
-
'sec-gpc': '1',
|
|
88
|
-
}
|
|
89
|
-
session = Session()
|
|
90
|
-
session.headers.update(headers)
|
|
91
|
-
session.proxies = proxies if proxies is not None else {}
|
|
92
|
-
|
|
93
|
-
def for_stream():
|
|
94
|
-
try:
|
|
95
|
-
response = session.post(
|
|
96
|
-
url,
|
|
97
|
-
json=payload,
|
|
98
|
-
stream=True,
|
|
99
|
-
timeout=timeout if timeout is not None else 30,
|
|
100
|
-
impersonate="chrome120",
|
|
101
|
-
http_version=CurlHttpVersion.V1_1
|
|
102
|
-
)
|
|
103
|
-
if not response.ok:
|
|
104
|
-
raise exceptions.FailedToGenerateResponseError(
|
|
105
|
-
f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
|
|
106
|
-
)
|
|
107
|
-
|
|
108
|
-
streaming_text = ""
|
|
109
|
-
# Use sanitize_stream with the custom extractor
|
|
110
|
-
processed_stream = sanitize_stream(
|
|
111
|
-
data=response.iter_content(chunk_size=None), # Pass byte iterator
|
|
112
|
-
intro_value=None, # No simple prefix to remove here
|
|
113
|
-
to_json=False, # Content is not JSON
|
|
114
|
-
content_extractor=self._chatsandbox_extractor # Use the specific extractor
|
|
115
|
-
)
|
|
116
|
-
|
|
117
|
-
for content_chunk in processed_stream:
|
|
118
|
-
if content_chunk and isinstance(content_chunk, str):
|
|
119
|
-
streaming_text += content_chunk
|
|
120
|
-
delta = ChoiceDelta(content=content_chunk)
|
|
121
|
-
choice = Choice(index=0, delta=delta, finish_reason=None)
|
|
122
|
-
chunk_obj = ChatCompletionChunk(
|
|
123
|
-
id=request_id,
|
|
124
|
-
choices=[choice],
|
|
125
|
-
created=created_time,
|
|
126
|
-
model=model,
|
|
127
|
-
)
|
|
128
|
-
yield chunk_obj
|
|
129
|
-
|
|
130
|
-
# Final chunk
|
|
131
|
-
delta = ChoiceDelta(content=None)
|
|
132
|
-
choice = Choice(index=0, delta=delta, finish_reason="stop")
|
|
133
|
-
chunk_obj = ChatCompletionChunk(
|
|
134
|
-
id=request_id,
|
|
135
|
-
choices=[choice],
|
|
136
|
-
created=created_time,
|
|
137
|
-
model=model,
|
|
138
|
-
)
|
|
139
|
-
yield chunk_obj
|
|
140
|
-
except Exception as e:
|
|
141
|
-
raise RuntimeError(f"ChatSandbox streaming request failed: {e}")
|
|
142
|
-
def for_non_stream():
|
|
143
|
-
streaming_text = ""
|
|
144
|
-
for chunk_obj in for_stream():
|
|
145
|
-
if chunk_obj.choices[0].delta.content:
|
|
146
|
-
streaming_text += chunk_obj.choices[0].delta.content
|
|
147
|
-
prompt_tokens = count_tokens(question)
|
|
148
|
-
completion_tokens = count_tokens(streaming_text)
|
|
149
|
-
total_tokens = prompt_tokens + completion_tokens
|
|
150
|
-
usage = CompletionUsage(
|
|
151
|
-
prompt_tokens=prompt_tokens,
|
|
152
|
-
completion_tokens=completion_tokens,
|
|
153
|
-
total_tokens=total_tokens
|
|
154
|
-
)
|
|
155
|
-
message = ChatCompletionMessage(role="assistant", content=streaming_text)
|
|
156
|
-
choice = Choice(index=0, message=message, finish_reason="stop")
|
|
157
|
-
completion = ChatCompletion(
|
|
158
|
-
id=request_id,
|
|
159
|
-
choices=[choice],
|
|
160
|
-
created=created_time,
|
|
161
|
-
model=model,
|
|
162
|
-
usage=usage,
|
|
163
|
-
)
|
|
164
|
-
return completion
|
|
165
|
-
return for_stream() if stream else for_non_stream()
|
|
166
|
-
|
|
167
|
-
class Chat(BaseChat):
|
|
168
|
-
def __init__(self, client: 'ChatSandbox'):
|
|
169
|
-
self.completions = Completions(client)
|
|
170
|
-
|
|
171
|
-
class ChatSandbox(OpenAICompatibleProvider):
|
|
172
|
-
AVAILABLE_MODELS = ["openai", "deepseek", "llama", "gemini", "mistral-large", "deepseek-r1", "deepseek-r1-full", "gemini-thinking", "openai-o1-mini", "llama", "mistral", "gemma-3"]
|
|
173
|
-
chat: Chat
|
|
174
|
-
def __init__(self):
|
|
175
|
-
self.chat = Chat(self)
|
|
176
|
-
@property
|
|
177
|
-
def models(self):
|
|
178
|
-
class _ModelList:
|
|
179
|
-
def list(inner_self):
|
|
180
|
-
return type(self).AVAILABLE_MODELS
|
|
181
|
-
return _ModelList()
|
|
182
|
-
def convert_model_name(self, model: str) -> str:
|
|
183
|
-
if model in self.AVAILABLE_MODELS:
|
|
184
|
-
return model
|
|
185
|
-
for available_model in self.AVAILABLE_MODELS:
|
|
186
|
-
if model.lower() in available_model.lower():
|
|
187
|
-
return available_model
|
|
188
|
-
# Default to openai if no match
|
|
189
|
-
print(f"{RED}{BOLD}Warning: Model '{model}' not found, using default model 'openai'{RESET}")
|
|
190
|
-
return "openai"
|
|
191
|
-
|
|
192
|
-
if __name__ == "__main__":
|
|
193
|
-
client = ChatSandbox()
|
|
194
|
-
response = client.chat.completions.create(
|
|
195
|
-
model="openai",
|
|
196
|
-
messages=[
|
|
197
|
-
{"role": "system", "content": "You are a helpful assistant."},
|
|
198
|
-
{"role": "user", "content": "Explain the theory of relativity in simple terms."}
|
|
199
|
-
],
|
|
200
|
-
stream=False
|
|
201
|
-
)
|
|
202
|
-
print(response.choices[0].message.content)
|
|
1
|
+
from typing import List, Dict, Optional, Union, Generator, Any
|
|
2
|
+
import time
|
|
3
|
+
import json
|
|
4
|
+
from webscout.litagent import LitAgent
|
|
5
|
+
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
6
|
+
from webscout.Provider.OPENAI.utils import (
|
|
7
|
+
ChatCompletion,
|
|
8
|
+
ChatCompletionChunk,
|
|
9
|
+
Choice,
|
|
10
|
+
ChatCompletionMessage,
|
|
11
|
+
ChoiceDelta,
|
|
12
|
+
CompletionUsage,
|
|
13
|
+
format_prompt,
|
|
14
|
+
count_tokens
|
|
15
|
+
)
|
|
16
|
+
from curl_cffi.requests import Session
|
|
17
|
+
from curl_cffi.const import CurlHttpVersion
|
|
18
|
+
from webscout.AIutel import sanitize_stream
|
|
19
|
+
from webscout import exceptions
|
|
20
|
+
|
|
21
|
+
# ANSI escape codes for formatting
# Used by ChatSandbox.convert_model_name to print a colored warning when an
# unknown model name falls back to the default.
BOLD = "\033[1m"    # bold text
RED = "\033[91m"    # bright-red foreground
RESET = "\033[0m"   # reset all terminal attributes
|
|
25
|
+
|
|
26
|
+
class Completions(BaseCompletions):
    """OpenAI-compatible ``chat.completions`` endpoint for ChatSandbox.

    Flattens an OpenAI-style message list into ChatSandbox's single-prompt
    payload and adapts the streamed reply back into ``ChatCompletionChunk`` /
    ``ChatCompletion`` objects.
    """

    def __init__(self, client: 'ChatSandbox'):
        self._client = client

    @staticmethod
    def _chatsandbox_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract the text content from one raw ChatSandbox stream chunk.

        Chunks are usually plain text, but the service can emit a JSON object
        carrying a ``reasoning_content`` field; in that case the reasoning
        text is returned instead. Non-string chunks yield ``None``.
        """
        if isinstance(chunk, str):
            try:
                data = json.loads(chunk)
                if isinstance(data, dict) and "reasoning_content" in data:
                    return data["reasoning_content"]
                return chunk
            except json.JSONDecodeError:
                # Not JSON after all - treat it as plain text.
                return chunk
        return None

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[dict] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """OpenAI-compatible chat/completions endpoint for ChatSandbox.

        Args:
            model: Requested model name; normalized via
                ``ChatSandbox.convert_model_name``.
            messages: OpenAI-style list of ``{"role", "content"}`` dicts.
            max_tokens: Accepted for API compatibility only; the ChatSandbox
                endpoint has no equivalent, so it is ignored.
            stream: When True, return a generator of ``ChatCompletionChunk``;
                otherwise return a single ``ChatCompletion``.
            temperature: Ignored (not supported by the upstream endpoint).
            top_p: Ignored (not supported by the upstream endpoint).
            timeout: Request timeout in seconds (defaults to 30).
            proxies: Optional requests-style proxy mapping for the session.

        Raises:
            exceptions.FailedToGenerateResponseError: On a non-2xx HTTP reply.
            RuntimeError: On any other transport or parsing failure (the
                original exception is chained as ``__cause__``).
        """
        # Use model name conversion for compatibility.
        model = self._client.convert_model_name(model)
        # ChatSandbox takes one prompt string, so flatten the chat history.
        question = format_prompt(messages, add_special_tokens=False, do_continue=True)
        payload = {
            "messages": [question],
            "character": model
        }
        request_id = f"chatcmpl-{int(time.time() * 1000)}"
        created_time = int(time.time())
        url = "https://chatsandbox.com/api/chat"
        agent = LitAgent()
        # Browser-like headers; the endpoint rejects obviously non-browser
        # clients, hence the impersonation below as well.
        headers = {
            'authority': 'chatsandbox.com',
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'en-US,en;q=0.9',
            'content-type': 'application/json',
            'origin': 'https://chatsandbox.com',
            'referer': f'https://chatsandbox.com/chat/{model}',
            'sec-ch-ua': '"Microsoft Edge";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': agent.random(),
            'dnt': '1',
            'sec-gpc': '1',
        }
        session = Session()
        session.headers.update(headers)
        session.proxies = proxies if proxies is not None else {}

        def for_stream():
            try:
                response = session.post(
                    url,
                    json=payload,
                    stream=True,
                    timeout=timeout if timeout is not None else 30,
                    impersonate="chrome120",
                    http_version=CurlHttpVersion.V1_1
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )

                # Use sanitize_stream with the custom extractor.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # byte iterator
                    intro_value=None,   # no fixed prefix to strip
                    to_json=False,      # chunks are not guaranteed to be JSON
                    content_extractor=self._chatsandbox_extractor
                )

                for content_chunk in processed_stream:
                    if content_chunk and isinstance(content_chunk, str):
                        delta = ChoiceDelta(content=content_chunk)
                        choice = Choice(index=0, delta=delta, finish_reason=None)
                        chunk_obj = ChatCompletionChunk(
                            id=request_id,
                            choices=[choice],
                            created=created_time,
                            model=model,
                        )
                        yield chunk_obj

                # Terminal chunk signalling the end of the stream.
                delta = ChoiceDelta(content=None)
                choice = Choice(index=0, delta=delta, finish_reason="stop")
                chunk_obj = ChatCompletionChunk(
                    id=request_id,
                    choices=[choice],
                    created=created_time,
                    model=model,
                )
                yield chunk_obj
            except exceptions.FailedToGenerateResponseError:
                # Deliberate provider error: re-raise unchanged instead of
                # masking it as a generic RuntimeError.
                raise
            except Exception as e:
                # Chain the cause so the original traceback survives.
                raise RuntimeError(f"ChatSandbox streaming request failed: {e}") from e

        def for_non_stream():
            # Drain the stream and assemble a single completion object.
            streaming_text = ""
            for chunk_obj in for_stream():
                if chunk_obj.choices[0].delta.content:
                    streaming_text += chunk_obj.choices[0].delta.content
            prompt_tokens = count_tokens(question)
            completion_tokens = count_tokens(streaming_text)
            total_tokens = prompt_tokens + completion_tokens
            usage = CompletionUsage(
                prompt_tokens=prompt_tokens,
                completion_tokens=completion_tokens,
                total_tokens=total_tokens
            )
            message = ChatCompletionMessage(role="assistant", content=streaming_text)
            choice = Choice(index=0, message=message, finish_reason="stop")
            completion = ChatCompletion(
                id=request_id,
                choices=[choice],
                created=created_time,
                model=model,
                usage=usage,
            )
            return completion

        return for_stream() if stream else for_non_stream()
|
|
166
|
+
|
|
167
|
+
class Chat(BaseChat):
    """Thin namespace object exposing ``completions``, mirroring the
    OpenAI SDK's ``client.chat.completions`` access path."""

    def __init__(self, client: 'ChatSandbox'):
        # Completions is the only surface ChatSandbox offers.
        self.completions = Completions(client)
|
|
170
|
+
|
|
171
|
+
class ChatSandbox(OpenAICompatibleProvider):
    """OpenAI-compatible provider backed by chatsandbox.com.

    Usage mirrors the OpenAI SDK::

        client = ChatSandbox()
        client.chat.completions.create(model="openai", messages=[...])
    """

    # Supported ChatSandbox "character" names. Fixed defect: "llama" was
    # previously listed twice; duplicates removed.
    AVAILABLE_MODELS = [
        "openai",
        "deepseek",
        "llama",
        "gemini",
        "mistral-large",
        "deepseek-r1",
        "deepseek-r1-full",
        "gemini-thinking",
        "openai-o1-mini",
        "mistral",
        "gemma-3",
    ]

    chat: Chat

    def __init__(self):
        self.chat = Chat(self)

    @property
    def models(self):
        """Return an object with a ``list()`` method, mirroring the OpenAI SDK."""
        class _ModelList:
            def list(inner_self):
                return type(self).AVAILABLE_MODELS
        return _ModelList()

    def convert_model_name(self, model: str) -> str:
        """Map *model* onto a supported ChatSandbox model name.

        Resolution order: exact match; then the first available model whose
        name contains *model* case-insensitively; otherwise fall back to
        ``"openai"`` and print a colored warning.
        """
        if model in self.AVAILABLE_MODELS:
            return model
        for available_model in self.AVAILABLE_MODELS:
            if model.lower() in available_model.lower():
                return available_model
        # Default to openai if no match.
        print(f"{RED}{BOLD}Warning: Model '{model}' not found, using default model 'openai'{RESET}")
        return "openai"
|
|
191
|
+
|
|
192
|
+
if __name__ == "__main__":
    # Manual smoke test: send one question and print the assistant's reply.
    sandbox = ChatSandbox()
    demo_messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Explain the theory of relativity in simple terms."},
    ]
    result = sandbox.chat.completions.create(
        model="openai",
        messages=demo_messages,
        stream=False,
    )
    print(result.choices[0].message.content)
|