webscout 8.3.6__py3-none-any.whl → 2025.10.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout has been flagged as potentially problematic; see the registry's advisory for details.
- webscout/AIauto.py +250 -250
- webscout/AIbase.py +379 -379
- webscout/AIutel.py +60 -58
- webscout/Bard.py +1012 -1012
- webscout/Bing_search.py +417 -417
- webscout/DWEBS.py +529 -529
- webscout/Extra/Act.md +309 -309
- webscout/Extra/GitToolkit/__init__.py +10 -10
- webscout/Extra/GitToolkit/gitapi/README.md +110 -110
- webscout/Extra/GitToolkit/gitapi/__init__.py +11 -11
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -195
- webscout/Extra/GitToolkit/gitapi/user.py +96 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +61 -61
- webscout/Extra/YTToolkit/README.md +375 -375
- webscout/Extra/YTToolkit/YTdownloader.py +956 -956
- webscout/Extra/YTToolkit/__init__.py +2 -2
- webscout/Extra/YTToolkit/transcriber.py +475 -475
- webscout/Extra/YTToolkit/ytapi/README.md +44 -44
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -118
- webscout/Extra/YTToolkit/ytapi/https.py +88 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +58 -58
- webscout/Extra/YTToolkit/ytapi/pool.py +7 -7
- webscout/Extra/YTToolkit/ytapi/query.py +39 -39
- webscout/Extra/YTToolkit/ytapi/stream.py +62 -62
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -62
- webscout/Extra/YTToolkit/ytapi/video.py +232 -232
- webscout/Extra/autocoder/__init__.py +9 -9
- webscout/Extra/autocoder/autocoder.py +1105 -1105
- webscout/Extra/autocoder/autocoder_utiles.py +332 -332
- webscout/Extra/gguf.md +429 -429
- webscout/Extra/gguf.py +1213 -1213
- webscout/Extra/tempmail/README.md +487 -487
- webscout/Extra/tempmail/__init__.py +27 -27
- webscout/Extra/tempmail/async_utils.py +140 -140
- webscout/Extra/tempmail/base.py +160 -160
- webscout/Extra/tempmail/cli.py +186 -186
- webscout/Extra/tempmail/emailnator.py +84 -84
- webscout/Extra/tempmail/mail_tm.py +360 -360
- webscout/Extra/tempmail/temp_mail_io.py +291 -291
- webscout/Extra/weather.md +281 -281
- webscout/Extra/weather.py +193 -193
- webscout/Litlogger/README.md +10 -10
- webscout/Litlogger/__init__.py +15 -15
- webscout/Litlogger/formats.py +13 -13
- webscout/Litlogger/handlers.py +121 -121
- webscout/Litlogger/levels.py +13 -13
- webscout/Litlogger/logger.py +134 -134
- webscout/Provider/AISEARCH/Perplexity.py +332 -332
- webscout/Provider/AISEARCH/README.md +279 -279
- webscout/Provider/AISEARCH/__init__.py +33 -11
- webscout/Provider/AISEARCH/felo_search.py +206 -206
- webscout/Provider/AISEARCH/genspark_search.py +323 -323
- webscout/Provider/AISEARCH/hika_search.py +185 -185
- webscout/Provider/AISEARCH/iask_search.py +410 -410
- webscout/Provider/AISEARCH/monica_search.py +219 -219
- webscout/Provider/AISEARCH/scira_search.py +316 -314
- webscout/Provider/AISEARCH/stellar_search.py +177 -177
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -255
- webscout/Provider/Aitopia.py +314 -315
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/Apriel.py +306 -0
- webscout/Provider/ChatGPTClone.py +236 -236
- webscout/Provider/ChatSandbox.py +343 -342
- webscout/Provider/Cloudflare.py +324 -324
- webscout/Provider/Cohere.py +208 -207
- webscout/Provider/Deepinfra.py +370 -369
- webscout/Provider/ExaAI.py +260 -260
- webscout/Provider/ExaChat.py +308 -387
- webscout/Provider/Flowith.py +221 -221
- webscout/Provider/GMI.py +293 -0
- webscout/Provider/Gemini.py +164 -162
- webscout/Provider/GeminiProxy.py +167 -166
- webscout/Provider/GithubChat.py +371 -370
- webscout/Provider/Groq.py +800 -800
- webscout/Provider/HeckAI.py +383 -379
- webscout/Provider/Jadve.py +282 -297
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +206 -384
- webscout/Provider/LambdaChat.py +423 -425
- webscout/Provider/Nemotron.py +244 -245
- webscout/Provider/Netwrck.py +248 -247
- webscout/Provider/OLLAMA.py +395 -394
- webscout/Provider/OPENAI/Cloudflare.py +394 -395
- webscout/Provider/OPENAI/FalconH1.py +452 -457
- webscout/Provider/OPENAI/FreeGemini.py +297 -299
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +432 -329
- webscout/Provider/OPENAI/NEMOTRON.py +241 -244
- webscout/Provider/OPENAI/PI.py +428 -427
- webscout/Provider/OPENAI/README.md +959 -959
- webscout/Provider/OPENAI/TogetherAI.py +345 -345
- webscout/Provider/OPENAI/TwoAI.py +466 -467
- webscout/Provider/OPENAI/__init__.py +33 -59
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +249 -269
- webscout/Provider/OPENAI/chatglm.py +528 -0
- webscout/Provider/OPENAI/chatgpt.py +593 -588
- webscout/Provider/OPENAI/chatgptclone.py +521 -524
- webscout/Provider/OPENAI/chatsandbox.py +202 -177
- webscout/Provider/OPENAI/deepinfra.py +319 -315
- webscout/Provider/OPENAI/e2b.py +1665 -1665
- webscout/Provider/OPENAI/exaai.py +420 -420
- webscout/Provider/OPENAI/exachat.py +452 -452
- webscout/Provider/OPENAI/friendli.py +232 -232
- webscout/Provider/OPENAI/{refact.py → gmi.py} +324 -274
- webscout/Provider/OPENAI/groq.py +364 -364
- webscout/Provider/OPENAI/heckai.py +314 -311
- webscout/Provider/OPENAI/llmchatco.py +337 -337
- webscout/Provider/OPENAI/netwrck.py +355 -354
- webscout/Provider/OPENAI/oivscode.py +290 -290
- webscout/Provider/OPENAI/opkfc.py +518 -518
- webscout/Provider/OPENAI/pydantic_imports.py +1 -1
- webscout/Provider/OPENAI/scirachat.py +535 -529
- webscout/Provider/OPENAI/sonus.py +308 -308
- webscout/Provider/OPENAI/standardinput.py +442 -442
- webscout/Provider/OPENAI/textpollinations.py +340 -348
- webscout/Provider/OPENAI/toolbaz.py +419 -413
- webscout/Provider/OPENAI/typefully.py +362 -362
- webscout/Provider/OPENAI/utils.py +295 -295
- webscout/Provider/OPENAI/venice.py +436 -436
- webscout/Provider/OPENAI/wisecat.py +387 -387
- webscout/Provider/OPENAI/writecream.py +166 -166
- webscout/Provider/OPENAI/x0gpt.py +378 -378
- webscout/Provider/OPENAI/yep.py +389 -389
- webscout/Provider/OpenGPT.py +230 -230
- webscout/Provider/Openai.py +244 -496
- webscout/Provider/PI.py +405 -404
- webscout/Provider/Perplexitylabs.py +430 -431
- webscout/Provider/QwenLM.py +272 -254
- webscout/Provider/STT/__init__.py +32 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +257 -258
- webscout/Provider/StandardInput.py +309 -309
- webscout/Provider/TTI/README.md +82 -82
- webscout/Provider/TTI/__init__.py +33 -12
- webscout/Provider/TTI/aiarta.py +413 -413
- webscout/Provider/TTI/base.py +136 -136
- webscout/Provider/TTI/bing.py +243 -243
- webscout/Provider/TTI/gpt1image.py +149 -149
- webscout/Provider/TTI/imagen.py +196 -196
- webscout/Provider/TTI/infip.py +211 -211
- webscout/Provider/TTI/magicstudio.py +232 -232
- webscout/Provider/TTI/monochat.py +219 -219
- webscout/Provider/TTI/piclumen.py +214 -214
- webscout/Provider/TTI/pixelmuse.py +232 -232
- webscout/Provider/TTI/pollinations.py +232 -232
- webscout/Provider/TTI/together.py +288 -288
- webscout/Provider/TTI/utils.py +12 -12
- webscout/Provider/TTI/venice.py +367 -367
- webscout/Provider/TTS/README.md +192 -192
- webscout/Provider/TTS/__init__.py +33 -10
- webscout/Provider/TTS/parler.py +110 -110
- webscout/Provider/TTS/streamElements.py +333 -333
- webscout/Provider/TTS/utils.py +280 -280
- webscout/Provider/TeachAnything.py +237 -236
- webscout/Provider/TextPollinationsAI.py +311 -318
- webscout/Provider/TogetherAI.py +356 -357
- webscout/Provider/TwoAI.py +313 -569
- webscout/Provider/TypliAI.py +312 -311
- webscout/Provider/UNFINISHED/ChatHub.py +208 -208
- webscout/Provider/UNFINISHED/ChutesAI.py +313 -313
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +294 -294
- webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +198 -198
- webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +477 -477
- webscout/Provider/UNFINISHED/VercelAIGateway.py +338 -338
- webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +324 -324
- webscout/Provider/UNFINISHED/Youchat.py +330 -330
- webscout/Provider/UNFINISHED/liner.py +334 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +262 -262
- webscout/Provider/UNFINISHED/puterjs.py +634 -634
- webscout/Provider/UNFINISHED/samurai.py +223 -223
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -119
- webscout/Provider/Venice.py +251 -250
- webscout/Provider/VercelAI.py +256 -255
- webscout/Provider/WiseCat.py +232 -231
- webscout/Provider/WrDoChat.py +367 -366
- webscout/Provider/__init__.py +33 -86
- webscout/Provider/ai4chat.py +174 -174
- webscout/Provider/akashgpt.py +331 -334
- webscout/Provider/cerebras.py +446 -340
- webscout/Provider/chatglm.py +394 -214
- webscout/Provider/cleeai.py +211 -212
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +282 -282
- webscout/Provider/geminiapi.py +208 -208
- webscout/Provider/granite.py +261 -261
- webscout/Provider/hermes.py +263 -265
- webscout/Provider/julius.py +223 -222
- webscout/Provider/learnfastai.py +309 -309
- webscout/Provider/llama3mitril.py +214 -214
- webscout/Provider/llmchat.py +243 -243
- webscout/Provider/llmchatco.py +290 -290
- webscout/Provider/meta.py +801 -801
- webscout/Provider/oivscode.py +309 -309
- webscout/Provider/scira_chat.py +384 -457
- webscout/Provider/searchchat.py +292 -291
- webscout/Provider/sonus.py +258 -258
- webscout/Provider/toolbaz.py +370 -364
- webscout/Provider/turboseek.py +274 -265
- webscout/Provider/typefully.py +208 -207
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +372 -371
- webscout/__init__.py +30 -31
- webscout/__main__.py +5 -5
- webscout/auth/api_key_manager.py +189 -189
- webscout/auth/config.py +175 -175
- webscout/auth/models.py +185 -185
- webscout/auth/routes.py +664 -664
- webscout/auth/simple_logger.py +236 -236
- webscout/cli.py +523 -523
- webscout/conversation.py +438 -438
- webscout/exceptions.py +361 -361
- webscout/litagent/Readme.md +298 -298
- webscout/litagent/__init__.py +28 -28
- webscout/litagent/agent.py +581 -581
- webscout/litagent/constants.py +59 -59
- webscout/litprinter/__init__.py +58 -58
- webscout/models.py +181 -181
- webscout/optimizers.py +419 -419
- webscout/prompt_manager.py +288 -288
- webscout/sanitize.py +1078 -1078
- webscout/scout/README.md +401 -401
- webscout/scout/__init__.py +8 -8
- webscout/scout/core/__init__.py +6 -6
- webscout/scout/core/crawler.py +297 -297
- webscout/scout/core/scout.py +706 -706
- webscout/scout/core/search_result.py +95 -95
- webscout/scout/core/text_analyzer.py +62 -62
- webscout/scout/core/text_utils.py +277 -277
- webscout/scout/core/web_analyzer.py +51 -51
- webscout/scout/element.py +599 -599
- webscout/scout/parsers/__init__.py +69 -69
- webscout/scout/parsers/html5lib_parser.py +172 -172
- webscout/scout/parsers/html_parser.py +236 -236
- webscout/scout/parsers/lxml_parser.py +178 -178
- webscout/scout/utils.py +37 -37
- webscout/swiftcli/Readme.md +323 -323
- webscout/swiftcli/__init__.py +95 -95
- webscout/swiftcli/core/__init__.py +7 -7
- webscout/swiftcli/core/cli.py +308 -308
- webscout/swiftcli/core/context.py +104 -104
- webscout/swiftcli/core/group.py +241 -241
- webscout/swiftcli/decorators/__init__.py +28 -28
- webscout/swiftcli/decorators/command.py +221 -221
- webscout/swiftcli/decorators/options.py +220 -220
- webscout/swiftcli/decorators/output.py +302 -302
- webscout/swiftcli/exceptions.py +21 -21
- webscout/swiftcli/plugins/__init__.py +9 -9
- webscout/swiftcli/plugins/base.py +135 -135
- webscout/swiftcli/plugins/manager.py +269 -269
- webscout/swiftcli/utils/__init__.py +59 -59
- webscout/swiftcli/utils/formatting.py +252 -252
- webscout/swiftcli/utils/parsing.py +267 -267
- webscout/update_checker.py +117 -117
- webscout/version.py +1 -1
- webscout/webscout_search.py +1183 -1183
- webscout/webscout_search_async.py +649 -649
- webscout/yep_search.py +346 -346
- webscout/zeroart/README.md +89 -89
- webscout/zeroart/__init__.py +134 -134
- webscout/zeroart/base.py +66 -66
- webscout/zeroart/effects.py +100 -100
- webscout/zeroart/fonts.py +1238 -1238
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/METADATA +937 -936
- webscout-2025.10.11.dist-info/RECORD +300 -0
- webscout/Provider/AISEARCH/DeepFind.py +0 -254
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/Qwen3.py +0 -304
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/OPENAI/qodo.py +0 -630
- webscout/Provider/OPENAI/xenai.py +0 -514
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- webscout/tempid.py +0 -128
- webscout-8.3.6.dist-info/RECORD +0 -327
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-2025.10.11.dist-info}/top_level.txt +0 -0
|
@@ -1,431 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
import uuid
|
|
3
|
-
import json
|
|
4
|
-
import random
|
|
5
|
-
import string
|
|
6
|
-
from typing import List, Dict, Optional, Union, Generator, Any
|
|
7
|
-
|
|
8
|
-
# Use curl_cffi for requests
|
|
9
|
-
from curl_cffi.requests import Session
|
|
10
|
-
from curl_cffi import CurlError
|
|
11
|
-
|
|
12
|
-
# Import base classes and utility structures
|
|
13
|
-
from webscout.Provider.OPENAI.base import OpenAICompatibleProvider, BaseChat, BaseCompletions
|
|
14
|
-
from webscout.Provider.OPENAI.utils import (
|
|
15
|
-
ChatCompletionChunk, ChatCompletion, Choice, ChoiceDelta,
|
|
16
|
-
ChatCompletionMessage, CompletionUsage
|
|
17
|
-
)
|
|
18
|
-
|
|
19
|
-
# Import LitAgent for user agent generation
try:
    from webscout.litagent import LitAgent
except ImportError:
    # Fallback stub used when webscout is not installed or accessible.
    # It must mirror the methods this module actually calls on LitAgent:
    # random() AND generate_fingerprint() (used when building request headers).
    class LitAgent:
        def random(self) -> str:
            """Return a default desktop Chrome user-agent string."""
            print("Warning: LitAgent not found. Using default minimal headers.")
            return "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36"

        def generate_fingerprint(self) -> dict:
            """Return minimal browser-like headers.

            The real LitAgent produces a richer fingerprint; this stub only
            supplies a user-agent so header construction does not crash with
            AttributeError when webscout is unavailable.
            """
            return {"user-agent": self.random()}
|
|
29
|
-
|
|
30
|
-
# ANSI escape sequences for colored/bold console output used by the
# warning and error messages below.
BOLD = "\033[1m"
RED = "\033[91m"
RESET = "\033[0m"
|
|
34
|
-
|
|
35
|
-
class Completions(BaseCompletions):
    """Completions endpoint for the MCPCore API, mimicking the shape of
    ``openai.chat.completions``."""

    def __init__(self, client: 'MCPCore'):
        self._client = client

    def create(
        self,
        *,
        model: str,
        messages: List[Dict[str, str]],
        max_tokens: Optional[int] = None,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        timeout: Optional[int] = None,
        proxies: Optional[Dict[str, str]] = None,
        **kwargs: Any
    ) -> Union[ChatCompletion, Generator[ChatCompletionChunk, None, None]]:
        """
        Creates a model response for the given chat conversation using MCPCore API.
        Mimics openai.chat.completions.create.

        Args:
            model: Model identifier; must appear in ``MCPCore.AVAILABLE_MODELS``.
            messages: Conversation as ``{"role": ..., "content": ...}`` dicts.
            max_tokens: Optional completion-token cap, forwarded in ``params``.
            stream: When True, return a generator of ``ChatCompletionChunk``.
            temperature: Optional sampling temperature, forwarded in ``params``.
            top_p: Optional nucleus-sampling value, forwarded in ``params``.
            timeout: Per-request timeout override (seconds).
            proxies: Per-request proxy mapping override.
            **kwargs: MCPCore extras — ``params``, ``tool_servers``,
                ``features``, ``chat_id``, ``stream_options``.

        Returns:
            ``ChatCompletion``, or a generator of ``ChatCompletionChunk`` when
            ``stream=True``.

        Raises:
            ValueError: If ``model`` is not supported.
            IOError: On network or API failures (raised lazily for streams).
        """
        if model not in self._client.AVAILABLE_MODELS:
            raise ValueError(f"Model '{model}' not supported by MCPCore. Available: {self._client.AVAILABLE_MODELS}")

        # Construct the MCPCore-specific payload.
        payload = {
            "stream": stream,
            "model": model,
            "messages": messages,
            "params": kwargs.get("params", {}),
            "tool_servers": kwargs.get("tool_servers", []),
            "features": kwargs.get("features", {"web_search": False}),
            "chat_id": kwargs.get("chat_id", str(uuid.uuid4())),
            "id": str(uuid.uuid4()),  # Message ID
            "stream_options": kwargs.get("stream_options", {"include_usage": True})
        }

        # Map optional OpenAI-style knobs into MCPCore's 'params' field.
        if temperature is not None:
            payload["params"]["temperature"] = temperature
        if top_p is not None:
            payload["params"]["top_p"] = top_p
        if max_tokens is not None:
            payload["params"]["max_tokens"] = max_tokens

        # Generate standard OpenAI-compatible IDs.
        request_id = f"chatcmpl-{uuid.uuid4()}"
        created_time = int(time.time())

        if stream:
            return self._create_stream(request_id, created_time, model, payload, timeout, proxies)
        return self._create_non_stream_from_stream(request_id, created_time, model, payload, timeout, proxies)

    def _create_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> Generator[ChatCompletionChunk, None, None]:
        """Handles the streaming response from MCPCore.

        Translates the server's SSE ``data:`` lines into
        ``ChatCompletionChunk`` objects and always finishes with a terminal
        ``finish_reason="stop"`` chunk.
        """
        final_usage_data = None  # usage payload, if the server reports one
        try:
            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                stream=True,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None),
                impersonate="chrome110"  # Impersonation often helps
            )

            if not response.ok:
                try:
                    error_text = response.text
                except Exception:
                    error_text = "<Failed to read error response>"
                raise IOError(
                    f"MCPCore API Error: {response.status_code} {response.reason} - {error_text}"
                )

            for line_bytes in response.iter_lines():
                if not line_bytes:
                    continue
                try:
                    line = line_bytes.decode('utf-8').strip()
                    # Only SSE payload lines are interesting.
                    if not line.startswith("data: "):
                        continue
                    json_str = line[6:]
                    if json_str == "[DONE]":
                        break  # End of stream signal

                    json_data = json.loads(json_str)

                    # Usage data, when present, usually arrives near the end.
                    if json_data.get('usage'):
                        final_usage_data = json_data['usage']
                        # Don't yield a chunk just for usage.

                    if json_data.get('choices'):
                        choice_data = json_data['choices'][0]
                        delta_data = choice_data.get('delta', {})
                        finish_reason = choice_data.get('finish_reason')
                        content = delta_data.get('content')
                        role = delta_data.get('role', 'assistant')  # Default role

                        # Only yield chunks carrying content or a finish reason.
                        if content is not None or finish_reason:
                            yield ChatCompletionChunk(
                                id=request_id,
                                choices=[Choice(index=0, delta=ChoiceDelta(content=content, role=role), finish_reason=finish_reason)],
                                created=created_time,
                                model=model,
                                system_fingerprint=json_data.get('system_fingerprint')
                            )

                except (json.JSONDecodeError, UnicodeDecodeError):
                    print(f"{RED}Warning: Could not decode JSON line: {line}{RESET}")
                    continue
                except Exception as e:
                    print(f"{RED}Error processing stream line: {e} - Line: {line}{RESET}")
                    continue

            # Final chunk to signal generator termination (even if [DONE] was
            # already received).
            # NOTE(review): MCPCore reports token usage on the stream
            # (captured in final_usage_data above), but it is not attached to
            # any yielded chunk here — confirm whether downstream consumers
            # need it before wiring it through.
            yield ChatCompletionChunk(
                id=request_id,
                choices=[Choice(index=0, delta=ChoiceDelta(), finish_reason="stop")],
                created=created_time,
                model=model,
            )

        except CurlError as e:
            print(f"{RED}CurlError during MCPCore stream request: {e}{RESET}")
            raise IOError(f"MCPCore request failed due to network/curl issue: {e}") from e
        except Exception as e:
            print(f"{RED}Unexpected error during MCPCore stream: {e}{RESET}")
            error_details = ""
            if hasattr(e, 'response') and e.response is not None:
                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
            raise IOError(f"MCPCore stream processing failed: {e}{error_details}") from e

    def _create_non_stream_from_stream(
        self, request_id: str, created_time: int, model: str, payload: Dict[str, Any], timeout: Optional[int] = None, proxies: Optional[Dict[str, str]] = None
    ) -> ChatCompletion:
        """Handles the non-streaming response by making a single POST request (like deepinfra)."""
        try:
            # Ensure stream is False for non-streaming; copy to avoid mutating
            # the caller's payload.
            payload = dict(payload)
            payload["stream"] = False

            response = self._client.session.post(
                self._client.api_endpoint,
                headers=self._client.headers,
                json=payload,
                timeout=timeout or self._client.timeout,
                proxies=proxies or getattr(self._client, "proxies", None),
                impersonate="chrome110"
            )
            if not response.ok:
                try:
                    error_text = response.text
                except Exception:
                    error_text = "<Failed to read error response>"
                raise IOError(
                    f"MCPCore API Error: {response.status_code} {response.reason} - {error_text}"
                )

            data = response.json()
            choices_data = data.get('choices', [])
            usage_data = data.get('usage', {})

            choices = []
            for choice_d in choices_data:
                message_d = choice_d.get('message', {})
                message = ChatCompletionMessage(
                    role=message_d.get('role', 'assistant'),
                    content=message_d.get('content', '')
                )
                choices.append(Choice(
                    index=choice_d.get('index', 0),
                    message=message,
                    finish_reason=choice_d.get('finish_reason', 'stop')
                ))

            usage = CompletionUsage(
                prompt_tokens=usage_data.get('prompt_tokens', 0),
                completion_tokens=usage_data.get('completion_tokens', 0),
                total_tokens=usage_data.get('total_tokens', 0)
            )

            return ChatCompletion(
                id=request_id,
                choices=choices,
                created=created_time,
                model=data.get('model', model),
                usage=usage,
            )

        except CurlError as e:
            print(f"{RED}CurlError during MCPCore non-stream request: {e}{RESET}")
            raise IOError(f"MCPCore request failed due to network/curl issue: {e}") from e
        except Exception as e:
            print(f"{RED}Unexpected error during MCPCore non-stream: {e}{RESET}")
            error_details = ""
            if hasattr(e, 'response') and e.response is not None:
                error_details = f" - Status: {e.response.status_code}, Response: {e.response.text}"
            raise IOError(f"MCPCore non-stream processing failed: {e}{error_details}") from e
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
class Chat(BaseChat):
    """Chat namespace exposing the completions endpoint, OpenAI-style."""

    def __init__(self, client: 'MCPCore'):
        # Mirror openai's client.chat.completions layout.
        self.completions = Completions(client)
|
|
273
|
-
|
|
274
|
-
|
|
275
|
-
class MCPCore(OpenAICompatibleProvider):
    """
    OpenAI-compatible client for the MCPCore API (chat.mcpcore.xyz).

    Usage:
        client = MCPCore()
        response = client.chat.completions.create(
            model="google/gemma-7b-it",
            messages=[{"role": "user", "content": "Hello!"}]
        )
        print(response.choices[0].message.content)
    """
    AVAILABLE_MODELS = [
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
        "@cf/defog/sqlcoder-7b-2",
        "@cf/fblgit/una-cybertron-7b-v2-bf16",
        "@cf/google/gemma-3-12b-it",
        "@cf/meta/llama-2-7b-chat-int8",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "anthropic/claude-3.5-haiku",
        "anthropic/claude-3.5-sonnet",
        "anthropic/claude-3.7-sonnet",
        "anthropic/claude-3.7-sonnet:thinking",
        "anthropic/claude-opus-4",
        "anthropic/claude-sonnet-4",
        "openai/chatgpt-4o-latest",
        "openai/gpt-3.5-turbo",
        "openai/gpt-4.1",
        "openai/gpt-4.1-mini",
        "openai/gpt-4.1-nano",
        "openai/gpt-4o-mini-search-preview",
        "openai/gpt-4o-search-preview",
        "openai/o1-pro",
        "openai/o3-mini",
        "sarvam-m",
        "x-ai/grok-3-beta",
    ]

    def _auto_fetch_token(self):
        """Automatically fetch a token by signing up a throwaway account.

        Returns:
            The session token string from the signup response body, or from
            the ``set-cookie`` header as a fallback.

        Raises:
            RuntimeError: If signup fails or no token can be extracted.
        """
        session = Session()

        def random_string(length=8):
            return ''.join(random.choices(string.ascii_lowercase, k=length))

        name = random_string(6)
        email = f"{random_string(8)}@gmail.com"
        password = email  # throwaway account: password simply mirrors the email
        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
        payload = {
            "name": name,
            "email": email,
            "password": password,
            "profile_image_url": profile_image_url
        }
        headers = {
            **LitAgent().generate_fingerprint(),
            'origin': 'https://chat.mcpcore.xyz',
            'referer': 'https://chat.mcpcore.xyz/auth',
        }
        try:
            resp = session.post(
                "https://chat.mcpcore.xyz/api/v1/auths/signup",
                headers=headers,
                json=payload,
                timeout=30,
                impersonate="chrome110"
            )
            if resp.ok:
                data = resp.json()
                token = data.get("token")
                if token:
                    return token
                # fallback: try to get the token from set-cookie
                set_cookie = resp.headers.get("set-cookie", "")
                if "token=" in set_cookie:
                    return set_cookie.split("token=")[1].split(";")[0]
            raise RuntimeError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
        except RuntimeError:
            # Already a descriptive error; don't double-wrap it.
            raise
        except Exception as e:
            # Preserve the original cause for debugging via exception chaining.
            raise RuntimeError(f"Token auto-fetch failed: {e}") from e

    def __init__(
        self,
        timeout: int = 60,
    ):
        """
        Initializes the MCPCore OpenAI-compatible client.

        Args:
            timeout: Request timeout in seconds.
        """
        self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"
        self.timeout = timeout
        self.token = self._auto_fetch_token()
        self.session = Session()  # curl_cffi Session for browser impersonation

        # Browser-fingerprint headers plus the site-specific origin/referer.
        # NOTE(review): self.token is fetched but never attached as an
        # Authorization header or cookie on this session — confirm whether the
        # API actually requires it, or wire it through.
        self.headers = {
            **LitAgent().generate_fingerprint(),
            'origin': 'https://chat.mcpcore.xyz',
            'referer': 'https://chat.mcpcore.xyz/auth',
        }
        self.session.headers.update(self.headers)
        self.chat = Chat(self)  # Initialize chat interface

    @property
    def models(self):
        """OpenAI-style accessor: ``client.models.list()`` returns the model IDs."""
        outer = self

        class _ModelList:
            def list(self):
                return type(outer).AVAILABLE_MODELS

        return _ModelList()
|
|
399
|
-
|
|
400
|
-
if __name__ == "__main__":
    # Smoke-test every advertised model with a one-word prompt and print a
    # fixed-width status table.
    print("-" * 100)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 100)

    test_prompt = "Say 'Hello' in one word"
    client = MCPCore()

    for model in client.models.list():
        print(f"\rTesting {model}...", end="")
        try:
            reply = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": test_prompt}]
            )
            # Default to failure; upgrade only when real text comes back.
            status = "✗"
            display_text = "Empty or invalid response"
            if getattr(reply, "choices", None) and hasattr(reply.choices[0], "message"):
                raw = reply.choices[0].message.content or ""
                clean_text = raw.strip().encode('utf-8', errors='ignore').decode('utf-8')
                if clean_text:
                    status = "✓"
                    # Truncate long answers to 100 characters for the table.
                    display_text = clean_text[:100] + "..." if len(clean_text) > 100 else clean_text
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as exc:
            error_msg = str(exc)
            if len(error_msg) > 100:
                error_msg = error_msg[:97] + "..."
            print(f"\r{model:<50} {'✗':<10} Error: {error_msg}")
|