webscout-8.3.6-py3-none-any.whl → webscout-8.3.7-py3-none-any.whl
This diff shows the changes between publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +2 -0
- webscout/Provider/AISEARCH/__init__.py +18 -11
- webscout/Provider/AISEARCH/scira_search.py +3 -1
- webscout/Provider/Aitopia.py +2 -3
- webscout/Provider/Andi.py +3 -3
- webscout/Provider/ChatGPTClone.py +1 -1
- webscout/Provider/ChatSandbox.py +1 -0
- webscout/Provider/Cloudflare.py +1 -1
- webscout/Provider/Cohere.py +1 -0
- webscout/Provider/Deepinfra.py +7 -10
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +1 -80
- webscout/Provider/Flowith.py +1 -1
- webscout/Provider/Gemini.py +7 -5
- webscout/Provider/GeminiProxy.py +1 -0
- webscout/Provider/GithubChat.py +3 -1
- webscout/Provider/Groq.py +1 -1
- webscout/Provider/HeckAI.py +8 -4
- webscout/Provider/Jadve.py +23 -38
- webscout/Provider/K2Think.py +308 -0
- webscout/Provider/Koboldai.py +8 -186
- webscout/Provider/LambdaChat.py +2 -4
- webscout/Provider/Nemotron.py +3 -4
- webscout/Provider/Netwrck.py +3 -2
- webscout/Provider/OLLAMA.py +1 -0
- webscout/Provider/OPENAI/Cloudflare.py +6 -7
- webscout/Provider/OPENAI/FalconH1.py +2 -7
- webscout/Provider/OPENAI/FreeGemini.py +6 -8
- webscout/Provider/OPENAI/{monochat.py → K2Think.py} +180 -77
- webscout/Provider/OPENAI/NEMOTRON.py +3 -6
- webscout/Provider/OPENAI/PI.py +5 -4
- webscout/Provider/OPENAI/Qwen3.py +2 -3
- webscout/Provider/OPENAI/TogetherAI.py +2 -2
- webscout/Provider/OPENAI/TwoAI.py +3 -4
- webscout/Provider/OPENAI/__init__.py +17 -58
- webscout/Provider/OPENAI/ai4chat.py +313 -303
- webscout/Provider/OPENAI/base.py +9 -29
- webscout/Provider/OPENAI/chatgpt.py +7 -2
- webscout/Provider/OPENAI/chatgptclone.py +4 -7
- webscout/Provider/OPENAI/chatsandbox.py +84 -59
- webscout/Provider/OPENAI/deepinfra.py +6 -6
- webscout/Provider/OPENAI/heckai.py +4 -1
- webscout/Provider/OPENAI/netwrck.py +1 -0
- webscout/Provider/OPENAI/scirachat.py +6 -0
- webscout/Provider/OPENAI/textpollinations.py +3 -11
- webscout/Provider/OPENAI/toolbaz.py +14 -11
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/Openai.py +150 -402
- webscout/Provider/PI.py +1 -0
- webscout/Provider/Perplexitylabs.py +1 -2
- webscout/Provider/QwenLM.py +107 -89
- webscout/Provider/STT/__init__.py +17 -2
- webscout/Provider/{Llama3.py → Sambanova.py} +9 -10
- webscout/Provider/StandardInput.py +1 -1
- webscout/Provider/TTI/__init__.py +18 -12
- webscout/Provider/TTS/__init__.py +18 -10
- webscout/Provider/TeachAnything.py +1 -0
- webscout/Provider/TextPollinationsAI.py +5 -12
- webscout/Provider/TogetherAI.py +86 -87
- webscout/Provider/TwoAI.py +53 -309
- webscout/Provider/TypliAI.py +2 -1
- webscout/Provider/{GizAI.py → UNFINISHED/GizAI.py} +1 -1
- webscout/Provider/Venice.py +2 -1
- webscout/Provider/VercelAI.py +1 -0
- webscout/Provider/WiseCat.py +2 -1
- webscout/Provider/WrDoChat.py +2 -1
- webscout/Provider/__init__.py +18 -86
- webscout/Provider/ai4chat.py +1 -1
- webscout/Provider/akashgpt.py +7 -10
- webscout/Provider/cerebras.py +115 -9
- webscout/Provider/chatglm.py +170 -83
- webscout/Provider/cleeai.py +1 -2
- webscout/Provider/deepseek_assistant.py +1 -1
- webscout/Provider/elmo.py +1 -1
- webscout/Provider/geminiapi.py +1 -1
- webscout/Provider/granite.py +1 -1
- webscout/Provider/hermes.py +1 -3
- webscout/Provider/julius.py +1 -0
- webscout/Provider/learnfastai.py +1 -1
- webscout/Provider/llama3mitril.py +1 -1
- webscout/Provider/llmchat.py +1 -1
- webscout/Provider/llmchatco.py +1 -1
- webscout/Provider/meta.py +3 -3
- webscout/Provider/oivscode.py +2 -2
- webscout/Provider/scira_chat.py +51 -124
- webscout/Provider/searchchat.py +1 -0
- webscout/Provider/sonus.py +1 -1
- webscout/Provider/toolbaz.py +15 -12
- webscout/Provider/turboseek.py +31 -22
- webscout/Provider/typefully.py +2 -1
- webscout/Provider/x0gpt.py +1 -0
- webscout/Provider/yep.py +2 -1
- webscout/tempid.py +6 -0
- webscout/version.py +1 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/METADATA +2 -1
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/RECORD +103 -129
- webscout/Provider/AllenAI.py +0 -440
- webscout/Provider/Blackboxai.py +0 -793
- webscout/Provider/FreeGemini.py +0 -250
- webscout/Provider/GptOss.py +0 -207
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Kimi.py +0 -445
- webscout/Provider/MCPCore.py +0 -322
- webscout/Provider/MiniMax.py +0 -207
- webscout/Provider/OPENAI/BLACKBOXAI.py +0 -1045
- webscout/Provider/OPENAI/MiniMax.py +0 -298
- webscout/Provider/OPENAI/autoproxy.py +0 -1067
- webscout/Provider/OPENAI/copilot.py +0 -321
- webscout/Provider/OPENAI/gptoss.py +0 -288
- webscout/Provider/OPENAI/kimi.py +0 -469
- webscout/Provider/OPENAI/mcpcore.py +0 -431
- webscout/Provider/OPENAI/multichat.py +0 -378
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/UNFINISHED/fetch_together_models.py +0 -90
- webscout/Provider/asksteve.py +0 -220
- webscout/Provider/copilot.py +0 -441
- webscout/Provider/freeaichat.py +0 -294
- webscout/Provider/koala.py +0 -182
- webscout/Provider/lmarena.py +0 -198
- webscout/Provider/monochat.py +0 -275
- webscout/Provider/multichat.py +0 -375
- webscout/Provider/scnet.py +0 -244
- webscout/Provider/talkai.py +0 -194
- /webscout/Provider/{Marcus.py → UNFINISHED/Marcus.py} +0 -0
- /webscout/Provider/{Qodo.py → UNFINISHED/Qodo.py} +0 -0
- /webscout/Provider/{XenAI.py → UNFINISHED/XenAI.py} +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/WHEEL +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/entry_points.txt +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.3.6.dist-info → webscout-8.3.7.dist-info}/top_level.txt +0 -0
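Several provider modules are removed outright in 8.3.7 (MCPCore, MiniMax, Kimi, Blackboxai, copilot, and others; two of the deletions are reproduced below). Downstream code that imports any of them will break on upgrade; a guarded import with a fallback is one way to stay compatible across both versions. A minimal sketch, with the caveat that the fallback class name `Deepinfra` is an assumption, not confirmed webscout API:

```python
# Compatibility sketch: use a provider removed in 8.3.7 when it still
# exists (8.3.6), otherwise fall back to a module that survives the release.
# The fallback class name "Deepinfra" is hypothetical; check your installed
# version's webscout/Provider/Deepinfra.py for the real export.
try:
    from webscout.Provider.MCPCore import MCPCore as ChatProvider  # removed in 8.3.7
except ImportError:
    from webscout.Provider.Deepinfra import Deepinfra as ChatProvider  # hypothetical fallback

bot = ChatProvider()
print(bot.chat("Hello", stream=False))
```

Pinning `webscout==8.3.6` is the other obvious mitigation if the removed providers are hard dependencies.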
webscout/Provider/MCPCore.py
DELETED
@@ -1,322 +0,0 @@

import json
import uuid
import random
import string
from typing import Any, Dict, Generator, Union

# Use curl_cffi for requests
from curl_cffi.requests import Session
from curl_cffi import CurlError

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts, sanitize_stream
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent

class MCPCore(Provider):
    """
    A class to interact with the chat.mcpcore.xyz API.
    Supports streaming responses.
    """

    # Add more models if known, starting with the one from the example
    AVAILABLE_MODELS = [
        "@cf/deepseek-ai/deepseek-math-7b-instruct",
        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
        "@cf/defog/sqlcoder-7b-2",
        "@cf/fblgit/una-cybertron-7b-v2-bf16",
        "@cf/google/gemma-3-12b-it",
        "@cf/meta/llama-2-7b-chat-int8",
        "@hf/thebloke/llama-2-13b-chat-awq",
        "@hf/thebloke/llamaguard-7b-awq",
        "@hf/thebloke/mistral-7b-instruct-v0.1-awq",
        "@hf/thebloke/neural-chat-7b-v3-1-awq",
        "anthropic/claude-3.5-haiku",
        "anthropic/claude-3.5-sonnet",
        "anthropic/claude-3.7-sonnet",
        "anthropic/claude-3.7-sonnet:thinking",
        "anthropic/claude-opus-4",
        "anthropic/claude-sonnet-4",
        "openai/chatgpt-4o-latest",
        "openai/gpt-3.5-turbo",
        "openai/gpt-4.1",
        "openai/gpt-4.1-mini",
        "openai/gpt-4.1-nano",
        "openai/gpt-4o-mini-search-preview",
        "openai/gpt-4o-search-preview",
        "openai/o1-pro",
        "openai/o3-mini",
        "sarvam-m",
        "x-ai/grok-3-beta",
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 60,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "qwen3-32b",
        system_prompt: str = "You are a helpful assistant.",
    ):
        """Initializes the MCPCore API client."""
        if model not in self.AVAILABLE_MODELS:
            print(f"Warning: Model '{model}' is not listed in AVAILABLE_MODELS. Proceeding with the provided model.")

        self.api_endpoint = "https://chat.mcpcore.xyz/api/chat/completions"

        self.model = model
        self.system_prompt = system_prompt

        # Initialize curl_cffi Session
        self.session = Session()

        # Set up headers based on the provided request
        self.headers = {
            **LitAgent().generate_fingerprint(),
            'origin': 'https://chat.mcpcore.xyz',
            'referer': 'https://chat.mcpcore.xyz/',
        }

        # Apply headers, proxies, and cookies to the session
        self.session.headers.update(self.headers)
        self.session.proxies = proxies

        # Provider settings
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        # Initialize optimizers
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method))
            and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # Token handling: always auto-fetch token, no cookies logic
        self.token = self._auto_fetch_token()

        # Set the Authorization header for the session
        self.session.headers.update({
            'authorization': f'Bearer {self.token}',
        })

    def _auto_fetch_token(self):
        """Automatically fetch a token from the signup endpoint."""
        session = Session()
        def random_string(length=8):
            return ''.join(random.choices(string.ascii_lowercase, k=length))
        name = random_string(6)
        email = f"{random_string(8)}@gmail.com"
        password = email
        profile_image_url = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAGQAAABkCAYAAABw4pVUAAAAAXNSR0IArs4c6QAAAkRJREFUeF7tmDFOw0AUBdcSiIaKM3CKHIQ7UHEISq5AiUTFHYC0XADoTRsJEZFEjhFIaYAim92fjGFS736/zOTZzjavl0d98oMh0CgE4+IriEJYPhQC86EQhdAIwPL4DFEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg1RCIwALI4NUQiMACyODVEIjAAsjg2BCfkAIqwAA94KZ/EAAAAASUVORK5CYII="
        payload = {
            "name": name,
            "email": email,
            "password": password,
            "profile_image_url": profile_image_url
        }
        headers = {
            **LitAgent().generate_fingerprint(),
            'origin': 'https://chat.mcpcore.xyz',
            'referer': 'https://chat.mcpcore.xyz/auth',
        }
        try:
            resp = session.post(
                "https://chat.mcpcore.xyz/api/v1/auths/signup",
                headers=headers,
                json=payload,
                timeout=30,
                impersonate="chrome110"
            )
            if resp.ok:
                data = resp.json()
                token = data.get("token")
                if token:
                    return token
                # fallback: try to get from set-cookie
                set_cookie = resp.headers.get("set-cookie", "")
                if "token=" in set_cookie:
                    return set_cookie.split("token=")[1].split(";")[0]
            raise exceptions.FailedToGenerateResponseError(f"Failed to auto-fetch token: {resp.status_code} {resp.text}")
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Token auto-fetch failed: {e}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        **kwargs
    ) -> Union[Dict[str, Any], Generator]:
        """Sends a prompt to the MCPCore API and returns the response."""

        conversation_prompt = self.conversation.gen_complete_prompt(prompt)

        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.InvalidOptimizerError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        chat_id = kwargs.get("chat_id", str(uuid.uuid4()))
        message_id = str(uuid.uuid4())

        payload = {
            "stream": stream,
            "model": self.model,
            "messages": [
                {"role": "system", "content": self.system_prompt},
                {"role": "user", "content": conversation_prompt}
            ],
            "params": kwargs.get("params", {}),
            "tool_servers": kwargs.get("tool_servers", []),
            "features": kwargs.get("features", {"web_search": False}),
            "chat_id": chat_id,
            "id": message_id,
            "stream_options": kwargs.get("stream_options", {"include_usage": True})
        }

        def for_stream():
            streaming_text = ""
            try:
                response = self.session.post(
                    self.api_endpoint,
                    json=payload,
                    stream=True,
                    timeout=self.timeout,
                    impersonate="chrome110"
                )
                response.raise_for_status()

                # Use sanitize_stream
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Pass byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    skip_markers=["[DONE]"],
                    content_extractor=lambda chunk: chunk.get('choices', [{}])[0].get('delta', {}).get('content') if isinstance(chunk, dict) else None,
                    yield_raw_on_error=False  # Skip non-JSON or lines where extractor fails
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by the content_extractor
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        yield dict(text=content_chunk) if not raw else content_chunk

                self.last_response = {"text": streaming_text}
                self.conversation.update_chat_history(prompt, self.get_message(self.last_response))

            except CurlError as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {e}") from e
            except Exception as e:
                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
                raise exceptions.FailedToGenerateResponseError(f"An unexpected error occurred ({type(e).__name__}): {e} - {err_text}") from e

        def for_non_stream():
            full_text = ""
            try:
                stream_generator = self.ask(
                    prompt, stream=True, raw=False, optimizer=optimizer, conversationally=conversationally, **kwargs
                )
                for chunk_data in stream_generator:
                    if isinstance(chunk_data, dict):
                        full_text += chunk_data["text"]
                    elif isinstance(chunk_data, str):
                        full_text += chunk_data
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Failed to aggregate non-stream response: {str(e)}") from e

            return full_text if raw else self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        **kwargs
    ) -> Union[str, Generator[str, None, None]]:
        """Generates a response from the MCPCore API."""

        def for_stream_chat() -> Generator[str, None, None]:
            gen = self.ask(
                prompt, stream=True, raw=False,
                optimizer=optimizer, conversationally=conversationally, **kwargs
            )
            for response_dict in gen:
                yield self.get_message(response_dict)

        def for_non_stream_chat() -> str:
            response_data = self.ask(
                prompt, stream=False, raw=False,
                optimizer=optimizer, conversationally=conversationally, **kwargs
            )
            return self.get_message(response_data)

        return for_stream_chat() if stream else for_non_stream_chat()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extracts the message from the API response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")

# Example usage (no cookies file needed)
if __name__ == "__main__":
    from rich import print

    print("-" * 80)
    print(f"{'Model':<50} {'Status':<10} {'Response'}")
    print("-" * 80)

    for model in MCPCore.AVAILABLE_MODELS:
        try:
            test_ai = MCPCore(model=model, timeout=60)
            response = test_ai.chat("Say 'Hello' in one word", stream=True)
            response_text = ""
            # Accumulate the response text without printing in the loop
            for chunk in response:
                response_text += chunk

            if response_text and len(response_text.strip()) > 0:
                status = "✓"
                # Truncate response if too long
                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
            else:
                status = "✗"
                display_text = "Empty or invalid response"
            # Print the final status and response, overwriting the "Testing..." line
            print(f"\r{model:<50} {status:<10} {display_text}")
        except Exception as e:
            # Print error, overwriting the "Testing..." line
            print(f"\r{model:<50} {'✗':<10} {str(e)}")
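The deleted provider delegates all server-sent-events plumbing to webscout's `sanitize_stream` helper. For readers without the library at hand, here is a self-contained sketch of the same parsing steps it is configured for above (strip the `data:` prefix, stop at `[DONE]`, decode JSON, pull the delta content); the function name and shape are illustrative, not webscout API:

```python
import json
from typing import Iterable, Iterator, Union

def iter_sse_content(lines: Iterable[Union[bytes, str]]) -> Iterator[str]:
    """Yield delta text from 'data:'-prefixed SSE lines, stopping at [DONE]."""
    for raw in lines:
        line = raw.decode("utf-8", errors="ignore") if isinstance(raw, bytes) else raw
        line = line.strip()
        if not line.startswith("data:"):
            continue
        payload = line[len("data:"):].strip()
        if payload == "[DONE]":
            break
        try:
            chunk = json.loads(payload)
        except json.JSONDecodeError:
            continue  # mirrors yield_raw_on_error=False: drop undecodable lines
        content = chunk.get("choices", [{}])[0].get("delta", {}).get("content")
        if content:
            yield content
```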
webscout/Provider/MiniMax.py
DELETED
@@ -1,207 +0,0 @@

import os
import json
import requests
from typing import Any, Dict, Optional, Union, Generator
from webscout.AIutel import sanitize_stream, Optimizers, Conversation, AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions

class MiniMax(Provider):
    """
    Provider for MiniMax-Reasoning-01 API, following the standard provider interface.
    """
    AVAILABLE_MODELS = ["MiniMax-Reasoning-01"]
    API_URL = "https://api.minimaxi.chat/v1/text/chatcompletion_v2"
    # TODO: Move API_KEY to env/config for security
    API_KEY = os.environ.get("MINIMAX_API_KEY") or """eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJtbyBuaSIsIlVzZXJOYW1lIjoibW8gbmkiLCJBY2NvdW50IjoiIiwiU3ViamVjdElEIjoiMTg3NjIwMDY0ODA2NDYzNTI0MiIsIlBob25lIjoiIiwiR3JvdXBJRCI6IjE4NzYyMDA2NDgwNjA0NDA5MzgiLCJQYWdlTmFtZSI6IiIsIk1haWwiOiJuaW1vQHN1YnN1cC52aXAiLCJDcmVhdGVUaW1lIjoiMjAyNS0wMS0wNyAxMToyNzowNyIsIlRva2VuVHlwZSI6MSwiaXNzIjoibWluaW1heCJ9.Ge1ZnpFPUfXVdMini0P_qXbP_9VYwzXiffG9DsNQck4GtYEOs33LDeAiwrVsrrLZfvJ2icQZ4sRZS54wmPuWua_Dav6pYJty8ZtahmUX1IuhlUX5YErhhCRAIy3J1xB8FkLHLyylChuBHpkNz6O6BQLmPqmoa-cOYK9Qrc6IDeu8SX1iMzO9-MSkcWNvkvpCF2Pf9tekBVWNKMDK6IZoMEPbtkaPXdDyP6l0M0e2AlL_E0oM9exg3V-ohAi8OTPFyqM6dcd4TwF-b9DULxfIsRFw401mvIxcTDWa42u2LULewdATVRD2BthU65tuRqEiWeFWMvFlPj2soMze_QIiUA"""
    MODEL_CONTROL_DEFAULTS = {"tokens_to_generate": 40000, "temperature": 1, "top_p": 0.95}

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2049,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        model: str = "MiniMax-Reasoning-01",
        system_prompt: str = "You are a helpful assistant, always respond in english",
    ):
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
        self.model = model
        self.api_url = self.API_URL
        self.api_key = self.API_KEY
        self.timeout = timeout
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.last_response = {}
        self.system_prompt = system_prompt
        self.proxies = proxies
        self.__available_optimizers = tuple(
            method for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

    @staticmethod
    def _extract_content(chunk: Any) -> Optional[dict]:
        if not isinstance(chunk, dict):
            return None
        choice = chunk.get('choices', [{}])[0]
        delta = choice.get('delta', {})
        content = delta.get('content')
        reasoning = delta.get('reasoning_content')
        result = {}
        if content:
            result['content'] = content
        if reasoning:
            result['reasoning_content'] = reasoning
        return result if result else None

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
        messages = [
            {'role': 'system', 'content': self.system_prompt},
            {'role': 'user', 'content': conversation_prompt}
        ]
        data = {
            'model': self.model,
            'messages': messages,
            'stream': True,
            'max_tokens': self.MODEL_CONTROL_DEFAULTS.get('tokens_to_generate', 512),
            'temperature': self.MODEL_CONTROL_DEFAULTS.get('temperature', 1.0),
            'top_p': self.MODEL_CONTROL_DEFAULTS.get('top_p', 1.0),
        }
        headers = {
            'Content-Type': 'application/json',
            'Authorization': f'Bearer {self.api_key}',
        }
        def for_stream():
            try:
                response = requests.post(
                    self.api_url,
                    headers=headers,
                    data=json.dumps(data),
                    stream=True,
                    timeout=self.timeout,
                    proxies=self.proxies if self.proxies else None
                )
                if not response.ok:
                    raise exceptions.FailedToGenerateResponseError(
                        f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                    )
                streaming_response = ""
                last_content = ""
                last_reasoning = ""
                in_think = False
                processed_stream = sanitize_stream(
                    response.iter_lines(),
                    intro_value="data:",
                    to_json=True,
                    content_extractor=self._extract_content,
                    raw=False  # always process as dict for logic
                )
                for chunk in processed_stream:
                    if not chunk:
                        continue
                    content = chunk.get('content') if isinstance(chunk, dict) else None
                    reasoning = chunk.get('reasoning_content') if isinstance(chunk, dict) else None
                    # Handle reasoning_content with <think> tags
                    if reasoning and reasoning != last_reasoning:
                        if not in_think:
                            yield "<think>\n\n"
                            in_think = True
                        yield reasoning
                        last_reasoning = reasoning
                    # If we were in <think> and now have new content, close <think>
                    if in_think and content and content != last_content:
                        yield "</think>\n\n"
                        in_think = False
                    # Handle normal content
                    if content and content != last_content:
                        yield content
                        streaming_response += content
                        last_content = content
                if not raw:
                    self.last_response = {"text": streaming_response}
                    self.conversation.update_chat_history(prompt, streaming_response)
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {e}")
        def for_non_stream():
            full_response = ""
            for chunk in for_stream():
                if isinstance(chunk, dict) and "text" in chunk:
                    full_response += chunk["text"]
                elif isinstance(chunk, str):
                    full_response += chunk
            if not raw:
                self.last_response = {"text": full_response}
                self.conversation.update_chat_history(prompt, full_response)
                return {"text": full_response}
            else:
                return full_response
        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
        raw: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        def for_stream():
            for response in self.ask(
                prompt, stream=True, raw=raw, optimizer=optimizer, conversationally=conversationally
            ):
                if raw:
                    yield response
                else:
                    yield response
        def for_non_stream():
            result = self.ask(
                prompt, stream=False, raw=raw, optimizer=optimizer, conversationally=conversationally
            )
            if raw:
                return result
            else:
                return self.get_message(result)
        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")

if __name__ == "__main__":
    ai = MiniMax()
    resp = ai.chat("What is the capital of France?", stream=True, raw=False)
    for chunk in resp:
        print(chunk, end="", flush=True)
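Note that this deleted module shipped a hardcoded fallback JWT as its `API_KEY`, a problem its own `# TODO: Move API_KEY to env/config for security` comment acknowledges; removing the file removes the embedded credential. A minimal sketch of the env-only key handling the TODO points toward (illustrative, not webscout code):

```python
import os

# Read the key strictly from the environment and fail fast, instead of
# silently falling back to a bearer token embedded in the source tree.
API_KEY = os.environ.get("MINIMAX_API_KEY")
if not API_KEY:
    raise RuntimeError("MINIMAX_API_KEY is not set; refusing to use an embedded key.")
```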